GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmbzip2/compress.c
/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting) ---*/
/*--- compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
   This file is part of bzip2/libbzip2, a program and library for
   lossless, block-sorting data compression.

   bzip2/libbzip2 version 1.0.8 of 13 July 2019
   Copyright (C) 1996-2019 Julian Seward <[email protected]>

   Please read the WARNING, DISCLAIMER and PATENTS sections in the
   README file.

   This program is released under the terms of the license contained
   in the file LICENSE.
   ------------------------------------------------------------------ */

/* CHANGES
   0.9.0    -- original version.
   0.9.0a/b -- no changes in this file.
   0.9.0c   -- changed setting of nGroups in sendMTFValues()
               so as to do a bit better on small files
*/

#include "bzlib_private.h"

/*---------------------------------------------------*/
/*--- Bit stream I/O ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
void BZ2_bsInitWrite ( EState* s )
{
   s->bsLive = 0;
   s->bsBuff = 0;
}

/*---------------------------------------------------*/
static
void bsFinishWrite ( EState* s )
{
   while (s->bsLive > 0) {
      s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
      s->numZ++;
      s->bsBuff <<= 8;
      s->bsLive -= 8;
   }
}

/*---------------------------------------------------*/
#define bsNEEDW(nz) \
{ \
   while (s->bsLive >= 8) { \
      s->zbits[s->numZ] \
         = (UChar)(s->bsBuff >> 24); \
      s->numZ++; \
      s->bsBuff <<= 8; \
      s->bsLive -= 8; \
   } \
}

/*---------------------------------------------------*/
static
__inline__
void bsW ( EState* s, Int32 n, UInt32 v )
{
   bsNEEDW ( n );
   s->bsBuff |= (v << (32 - s->bsLive - n));
   s->bsLive += n;
}

/*---------------------------------------------------*/
static
void bsPutUInt32 ( EState* s, UInt32 u )
{
   bsW ( s, 8, (u >> 24) & 0xffL );
   bsW ( s, 8, (u >> 16) & 0xffL );
   bsW ( s, 8, (u >> 8) & 0xffL );
   bsW ( s, 8, u & 0xffL );
}

/*---------------------------------------------------*/
static
void bsPutUChar ( EState* s, UChar c )
{
   bsW( s, 8, (UInt32)c );
}
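
/* Note added in editing (not part of upstream bzip2): bsBuff is a 32-bit
   accumulator filled from the most significant bit downwards, and bsLive
   counts how many of its top bits are pending output.  bsNEEDW first drains
   whole bytes, so bsLive <= 7 before the OR in bsW, which leaves room for
   the largest single write in this file (the 24-bit origPtr).  For example,
   with bsLive == 3, bsW(s, 5, v) places v in bits 28..24 of bsBuff and
   leaves bsLive == 8; the next call's bsNEEDW then flushes that completed
   byte into s->zbits.  bsFinishWrite pads the final partial byte with zero
   bits at the end of the stream.
*/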

/*---------------------------------------------------*/
/*--- The back end proper ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e ( EState* s )
{
   Int32 i;
   s->nInUse = 0;
   for (i = 0; i < 256; i++)
      if (s->inUse[i]) {
         s->unseqToSeq[i] = s->nInUse;
         s->nInUse++;
      }
}
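
/* Note added in editing (not part of upstream bzip2): makeMaps_e builds
   unseqToSeq, which renumbers only the byte values that actually occur in
   the block into the dense range 0 .. nInUse-1.  The coding alphabet used
   below then has nInUse+2 symbols: RUNA and RUNB (which replace runs of
   MTF value zero), the nonzero MTF positions, and EOB at the top.
*/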

/*---------------------------------------------------*/
static
void generateMTFValues ( EState* s )
{
   UChar   yy[256];
   Int32   i, j;
   Int32   zPend;
   Int32   wr;
   Int32   EOB;

   /*
      After sorting (eg, here),
        s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
        and
        ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
        holds the original block data.

      The first thing to do is generate the MTF values,
      and put them in
        ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
      Because there are strictly fewer or equal MTF values
      than block values, ptr values in this area are overwritten
      with MTF values only when they are no longer needed.

      The final compressed bitstream is generated into the
      area starting at
        (UChar*) (&((UChar*)s->arr2)[s->nblock])

      These storage aliases are set up in bzCompressInit(),
      except for the last one, which is arranged in
      compressBlock().
   */
   UInt32* ptr   = s->ptr;
   UChar*  block = s->block;
   UInt16* mtfv  = s->mtfv;

#ifdef __clang_analyzer__
   memset(yy, 0, sizeof(yy));
#endif

   makeMaps_e ( s );
   EOB = s->nInUse+1;

   for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;

   wr = 0;
   zPend = 0;
   for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;

   for (i = 0; i < s->nblock; i++) {
      UChar ll_i;
      AssertD ( wr <= i, "generateMTFValues(1)" );
      j = ptr[i]-1; if (j < 0) j += s->nblock;
      ll_i = s->unseqToSeq[block[j]];
      AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );

      if (yy[0] == ll_i) {
         zPend++;
      } else {

         if (zPend > 0) {
            zPend--;
            while (True) {
               if (zPend & 1) {
                  mtfv[wr] = BZ_RUNB; wr++;
                  s->mtfFreq[BZ_RUNB]++;
               } else {
                  mtfv[wr] = BZ_RUNA; wr++;
                  s->mtfFreq[BZ_RUNA]++;
               }
               if (zPend < 2) break;
               zPend = (zPend - 2) / 2;
            };
            zPend = 0;
         }
         {
            register UChar  rtmp;
            register UChar* ryy_j;
            register UChar  rll_i;
            rtmp  = yy[1];
            yy[1] = yy[0];
            ryy_j = &(yy[1]);
            rll_i = ll_i;
            while ( rll_i != rtmp ) {
               register UChar rtmp2;
               ryy_j++;
               rtmp2  = rtmp;
               rtmp   = *ryy_j;
               *ryy_j = rtmp2;
            };
            yy[0] = rtmp;
            j = ryy_j - &(yy[0]);
            mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
         }

      }
   }

   if (zPend > 0) {
      zPend--;
      while (True) {
         if (zPend & 1) {
            mtfv[wr] = BZ_RUNB; wr++;
            s->mtfFreq[BZ_RUNB]++;
         } else {
            mtfv[wr] = BZ_RUNA; wr++;
            s->mtfFreq[BZ_RUNA]++;
         }
         if (zPend < 2) break;
         zPend = (zPend - 2) / 2;
      };
      zPend = 0;
#ifdef __clang_analyzer__
      /* Tolerate deadcode.DeadStores to avoid modifying upstream. */
      (void)zPend;
#endif
   }

   mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;

   s->nMTF = wr;
}
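
/* Note added in editing (not part of upstream bzip2): the zPend loops above
   encode a run of N consecutive zero MTF values as a sequence of RUNA/RUNB
   symbols, least significant digit first, in bijective base 2 (RUNA = 1,
   RUNB = 2).  For example, a run of length 5 emits RUNA then RUNB, since
   1*1 + 2*2 = 5, and a run of length 3 emits RUNA twice (1*1 + 1*2 = 3).
   This is why no explicit run length ever needs to be transmitted.
*/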

/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static
void sendMTFValues ( EState* s )
{
   Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
   Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
   Int32 nGroups, nBytes;

   /*--
   UChar  len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   is a global since the decoder also needs it.

   Int32  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   Int32  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   are also globals only used in this proc.
   Made global to keep stack frame size small.
   --*/

   UInt16 cost[BZ_N_GROUPS];
   Int32  fave[BZ_N_GROUPS];

   UInt16* mtfv = s->mtfv;

   if (s->verbosity >= 3)
      VPrintf3( " %d in block, %d after MTF & 1-2 coding, "
                "%d+2 syms in use\n",
                s->nblock, s->nMTF, s->nInUse );

   alphaSize = s->nInUse+2;
   for (t = 0; t < BZ_N_GROUPS; t++)
      for (v = 0; v < alphaSize; v++)
         s->len[t][v] = BZ_GREATER_ICOST;

   /*--- Decide how many coding tables to use ---*/
   AssertH ( s->nMTF > 0, 3001 );
   if (s->nMTF < 200)  nGroups = 2; else
   if (s->nMTF < 600)  nGroups = 3; else
   if (s->nMTF < 1200) nGroups = 4; else
   if (s->nMTF < 2400) nGroups = 5; else
                       nGroups = 6;

   /*--- Generate an initial set of coding tables ---*/
   {
      Int32 nPart, remF, tFreq, aFreq;

      nPart = nGroups;
      remF  = s->nMTF;
      gs = 0;
      while (nPart > 0) {
         tFreq = remF / nPart;
         ge = gs-1;
         aFreq = 0;
         while (aFreq < tFreq && ge < alphaSize-1) {
            ge++;
            aFreq += s->mtfFreq[ge];
         }

         if (ge > gs
             && nPart != nGroups && nPart != 1
             && ((nGroups-nPart) % 2 == 1)) {
            aFreq -= s->mtfFreq[ge];
            ge--;
         }

         if (s->verbosity >= 3)
            VPrintf5( " initial group %d, [%d .. %d], "
                      "has %d syms (%4.1f%%)\n",
                      nPart, gs, ge, aFreq,
                      (100.0 * (float)aFreq) / (float)(s->nMTF) );

         for (v = 0; v < alphaSize; v++)
            if (v >= gs && v <= ge)
               s->len[nPart-1][v] = BZ_LESSER_ICOST; else
               s->len[nPart-1][v] = BZ_GREATER_ICOST;

         nPart--;
         gs = ge+1;
         remF -= aFreq;
      }
   }
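
   /* Note added in editing (not part of upstream bzip2): the block above
      seeds the nGroups coding tables by slicing the alphabet, in symbol
      order, into runs of roughly equal total frequency (about remF / nPart
      per slice).  Each table gets a cheap cost (BZ_LESSER_ICOST) for the
      symbols in its own slice and an expensive cost (BZ_GREATER_ICOST)
      everywhere else, so the refinement passes below start from tables
      that already specialise in different parts of the symbol range.
   */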

   /*---
      Iterate up to BZ_N_ITERS times to improve the tables.
   ---*/
   for (iter = 0; iter < BZ_N_ITERS; iter++) {

      for (t = 0; t < nGroups; t++) fave[t] = 0;

      for (t = 0; t < nGroups; t++)
         for (v = 0; v < alphaSize; v++)
            s->rfreq[t][v] = 0;

      /*---
        Set up an auxiliary length table which is used to fast-track
        the common case (nGroups == 6).
      ---*/
      if (nGroups == 6) {
         for (v = 0; v < alphaSize; v++) {
            s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
            s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
            s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
         }
      }

      nSelectors = 0;
      totc = 0;
      gs = 0;
      while (True) {

         /*--- Set group start & end marks. --*/
         if (gs >= s->nMTF) break;
         ge = gs + BZ_G_SIZE - 1;
         if (ge >= s->nMTF) ge = s->nMTF-1;

         /*--
            Calculate the cost of this group as coded
            by each of the coding tables.
         --*/
         for (t = 0; t < nGroups; t++) cost[t] = 0;

         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            register UInt32 cost01, cost23, cost45;
            register UInt16 icv;
            cost01 = cost23 = cost45 = 0;

#           define BZ_ITER(nn) \
               icv = mtfv[gs+(nn)]; \
               cost01 += s->len_pack[icv][0]; \
               cost23 += s->len_pack[icv][1]; \
               cost45 += s->len_pack[icv][2]; \

            BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4);
            BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9);
            BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
            BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
            BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
            BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
            BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
            BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
            BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
            BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);

#           undef BZ_ITER

            cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
            cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
            cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) {
               UInt16 icv = mtfv[i];
               for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
            }
         }

         /*--
            Find the coding table which is best for this group,
            and record its identity in the selector table.
         --*/
         bc = 999999999; bt = -1;
         for (t = 0; t < nGroups; t++)
            if (cost[t] < bc) { bc = cost[t]; bt = t; };
         totc += bc;
         fave[bt]++;
         s->selector[nSelectors] = bt;
         nSelectors++;

         /*--
            Increment the symbol frequencies for the selected table.
         --*/
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/

#           define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++

            BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4);
            BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9);
            BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
            BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
            BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
            BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
            BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
            BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
            BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
            BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);

#           undef BZ_ITUR

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++)
               s->rfreq[bt][ mtfv[i] ]++;
         }

         gs = ge+1;
      }
      if (s->verbosity >= 3) {
         VPrintf2 ( " pass %d: size is %d, grp uses are ",
                    iter+1, totc/8 );
         for (t = 0; t < nGroups; t++)
            VPrintf1 ( "%d ", fave[t] );
         VPrintf0 ( "\n" );
      }

      /*--
         Recompute the tables based on the accumulated frequencies.
      --*/
      /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
         comment in huffman.c for details. */
      for (t = 0; t < nGroups; t++)
         BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]),
                                 alphaSize, 17 /*20*/ );
   }
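
   /* Note added in editing (not part of upstream bzip2): each pass above is
      one round of a k-means-like refinement.  The MTF output is scanned in
      groups of BZ_G_SIZE symbols; every group is assigned to whichever
      table currently codes it most cheaply (recorded in s->selector), the
      chosen table's symbol frequencies are accumulated in s->rfreq, and
      BZ2_hbMakeCodeLengths then rebuilds each table's code lengths from
      those frequencies, capping any code length at 17 bits.
   */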

   AssertH( nGroups < 8, 3002 );
   AssertH( nSelectors < 32768 &&
            nSelectors <= BZ_MAX_SELECTORS,
            3003 );

   /*--- Compute MTF values for the selectors. ---*/
   {
      UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
      for (i = 0; i < nGroups; i++) pos[i] = i;
      for (i = 0; i < nSelectors; i++) {
         ll_i = s->selector[i];
         j = 0;
         tmp = pos[j];
         while ( ll_i != tmp ) {
            j++;
            tmp2 = tmp;
            tmp = pos[j];
            pos[j] = tmp2;
         };
         pos[0] = tmp;
         s->selectorMtf[i] = j;
      }
   };

   /*--- Assign actual codes for the tables. --*/
   for (t = 0; t < nGroups; t++) {
      minLen = 32;
      maxLen = 0;
      for (i = 0; i < alphaSize; i++) {
         if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
         if (s->len[t][i] < minLen) minLen = s->len[t][i];
      }
      AssertH ( !(maxLen > 17 /*20*/ ), 3004 );
      AssertH ( !(minLen < 1), 3005 );
      BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]),
                          minLen, maxLen, alphaSize );
   }

   /*--- Transmit the mapping table. ---*/
   {
      Bool inUse16[16];
      for (i = 0; i < 16; i++) {
         inUse16[i] = False;
         for (j = 0; j < 16; j++)
            if (s->inUse[i * 16 + j]) inUse16[i] = True;
      }

      nBytes = s->numZ;
      for (i = 0; i < 16; i++)
         if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);

      for (i = 0; i < 16; i++)
         if (inUse16[i])
            for (j = 0; j < 16; j++) {
               if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
            }

      if (s->verbosity >= 3)
         VPrintf1( " bytes: mapping %d, ", s->numZ-nBytes );
   }

   /*--- Now the selectors. ---*/
   nBytes = s->numZ;
   bsW ( s, 3, nGroups );
   bsW ( s, 15, nSelectors );
   for (i = 0; i < nSelectors; i++) {
      for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
      bsW(s,1,0);
   }
   if (s->verbosity >= 3)
      VPrintf1( "selectors %d, ", s->numZ-nBytes );

   /*--- Now the coding tables. ---*/
   nBytes = s->numZ;

   for (t = 0; t < nGroups; t++) {
      Int32 curr = s->len[t][0];
      bsW ( s, 5, curr );
      for (i = 0; i < alphaSize; i++) {
         while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
         while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
         bsW ( s, 1, 0 );
      }
   }
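
   /* Note added in editing (not part of upstream bzip2): each table's code
      lengths are sent delta-coded.  The first length is written as a 5-bit
      value, and every subsequent symbol adjusts the running value curr: the
      bit pair "10" means increment, "11" means decrement, and a single "0"
      bit terminates that symbol.  For instance, moving from curr == 5 to a
      length of 7 emits "10 10 0"; an unchanged length emits just "0".
   */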

   if (s->verbosity >= 3)
      VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );

   /*--- And finally, the block data proper ---*/
   nBytes = s->numZ;
   selCtr = 0;
   gs = 0;
   while (True) {
      if (gs >= s->nMTF) break;
      ge = gs + BZ_G_SIZE - 1;
      if (ge >= s->nMTF) ge = s->nMTF-1;
      AssertH ( s->selector[selCtr] < nGroups, 3006 );

      if (nGroups == 6 && 50 == ge-gs+1) {
         /*--- fast track the common case ---*/
         UInt16 mtfv_i;
         UChar* s_len_sel_selCtr
            = &(s->len[s->selector[selCtr]][0]);
         Int32* s_code_sel_selCtr
            = &(s->code[s->selector[selCtr]][0]);

#        define BZ_ITAH(nn) \
            mtfv_i = mtfv[gs+(nn)]; \
            bsW ( s, \
                  s_len_sel_selCtr[mtfv_i], \
                  s_code_sel_selCtr[mtfv_i] )

         BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4);
         BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9);
         BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
         BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
         BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
         BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
         BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
         BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
         BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
         BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);

#        undef BZ_ITAH

      } else {
         /*--- slow version which correctly handles all situations ---*/
         for (i = gs; i <= ge; i++) {
            bsW ( s,
                  s->len [s->selector[selCtr]] [mtfv[i]],
                  s->code [s->selector[selCtr]] [mtfv[i]] );
         }
      }

      gs = ge+1;
      selCtr++;
   }
   AssertH( selCtr == nSelectors, 3007 );

   if (s->verbosity >= 3)
      VPrintf1( "codes %d\n", s->numZ-nBytes );
}


/*---------------------------------------------------*/
void BZ2_compressBlock ( EState* s, Bool is_last_block )
{
   if (s->nblock > 0) {

      BZ_FINALISE_CRC ( s->blockCRC );
      s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
      s->combinedCRC ^= s->blockCRC;
      if (s->blockNo > 1) s->numZ = 0;

      if (s->verbosity >= 2)
         VPrintf4( " block %d: crc = 0x%08x, "
                   "combined CRC = 0x%08x, size = %d\n",
                   s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );

      BZ2_blockSort ( s );
   }

   s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);

   /*-- If this is the first block, create the stream header. --*/
   if (s->blockNo == 1) {
      BZ2_bsInitWrite ( s );
      bsPutUChar ( s, BZ_HDR_B );
      bsPutUChar ( s, BZ_HDR_Z );
      bsPutUChar ( s, BZ_HDR_h );
      bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
   }

   if (s->nblock > 0) {

      bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
      bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
      bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );

      /*-- Now the block's CRC, so it is in a known place. --*/
      bsPutUInt32 ( s, s->blockCRC );

      /*--
         Now a single bit indicating (non-)randomisation.
         As of version 0.9.5, we use a better sorting algorithm
         which makes randomisation unnecessary. So always set
         the randomised bit to 'no'. Of course, the decoder
         still needs to be able to handle randomised blocks
         so as to maintain backwards compatibility with
         older versions of bzip2.
      --*/
      bsW(s,1,0);

      bsW ( s, 24, s->origPtr );
      generateMTFValues ( s );
      sendMTFValues ( s );
   }

   /*-- If this is the last block, add the stream trailer. --*/
   if (is_last_block) {

      bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
      bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
      bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
      bsPutUInt32 ( s, s->combinedCRC );
      if (s->verbosity >= 2)
         VPrintf1( " final combined CRC = 0x%08x\n ", s->combinedCRC );
      bsFinishWrite ( s );
   }
}
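
/* Note added in editing (not part of upstream bzip2): the six bytes
   0x31 41 59 26 53 59 written at the start of each block and the six bytes
   0x17 72 45 38 50 90 written in the stream trailer are the block and
   end-of-stream magic numbers of the bzip2 format; they are the decimal
   digits of pi (3.14159265359) and of sqrt(pi) (1.77245385090) packed as
   hex nibbles.  The trailer magic is followed by the combined CRC, and
   bsFinishWrite then pads the bit stream out to a whole number of bytes.
*/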

/*-------------------------------------------------------------*/
/*--- end compress.c ---*/
/*-------------------------------------------------------------*/