/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.commons.compress.archivers.zip;

import static org.apache.commons.compress.archivers.zip.ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipConstants.DEFLATE_MIN_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.INITIAL_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT;
import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC_SHORT;
import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MIN_VERSION;
import static org.apache.commons.compress.archivers.zip.ZipLong.putLong;
import static org.apache.commons.compress.archivers.zip.ZipShort.putShort;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Calendar;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.zip.Deflater;
import java.util.zip.ZipException;

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.utils.ByteUtils;
import org.apache.commons.compress.utils.IOUtils;

/**
 * Reimplementation of {@link java.util.zip.ZipOutputStream
 * java.util.zip.ZipOutputStream} that handles the extended
 * functionality of this package, especially internal/external file
 * attributes and extra fields with different layouts for local file
 * data and central directory entries.
 *
 * <p>This class will try to use {@link
 * java.nio.channels.SeekableByteChannel} when it knows that the
 * output is going to go to a file and no split archive shall be
 * created.</p>
 *
 * <p>If SeekableByteChannel cannot be used, this implementation will use
 * a Data Descriptor to store size and CRC information for {@link
 * #DEFLATED DEFLATED} entries, which means you don't need to
 * calculate them yourself. Unfortunately this is not possible for
 * the {@link #STORED STORED} method, where setting the CRC and
 * uncompressed size information is required before {@link
 * #putArchiveEntry(ArchiveEntry)} can be called.</p>
 *
 * <p>As of Apache Commons Compress 1.3 it transparently supports Zip64
 * extensions and thus individual entries and archives larger than 4
 * GB or with more than 65536 entries in most cases, but explicit
 * control is provided via {@link #setUseZip64}. If the stream cannot
 * use SeekableByteChannel and you try to write a ZipArchiveEntry of
 * unknown size, then Zip64 extensions will be disabled by default.</p>
 *
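 * <p>A minimal usage sketch (illustrative only, not part of the original
 * documentation; the file and entry names are made up): create the stream,
 * put an entry, write its data, close the entry, then finish the archive.</p>
 *
 * <pre>{@code
 * try (ZipArchiveOutputStream zipOut =
 *          new ZipArchiveOutputStream(new File("example.zip"))) {
 *     ZipArchiveEntry entry = new ZipArchiveEntry("hello.txt");
 *     zipOut.putArchiveEntry(entry);
 *     zipOut.write("Hello, world!".getBytes(java.nio.charset.StandardCharsets.UTF_8));
 *     zipOut.closeArchiveEntry();
 *     zipOut.finish();
 * }
 * }</pre>
 *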
 * @NotThreadSafe
 */
public class ZipArchiveOutputStream extends ArchiveOutputStream {

    static final int BUFFER_SIZE = 512;
    private static final int LFH_SIG_OFFSET = 0;
    private static final int LFH_VERSION_NEEDED_OFFSET = 4;
    private static final int LFH_GPB_OFFSET = 6;
    private static final int LFH_METHOD_OFFSET = 8;
    private static final int LFH_TIME_OFFSET = 10;
    private static final int LFH_CRC_OFFSET = 14;
    private static final int LFH_COMPRESSED_SIZE_OFFSET = 18;
    private static final int LFH_ORIGINAL_SIZE_OFFSET = 22;
    private static final int LFH_FILENAME_LENGTH_OFFSET = 26;
    private static final int LFH_EXTRA_LENGTH_OFFSET = 28;
    private static final int LFH_FILENAME_OFFSET = 30;
    private static final int CFH_SIG_OFFSET = 0;
    private static final int CFH_VERSION_MADE_BY_OFFSET = 4;
    private static final int CFH_VERSION_NEEDED_OFFSET = 6;
    private static final int CFH_GPB_OFFSET = 8;
    private static final int CFH_METHOD_OFFSET = 10;
    private static final int CFH_TIME_OFFSET = 12;
    private static final int CFH_CRC_OFFSET = 16;
    private static final int CFH_COMPRESSED_SIZE_OFFSET = 20;
    private static final int CFH_ORIGINAL_SIZE_OFFSET = 24;
    private static final int CFH_FILENAME_LENGTH_OFFSET = 28;
    private static final int CFH_EXTRA_LENGTH_OFFSET = 30;
    private static final int CFH_COMMENT_LENGTH_OFFSET = 32;
    private static final int CFH_DISK_NUMBER_OFFSET = 34;
    private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36;
    private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38;
    private static final int CFH_LFH_OFFSET = 42;
    private static final int CFH_FILENAME_OFFSET = 46;

    /** indicates if this archive is finished. protected for use in Jar implementation */
    protected boolean finished;

    /**
     * Compression method for deflated entries.
     */
    public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;

    /**
     * Default compression level for deflated entries.
     */
    public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;

    /**
     * Compression method for stored entries.
     *
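     * <p>When writing to a non-seekable stream, entries using this method
     * need their CRC-32 and uncompressed size set before
     * {@link #putArchiveEntry(ArchiveEntry)}. A sketch (illustrative only;
     * {@code zipOut} is a ZipArchiveOutputStream created elsewhere and the
     * payload is made up):</p>
     *
     * <pre>{@code
     * byte[] data = "payload".getBytes(java.nio.charset.StandardCharsets.US_ASCII);
     * java.util.zip.CRC32 crc = new java.util.zip.CRC32();
     * crc.update(data);
     * ZipArchiveEntry entry = new ZipArchiveEntry("stored.bin");
     * entry.setMethod(ZipArchiveOutputStream.STORED);
     * entry.setSize(data.length);
     * entry.setCrc(crc.getValue());
     * zipOut.putArchiveEntry(entry);
     * zipOut.write(data);
     * zipOut.closeArchiveEntry();
     * }</pre>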
     */
    public static final int STORED = java.util.zip.ZipEntry.STORED;

    /**
     * default encoding for file names and comment.
     */
    static final String DEFAULT_ENCODING = ZipEncodingHelper.UTF8;

    /**
     * General purpose flag, which indicates that file names are
     * written in UTF-8.
     * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
     */
    @Deprecated
    public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;

    /**
     * Current entry.
     */
    private CurrentEntry entry;

    /**
     * The file comment.
     */
    private String comment = "";

    /**
     * Compression level for next entry.
     */
    private int level = DEFAULT_COMPRESSION;

    /**
     * Has the compression level changed when compared to the last
     * entry?
     */
    private boolean hasCompressionLevelChanged;

    /**
     * Default compression method for next entry.
     */
    private int method = java.util.zip.ZipEntry.DEFLATED;

    /**
     * List of ZipArchiveEntries written so far.
     */
    private final List<ZipArchiveEntry> entries =
        new LinkedList<>();

    private final StreamCompressor streamCompressor;

    /**
     * Start of central directory.
     */
    private long cdOffset;

    /**
     * Length of central directory.
     */
    private long cdLength;

    /**
     * Disk number start of central directory.
     */
    private long cdDiskNumberStart;

    /**
     * Length of end of central directory
     */
    private long eocdLength;

    /**
     * Helper, a 0 as ZipShort.
     */
    private static final byte[] ZERO = {0, 0};

    /**
     * Helper, a 0 as ZipLong.
     */
    private static final byte[] LZERO = {0, 0, 0, 0};

    private static final byte[] ONE = ZipLong.getBytes(1L);

    /**
     * Holds some book-keeping data for each entry.
     */
    private final Map<ZipArchiveEntry, EntryMetaData> metaData =
        new HashMap<>();

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>For a list of possible values see <a
     * href="http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html">http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html</a>.
     * Defaults to UTF-8.</p>
     */
    private String encoding = DEFAULT_ENCODING;

    /**
     * The zip encoding to use for file names and the file comment.
     *
     * This field is of internal use and will be set in {@link
     * #setEncoding(String)}.
     */
    private ZipEncoding zipEncoding =
        ZipEncodingHelper.getZipEncoding(DEFAULT_ENCODING);

    /**
     * This Deflater object is used for output.
     *
     */
    protected final Deflater def;

    /**
     * Optional random access output.
     */
    private final SeekableByteChannel channel;

    private final OutputStream out;

    /**
     * whether to use the general purpose bit flag when writing UTF-8
     * file names or not.
     */
    private boolean useUTF8Flag = true;

    /**
     * Whether to encode non-encodable file names as UTF-8.
     */
    private boolean fallbackToUTF8;

    /**
     * whether to create UnicodePathExtraField-s for each entry.
     */
    private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;

    /**
     * Whether anything inside this archive has used a ZIP64 feature.
     *
     * @since 1.3
     */
    private boolean hasUsedZip64;

    private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;

    private final byte[] copyBuffer = new byte[32768];
    private final Calendar calendarInstance = Calendar.getInstance();

    /**
     * Whether we are creating a split zip
     */
    private final boolean isSplitZip;

    /**
     * Holds the number of Central Directories on each disk, this is used
     * when writing Zip64 End Of Central Directory and End Of Central Directory
     */
    private final Map<Integer, Integer> numberOfCDInDiskData = new HashMap<>();

    /**
     * Creates a new ZIP OutputStream filtering the underlying stream.
     * @param out the outputstream to zip
     */
    public ZipArchiveOutputStream(final OutputStream out) {
        this.out = out;
        this.channel = null;
        def = new Deflater(level, true);
        streamCompressor = StreamCompressor.create(out, def);
        isSplitZip = false;
    }

    /**
     * Creates a new ZIP OutputStream writing to a File.  Will use
     * random access if possible.
     * @param file the file to zip to
     * @throws IOException on error
     */
    public ZipArchiveOutputStream(final File file) throws IOException {
        this(file.toPath());
    }

    /**
     * Creates a new ZIP OutputStream writing to a Path.  Will use
     * random access if possible.
     * @param file the file to zip to
     * @param options options specifying how the file is opened.
     * @throws IOException on error
     * @since 1.21
     */
    public ZipArchiveOutputStream(final Path file, final OpenOption... options) throws IOException {
        def = new Deflater(level, true);
        OutputStream o = null;
        SeekableByteChannel _channel = null;
        StreamCompressor _streamCompressor = null;
        try {
            _channel = Files.newByteChannel(file,
                EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                           StandardOpenOption.READ,
                           StandardOpenOption.TRUNCATE_EXISTING));
            // will never get opened properly when an exception is thrown so doesn't need to get closed
            _streamCompressor = StreamCompressor.create(_channel, def); //NOSONAR
        } catch (final IOException e) { // NOSONAR
            IOUtils.closeQuietly(_channel);
            _channel = null;
            o = Files.newOutputStream(file, options);
            _streamCompressor = StreamCompressor.create(o, def);
        }
        out = o;
        channel = _channel;
        streamCompressor = _streamCompressor;
        isSplitZip = false;
    }

    /**
     * Creates a split ZIP Archive.
     *
     * <p>The files making up the archive will use Z01, Z02,
     * ... extensions and the last part of it will be the given {@code
     * file}.</p>
     *
     * <p>Even though the stream writes to a file this stream will
     * behave as if no random access was possible. This means the
     * sizes of stored entries need to be known before the actual
     * entry data is written.</p>
     *
     * @param file the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split
     * archive created by this stream. Must be between 64kB and about
     * 4GB.
     *
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.20
     */
    public ZipArchiveOutputStream(final File file, final long zipSplitSize) throws IOException {
        def = new Deflater(level, true);
        this.out = new ZipSplitOutputStream(file, zipSplitSize);
        streamCompressor = StreamCompressor.create(this.out, def);
        channel = null;
        isSplitZip = true;
    }

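    /*
     * A sketch of creating a split archive with the constructor above
     * (illustrative only; the file name and the 10 MB part size are made up,
     * the part size only has to lie between 64 kB and about 4 GB):
     *
     *   ZipArchiveOutputStream zipOut =
     *       new ZipArchiveOutputStream(new File("archive.zip"), 10L * 1024 * 1024);
     *
     * The parts then use the Z01, Z02, ... extensions, with the given file
     * receiving the final segment.
     */
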
    /**
     * Creates a new ZIP OutputStream writing to a SeekableByteChannel.
     *
     * <p>{@link
     * org.apache.commons.compress.utils.SeekableInMemoryByteChannel}
     * allows you to write to an in-memory archive using random
     * access.</p>
     *
     * @param channel the channel to zip to
     * @throws IOException on error
     * @since 1.13
     */
    public ZipArchiveOutputStream(final SeekableByteChannel channel) throws IOException {
        this.channel = channel;
        def = new Deflater(level, true);
        streamCompressor = StreamCompressor.create(channel, def);
        out = null;
        isSplitZip = false;
    }

    /**
     * This method indicates whether this archive is writing to a
     * seekable stream (i.e., to a random access file).
     *
     * <p>For seekable streams, you don't need to calculate the CRC or
     * uncompressed size for {@link #STORED} entries before
     * invoking {@link #putArchiveEntry(ArchiveEntry)}.</p>
     * @return true if seekable
     */
    public boolean isSeekable() {
        return channel != null;
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>For a list of possible values see <a
     * href="http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html">http://java.sun.com/j2se/1.5.0/docs/guide/intl/encoding.doc.html</a>.
     * Defaults to UTF-8.</p>
     * @param encoding the encoding to use for file names, use null
     * for the platform's default encoding
     */
    public void setEncoding(final String encoding) {
        this.encoding = encoding;
        this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
        if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) {
            useUTF8Flag = false;
        }
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * @return null if using the platform's default character encoding.
     */
    public String getEncoding() {
        return encoding;
    }

    /**
     * Whether to set the language encoding flag if the file name
     * encoding is UTF-8.
     *
     * <p>Defaults to true.</p>
     *
     * @param b whether to set the language encoding flag if the file
     * name encoding is UTF-8
     */
    public void setUseLanguageEncodingFlag(final boolean b) {
        useUTF8Flag = b && ZipEncodingHelper.isUTF8(encoding);
    }

    /**
     * Whether to create Unicode Extra Fields.
     *
     * <p>Defaults to NEVER.</p>
     *
     * @param b whether to create Unicode Extra Fields.
     */
    public void setCreateUnicodeExtraFields(final UnicodeExtraFieldPolicy b) {
        createUnicodeExtraFields = b;
    }

    /**
     * Whether to fall back to UTF-8 and the language encoding flag if
     * the file name cannot be encoded using the specified encoding.
     *
     * <p>Defaults to false.</p>
     *
     * @param b whether to fall back to UTF-8 and the language encoding
     * flag if the file name cannot be encoded using the specified
     * encoding.
     */
    public void setFallbackToUTF8(final boolean b) {
        fallbackToUTF8 = b;
    }

    /**
     * Whether Zip64 extensions will be used.
     *
     * <p>When setting the mode to {@link Zip64Mode#Never Never},
     * {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link
     * #finish} or {@link #close} may throw a {@link
     * Zip64RequiredException} if the entry's size or the total size
     * of the archive exceeds 4GB or there are more than 65536 entries
     * inside the archive. Any archive created in this mode will be
     * readable by implementations that don't support Zip64.</p>
     *
     * <p>When setting the mode to {@link Zip64Mode#Always Always},
     * Zip64 extensions will be used for all entries. Any archive
     * created in this mode may be unreadable by implementations that
     * don't support Zip64 even if all its contents would be.</p>
     *
     * <p>When setting the mode to {@link Zip64Mode#AsNeeded
     * AsNeeded}, Zip64 extensions will transparently be used for
     * those entries that require them. This mode can only be used if
     * the uncompressed size of the {@link ZipArchiveEntry} is known
     * when calling {@link #putArchiveEntry} or the archive is written
     * to a seekable output (i.e. you have used the {@link
     * #ZipArchiveOutputStream(java.io.File) File-arg constructor}) -
     * this mode is not valid when the output stream is not seekable
     * and the uncompressed size is unknown when {@link
     * #putArchiveEntry} is called.</p>
     *
     * <p>If no entry inside the resulting archive requires Zip64
     * extensions then {@link Zip64Mode#Never Never} will create the
     * smallest archive. {@link Zip64Mode#AsNeeded AsNeeded} will
     * create a slightly bigger archive if the uncompressed size of
     * any entry has initially been unknown and create an archive
     * identical to {@link Zip64Mode#Never Never} otherwise. {@link
     * Zip64Mode#Always Always} will create an archive that is at
     * least 24 bytes per entry bigger than the one {@link
     * Zip64Mode#Never Never} would create.</p>
     *
     * <p>Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless
     * {@link #putArchiveEntry} is called with an entry of unknown
     * size and data is written to a non-seekable stream - in this
     * case the default is {@link Zip64Mode#Never Never}.</p>
     *
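     * <p>A sketch of forcing Zip64 extensions before writing an entry that
     * may grow beyond 4 GB (illustrative only, not part of the original
     * documentation; {@code zipOut} is a ZipArchiveOutputStream created
     * elsewhere):</p>
     *
     * <pre>{@code
     * zipOut.setUseZip64(Zip64Mode.Always);
     * ZipArchiveEntry big = new ZipArchiveEntry("big.dat");
     * zipOut.putArchiveEntry(big);
     * // stream the potentially huge content, then
     * zipOut.closeArchiveEntry();
     * }</pre>
     *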
     * @since 1.3
     * @param mode Whether Zip64 extensions will be used.
     */
    public void setUseZip64(final Zip64Mode mode) {
        zip64Mode = mode;
    }

    /**
     * {@inheritDoc}
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
526 */ 527 @Override 528 public void finish() throws IOException { 529 if (finished) { 530 throw new IOException("This archive has already been finished"); 531 } 532 533 if (entry != null) { 534 throw new IOException("This archive contains unclosed entries."); 535 } 536 537 final long cdOverallOffset = streamCompressor.getTotalBytesWritten(); 538 cdOffset = cdOverallOffset; 539 if (isSplitZip) { 540 // when creating a split zip, the offset should be 541 // the offset to the corresponding segment disk 542 final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream)this.out; 543 cdOffset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten(); 544 cdDiskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex(); 545 } 546 writeCentralDirectoryInChunks(); 547 548 cdLength = streamCompressor.getTotalBytesWritten() - cdOverallOffset; 549 550 // calculate the length of end of central directory, as it may be used in writeZip64CentralDirectory 551 final ByteBuffer commentData = this.zipEncoding.encode(comment); 552 final long commentLength = (long) commentData.limit() - commentData.position(); 553 eocdLength = WORD /* length of EOCD_SIG */ 554 + SHORT /* number of this disk */ 555 + SHORT /* disk number of start of central directory */ 556 + SHORT /* total number of entries on this disk */ 557 + SHORT /* total number of entries */ 558 + WORD /* size of central directory */ 559 + WORD /* offset of start of central directory */ 560 + SHORT /* zip comment length */ 561 + commentLength /* zip comment */; 562 563 writeZip64CentralDirectory(); 564 writeCentralDirectoryEnd(); 565 metaData.clear(); 566 entries.clear(); 567 streamCompressor.close(); 568 if (isSplitZip) { 569 // trigger the ZipSplitOutputStream to write the final split segment 570 out.close(); 571 } 572 finished = true; 573 } 574 575 private void writeCentralDirectoryInChunks() throws IOException { 576 final int NUM_PER_WRITE = 1000; 577 final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE); 578 int count = 0; 579 for (final ZipArchiveEntry ze : entries) { 580 byteArrayOutputStream.write(createCentralFileHeader(ze)); 581 if (++count > NUM_PER_WRITE){ 582 writeCounted(byteArrayOutputStream.toByteArray()); 583 byteArrayOutputStream.reset(); 584 count = 0; 585 } 586 } 587 writeCounted(byteArrayOutputStream.toByteArray()); 588 } 589 590 /** 591 * Writes all necessary data for this entry. 592 * @throws IOException on error 593 * @throws Zip64RequiredException if the entry's uncompressed or 594 * compressed size exceeds 4 GByte and {@link #setUseZip64} 595 * is {@link Zip64Mode#Never}. 596 */ 597 @Override 598 public void closeArchiveEntry() throws IOException { 599 preClose(); 600 601 flushDeflater(); 602 603 final long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart; 604 final long realCrc = streamCompressor.getCrc32(); 605 entry.bytesRead = streamCompressor.getBytesRead(); 606 final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry); 607 final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode); 608 closeEntry(actuallyNeedsZip64, false); 609 streamCompressor.reset(); 610 } 611 612 /** 613 * Writes all necessary data for this entry. 
614 * 615 * @param phased This entry is second phase of a 2-phase zip creation, size, compressed size and crc 616 * are known in ZipArchiveEntry 617 * @throws IOException on error 618 * @throws Zip64RequiredException if the entry's uncompressed or 619 * compressed size exceeds 4 GByte and {@link #setUseZip64} 620 * is {@link Zip64Mode#Never}. 621 */ 622 private void closeCopiedEntry(final boolean phased) throws IOException { 623 preClose(); 624 entry.bytesRead = entry.entry.getSize(); 625 final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry); 626 final boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode); 627 closeEntry(actuallyNeedsZip64, phased); 628 } 629 630 private void closeEntry(final boolean actuallyNeedsZip64, final boolean phased) throws IOException { 631 if (!phased && channel != null) { 632 rewriteSizesAndCrc(actuallyNeedsZip64); 633 } 634 635 if (!phased) { 636 writeDataDescriptor(entry.entry); 637 } 638 entry = null; 639 } 640 641 private void preClose() throws IOException { 642 if (finished) { 643 throw new IOException("Stream has already been finished"); 644 } 645 646 if (entry == null) { 647 throw new IOException("No current entry to close"); 648 } 649 650 if (!entry.hasWritten) { 651 write(ByteUtils.EMPTY_BYTE_ARRAY, 0, 0); 652 } 653 } 654 655 /** 656 * Adds an archive entry with a raw input stream. 657 * 658 * If crc, size and compressed size are supplied on the entry, these values will be used as-is. 659 * Zip64 status is re-established based on the settings in this stream, and the supplied value 660 * is ignored. 661 * 662 * The entry is put and closed immediately. 663 * 664 * @param entry The archive entry to add 665 * @param rawStream The raw input stream of a different entry. May be compressed/encrypted. 666 * @throws IOException If copying fails 667 */ 668 public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream) 669 throws IOException { 670 final ZipArchiveEntry ae = new ZipArchiveEntry(entry); 671 if (hasZip64Extra(ae)) { 672 // Will be re-added as required. this may make the file generated with this method 673 // somewhat smaller than standard mode, 674 // since standard mode is unable to remove the zip 64 header. 675 ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID); 676 } 677 final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN 678 && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN 679 && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN; 680 putArchiveEntry(ae, is2PhaseSource); 681 copyFromZipInputStream(rawStream); 682 closeCopiedEntry(is2PhaseSource); 683 } 684 685 /** 686 * Ensures all bytes sent to the deflater are written to the stream. 687 */ 688 private void flushDeflater() throws IOException { 689 if (entry.entry.getMethod() == DEFLATED) { 690 streamCompressor.flushDeflater(); 691 } 692 } 693 694 /** 695 * Ensures the current entry's size and CRC information is set to 696 * the values just written, verifies it isn't too big in the 697 * Zip64Mode.Never case and returns whether the entry would 698 * require a Zip64 extra field. 
699 */ 700 private boolean handleSizesAndCrc(final long bytesWritten, final long crc, 701 final Zip64Mode effectiveMode) 702 throws ZipException { 703 if (entry.entry.getMethod() == DEFLATED) { 704 /* It turns out def.getBytesRead() returns wrong values if 705 * the size exceeds 4 GB on Java < Java7 706 entry.entry.setSize(def.getBytesRead()); 707 */ 708 entry.entry.setSize(entry.bytesRead); 709 entry.entry.setCompressedSize(bytesWritten); 710 entry.entry.setCrc(crc); 711 712 } else if (channel == null) { 713 if (entry.entry.getCrc() != crc) { 714 throw new ZipException("Bad CRC checksum for entry " 715 + entry.entry.getName() + ": " 716 + Long.toHexString(entry.entry.getCrc()) 717 + " instead of " 718 + Long.toHexString(crc)); 719 } 720 721 if (entry.entry.getSize() != bytesWritten) { 722 throw new ZipException("Bad size for entry " 723 + entry.entry.getName() + ": " 724 + entry.entry.getSize() 725 + " instead of " 726 + bytesWritten); 727 } 728 } else { /* method is STORED and we used SeekableByteChannel */ 729 entry.entry.setSize(bytesWritten); 730 entry.entry.setCompressedSize(bytesWritten); 731 entry.entry.setCrc(crc); 732 } 733 734 return checkIfNeedsZip64(effectiveMode); 735 } 736 737 /** 738 * Verifies the sizes aren't too big in the Zip64Mode.Never case 739 * and returns whether the entry would require a Zip64 extra 740 * field. 741 */ 742 private boolean checkIfNeedsZip64(final Zip64Mode effectiveMode) 743 throws ZipException { 744 final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode); 745 if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) { 746 throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry)); 747 } 748 return actuallyNeedsZip64; 749 } 750 751 private boolean isZip64Required(final ZipArchiveEntry entry1, final Zip64Mode requestedMode) { 752 return requestedMode == Zip64Mode.Always || requestedMode == Zip64Mode.AlwaysWithCompatibility 753 || isTooLargeForZip32(entry1); 754 } 755 756 private boolean isTooLargeForZip32(final ZipArchiveEntry zipArchiveEntry){ 757 return zipArchiveEntry.getSize() >= ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZIP64_MAGIC; 758 } 759 760 /** 761 * When using random access output, write the local file header 762 * and potentially the ZIP64 extra containing the correct CRC and 763 * compressed/uncompressed sizes. 
764 */ 765 private void rewriteSizesAndCrc(final boolean actuallyNeedsZip64) 766 throws IOException { 767 final long save = channel.position(); 768 769 channel.position(entry.localDataStart); 770 writeOut(ZipLong.getBytes(entry.entry.getCrc())); 771 if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) { 772 writeOut(ZipLong.getBytes(entry.entry.getCompressedSize())); 773 writeOut(ZipLong.getBytes(entry.entry.getSize())); 774 } else { 775 writeOut(ZipLong.ZIP64_MAGIC.getBytes()); 776 writeOut(ZipLong.ZIP64_MAGIC.getBytes()); 777 } 778 779 if (hasZip64Extra(entry.entry)) { 780 final ByteBuffer name = getName(entry.entry); 781 final int nameLen = name.limit() - name.position(); 782 // seek to ZIP64 extra, skip header and size information 783 channel.position(entry.localDataStart + 3 * WORD + 2 * SHORT 784 + nameLen + 2 * SHORT); 785 // inside the ZIP64 extra uncompressed size comes 786 // first, unlike the LFH, CD or data descriptor 787 writeOut(ZipEightByteInteger.getBytes(entry.entry.getSize())); 788 writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize())); 789 790 if (!actuallyNeedsZip64) { 791 // do some cleanup: 792 // * rewrite version needed to extract 793 channel.position(entry.localDataStart - 5 * SHORT); 794 writeOut(ZipShort.getBytes(versionNeededToExtract(entry.entry.getMethod(), false, false))); 795 796 // * remove ZIP64 extra so it doesn't get written 797 // to the central directory 798 entry.entry.removeExtraField(Zip64ExtendedInformationExtraField 799 .HEADER_ID); 800 entry.entry.setExtra(); 801 802 // * reset hasUsedZip64 if it has been set because 803 // of this entry 804 if (entry.causedUseOfZip64) { 805 hasUsedZip64 = false; 806 } 807 } 808 } 809 channel.position(save); 810 } 811 812 /** 813 * {@inheritDoc} 814 * @throws ClassCastException if entry is not an instance of ZipArchiveEntry 815 * @throws Zip64RequiredException if the entry's uncompressed or 816 * compressed size is known to exceed 4 GByte and {@link #setUseZip64} 817 * is {@link Zip64Mode#Never}. 818 */ 819 @Override 820 public void putArchiveEntry(final ArchiveEntry archiveEntry) throws IOException { 821 putArchiveEntry(archiveEntry, false); 822 } 823 824 /** 825 * Writes the headers for an archive entry to the output stream. 826 * The caller must then write the content to the stream and call 827 * {@link #closeArchiveEntry()} to complete the process. 828 829 * @param archiveEntry The archiveEntry 830 * @param phased If true size, compressedSize and crc required to be known up-front in the archiveEntry 831 * @throws ClassCastException if entry is not an instance of ZipArchiveEntry 832 * @throws Zip64RequiredException if the entry's uncompressed or 833 * compressed size is known to exceed 4 GByte and {@link #setUseZip64} 834 * is {@link Zip64Mode#Never}. 
835 */ 836 private void putArchiveEntry(final ArchiveEntry archiveEntry, final boolean phased) throws IOException { 837 if (finished) { 838 throw new IOException("Stream has already been finished"); 839 } 840 841 if (entry != null) { 842 closeArchiveEntry(); 843 } 844 845 entry = new CurrentEntry((ZipArchiveEntry) archiveEntry); 846 entries.add(entry.entry); 847 848 setDefaults(entry.entry); 849 850 final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry); 851 validateSizeInformation(effectiveMode); 852 853 if (shouldAddZip64Extra(entry.entry, effectiveMode)) { 854 855 final Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry); 856 857 final ZipEightByteInteger size; 858 final ZipEightByteInteger compressedSize; 859 if (phased) { 860 // sizes are already known 861 size = new ZipEightByteInteger(entry.entry.getSize()); 862 compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize()); 863 } else if (entry.entry.getMethod() == STORED 864 && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) { 865 // actually, we already know the sizes 866 compressedSize = size = new ZipEightByteInteger(entry.entry.getSize()); 867 } else { 868 // just a placeholder, real data will be in data 869 // descriptor or inserted later via SeekableByteChannel 870 compressedSize = size = ZipEightByteInteger.ZERO; 871 } 872 z64.setSize(size); 873 z64.setCompressedSize(compressedSize); 874 entry.entry.setExtra(); 875 } 876 877 if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) { 878 def.setLevel(level); 879 hasCompressionLevelChanged = false; 880 } 881 writeLocalFileHeader((ZipArchiveEntry) archiveEntry, phased); 882 } 883 884 /** 885 * Provides default values for compression method and last 886 * modification time. 887 */ 888 private void setDefaults(final ZipArchiveEntry entry) { 889 if (entry.getMethod() == -1) { // not specified 890 entry.setMethod(method); 891 } 892 893 if (entry.getTime() == -1) { // not specified 894 entry.setTime(System.currentTimeMillis()); 895 } 896 } 897 898 /** 899 * Throws an exception if the size is unknown for a stored entry 900 * that is written to a non-seekable output or the entry is too 901 * big to be written without Zip64 extra but the mode has been set 902 * to Never. 903 */ 904 private void validateSizeInformation(final Zip64Mode effectiveMode) 905 throws ZipException { 906 // Size/CRC not required if SeekableByteChannel is used 907 if (entry.entry.getMethod() == STORED && channel == null) { 908 if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) { 909 throw new ZipException("Uncompressed size is required for" 910 + " STORED method when not writing to a" 911 + " file"); 912 } 913 if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) { 914 throw new ZipException("CRC checksum is required for STORED" 915 + " method when not writing to a file"); 916 } 917 entry.entry.setCompressedSize(entry.entry.getSize()); 918 } 919 920 if ((entry.entry.getSize() >= ZIP64_MAGIC 921 || entry.entry.getCompressedSize() >= ZIP64_MAGIC) 922 && effectiveMode == Zip64Mode.Never) { 923 throw new Zip64RequiredException(Zip64RequiredException 924 .getEntryTooBigMessage(entry.entry)); 925 } 926 } 927 928 /** 929 * Whether to add a Zip64 extended information extra field to the 930 * local file header. 
     *
     * <p>Returns true if</p>
     *
     * <ul>
     * <li>mode is Always</li>
     * <li>or we already know it is going to be needed</li>
     * <li>or the size is unknown and we can ensure it won't hurt
     * other implementations if we add it (i.e. we can erase its
     * usage)</li>
     * </ul>
     */
    private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) {
        return mode == Zip64Mode.Always
            || mode == Zip64Mode.AlwaysWithCompatibility
            || entry.getSize() >= ZIP64_MAGIC
            || entry.getCompressedSize() >= ZIP64_MAGIC
            || (entry.getSize() == ArchiveEntry.SIZE_UNKNOWN
                && channel != null && mode != Zip64Mode.Never);
    }

    /**
     * Set the file comment.
     * @param comment the comment
     */
    public void setComment(final String comment) {
        this.comment = comment;
    }

    /**
     * Sets the compression level for subsequent entries.
     *
     * <p>Default is Deflater.DEFAULT_COMPRESSION.</p>
     * @param level the compression level.
     * @throws IllegalArgumentException if an invalid compression
     * level is specified.
     */
    public void setLevel(final int level) {
        if (level < Deflater.DEFAULT_COMPRESSION
            || level > Deflater.BEST_COMPRESSION) {
            throw new IllegalArgumentException("Invalid compression level: "
                                               + level);
        }
        if (this.level == level) {
            return;
        }
        hasCompressionLevelChanged = true;
        this.level = level;
    }

    /**
     * Sets the default compression method for subsequent entries.
     *
     * <p>Default is DEFLATED.</p>
     * @param method an <code>int</code> from java.util.zip.ZipEntry
     */
    public void setMethod(final int method) {
        this.method = method;
    }

    /**
     * Whether this stream is able to write the given entry.
     *
     * <p>May return false if it is set up to use encryption or a
     * compression method that hasn't been implemented yet.</p>
     * @since 1.1
     */
    @Override
    public boolean canWriteEntryData(final ArchiveEntry ae) {
        if (ae instanceof ZipArchiveEntry) {
            final ZipArchiveEntry zae = (ZipArchiveEntry) ae;
            return zae.getMethod() != ZipMethod.IMPLODING.getCode()
                && zae.getMethod() != ZipMethod.UNSHRINKING.getCode()
                && ZipUtil.canHandleEntryData(zae);
        }
        return false;
    }

    /**
     * Write preamble data. Most of the time, this is used to
     * make self-extracting zips.
     *
     * @param preamble data to write
     * @throws IOException if an entry already exists
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble) throws IOException {
        writePreamble(preamble, 0, preamble.length);
    }

    /**
     * Write preamble data. Most of the time, this is used to
     * make self-extracting zips.
     *
     * @param preamble data to write
     * @param offset the start offset in the data
     * @param length the number of bytes to write
     * @throws IOException if an entry already exists
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble, final int offset, final int length) throws IOException {
        if (entry != null) {
            throw new IllegalStateException("Preamble must be written before creating an entry");
        }
        this.streamCompressor.writeCounted(preamble, offset, length);
    }

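    /*
     * A sketch of using writePreamble to put a self-extractor stub in front
     * of the first entry (illustrative only; the stub file and entry name
     * are made up):
     *
     *   byte[] stub = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("sfx-stub.bin"));
     *   zipOut.writePreamble(stub);
     *   zipOut.putArchiveEntry(new ZipArchiveEntry("payload.txt"));
     */
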
    /**
     * Writes bytes to ZIP entry.
     * @param b the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    @Override
    public void write(final byte[] b, final int offset, final int length) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        final long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod());
        count(writtenThisTime);
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @throws IOException on error
     */
    private void writeCounted(final byte[] data) throws IOException {
        streamCompressor.writeCounted(data);
    }

    private void copyFromZipInputStream(final InputStream src) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        entry.hasWritten = true;
        int length;
        while ((length = src.read(copyBuffer)) >= 0) {
            streamCompressor.writeCounted(copyBuffer, 0, length);
            count(length);
        }
    }

    /**
     * Closes this output stream and releases any system resources
     * associated with the stream.
     *
     * @throws IOException if an I/O error occurs.
     * @throws Zip64RequiredException if the archive's size exceeds 4
     * GByte or there are more than 65535 entries inside the archive
     * and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void close() throws IOException {
        try {
            if (!finished) {
                finish();
            }
        } finally {
            destroy();
        }
    }

    /**
     * Flushes this output stream and forces any buffered output bytes
     * to be written out to the stream.
     *
     * @throws IOException if an I/O error occurs.
     */
    @Override
    public void flush() throws IOException {
        if (out != null) {
            out.flush();
        }
    }

    /*
     * Various ZIP constants shared between this class, ZipArchiveInputStream and ZipFile
     */
    /**
     * local file header signature
     */
    static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes(); //NOSONAR
    /**
     * data descriptor signature
     */
    static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes(); //NOSONAR
    /**
     * central file header signature
     */
    static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes(); //NOSONAR
    /**
     * end of central dir signature
     */
    static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L); //NOSONAR
    /**
     * ZIP64 end of central dir signature
     */
    static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L); //NOSONAR
    /**
     * ZIP64 end of central dir locator signature
     */
    static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L); //NOSONAR

    /**
     * Writes next block of compressed data to the output stream.
1140 * @throws IOException on error 1141 */ 1142 protected final void deflate() throws IOException { 1143 streamCompressor.deflate(); 1144 } 1145 1146 /** 1147 * Writes the local file header entry 1148 * @param ze the entry to write 1149 * @throws IOException on error 1150 */ 1151 protected void writeLocalFileHeader(final ZipArchiveEntry ze) throws IOException { 1152 writeLocalFileHeader(ze, false); 1153 } 1154 1155 private void writeLocalFileHeader(final ZipArchiveEntry ze, final boolean phased) throws IOException { 1156 final boolean encodable = zipEncoding.canEncode(ze.getName()); 1157 final ByteBuffer name = getName(ze); 1158 1159 if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) { 1160 addUnicodeExtraFields(ze, encodable, name); 1161 } 1162 1163 long localHeaderStart = streamCompressor.getTotalBytesWritten(); 1164 if (isSplitZip) { 1165 // when creating a split zip, the offset should be 1166 // the offset to the corresponding segment disk 1167 final ZipSplitOutputStream splitOutputStream = (ZipSplitOutputStream)this.out; 1168 ze.setDiskNumberStart(splitOutputStream.getCurrentSplitSegmentIndex()); 1169 localHeaderStart = splitOutputStream.getCurrentSplitSegmentBytesWritten(); 1170 } 1171 1172 final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased, localHeaderStart); 1173 metaData.put(ze, new EntryMetaData(localHeaderStart, usesDataDescriptor(ze.getMethod(), phased))); 1174 entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset 1175 writeCounted(localHeader); 1176 entry.dataStart = streamCompressor.getTotalBytesWritten(); 1177 } 1178 1179 1180 private byte[] createLocalFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final boolean encodable, 1181 final boolean phased, final long archiveOffset) { 1182 final ZipExtraField oldEx = ze.getExtraField(ResourceAlignmentExtraField.ID); 1183 if (oldEx != null) { 1184 ze.removeExtraField(ResourceAlignmentExtraField.ID); 1185 } 1186 final ResourceAlignmentExtraField oldAlignmentEx = 1187 oldEx instanceof ResourceAlignmentExtraField ? 
(ResourceAlignmentExtraField) oldEx : null; 1188 1189 int alignment = ze.getAlignment(); 1190 if (alignment <= 0 && oldAlignmentEx != null) { 1191 alignment = oldAlignmentEx.getAlignment(); 1192 } 1193 1194 if (alignment > 1 || (oldAlignmentEx != null && !oldAlignmentEx.allowMethodChange())) { 1195 final int oldLength = LFH_FILENAME_OFFSET + 1196 name.limit() - name.position() + 1197 ze.getLocalFileDataExtra().length; 1198 1199 final int padding = (int) ((-archiveOffset - oldLength - ZipExtraField.EXTRAFIELD_HEADER_SIZE 1200 - ResourceAlignmentExtraField.BASE_SIZE) & 1201 (alignment - 1)); 1202 ze.addExtraField(new ResourceAlignmentExtraField(alignment, 1203 oldAlignmentEx != null && oldAlignmentEx.allowMethodChange(), padding)); 1204 } 1205 1206 final byte[] extra = ze.getLocalFileDataExtra(); 1207 final int nameLen = name.limit() - name.position(); 1208 final int len = LFH_FILENAME_OFFSET + nameLen + extra.length; 1209 final byte[] buf = new byte[len]; 1210 1211 System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, WORD); 1212 1213 //store method in local variable to prevent multiple method calls 1214 final int zipMethod = ze.getMethod(); 1215 final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased); 1216 1217 putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET); 1218 1219 final GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(!encodable && fallbackToUTF8, dataDescriptor); 1220 generalPurposeBit.encode(buf, LFH_GPB_OFFSET); 1221 1222 // compression method 1223 putShort(zipMethod, buf, LFH_METHOD_OFFSET); 1224 1225 ZipUtil.toDosTime(calendarInstance, ze.getTime(), buf, LFH_TIME_OFFSET); 1226 1227 // CRC 1228 if (phased || !(zipMethod == DEFLATED || channel != null)){ 1229 putLong(ze.getCrc(), buf, LFH_CRC_OFFSET); 1230 } else { 1231 System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, WORD); 1232 } 1233 1234 // compressed length 1235 // uncompressed length 1236 if (hasZip64Extra(entry.entry)){ 1237 // point to ZIP64 extended information extra field for 1238 // sizes, may get rewritten once sizes are known if 1239 // stream is seekable 1240 ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET); 1241 ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET); 1242 } else if (phased) { 1243 putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET); 1244 putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET); 1245 } else if (zipMethod == DEFLATED || channel != null) { 1246 System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, WORD); 1247 System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, WORD); 1248 } else { // Stored 1249 putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET); 1250 putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET); 1251 } 1252 // file name length 1253 putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET); 1254 1255 // extra field length 1256 putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET); 1257 1258 // file name 1259 System.arraycopy( name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen); 1260 1261 // extra fields 1262 System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length); 1263 1264 return buf; 1265 } 1266 1267 1268 /** 1269 * Adds UnicodeExtra fields for name and file comment if mode is 1270 * ALWAYS or the data cannot be encoded using the configured 1271 * encoding. 
1272 */ 1273 private void addUnicodeExtraFields(final ZipArchiveEntry ze, final boolean encodable, 1274 final ByteBuffer name) 1275 throws IOException { 1276 if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS 1277 || !encodable) { 1278 ze.addExtraField(new UnicodePathExtraField(ze.getName(), 1279 name.array(), 1280 name.arrayOffset(), 1281 name.limit() 1282 - name.position())); 1283 } 1284 1285 final String comm = ze.getComment(); 1286 if (comm != null && !"".equals(comm)) { 1287 1288 final boolean commentEncodable = zipEncoding.canEncode(comm); 1289 1290 if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS 1291 || !commentEncodable) { 1292 final ByteBuffer commentB = getEntryEncoding(ze).encode(comm); 1293 ze.addExtraField(new UnicodeCommentExtraField(comm, 1294 commentB.array(), 1295 commentB.arrayOffset(), 1296 commentB.limit() 1297 - commentB.position()) 1298 ); 1299 } 1300 } 1301 } 1302 1303 /** 1304 * Writes the data descriptor entry. 1305 * @param ze the entry to write 1306 * @throws IOException on error 1307 */ 1308 protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException { 1309 if (!usesDataDescriptor(ze.getMethod(), false)) { 1310 return; 1311 } 1312 writeCounted(DD_SIG); 1313 writeCounted(ZipLong.getBytes(ze.getCrc())); 1314 if (!hasZip64Extra(ze)) { 1315 writeCounted(ZipLong.getBytes(ze.getCompressedSize())); 1316 writeCounted(ZipLong.getBytes(ze.getSize())); 1317 } else { 1318 writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize())); 1319 writeCounted(ZipEightByteInteger.getBytes(ze.getSize())); 1320 } 1321 } 1322 1323 /** 1324 * Writes the central file header entry. 1325 * @param ze the entry to write 1326 * @throws IOException on error 1327 * @throws Zip64RequiredException if the archive's size exceeds 4 1328 * GByte and {@link #setUseZip64(Zip64Mode)} is {@link 1329 * Zip64Mode#Never}. 1330 */ 1331 protected void writeCentralFileHeader(final ZipArchiveEntry ze) throws IOException { 1332 final byte[] centralFileHeader = createCentralFileHeader(ze); 1333 writeCounted(centralFileHeader); 1334 } 1335 1336 private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException { 1337 1338 final EntryMetaData entryMetaData = metaData.get(ze); 1339 final boolean needsZip64Extra = hasZip64Extra(ze) 1340 || ze.getCompressedSize() >= ZIP64_MAGIC 1341 || ze.getSize() >= ZIP64_MAGIC 1342 || entryMetaData.offset >= ZIP64_MAGIC 1343 || ze.getDiskNumberStart() >= ZIP64_MAGIC_SHORT 1344 || zip64Mode == Zip64Mode.Always 1345 || zip64Mode == Zip64Mode.AlwaysWithCompatibility; 1346 1347 if (needsZip64Extra && zip64Mode == Zip64Mode.Never) { 1348 // must be the offset that is too big, otherwise an 1349 // exception would have been throw in putArchiveEntry or 1350 // closeArchiveEntry 1351 throw new Zip64RequiredException(Zip64RequiredException 1352 .ARCHIVE_TOO_BIG_MESSAGE); 1353 } 1354 1355 1356 handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra); 1357 1358 return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra); 1359 } 1360 1361 /** 1362 * Writes the central file header entry. 
1363 * @param ze the entry to write 1364 * @param name The encoded name 1365 * @param entryMetaData meta data for this file 1366 * @throws IOException on error 1367 */ 1368 private byte[] createCentralFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, 1369 final EntryMetaData entryMetaData, 1370 final boolean needsZip64Extra) throws IOException { 1371 if(isSplitZip) { 1372 // calculate the disk number for every central file header, 1373 // this will be used in writing End Of Central Directory and Zip64 End Of Central Directory 1374 final int currentSplitSegment = ((ZipSplitOutputStream)this.out).getCurrentSplitSegmentIndex(); 1375 if(numberOfCDInDiskData.get(currentSplitSegment) == null) { 1376 numberOfCDInDiskData.put(currentSplitSegment, 1); 1377 } else { 1378 final int originalNumberOfCD = numberOfCDInDiskData.get(currentSplitSegment); 1379 numberOfCDInDiskData.put(currentSplitSegment, originalNumberOfCD + 1); 1380 } 1381 } 1382 1383 final byte[] extra = ze.getCentralDirectoryExtra(); 1384 final int extraLength = extra.length; 1385 1386 // file comment length 1387 String comm = ze.getComment(); 1388 if (comm == null) { 1389 comm = ""; 1390 } 1391 1392 final ByteBuffer commentB = getEntryEncoding(ze).encode(comm); 1393 final int nameLen = name.limit() - name.position(); 1394 final int commentLen = commentB.limit() - commentB.position(); 1395 final int len= CFH_FILENAME_OFFSET + nameLen + extraLength + commentLen; 1396 final byte[] buf = new byte[len]; 1397 1398 System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, WORD); 1399 1400 // version made by 1401 // CheckStyle:MagicNumber OFF 1402 putShort((ze.getPlatform() << 8) | (!hasUsedZip64 ? DATA_DESCRIPTOR_MIN_VERSION : ZIP64_MIN_VERSION), 1403 buf, CFH_VERSION_MADE_BY_OFFSET); 1404 1405 final int zipMethod = ze.getMethod(); 1406 final boolean encodable = zipEncoding.canEncode(ze.getName()); 1407 putShort(versionNeededToExtract(zipMethod, needsZip64Extra, entryMetaData.usesDataDescriptor), 1408 buf, CFH_VERSION_NEEDED_OFFSET); 1409 getGeneralPurposeBits(!encodable && fallbackToUTF8, entryMetaData.usesDataDescriptor).encode(buf, CFH_GPB_OFFSET); 1410 1411 // compression method 1412 putShort(zipMethod, buf, CFH_METHOD_OFFSET); 1413 1414 1415 // last mod. 
time and date 1416 ZipUtil.toDosTime(calendarInstance, ze.getTime(), buf, CFH_TIME_OFFSET); 1417 1418 // CRC 1419 // compressed length 1420 // uncompressed length 1421 putLong(ze.getCrc(), buf, CFH_CRC_OFFSET); 1422 if (ze.getCompressedSize() >= ZIP64_MAGIC 1423 || ze.getSize() >= ZIP64_MAGIC 1424 || zip64Mode == Zip64Mode.Always 1425 || zip64Mode == Zip64Mode.AlwaysWithCompatibility) { 1426 ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET); 1427 ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET); 1428 } else { 1429 putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET); 1430 putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET); 1431 } 1432 1433 putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET); 1434 1435 // extra field length 1436 putShort(extraLength, buf, CFH_EXTRA_LENGTH_OFFSET); 1437 1438 putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET); 1439 1440 // disk number start 1441 if(isSplitZip) { 1442 if (ze.getDiskNumberStart() >= ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always) { 1443 putShort(ZIP64_MAGIC_SHORT, buf, CFH_DISK_NUMBER_OFFSET); 1444 } else { 1445 putShort((int) ze.getDiskNumberStart(), buf, CFH_DISK_NUMBER_OFFSET); 1446 } 1447 } else { 1448 System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, SHORT); 1449 } 1450 1451 // internal file attributes 1452 putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET); 1453 1454 // external file attributes 1455 putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET); 1456 1457 // relative offset of LFH 1458 if (entryMetaData.offset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) { 1459 putLong(ZIP64_MAGIC, buf, CFH_LFH_OFFSET); 1460 } else { 1461 putLong(Math.min(entryMetaData.offset, ZIP64_MAGIC), buf, CFH_LFH_OFFSET); 1462 } 1463 1464 // file name 1465 System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen); 1466 1467 final int extraStart = CFH_FILENAME_OFFSET + nameLen; 1468 System.arraycopy(extra, 0, buf, extraStart, extraLength); 1469 1470 final int commentStart = extraStart + extraLength; 1471 1472 // file comment 1473 System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen); 1474 return buf; 1475 } 1476 1477 /** 1478 * If the entry needs Zip64 extra information inside the central 1479 * directory then configure its data. 
1480 */ 1481 private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset, 1482 final boolean needsZip64Extra) { 1483 if (needsZip64Extra) { 1484 final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze); 1485 if (ze.getCompressedSize() >= ZIP64_MAGIC 1486 || ze.getSize() >= ZIP64_MAGIC 1487 || zip64Mode == Zip64Mode.Always 1488 || zip64Mode == Zip64Mode.AlwaysWithCompatibility) { 1489 z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize())); 1490 z64.setSize(new ZipEightByteInteger(ze.getSize())); 1491 } else { 1492 // reset value that may have been set for LFH 1493 z64.setCompressedSize(null); 1494 z64.setSize(null); 1495 } 1496 1497 final boolean needsToEncodeLfhOffset = 1498 lfhOffset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always; 1499 final boolean needsToEncodeDiskNumberStart = 1500 ze.getDiskNumberStart() >= ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always; 1501 1502 if (needsToEncodeLfhOffset || needsToEncodeDiskNumberStart) { 1503 z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset)); 1504 } 1505 if (needsToEncodeDiskNumberStart) { 1506 z64.setDiskStartNumber(new ZipLong(ze.getDiskNumberStart())); 1507 } 1508 ze.setExtra(); 1509 } 1510 } 1511 1512 /** 1513 * Writes the "End of central dir record". 1514 * @throws IOException on error 1515 * @throws Zip64RequiredException if the archive's size exceeds 4 1516 * GByte or there are more than 65535 entries inside the archive 1517 * and {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}. 1518 */ 1519 protected void writeCentralDirectoryEnd() throws IOException { 1520 if(!hasUsedZip64 && isSplitZip) { 1521 ((ZipSplitOutputStream)this.out).prepareToWriteUnsplittableContent(eocdLength); 1522 } 1523 1524 validateIfZip64IsNeededInEOCD(); 1525 1526 writeCounted(EOCD_SIG); 1527 1528 // number of this disk 1529 int numberOfThisDisk = 0; 1530 if(isSplitZip) { 1531 numberOfThisDisk = ((ZipSplitOutputStream)this.out).getCurrentSplitSegmentIndex(); 1532 } 1533 writeCounted(ZipShort.getBytes(numberOfThisDisk)); 1534 1535 // disk number of the start of central directory 1536 writeCounted(ZipShort.getBytes((int)cdDiskNumberStart)); 1537 1538 // number of entries 1539 final int numberOfEntries = entries.size(); 1540 1541 // total number of entries in the central directory on this disk 1542 final int numOfEntriesOnThisDisk = isSplitZip 1543 ? (numberOfCDInDiskData.get(numberOfThisDisk) == null ? 0 : numberOfCDInDiskData.get(numberOfThisDisk)) 1544 : numberOfEntries; 1545 final byte[] numOfEntriesOnThisDiskData = ZipShort 1546 .getBytes(Math.min(numOfEntriesOnThisDisk, ZIP64_MAGIC_SHORT)); 1547 writeCounted(numOfEntriesOnThisDiskData); 1548 1549 // number of entries 1550 final byte[] num = ZipShort.getBytes(Math.min(numberOfEntries, 1551 ZIP64_MAGIC_SHORT)); 1552 writeCounted(num); 1553 1554 // length and location of CD 1555 writeCounted(ZipLong.getBytes(Math.min(cdLength, ZIP64_MAGIC))); 1556 writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZIP64_MAGIC))); 1557 1558 // ZIP file comment 1559 final ByteBuffer data = this.zipEncoding.encode(comment); 1560 final int dataLen = data.limit() - data.position(); 1561 writeCounted(ZipShort.getBytes(dataLen)); 1562 streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen); 1563 } 1564 1565 /** 1566 * If the Zip64 mode is set to never, then all the data in End Of Central Directory 1567 * should not exceed their limits. 
    /**
     * If the Zip64 mode is set to {@link Zip64Mode#Never}, none of the fields of the
     * "End of central directory record" may exceed its limit.
     * @throws Zip64RequiredException if Zip64 is actually needed
     */
    private void validateIfZip64IsNeededInEOCD() throws Zip64RequiredException {
        // an exception is only thrown if the Zip64 mode is Never while Zip64 is actually needed
        if (zip64Mode != Zip64Mode.Never) {
            return;
        }

        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex();
        }
        if (numberOfThisDisk >= ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException
                .NUMBER_OF_THIS_DISK_TOO_BIG_MESSAGE);
        }

        if (cdDiskNumberStart >= ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException
                .NUMBER_OF_THE_DISK_OF_CENTRAL_DIRECTORY_TOO_BIG_MESSAGE);
        }

        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.get(numberOfThisDisk) == null
            ? 0 : numberOfCDInDiskData.get(numberOfThisDisk);
        if (numOfEntriesOnThisDisk >= ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException
                .TOO_MANY_ENTRIES_ON_THIS_DISK_MESSAGE);
        }

        // number of entries
        if (entries.size() >= ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException
                .TOO_MANY_ENTRIES_MESSAGE);
        }

        if (cdLength >= ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException
                .SIZE_OF_CENTRAL_DIRECTORY_TOO_BIG_MESSAGE);
        }

        if (cdOffset >= ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException
                .ARCHIVE_TOO_BIG_MESSAGE);
        }
    }

    /**
     * Writes the "ZIP64 End of central dir record" and
     * "ZIP64 End of central dir locator".
     * @throws IOException on error
     * @since 1.3
     */
    protected void writeZip64CentralDirectory() throws IOException {
        if (zip64Mode == Zip64Mode.Never) {
            return;
        }

        if (!hasUsedZip64 && shouldUseZip64EOCD()) {
            // actually "will use"
            hasUsedZip64 = true;
        }

        if (!hasUsedZip64) {
            return;
        }

        long offset = streamCompressor.getTotalBytesWritten();
        long diskNumberStart = 0L;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset within the corresponding split segment
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.out;
            offset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            diskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }

        writeOut(ZIP64_EOCD_SIG);
        // size of the zip64 end of central directory record; there is no variable-length part
        // as the extensible data sector is not supported, yet
        writeOut(ZipEightByteInteger
                 .getBytes(SHORT /* version made by */
                           + SHORT /* version needed to extract */
                           + WORD /* disk number */
                           + WORD /* disk with central directory */
                           + DWORD /* number of entries in CD on this disk */
                           + DWORD /* total number of entries */
                           + DWORD /* size of CD */
                           + (long) DWORD /* offset of CD */
                 ));

        // version made by and version needed to extract
        writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));
        writeOut(ZipShort.getBytes(ZIP64_MIN_VERSION));

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex();
        }
        writeOut(ZipLong.getBytes(numberOfThisDisk));
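        // Illustrative note (not part of the original source): the fixed-size fields written
        // above and below add up to 44 bytes (2 * SHORT + 2 * WORD + 4 * DWORD), which is the
        // value stored in the "size of zip64 end of central directory record" field above;
        // per the ZIP specification that size excludes the twelve bytes taken up by the
        // signature and the size field itself.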
        // disk number of the start of central directory
        writeOut(ZipLong.getBytes(cdDiskNumberStart));

        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip
            ? (numberOfCDInDiskData.get(numberOfThisDisk) == null ? 0 : numberOfCDInDiskData.get(numberOfThisDisk))
            : entries.size();
        final byte[] numOfEntriesOnThisDiskData = ZipEightByteInteger.getBytes(numOfEntriesOnThisDisk);
        writeOut(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipEightByteInteger.getBytes(entries.size());
        writeOut(num);

        // length and location of CD
        writeOut(ZipEightByteInteger.getBytes(cdLength));
        writeOut(ZipEightByteInteger.getBytes(cdOffset));

        // no "zip64 extensible data sector" for now

        if (isSplitZip) {
            // based on the zip specification, the End Of Central Directory record and
            // the Zip64 End Of Central Directory locator record must be on the same segment
            final int zip64EOCDLOCLength = WORD /* length of ZIP64_EOCD_LOC_SIG */
                + WORD /* disk number of ZIP64_EOCD_SIG */
                + DWORD /* offset of ZIP64_EOCD_SIG */
                + WORD /* total number of disks */;

            final long unsplittableContentSize = zip64EOCDLOCLength + eocdLength;
            ((ZipSplitOutputStream) this.out).prepareToWriteUnsplittableContent(unsplittableContentSize);
        }

        // and now the "ZIP64 end of central directory locator"
        writeOut(ZIP64_EOCD_LOC_SIG);

        // disk number holding the ZIP64 EOCD record
        writeOut(ZipLong.getBytes(diskNumberStart));
        // relative offset of the ZIP64 EOCD record
        writeOut(ZipEightByteInteger.getBytes(offset));
        // total number of disks
        if (isSplitZip) {
            // the Zip64 End Of Central Directory Locator and the End Of Central Directory record
            // must be on the same split segment, which means they must be located on the last disk
            final int totalNumberOfDisks = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex() + 1;
            writeOut(ZipLong.getBytes(totalNumberOfDisks));
        } else {
            writeOut(ONE);
        }
    }

    /**
     * 4.4.1.4 If one of the fields in the end of central directory
     * record is too small to hold required data, the field SHOULD be
     * set to -1 (0xFFFF or 0xFFFFFFFF) and the ZIP64 format record
     * SHOULD be created.
     * @return true if the zip64 End Of Central Directory record is needed
     */
    private boolean shouldUseZip64EOCD() {
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex();
        }
        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.get(numberOfThisDisk) == null
            ? 0 : numberOfCDInDiskData.get(numberOfThisDisk);
        return numberOfThisDisk >= ZIP64_MAGIC_SHORT           /* number of this disk */
            || cdDiskNumberStart >= ZIP64_MAGIC_SHORT          /* number of the disk with the start of the central directory */
            || numOfEntriesOnThisDisk >= ZIP64_MAGIC_SHORT     /* total number of entries in the central directory on this disk */
            || entries.size() >= ZIP64_MAGIC_SHORT             /* total number of entries in the central directory */
            || cdLength >= ZIP64_MAGIC                         /* size of the central directory */
            || cdOffset >= ZIP64_MAGIC;                        /* offset of start of central directory with respect to the starting disk number */
    }
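    // Illustrative note (not part of the original source): the thresholds checked by
    // shouldUseZip64EOCD above mirror the limits of the classic record: counts and disk
    // numbers are 16-bit fields (capped at ZIP64_MAGIC_SHORT, 0xFFFF) while the central
    // directory size and offset are 32-bit fields (capped at ZIP64_MAGIC, 0xFFFFFFFF).
    // With setUseZip64(Zip64Mode.Never) an archive that crosses any of these limits fails
    // with a Zip64RequiredException instead of silently writing ZIP64 records.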
    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @throws IOException on error
     */
    protected final void writeOut(final byte[] data) throws IOException {
        streamCompressor.writeOut(data, 0, data.length);
    }

    /**
     * Write bytes to output or random access file.
     * @param data the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    protected final void writeOut(final byte[] data, final int offset, final int length)
        throws IOException {
        streamCompressor.writeOut(data, offset, length);
    }

    private GeneralPurposeBit getGeneralPurposeBits(final boolean utfFallback, final boolean usesDataDescriptor) {
        final GeneralPurposeBit b = new GeneralPurposeBit();
        b.useUTF8ForNames(useUTF8Flag || utfFallback);
        if (usesDataDescriptor) {
            b.useDataDescriptor(true);
        }
        return b;
    }

    private int versionNeededToExtract(final int zipMethod, final boolean zip64, final boolean usedDataDescriptor) {
        if (zip64) {
            return ZIP64_MIN_VERSION;
        }
        if (usedDataDescriptor) {
            return DATA_DESCRIPTOR_MIN_VERSION;
        }
        return versionNeededToExtractMethod(zipMethod);
    }

    private boolean usesDataDescriptor(final int zipMethod, final boolean phased) {
        return !phased && zipMethod == DEFLATED && channel == null;
    }

    private int versionNeededToExtractMethod(final int zipMethod) {
        return zipMethod == DEFLATED ? DEFLATE_MIN_VERSION : INITIAL_VERSION;
    }

    /**
     * Creates a new zip entry taking some information from the given
     * file and using the provided name.
     *
     * <p>The name will be adjusted to end with a forward slash "/" if
     * the file is a directory. If the file is not a directory a
     * potential trailing forward slash will be stripped from the
     * entry name.</p>
     *
     * <p>Must not be used if the stream has already been closed.</p>
     */
    @Override
    public ArchiveEntry createArchiveEntry(final File inputFile, final String entryName)
        throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputFile, entryName);
    }

    /**
     * Creates a new zip entry taking some information from the given
     * file and using the provided name.
     *
     * <p>The name will be adjusted to end with a forward slash "/" if
     * the file is a directory. If the file is not a directory a
     * potential trailing forward slash will be stripped from the
     * entry name.</p>
     *
     * <p>Must not be used if the stream has already been closed.</p>
     * @param inputPath path to create the entry from.
     * @param entryName name of the entry.
     * @param options options indicating how symbolic links are handled.
     * @return a new instance.
     * @throws IOException if an I/O error occurs.
     * @since 1.21
     */
    @Override
    public ArchiveEntry createArchiveEntry(final Path inputPath, final String entryName, final LinkOption... options)
        throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputPath, entryName);
    }
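    // Illustrative usage sketch (not part of the original source); "zipOut" and the file name
    // are placeholders chosen only for this example:
    //
    //   final File src = new File("data.txt");
    //   final ZipArchiveEntry entry = (ZipArchiveEntry) zipOut.createArchiveEntry(src, "data.txt");
    //   zipOut.putArchiveEntry(entry);
    //   zipOut.write(Files.readAllBytes(src.toPath()));
    //   zipOut.closeArchiveEntry();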
    /**
     * Get the existing ZIP64 extended information extra field or
     * create a new one and add it to the entry.
     *
     * @since 1.3
     */
    private Zip64ExtendedInformationExtraField getZip64Extra(final ZipArchiveEntry ze) {
        if (entry != null) {
            entry.causedUseOfZip64 = !hasUsedZip64;
        }
        hasUsedZip64 = true;
        final ZipExtraField extra = ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        Zip64ExtendedInformationExtraField z64 = extra instanceof Zip64ExtendedInformationExtraField
            ? (Zip64ExtendedInformationExtraField) extra : null;
        if (z64 == null) {
            /*
            System.err.println("Adding z64 for " + ze.getName()
                + ", method: " + ze.getMethod()
                + " (" + (ze.getMethod() == STORED) + ")"
                + ", channel: " + (channel != null));
            */
            z64 = new Zip64ExtendedInformationExtraField();
        }

        // even if the field is there already, make sure it is the first one
        ze.addAsFirstExtraField(z64);

        return z64;
    }

    /**
     * Is there a ZIP64 extended information extra field for the
     * entry?
     *
     * @since 1.3
     */
    private boolean hasZip64Extra(final ZipArchiveEntry ze) {
        return ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID)
            instanceof Zip64ExtendedInformationExtraField;
    }

    /**
     * If the mode is AsNeeded and the entry is a compressed entry of
     * unknown size that gets written to a non-seekable stream then
     * change the default to Never.
     *
     * @since 1.3
     */
    private Zip64Mode getEffectiveZip64Mode(final ZipArchiveEntry ze) {
        if (zip64Mode != Zip64Mode.AsNeeded
            || channel != null
            || ze.getMethod() != DEFLATED
            || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
            return zip64Mode;
        }
        return Zip64Mode.Never;
    }

    private ZipEncoding getEntryEncoding(final ZipArchiveEntry ze) {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        return !encodable && fallbackToUTF8
            ? ZipEncodingHelper.UTF8_ZIP_ENCODING : zipEncoding;
    }

    private ByteBuffer getName(final ZipArchiveEntry ze) throws IOException {
        return getEntryEncoding(ze).encode(ze.getName());
    }

    /**
     * Closes the underlying stream/file without finishing the
     * archive; the result will likely be a corrupt archive.
     *
     * <p>This method only exists to support tests that generate
     * corrupt archives so they can clean up any temporary files.</p>
     */
    void destroy() throws IOException {
        try {
            if (channel != null) {
                channel.close();
            }
        } finally {
            if (out != null) {
                out.close();
            }
        }
    }

    /**
     * Enumerates the possible policies for creating Unicode extra fields.
     */
    public static final class UnicodeExtraFieldPolicy {
        /**
         * Always create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");
        /**
         * Never create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");
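        // Illustrative note (not part of the original source): a policy is applied through
        // setCreateUnicodeExtraFields on the enclosing stream, for example
        //
        //   zipOut.setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy.NOT_ENCODEABLE);
        //
        // ("zipOut" is an assumed ZipArchiveOutputStream variable used only for illustration.)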
        /**
         * Create Unicode extra fields for file names that cannot be
         * encoded using the specified encoding.
         */
        public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE =
            new UnicodeExtraFieldPolicy("not encodeable");

        private final String name;
        private UnicodeExtraFieldPolicy(final String n) {
            name = n;
        }
        @Override
        public String toString() {
            return name;
        }
    }

    /**
     * Structure collecting information for the entry that is
     * currently being written.
     */
    private static final class CurrentEntry {
        private CurrentEntry(final ZipArchiveEntry entry) {
            this.entry = entry;
        }
        /**
         * Current ZIP entry.
         */
        private final ZipArchiveEntry entry;
        /**
         * Offset of the CRC field inside the local file header data
         * written for the current entry.
         */
        private long localDataStart;
        /**
         * Offset where the data of the current entry starts, right
         * after its local file header.
         */
        private long dataStart;
        /**
         * Number of bytes read for the current entry (can't rely on
         * Deflater#getBytesRead) when using DEFLATED.
         */
        private long bytesRead;
        /**
         * Whether the current entry was the first one using ZIP64 features.
         */
        private boolean causedUseOfZip64;
        /**
         * Whether write() has been called at all.
         *
         * <p>In order to create a valid archive {@link
         * #closeArchiveEntry closeArchiveEntry} will write an empty
         * array to get the CRC right if nothing has been written to
         * the stream at all.</p>
         */
        private boolean hasWritten;
    }

    private static final class EntryMetaData {
        private final long offset;
        private final boolean usesDataDescriptor;
        private EntryMetaData(final long offset, final boolean usesDataDescriptor) {
            this.offset = offset;
            this.usesDataDescriptor = usesDataDescriptor;
        }
    }
}
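// Illustrative note (not part of the original source): assuming the usual
// putArchiveEntry/write/closeArchiveEntry cycle per entry followed by finish() and close(),
// an archive produced by this class is laid out roughly as follows:
//
//   local file header + entry data (+ data descriptor, see usesDataDescriptor above)  ... per entry
//   central directory: one central file header per entry
//   ZIP64 "end of central directory" record and locator (writeZip64CentralDirectory),
//     present only when ZIP64 features were actually used
//   "end of central directory" record (writeCentralDirectoryEnd)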