1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package com.android.tools.build.apkzlib.zip;
18 
19 import com.android.tools.build.apkzlib.utils.CachedFileContents;
20 import com.android.tools.build.apkzlib.utils.IOExceptionFunction;
21 import com.android.tools.build.apkzlib.utils.IOExceptionRunnable;
22 import com.android.tools.build.apkzlib.zip.compress.Zip64NotSupportedException;
23 import com.android.tools.build.apkzlib.zip.utils.ByteTracker;
24 import com.android.tools.build.apkzlib.zip.utils.CloseableByteSource;
25 import com.android.tools.build.apkzlib.zip.utils.LittleEndianUtils;
26 import com.google.common.base.Preconditions;
27 import com.google.common.base.Verify;
28 import com.google.common.base.VerifyException;
29 import com.google.common.collect.ImmutableList;
30 import com.google.common.collect.Iterables;
31 import com.google.common.collect.Lists;
32 import com.google.common.collect.Maps;
33 import com.google.common.collect.Sets;
34 import com.google.common.hash.Hashing;
35 import com.google.common.io.ByteSource;
36 import com.google.common.io.Closer;
37 import com.google.common.io.Files;
38 import com.google.common.primitives.Ints;
39 import com.google.common.util.concurrent.FutureCallback;
40 import com.google.common.util.concurrent.Futures;
41 import com.google.common.util.concurrent.ListenableFuture;
42 import com.google.common.util.concurrent.MoreExecutors;
43 import com.google.common.util.concurrent.SettableFuture;
44 import java.io.ByteArrayInputStream;
45 import java.io.Closeable;
46 import java.io.EOFException;
47 import java.io.File;
48 import java.io.FileInputStream;
49 import java.io.IOException;
50 import java.io.InputStream;
51 import java.io.RandomAccessFile;
52 import java.nio.ByteBuffer;
53 import java.nio.channels.FileChannel;
54 import java.util.ArrayList;
55 import java.util.HashSet;
56 import java.util.List;
57 import java.util.Map;
58 import java.util.Set;
59 import java.util.SortedSet;
60 import java.util.TreeMap;
61 import java.util.TreeSet;
62 import java.util.concurrent.ExecutionException;
63 import java.util.concurrent.Future;
64 import java.util.function.Function;
65 import java.util.function.Predicate;
66 import java.util.function.Supplier;
67 import javax.annotation.Nonnull;
68 import javax.annotation.Nullable;
69 
70 /**
71  * The {@code ZFile} provides the main interface for interacting with zip files. A {@code ZFile}
72  * can be created on a new file or in an existing file. Once created, files can be added to or
73  * removed from the zip file.
74  *
75  * <p>Changes in the zip file are always deferred. Any change requested is made in memory and
76  * written to disk only when {@link #update()} or {@link #close()} is invoked.
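 *
 * <p>A minimal usage sketch of this deferred model (assuming the {@code add(String, InputStream)}
 * entry point exposed by this class; error handling omitted):
 * <pre>{@code
 * try (ZFile zf = new ZFile(new File("archive.zip"))) {
 *     // The addition is only recorded in memory...
 *     zf.add("greeting.txt", new ByteArrayInputStream("hello".getBytes("UTF-8")));
 *     // ...and written to disk here (close() would also flush it).
 *     zf.update();
 * }
 * }</pre>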
77  *
78  * <p>Zip files are initially opened in read-only mode and will switch to read-write when needed.
79  * This is done automatically. Because modifications to the file are done in-memory, the zip file
80  * can be manipulated while closed. When {@link #update()} or {@link #close()} is invoked, the zip
81  * file will be reopened and the changes will be written. However, the zip file cannot be modified
82  * outside the control of {@code ZFile}. If a {@code ZFile} is closed, the underlying file is
83  * modified externally, and a file is then added or removed, {@link ZFile} will detect the outside
84  * modification when reopening the zip and will fail.
85  *
86  * <p>In memory manipulation means that files added to the zip file are kept in memory until written
87  * to disk. This provides much faster operation and allows better zip file allocation (see below).
88  * It may, however, increase the memory footprint of the application. When adding large files, if
89  * memory consumption is a concern, a call to {@link #update()} will actually write the file to
90  * disk and discard the memory buffer. Information about allocation can be obtained from a
91  * {@link ByteTracker} that can be given to the file on creation.
92  *
93  * <p>{@code ZFile} keeps track of allocation inside of the zip file. If a file is deleted, its
94  * space is marked as freed and will be reused for an added file if it fits in the space.
95  * Allocation of files to empty areas is done using a <em>best fit</em> algorithm. When adding a
96  * file, if it doesn't fit in any free area, the zip file will be extended.
97  *
98  * <p>{@code ZFile} provides a fast way to merge data from another zip file
99  * (see {@link #mergeFrom(ZFile, Predicate)}) avoiding recompression and copying of equal files.
100  * When merging, patterns of files may be provided that are ignored. This allows handling special
101  * files in the merging process, such as files in {@code META-INF}.
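 *
 * <p>A sketch of such a merge, assuming the predicate given to {@link #mergeFrom(ZFile, Predicate)}
 * returns {@code true} for entries that should be skipped:
 * <pre>{@code
 * try (ZFile from = new ZFile(new File("library.zip"));
 *         ZFile to = new ZFile(new File("app.zip"))) {
 *     to.mergeFrom(from, name -> name.startsWith("META-INF/"));
 *     to.update();
 * }
 * }</pre>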
102  *
103  * <p>When adding files to the zip file, unless files are explicitly required to be stored, files
104  * will be deflated. However, deflating will not occur if the deflated file is larger than the
105  * stored file, <em>e.g.</em> if compression would yield a bigger file. See {@link Compressor} for
106  * details on how compression works.
107  *
108  * <p>Because {@code ZFile} was designed to be used in a build system and not as a general-purpose
109  * zip utility, it is very strict (and unforgiving) about the zip format and unsupported features.
110  *
111  * <p>{@code ZFile} supports <em>alignment</em>. Alignment means that file data (not entries -- the
112  * local header must be discounted) must start at offsets that are multiple of a number -- the
113  * alignment. Alignment is defined by an alignment rule ({@link AlignmentRule}) in the
114  * {@link ZFileOptions} object used to create the {@link ZFile}.
115  *
116  * <p>When a file is added to the zip, the alignment rules will be checked and alignment will be
117  * honored when positioning the file in the zip. This means that unused spaces in the zip may
118  * be generated as a result. However, alignment of existing entries will not be changed.
119  *
120  * <p>Entries can be realigned individually (see {@link StoredEntry#realign()} or the full zip file
121  * may be realigned (see {@link #realign()}). When realigning the full zip, entries that are
122  * already aligned will not be affected.
123  *
124  * <p>Because realignment may cause files to move in the zip, realignment is done in-memory meaning
125  * that files that need to change location will be moved to memory and will only be flushed when
126  * either {@link #update()} or {@link #close()} are called.
127  *
128  * <p>Alignment only applies to files that are forced to be uncompressed. This is because alignment
129  * is used to allow mapping files in the archive directly into memory and compressing defeats the
130  * purpose of alignment.
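 *
 * <p>A sketch of configuring alignment through the options object (the
 * {@code setAlignmentRule(AlignmentRule)} setter and the {@code AlignmentRules} factory used here
 * are assumptions of this example, not guaranteed by this documentation):
 * <pre>{@code
 * ZFileOptions options = new ZFileOptions();
 * // Align uncompressed ".so" entries to 4096-byte boundaries.
 * options.setAlignmentRule(AlignmentRules.constantForSuffix(".so", 4096));
 * try (ZFile zf = new ZFile(new File("app.zip"), options)) {
 *     // add entries; uncompressed ".so" data will start at 4096-byte multiples
 * }
 * }</pre>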
131  *
132  * <p>Manipulating zip files with {@link ZFile} may yield zip files with empty spaces between files.
133  * This happens in two situations: (1) if alignment is required, files may be shifted to conform to
134  * the requested alignment, leaving an empty space before the shifted file, and (2) if a file is
135  * removed or replaced with a file that does not fit the space it was in. By default, {@link ZFile}
136  * does not do any special processing in these situations. Files are indexed by their offsets from
137  * the central directory and empty spaces can exist in the zip file.
138  *
139  * <p>However, it is possible to tell {@link ZFile} to use the extra field in the local header
140  * to cover the empty spaces. This is done by setting
141  * {@link ZFileOptions#setCoverEmptySpaceUsingExtraField(boolean)} to {@code true}. This has the
142  * advantage of leaving no gaps between entries in the zip, as required by some tools like Oracle's
143  * {@code jar} tool. However, setting this option will destroy the contents of the file's extra
144  * field.
145  *
146  * <p>Activating {@link ZFileOptions#setCoverEmptySpaceUsingExtraField(boolean)} may lead to
147  * <i>virtual files</i> being added to the zip file. Since the extra field is limited to 64k, it is not
148  * possible to cover any space bigger than that using the extra field. In those cases, <i>virtual
149  * files</i> are added to the file. A virtual file is a file that exists in the actual zip data,
150  * but is not referenced from the central directory. A zip-compliant utility should ignore these
151  * files. However, zip utilities that expect the zip to be a stream, such as Oracle's jar, will
152  * find these files instead of considering the zip to be corrupt.
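 *
 * <p>Enabling this behavior is a single option on {@link ZFileOptions} (a sketch):
 * <pre>{@code
 * ZFileOptions options = new ZFileOptions();
 * options.setCoverEmptySpaceUsingExtraField(true); // gaps are absorbed into extra fields
 * }</pre>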
153  *
154  * <p>{@code ZFile} supports sorting zip files. Sorting (done through the {@link #sortZipContents()}
155  * method) is a process by which all files are re-read into memory, if not already in memory,
156  * removed from the zip and re-added in alphabetical order, respecting alignment rules. So, in
157  * general, file {@code b} will come after file {@code a} unless file {@code a} is subject to
158  * alignment that forces an empty space before it that can be occupied by {@code b}. Sorting can be
159  * used to minimize the changes between two zips.
160  *
161  * <p>Sorting in {@code ZFile} can be done manually or automatically. Manual sorting is done by
162  * invoking {@link #sortZipContents()}. Automatic sorting is done by setting the
163  * {@link ZFileOptions#getAutoSortFiles()} option when creating the {@code ZFile}. Automatic
164  * sorting invokes {@link #sortZipContents()} immediately when doing an {@link #update()} after
165  * all extensions have processed the {@link ZFileExtension#beforeUpdate()}. This guarantees that
166  * files added by extensions will be sorted, something that does not happen if the invocation is
167  * sequential, <i>i.e.</i>, {@link #sortZipContents()} called before {@link #update()}. The
168  * drawback of automatic sorting is that sorting will happen every time {@link #update()} is
169  * called and the file is dirty, with a possible performance penalty.
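 *
 * <p>Both modes in a sketch ({@code setAutoSortFiles} is assumed to be the setter matching
 * {@link ZFileOptions#getAutoSortFiles()}):
 * <pre>{@code
 * // Manual sorting:
 * try (ZFile zf = new ZFile(new File("app.zip"))) {
 *     zf.sortZipContents();
 *     zf.update();
 * }
 *
 * // Automatic sorting on every update of a dirty file:
 * ZFileOptions options = new ZFileOptions();
 * options.setAutoSortFiles(true);
 * }</pre>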
170  *
171  * <p>To allow whole-apk signing, the {@code ZFile} allows the central directory location to be
172  * offset by a fixed amount. This amount can be set using the {@link #setExtraDirectoryOffset(long)}
173  * method. Setting a non-zero value will add extra (unused) space in the zip file before the
174  * central directory. This value can be changed at any time and it will force the central directory
175  * to be rewritten when the file is updated or closed.
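 *
 * <p>For example, to reserve 1024 bytes before the central directory of an open {@code ZFile}
 * named {@code zfile} (a sketch):
 * <pre>{@code
 * zfile.setExtraDirectoryOffset(1024);
 * zfile.update(); // the central directory and EOCD are rewritten with the extra space
 * }</pre>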
176  *
177  * <p>{@code ZFile} provides an extension mechanism to allow objects to register with the file
178  * and be notified when changes to the file happen. This should be used
179  * to add extra features to the zip file while providing strong decoupling. See
180  * {@link ZFileExtension}, {@link ZFile#addZFileExtension(ZFileExtension)} and
181  * {@link ZFile#removeZFileExtension(ZFileExtension)}.
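 *
 * <p>A sketch of registering an extension on an open {@code ZFile} named {@code zfile}
 * ({@link ZFileExtension} is assumed to provide no-op implementations that can be selectively
 * overridden):
 * <pre>{@code
 * zfile.addZFileExtension(new ZFileExtension() {
 *     public void updated() { // overrides the no-op callback
 *         // React to the zip having been written to disk.
 *     }
 * });
 * }</pre>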
182  *
183  * <p>This class is <strong>not</strong> thread-safe. Neither are any of the classes associated with
184  * it in this package, except where otherwise noted.
185  */
186 public class ZFile implements Closeable {
187 
188     /**
189      * The file separator in paths in the zip file. This is fixed by the zip specification
190      * (section 4.4.17).
191      */
192     public static final char SEPARATOR = '/';
193 
194     /**
195      * Minimum size the EOCD can have.
196      */
197     private static final int MIN_EOCD_SIZE = 22;
198 
199     /**
200      * Number of bytes of the Zip64 EOCD locator record.
201      */
202     private static final int ZIP64_EOCD_LOCATOR_SIZE = 20;
203 
204     /**
205      * Maximum size for the EOCD.
206      */
207     private static final int MAX_EOCD_COMMENT_SIZE = 65535;
208 
209     /**
210      * How many bytes to look back from the end of the file to look for the EOCD signature.
211      */
212     private static final int LAST_BYTES_TO_READ = MIN_EOCD_SIZE + MAX_EOCD_COMMENT_SIZE;
213 
214     /**
215      * Signature of the Zip64 EOCD locator record.
216      */
217     private static final int ZIP64_EOCD_LOCATOR_SIGNATURE = 0x07064b50;
218 
219     /**
220      * Signature of the EOCD record.
221      */
222     private static final byte[] EOCD_SIGNATURE = new byte[] { 0x06, 0x05, 0x4b, 0x50 };
223 
224     /**
225      * Size of buffer for I/O operations.
226      */
227     private static final int IO_BUFFER_SIZE = 1024 * 1024;
228 
229     /**
230      * When extensions request re-runs, we do a maximum number of cycles until we decide to stop
231      * and flag an infinite recursion problem.
232      */
233     private static final int MAXIMUM_EXTENSION_CYCLE_COUNT = 10;
234 
235     /**
236      * Minimum size for the extra field when we have to add one. We rely on the alignment segment
237      * to do that so the minimum size for the extra field is the minimum size of an alignment
238      * segment.
239      */
240     private static final int MINIMUM_EXTRA_FIELD_SIZE = ExtraField.AlignmentSegment.MINIMUM_SIZE;
241 
242     /**
243      * Maximum size of the extra field.
244      *
245      * <p>Theoretically, this is (1 << 16) - 1 = 65535 and not (1 << 15) - 1 = 32767. However, due to
246      * http://b.android.com/221703, we need to keep this limited.
247      */
248     private static final int MAX_LOCAL_EXTRA_FIELD_CONTENTS_SIZE = (1 << 15) - 1;
249 
250     /**
251      * The zip file.
252      */
253     @Nonnull
254     private final File file;
255 
256     /**
257      * The random access file used to access the zip file. This will be {@code null} if and only
258      * if {@link #state} is {@link ZipFileState#CLOSED}.
259      */
260     @Nullable
261     private RandomAccessFile raf;
262 
263     /**
264      * The map containing the in-memory contents of the zip file. It keeps track of which parts of
265      * the zip file are used and which are not.
266      */
267     @Nonnull
268     private final FileUseMap map;
269 
270     /**
271      * The EOCD entry. Will be {@code null} if there is no EOCD (because the zip is new) or the
272      * one that exists on disk is no longer valid (because the zip has been changed).
273      *
274      * <p>If the EOCD is deleted because the zip has been changed and the old EOCD was no longer
275      * valid, then {@link #eocdComment} will contain the comment saved from the EOCD.
276      */
277     @Nullable
278     private FileUseMapEntry<Eocd> eocdEntry;
279 
280     /**
281      * The Central Directory entry. Will be {@code null} if there is no Central Directory (because
282      * the zip is new) or because the one that exists on disk is no longer valid (because the zip
283      * has been changed).
284      */
285     @Nullable
286     private FileUseMapEntry<CentralDirectory> directoryEntry;
287 
288     /**
289      * All entries in the zip file. It includes in-memory changes and may not reflect what is
290      * written on disk. Only entries that have been compressed are in this map.
291      */
292     @Nonnull
293     private final Map<String, FileUseMapEntry<StoredEntry>> entries;
294 
295     /**
296      * Entries added to the zip file, but that are not yet compressed. When compression is done,
297      * these entries are eventually moved to {@link #entries}. uncompressedEntries is a list
298      * because entries need to be kept in the order in which they were added. It allows adding
299      * multiple files with the same name and getting the right notifications on which files replaced
300      * which.
301      *
302      * <p>Files are placed in this list in {@link #add(StoredEntry)} method. This method will
303      * keep files here temporarily and move them to {@link #entries} when the data is
304      * available.
305      *
306      * <p>Moving files out of this list to {@link #entries} is done by
307      * {@link #processAllReadyEntries()}.
308      */
309     @Nonnull
310     private final List<StoredEntry> uncompressedEntries;
311 
312     /**
313      * Current state of the zip file.
314      */
315     @Nonnull
316     private ZipFileState state;
317 
318     /**
319      * Are there in-memory changes that have not been written to the zip file?
320      *
321      * <p>This might be false, but will become true after {@link #processAllReadyEntriesWithWait()}
322      * is called if there are {@link #uncompressedEntries} compressing in the background.
323      */
324     private boolean dirty;
325 
326     /**
327      * Non-{@code null} only if the file is currently closed. Used to detect if the zip is
328      * modified outside this object's control. If the file has never been written, this will
329      * be {@code null} even if it is closed.
330      */
331     @Nullable
332     private CachedFileContents<Object> closedControl;
333 
334     /**
335      * The alignment rule.
336      */
337     @Nonnull
338     private final AlignmentRule alignmentRule;
339 
340     /**
341      * Extensions registered with the file.
342      */
343     @Nonnull
344     private final List<ZFileExtension> extensions;
345 
346     /**
347      * When notifying extensions, extensions may request that some runnables are executed. This
348      * list collects all runnables by the order they were requested. Together with
349      * {@link #isNotifying}, it is used to avoid reordering notifications.
350      */
351     @Nonnull
352     private final List<IOExceptionRunnable> toRun;
353 
354     /**
355      * {@code true} when {@link #notify(com.android.tools.build.apkzlib.utils.IOExceptionFunction)}
356      * is notifying extensions. Used to avoid reordering notifications.
357      */
358     private boolean isNotifying;
359 
360     /**
361      * An extra offset for the central directory location. {@code 0} if the central directory
362      * should be written in its standard location.
363      */
364     private long extraDirectoryOffset;
365 
366     /**
367      * Should all timestamps be zeroed when reading / writing the zip?
368      */
369     private boolean noTimestamps;
370 
371     /**
372      * Compressor to use.
373      */
374     @Nonnull
375     private Compressor compressor;
376 
377     /**
378      * Byte tracker to use.
379      */
380     @Nonnull
381     private final ByteTracker tracker;
382 
383     /**
384      * Use the zip entry's "extra field" field to cover empty space in the zip file?
385      */
386     private boolean coverEmptySpaceUsingExtraField;
387 
388     /**
389      * Should files be automatically sorted when updating?
390      */
391     private boolean autoSortFiles;
392 
393     /**
394      * Verify log factory to use.
395      */
396     @Nonnull
397     private final Supplier<VerifyLog> verifyLogFactory;
398 
399     /**
400      * Verify log to use.
401      */
402     @Nonnull
403     private final VerifyLog verifyLog;
404 
405     /**
406      * This field contains the comment in the zip's EOCD if there is no in-memory EOCD structure.
407      * This may happen, for example, if the zip has been changed and the Central Directory and
408      * EOCD have been deleted (in-memory). In that case, this field will save the comment to place
409      * on the EOCD once it is created.
410      *
411      * <p>This field will only be non-{@code null} if there is no in-memory EOCD structure
412      * (<i>i.e.</i>, {@link #eocdEntry} is {@code null}). If there is an {@link #eocdEntry}, then
413      * the comment will be there instead of being in this field.
414      */
415     @Nullable
416     private byte[] eocdComment;
417 
418     /**
419      * Is the file in read-only mode? In read-only mode no changes are allowed.
420      */
421     private boolean readOnly;
422 
423 
424     /**
425      * Creates a new zip file. If the zip file does not exist, then no file is created at this
426      * point and {@code ZFile} will contain an empty structure. However, an (empty) zip file will
427      * be created if either {@link #update()} or {@link #close()} are used. If a zip file exists,
428      * it will be parsed and read.
429      *
430      * @param file the zip file
431      * @throws IOException some file exists but could not be read
432      */
433     public ZFile(@Nonnull File file) throws IOException {
434         this(file, new ZFileOptions());
435     }
436 
437     /**
438      * Creates a new zip file. If the zip file does not exist, then no file is created at this
439      * point and {@code ZFile} will contain an empty structure. However, an (empty) zip file will
440      * be created if either {@link #update()} or {@link #close()} are used. If a zip file exists,
441      * it will be parsed and read.
442      *
443      * @param file the zip file
444      * @param options configuration options
445      * @throws IOException some file exists but could not be read
446      */
447     public ZFile(@Nonnull File file, @Nonnull ZFileOptions options) throws IOException {
448         this(file, options, false);
449     }
450 
451     /**
452      * Creates a new zip file. If the zip file does not exist, then no file is created at this
453      * point and {@code ZFile} will contain an empty structure. However, an (empty) zip file will
454      * be created if either {@link #update()} or {@link #close()} are used. If a zip file exists,
455      * it will be parsed and read.
456      *
457      * @param file the zip file
458      * @param options configuration options
459      * @param readOnly should the file be open in read-only mode? If {@code true} then the file must
460      * exist and no methods can be invoked that could potentially change the file
461      * @throws IOException some file exists but could not be read
462      */
463     public ZFile(@Nonnull File file, @Nonnull ZFileOptions options, boolean readOnly)
464             throws IOException {
465         this.file = file;
466         map = new FileUseMap(
467                 0,
468                 options.getCoverEmptySpaceUsingExtraField()
469                         ? MINIMUM_EXTRA_FIELD_SIZE
470                         : 0);
471         this.readOnly = readOnly;
472         dirty = false;
473         closedControl = null;
474         alignmentRule = options.getAlignmentRule();
475         extensions = Lists.newArrayList();
476         toRun = Lists.newArrayList();
477         noTimestamps = options.getNoTimestamps();
478         tracker = options.getTracker();
479         compressor = options.getCompressor();
480         coverEmptySpaceUsingExtraField = options.getCoverEmptySpaceUsingExtraField();
481         autoSortFiles = options.getAutoSortFiles();
482         verifyLogFactory = options.getVerifyLogFactory();
483         verifyLog = verifyLogFactory.get();
484 
485         /*
486          * These two values will be overwritten by openReadOnly() below if the file exists.
487          */
488         state = ZipFileState.CLOSED;
489         raf = null;
490 
491         if (file.exists()) {
492             openReadOnly();
493         } else if (readOnly) {
494             throw new IOException("File does not exist but read-only mode requested");
495         } else {
496             dirty = true;
497         }
498 
499         entries = Maps.newHashMap();
500         uncompressedEntries = Lists.newArrayList();
501         extraDirectoryOffset = 0;
502 
503         try {
504             if (state != ZipFileState.CLOSED) {
505                 long rafSize = raf.length();
506                 if (rafSize > Integer.MAX_VALUE) {
507                     throw new IOException("File exceeds size limit of " + Integer.MAX_VALUE + ".");
508                 }
509 
510                 map.extend(Ints.checkedCast(rafSize));
511                 readData();
512             }
513 
514             // If we don't have an EOCD entry, set the comment to empty.
515             if (eocdEntry == null) {
516                 eocdComment = new byte[0];
517             }
518 
519             // Notify the extensions if the zip file has been opened.
520             if (state != ZipFileState.CLOSED) {
521                 notify(ZFileExtension::open);
522             }
523         } catch (Zip64NotSupportedException e) {
524             throw e;
525         } catch (IOException e) {
526             throw new IOException("Failed to read zip file '" + file.getAbsolutePath() + "'.", e);
527         } catch (IllegalStateException | IllegalArgumentException | VerifyException e) {
528             throw new RuntimeException(
529                     "Internal error when trying to read zip file '" + file.getAbsolutePath() + "'.",
530                     e);
531         }
532     }
533 
534     /**
535      * Obtains all entries in the file. Entries themselves may or may not have been written to
536      * disk. However, all of them can be opened for reading.
537      *
538      * @return all entries in the zip
539      */
540     @Nonnull
541     public Set<StoredEntry> entries() {
542         Map<String, StoredEntry> entries = Maps.newHashMap();
543 
544         for (FileUseMapEntry<StoredEntry> mapEntry : this.entries.values()) {
545             StoredEntry entry = mapEntry.getStore();
546             assert entry != null;
547             entries.put(entry.getCentralDirectoryHeader().getName(), entry);
548         }
549 
550         /*
551          * uncompressedEntries may override entries as we may not have yet processed all
552          * entries.
553          */
554         for (StoredEntry uncompressed : uncompressedEntries) {
555             entries.put(uncompressed.getCentralDirectoryHeader().getName(), uncompressed);
556         }
557 
558         return Sets.newHashSet(entries.values());
559     }
560 
561     /**
562      * Obtains an entry at a given path in the zip.
563      *
564      * @param path the path
565      * @return the entry at the path or {@code null} if none exists
566      */
567     @Nullable
568     public StoredEntry get(@Nonnull String path) {
569         /*
570          * The latest entries are the last ones in uncompressedEntries and they may eventually
571          * override files in entries.
572          */
573         for (StoredEntry stillUncompressed : Lists.reverse(uncompressedEntries)) {
574             if (stillUncompressed.getCentralDirectoryHeader().getName().equals(path)) {
575                 return stillUncompressed;
576             }
577         }
578 
579         FileUseMapEntry<StoredEntry> found = entries.get(path);
580         if (found == null) {
581             return null;
582         }
583 
584         return found.getStore();
585     }
586 
587     /**
588      * Reads all the data in the zip file, except the contents of the entries themselves. This
589      * method will populate the directory and maps in the instance variables.
590      *
591      * @throws IOException failed to read the zip file
592      */
593     private void readData() throws IOException {
594         Preconditions.checkState(state != ZipFileState.CLOSED, "state == ZipFileState.CLOSED");
595         Preconditions.checkState(raf != null, "raf == null");
596 
597         readEocd();
598         readCentralDirectory();
599 
600         /*
601          * Go over all files and create the usage map, verifying there is no overlap in the files.
602          */
603         long entryEndOffset;
604         long directoryStartOffset;
605 
606         if (directoryEntry != null) {
607             CentralDirectory directory = directoryEntry.getStore();
608             assert directory != null;
609 
610             entryEndOffset = 0;
611 
612             for (StoredEntry entry : directory.getEntries().values()) {
613                 long start = entry.getCentralDirectoryHeader().getOffset();
614                 long end = start + entry.getInFileSize();
615 
616                 /*
617                  * If isExtraAlignmentBlock(entry.getLocalExtra()) is true, we know the entry
618                  * has an extra field that is solely used for alignment. This means the
619                  * actual entry could start at start + extra.length and leave space before.
620                  *
621                  * But, if we did this here, we would be modifying the zip file and that is
622                  * weird because we're just opening it for reading.
623                  *
624                  * The downside is that we will never reuse that space. Maybe one day ZFile
625                  * can be clever enough to remove the local extra when we start modifying the zip
626                  * file.
627                  */
628 
629                 Verify.verify(start >= 0, "start < 0");
630                 Verify.verify(end < map.size(), "end >= map.size()");
631 
632                 FileUseMapEntry<?> found = map.at(start);
633                 Verify.verifyNotNull(found);
634 
635                 // We've got a problem if the found entry is not free or is a free entry but
636                 // doesn't cover the whole file.
637                 if (!found.isFree() || found.getEnd() < end) {
638                     if (found.isFree()) {
639                         found = map.after(found);
640                         Verify.verify(found != null && !found.isFree());
641                     }
642 
643                     Object foundEntry = found.getStore();
644                     Verify.verify(foundEntry != null);
645 
646                     // Obtains a custom description of an entry.
647                     IOExceptionFunction<StoredEntry, String> describe =
648                             e ->
649                                     String.format(
650                                             "'%s' (offset: %d, size: %d)",
651                                             e.getCentralDirectoryHeader().getName(),
652                                             e.getCentralDirectoryHeader().getOffset(),
653                                             e.getInFileSize());
654 
655                     String overlappingEntryDescription;
656                     if (foundEntry instanceof StoredEntry) {
657                         StoredEntry foundStored = (StoredEntry) foundEntry;
658                         overlappingEntryDescription = describe.apply(foundStored);
659                     } else {
660                         overlappingEntryDescription =
661                                 "Central Directory / EOCD: "
662                                         + found.getStart()
663                                         + " - "
664                                         + found.getEnd();
665                     }
666 
667                     throw new IOException(
668                             "Cannot read entry "
669                                     + describe.apply(entry)
670                                     + " because it overlaps with "
671                                     + overlappingEntryDescription);
672                 }
673 
674                 FileUseMapEntry<StoredEntry> mapEntry = map.add(start, end, entry);
675                 entries.put(entry.getCentralDirectoryHeader().getName(), mapEntry);
676 
677                 if (end > entryEndOffset) {
678                     entryEndOffset = end;
679                 }
680             }
681 
682             directoryStartOffset = directoryEntry.getStart();
683         } else {
684             /*
685              * No directory means an empty zip file. Use the start of the EOCD to compute
686              * an existing offset.
687              */
688             Verify.verifyNotNull(eocdEntry);
689             assert eocdEntry != null;
690             directoryStartOffset = eocdEntry.getStart();
691             entryEndOffset = 0;
692         }
693 
694         /*
695          * Check if there is an extra central directory offset. If there is, save it. Note that
696          * we can't call setExtraDirectoryOffset() because that would mark the file as dirty.
697          */
698         long extraOffset = directoryStartOffset - entryEndOffset;
699         Verify.verify(extraOffset >= 0, "extraOffset (%s) < 0", extraOffset);
700         extraDirectoryOffset = extraOffset;
701     }
702 
703     /**
704      * Finds the EOCD marker and reads it. It will populate the {@link #eocdEntry} variable.
705      *
706      * @throws IOException failed to read the EOCD
707      */
708     private void readEocd() throws IOException {
709         Preconditions.checkState(state != ZipFileState.CLOSED, "state == ZipFileState.CLOSED");
710         Preconditions.checkState(raf != null, "raf == null");
711 
712         /*
713          * Read the last part of the zip into memory. If we don't find the EOCD signature by then,
714          * the file is corrupt.
715          */
716         int lastToRead = LAST_BYTES_TO_READ;
717         if (lastToRead > raf.length()) {
718             lastToRead = Ints.checkedCast(raf.length());
719         }
720 
721         byte[] last = new byte[lastToRead];
722         directFullyRead(raf.length() - lastToRead, last);
723 
724 
725         /*
726          * Start endIdx at the first possible location where the signature can be located and then
727          * move backwards. Because the EOCD must have at least MIN_EOCD_SIZE bytes, the first byte
728          * of the signature (and first byte of the EOCD) must be at last.length - MIN_EOCD_SIZE or earlier.
729          *
730          * Because the EOCD signature may exist in the file comment, when we find a signature we
731          * will try to read the Eocd. If we fail, we continue searching for the signature. However,
732          * we will keep the last exception in case we don't find any signature.
733          */
734         Eocd eocd = null;
735         int foundEocdSignature = -1;
736         IOException errorFindingSignature = null;
737         int eocdStart = -1;
738 
739         for (int endIdx = last.length - MIN_EOCD_SIZE; endIdx >= 0 && foundEocdSignature == -1;
740                 endIdx--) {
741             /*
742              * Remember: little endian...
743              */
744             if (last[endIdx] == EOCD_SIGNATURE[3]
745                     && last[endIdx + 1] == EOCD_SIGNATURE[2]
746                     && last[endIdx + 2] == EOCD_SIGNATURE[1]
747                     && last[endIdx + 3] == EOCD_SIGNATURE[0]) {
748 
749                 /*
750                  * We found a signature. Try to read the EOCD record.
751                  */
752 
753                 foundEocdSignature = endIdx;
754                 ByteBuffer eocdBytes =
755                         ByteBuffer.wrap(last, foundEocdSignature, last.length - foundEocdSignature);
756 
757                 try {
758                     eocd = new Eocd(eocdBytes);
759                     eocdStart = Ints.checkedCast(raf.length() - lastToRead + foundEocdSignature);
760 
761                     /*
762                      * Make sure the EOCD takes the whole file up to the end. Log an error if it
763                      * doesn't.
764                      */
765                     if (eocdStart + eocd.getEocdSize() != raf.length()) {
766                         verifyLog.log("EOCD starts at "
767                                         + eocdStart
768                                         + " and has "
769                                         + eocd.getEocdSize()
770                                         + " bytes, but file ends at "
771                                         + raf.length()
772                                         + ".");
773                     }
774                 } catch (IOException e) {
775                     if (errorFindingSignature != null) {
776                         e.addSuppressed(errorFindingSignature);
777                     }
778 
779                     errorFindingSignature = e;
780                     foundEocdSignature = -1;
781                     eocd = null;
782                 }
783             }
784         }
785 
786         if (foundEocdSignature == -1) {
787             throw new IOException("EOCD signature not found in the last "
788                     + lastToRead + " bytes of the file.", errorFindingSignature);
789         }
790 
791         Verify.verify(eocdStart >= 0);
792 
793         /*
794          * Look for the Zip64 central directory locator. If we find it, then this file is a Zip64
795          * file and we do not support it.
796          */
797         int zip64LocatorStart = eocdStart - ZIP64_EOCD_LOCATOR_SIZE;
798         if (zip64LocatorStart >= 0) {
799             byte[] possibleZip64Locator = new byte[4];
800             directFullyRead(zip64LocatorStart, possibleZip64Locator);
801             if (LittleEndianUtils.readUnsigned4Le(ByteBuffer.wrap(possibleZip64Locator)) ==
802                     ZIP64_EOCD_LOCATOR_SIGNATURE) {
803                 throw new Zip64NotSupportedException(
804                         "Zip64 EOCD locator found but Zip64 format is not supported.");
805             }
806         }
807 
808         eocdEntry = map.add(eocdStart, eocdStart + eocd.getEocdSize(), eocd);
809     }
810 
811     /**
812      * Reads the zip's central directory and populates the {@link #directoryEntry} variable. This
813      * method can only be called after the EOCD has been read. If the central directory is empty
814      * (if there are no files in the zip archive), then {@link #directoryEntry} will be set to
815      * {@code null}.
816      *
817      * @throws IOException failed to read the central directory
818      */
819     private void readCentralDirectory() throws IOException {
820         Preconditions.checkNotNull(eocdEntry, "eocdEntry == null");
821         Preconditions.checkNotNull(eocdEntry.getStore(), "eocdEntry.getStore() == null");
822         Preconditions.checkState(state != ZipFileState.CLOSED, "state == ZipFileState.CLOSED");
823         Preconditions.checkState(raf != null, "raf == null");
824         Preconditions.checkState(directoryEntry == null, "directoryEntry != null");
825 
826         Eocd eocd = eocdEntry.getStore();
827 
828         long dirSize = eocd.getDirectorySize();
829         if (dirSize > Integer.MAX_VALUE) {
830             throw new IOException("Cannot read central directory with size " + dirSize + ".");
831         }
832 
833         long centralDirectoryEnd = eocd.getDirectoryOffset() + dirSize;
834         if (centralDirectoryEnd != eocdEntry.getStart()) {
835             String msg = "Central directory is stored in ["
836                     + eocd.getDirectoryOffset()
837                     + " - "
838                     + (centralDirectoryEnd - 1)
839                     + "] and EOCD starts at "
840                     + eocdEntry.getStart()
841                     + ".";
842 
843             /*
844              * If there is an empty space between the central directory and the EOCD, we proceed
845              * logging an error. If the central directory ends after the start of the EOCD (and
846              * therefore, they overlap), throw an exception.
847              */
848             if (centralDirectoryEnd > eocdEntry.getStart()) {
849                 throw new IOException(msg);
850             } else {
851                 verifyLog.log(msg);
852             }
853         }
854 
855         byte[] directoryData = new byte[Ints.checkedCast(dirSize)];
856         directFullyRead(eocd.getDirectoryOffset(), directoryData);
857 
858         CentralDirectory directory =
859                 CentralDirectory.makeFromData(
860                         ByteBuffer.wrap(directoryData),
861                         eocd.getTotalRecords(),
862                         this);
863         if (eocd.getDirectorySize() > 0) {
864             directoryEntry = map.add(
865                     eocd.getDirectoryOffset(),
866                     eocd.getDirectoryOffset() + eocd.getDirectorySize(),
867                     directory);
868         }
869     }
870 
871     /**
872      * Opens a portion of the zip for reading. The zip must be open for this method to be invoked.
873      * Note that if the zip has not been updated, the individual zip entries may not have been
874      * written yet.
875      *
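     * <p>For example, to read the first four raw bytes of an open {@code ZFile} named
     * {@code zfile} (a sketch):
     * <pre>{@code
     * byte[] firstBytes = new byte[4];
     * try (InputStream is = zfile.directOpen(0, 4)) {
     *     int read = is.read(firstBytes);
     * }
     * }</pre>
     *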
876      * @param start the index within the zip file to start reading
877      * @param end the index within the zip file to end reading (the actual byte pointed by
878      * <em>end</em> will not be read)
879      * @return a stream that will read the portion of the file; no decompression is done, data is
880      * returned <em>as is</em>
881      * @throws IOException failed to open the zip file
882      */
883     @Nonnull
884     public InputStream directOpen(final long start, final long end) throws IOException {
885         Preconditions.checkState(state != ZipFileState.CLOSED, "state == ZipFileState.CLOSED");
886         Preconditions.checkState(raf != null, "raf == null");
887         Preconditions.checkArgument(start >= 0, "start < 0");
888         Preconditions.checkArgument(end >= start, "end < start");
889         Preconditions.checkArgument(end <= raf.length(), "end > raf.length()");
890 
891         return new InputStream() {
892             private long mCurr = start;
893 
894             @Override
895             public int read() throws IOException {
896                 if (mCurr == end) {
897                     return -1;
898                 }
899 
900                 byte[] b = new byte[1];
901                 int r = directRead(mCurr, b);
902                 if (r > 0) {
903                     mCurr++;
904                     return b[0] & 0xFF; // avoid returning negative values for bytes >= 0x80
905                 } else {
906                     return -1;
907                 }
908             }
909 
910             @Override
911             public int read(@Nonnull byte[] b, int off, int len) throws IOException {
912                 Preconditions.checkNotNull(b, "b == null");
913                 Preconditions.checkArgument(off >= 0, "off < 0");
914                 Preconditions.checkArgument(off <= b.length, "off > b.length");
915                 Preconditions.checkArgument(len >= 0, "len < 0");
916                 Preconditions.checkArgument(off + len <= b.length, "off + len > b.length");
917 
918                 long availableToRead = end - mCurr;
919                 long toRead = Math.min(len, availableToRead);
920 
921                 if (toRead == 0) {
922                     return -1;
923                 }
924 
925                 if (toRead > Integer.MAX_VALUE) {
926                     throw new IOException("Cannot read " + toRead + " bytes.");
927                 }
928 
929                 int r = directRead(mCurr, b, off, Ints.checkedCast(toRead));
930                 if (r > 0) {
931                     mCurr += r;
932                 }
933 
934                 return r;
935             }
936         };
937     }
938 
939     /**
940      * Deletes an entry from the zip. This method does not actually delete anything on disk. It
941      * just changes in-memory structures. Use {@link #update()} to update the contents on disk.
942      *
943      * @param entry the entry to delete
944      * @param notify should listeners be notified of the deletion? This will only be
945      * {@code false} if the entry is being removed as part of a replacement
946      * @throws IOException failed to delete the entry
947      * @throws IllegalStateException if open in read-only mode
948      */
949     void delete(@Nonnull final StoredEntry entry, boolean notify) throws IOException {
950         checkNotInReadOnlyMode();
951 
952         String path = entry.getCentralDirectoryHeader().getName();
953         FileUseMapEntry<StoredEntry> mapEntry = entries.get(path);
954         Preconditions.checkNotNull(mapEntry, "mapEntry == null");
955         Preconditions.checkArgument(entry == mapEntry.getStore(), "entry != mapEntry.getStore()");
956 
957         dirty = true;
958 
959         map.remove(mapEntry);
960         entries.remove(path);
961 
962         if (notify) {
963             notify(ext -> ext.removed(entry));
964         }
965     }
966 
967     /**
968      * Checks that the file is not in read-only mode.
969      *
970      * @throws IllegalStateException if the file is in read-only mode
971      */
972     private void checkNotInReadOnlyMode() {
973         if (readOnly) {
974             throw new IllegalStateException("Illegal operation in read only model");
975         }
976     }
977 
978     /**
979      * Updates the file writing new entries and removing deleted entries. This will force
980      * reopening the file as read/write if the file wasn't open in read/write mode.
981      *
982      * @throws IOException failed to update the file; this exception may have been thrown by
983      * the compressor but only reported here
984      */
985     public void update() throws IOException {
986         checkNotInReadOnlyMode();
987 
988         /*
989          * Process all background stuff before calling the extensions.
990          */
991         processAllReadyEntriesWithWait();
992 
993         notify(ZFileExtension::beforeUpdate);
994 
995         /*
996          * Process all background stuff that may be leftover by the extensions.
997          */
998         processAllReadyEntriesWithWait();
999 
1000 
1001         if (!dirty) {
1002             return;
1003         }
1004 
1005         reopenRw();
1006 
1007         /*
1008          * At this point, no more files can be added. We may need to repack to remove extra
1009          * empty spaces or sort. If we sort, we don't need to repack as sorting forces the
1010          * zip file to be as compact as possible.
1011          */
1012         if (autoSortFiles) {
1013             sortZipContents();
1014         } else {
1015             packIfNecessary();
1016         }
1017 
1018         /*
1019          * We're going to change the file so delete the central directory and the EOCD as they
1020          * will have to be rewritten.
1021          */
1022         deleteDirectoryAndEocd();
1023         map.truncate();
1024 
1025         /*
1026          * If we need to use the extra field to cover empty spaces, we do the processing here.
1027          */
1028         if (coverEmptySpaceUsingExtraField) {
1029 
1030             /* We will go over all files in the zip and check whether there is empty space before
1031              * them. If there is, then we will move the entry to the beginning of the empty space
1032              * (covering it) and extend the extra field with the size of the empty space.
1033              */
1034             for (FileUseMapEntry<StoredEntry> entry : new HashSet<>(entries.values())) {
1035                 StoredEntry storedEntry = entry.getStore();
1036                 assert storedEntry != null;
1037 
1038                 FileUseMapEntry<?> before = map.before(entry);
1039                 if (before == null || !before.isFree()) {
1040                     continue;
1041                 }
1042 
1043                 /*
1044                  * We have free space before the current entry. However, we do know that it can
1045                  * be covered by the extra field, because both sortZipContents() and
1046                  * packIfNecessary() guarantee it.
1047                  */
1048                 int localExtraSize =
1049                         storedEntry.getLocalExtra().size() + Ints.checkedCast(before.getSize());
1050                 Verify.verify(localExtraSize <= MAX_LOCAL_EXTRA_FIELD_CONTENTS_SIZE);
1051 
1052                 /*
1053                  * Move file back in the zip.
1054                  */
1055                 storedEntry.loadSourceIntoMemory();
1056 
1057                 long newStart = before.getStart();
1058                 long newSize = entry.getSize() + before.getSize();
1059 
1060                 /*
1061                  * Remove the entry.
1062                  */
1063                 String name = storedEntry.getCentralDirectoryHeader().getName();
1064                 map.remove(entry);
1065                 Verify.verify(entry == entries.remove(name));
1066 
1067                 /*
1068                  * Make a list with all existing segments in the entry's extra field, but remove
1069                  * the alignment field, if it exists. Also, sum the size of all kept extra field
1070                  * segments.
1071                  */
1072                 ImmutableList<ExtraField.Segment> currentSegments;
1073                 try {
1074                     currentSegments = storedEntry.getLocalExtra().getSegments();
1075                 } catch (IOException e) {
1076                     /*
1077                      * Parsing current segments has failed. This means the contents of the extra
1078                      * field are not valid. We'll continue discarding the existing segments.
1079                      */
1080                     currentSegments = ImmutableList.of();
1081                 }
1082 
1083                 List<ExtraField.Segment> extraFieldSegments = new ArrayList<>();
1084                 int newExtraFieldSize = currentSegments.stream()
1085                         .filter(s -> s.getHeaderId()
1086                                 != ExtraField.ALIGNMENT_ZIP_EXTRA_DATA_FIELD_HEADER_ID)
1087                         .peek(extraFieldSegments::add)
1088                         .map(ExtraField.Segment::size)
1089                         .reduce(0, Integer::sum);
1090 
1091                 int spaceToFill =
1092                         Ints.checkedCast(
1093                             before.getSize()
1094                                     + storedEntry.getLocalExtra().size()
1095                                     - newExtraFieldSize);
1096 
1097                 extraFieldSegments.add(
1098                         new ExtraField.AlignmentSegment(chooseAlignment(storedEntry), spaceToFill));
1099 
1100                 storedEntry.setLocalExtraNoNotify(
1101                         new ExtraField(ImmutableList.copyOf(extraFieldSegments)));
1102                 entries.put(name, map.add(newStart, newStart + newSize, storedEntry));
1103 
1104                 /*
1105                  * Reset the offset to force the file to be rewritten.
1106                  */
1107                 storedEntry.getCentralDirectoryHeader().setOffset(-1);
1108             }
1109         }
1110 
1111         /*
1112          * Write new files in the zip. We identify new files because they don't have an offset
1113          * in the zip where they are written although we already know, by their location in the
1114          * file map, where they will be written to.
1115          *
1116          * Before writing the files, we sort them in the order they are written in the file so that
1117          * writes are made in order on disk.
1118          * This is, however, unlikely to optimize anything relevant given the way the Operating
1119          * System does caching, but it certainly won't hurt :)
1120          */
1121         TreeMap<FileUseMapEntry<?>, StoredEntry> toWriteToStore =
1122                 new TreeMap<>(FileUseMapEntry.COMPARE_BY_START);
1123 
1124         for (FileUseMapEntry<StoredEntry> entry : entries.values()) {
1125             StoredEntry entryStore = entry.getStore();
1126             assert entryStore != null;
1127             if (entryStore.getCentralDirectoryHeader().getOffset() == -1) {
1128                 toWriteToStore.put(entry, entryStore);
1129             }
1130         }
1131 
1132         /*
1133          * Add all free areas to the map.
1134          */
1135         for (FileUseMapEntry<?> freeArea : map.getFreeAreas()) {
1136             toWriteToStore.put(freeArea, null);
1137         }
1138 
1139         /*
1140          * Write everything to file.
1141          */
1142         for (FileUseMapEntry<?> fileUseMapEntry : toWriteToStore.keySet()) {
1143             StoredEntry entry = toWriteToStore.get(fileUseMapEntry);
1144             if (entry == null) {
1145                 int size = Ints.checkedCast(fileUseMapEntry.getSize());
1146                 directWrite(fileUseMapEntry.getStart(), new byte[size]);
1147             } else {
1148                 writeEntry(entry, fileUseMapEntry.getStart());
1149             }
1150         }
1151 
1152         boolean hasCentralDirectory;
1153         int extensionBugDetector = MAXIMUM_EXTENSION_CYCLE_COUNT;
1154         do {
1155             computeCentralDirectory();
1156             computeEocd();
1157 
1158             hasCentralDirectory = (directoryEntry != null);
1159 
1160             notify(ext -> {
1161                 ext.entriesWritten();
1162                 return null;
1163             });
1164 
1165             if ((--extensionBugDetector) == 0) {
1166                 throw new IOException("Extensions keep resetting the central directory. This is "
1167                         + "probably a bug.");
1168             }
1169         } while (hasCentralDirectory && directoryEntry == null);
1170 
1171         appendCentralDirectory();
1172         appendEocd();
1173 
1174         Verify.verifyNotNull(raf);
1175         raf.setLength(map.size());
1176 
1177         dirty = false;
1178 
1179         notify(ext -> {
1180             ext.updated();
1181             return null;
1182         });
1183     }
1184 
1185     /**
1186      * Reorganizes the zip so that there are no gaps between files bigger than
1187      * {@link #MAX_LOCAL_EXTRA_FIELD_CONTENTS_SIZE} if {@link #coverEmptySpaceUsingExtraField}
1188      * is set to {@code true}.
1189      *
1190      * <p>Essentially, this makes sure we can cover any empty space with the extra field, given
1191      * that the local extra field is limited to {@link #MAX_LOCAL_EXTRA_FIELD_CONTENTS_SIZE}. If
1192      * an entry is too far from the previous one, it is removed and re-added.
1193      *
1194      * @throws IOException failed to repack
1195      */
1196     private void packIfNecessary() throws IOException {
1197         if (!coverEmptySpaceUsingExtraField) {
1198             return;
1199         }
1200 
1201         SortedSet<FileUseMapEntry<StoredEntry>> entriesByLocation =
1202                 new TreeSet<>(FileUseMapEntry.COMPARE_BY_START);
1203         entriesByLocation.addAll(entries.values());
1204 
1205         for (FileUseMapEntry<StoredEntry> entry : entriesByLocation) {
1206             StoredEntry storedEntry = entry.getStore();
1207             assert storedEntry != null;
1208 
1209             FileUseMapEntry<?> before = map.before(entry);
1210             if (before == null || !before.isFree()) {
1211                 continue;
1212             }
1213 
1214             int localExtraSize =
1215                     storedEntry.getLocalExtra().size() + Ints.checkedCast(before.getSize());
1216             if (localExtraSize > MAX_LOCAL_EXTRA_FIELD_CONTENTS_SIZE) {
1217                 /*
1218                  * This entry is too far from the previous one. Remove it and re-add it to the
1219                  * zip file.
1220                  */
1221                 reAdd(storedEntry, PositionHint.LOWEST_OFFSET);
1222             }
1223         }
1224     }
1225 
1226     /**
1227      * Removes a stored entry from the zip and adds it back again. This will force the entry to be
1228      * loaded into memory and repositioned in the zip file. It will also mark the archive as
1229      * being dirty.
1230      *
1231      * @param entry the entry
1232      * @param positionHint hint to where the file should be positioned when re-adding
1233      * @throws IOException failed to load the entry into memory
1234      */
1235     private void reAdd(@Nonnull StoredEntry entry, @Nonnull PositionHint positionHint)
1236             throws IOException {
1237         String name = entry.getCentralDirectoryHeader().getName();
1238         FileUseMapEntry<StoredEntry> mapEntry = entries.get(name);
1239         Preconditions.checkNotNull(mapEntry);
1240         Preconditions.checkState(mapEntry.getStore() == entry);
1241 
1242         entry.loadSourceIntoMemory();
1243 
1244         map.remove(mapEntry);
1245         entries.remove(name);
1246         FileUseMapEntry<StoredEntry> positioned = positionInFile(entry, positionHint);
1247         entries.put(name, positioned);
1248         dirty = true;
1249     }
1250 
1251     /**
1252      * Invoked from {@link StoredEntry} when an entry has changed in a way that forces the
1253      * local header to be rewritten.
1254      *
1255      * @param entry the entry that changed
1256      * @param resized was the local header resized?
1257      * @throws IOException failed to load the entry into memory
1258      */
1259     void localHeaderChanged(@Nonnull StoredEntry entry, boolean resized) throws IOException {
1260         dirty = true;
1261 
1262         if (resized) {
1263             reAdd(entry, PositionHint.ANYWHERE);
1264         }
1265     }
1266 
1267     /**
1268      * Invoked when the central directory has changed and needs to be rewritten.
1269      */
1270     void centralDirectoryChanged() {
1271         dirty = true;
1272         deleteDirectoryAndEocd();
1273     }
1274 
1275     /**
1276      * Updates the file and closes it.
1277      */
1278     @Override
1279     public void close() throws IOException {
1280         // We need to make sure to release raf, otherwise we end up locking the file on
1281         // Windows. Use try-with-resources to handle exception suppressing.
1282         try (Closeable ignored = this::innerClose) {
1283             if (!readOnly) {
1284                 update();
1285             }
1286         }
1287 
1288         notify(ext -> {
1289            ext.closed();
1290             return null;
1291         });
1292     }
1293 
1294     /**
1295      * Removes the Central Directory and EOCD from the file. This will free space for new entries
1296      * as well as allowing the zip file to be truncated if files have been removed.
1297      *
1298      * <p>This method does not mark the zip as dirty.
1299      */
1300     private void deleteDirectoryAndEocd() {
1301         if (directoryEntry != null) {
1302             map.remove(directoryEntry);
1303             directoryEntry = null;
1304         }
1305 
1306         if (eocdEntry != null) {
1307             map.remove(eocdEntry);
1308 
1309             Eocd eocd = eocdEntry.getStore();
1310             Verify.verify(eocd != null);
1311             eocdComment = eocd.getComment();
1312             eocdEntry = null;
1313         }
1314     }
1315 
1316     /**
1317      * Writes an entry's data in the zip file. This includes everything: the local header and
1318      * the data itself. After writing, the entry is updated with the offset and its source replaced
1319      * with a source that reads from the zip file.
1320      *
1321      * @param entry the entry to write
1322      * @param offset the offset at which the entry should be written
1323      * @throws IOException failed to write the entry
1324      */
1325     private void writeEntry(@Nonnull StoredEntry entry, long offset) throws IOException {
1326         Preconditions.checkArgument(entry.getDataDescriptorType()
1327                 == DataDescriptorType.NO_DATA_DESCRIPTOR, "Cannot write entries with a data "
1328                 + "descriptor.");
1329         Preconditions.checkNotNull(raf, "raf == null");
1330         Preconditions.checkState(state == ZipFileState.OPEN_RW, "state != ZipFileState.OPEN_RW");
1331 
1332         /*
1333          * Place the cursor and write the local header.
1334          */
1335         byte[] headerData = entry.toHeaderData();
1336         directWrite(offset, headerData);
1337 
1338         /*
1339          * Get the raw source data to write.
1340          */
1341         ProcessedAndRawByteSources source = entry.getSource();
1342         ByteSource rawContents = source.getRawByteSource();
1343 
1344         /*
1345          * Write the source data.
1346          */
1347         byte[] chunk = new byte[IO_BUFFER_SIZE];
1348         int r;
1349         long writeOffset = offset + headerData.length;
1350         try (InputStream is = rawContents.openStream()) {
1351             while ((r = is.read(chunk)) >= 0) {
1352                 directWrite(writeOffset, chunk, 0, r);
1353                 writeOffset += r;
1354             }
1355         }
1357 
1358         /*
1359          * Set the entry's offset and create the entry source.
1360          */
1361         entry.replaceSourceFromZip(offset);
1362     }
1363 
1364     /**
1365      * Computes the central directory. The central directory must not have been computed yet. When
1366      * this method finishes, the central directory has been computed and stored in
1367      * {@link #directoryEntry}, unless the directory is empty, in which case {@link #directoryEntry}
1368      * is left as {@code null}. Nothing is written to disk as a result of this method's invocation.
1369      *
1370      * @throws IOException failed to compute the central directory
1371      */
1372     private void computeCentralDirectory() throws IOException {
1373         Preconditions.checkState(state == ZipFileState.OPEN_RW, "state != ZipFileState.OPEN_RW");
1374         Preconditions.checkNotNull(raf, "raf == null");
1375         Preconditions.checkState(directoryEntry == null, "directoryEntry != null");
1376 
1377         Set<StoredEntry> newStored = Sets.newHashSet();
1378         for (FileUseMapEntry<StoredEntry> mapEntry : entries.values()) {
1379             newStored.add(mapEntry.getStore());
1380         }
1381 
1382         /*
1383          * Make sure we truncate the map before computing the central directory's location since
1384          * the central directory is the last part of the file.
1385          */
1386         map.truncate();
1387 
1388         CentralDirectory newDirectory = CentralDirectory.makeFromEntries(newStored, this);
1389         byte[] newDirectoryBytes = newDirectory.toBytes();
1390         long directoryOffset = map.size() + extraDirectoryOffset;
1391 
1392         map.extend(directoryOffset + newDirectoryBytes.length);
1393 
1394         if (newDirectoryBytes.length > 0) {
1395             directoryEntry = map.add(directoryOffset, directoryOffset + newDirectoryBytes.length,
1396                     newDirectory);
1397         }
1398     }
1399 
1400     /**
1401      * Writes the central directory to the end of the zip file. {@link #directoryEntry} may be
1402      * {@code null} only if there are no files in the archive.
1403      *
1404      * @throws IOException failed to append the central directory
1405      */
1406     private void appendCentralDirectory() throws IOException {
1407         Preconditions.checkState(state == ZipFileState.OPEN_RW, "state != ZipFileState.OPEN_RW");
1408         Preconditions.checkNotNull(raf, "raf == null");
1409 
1410         if (entries.isEmpty()) {
1411             Preconditions.checkState(directoryEntry == null, "directoryEntry != null");
1412             return;
1413         }
1414 
1415         Preconditions.checkNotNull(directoryEntry, "directoryEntry == null");
1416 
1417         CentralDirectory newDirectory = directoryEntry.getStore();
1418         Preconditions.checkNotNull(newDirectory, "newDirectory == null");
1419 
1420         byte[] newDirectoryBytes = newDirectory.toBytes();
1421         long directoryOffset = directoryEntry.getStart();
1422 
1423         /*
1424          * It is fine to seek beyond the end of file. Seeking beyond the end of file will not extend
1425          * the file. Even if we do not have any directory data to write, the extend() call below
1426          * will force the file to be extended leaving exactly extraDirectoryOffset bytes empty at
1427          * the beginning.
1428          */
1429         directWrite(directoryOffset, newDirectoryBytes);
1430     }
1431 
1432     /**
1433      * Obtains the byte array representation of the central directory. The central directory must
1434      * have been already computed. If there are no entries in the zip, the central directory will be
1435      * empty.
1436      *
1437      * @return the byte representation, or an empty array if there are no entries in the zip
1438      * @throws IOException failed to compute the central directory byte representation
1439      */
1440     @Nonnull
1441     public byte[] getCentralDirectoryBytes() throws IOException {
1442         if (entries.isEmpty()) {
1443             Preconditions.checkState(directoryEntry == null, "directoryEntry != null");
1444             return new byte[0];
1445         }
1446 
1447         Preconditions.checkNotNull(directoryEntry, "directoryEntry == null");
1448 
1449         CentralDirectory cd = directoryEntry.getStore();
1450         Preconditions.checkNotNull(cd, "cd == null");
1451         return cd.toBytes();
1452     }
1453 
1454     /**
1455      * Computes the EOCD. This creates a new {@link #eocdEntry}. The
1456      * central directory must already be computed. If {@link #directoryEntry} is {@code null}, then
1457      * the zip file must not have any entries.
1458      *
1459      * @throws IOException failed to compute the EOCD
1460      */
1461     private void computeEocd() throws IOException {
1462         Preconditions.checkState(state == ZipFileState.OPEN_RW, "state != ZipFileState.OPEN_RW");
1463         Preconditions.checkNotNull(raf, "raf == null");
1464         if (directoryEntry == null) {
1465             Preconditions.checkState(entries.isEmpty(),
1466                     "directoryEntry == null && !entries.isEmpty()");
1467         }
1468 
1469         long dirStart;
1470         long dirSize = 0;
1471 
1472         if (directoryEntry != null) {
1473             CentralDirectory directory = directoryEntry.getStore();
1474             assert directory != null;
1475 
1476             dirStart = directoryEntry.getStart();
1477             dirSize = directoryEntry.getSize();
1478             Verify.verify(directory.getEntries().size() == entries.size());
1479         } else {
1480             /*
1481              * If we do not have a directory, then we must leave any requested offset empty.
1482              */
1483             dirStart = extraDirectoryOffset;
1484         }
1485 
1486         Verify.verify(eocdComment != null);
1487         Eocd eocd = new Eocd(entries.size(), dirStart, dirSize, eocdComment);
1488         eocdComment = null;
1489 
1490         byte[] eocdBytes = eocd.toBytes();
1491         long eocdOffset = map.size();
1492 
1493         map.extend(eocdOffset + eocdBytes.length);
1494 
1495         eocdEntry = map.add(eocdOffset, eocdOffset + eocdBytes.length, eocd);
1496     }
1497 
1498     /**
1499      * Writes the EOCD to the end of the zip file. The EOCD must have already been computed and
1500      * stored in {@link #eocdEntry}. If {@link #directoryEntry} is {@code null}, then
1501      * the zip file must not have any entries.
1502      *
1503      * @throws IOException failed to write the EOCD
1504      */
1505     private void appendEocd() throws IOException {
1506         Preconditions.checkState(state == ZipFileState.OPEN_RW, "state != ZipFileState.OPEN_RW");
1507         Preconditions.checkNotNull(raf, "raf == null");
1508         Preconditions.checkNotNull(eocdEntry, "eocdEntry == null");
1509 
1510         Eocd eocd = eocdEntry.getStore();
1511         Preconditions.checkNotNull(eocd, "eocd == null");
1512 
1513         byte[] eocdBytes = eocd.toBytes();
1514         long eocdOffset = eocdEntry.getStart();
1515 
1516         directWrite(eocdOffset, eocdBytes);
1517     }
1518 
1519     /**
1520      * Obtains the byte array representation of the EOCD. The EOCD must have already been computed
1521      * for this method to be invoked.
1522      *
1523      * @return the byte representation of the EOCD
1524      * @throws IOException failed to obtain the byte representation of the EOCD
1525      */
1526     @Nonnull
1527     public byte[] getEocdBytes() throws IOException {
1528         Preconditions.checkNotNull(eocdEntry, "eocdEntry == null");
1529 
1530         Eocd eocd = eocdEntry.getStore();
1531         Preconditions.checkNotNull(eocd, "eocd == null");
1532         return eocd.toBytes();
1533     }
1534 
1535     /**
1536      * Closes the file, if it is open.
1537      *
1538      * @throws IOException failed to close the file
1539      */
1540     private void innerClose() throws IOException {
1541         if (state == ZipFileState.CLOSED) {
1542             return;
1543         }
1544 
1545         Verify.verifyNotNull(raf, "raf == null");
1546 
1547         raf.close();
1548         raf = null;
1549         state = ZipFileState.CLOSED;
1550         if (closedControl == null) {
1551             closedControl = new CachedFileContents<>(file);
1552         }
1553 
1554         closedControl.closed(null);
1555     }
1556 
1557     /**
1558      * If the zip file is closed, opens it in read-only mode. If it is already open, does nothing.
1559      * In general, it is not necessary to directly invoke this method. However, if directly
1560      * reading the zip file using, for example, {@link #directRead(long, byte[])}, then this
1561      * method needs to be called first.
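     *
     * <p>A minimal sketch of reading directly (the buffer size is arbitrary; assumes {@code zf}
     * is a {@code ZFile} for an existing archive):
     * <pre>{@code
     * zf.openReadOnly();                   // keep the file in read-only mode
     * byte[] buffer = new byte[64];
     * int read = zf.directRead(0, buffer); // read up to 64 bytes from the start of the file
     * }</pre>
     *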
1562      * @throws IOException failed to open the file
1563      */
1564     public void openReadOnly() throws IOException {
1565         if (state != ZipFileState.CLOSED) {
1566             return;
1567         }
1568 
1569         state = ZipFileState.OPEN_RO;
1570         raf = new RandomAccessFile(file, "r");
1571     }
1572 
1573     /**
1574      * Opens (or reopens) the zip file as read-write. This method will ensure that
1575      * {@link #raf} is not null and open for writing.
1576      *
1577      * @throws IOException failed to open the file, failed to close it or the file was closed and
1578      * has been modified outside the control of this object
1579      */
1580     private void reopenRw() throws IOException {
1581         // We can never open a file RW in read-only mode. We should never get this far, though.
1582         Verify.verify(!readOnly);
1583 
1584         if (state == ZipFileState.OPEN_RW) {
1585             return;
1586         }
1587 
1588         boolean wasClosed;
1589         if (state == ZipFileState.OPEN_RO) {
1590             /*
1591              * RandomAccessFile does not have a way to reopen as RW so we have to close it and
1592              * open it again.
1593              */
1594             innerClose();
1595             wasClosed = false;
1596         } else {
1597             wasClosed = true;
1598         }
1599 
1600         Verify.verify(state == ZipFileState.CLOSED, "state != ZipFileState.CLOSED");
1601         Verify.verify(raf == null, "raf != null");
1602 
1603         if (closedControl != null && !closedControl.isValid()) {
1604             throw new IOException("File '" + file.getAbsolutePath() + "' has been modified "
1605                     + "by an external application.");
1606         }
1607 
1608         raf = new RandomAccessFile(file, "rw");
1609         state = ZipFileState.OPEN_RW;
1610 
1611         /*
1612          * Now that we've opened the zip and are ready to write, clear out any data descriptors
1613          * in the zip since we don't need them and they take space in the archive.
1614          */
1615         for (StoredEntry entry : entries()) {
1616             dirty |= entry.removeDataDescriptor();
1617         }
1618 
1619         if (wasClosed) {
1620             notify(ZFileExtension::open);
1621         }
1622     }
1623 
1624     /**
1625      * Equivalent to calling {@link #add(String, InputStream, boolean)} using
1626      * {@code true} as {@code mayCompress}.
1627      *
1628      * @param name the file name (<em>i.e.</em>, path); paths should be defined using slashes
1629      * and the name should not end in slash
1630      * @param stream the source for the file's data
1631      * @throws IOException failed to read the source data
1632      * @throws IllegalStateException if the file is in read-only mode
1633      */
1634     public void add(@Nonnull String name, @Nonnull InputStream stream) throws IOException {
1635         checkNotInReadOnlyMode();
1636         add(name, stream, true);
1637     }
1638 
1639     /**
1640      * Creates a stored entry. This does not add the entry to the zip file; it just creates the
1641      * {@link StoredEntry} object.
1642      *
1643      * @param name the name of the entry
1644      * @param stream the input stream with the entry's data
1645      * @param mayCompress can the entry be compressed?
1646      * @return the created entry
1647      * @throws IOException failed to create the entry
1648      */
1649     @Nonnull
1650     private StoredEntry makeStoredEntry(
1651             @Nonnull String name,
1652             @Nonnull InputStream stream,
1653             boolean mayCompress)
1654             throws IOException {
1655         CloseableByteSource source = tracker.fromStream(stream);
1656         long crc32 = source.hash(Hashing.crc32()).padToLong();
1657 
1658         boolean encodeWithUtf8 = !EncodeUtils.canAsciiEncode(name);
1659 
1660         SettableFuture<CentralDirectoryHeaderCompressInfo> compressInfo =
1661                 SettableFuture.create();
1662         GPFlags flags = GPFlags.make(encodeWithUtf8);
1663         CentralDirectoryHeader newFileData =
1664                 new CentralDirectoryHeader(
1665                         name,
1666                         EncodeUtils.encode(name, flags),
1667                         source.size(),
1668                         compressInfo,
1669                         flags,
1670                         this);
1671         newFileData.setCrc32(crc32);
1672 
1673         /*
1674          * Create the new entry and set its data source. Offset should be set to -1 automatically
1675          * because this is a new file. With offset set to -1, StoredEntry does not try to verify the
1676          * local header. Since this is a new file, there is no local header and not checking it is
1677          * what we want to happen.
1678          */
1679         Verify.verify(newFileData.getOffset() == -1);
1680         return new StoredEntry(
1681                 newFileData,
1682                 this,
1683                 createSources(mayCompress, source, compressInfo, newFileData));
1684     }
1685 
1686     /**
1687      * Creates the processed and raw sources for an entry.
1688      *
1689      * @param mayCompress can the entry be compressed?
1690      * @param source the entry's data (uncompressed)
1691      * @param compressInfo the compression info future that will be set when the raw entry is
1692      * created and the {@link CentralDirectoryHeaderCompressInfo} object can be created
1693      * @param newFileData the central directory header for the new file
1694      * @return the sources whose data may or may not be already defined
1695      * @throws IOException failed to create the raw sources
1696      */
1697     @Nonnull
1698     private ProcessedAndRawByteSources createSources(
1699             boolean mayCompress,
1700             @Nonnull CloseableByteSource source,
1701             @Nonnull SettableFuture<CentralDirectoryHeaderCompressInfo> compressInfo,
1702             @Nonnull CentralDirectoryHeader newFileData)
1703             throws IOException {
1704         if (mayCompress) {
1705             ListenableFuture<CompressionResult> result = compressor.compress(source);
1706             Futures.addCallback(
1707                     result,
1708                     new FutureCallback<CompressionResult>() {
1709                         @Override
1710                         public void onSuccess(CompressionResult result) {
1711                             compressInfo.set(
1712                                     new CentralDirectoryHeaderCompressInfo(
1713                                             newFileData,
1714                                             result.getCompressionMethod(),
1715                                             result.getSize()));
1716                         }
1717 
1718                         @Override
1719                         public void onFailure(@Nonnull Throwable t) {
1720                             compressInfo.setException(t);
1721                         }
1722                     },
1723                     MoreExecutors.directExecutor());
1724 
1725             ListenableFuture<CloseableByteSource> compressedByteSourceFuture =
1726                     Futures.transform(
1727                             result, CompressionResult::getSource, MoreExecutors.directExecutor());
1728             LazyDelegateByteSource compressedByteSource = new LazyDelegateByteSource(
1729                     compressedByteSourceFuture);
1730             return new ProcessedAndRawByteSources(source, compressedByteSource);
1731         } else {
1732             compressInfo.set(new CentralDirectoryHeaderCompressInfo(newFileData,
1733                     CompressionMethod.STORE, source.size()));
1734             return new ProcessedAndRawByteSources(source, source);
1735         }
1736     }
1737 
1738     /**
1739      * Adds a file to the archive.
1740      *
1741      * <p>Adding the file will not update the archive immediately. Updating will only happen
1742      * when the {@link #update()} method is invoked.
1743      *
1744      * <p>Adding a file with the same name as an existing file will replace that file in the
1745      * archive.
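     *
     * <p>A minimal usage sketch (the archive and entry names are hypothetical; assumes the
     * single-argument {@code ZFile} constructor):
     * <pre>{@code
     * try (ZFile zf = new ZFile(new File("archive.zip"))) {
     *     byte[] data = { 0x68, 0x69 };   // "hi"
     *     zf.add("assets/hello.txt", new ByteArrayInputStream(data), true);
     * }   // close() writes the deferred changes to disk
     * }</pre>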
1746      *
1747      * @param name the file name (<em>i.e.</em>, path); paths should be defined using slashes
1748      * and the name should not end in slash
1749      * @param stream the source for the file's data
1750      * @param mayCompress can the file be compressed? This flag will be ignored if the alignment
1751      * rules force the file to be aligned, in which case the file will not be compressed.
1752      * @throws IOException failed to read the source data
1753      * @throws IllegalStateException if the file is in read-only mode
1754      */
1755     public void add(@Nonnull String name, @Nonnull InputStream stream, boolean mayCompress)
1756             throws IOException {
1757         checkNotInReadOnlyMode();
1758 
1759         /*
1760          * Clean pending background work, if needed.
1761          */
1762         processAllReadyEntries();
1763 
1764         add(makeStoredEntry(name, stream, mayCompress));
1765     }
1766 
1767     /**
1768      * Adds a {@link StoredEntry} to the zip. The entry is not immediately added to
1769      * {@link #entries} because its compression information may not yet be available. Instead, it
1770      * is placed under {@link #uncompressedEntries} and later moved to {@link #entries} by
1771      * {@link #processAllReadyEntries()} when done.
1772      *
1773      * <p>This method invokes {@link #processAllReadyEntries()} to move the entry if it has already
1774      * been computed, so if there is no delay in compression and no other files are in the waiting
1775      * queue, then the entry is added to {@link #entries} immediately.
1776      *
1777      * @param newEntry the entry to add
1778      * @throws IOException failed to process this entry (or a previous one whose future only
1779      * completed now)
1780      */
1781     private void add(@Nonnull final StoredEntry newEntry) throws IOException {
1782         uncompressedEntries.add(newEntry);
1783         processAllReadyEntries();
1784     }
1785 
1786     /**
1787      * Moves all ready entries from {@link #uncompressedEntries} to {@link #entries}. It will
1788      * stop as soon as an entry whose future has not yet completed is found.
1789      *
1790      * @throws IOException the exception reported in the future computation, if any, or failed
1791      * to add a file to the archive
1792      */
1793     private void processAllReadyEntries() throws IOException {
1794         /*
1795          * Many things can happen during addToEntries(). Because addToEntries() fires
1796          * notifications to extensions, other files can be added, removed, etc. We are *not*
1797          * guaranteed that new stuff does not get into uncompressedEntries: add() will still work
1798          * and will add new entries in there.
1799          *
1800          * However -- important -- processAllReadyEntries() may be invoked during addToEntries()
1801          * because of the extension mechanism. This means that stuff *can* be removed from
1802          * uncompressedEntries and moved to entries during addToEntries().
1803          */
1804         while (!uncompressedEntries.isEmpty()) {
1805             StoredEntry next = uncompressedEntries.get(0);
1806             CentralDirectoryHeader cdh = next.getCentralDirectoryHeader();
1807             Future<CentralDirectoryHeaderCompressInfo> compressionInfo = cdh.getCompressionInfo();
1808             if (!compressionInfo.isDone()) {
1809                 /*
1810                  * First entry in queue is not yet complete. We can't do anything else.
1811                  */
1812                 return;
1813             }
1814 
1815             uncompressedEntries.remove(0);
1816 
1817             try {
1818                 compressionInfo.get();
1819             } catch (InterruptedException e) {
1820                 throw new IOException("Impossible I/O exception: get for already computed "
1821                         + "future throws InterruptedException", e);
1822             } catch (ExecutionException e) {
1823                 throw new IOException("Failed to obtain compression information for entry", e);
1824             }
1825 
1826             addToEntries(next);
1827         }
1828     }
1829 
1830     /**
1831      * Waits until {@link #uncompressedEntries} is empty.
1832      *
1833      * @throws IOException the exception reported in the future computation, if any, or failed
1834      * to add a file to the archive
1835      */
1836     private void processAllReadyEntriesWithWait() throws IOException {
1837         processAllReadyEntries();
1838         while (!uncompressedEntries.isEmpty()) {
1839             /*
1840              * Wait for the first future to complete and then try again. Keep looping until we're
1841              * done.
1842              */
1843             StoredEntry first = uncompressedEntries.get(0);
1844             CentralDirectoryHeader cdh = first.getCentralDirectoryHeader();
1845             cdh.getCompressionInfoWithWait();
1846 
1847             processAllReadyEntries();
1848         }
1849     }
1850 
1851     /**
1852      * Adds a new file to {@link #entries}. This is actually added to the zip and its space
1853      * allocated in the {@link #map}.
1854      *
1855      * @param newEntry the new entry to add
1856      * @throws IOException failed to add the file
1857      */
1858     private void addToEntries(@Nonnull final StoredEntry newEntry) throws IOException {
1859         Preconditions.checkArgument(newEntry.getDataDescriptorType() ==
1860                 DataDescriptorType.NO_DATA_DESCRIPTOR, "newEntry has data descriptor");
1861 
1862         /*
1863          * If there is a file with the same name in the archive, remove it. We remove it by
1864          * calling delete() on the entry (this is the public API to remove a file from the archive).
1865          * StoredEntry.delete() will call {@link ZFile#delete(StoredEntry, boolean)}  to perform
1866          * data structure cleanup.
1867          */
1868         FileUseMapEntry<StoredEntry> toReplace = entries.get(
1869                 newEntry.getCentralDirectoryHeader().getName());
1870         final StoredEntry replaceStore;
1871         if (toReplace != null) {
1872             replaceStore = toReplace.getStore();
1873             assert replaceStore != null;
1874             replaceStore.delete(false);
1875         } else {
1876             replaceStore = null;
1877         }
1878 
1879         FileUseMapEntry<StoredEntry> fileUseMapEntry =
1880                 positionInFile(newEntry, PositionHint.ANYWHERE);
1881         entries.put(newEntry.getCentralDirectoryHeader().getName(), fileUseMapEntry);
1882 
1883         dirty = true;
1884 
1885         notify(ext -> ext.added(newEntry, replaceStore));
1886     }
1887 
1888     /**
1889      * Finds a location in the zip where this entry will be added and creates the map entry.
1890      * This method cannot be called if there is already a map entry for the given entry (if you
1891      * do that, then you're doing something wrong somewhere).
1892      *
1893      * <p>This may delete the central directory and EOCD (if it deletes one, it deletes the other)
1894      * if there is no space before the central directory. Otherwise, the file would be added
1895      * after the central directory. This would force a new central directory to be written
1896      * when updating the file and would create a hole in the zip. Me no like holes. Holes are evil.
1897      *
1898      * @param entry the entry to place in the zip
1899      * @param positionHint hint to where the file should be positioned
1900      * @return the position in the file where the entry should be placed
1901      */
1902     @Nonnull
1903     private FileUseMapEntry<StoredEntry> positionInFile(
1904             @Nonnull StoredEntry entry,
1905             @Nonnull PositionHint positionHint)
1906             throws IOException {
1907         deleteDirectoryAndEocd();
1908         long size = entry.getInFileSize();
1909         int localHeaderSize = entry.getLocalHeaderSize();
1910         int alignment = chooseAlignment(entry);
1911 
1912         FileUseMap.PositionAlgorithm algorithm;
1913 
1914         switch (positionHint) {
1915             case LOWEST_OFFSET:
1916                 algorithm = FileUseMap.PositionAlgorithm.FIRST_FIT;
1917                 break;
1918             case ANYWHERE:
1919                 algorithm = FileUseMap.PositionAlgorithm.BEST_FIT;
1920                 break;
1921             default:
1922                 throw new AssertionError();
1923         }
1924 
1925         long newOffset = map.locateFree(size, localHeaderSize, alignment, algorithm);
1926         long newEnd = newOffset + entry.getInFileSize();
1927         if (newEnd > map.size()) {
1928             map.extend(newEnd);
1929         }
1930 
1931         return map.add(newOffset, newEnd, entry);
1932     }
1933 
1934     /**
1935      * Determines the alignment value of an entry.
1936      *
1937      * @param entry the entry
1938      * @return the alignment value, {@link AlignmentRule#NO_ALIGNMENT} if there is no alignment
1939      * required for the entry
1940      * @throws IOException failed to determine the alignment
1941      */
1942     private int chooseAlignment(@Nonnull StoredEntry entry) throws IOException {
1943         CentralDirectoryHeader cdh = entry.getCentralDirectoryHeader();
1944         CentralDirectoryHeaderCompressInfo compressionInfo = cdh.getCompressionInfoWithWait();
1945 
1946         boolean isCompressed = compressionInfo.getMethod() != CompressionMethod.STORE;
1947         if (isCompressed) {
1948             return AlignmentRule.NO_ALIGNMENT;
1949         } else {
1950             return alignmentRule.alignment(cdh.getName());
1951         }
1952     }
1953 
1954     /**
1955      * Adds all files from another zip file, maintaining their compression. Files specified in
1956      * <em>src</em> that already exist in this file will replace the ones in this file. However, if
1957      * their sizes and checksums are equal, they will be ignored.
1958      *
1959      * <p>This method will not perform any changes to the file itself; it will only update in-memory data
1960      * structures. To actually write the zip file, invoke either {@link #update()} or
1961      * {@link #close()}.
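     *
     * <p>A minimal usage sketch (file names are hypothetical; the predicate here ignores nothing):
     * <pre>{@code
     * try (ZFile target = new ZFile(new File("target.zip"));
     *         ZFile source = new ZFile(new File("source.zip"))) {
     *     target.mergeFrom(source, name -> false);
     * }
     * }</pre>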
1962      *
1963      * @param src the source archive
1964      * @param ignoreFilter predicate that, if {@code true}, identifies files in <em>src</em> that
1965      * should be ignored by merging; merging will behave as if these files were not there
1966      * @throws IOException failed to read from <em>src</em> or write on the output
1967      * @throws IllegalStateException if the file is in read-only mode
1968      */
1969     public void mergeFrom(@Nonnull ZFile src, @Nonnull Predicate<String> ignoreFilter)
1970             throws IOException {
1971         checkNotInReadOnlyMode();
1972 
1973         for (StoredEntry fromEntry : src.entries()) {
1974             if (ignoreFilter.test(fromEntry.getCentralDirectoryHeader().getName())) {
1975                 continue;
1976             }
1977 
1978             boolean replaceCurrent = true;
1979             String path = fromEntry.getCentralDirectoryHeader().getName();
1980             FileUseMapEntry<StoredEntry> currentEntry = entries.get(path);
1981 
1982             if (currentEntry != null) {
1983                 long fromSize = fromEntry.getCentralDirectoryHeader().getUncompressedSize();
1984                 long fromCrc = fromEntry.getCentralDirectoryHeader().getCrc32();
1985 
1986                 StoredEntry currentStore = currentEntry.getStore();
1987                 assert currentStore != null;
1988 
1989                 long currentSize = currentStore.getCentralDirectoryHeader().getUncompressedSize();
1990                 long currentCrc = currentStore.getCentralDirectoryHeader().getCrc32();
1991 
1992                 if (fromSize == currentSize && fromCrc == currentCrc) {
1993                     replaceCurrent = false;
1994                 }
1995             }
1996 
1997             if (replaceCurrent) {
1998                 CentralDirectoryHeader fromCdr = fromEntry.getCentralDirectoryHeader();
1999                 CentralDirectoryHeaderCompressInfo fromCompressInfo =
2000                         fromCdr.getCompressionInfoWithWait();
2001                 CentralDirectoryHeader newFileData;
2002                 try {
2003                     /*
2004                      * We make two changes in the central directory from the file to merge:
2005                      * we reset the offset to force the entry to be written and we reset the
2006                      * deferred CRC bit as we don't need the extra stuff after the file. It takes
2007                      * space and is totally useless.
2008                      */
2009                     newFileData = fromCdr.clone();
2010                     newFileData.setOffset(-1);
2011                     newFileData.resetDeferredCrc();
2012                 } catch (CloneNotSupportedException e) {
2013                     throw new IOException("Failed to clone CDR.", e);
2014                 }
2015 
2016                 /*
2017                  * Read the data (read directly the compressed source if there is one).
2018                  */
2019                 ProcessedAndRawByteSources fromSource = fromEntry.getSource();
2020                 InputStream fromInput = fromSource.getRawByteSource().openStream();
2021                 long sourceSize = fromSource.getRawByteSource().size();
2022                 if (sourceSize > Integer.MAX_VALUE) {
2023                     throw new IOException("Cannot read source with " + sourceSize + " bytes.");
2024                 }
2025 
2026                 byte[] data = new byte[Ints.checkedCast(sourceSize)];
2027                 int read = 0;
2028                 while (read < data.length) {
2029                     int r = fromInput.read(data, read, data.length - read);
2030                     Verify.verify(r >= 0, "There should be at least 'size' bytes in the stream.");
2031                     read += r;
2032                 }
2033 
2034                 /*
2035                  * Build the new source and wrap it around an inflater source if data came from
2036                  * a compressed source.
2037                  */
2038                 CloseableByteSource rawContents = tracker.fromSource(fromSource.getRawByteSource());
2039                 CloseableByteSource processedContents;
2040                 if (fromCompressInfo.getMethod() == CompressionMethod.DEFLATE) {
2041                     //noinspection IOResourceOpenedButNotSafelyClosed
2042                     processedContents = new InflaterByteSource(rawContents);
2043                 } else {
2044                     processedContents = rawContents;
2045                 }
2046 
2047                 ProcessedAndRawByteSources newSource = new ProcessedAndRawByteSources(
2048                         processedContents, rawContents);
2049 
2050                 /*
2051                  * Add will replace any current entry with the same name.
2052                  */
2053                 StoredEntry newEntry = new StoredEntry(newFileData, this, newSource);
2054                 add(newEntry);
2055             }
2056         }
2057     }
2058 
2059     /**
2060      * Forcibly marks this zip file as touched, forcing it to be updated when {@link #update()}
2061      * or {@link #close()} are invoked.
2062      *
2063      * @throws IllegalStateException if the file is in read-only mode
2064      */
2065     public void touch() {
2066         checkNotInReadOnlyMode();
2067         dirty = true;
2068     }
2069 
2070     /**
2071      * Wait for any background tasks to finish and report any errors. In general this method does
2072      * not need to be invoked directly as errors from background tasks are reported during
2073      * {@link #add(String, InputStream, boolean)}, {@link #update()} and {@link #close()}.
2074      * However, if required for some purposes, <em>e.g.</em>, ensuring all notifications have been
2075      * done to extensions, then this method may be called. It will wait for all background tasks
2076      * to complete.
2077      * @throws IOException some background work failed
2078      */
2079     public void finishAllBackgroundTasks() throws IOException {
2080         processAllReadyEntriesWithWait();
2081     }
2082 
2083     /**
2084      * Realigns all entries in the zip. This is equivalent to calling {@link StoredEntry#realign()}
2085      * for all entries in the zip file.
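     *
     * <p>A minimal usage sketch, assuming {@code zf} is a writable {@code ZFile} whose alignment
     * rules have changed since the entries were written:
     * <pre>{@code
     * if (zf.realign()) {
     *     zf.update();   // persist the repositioned entries
     * }
     * }</pre>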
2086      *
2087      * @return has any entry been changed? Note that for entries that have not yet been written on
2088      * the file, realignment does not count as a change as nothing needs to be updated in the file;
2089      * entries that have been updated may have been recreated and the existing references outside
2090      * of {@code ZFile} may refer to {@link StoredEntry}s that are no longer valid
2091      * @throws IOException failed to realign the zip; some entries in the zip may have been lost
2092      * due to the I/O error
2093      * @throws IllegalStateException if the file is in read-only mode
2094      */
2095     public boolean realign() throws IOException {
2096         checkNotInReadOnlyMode();
2097 
2098         boolean anyChanges = false;
2099         for (StoredEntry entry : entries()) {
2100             anyChanges |= entry.realign();
2101         }
2102 
2103         if (anyChanges) {
2104             dirty = true;
2105         }
2106 
2107         return anyChanges;
2108     }
2109 
2110     /**
2111      * Realigns a stored entry, if necessary. Realignment is done by removing and re-adding the file
2112      * if it was not aligned.
2113      *
2114      * @param entry the entry to realign
2115      * @return has the entry been changed? Note that if the entry has not yet been written on the
2116      * file, realignment does not count as a change as nothing needs to be updated in the file
2117      * @throws IOException failed to read/write an entry; the entry may no longer exist in the
2118      * file
2119      */
2120     boolean realign(@Nonnull StoredEntry entry) throws IOException {
2121         FileUseMapEntry<StoredEntry> mapEntry =
2122                 entries.get(entry.getCentralDirectoryHeader().getName());
2123         Verify.verify(entry == mapEntry.getStore());
2124         long currentDataOffset = mapEntry.getStart() + entry.getLocalHeaderSize();
2125 
2126         int expectedAlignment = chooseAlignment(entry);
2127         long misalignment = currentDataOffset % expectedAlignment;
2128         if (misalignment == 0) {
2129             /*
2130              * Good. File is aligned properly.
2131              */
2132             return false;
2133         }
2134 
2135         if (entry.getCentralDirectoryHeader().getOffset() == -1) {
2136             /*
2137              * File is not aligned but it is not written. We do not really need to do much other
2138              * than find another place in the map.
2139              */
2140             map.remove(mapEntry);
2141             long newStart =
2142                     map.locateFree(
2143                             mapEntry.getSize(),
2144                             entry.getLocalHeaderSize(),
2145                             expectedAlignment,
2146                             FileUseMap.PositionAlgorithm.BEST_FIT);
2147             mapEntry = map.add(newStart, newStart + entry.getInFileSize(), entry);
2148             entries.put(entry.getCentralDirectoryHeader().getName(), mapEntry);
2149 
2150             /*
2151              * Just for safety. We're modifying the in-memory structures but the file should
2152              * already be marked as dirty.
2153              */
2154             Verify.verify(dirty);
2155 
2156             return false;
2157 
2158         }
2159 
2160         /*
2161          * Get the entry data source, but check if we have a compressed one (we don't want to
2162          * inflate and deflate).
2163          */
2164         CentralDirectoryHeaderCompressInfo compressInfo =
2165                 entry.getCentralDirectoryHeader().getCompressionInfoWithWait();
2166 
2167         ProcessedAndRawByteSources source = entry.getSource();
2168 
2169         CentralDirectoryHeader clonedCdh;
2170         try {
2171             clonedCdh = entry.getCentralDirectoryHeader().clone();
2172         } catch (CloneNotSupportedException e) {
2173             Verify.verify(false);
2174             return false;
2175         }
2176 
2177         /*
2178          * We make two changes in the central directory when realigning:
2179          * we reset the offset to force the entry to be written and we reset the
2180          * deferred CRC bit as we don't need the extra stuff after the file. It takes
2181          * space and is totally useless and we may need the extra space to realign the entry...
2182          */
2183         clonedCdh.setOffset(-1);
2184         clonedCdh.resetDeferredCrc();
2185 
2186         CloseableByteSource rawContents = tracker.fromSource(source.getRawByteSource());
2187         CloseableByteSource processedContents;
2188 
2189         if (compressInfo.getMethod() == CompressionMethod.DEFLATE) {
2190             //noinspection IOResourceOpenedButNotSafelyClosed
2191             processedContents = new InflaterByteSource(rawContents);
2192         } else {
2193             processedContents = rawContents;
2194         }
2195 
2196         ProcessedAndRawByteSources newSource = new ProcessedAndRawByteSources(processedContents,
2197                 rawContents);
2198 
2199         /*
2200          * Add the new file. This will replace the existing one.
2201          */
2202         StoredEntry newEntry = new StoredEntry(clonedCdh, this, newSource);
2203         add(newEntry);
2204         return true;
2205     }
2206 
2207     /**
2208      * Adds an extension to this zip file.
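     *
     * <p>A minimal sketch of an extension that only reacts to the "updated" notification
     * (assumes the other {@code ZFileExtension} notification methods keep their default, no-op
     * behavior):
     * <pre>{@code
     * zf.addZFileExtension(new ZFileExtension() {
     *     public void updated() throws IOException {   // override of the update notification
     *         // the archive has just been flushed to disk
     *     }
     * });
     * }</pre>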
2209      *
2210      * @param extension the listener to add
2211      * @throws IllegalStateException if the file is in read-only mode
2212      */
2213     public void addZFileExtension(@Nonnull ZFileExtension extension) {
2214         checkNotInReadOnlyMode();
2215         extensions.add(extension);
2216     }
2217 
2218     /**
2219      * Removes an extension from this zip file.
2220      *
2221      * @param extension the listener to remove
2222      * @throws IllegalStateException if the file is in read-only mode
2223      */
2224     public void removeZFileExtension(@Nonnull ZFileExtension extension) {
2225         checkNotInReadOnlyMode();
2226         extensions.remove(extension);
2227     }
2228 
2229     /**
2230      * Notifies all extensions, collecting their execution requests and running them.
2231      *
2232      * @param function the function to apply to all listeners, it will generally invoke the
2233      * notification method on the listener and return the result of that invocation
2234      * @throws IOException failed to process some extensions
2235      */
2236     private void notify(@Nonnull IOExceptionFunction<ZFileExtension, IOExceptionRunnable> function)
2237             throws IOException {
2238         for (ZFileExtension fl : Lists.newArrayList(extensions)) {
2239             IOExceptionRunnable r = function.apply(fl);
2240             if (r != null) {
2241                 toRun.add(r);
2242             }
2243         }
2244 
2245         if (!isNotifying) {
2246             isNotifying = true;
2247 
2248             try {
2249                 while (!toRun.isEmpty()) {
2250                     IOExceptionRunnable r = toRun.remove(0);
2251                     r.run();
2252                 }
2253             } finally {
2254                 isNotifying = false;
2255             }
2256         }
2257     }
2258 
2259     /**
2260      * Directly writes data in the zip file. <strong>Incorrect use of this method may corrupt the
2261      * zip file</strong>. Invoking this method may force the zip to be reopened in read/write
2262      * mode.
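     *
     * <p>A minimal usage sketch (the patch bytes are hypothetical; the offset here is the current
     * end of the file so no entry data is overwritten):
     * <pre>{@code
     * byte[] patch = new byte[] { 0x50, 0x4b };
     * long offset = zf.directSize();                  // append after the existing contents
     * zf.directWrite(offset, patch, 0, patch.length); // may reopen the file in read/write mode
     * }</pre>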
2263      *
2264      * @param offset the offset at which data should be written
2265      * @param data the data to write, may be an empty array
2266      * @param start start offset in {@code data} where the data to write is located
2267      * @param count number of bytes of data to write
2268      * @throws IOException failed to write the data
2269      * @throws IllegalStateException if the file is in read-only mode
2270      */
2271     public void directWrite(long offset, @Nonnull byte[] data, int start, int count)
2272             throws IOException {
2273         checkNotInReadOnlyMode();
2274 
2275         Preconditions.checkArgument(offset >= 0, "offset < 0");
2276         Preconditions.checkArgument(start >= 0, "start < 0");
2277         Preconditions.checkArgument(count >= 0, "count < 0");
2278 
2279         if (data.length == 0) {
2280             return;
2281         }
2282 
2283         Preconditions.checkArgument(start <= data.length, "start > data.length");
2284         Preconditions.checkArgument(start + count <= data.length, "start + count > data.length");
2285 
2286         reopenRw();
2287         assert raf != null;
2288 
2289         raf.seek(offset);
2290         raf.write(data, start, count);
2291     }
2292 
2293     /**
2294      * Same as {@code directWrite(offset, data, 0, data.length)}.
2295      *
2296      * @param offset the offset at which data should be written
2297      * @param data the data to write, may be an empty array
2298      * @throws IOException failed to write the data
2299      * @throws IllegalStateException if the file is in read-only mode
2300      */
2301     public void directWrite(long offset, @Nonnull byte[] data) throws IOException {
2302         checkNotInReadOnlyMode();
2303         directWrite(offset, data, 0, data.length);
2304     }
2305 
2306     /**
2307      * Returns the current size (in bytes) of the underlying file.
2308      *
2309      * @throws IOException if an I/O error occurs
2310      */
2311     public long directSize() throws IOException {
2312         /*
2313          * Only force a reopen if the file is closed.
2314          */
2315         if (raf == null) {
2316             reopenRw();
2317             assert raf != null;
2318         }
2319         return raf.length();
2320     }
2321 
2322     /**
2323      * Directly reads data from the zip file. Invoking this method may force the zip to be reopened
2324      * in read/write mode.
2325      *
2326      * @param offset the offset at which data should be read
2327      * @param data the array where read data should be stored
2328      * @param start start position in the array where to write data to
2329      * @param count how many bytes of data may be stored
2330      * @return how many bytes of data have been read or {@code -1} if there are no more bytes
2331      * to be read
2332      * @throws IOException failed to read the data
2333      */
2334     public int directRead(long offset, @Nonnull byte[] data, int start, int count)
2335             throws IOException {
2336         Preconditions.checkArgument(start >= 0, "start < 0");
2337         Preconditions.checkArgument(count >= 0, "count < 0");
2338         Preconditions.checkArgument(start <= data.length, "start > data.length");
2339         Preconditions.checkArgument(start + count <= data.length, "start + count > data.length");
2340         return directRead(offset, ByteBuffer.wrap(data, start, count));
2341     }
2342 
2343     /**
2344      * Directly reads data from the zip file. Invoking this method may force the zip to be reopened
2345      * in read/write mode.
2346      *
2347      * @param offset the offset from which data should be read
2348      * @param dest the output buffer to fill with data from the {@code offset}.
2349      * @return how many bytes of data have been read or {@code -1} if there are no more bytes
2350      * to be read
2351      * @throws IOException failed to read the data
2352      */
2353     public int directRead(long offset, @Nonnull ByteBuffer dest) throws IOException {
2354         Preconditions.checkArgument(offset >= 0, "offset < 0");
2355 
2356         if (!dest.hasRemaining()) {
2357             return 0;
2358         }
2359 
2360         /*
2361          * Only force a reopen if the file is closed.
2362          */
2363         if (raf == null) {
2364             reopenRw();
2365             assert raf != null;
2366         }
2367 
2368         raf.seek(offset);
2369         return raf.getChannel().read(dest);
2370     }
2371 
2372     /**
2373      * Same as {@code directRead(offset, data, 0, data.length)}.
2374      *
2375      * @param offset the offset at which data should be read
2376      * @param data receives the read data, may be an empty array
2377      * @return how many bytes of data have been read or {@code -1} if there are no more bytes to be read
     * @throws IOException failed to read the data
2378      */
2379     public int directRead(long offset, @Nonnull byte[] data) throws IOException {
2380         return directRead(offset, data, 0, data.length);
2381     }
2382 
2383     /**
2384      * Reads exactly {@code data.length} bytes of data, failing if it was not possible to read all
2385      * the requested data.
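     *
     * <p>A minimal sketch that reads the raw EOCD record, assuming it has already been written
     * (see {@link #getEocdOffset()} and {@link #getEocdSize()}):
     * <pre>{@code
     * long offset = zf.getEocdOffset();
     * if (offset >= 0) {
     *     byte[] eocd = new byte[(int) zf.getEocdSize()];
     *     zf.directFullyRead(offset, eocd);
     * }
     * }</pre>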
2386      *
2387      * @param offset the offset at which to start reading
2388      * @param data the array that receives the data read
2389      * @throws IOException failed to read some data or there is not enough data to read
2390      */
2391     public void directFullyRead(long offset, @Nonnull byte[] data) throws IOException {
2392         directFullyRead(offset, ByteBuffer.wrap(data));
2393     }
2394 
2395     /**
2396      * Reads exactly {@code dest.remaining()} bytes of data, failing if it was not possible to read
2397      * all the requested data.
2398      *
2399      * @param offset the offset at which to start reading
2400      * @param dest the output buffer to fill with data
2401      * @throws IOException failed to read some data or there is not enough data to read
2402      */
2403     public void directFullyRead(long offset, @Nonnull ByteBuffer dest) throws IOException {
2404         Preconditions.checkArgument(offset >= 0, "offset < 0");
2405 
2406         if (!dest.hasRemaining()) {
2407             return;
2408         }
2409 
2410         /*
2411          * Only force a reopen if the file is closed.
2412          */
2413         if (raf == null) {
2414             reopenRw();
2415             assert raf != null;
2416         }
2417 
2418         FileChannel fileChannel = raf.getChannel();
2419         while (dest.hasRemaining()) {
2420             fileChannel.position(offset);
2421             int chunkSize = fileChannel.read(dest);
2422             if (chunkSize == -1) {
2423                 throw new EOFException(
2424                         "Failed to read " + dest.remaining() + " more bytes: premature EOF");
2425             }
2426             offset += chunkSize;
2427         }
2428     }
2429 
2430     /**
2431      * Adds all files and directories recursively.
2432      * <p>
2433      * Equivalent to calling {@link #addAllRecursively(File, Function)} using a function that
2434      * always returns {@code true}.
2435      *
2436      * @param file a file or directory; if it is a directory, all files and directories will be
2437      * added recursively
2438      * @throws IOException failed to add some (or all) of the files
2439      * @throws IllegalStateException if the file is in read-only mode
2440      */
2441     public void addAllRecursively(@Nonnull File file) throws IOException {
2442         checkNotInReadOnlyMode();
2443         addAllRecursively(file, f -> true);
2444     }
2445 
2446     /**
2447      * Adds all files and directories recursively.
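     *
     * <p>A minimal usage sketch (the archive name, directory name and compression rule are
     * hypothetical):
     * <pre>{@code
     * try (ZFile zf = new ZFile(new File("out.zip"))) {
     *     // store .png files as-is, allow everything else to be compressed
     *     zf.addAllRecursively(new File("inputDir"), f -> !f.getName().endsWith(".png"));
     * }
     * }</pre>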
2448      *
2449      * @param file a file or directory; if it is a directory, all files and directories will be
2450      * added recursively
2451      * @param mayCompress a function that decides whether files may be compressed
2452      * @throws IOException failed to add some (or all) of the files
2453      * @throws IllegalStateException if the file is in read-only mode
2454      */
2455     public void addAllRecursively(
2456             @Nonnull File file,
2457             @Nonnull Function<? super File, Boolean> mayCompress) throws IOException {
2458         checkNotInReadOnlyMode();
2459 
2460         /*
2461          * The case of file.isFile() is different because if file.isFile() we will add it to the
2462          * zip in the root. However, if file.isDirectory() we won't add the directory itself, only its children.
2463          */
2464         if (file.isFile()) {
2465             boolean mayCompressFile = Verify.verifyNotNull(mayCompress.apply(file),
2466                     "mayCompress.apply() returned null");
2467 
2468             try (Closer closer = Closer.create()) {
2469                 FileInputStream fileInput = closer.register(new FileInputStream(file));
2470                 add(file.getName(), fileInput, mayCompressFile);
2471             }
2472 
2473             return;
2474         }
2475 
2476         for (File f : Iterables.skip(Files.fileTraverser().depthFirstPreOrder(file), 1)) {
2477             String path = file.toURI().relativize(f.toURI()).getPath();
2478 
2479             InputStream stream;
2480             try (Closer closer = Closer.create()) {
2481                 boolean mayCompressFile;
2482                 if (f.isDirectory()) {
2483                     stream = closer.register(new ByteArrayInputStream(new byte[0]));
2484                     mayCompressFile = false;
2485                 } else {
2486                     stream = closer.register(new FileInputStream(f));
2487                     mayCompressFile = Verify.verifyNotNull(mayCompress.apply(f),
2488                             "mayCompress.apply() returned null");
2489                 }
2490 
2491                 add(path, stream, mayCompressFile);
2492             }
2493         }
2494     }
2495 
2496     /**
2497      * Obtains the offset at which the central directory exists, or at which it will be written
2498      * if the zip file were to be flushed immediately.
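     * <p>
     * For example (an illustrative sketch, given an open {@code ZFile zfile}), the location and
     * size of the central directory can be inspected together:
     * <pre>{@code
     * long cdOffset = zfile.getCentralDirectoryOffset();
     * long cdSize = zfile.getCentralDirectorySize(); // -1 if not yet computed
     * }</pre>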
2499      *
2500      * @return the offset, in bytes, where the central directory is or will be written; this value
2501      * includes any extra offset for the central directory
2502      */
2503     public long getCentralDirectoryOffset() {
2504         if (directoryEntry != null) {
2505             return directoryEntry.getStart();
2506         }
2507 
2508         /*
2509          * If there are no entries, the central directory is written at the start of the file.
2510          */
2511         if (entries.isEmpty()) {
2512             return extraDirectoryOffset;
2513         }
2514 
2515         /*
2516          * The central directory is written after all entries, so its offset is the end of
2517          * the file space used by entries plus any extra directory offset.
2518          */
2519         return map.usedSize() + extraDirectoryOffset;
2520     }
2521 
2522     /**
2523      * Obtains the size of the central directory, if the central directory is written in the zip
2524      * file.
2525      *
2526      * @return the size of the central directory or {@code -1} if the central directory has not
2527      * been computed
2528      */
2529     public long getCentralDirectorySize() {
2530         if (directoryEntry != null) {
2531             return directoryEntry.getSize();
2532         }
2533 
2534         if (entries.isEmpty()) {
2535             return 0;
2536         }
2537 
2538         return -1;
2539     }
2540 
2541     /**
2542      * Obtains the offset of the EOCD record, if the EOCD has been written to the file.
2543      *
2544      * @return the offset of the EOCD or {@code -1} if none exists yet
2545      */
2546     public long getEocdOffset() {
2547         if (eocdEntry == null) {
2548             return -1;
2549         }
2550 
2551         return eocdEntry.getStart();
2552     }
2553 
2554     /**
2555      * Obtains the size of the EOCD record, if the EOCD has been written to the file.
2556      *
2557      * @return the size of the EOCD or {@code -1} if none exists yet
2558      */
2559     public long getEocdSize() {
2560         if (eocdEntry == null) {
2561             return -1;
2562         }
2563 
2564         return eocdEntry.getSize();
2565     }
2566 
2567     /**
2568      * Obtains the comment in the EOCD.
2569      *
2570      * @return the comment exactly as it was encoded in the EOCD; no encoding conversion is done
2571      */
2572     @Nonnull
2573     public byte[] getEocdComment() {
2574         if (eocdEntry == null) {
2575             Verify.verify(eocdComment != null);
2576             byte[] eocdCommentCopy = new byte[eocdComment.length];
2577             System.arraycopy(eocdComment, 0, eocdCommentCopy, 0, eocdComment.length);
2578             return eocdCommentCopy;
2579         }
2580 
2581         Eocd eocd = eocdEntry.getStore();
2582         Verify.verify(eocd != null);
2583         return eocd.getComment();
2584     }
2585 
2586     /**
2587      * Sets the comment in the EOCD.
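     * <p>
     * For example (an illustrative sketch, given an open writable {@code ZFile zfile}), a UTF-8
     * encoded comment can be set as follows:
     * <pre>{@code
     * zfile.setEocdComment("sample comment".getBytes(java.nio.charset.StandardCharsets.UTF_8));
     * }</pre>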
2588      *
2589      * @param comment the new comment; no conversion is done and these exact bytes will be
2590      * placed in the EOCD comment
2591      * @throws IllegalStateException if file is in read-only mode
2592      */
2593     public void setEocdComment(@Nonnull byte[] comment) {
2594         checkNotInReadOnlyMode();
2595 
2596         if (comment.length > MAX_EOCD_COMMENT_SIZE) {
2597             throw new IllegalArgumentException(
2598                     "EOCD comment size ("
2599                             + comment.length
2600                             + ") is larger than the maximum allowed ("
2601                             + MAX_EOCD_COMMENT_SIZE
2602                             + ")");
2603         }
2604 
2605         // If the EOCD signature appears anywhere in the comment, we need to check whether it
2606         // forms a valid EOCD record.
2607         for (int i = 0; i < comment.length - MIN_EOCD_SIZE; i++) {
2608             // Remember: little endian...
2609             if (comment[i] == EOCD_SIGNATURE[3]
2610                     && comment[i + 1] == EOCD_SIGNATURE[2]
2611                     && comment[i + 2] == EOCD_SIGNATURE[1]
2612                     && comment[i + 3] == EOCD_SIGNATURE[0]) {
2613                 // We found a possible EOCD signature at position i. Try to read it.
2614                 ByteBuffer bytes = ByteBuffer.wrap(comment, i, comment.length - i);
2615                 try {
2616                     new Eocd(bytes);
2617                     throw new IllegalArgumentException(
2618                             "Position "
2619                                     + i
2620                                     + " of the comment contains a valid EOCD record.");
2621                 } catch (IOException e) {
2622                     // Fine, this is an invalid record. Move along...
2623                 }
2624             }
2625         }
2626 
2627         deleteDirectoryAndEocd();
2628         eocdComment = new byte[comment.length];
2629         System.arraycopy(comment, 0, eocdComment, 0, comment.length);
2630         dirty = true;
2631     }
2632 
2633     /**
2634      * Sets an extra offset for the central directory. See class description for details. Changing
2635      * this value will mark the file as dirty and force a rewrite of the central directory when
2636      * updated.
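     * <p>
     * For example (an illustrative sketch, given an open writable {@code ZFile zfile}; the
     * 1024-byte gap is an arbitrary value chosen for illustration):
     * <pre>{@code
     * zfile.setExtraDirectoryOffset(1024); // reserve 1024 bytes before the central directory
     * zfile.update();
     * }</pre>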
2637      *
2638      * @param offset the offset or {@code 0} to write the central directory at its current location
2639      * @throws IllegalStateException if file is in read-only mode
2640      */
2641     public void setExtraDirectoryOffset(long offset) {
2642         checkNotInReadOnlyMode();
2643         Preconditions.checkArgument(offset >= 0, "offset < 0");
2644 
2645         if (extraDirectoryOffset != offset) {
2646             extraDirectoryOffset = offset;
2647             deleteDirectoryAndEocd();
2648             dirty = true;
2649         }
2650     }
2651 
2652     /**
2653      * Obtains the extra offset for the central directory. See class description for details.
2654      *
2655      * @return the offset or {@code 0} if no offset is set
2656      */
2657     public long getExtraDirectoryOffset() {
2658         return extraDirectoryOffset;
2659     }
2660 
2661     /**
2662      * Obtains whether this {@code ZFile} is ignoring timestamps.
2663      *
2664      * @return are the timestamps being ignored?
2665      */
2666     public boolean areTimestampsIgnored() {
2667         return noTimestamps;
2668     }
2669 
2670     /**
2671      * Sorts all files in the zip. This will force all files to be loaded and will wait for all
2672      * background tasks to complete. Sorting is never done implicitly and operates in memory
2673      * only (reading entry data from the zip file on disk into memory, if needed). It will
2674      * leave the zip in a dirty state, requiring a call to {@link #update()} to write the
2675      * entries to disk.
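     * <p>
     * A typical sequence (an illustrative sketch, given an open writable {@code ZFile zfile}):
     * <pre>{@code
     * zfile.sortZipContents();
     * zfile.update(); // writes the sorted entries to disk
     * }</pre>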
2676      *
2677      * @throws IOException failed to load or move a file in the zip
2678      * @throws IllegalStateException if file is in read-only mode
2679      */
2680     public void sortZipContents() throws IOException {
2681         checkNotInReadOnlyMode();
2682         reopenRw();
2683 
2684         processAllReadyEntriesWithWait();
2685 
2686         Verify.verify(uncompressedEntries.isEmpty());
2687 
2688         SortedSet<StoredEntry> sortedEntries = Sets.newTreeSet(StoredEntry.COMPARE_BY_NAME);
2689         for (FileUseMapEntry<StoredEntry> fmEntry : entries.values()) {
2690             StoredEntry entry = fmEntry.getStore();
2691             Preconditions.checkNotNull(entry);
2692             sortedEntries.add(entry);
2693             entry.loadSourceIntoMemory();
2694 
2695             map.remove(fmEntry);
2696         }
2697 
2698         entries.clear();
2699         for (StoredEntry entry : sortedEntries) {
2700             String name = entry.getCentralDirectoryHeader().getName();
2701             FileUseMapEntry<StoredEntry> positioned =
2702                     positionInFile(entry, PositionHint.LOWEST_OFFSET);
2703 
2704             entries.put(name, positioned);
2705         }
2706 
2707         dirty = true;
2708     }
2709 
2710     /**
2711      * Obtains the filesystem path to the zip file.
2712      *
2713      * @return the file that may or may not exist (depending on whether something existed there
2714      * before the zip was created and on whether the zip has been updated or not)
2715      */
2716     @Nonnull
2717     public File getFile() {
2718         return file;
2719     }
2720 
2721     /**
2722      * Creates a new verify log.
2723      *
2724      * @return the new verify log
2725      */
2726     @Nonnull
2727     VerifyLog makeVerifyLog() {
2728         VerifyLog log = verifyLogFactory.get();
2729         assert log != null;
2730         return log;
2731     }
2732 
2733     /**
2734      * Obtains the zip file's verify log.
2735      *
2736      * @return the verify log
2737      */
2738     @Nonnull
2739     VerifyLog getVerifyLog() {
2740         return verifyLog;
2741     }
2742 
2743     /**
2744      * Are there in-memory changes that have not been written to the zip file?
2745      *
2746      * <p>Waits for all pending processing which may make changes.
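     * <p>
     * For example (an illustrative sketch, given an open {@code ZFile zfile}), an unnecessary
     * update can be skipped:
     * <pre>{@code
     * if (zfile.hasPendingChangesWithWait()) {
     *     zfile.update();
     * }
     * }</pre>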
2747      */
2748     public boolean hasPendingChangesWithWait() throws IOException {
2749         processAllReadyEntriesWithWait();
2750         return dirty;
2751     }
2752 
2753     /** Hint to where files should be positioned. */
2754     enum PositionHint {
2755         /**
2756          * File may be positioned anywhere; the caller doesn't care.
2757          */
2758         ANYWHERE,
2759 
2760         /**
2761          * File should be positioned at the lowest offset possible.
2762          */
2763         LOWEST_OFFSET
2764     }
2765 }
2766