cache: split archive file processing out from archive

Adam
2017-09-11 16:05:11 -04:00
parent 2cc0eb4766
commit 218333e97c
3 changed files with 198 additions and 121 deletions

cache/src/main/java/net/runelite/cache/fs/Archive.java

@@ -27,12 +27,9 @@ package net.runelite.cache.fs;
 import net.runelite.cache.fs.jagex.DataFile;
 import net.runelite.cache.fs.jagex.DataFileReadResult;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
-import net.runelite.cache.io.InputStream;
-import net.runelite.cache.io.OutputStream;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -51,7 +48,7 @@ public class Archive
     private int revision;
     private int compression;
-    private final List<FSFile> files = new ArrayList<>();
+    private final ArchiveFiles files = new ArchiveFiles();
 
     public Archive(Index index, int id)
     {
@@ -118,11 +115,16 @@ public class Archive
     public FSFile addFile(int id)
     {
-        FSFile file = new FSFile(this, id);
-        this.files.add(file);
+        FSFile file = new FSFile(id);
+        this.files.addFile(file);
         return file;
     }
 
+    public FSFile findFile(int id)
+    {
+        return this.files.findFile(id);
+    }
+
     public void decompressAndLoad(int[] keys) throws IOException
     {
         byte[] encryptedData = this.getData();
@@ -163,116 +165,13 @@ public class Archive
         setCompression(res.compression);
-        loadContents(decompressedData);
+        files.loadContents(decompressedData);
         this.setData(null); // now that we've loaded it, clean it so it doesn't get written back
     }
 
-    public void loadContents(byte[] data)
-    {
-        logger.trace("Loading contents of archive {} ({} files)", archiveId, files.size());
-
-        assert !this.getFiles().isEmpty();
-
-        if (this.getFiles().size() == 1)
-        {
-            this.getFiles().get(0).setContents(data);
-            return;
-        }
-
-        int filesCount = this.getFiles().size();
-
-        InputStream stream = new InputStream(data);
-        stream.setOffset(stream.getLength() - 1);
-        int chunks = stream.readUnsignedByte();
-
-        // -1 for chunks count + one int per file slot per chunk
-        stream.setOffset(stream.getLength() - 1 - chunks * filesCount * 4);
-
-        int[][] chunkSizes = new int[filesCount][chunks];
-        int[] filesSize = new int[filesCount];
-
-        for (int chunk = 0; chunk < chunks; ++chunk)
-        {
-            int chunkSize = 0;
-
-            for (int id = 0; id < filesCount; ++id)
-            {
-                int delta = stream.readInt();
-                chunkSize += delta; // size of this chunk
-
-                chunkSizes[id][chunk] = chunkSize; // store size of chunk
-                filesSize[id] += chunkSize; // add chunk size to file size
-            }
-        }
-
-        byte[][] fileContents = new byte[filesCount][];
-        int[] fileOffsets = new int[filesCount];
-
-        for (int i = 0; i < filesCount; ++i)
-        {
-            fileContents[i] = new byte[filesSize[i]];
-        }
-
-        // the file data is at the beginning of the stream
-        stream.setOffset(0);
-
-        for (int chunk = 0; chunk < chunks; ++chunk)
-        {
-            for (int id = 0; id < filesCount; ++id)
-            {
-                int chunkSize = chunkSizes[id][chunk];
-
-                stream.readBytes(fileContents[id], fileOffsets[id], chunkSize);
-                fileOffsets[id] += chunkSize;
-            }
-        }
-
-        for (int i = 0; i < filesCount; ++i)
-        {
-            FSFile f = this.getFiles().get(i);
-            f.setContents(fileContents[i]);
-        }
-    }
-
     public byte[] saveContents()
     {
-        OutputStream stream = new OutputStream();
-
-        int filesCount = this.getFiles().size();
-
-        if (filesCount == 1)
-        {
-            FSFile file = this.getFiles().get(0);
-            stream.writeBytes(file.getContents());
-        }
-        else
-        {
-            for (FSFile file : this.getFiles())
-            {
-                byte[] contents = file.getContents();
-                stream.writeBytes(contents);
-            }
-
-            int offset = 0;
-            for (FSFile file : this.getFiles())
-            {
-                int chunkSize = file.getSize();
-
-                int sz = chunkSize - offset;
-                offset = chunkSize;
-                stream.writeInt(sz);
-            }
-
-            stream.writeByte(1); // chunks
-        }
-
-        byte[] fileData = stream.flip();
-
-        logger.trace("Saved contents of archive {}/{} ({} files), {} bytes", index.getId(), archiveId, files.size(), fileData.length);
-
-        return fileData;
+        return files.saveContents();
     }
 
     public int getArchiveId()
@@ -332,6 +231,11 @@ public class Archive
     public List<FSFile> getFiles()
     {
-        return files;
+        return files.getFiles();
     }
+
+    public void clearFiles()
+    {
+        files.clear();
+    }
 }
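
For reference, the container format that moved out of Archive packs a multi-file archive as the concatenated file contents, followed by a trailer of delta-encoded sizes (one int per file per chunk) and a final chunk-count byte. Below is a minimal standalone sketch of the single-chunk layout saveContents() emits for two files of 10 and 25 bytes, using plain java.nio instead of RuneLite's stream wrappers; the class name is illustrative, not part of the commit.

import java.nio.ByteBuffer;

public class ContainerSketch
{
    public static void main(String[] args)
    {
        byte[] file0 = new byte[10];
        byte[] file1 = new byte[25];

        ByteBuffer buf = ByteBuffer.allocate(file0.length + file1.length + 2 * 4 + 1);
        buf.put(file0).put(file1);               // concatenated file contents
        buf.putInt(file0.length);                // delta for file 0: 10 - 0 = 10
        buf.putInt(file1.length - file0.length); // delta for file 1: 25 - 10 = 15
        buf.put((byte) 1);                       // trailing chunk count

        // loadContents() walks this back to front: the final byte gives the
        // chunk count, and summing the deltas rebuilds each file's size
        // (10, then 10 + 15 = 25) before the contents are split apart.
        System.out.println(buf.position()); // 44 bytes: 35 data + 8 trailer + 1
    }
}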

cache/src/main/java/net/runelite/cache/fs/ArchiveFiles.java

@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2016-2017, Adam <Adam@sigterm.info>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+package net.runelite.cache.fs;
+
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import net.runelite.cache.io.InputStream;
+import net.runelite.cache.io.OutputStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ArchiveFiles
+{
+    private static final Logger logger = LoggerFactory.getLogger(ArchiveFiles.class);
+
+    private final List<FSFile> files = new ArrayList<>();
+    private final Map<Integer, FSFile> fileMap = new HashMap<>();
+
+    public void addFile(FSFile file)
+    {
+        Preconditions.checkArgument(file.getFileId() != -1);
+
+        if (fileMap.containsKey(file.getFileId()))
+        {
+            throw new IllegalStateException("duplicate file ids");
+        }
+
+        files.add(file);
+        fileMap.put(file.getFileId(), file);
+    }
+
+    public List<FSFile> getFiles()
+    {
+        return Collections.unmodifiableList(files);
+    }
+
+    public FSFile findFile(int fileId)
+    {
+        return fileMap.get(fileId);
+    }
+
+    public void clear()
+    {
+        files.clear();
+        fileMap.clear();
+    }
+
+    public void loadContents(byte[] data)
+    {
+        logger.trace("Loading contents of archive ({} files)", files.size());
+
+        assert !this.getFiles().isEmpty();
+
+        if (this.getFiles().size() == 1)
+        {
+            this.getFiles().get(0).setContents(data);
+            return;
+        }
+
+        int filesCount = this.getFiles().size();
+
+        InputStream stream = new InputStream(data);
+        stream.setOffset(stream.getLength() - 1);
+        int chunks = stream.readUnsignedByte();
+
+        // -1 for chunks count + one int per file slot per chunk
+        stream.setOffset(stream.getLength() - 1 - chunks * filesCount * 4);
+
+        int[][] chunkSizes = new int[filesCount][chunks];
+        int[] filesSize = new int[filesCount];
+
+        for (int chunk = 0; chunk < chunks; ++chunk)
+        {
+            int chunkSize = 0;
+
+            for (int id = 0; id < filesCount; ++id)
+            {
+                int delta = stream.readInt();
+                chunkSize += delta; // size of this chunk
+
+                chunkSizes[id][chunk] = chunkSize; // store size of chunk
+                filesSize[id] += chunkSize; // add chunk size to file size
+            }
+        }
+
+        byte[][] fileContents = new byte[filesCount][];
+        int[] fileOffsets = new int[filesCount];
+
+        for (int i = 0; i < filesCount; ++i)
+        {
+            fileContents[i] = new byte[filesSize[i]];
+        }
+
+        // the file data is at the beginning of the stream
+        stream.setOffset(0);
+
+        for (int chunk = 0; chunk < chunks; ++chunk)
+        {
+            for (int id = 0; id < filesCount; ++id)
+            {
+                int chunkSize = chunkSizes[id][chunk];
+
+                stream.readBytes(fileContents[id], fileOffsets[id], chunkSize);
+                fileOffsets[id] += chunkSize;
+            }
+        }
+
+        for (int i = 0; i < filesCount; ++i)
+        {
+            FSFile f = this.getFiles().get(i);
+            f.setContents(fileContents[i]);
+        }
+    }
+
+    public byte[] saveContents()
+    {
+        OutputStream stream = new OutputStream();
+
+        int filesCount = this.getFiles().size();
+
+        if (filesCount == 1)
+        {
+            FSFile file = this.getFiles().get(0);
+            stream.writeBytes(file.getContents());
+        }
+        else
+        {
+            for (FSFile file : this.getFiles())
+            {
+                byte[] contents = file.getContents();
+                stream.writeBytes(contents);
+            }
+
+            int offset = 0;
+            for (FSFile file : this.getFiles())
+            {
+                int chunkSize = file.getSize();
+
+                int sz = chunkSize - offset;
+                offset = chunkSize;
+                stream.writeInt(sz);
+            }
+
+            stream.writeByte(1); // chunks
+        }
+
+        byte[] fileData = stream.flip();
+
+        logger.trace("Saved contents of archive ({} files), {} bytes", files.size(), fileData.length);
+
+        return fileData;
+    }
+}
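
A minimal round-trip sketch against the ArchiveFiles API shown above. Note that loadContents() fills the files already registered in order, so the reader must add the same file slots before loading; the ids and contents here are illustrative.

import java.nio.charset.StandardCharsets;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;

public class ArchiveFilesRoundTrip
{
    public static void main(String[] args)
    {
        ArchiveFiles files = new ArchiveFiles();

        FSFile a = new FSFile(0);
        a.setContents("hello".getBytes(StandardCharsets.UTF_8));
        files.addFile(a);

        FSFile b = new FSFile(1);
        b.setContents("world".getBytes(StandardCharsets.UTF_8));
        files.addFile(b);

        // Pack both files into one blob: contents, size deltas, chunk count.
        byte[] packed = files.saveContents();

        // Register the same file slots, then split the blob back per file.
        ArchiveFiles restored = new ArchiveFiles();
        restored.addFile(new FSFile(0));
        restored.addFile(new FSFile(1));
        restored.loadContents(packed);
    }
}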

cache/src/main/java/net/runelite/cache/fs/FSFile.java

@@ -29,14 +29,12 @@ import java.util.Arrays;
 public class FSFile
 {
-    private Archive archive;
-    private int fileId;
+    private final int fileId;
     private int nameHash;
     private byte[] contents;
 
-    public FSFile(Archive archive, int fileId)
+    public FSFile(int fileId)
     {
-        this.archive = archive;
         this.fileId = fileId;
     }
@@ -77,11 +75,6 @@ public class FSFile
         return true;
     }
 
-    public Archive getArchive()
-    {
-        return archive;
-    }
-
     public int getFileId()
     {
         return fileId;
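
With the Archive back-reference and getArchive() gone, an FSFile is keyed by its id alone. A small sketch of the new construction path, with illustrative values:

import net.runelite.cache.fs.FSFile;

public class FSFileSketch
{
    public static void main(String[] args)
    {
        // Previously: new FSFile(archive, 3); the parent reference is no longer needed.
        FSFile file = new FSFile(3);
        file.setContents(new byte[]{1, 2, 3});
        System.out.println(file.getFileId()); // 3; fileId is now final, set once at construction
    }
}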