Various subtle bugfixes/improvements

Replace the GNU Crypto whirlpool implementation with Bouncy Castle's, which is actually the correct version
Record compression type of archives and indexes and rewrite with the same compression
Fix bzip2 compression to use 100k block size
Fix writing index and archive revisions
Adam
2016-06-01 10:26:00 -04:00
parent a1615c2263
commit e3cd89e72d
12 changed files with 307 additions and 135 deletions
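
The Whirlpool change replaces the gnu.crypto.hash.Whirlpool digest with the standard JCA MessageDigest backed by the Bouncy Castle provider, and the bzip2 change passes a block size of 1 (100 kB) to BZip2CompressorOutputStream so written data matches the cache's bzip2 streams. A minimal sketch of the new hashing path, assuming the bcprov dependency is on the classpath (the class name here is illustrative; the real implementation is in Whirlpool.java below):

import java.security.MessageDigest;
import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

public class WhirlpoolSketch
{
	public static byte[] hash(byte[] data, int len) throws Exception
	{
		// register Bouncy Castle so the JCA can resolve the "Whirlpool" algorithm name
		Security.addProvider(new BouncyCastleProvider());
		MessageDigest digest = MessageDigest.getInstance("Whirlpool");
		digest.update(data, 0, len);
		return digest.digest();
	}
}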

View File: Archive.java

@@ -31,10 +31,10 @@
package net.runelite.cache.fs;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import net.runelite.cache.io.InputStream;
import net.runelite.cache.io.OutputStream;
public class Archive
{
@@ -44,6 +44,7 @@ public class Archive
private byte[] whirlpool;
private int crc;
private int revision;
private int compression;
private List<File> files = new ArrayList<>();
public Archive(Index index, int id)
@@ -83,7 +84,6 @@ public class Archive
{
return false;
}
// crc is of the file data, we always rewrite in one loop, so it is different
if (this.revision != other.revision)
{
return false;
@@ -115,6 +115,106 @@ public class Archive
}
}
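/*
 * Decodes a multi-file archive. The on-disk layout is: the concatenated file
 * data first, then one int per file per chunk holding delta-encoded chunk
 * sizes, and finally a single trailing byte giving the chunk count.
 */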
public void loadContents(byte[] data)
{
if (this.getFiles().size() == 1)
{
this.getFiles().get(0).setContents(data);
return;
}
int filesCount = this.getFiles().size();
InputStream stream = new InputStream(data);
stream.setOffset(stream.getLength() - 1);
int chunks = stream.readUnsignedByte();
// -1 for chunks count + one int per file slot per chunk
stream.setOffset(stream.getLength() - 1 - chunks * filesCount * 4);
int[][] chunkSizes = new int[filesCount][chunks];
int[] filesSize = new int[filesCount];
for (int chunk = 0; chunk < chunks; ++chunk)
{
int chunkSize = 0;
for (int id = 0; id < filesCount; ++id)
{
int delta = stream.readInt();
chunkSize += delta; // size of this chunk
chunkSizes[id][chunk] = chunkSize; // store size of chunk
filesSize[id] += chunkSize; // add chunk size to file size
}
}
byte[][] fileContents = new byte[filesCount][];
int[] fileOffsets = new int[filesCount];
for (int i = 0; i < filesCount; ++i)
{
fileContents[i] = new byte[filesSize[i]];
}
// the file data is at the beginning of the stream
stream.setOffset(0);
for (int chunk = 0; chunk < chunks; ++chunk)
{
for (int id = 0; id < filesCount; ++id)
{
int chunkSize = chunkSizes[id][chunk];
stream.readBytes(fileContents[id], fileOffsets[id], chunkSize);
fileOffsets[id] += chunkSize;
}
}
for (int i = 0; i < filesCount; ++i)
{
File f = this.getFiles().get(i);
f.setContents(fileContents[i]);
}
}
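/*
 * Encodes the archive back into the same layout: file contents written
 * back-to-back, followed by delta-encoded sizes (one int per file), and a
 * trailing chunk count of 1, since everything is rewritten as a single chunk.
 */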
public byte[] saveContents()
{
OutputStream stream = new OutputStream();
int filesCount = this.getFiles().size();
if (filesCount == 1)
{
File file = this.getFiles().get(0);
stream.writeBytes(file.getContents());
}
else
{
for (File file : this.getFiles())
{
stream.writeBytes(file.getContents());
}
int offset = 0;
for (File file : this.getFiles())
{
int chunkSize = file.getSize();
int sz = chunkSize - offset;
offset = chunkSize;
stream.writeInt(sz);
}
stream.writeByte(1); // chunks
}
byte[] fileData = stream.flip();
return fileData;
}
public void loadNames(InputStream stream, int numberOfFiles)
{
for (int i = 0; i < numberOfFiles; ++i)
@@ -170,6 +270,16 @@ public class Archive
this.revision = revision;
}
public int getCompression()
{
return compression;
}
public void setCompression(int compression)
{
this.compression = compression;
}
public List<File> getFiles()
{
return files;

View File: DataFile.java

@@ -80,7 +80,7 @@ public class DataFile implements Closeable
*/
public synchronized DataFileReadResult read(int indexId, int archiveId, int sector, int size) throws IOException
{
if (sector <= 0L || dat.length() / 520L < (long) sector)
if (sector <= 0L || dat.length() / SECTOR_SIZE < (long) sector)
{
logger.warn("bad read, dat length {}, requested sector {}", dat.length(), sector);
return null;
@@ -171,7 +171,7 @@ public class DataFile implements Closeable
//XTEA decrypt here?
return this.decompress(buffer.array());
return decompress(buffer.array());
}
public synchronized DataFileWriteResult write(int indexId, int archiveId, ByteBuffer data, int compression, int revision) throws IOException
@@ -270,12 +270,13 @@ public class DataFile implements Closeable
DataFileWriteResult res = new DataFileWriteResult();
res.sector = startSector;
res.compressedLength = compressedData.length;
res.crc = CRC32HGenerator.getHash(compressedData, compressedData.length - 2);
res.whirlpool = Whirlpool.getHash(compressedData, compressedData.length - 2);
int length = revision != -1 ? compressedData.length - 2 : compressedData.length;
res.crc = CRC32HGenerator.getHash(compressedData, length);
res.whirlpool = Whirlpool.getHash(compressedData, length);
return res;
}
private DataFileReadResult decompress(byte[] b)
public static DataFileReadResult decompress(byte[] b)
{
InputStream stream = new InputStream(b);
@@ -290,22 +291,22 @@ public class DataFile implements Closeable
{
case CompressionType.NONE:
data = new byte[compressedLength];
revision = this.checkRevision(stream, compressedLength);
revision = checkRevision(stream, compressedLength);
stream.readBytes(data, 0, compressedLength);
break;
case CompressionType.BZ2:
{
int length = stream.readInt();
revision = this.checkRevision(stream, compressedLength);
data = BZip2.decompress(stream.getRemaining());
revision = checkRevision(stream, compressedLength);
data = BZip2.decompress(stream.getRemaining(), compressedLength);
assert data.length == length;
break;
}
case CompressionType.GZ:
{
int length = stream.readInt();
revision = this.checkRevision(stream, compressedLength);
data = GZip.decompress(stream.getRemaining());
revision = checkRevision(stream, compressedLength);
data = GZip.decompress(stream.getRemaining(), compressedLength);
assert data.length == length;
break;
}
@@ -316,8 +317,10 @@ public class DataFile implements Closeable
DataFileReadResult res = new DataFileReadResult();
res.data = data;
res.revision = revision;
res.crc = CRC32HGenerator.getHash(b, b.length - 2);
res.whirlpool = Whirlpool.getHash(b, b.length - 2);
int length = revision != -1 ? b.length - 2 : b.length;
res.crc = CRC32HGenerator.getHash(b, length);
res.whirlpool = Whirlpool.getHash(b, length);
res.compression = compression;
return res;
}
@@ -349,12 +352,13 @@ public class DataFile implements Closeable
}
stream.writeBytes(compressedData);
stream.writeShort(revision);
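// the 2-byte revision trailer is only appended when a revision is tracked;
// index (255) data is written with revision -1 and omits it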
if (revision != -1)
stream.writeShort(revision);
return stream.flip();
}
private int checkRevision(InputStream stream, int compressedLength)
private static int checkRevision(InputStream stream, int compressedLength)
{
int offset = stream.getOffset();
int revision;
@@ -362,6 +366,7 @@ public class DataFile implements Closeable
{
stream.setOffset(stream.getLength() - 2);
revision = stream.readUnsignedShort();
assert revision != -1;
stream.setOffset(offset);
}
else

View File: DataFileReadResult.java

@@ -36,4 +36,5 @@ public class DataFileReadResult
public int revision;
public int crc; // crc of compressed data
public byte[] whirlpool;
public int compression; // compression method data was compressed with
}

View File: Index.java

@@ -50,7 +50,12 @@ public class Index implements Closeable
private final Store store;
private final IndexFile index;
private final int id;
private int protocol = 7;
private boolean named = true, usesWhirpool;
private int revision;
private int crc;
private byte[] whirlpool;
private int compression; // compression method of this index's data in index 255
private final List<Archive> archives = new ArrayList<>();
public Index(Store store, IndexFile index, int id)
@@ -108,6 +113,21 @@ public class Index implements Closeable
return id;
}
public int getRevision()
{
return revision;
}
public int getCrc()
{
return crc;
}
public byte[] getWhirlpool()
{
return whirlpool;
}
public IndexFile getIndex()
{
return index;
@@ -154,6 +174,11 @@ public class Index implements Closeable
archives.clear();
readIndexData(data);
this.crc = res.crc;
this.whirlpool = res.whirlpool;
this.compression = res.compression;
assert res.revision == -1;
this.loadFiles();
}
@@ -167,14 +192,17 @@ public class Index implements Closeable
DataFile dataFile = store.getData();
IndexFile index255 = store.getIndex255();
DataFileWriteResult res = dataFile.write(index255.getIndexFileId(), this.id, ByteBuffer.wrap(data), 0, this.revision);
DataFileWriteResult res = dataFile.write(index255.getIndexFileId(), this.id, ByteBuffer.wrap(data), this.compression, -1); // index data revision is always -1
index255.write(new IndexEntry(index255, id, res.sector, res.compressedLength));
this.crc = res.crc;
this.whirlpool = res.whirlpool;
}
private void readIndexData(byte[] data)
public void readIndexData(byte[] data)
{
InputStream stream = new InputStream(data);
int protocol = stream.readUnsignedByte();
protocol = stream.readUnsignedByte();
if (protocol >= 5 && protocol <= 7)
{
if (protocol >= 6)
@@ -183,8 +211,9 @@ public class Index implements Closeable
}
int hash = stream.readUnsignedByte();
boolean named = (1 & hash) != 0;
boolean usesWhirpool = (2 & hash) != 0;
named = (1 & hash) != 0;
usesWhirpool = (2 & hash) != 0;
assert (hash & ~3) == 0;
int validArchivesCount = protocol >= 7 ? stream.readBigSmart() : stream.readUnsignedShort();
int lastArchiveId = 0;
@@ -288,65 +317,14 @@ public class Index implements Closeable
logger.warn("whirlpool mismatch for archive {}", a);
}
if (a.getFiles().size() == 1)
if (a.getRevision() != res.revision)
{
a.getFiles().get(0).setContents(data);
continue;
logger.warn("revision mismatch for archive {}", a);
}
a.setCompression(res.compression);
final int filesCount = a.getFiles().size();
int readPosition = data.length;
--readPosition;
int amtOfLoops = data[readPosition] & 255;
readPosition -= amtOfLoops * filesCount * 4;
InputStream stream = new InputStream(data);
stream.setOffset(readPosition);
int[] filesSize = new int[filesCount];
int sourceOffset;
int count;
for (int filesData = 0; filesData < amtOfLoops; ++filesData)
{
sourceOffset = 0;
for (count = 0; count < filesCount; ++count)
{
filesSize[count] += sourceOffset += stream.readInt();
}
}
byte[][] var18 = new byte[filesCount][];
for (sourceOffset = 0; sourceOffset < filesCount; ++sourceOffset)
{
var18[sourceOffset] = new byte[filesSize[sourceOffset]];
filesSize[sourceOffset] = 0;
}
stream.setOffset(readPosition);
sourceOffset = 0;
int fileId;
int i;
for (count = 0; count < amtOfLoops; ++count)
{
fileId = 0;
for (i = 0; i < filesCount; ++i)
{
fileId += stream.readInt();
System.arraycopy(data, sourceOffset, var18[i], filesSize[i], fileId);
sourceOffset += fileId;
filesSize[i] += fileId;
}
}
for (i = 0; i < filesCount; ++i)
{
File f = a.getFiles().get(i);
f.setContents(var18[i]);
}
a.loadContents(data);
}
}
@@ -354,43 +332,12 @@ public class Index implements Closeable
{
for (Archive a : archives)
{
OutputStream stream = new OutputStream();
int sourceOffset = 0;
final int filesCount = a.getFiles().size();
if (filesCount == 1)
{
File file = a.getFiles().get(0);
stream.writeBytes(file.getContents());
}
else
{
for (int i = 0; i < filesCount; ++i)
{
File file = a.getFiles().get(i);
stream.writeBytes(file.getContents());
}
for (int count = 0; count < filesCount; ++count)
{
File file = a.getFiles().get(count);
int sz = file.getSize() - sourceOffset;
sourceOffset = file.getSize();
stream.writeInt(sz);
}
stream.writeByte(1); // number of loops
}
byte[] fileData = stream.flip();
byte[] fileData = a.saveContents();
assert this.index.getIndexFileId() == this.id;
DataFile data = store.getData();
// XXX old data is just left there in the file?
DataFileWriteResult res = data.write(this.id, a.getArchiveId(), ByteBuffer.wrap(fileData), 0, this.revision);
DataFileWriteResult res = data.write(this.id, a.getArchiveId(), ByteBuffer.wrap(fileData), a.getCompression(), a.getRevision());
this.index.write(new IndexEntry(this.index, a.getArchiveId(), res.sector, res.compressedLength));
a.setCrc(res.crc);
@@ -401,14 +348,12 @@ public class Index implements Closeable
public byte[] writeIndexData()
{
OutputStream stream = new OutputStream();
int protocol = 7;//this.getProtocol();
stream.writeByte(protocol);
if (protocol >= 6)
{
stream.writeInt(this.revision);
}
boolean named = true, usesWhirpool = false;
stream.writeByte((named ? 1 : 0) | (usesWhirpool ? 2 : 0));
if (protocol >= 7)
{

View File: Store.java

@@ -98,7 +98,7 @@ public class Store implements Closeable
return true;
}
public final Index addIndex(int id) throws FileNotFoundException
public Index addIndex(int id) throws FileNotFoundException
{
for (Index i : indexes)
if (i.getIndex().getIndexFileId() == id)
@@ -111,6 +111,12 @@ public class Store implements Closeable
return index;
}
public void removeIndex(Index index)
{
assert indexes.contains(index);
indexes.remove(index);
}
public void load() throws IOException
{
@@ -150,4 +156,12 @@ public class Store implements Closeable
{
return indexes.get(type.getNumber());
}
public Index findIndex(int id)
{
for (Index i : indexes)
if (i.getId() == id)
return i;
return null;
}
}

View File: BZip2.java

@@ -58,12 +58,18 @@ public class BZip2
{
InputStream is = new ByteArrayInputStream(bytes);
ByteArrayOutputStream bout = new ByteArrayOutputStream();
try (OutputStream os = new BZip2CompressorOutputStream(bout))
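// a block size of 1 selects 100 kB bzip2 blocks, which is what the cache data expects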
try (OutputStream os = new BZip2CompressorOutputStream(bout, 1))
{
IOUtils.copy(is, os);
}
byte[] out = bout.toByteArray();
assert BZIP_HEADER[0] == out[0];
assert BZIP_HEADER[1] == out[1];
assert BZIP_HEADER[2] == out[2];
assert BZIP_HEADER[3] == out[3];
return Arrays.copyOfRange(out, BZIP_HEADER.length, out.length); // remove header..
}
catch (IOException ex)
@@ -73,15 +79,15 @@ public class BZip2
}
}
public static byte[] decompress(byte[] bytes)
public static byte[] decompress(byte[] bytes, int len)
{
try
{
byte[] data = new byte[bytes.length + BZIP_HEADER.length];
byte[] data = new byte[len + BZIP_HEADER.length];
// add header
System.arraycopy(BZIP_HEADER, 0, data, 0, BZIP_HEADER.length);
System.arraycopy(bytes, 0, data, BZIP_HEADER.length, bytes.length);
System.arraycopy(bytes, 0, data, BZIP_HEADER.length, len);
ByteArrayOutputStream os = new ByteArrayOutputStream();

View File: GZip.java

@@ -63,11 +63,11 @@ public class GZip
return bout.toByteArray();
}
public static byte[] decompress(byte[] bytes)
public static byte[] decompress(byte[] bytes, int len)
{
ByteArrayOutputStream os = new ByteArrayOutputStream();
try (InputStream is = new GZIPInputStream(new ByteArrayInputStream(bytes)))
try (InputStream is = new GZIPInputStream(new ByteArrayInputStream(bytes, 0, len)))
{
IOUtils.copy(is, os);
}

View File: Whirlpool.java

@@ -27,22 +27,34 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.cache.fs.util;
public class Whirlpool {
private static gnu.crypto.hash.Whirlpool whirlpool = new gnu.crypto.hash.Whirlpool();
public static synchronized byte[] getHash(byte[] data, int len)
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
public class Whirlpool
{
private static MessageDigest messageDigest;
static
{
whirlpool.update(data, 0, len);
Security.addProvider(new BouncyCastleProvider());
try
{
return whirlpool.digest();
messageDigest = MessageDigest.getInstance("Whirlpool");
}
finally
catch (NoSuchAlgorithmException ex)
{
whirlpool.reset();
throw new RuntimeException(ex);
}
}
public static synchronized byte[] getHash(byte[] data, int len)
{
messageDigest.update(data, 0, len);
return messageDigest.digest();
}
}

View File: OutputStream.java

@@ -75,8 +75,13 @@ public final class OutputStream extends java.io.OutputStream
public void writeBytes(byte[] b)
{
ensureRemaining(b.length);
buffer.put(b);
writeBytes(b, 0, b.length);
}
public void writeBytes(byte[] b, int offset, int length)
{
ensureRemaining(length);
buffer.put(b, offset, length);
}
public void writeByte(int i)

View File: DataFileTest.java

@@ -68,7 +68,7 @@ public class DataFileTest
File file = folder.newFile();
Store store = new Store(folder.getRoot());
DataFile df = new DataFile(store, file);
DataFileWriteResult res = df.write(42, 0x1FFFF, ByteBuffer.wrap(b), CompressionType.NONE, 0);
DataFileWriteResult res = df.write(42, 0x1FFFF, ByteBuffer.wrap(b), CompressionType.BZ2, 42);
DataFileReadResult res2 = df.read(42, 0x1FFFF, res.sector, res.compressedLength);
byte[] buf = res2.data;
Assert.assertArrayEquals(b, buf);
@@ -95,11 +95,27 @@ public class DataFileTest
try (Store store = new Store(folder.getRoot()))
{
DataFile df = new DataFile(store, folder.newFile());
DataFileWriteResult res = df.write(41, 4, ByteBuffer.wrap("test".getBytes()), CompressionType.BZ2, 0);
DataFileWriteResult res = df.write(41, 4, ByteBuffer.wrap("test".getBytes()), CompressionType.BZ2, 5);
DataFileReadResult res2 = df.read(41, 4, res.sector, res.compressedLength);
byte[] buf = res2.data;
String str = new String(buf);
Assert.assertEquals("test", str);
}
}
@Test
public void testCrc() throws IOException
{
File file = folder.newFile();
Store store = new Store(folder.getRoot());
DataFile df = new DataFile(store, file);
DataFileWriteResult res = df.write(42, 3, ByteBuffer.wrap("test".getBytes()), CompressionType.NONE, 42);
DataFileReadResult res2 = df.read(42, 3, res.sector, res.compressedLength);
byte[] buf = res2.data;
String str = new String(buf);
Assert.assertEquals("test", str);
Assert.assertEquals(res.crc, res2.crc);
Assert.assertEquals(42, res2.revision);
file.delete();
}
}

View File: WhirlpoolTest.java

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2016, Adam <Adam@sigterm.info>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Adam <Adam@sigterm.info>
* 4. Neither the name of the Adam <Adam@sigterm.info> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Adam <Adam@sigterm.info> ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Adam <Adam@sigterm.info> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.cache.fs.util;
import org.junit.Assert;
import org.junit.Test;
public class WhirlpoolTest
{
private static final byte[] result =
{
92, -33, 60, 4, -28, 24, 54, -39,
-11, -85, -123, -74, 6, -107, 32, 36,
108, 104, -82, 108, 36, -53, -95, 123,
-84, -86, -13, 107, -110, 27, 35, -78,
-60, -122, 36, 56, 86, 73, -9, -70,
-35, 58, -43, 82, -36, -53, -107, -9,
-21, 6, -43, 14, 109, -26, -115, 67,
64, 116, 107, 18, 12, 46, -64, 63
};
@Test
public void testGetHash()
{
byte[] data = "runelite".getBytes();
byte[] out = Whirlpool.getHash(data, data.length);
Assert.assertArrayEquals(out, result);
}
}