cache: split loading archive files from archive loading
Also no longer store archive contents in memory; instead, read them from storage on demand.
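For orientation, a minimal sketch of the read path this change introduces, as the manager hunks below use it. The class and method names are invented for illustration, the ConfigType import location is inferred from the diff, and this sketch is not part of the commit.

```java
import java.io.IOException;
import net.runelite.cache.ConfigType; // package inferred from the diff
import net.runelite.cache.IndexType;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;

public class OnDemandReadExample
{
	public void dumpItemFileSizes(Store store) throws IOException
	{
		Storage storage = store.getStorage();
		Index index = store.getIndex(IndexType.CONFIGS);
		Archive archive = index.getArchive(ConfigType.ITEM.getId());

		// Archives no longer cache their contents; read the raw container
		// from storage and split it into files only when needed.
		byte[] archiveData = storage.loadArchive(archive);
		ArchiveFiles files = archive.getFiles(archiveData);

		for (FSFile file : files.getFiles())
		{
			System.out.println(file.getFileId() + ": " + file.getContents().length + " bytes");
		}
	}
}
```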
@@ -24,6 +24,7 @@
*/
package net.runelite.cache;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -31,8 +32,10 @@ import java.util.Map;
import net.runelite.cache.definitions.AreaDefinition;
import net.runelite.cache.definitions.loaders.AreaLoader;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;

public class AreaManager
@@ -45,12 +48,16 @@ public class AreaManager
this.store = store;
}

public void load()
public void load() throws IOException
{
Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.CONFIGS);
Archive archive = index.getArchive(ConfigType.AREA.getId());

for (FSFile file : archive.getFiles())
byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

for (FSFile file : files.getFiles())
{
AreaLoader loader = new AreaLoader();
AreaDefinition area = loader.load(file.getContents(), file.getFileId());

@@ -33,8 +33,10 @@ import net.runelite.cache.definitions.InterfaceDefinition;
import net.runelite.cache.definitions.exporters.InterfaceExporter;
import net.runelite.cache.definitions.loaders.InterfaceLoader;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.util.Namer;

@@ -49,17 +51,20 @@ public class InterfaceManager
this.store = store;
}

public void load()
public void load() throws IOException
{
InterfaceLoader loader = new InterfaceLoader();

Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.INTERFACES);

for (Archive archive : index.getArchives())
{
int archiveId = archive.getArchiveId();
byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

for (FSFile file : archive.getFiles())
for (FSFile file : files.getFiles())
{
int fileId = file.getFileId();

@@ -33,8 +33,10 @@ import net.runelite.cache.definitions.ItemDefinition;
import net.runelite.cache.definitions.exporters.ItemExporter;
import net.runelite.cache.definitions.loaders.ItemLoader;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.util.Namer;

@@ -49,14 +51,18 @@ public class ItemManager
this.store = store;
}

public void load()
public void load() throws IOException
{
ItemLoader loader = new ItemLoader();

Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.CONFIGS);
Archive archive = index.getArchive(ConfigType.ITEM.getId());

for (FSFile f : archive.getFiles())
byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

for (FSFile f : files.getFiles())
{
ItemDefinition def = loader.load(f.getFileId(), f.getContents());
items.add(def);
@@ -75,7 +81,7 @@ public class ItemManager
for (ItemDefinition def : items)
{
ItemExporter exporter = new ItemExporter(def);

File targ = new File(out, def.id + ".json");
exporter.exportTo(targ);
}

@@ -39,14 +39,15 @@ import net.runelite.cache.definitions.OverlayDefinition;
import net.runelite.cache.definitions.SpriteDefinition;
import net.runelite.cache.definitions.TextureDefinition;
import net.runelite.cache.definitions.UnderlayDefinition;
import net.runelite.cache.definitions.loaders.ObjectLoader;
import net.runelite.cache.definitions.loaders.OverlayLoader;
import net.runelite.cache.definitions.loaders.SpriteLoader;
import net.runelite.cache.definitions.loaders.TextureLoader;
import net.runelite.cache.definitions.loaders.UnderlayLoader;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.region.Location;
import net.runelite.cache.region.Region;
@@ -367,12 +368,16 @@ public class MapImageDumper
logger.info("East most region: {}", regionLoader.getHighestX().getBaseX());
}

private void loadUnderlays(Store store)
private void loadUnderlays(Store store) throws IOException
{
Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.CONFIGS);
Archive archive = index.getArchive(ConfigType.UNDERLAY.getId());

for (FSFile file : archive.getFiles())
byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

for (FSFile file : files.getFiles())
{
UnderlayLoader loader = new UnderlayLoader();
UnderlayDefinition underlay = loader.load(file.getFileId(), file.getContents());
@@ -393,12 +398,16 @@ public class MapImageDumper
return null;
}

private void loadOverlays(Store store)
private void loadOverlays(Store store) throws IOException
{
Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.CONFIGS);
Archive archive = index.getArchive(ConfigType.OVERLAY.getId());

for (FSFile file : archive.getFiles())
byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

for (FSFile file : files.getFiles())
{
OverlayLoader loader = new OverlayLoader();
OverlayDefinition underlay = loader.load(file.getFileId(), file.getContents());
@@ -419,12 +428,16 @@ public class MapImageDumper
return null;
}

private void loadTextures(Store store)
private void loadTextures(Store store) throws IOException
{
Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.TEXTURES);
Archive archive = index.getArchive(0);

for (FSFile file : archive.getFiles())
byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

for (FSFile file : files.getFiles())
{
TextureLoader loader = new TextureLoader();
TextureDefinition texture = loader.load(file.getFileId(), file.getContents());
@@ -446,19 +459,15 @@ public class MapImageDumper
return null;
}

private void loadSprites()
private void loadSprites() throws IOException
{
Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.SPRITES);
final int mapsceneHash = Djb2.hash("mapscene");

for (Archive a : index.getArchives())
{
List<FSFile> files = a.getFiles();

assert files.size() == 1;

FSFile file = files.get(0);
byte[] contents = file.getContents();
byte[] contents = a.decompress(storage.loadArchive(a));

SpriteLoader loader = new SpriteLoader();
SpriteDefinition[] sprites = loader.load(a.getArchiveId(), contents);
@@ -515,16 +524,12 @@ public class MapImageDumper
return color.getRGB();
}

private void loadObjects(Store store)
private void loadObjects(Store store) throws IOException
{
Index index = store.getIndex(IndexType.CONFIGS);
Archive archive = index.getArchive(ConfigType.OBJECT.getId());

ObjectLoader loader = new ObjectLoader();

for (FSFile f : archive.getFiles())
ObjectManager manager = new ObjectManager(store);
manager.load();
for (ObjectDefinition def : manager.getObjects())
{
ObjectDefinition def = loader.load(f.getFileId(), f.getContents());
objects.put(def.getId(), def);
}
}

@@ -33,8 +33,10 @@ import net.runelite.cache.definitions.NpcDefinition;
import net.runelite.cache.definitions.exporters.NpcExporter;
import net.runelite.cache.definitions.loaders.NpcLoader;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.util.Namer;

@@ -53,10 +55,14 @@ public class NpcManager
{
NpcLoader loader = new NpcLoader();

Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.CONFIGS);
Archive archive = index.getArchive(ConfigType.NPC.getId());

for (FSFile f : archive.getFiles())
byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

for (FSFile f : files.getFiles())
{
NpcDefinition npc = loader.load(f.getFileId(), f.getContents());
npcs.add(npc);

@@ -33,8 +33,10 @@ import net.runelite.cache.definitions.ObjectDefinition;
import net.runelite.cache.definitions.exporters.ObjectExporter;
import net.runelite.cache.definitions.loaders.ObjectLoader;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.util.Namer;

@@ -53,10 +55,14 @@ public class ObjectManager
{
ObjectLoader loader = new ObjectLoader();

Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.CONFIGS);
Archive archive = index.getArchive(ConfigType.OBJECT.getId());

for (FSFile f : archive.getFiles())
byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

for (FSFile f : files.getFiles())
{
ObjectDefinition def = loader.load(f.getFileId(), f.getContents());
objects.add(def);

@@ -31,13 +31,12 @@ import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import net.runelite.cache.definitions.SpriteDefinition;
import net.runelite.cache.definitions.exporters.SpriteExporter;
import net.runelite.cache.definitions.loaders.SpriteLoader;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;

public class SpriteManager
@@ -50,18 +49,14 @@ public class SpriteManager
this.store = store;
}

public void load()
public void load() throws IOException
{
Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.SPRITES);

for (Archive a : index.getArchives())
{
List<FSFile> files = a.getFiles();

assert files.size() == 1;

FSFile file = files.get(0);
byte[] contents = file.getContents();
byte[] contents = a.decompress(storage.loadArchive(a));

SpriteLoader loader = new SpriteLoader();
SpriteDefinition[] defs = loader.load(a.getArchiveId(), contents);

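The sprite call sites above and in MapImageDumper take a slightly different path from the config managers: each sprite archive holds a single file whose payload is the whole container, so the caller decompresses the stored bytes directly instead of splitting them into ArchiveFiles. A condensed, hypothetical sketch of that pattern (a method fragment, assuming the net.runelite.cache classes from this diff are in scope); it is not part of the commit.

```java
// Illustrative fragment: load every sprite archive on demand.
void loadAllSprites(Store store) throws IOException
{
	Storage storage = store.getStorage();
	Index index = store.getIndex(IndexType.SPRITES);

	for (Archive a : index.getArchives())
	{
		// Single-file archive: decompress the whole stored container in one step.
		byte[] contents = a.decompress(storage.loadArchive(a));
		SpriteDefinition[] defs = new SpriteLoader().load(a.getArchiveId(), contents);
		// ... consume defs
	}
}
```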
@@ -24,13 +24,16 @@
*/
package net.runelite.cache;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import net.runelite.cache.definitions.TextureDefinition;
import net.runelite.cache.definitions.loaders.TextureLoader;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.ArchiveFiles;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;

public class TextureManager
@@ -43,14 +46,18 @@ public class TextureManager
this.store = store;
}

public void load()
public void load() throws IOException
{
Storage storage = store.getStorage();
Index index = store.getIndex(IndexType.TEXTURES);
Archive archive = index.getArchive(0);

byte[] archiveData = storage.loadArchive(archive);
ArchiveFiles files = archive.getFiles(archiveData);

TextureLoader loader = new TextureLoader();

for (FSFile file : archive.getFiles())
for (FSFile file : files.getFiles())
{
TextureDefinition texture = loader.load(file.getFileId(), file.getContents());
textures.add(texture);

@@ -44,11 +44,10 @@ import java.util.List;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.index.ArchiveData;
import net.runelite.cache.index.FileData;
import net.runelite.cache.index.IndexData;
import net.runelite.cache.protocol.decoders.HandshakeResponseDecoder;
import net.runelite.cache.protocol.encoders.ArchiveRequestEncoder;
@@ -275,7 +274,7 @@ public class CacheClient implements AutoCloseable
&& existing.getCrc() == ad.getCrc()
&& existing.getNameHash() == ad.getNameHash())
{
logger.info("Archive {}/{} in index {} is up to date",
logger.debug("Archive {}/{} in index {} is up to date",
ad.getId(), indexData.getArchives().length, index.getId());
continue;
}
@@ -310,13 +309,7 @@ public class CacheClient implements AutoCloseable
archive.setNameHash(ad.getNameHash());

// Add files
archive.clearFiles();
for (FileData fd : ad.getFiles())
{
FSFile file = new FSFile(fd.getId());
file.setNameHash(fd.getNameHash());
archive.addFile(file);
}
archive.setFileData(ad.getFiles());

CompletableFuture<FileResult> future = requestFile(index.getId(), ad.getId(), false);
future.handle((fr, ex) ->
@@ -332,13 +325,24 @@ public class CacheClient implements AutoCloseable
logger.warn("crc mismatch on downloaded archive {}/{}: {} != {}",
archive.getIndex().getId(), archive.getArchiveId(),
hash, archive.getCrc());
throw new RuntimeException("crc mismatch");
}

archive.setData(data);

if (watcher != null)
{
watcher.downloadComplete(archive);
watcher.downloadComplete(archive, data);
}
else
{
try
{
Storage storage = store.getStorage();
storage.saveArchive(archive, data);
}
catch (IOException ex1)
{
logger.warn("unable to save archive data", ex1);
}
}
return null;
});

@@ -29,5 +29,5 @@ import net.runelite.cache.fs.Archive;
@FunctionalInterface
public interface DownloadWatcher
{
void downloadComplete(Archive archive);
void downloadComplete(Archive archive, byte[] data);
}

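With the widened callback, a watcher now receives the raw downloaded container alongside the archive and can persist it itself, mirroring the CacheClient fallback above. A hedged sketch; the factory class and method are hypothetical, and the DownloadWatcher import is omitted because its package is not shown in this hunk.

```java
import java.io.IOException;
import java.io.UncheckedIOException;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.Store;

class SavingWatcherExample
{
	// Builds a watcher that writes each downloaded archive straight to storage.
	static DownloadWatcher savingWatcher(Store store)
	{
		return (Archive archive, byte[] data) ->
		{
			try
			{
				// data is the still-compressed container exactly as downloaded.
				store.getStorage().saveArchive(archive, data);
			}
			catch (IOException ex)
			{
				throw new UncheckedIOException(ex);
			}
		};
	}
}
```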
@@ -25,8 +25,7 @@
package net.runelite.cache.client;

import java.io.IOException;
import net.runelite.cache.fs.jagex.DataFile;
import net.runelite.cache.fs.jagex.DataFileReadResult;
import net.runelite.cache.fs.Container;

public class FileResult
{
@@ -63,7 +62,7 @@ public class FileResult

public void decompress(int[] keys) throws IOException
{
DataFileReadResult res = DataFile.decompress(compressedData, keys);
Container res = Container.decompress(compressedData, keys);

contents = res.data;
revision = res.revision;

@@ -24,11 +24,8 @@
*/
package net.runelite.cache.fs;

import net.runelite.cache.fs.jagex.DataFile;
import net.runelite.cache.fs.jagex.DataFileReadResult;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import net.runelite.cache.index.FileData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -38,15 +35,13 @@ public class Archive

private final Index index; // member of this index

private byte[] data; // raw data from the datafile, compressed/encrypted

private final int archiveId;
private int nameHash;
private int crc;
private int revision;
private int compression;

private final ArchiveFiles files = new ArchiveFiles();
private FileData[] fileData;
private byte[] hash; // used by webservice, sha256 hash of content

public Archive(Index index, int id)
{
@@ -61,7 +56,6 @@ public class Archive
hash = 47 * hash + this.archiveId;
hash = 47 * hash + this.nameHash;
hash = 47 * hash + this.revision;
hash = 47 * hash + Objects.hashCode(this.files);
return hash;
}

@@ -89,10 +83,6 @@ public class Archive
{
return false;
}
if (!Objects.equals(this.files, other.files))
{
return false;
}
return true;
}

@@ -101,68 +91,70 @@ public class Archive
return index;
}

public byte[] getData()
public byte[] decompress(byte[] data) throws IOException
{
return data;
return decompress(data, null);
}

public void setData(byte[] data)
public byte[] decompress(byte[] data, int[] keys) throws IOException
{
this.data = data;
}
if (data == null)
{
return null;
}

public FSFile addFile(FSFile file)
{
this.files.addFile(file);
return file;
}
byte[] encryptedData = data;

public FSFile findFile(int id)
{
return this.files.findFile(id);
}

public void decompressAndLoad(int[] keys) throws IOException
{
byte[] encryptedData = this.getData();

DataFileReadResult res = DataFile.decompress(encryptedData, keys);
if (res == null)
Container container = Container.decompress(encryptedData, keys);
if (container == null)
{
logger.warn("Unable to decrypt archive {}", this);
return;
return null;
}

byte[] decompressedData = res.data;
byte[] decompressedData = container.data;

if (this.crc != res.crc)
if (this.crc != container.crc)
{
logger.warn("crc mismatch for archive {}/{}", index.getId(), this.getArchiveId());
this.setCrc(res.crc);
throw new IOException("CRC mismatch for " + index.getId() + "/" + this.getArchiveId());
}

if (res.revision != -1 && this.getRevision() != res.revision)
if (container.revision != -1 && this.getRevision() != container.revision)
{
// compressed data doesn't always include a revision, but check it if it does
logger.warn("revision mismatch for archive {}/{}, expected {} was {}",
index.getId(), this.getArchiveId(),
this.getRevision(), res.revision);
this.getRevision(), container.revision);
// I've seen this happen with vanilla caches where the
// revision in the index data differs from the revision
// stored for the archive data on disk... I assume this
// is more correct
this.setRevision(res.revision);
this.setRevision(container.revision);
}

setCompression(res.compression);

files.loadContents(decompressedData);
this.setData(null); // now that we've loaded it, clean it so it doesn't get written back
setCompression(container.compression);
return decompressedData;
}

public byte[] saveContents()
public ArchiveFiles getFiles(byte[] data) throws IOException
{
return files.saveContents();
return getFiles(data, null);
}

public ArchiveFiles getFiles(byte[] data, int[] keys) throws IOException
{
byte[] decompressedData = decompress(data, keys);

ArchiveFiles files = new ArchiveFiles();
for (FileData fileEntry : fileData)
{
FSFile file = new FSFile(fileEntry.getId());
file.setNameHash(fileEntry.getNameHash());
files.addFile(file);
}
files.loadContents(decompressedData);
return files;
}

public int getArchiveId()
@@ -210,13 +202,23 @@ public class Archive
this.compression = compression;
}

public List<FSFile> getFiles()
public FileData[] getFileData()
{
return files.getFiles();
return fileData;
}

public void clearFiles()
public void setFileData(FileData[] fileData)
{
files.clear();
this.fileData = fileData;
}

public byte[] getHash()
{
return hash;
}

public void setHash(byte[] hash)
{
this.hash = hash;
}
}

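For encrypted archives the same two-step read applies, with XTEA keys passed through: decompress() verifies the container CRC (throwing IOException on mismatch) and reconciles the revision, and getFiles() then splits the payload using the archive's FileData. A small sketch of the keyed variant; the helper is hypothetical and not part of the commit.

```java
// Illustrative fragment, assuming the Archive/Storage API as changed above;
// keys may be null for unencrypted archives.
ArchiveFiles readArchiveFiles(Storage storage, Archive archive, int[] keys) throws IOException
{
	byte[] raw = storage.loadArchive(archive); // compressed (and possibly XTEA-encrypted) container
	return archive.getFiles(raw, keys);        // decrypt, decompress, CRC-check, split into FSFiles
}
```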
cache/src/main/java/net/runelite/cache/fs/Container.java (new file, 216 lines)
@@ -0,0 +1,216 @@
/*
* Copyright (c) 2016-2017, Adam <Adam@sigterm.info>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.cache.fs;

import static com.google.common.primitives.Bytes.concat;
import com.google.common.primitives.Ints;
import java.io.IOException;
import net.runelite.cache.fs.jagex.CompressionType;
import net.runelite.cache.io.InputStream;
import net.runelite.cache.io.OutputStream;
import net.runelite.cache.util.BZip2;
import net.runelite.cache.util.Crc32;
import net.runelite.cache.util.GZip;
import net.runelite.cache.util.Xtea;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Container
{
private static final Logger logger = LoggerFactory.getLogger(Container.class);

public byte[] data;
public int compression; // compression
public int revision;
public int crc; // crc of compressed data

public Container(int compression, int revision)
{
this.compression = compression;
this.revision = revision;
}

public void compress(byte[] data, int[] keys) throws IOException
{
OutputStream stream = new OutputStream();

byte[] compressedData;
int length;
switch (compression)
{
case CompressionType.NONE:
compressedData = data;
length = compressedData.length;
break;
case CompressionType.BZ2:
compressedData = concat(Ints.toByteArray(data.length), BZip2.compress(data));
length = compressedData.length - 4;
break;
case CompressionType.GZ:
compressedData = concat(Ints.toByteArray(data.length), GZip.compress(data));
length = compressedData.length - 4;
break;
default:
throw new RuntimeException("Unknown compression type");
}

compressedData = encrypt(compressedData, compressedData.length, keys);

stream.writeByte(compression);
stream.writeInt(length);

stream.writeBytes(compressedData);
if (revision != -1)
{
stream.writeShort(revision);
}

this.data = stream.flip();
}

public static Container decompress(byte[] b, int[] keys) throws IOException
{
InputStream stream = new InputStream(b);

int compression = stream.readUnsignedByte();
int compressedLength = stream.readInt();
if (compressedLength < 0 || compressedLength > 1000000)
{
throw new RuntimeException("Invalid data");
}

Crc32 crc32 = new Crc32();
crc32.update(b, 0, 5); // compression + length

byte[] data;
int revision = -1;
switch (compression)
{
case CompressionType.NONE:
{
byte[] encryptedData = new byte[compressedLength];
stream.readBytes(encryptedData, 0, compressedLength);

crc32.update(encryptedData, 0, compressedLength);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);

if (stream.remaining() >= 2)
{
revision = stream.readUnsignedShort();
assert revision != -1;
}

data = decryptedData;

break;
}
case CompressionType.BZ2:
{
byte[] encryptedData = new byte[compressedLength + 4];
stream.readBytes(encryptedData);

crc32.update(encryptedData, 0, encryptedData.length);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);

if (stream.remaining() >= 2)
{
revision = stream.readUnsignedShort();
assert revision != -1;
}

stream = new InputStream(decryptedData);

int decompressedLength = stream.readInt();
data = BZip2.decompress(stream.getRemaining(), compressedLength);

if (data == null)
{
return null;
}

assert data.length == decompressedLength;

break;
}
case CompressionType.GZ:
{
byte[] encryptedData = new byte[compressedLength + 4];
stream.readBytes(encryptedData);

crc32.update(encryptedData, 0, encryptedData.length);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);

if (stream.remaining() >= 2)
{
revision = stream.readUnsignedShort();
assert revision != -1;
}

stream = new InputStream(decryptedData);

int decompressedLength = stream.readInt();
data = GZip.decompress(stream.getRemaining(), compressedLength);

if (data == null)
{
return null;
}

assert data.length == decompressedLength;

break;
}
default:
throw new RuntimeException("Unknown decompression type");
}

Container container = new Container(compression, revision);
container.data = data;
container.crc = crc32.getHash();
return container;
}

private static byte[] decrypt(byte[] data, int length, int[] keys)
{
if (keys == null)
{
return data;
}

Xtea xtea = new Xtea(keys);
return xtea.decrypt(data, length);
}

private static byte[] encrypt(byte[] data, int length, int[] keys)
{
if (keys == null)
{
return data;
}

Xtea xtea = new Xtea(keys);
return xtea.encrypt(data, length);
}
}

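Container now owns the container-level pack/unpack logic that DataFile.compress/decompress used to provide. A small round-trip sketch, assuming only the class added above; the GZ choice and the helper class are illustrative, not part of the commit.

```java
import java.io.IOException;
import net.runelite.cache.fs.Container;
import net.runelite.cache.fs.jagex.CompressionType;

class ContainerRoundTripExample
{
	static byte[] roundTrip(byte[] payload) throws IOException
	{
		// Pack: GZ compression, no revision trailer, no XTEA keys.
		Container out = new Container(CompressionType.GZ, -1);
		out.compress(payload, null);
		byte[] packed = out.data; // compression byte + length + compressed payload

		// Unpack: re-parses the header, decompresses, and records crc/revision.
		Container in = Container.decompress(packed, null);
		return in.data; // equals payload
	}
}
```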
@@ -24,20 +24,13 @@
*/
package net.runelite.cache.fs;

import net.runelite.cache.fs.jagex.DataFile;
import net.runelite.cache.fs.jagex.CompressionType;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import net.runelite.cache.index.ArchiveData;
import net.runelite.cache.index.FileData;
import net.runelite.cache.index.IndexData;
import net.runelite.cache.util.Crc32;
import net.runelite.cache.util.Djb2;
import net.runelite.cache.util.XteaKeyManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -45,11 +38,8 @@ public class Index
{
private static final Logger logger = LoggerFactory.getLogger(Index.class);

private final Store store;
private final int id;

private XteaKeyManager xteaManager;

private int protocol = 7;
private boolean named = true;
private int revision;
@@ -58,9 +48,8 @@ public class Index

private final List<Archive> archives = new ArrayList<>();

public Index(Store store, int id)
public Index(int id)
{
this.store = store;
this.id = id;
}

@@ -101,16 +90,6 @@ public class Index
return true;
}

public XteaKeyManager getXteaManager()
{
return xteaManager;
}

public void setXteaManager(XteaKeyManager xteaManager)
{
this.xteaManager = xteaManager;
}

public int getId()
{
return id;
@@ -203,48 +182,6 @@ public class Index
return null;
}

public void rebuildCrc() throws IOException
{
for (Archive a : archives)
{
int rev; // used for determining what part of compressedData to crc
byte[] compressedData;

if (a.getData() != null)
{
compressedData = a.getData(); // data was never decompressed or loaded
rev = -1; // assume that this data has no revision?
}
else
{
byte[] fileData = a.saveContents();
rev = a.getRevision();
compressedData = DataFile.compress(fileData, a.getCompression(), a.getRevision(), null);
}

int length = rev != -1 ? compressedData.length - 2 : compressedData.length;
Crc32 crc32 = new Crc32();
crc32.update(compressedData, 0, length);

int crc = crc32.getHash();

a.setCrc(crc);
}

Crc32 crc = new Crc32();
byte[] indexData = toIndexData().writeIndexData();

ByteBuf b = Unpooled.buffer(5, 5);
b.writeByte((byte) CompressionType.NONE);
b.writeInt(indexData.length);

crc.update(b.array(), 0, 5); // crc includes compression type and length
crc.update(indexData, 0, indexData.length);

int hash = crc.getHash();
this.setCrc(hash);
}

public IndexData toIndexData()
{
IndexData data = new IndexData();
@@ -264,16 +201,8 @@ public class Index
ad.setCrc(archive.getCrc());
ad.setRevision(archive.getRevision());

FileData[] files = new FileData[archive.getFiles().size()];
FileData[] files = archive.getFileData();
ad.setFiles(files);

int idx2 = 0;
for (FSFile file : archive.getFiles())
{
FileData fd = files[idx2++] = new FileData();
fd.setId(file.getFileId());
fd.setNameHash(file.getNameHash());
}
}
return data;
}

@@ -36,4 +36,8 @@ public interface Storage extends AutoCloseable
void load(Store store) throws IOException;

void save(Store store) throws IOException;

byte[] loadArchive(Archive archive) throws IOException;

void saveArchive(Archive archive, byte[] data) throws IOException;
}

@@ -33,7 +33,6 @@ import java.util.List;
import java.util.Objects;
import net.runelite.cache.IndexType;
import net.runelite.cache.fs.jagex.DiskStorage;
import net.runelite.cache.util.XteaKeyManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -48,14 +47,6 @@ public final class Store implements Closeable
{
storage = new DiskStorage(folder);
storage.init(this);

Index maps = this.findIndex(IndexType.MAPS.getNumber());
if (maps != null)
{
XteaKeyManager mapKeys = new XteaKeyManager();
mapKeys.loadKeys();
maps.setXteaManager(mapKeys);
}
}

public Store(Storage storage) throws IOException
@@ -65,6 +56,11 @@ public final class Store implements Closeable
storage.init(this);
}

public Storage getStorage()
{
return storage;
}

@Override
public void close() throws IOException
{
@@ -108,7 +104,7 @@ public final class Store implements Closeable
}
}

Index index = new Index(this, id);
Index index = new Index(id);
this.indexes.add(index);

return index;
@@ -120,18 +116,6 @@ public final class Store implements Closeable
indexes.remove(index);
}

/*
* we rebuild data differently, so the CRCs aren't right.
* rebuild them.
*/
public void rebuildCrc() throws IOException
{
for (Index i : indexes)
{
i.rebuildCrc();
}
}

public void load() throws IOException
{
storage.load(this);

@@ -24,20 +24,12 @@
*/
package net.runelite.cache.fs.jagex;

import static com.google.common.primitives.Bytes.concat;
import com.google.common.primitives.Ints;
import java.io.Closeable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import net.runelite.cache.util.BZip2;
import net.runelite.cache.io.InputStream;
import net.runelite.cache.io.OutputStream;
import net.runelite.cache.util.Crc32;
import net.runelite.cache.util.GZip;
import net.runelite.cache.util.Xtea;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -177,7 +169,7 @@ public class DataFile implements Closeable
return buffer.array();
}

public DataFileWriteResult write(int indexId, int archiveId, byte[] compressedData, int revision) throws IOException
public DataFileWriteResult write(int indexId, int archiveId, byte[] compressedData) throws IOException
{
int sector;
int startSector;
@@ -270,174 +262,6 @@ public class DataFile implements Closeable
DataFileWriteResult res = new DataFileWriteResult();
res.sector = startSector;
res.compressedLength = compressedData.length;

int length = revision != -1 ? compressedData.length - 2 : compressedData.length;
Crc32 crc32 = new Crc32();
crc32.update(compressedData, 0, length);
res.crc = crc32.getHash();
return res;
}

public static DataFileReadResult decompress(byte[] b, int[] keys) throws IOException
{
InputStream stream = new InputStream(b);

int compression = stream.readUnsignedByte();
int compressedLength = stream.readInt();
if (compressedLength < 0 || compressedLength > 1000000)
{
throw new RuntimeException("Invalid data");
}

Crc32 crc32 = new Crc32();
crc32.update(b, 0, 5); // compression + length

byte[] data;
int revision = -1;
switch (compression)
{
case CompressionType.NONE:
{
byte[] encryptedData = new byte[compressedLength];
stream.readBytes(encryptedData, 0, compressedLength);

crc32.update(encryptedData, 0, compressedLength);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);

if (stream.remaining() >= 2)
{
revision = stream.readUnsignedShort();
assert revision != -1;
}

data = decryptedData;

break;
}
case CompressionType.BZ2:
{
byte[] encryptedData = new byte[compressedLength + 4];
stream.readBytes(encryptedData);

crc32.update(encryptedData, 0, encryptedData.length);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);

if (stream.remaining() >= 2)
{
revision = stream.readUnsignedShort();
assert revision != -1;
}

stream = new InputStream(decryptedData);

int decompressedLength = stream.readInt();
data = BZip2.decompress(stream.getRemaining(), compressedLength);

if (data == null)
{
return null;
}

assert data.length == decompressedLength;

break;
}
case CompressionType.GZ:
{
byte[] encryptedData = new byte[compressedLength + 4];
stream.readBytes(encryptedData);

crc32.update(encryptedData, 0, encryptedData.length);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);

if (stream.remaining() >= 2)
{
revision = stream.readUnsignedShort();
assert revision != -1;
}

stream = new InputStream(decryptedData);

int decompressedLength = stream.readInt();
data = GZip.decompress(stream.getRemaining(), compressedLength);

if (data == null)
{
return null;
}

assert data.length == decompressedLength;

break;
}
default:
throw new RuntimeException("Unknown decompression type");
}

DataFileReadResult res = new DataFileReadResult();
res.data = data;
res.revision = revision;
res.crc = crc32.getHash();
res.compression = compression;
return res;
}

public static byte[] compress(byte[] data, int compression, int revision, int[] keys) throws IOException
{
OutputStream stream = new OutputStream();
byte[] compressedData;
int length;
switch (compression)
{
case CompressionType.NONE:
compressedData = data;
length = compressedData.length;
break;
case CompressionType.BZ2:
compressedData = concat(Ints.toByteArray(data.length), BZip2.compress(data));
length = compressedData.length - 4;
break;
case CompressionType.GZ:
compressedData = concat(Ints.toByteArray(data.length), GZip.compress(data));
length = compressedData.length - 4;
break;
default:
throw new RuntimeException("Unknown compression type");
}

compressedData = encrypt(compressedData, compressedData.length, keys);

stream.writeByte(compression);
stream.writeInt(length);

stream.writeBytes(compressedData);
if (revision != -1)
{
stream.writeShort(revision);
}

return stream.flip();
}

private static byte[] decrypt(byte[] data, int length, int[] keys)
{
if (keys == null)
{
return data;
}

Xtea xtea = new Xtea(keys);
return xtea.decrypt(data, length);
}

private static byte[] encrypt(byte[] data, int length, int[] keys)
{
if (keys == null)
{
return data;
}

Xtea xtea = new Xtea(keys);
return xtea.encrypt(data, length);
}
}

@@ -1,34 +0,0 @@
/*
* Copyright (c) 2016-2017, Adam <Adam@sigterm.info>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

package net.runelite.cache.fs.jagex;

public class DataFileReadResult
{
public byte[] data;
public int revision;
public int crc; // crc of compressed data
public int compression; // compression method data was compressed with
}
@@ -28,5 +28,4 @@ package net.runelite.cache.fs.jagex;
public class DataFileWriteResult
{
public int sector, compressedLength;
public int crc; // crc of compressed data
}

@@ -24,19 +24,20 @@
*/
package net.runelite.cache.fs.jagex;

import com.google.common.primitives.Ints;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.FSFile;
import net.runelite.cache.fs.Container;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.index.ArchiveData;
import net.runelite.cache.index.FileData;
import net.runelite.cache.index.IndexData;
import net.runelite.cache.util.Crc32;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -108,13 +109,19 @@ public class DiskStorage implements Storage
}
}

public byte[] readIndex(int indexId) throws IOException
{
IndexEntry entry = index255.read(indexId);
byte[] indexData = data.read(index255.getIndexFileId(), entry.getId(), entry.getSector(), entry.getLength());
return indexData;
}

private void loadIndex(Index index) throws IOException
{
logger.trace("Loading index {}", index.getId());

IndexEntry entry = index255.read(index.getId());
byte[] indexData = data.read(index255.getIndexFileId(), entry.getId(), entry.getSector(), entry.getLength());
DataFileReadResult res = DataFile.decompress(indexData, null);
byte[] indexData = readIndex(index.getId());
Container res = Container.decompress(indexData, null);
byte[] data = res.data;

IndexData id = new IndexData();
@@ -130,28 +137,18 @@ public class DiskStorage implements Storage
archive.setNameHash(ad.getNameHash());
archive.setCrc(ad.getCrc());
archive.setRevision(ad.getRevision());
archive.setFileData(ad.getFiles());

assert ad.getFiles().length > 0;

for (FileData fd : ad.getFiles())
{
FSFile file = new FSFile(fd.getId());
file.setNameHash(fd.getNameHash());
archive.addFile(file);
}
}

index.setCrc(res.crc);
index.setCompression(res.compression);
assert res.revision == -1;

for (Archive archive : new ArrayList<>(index.getArchives()))
{
loadArchive(archive);
}
}

private void loadArchive(Archive archive) throws IOException
@Override
public byte[] loadArchive(Archive archive) throws IOException
{
Index index = archive.getIndex();
IndexFile indexFile = getIndex(index.getId());
@@ -162,8 +159,7 @@ public class DiskStorage implements Storage
if (entry == null)
{
logger.debug("can't read archive " + archive.getArchiveId() + " from index " + index.getId());
index.getArchives().remove(archive); // is this correct?
return;
return null;
}

assert entry.getId() == archive.getArchiveId();
@@ -172,28 +168,12 @@ public class DiskStorage implements Storage
archive.getArchiveId(), index.getId(), entry.getSector(), entry.getLength());

byte[] archiveData = data.read(index.getId(), entry.getId(), entry.getSector(), entry.getLength());
archive.setData(archiveData);

if (index.getXteaManager() != null)
{
return; // can't decrypt this yet
}

archive.decompressAndLoad(null);
return archiveData;
}

@Override
public void save(Store store) throws IOException
{
logger.debug("Clearing data and indexes in preparation for store save");

data.clear();

for (IndexFile indexFile : indexFiles)
{
indexFile.clear();
}

logger.debug("Saving store");

for (Index i : store.getIndexes())
@@ -204,49 +184,46 @@ public class DiskStorage implements Storage

private void saveIndex(Index index) throws IOException
{
// This updates archive CRCs for writeIndexData
for (Archive archive : index.getArchives())
{
saveArchive(archive);
}

IndexData indexData = index.toIndexData();
byte[] data = indexData.writeIndexData();

byte[] compressedData = DataFile.compress(data, index.getCompression(), -1, null); // index data revision is always -1
DataFileWriteResult res = this.data.write(index255.getIndexFileId(), index.getId(), compressedData, index.getRevision());
Container container = new Container(index.getCompression(), -1); // index data revision is always -1
container.compress(data, null);
byte[] compressedData = container.data;
DataFileWriteResult res = this.data.write(index255.getIndexFileId(), index.getId(), compressedData);

index255.write(new IndexEntry(index255, index.getId(), res.sector, res.compressedLength));

index.setCrc(res.crc);
Crc32 crc = new Crc32();
crc.update(compressedData, 0, compressedData.length);
index.setCrc(crc.getHash());
}

private void saveArchive(Archive a) throws IOException
@Override
public void saveArchive(Archive a, byte[] archiveData) throws IOException
{
Index index = a.getIndex();
IndexFile indexFile = getIndex(index.getId());
assert indexFile.getIndexFileId() == index.getId();

int rev; // used for determining what part of compressedData to crc
byte[] compressedData;

if (a.getData() != null)
{
compressedData = a.getData(); // data was never decompressed or loaded
rev = -1; // assume that this data has no revision?
}
else
{
byte[] fileData = a.saveContents();
rev = a.getRevision();
compressedData = DataFile.compress(fileData, a.getCompression(), a.getRevision(), null);
}

DataFileWriteResult res = data.write(index.getId(), a.getArchiveId(), compressedData, rev);
DataFileWriteResult res = data.write(index.getId(), a.getArchiveId(), archiveData);
indexFile.write(new IndexEntry(indexFile, a.getArchiveId(), res.sector, res.compressedLength));

logger.trace("Saved archive {}/{} at sector {}, compressed length {}", index.getId(), a.getArchiveId(), res.sector, res.compressedLength);
byte compression = archiveData[0];
int compressedSize = Ints.fromBytes(archiveData[1], archiveData[2],
archiveData[3], archiveData[4]);

a.setCrc(res.crc);
// don't crc the appended revision, if it is there
int length = 1 // compression type
+ 4 // compressed size
+ compressedSize
+ (compression != CompressionType.NONE ? 4 : 0);

Crc32 crc = new Crc32();
crc.update(archiveData, 0, length);
a.setCrc(crc.getHash());

logger.trace("Saved archive {}/{} at sector {}, compressed length {}",
index.getId(), a.getArchiveId(), res.sector, res.compressedLength);
}
}

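The CRC in saveArchive above is computed over the packed container minus any trailing two-byte revision, using the layout the container header declares. The same arithmetic, restated as a standalone hypothetical helper (not part of the commit):

```java
import com.google.common.primitives.Ints;
import net.runelite.cache.fs.jagex.CompressionType;

final class ContainerCrcLength
{
	// Bytes of the packed container to CRC: header + payload, excluding the
	// optional 2-byte revision trailer appended by the cache writer.
	static int crcLength(byte[] archiveData)
	{
		byte compression = archiveData[0];
		int compressedSize = Ints.fromBytes(archiveData[1], archiveData[2], archiveData[3], archiveData[4]);
		return 1                                             // compression type
			+ 4                                              // compressed size field
			+ compressedSize                                 // compressed payload
			+ (compression != CompressionType.NONE ? 4 : 0); // decompressed-size prefix for BZ2/GZ
	}
}
```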
@@ -30,6 +30,7 @@ import java.util.List;
import net.runelite.cache.IndexType;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.util.XteaKeyManager;
import org.slf4j.Logger;
@@ -53,7 +54,8 @@ public class RegionLoader
{
this.store = store;
index = store.getIndex(IndexType.MAPS);
keyManager = index.getXteaManager();
keyManager = new XteaKeyManager();
keyManager.loadKeys();
}

public void loadRegions() throws IOException
@@ -71,6 +73,7 @@ public class RegionLoader
int x = i >> 8;
int y = i & 0xFF;

Storage storage = store.getStorage();
Archive map = index.findArchiveByName("m" + x + "_" + y);
Archive land = index.findArchiveByName("l" + x + "_" + y);

@@ -81,12 +84,7 @@ public class RegionLoader
return null;
}

assert map.getFiles().size() == 1;
assert land.getFiles().size() == 1;

map.decompressAndLoad(null);

byte[] data = map.getFiles().get(0).getContents();
byte[] data = map.decompress(storage.loadArchive(map));

Region region = new Region(i);
region.loadTerrain(data);
@@ -96,9 +94,7 @@ public class RegionLoader
{
try
{
land.decompressAndLoad(keys);

data = land.getFiles().get(0).getContents();
data = land.decompress(storage.loadArchive(land), keys);
region.loadLocations(data);
}
catch (IOException ex)

@@ -32,10 +32,12 @@ import io.netty.channel.SimpleChannelInboundHandler;
import java.io.IOException;
import java.util.Arrays;
import net.runelite.cache.fs.Archive;
import net.runelite.cache.fs.Container;
import net.runelite.cache.fs.Index;
import net.runelite.cache.fs.Storage;
import net.runelite.cache.fs.Store;
import net.runelite.cache.fs.jagex.CompressionType;
import net.runelite.cache.fs.jagex.DataFile;
import net.runelite.cache.fs.jagex.DiskStorage;
import net.runelite.cache.protocol.packets.ArchiveRequestPacket;
import net.runelite.cache.protocol.packets.ArchiveResponsePacket;
import org.slf4j.Logger;
@@ -88,12 +90,10 @@ public class ArchiveRequestHandler extends SimpleChannelInboundHandler<ArchiveRe
}
else
{
Index i = store.findIndex(archiveId);
assert i != null;

byte[] indexData = i.toIndexData().writeIndexData();

compressed = compress(CompressionType.NONE, indexData);
// Requires disk storage. Use packed index data from
// store as its crc matches
DiskStorage storage = (DiskStorage) store.getStorage();
compressed = storage.readIndex(archiveId);
}

ArchiveResponsePacket response = new ArchiveResponsePacket();
@@ -114,33 +114,31 @@ public class ArchiveRequestHandler extends SimpleChannelInboundHandler<ArchiveRe
Archive archive = i.getArchive(archiveId);
assert archive != null;

byte[] packed;
if (archive.getData() != null)
Storage storage = store.getStorage();
byte[] packed = storage.loadArchive(archive); // is compressed, includes length and type

if (packed == null)
{
packed = archive.getData(); // is compressed, includes length and type

byte compression = packed[0];
int compressedSize = Ints.fromBytes(packed[1], packed[2],
packed[3], packed[4]);

// size the client expects the data to be
int expectedSize = 1 // compression
+ 4 // compressed size
+ compressedSize
+ (compression != CompressionType.NONE ? 4 : 0);
if (packed.length != expectedSize)
{
// It may have the archive revision appended at the end.
// The data the client writes will have it, but the data fetched from
// the update server will never have it
assert packed.length - expectedSize == 2 : "packed length != expected size";
packed = Arrays.copyOf(packed, packed.length - 2);
}
logger.warn("Missing archive {}/{}", index, archiveId);
return; // is it possible to notify the client of an error with this?
}
else

byte compression = packed[0];
int compressedSize = Ints.fromBytes(packed[1], packed[2],
packed[3], packed[4]);

// size the client expects the data to be
int expectedSize = 1 // compression type
+ 4 // compressed size
+ compressedSize
+ (compression != CompressionType.NONE ? 4 : 0);
if (packed.length != expectedSize)
{
byte[] data = archive.saveContents();
packed = compress(archive.getCompression(), data);
// It may have the archive revision appended at the end.
// The data the client writes will have it, but the data fetched from
// the update server will never have it
assert packed.length - expectedSize == 2 : "packed length != expected size";
packed = Arrays.copyOf(packed, packed.length - 2);
}

ArchiveResponsePacket response = new ArchiveResponsePacket();
@@ -153,6 +151,8 @@ public class ArchiveRequestHandler extends SimpleChannelInboundHandler<ArchiveRe

private byte[] compress(int compression, byte[] data) throws IOException
{
return DataFile.compress(data, compression, -1, null);
Container container = new Container(compression, -1);
container.compress(data, null);
return container.data;
}
}