From 4a14c26529045a68be6fecd9b14badb6f19d25e2 Mon Sep 17 00:00:00 2001 From: barncastle Date: Thu, 8 Aug 2019 19:54:51 +0100 Subject: [PATCH 01/40] rename IO library --- .../Attributes/CardinalityAttribute.cs | 2 +- .../Attributes/IndexAttribute.cs | 2 +- .../Attributes/LocaleAttribute.cs | 2 +- .../Common/BitReader.cs | 2 +- .../Common/DBStructs.cs | 2 +- .../Common/HTFXStructs.cs | 2 +- {DBFileReaderLib => DBCD.IO}/Common/IDBRow.cs | 2 +- {DBFileReaderLib => DBCD.IO}/DB2Flags.cs | 2 +- .../DBCD.IO.csproj | 0 {DBFileReaderLib => DBCD.IO}/DBReader.cs | 4 +- {DBFileReaderLib => DBCD.IO}/Extensions.cs | 2 +- {DBFileReaderLib => DBCD.IO}/FieldCache.cs | 4 +- {DBFileReaderLib => DBCD.IO}/HotfixReader.cs | 4 +- .../Readers/BaseReader.cs | 4 +- .../Readers/HTFXReader.cs | 4 +- .../Readers/WDB2Reader.cs | 4 +- .../Readers/WDB3Reader.cs | 4 +- .../Readers/WDB4Reader.cs | 4 +- .../Readers/WDB5Reader.cs | 4 +- .../Readers/WDB6Reader.cs | 4 +- .../Readers/WDBCReader.cs | 4 +- .../Readers/WDC1Reader.cs | 4 +- .../Readers/WDC2Reader.cs | 4 +- .../Readers/WDC3Reader.cs | 4 +- {DBFileReaderLib => DBCD.IO}/Storage.cs | 2 +- DBCD.Tests/TestDBDProvider.cs | 16 +++ DBCD.Tests/UnitTest1.cs | 114 +++++++++++++++++- DBCD.sln | 4 +- DBCD/DBCD.cs | 2 +- DBCD/DBCD.csproj | 2 +- DBCD/DBCDBuilder.cs | 4 +- DBCD/DBCDStorage.cs | 2 +- 32 files changed, 170 insertions(+), 50 deletions(-) rename {DBFileReaderLib => DBCD.IO}/Attributes/CardinalityAttribute.cs (83%) rename {DBFileReaderLib => DBCD.IO}/Attributes/IndexAttribute.cs (67%) rename {DBFileReaderLib => DBCD.IO}/Attributes/LocaleAttribute.cs (93%) rename {DBFileReaderLib => DBCD.IO}/Common/BitReader.cs (98%) rename {DBFileReaderLib => DBCD.IO}/Common/DBStructs.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Common/HTFXStructs.cs (97%) rename {DBFileReaderLib => DBCD.IO}/Common/IDBRow.cs (88%) rename {DBFileReaderLib => DBCD.IO}/DB2Flags.cs (90%) rename DBFileReaderLib/DBFileReaderLib.csproj => DBCD.IO/DBCD.IO.csproj (100%) rename {DBFileReaderLib => DBCD.IO}/DBReader.cs (98%) rename {DBFileReaderLib => DBCD.IO}/Extensions.cs (99%) rename {DBFileReaderLib => DBCD.IO}/FieldCache.cs (95%) rename {DBFileReaderLib => DBCD.IO}/HotfixReader.cs (97%) rename {DBFileReaderLib => DBCD.IO}/Readers/BaseReader.cs (97%) rename {DBFileReaderLib => DBCD.IO}/Readers/HTFXReader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDB2Reader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDB3Reader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDB4Reader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDB5Reader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDB6Reader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDBCReader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDC1Reader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDC2Reader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Readers/WDC3Reader.cs (99%) rename {DBFileReaderLib => DBCD.IO}/Storage.cs (94%) create mode 100644 DBCD.Tests/TestDBDProvider.cs diff --git a/DBFileReaderLib/Attributes/CardinalityAttribute.cs b/DBCD.IO/Attributes/CardinalityAttribute.cs similarity index 83% rename from DBFileReaderLib/Attributes/CardinalityAttribute.cs rename to DBCD.IO/Attributes/CardinalityAttribute.cs index b34161f..0254290 100644 --- a/DBFileReaderLib/Attributes/CardinalityAttribute.cs +++ b/DBCD.IO/Attributes/CardinalityAttribute.cs @@ -1,6 +1,6 @@ using System; -namespace DBFileReaderLib.Attributes +namespace DBCD.IO.Attributes { public class CardinalityAttribute : Attribute { diff --git 
a/DBFileReaderLib/Attributes/IndexAttribute.cs b/DBCD.IO/Attributes/IndexAttribute.cs similarity index 67% rename from DBFileReaderLib/Attributes/IndexAttribute.cs rename to DBCD.IO/Attributes/IndexAttribute.cs index 4b43cdb..bb40c3c 100644 --- a/DBFileReaderLib/Attributes/IndexAttribute.cs +++ b/DBCD.IO/Attributes/IndexAttribute.cs @@ -1,6 +1,6 @@ using System; -namespace DBFileReaderLib.Attributes +namespace DBCD.IO.Attributes { public class IndexAttribute : Attribute { diff --git a/DBFileReaderLib/Attributes/LocaleAttribute.cs b/DBCD.IO/Attributes/LocaleAttribute.cs similarity index 93% rename from DBFileReaderLib/Attributes/LocaleAttribute.cs rename to DBCD.IO/Attributes/LocaleAttribute.cs index 726e1a4..e9a6d97 100644 --- a/DBFileReaderLib/Attributes/LocaleAttribute.cs +++ b/DBCD.IO/Attributes/LocaleAttribute.cs @@ -2,7 +2,7 @@ using System.Collections.Generic; using System.Text; -namespace DBFileReaderLib.Attributes +namespace DBCD.IO.Attributes { public class LocaleAttribute : Attribute { diff --git a/DBFileReaderLib/Common/BitReader.cs b/DBCD.IO/Common/BitReader.cs similarity index 98% rename from DBFileReaderLib/Common/BitReader.cs rename to DBCD.IO/Common/BitReader.cs index e2a7a77..87e3167 100644 --- a/DBFileReaderLib/Common/BitReader.cs +++ b/DBCD.IO/Common/BitReader.cs @@ -2,7 +2,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Common +namespace DBCD.IO.Common { class BitReader { diff --git a/DBFileReaderLib/Common/DBStructs.cs b/DBCD.IO/Common/DBStructs.cs similarity index 99% rename from DBFileReaderLib/Common/DBStructs.cs rename to DBCD.IO/Common/DBStructs.cs index c1e1e62..8004ccd 100644 --- a/DBFileReaderLib/Common/DBStructs.cs +++ b/DBCD.IO/Common/DBStructs.cs @@ -4,7 +4,7 @@ #pragma warning disable CS0649 #pragma warning disable IDE0044 -namespace DBFileReaderLib.Common +namespace DBCD.IO.Common { struct FieldMetaData { diff --git a/DBFileReaderLib/Common/HTFXStructs.cs b/DBCD.IO/Common/HTFXStructs.cs similarity index 97% rename from DBFileReaderLib/Common/HTFXStructs.cs rename to DBCD.IO/Common/HTFXStructs.cs index cacc0d5..bbb7ba8 100644 --- a/DBFileReaderLib/Common/HTFXStructs.cs +++ b/DBCD.IO/Common/HTFXStructs.cs @@ -1,6 +1,6 @@ #pragma warning disable CS0169 -namespace DBFileReaderLib.Common +namespace DBCD.IO.Common { interface IHotfixEntry { diff --git a/DBFileReaderLib/Common/IDBRow.cs b/DBCD.IO/Common/IDBRow.cs similarity index 88% rename from DBFileReaderLib/Common/IDBRow.cs rename to DBCD.IO/Common/IDBRow.cs index 594b365..ea6d10e 100644 --- a/DBFileReaderLib/Common/IDBRow.cs +++ b/DBCD.IO/Common/IDBRow.cs @@ -2,7 +2,7 @@ using System.Collections.Generic; using System.Text; -namespace DBFileReaderLib.Common +namespace DBCD.IO.Common { interface IDBRow { diff --git a/DBFileReaderLib/DB2Flags.cs b/DBCD.IO/DB2Flags.cs similarity index 90% rename from DBFileReaderLib/DB2Flags.cs rename to DBCD.IO/DB2Flags.cs index 143e8d7..a846a57 100644 --- a/DBFileReaderLib/DB2Flags.cs +++ b/DBCD.IO/DB2Flags.cs @@ -1,6 +1,6 @@ using System; -namespace DBFileReaderLib +namespace DBCD.IO { [Flags] public enum DB2Flags diff --git a/DBFileReaderLib/DBFileReaderLib.csproj b/DBCD.IO/DBCD.IO.csproj similarity index 100% rename from DBFileReaderLib/DBFileReaderLib.csproj rename to DBCD.IO/DBCD.IO.csproj diff --git a/DBFileReaderLib/DBReader.cs b/DBCD.IO/DBReader.cs similarity index 98% rename from DBFileReaderLib/DBReader.cs rename to DBCD.IO/DBReader.cs index 7270d53..e5d076f 100644 --- a/DBFileReaderLib/DBReader.cs +++ b/DBCD.IO/DBReader.cs 
@@ -2,9 +2,9 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using DBFileReaderLib.Readers; +using DBCD.IO.Readers; -namespace DBFileReaderLib +namespace DBCD.IO { public class DBReader { diff --git a/DBFileReaderLib/Extensions.cs b/DBCD.IO/Extensions.cs similarity index 99% rename from DBFileReaderLib/Extensions.cs rename to DBCD.IO/Extensions.cs index 21bbfbc..bc12e11 100644 --- a/DBFileReaderLib/Extensions.cs +++ b/DBCD.IO/Extensions.cs @@ -6,7 +6,7 @@ using System.Runtime.InteropServices; using System.Text; -namespace DBFileReaderLib +namespace DBCD.IO { static class Extensions { diff --git a/DBFileReaderLib/FieldCache.cs b/DBCD.IO/FieldCache.cs similarity index 95% rename from DBFileReaderLib/FieldCache.cs rename to DBCD.IO/FieldCache.cs index a087951..a9225ca 100644 --- a/DBFileReaderLib/FieldCache.cs +++ b/DBCD.IO/FieldCache.cs @@ -1,8 +1,8 @@ -using DBFileReaderLib.Attributes; +using DBCD.IO.Attributes; using System; using System.Reflection; -namespace DBFileReaderLib +namespace DBCD.IO { class FieldCache { diff --git a/DBFileReaderLib/HotfixReader.cs b/DBCD.IO/HotfixReader.cs similarity index 97% rename from DBFileReaderLib/HotfixReader.cs rename to DBCD.IO/HotfixReader.cs index 9bbc6b4..60cd827 100644 --- a/DBFileReaderLib/HotfixReader.cs +++ b/DBCD.IO/HotfixReader.cs @@ -1,10 +1,10 @@ -using DBFileReaderLib.Readers; +using DBCD.IO.Readers; using System; using System.Collections.Generic; using System.IO; using System.Linq; -namespace DBFileReaderLib +namespace DBCD.IO { public class HotfixReader { diff --git a/DBFileReaderLib/Readers/BaseReader.cs b/DBCD.IO/Readers/BaseReader.cs similarity index 97% rename from DBFileReaderLib/Readers/BaseReader.cs rename to DBCD.IO/Readers/BaseReader.cs index 9820dba..54ca117 100644 --- a/DBFileReaderLib/Readers/BaseReader.cs +++ b/DBCD.IO/Readers/BaseReader.cs @@ -2,9 +2,9 @@ using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; -using DBFileReaderLib.Common; +using DBCD.IO.Common; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { abstract class BaseReader { diff --git a/DBFileReaderLib/Readers/HTFXReader.cs b/DBCD.IO/Readers/HTFXReader.cs similarity index 99% rename from DBFileReaderLib/Readers/HTFXReader.cs rename to DBCD.IO/Readers/HTFXReader.cs index 7d6f0c9..3f631d9 100644 --- a/DBFileReaderLib/Readers/HTFXReader.cs +++ b/DBCD.IO/Readers/HTFXReader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -7,7 +7,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class HTFXRow : IDBRow, IHotfixEntry, IEquatable { diff --git a/DBFileReaderLib/Readers/WDB2Reader.cs b/DBCD.IO/Readers/WDB2Reader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDB2Reader.cs rename to DBCD.IO/Readers/WDB2Reader.cs index 9bfb032..53866be 100644 --- a/DBFileReaderLib/Readers/WDB2Reader.cs +++ b/DBCD.IO/Readers/WDB2Reader.cs @@ -4,9 +4,9 @@ using System.Linq; using System.Runtime.CompilerServices; using System.Text; -using DBFileReaderLib.Common; +using DBCD.IO.Common; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDB2Row : IDBRow { diff --git a/DBFileReaderLib/Readers/WDB3Reader.cs b/DBCD.IO/Readers/WDB3Reader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDB3Reader.cs rename to DBCD.IO/Readers/WDB3Reader.cs index b8a0373..f3d7e5d 100644 --- a/DBFileReaderLib/Readers/WDB3Reader.cs +++ 
b/DBCD.IO/Readers/WDB3Reader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -6,7 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDB3Row : IDBRow { diff --git a/DBFileReaderLib/Readers/WDB4Reader.cs b/DBCD.IO/Readers/WDB4Reader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDB4Reader.cs rename to DBCD.IO/Readers/WDB4Reader.cs index 63264ea..2136d73 100644 --- a/DBFileReaderLib/Readers/WDB4Reader.cs +++ b/DBCD.IO/Readers/WDB4Reader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -6,7 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDB4Row : IDBRow { diff --git a/DBFileReaderLib/Readers/WDB5Reader.cs b/DBCD.IO/Readers/WDB5Reader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDB5Reader.cs rename to DBCD.IO/Readers/WDB5Reader.cs index 1542e60..6d87c1a 100644 --- a/DBFileReaderLib/Readers/WDB5Reader.cs +++ b/DBCD.IO/Readers/WDB5Reader.cs @@ -1,11 +1,11 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDB5Row : IDBRow { diff --git a/DBFileReaderLib/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDB6Reader.cs rename to DBCD.IO/Readers/WDB6Reader.cs index 4152ae0..b76fd7f 100644 --- a/DBFileReaderLib/Readers/WDB6Reader.cs +++ b/DBCD.IO/Readers/WDB6Reader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -6,7 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDB6Row : IDBRow { diff --git a/DBFileReaderLib/Readers/WDBCReader.cs b/DBCD.IO/Readers/WDBCReader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDBCReader.cs rename to DBCD.IO/Readers/WDBCReader.cs index 9e430ce..940a48d 100644 --- a/DBFileReaderLib/Readers/WDBCReader.cs +++ b/DBCD.IO/Readers/WDBCReader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -6,7 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDBCRow : IDBRow { diff --git a/DBFileReaderLib/Readers/WDC1Reader.cs b/DBCD.IO/Readers/WDC1Reader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDC1Reader.cs rename to DBCD.IO/Readers/WDC1Reader.cs index 29fbd96..cfd69c3 100644 --- a/DBFileReaderLib/Readers/WDC1Reader.cs +++ b/DBCD.IO/Readers/WDC1Reader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -7,7 +7,7 @@ using System.Runtime.InteropServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDC1Row : IDBRow { diff --git a/DBFileReaderLib/Readers/WDC2Reader.cs b/DBCD.IO/Readers/WDC2Reader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDC2Reader.cs rename to DBCD.IO/Readers/WDC2Reader.cs index c117f96..b24fcc9 100644 --- 
a/DBFileReaderLib/Readers/WDC2Reader.cs +++ b/DBCD.IO/Readers/WDC2Reader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -6,7 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDC2Row : IDBRow { diff --git a/DBFileReaderLib/Readers/WDC3Reader.cs b/DBCD.IO/Readers/WDC3Reader.cs similarity index 99% rename from DBFileReaderLib/Readers/WDC3Reader.cs rename to DBCD.IO/Readers/WDC3Reader.cs index ee5d3a5..e8a6483 100644 --- a/DBFileReaderLib/Readers/WDC3Reader.cs +++ b/DBCD.IO/Readers/WDC3Reader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -6,7 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDC3Row : IDBRow { diff --git a/DBFileReaderLib/Storage.cs b/DBCD.IO/Storage.cs similarity index 94% rename from DBFileReaderLib/Storage.cs rename to DBCD.IO/Storage.cs index 3674194..6e77109 100644 --- a/DBFileReaderLib/Storage.cs +++ b/DBCD.IO/Storage.cs @@ -1,7 +1,7 @@ using System.Collections.Generic; using System.IO; -namespace DBFileReaderLib +namespace DBCD.IO { public class Storage : SortedDictionary where T : class, new() { diff --git a/DBCD.Tests/TestDBDProvider.cs b/DBCD.Tests/TestDBDProvider.cs new file mode 100644 index 0000000..4e27385 --- /dev/null +++ b/DBCD.Tests/TestDBDProvider.cs @@ -0,0 +1,16 @@ +using DBCD.Providers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; + +namespace DBCD.Tests +{ + class TestDBDProvider : IDBDProvider + { + public Stream StreamForTableName(string tableName, string build = null) + { + return File.OpenRead(Path.Combine(@"C:\Users\TomSpearman\Downloads\WoWDBDefs\definitions", tableName + ".dbd")); + } + } +} diff --git a/DBCD.Tests/UnitTest1.cs b/DBCD.Tests/UnitTest1.cs index b86b9db..9b95424 100644 --- a/DBCD.Tests/UnitTest1.cs +++ b/DBCD.Tests/UnitTest1.cs @@ -1,6 +1,9 @@ -using System.IO; -using DBCD.Providers; +using DBCD.Providers; +using DBCD.IO.Attributes; using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.IO; +using System.Linq; namespace DBCD.Tests { @@ -11,12 +14,113 @@ public class UnitTest1 public void TestMethod1() { var githubDBDProvider = new GithubDBDProvider(); - var dbcProvider = new TestDBCProvider(@"E:\"); + var testDBDProvider = new TestDBDProvider(); + var dbcProvider = new TestDBCProvider(@"C:\Users\TomSpearman\Downloads\dbfilesclient"); - DBCD dbcd = new DBCD(dbcProvider, githubDBDProvider); - IDBCDStorage storage = dbcd.Load("Achievement"); + /* + "C:\\Users\\TomSpearman\\Downloads\\dbfilesclient\\item.db2" + "C:\\Users\\TomSpearman\\Downloads\\dbfilesclient\\spell.db2" + "C:\\Users\\TomSpearman\\Downloads\\dbfilesclient\\spelleffect.db2" + "C:\\Users\\TomSpearman\\Downloads\\dbfilesclient\\spellname.db2" + */ + + //DBCD dbcd = new DBCD(dbcProvider, githubDBDProvider); + //IDBCDStorage storage = dbcd.Load("Map"); + + //var fucked = new System.Collections.Generic.List(); + //foreach (var file in Directory.EnumerateFiles(@"C:\Users\TomSpearman\Downloads\dbfilesclient")) + //{ + // try + // { + // DBCD dbcd = new DBCD(dbcProvider, testDBDProvider); + // IDBCDStorage storage = dbcd.Load(Path.GetFileNameWithoutExtension(file)); + // } + // catch + // { + // fucked.Add(file); + // } + //} + + //DBCD dbcd = new 
DBCD(dbcProvider, githubDBDProvider); + //IDBCDStorage storage = dbcd.Load("Creature"); //IDBCDStorage storage = dbcd.Load("LockType", "1.12.1.5875", Locale.EnUS); + + var fields = typeof(SpellVisualEffectNameRec).GetFields(); + + DBCD.IO.DBReader reader = new DBCD.IO.DBReader("SpellVisualEffectName.dbc"); + var recs = reader.GetRecords(); + var val = recs.Values.Where(x => x.Flags > 0).ToArray(); + + } + + class SpellVisualEffectNameRec + { + [Index] + public int Id; + public string FileName; + public UNITEFFECTSPECIALS[] SpecialID; + public int SpecialAttachPoint; + public float AreaEffectSize; + public Flags Flags; } + + [Flags] + enum Flags : uint + { + ReleaseDeathHolds = 1, + Unknown = 2, + OneShotEndHandler = 4, + UnitEffectIsAuraWorldObject = 8 + } + + enum UNITEFFECTSPECIALS : uint + { + SPECIALEFFECT_LOOTART = 0x0, + SPECIALEFFECT_LEVELUP = 0x1, + SPECIALEFFECT_FOOTSTEPSPRAYSNOW = 0x2, + SPECIALEFFECT_FOOTSTEPSPRAYSNOWWALK = 0x3, + SPECIALEFFECT_FOOTSTEPDIRT = 0x4, + SPECIALEFFECT_FOOTSTEPDIRTWALK = 0x5, + SPECIALEFFECT_COLDBREATH = 0x6, + SPECIALEFFECT_UNDERWATERBUBBLES = 0x7, + SPECIALEFFECT_COMBATBLOODSPURTFRONT = 0x8, + SPECIALEFFECT_UNUSED = 0x9, + SPECIALEFFECT_COMBATBLOODSPURTBACK = 0xA, + SPECIALEFFECT_HITSPLATPHYSICALSMALL = 0xB, + SPECIALEFFECT_HITSPLATPHYSICALBIG = 0xC, + SPECIALEFFECT_HITSPLATHOLYSMALL = 0xD, + SPECIALEFFECT_HITSPLATHOLYBIG = 0xE, + SPECIALEFFECT_HITSPLATFIRESMALL = 0xF, + SPECIALEFFECT_HITSPLATFIREBIG = 0x10, + SPECIALEFFECT_HITSPLATNATURESMALL = 0x11, + SPECIALEFFECT_HITSPLATNATUREBIG = 0x12, + SPECIALEFFECT_HITSPLATFROSTSMALL = 0x13, + SPECIALEFFECT_HITSPLATFROSTBIG = 0x14, + SPECIALEFFECT_HITSPLATSHADOWSMALL = 0x15, + SPECIALEFFECT_HITSPLATSHADOWBIG = 0x16, + SPECIALEFFECT_COMBATBLOODSPURTFRONTLARGE = 0x17, + SPECIALEFFECT_COMBATBLOODSPURTBACKLARGE = 0x18, + SPECIALEFFECT_FIZZLEPHYSICAL = 0x19, + SPECIALEFFECT_FIZZLEHOLY = 0x1A, + SPECIALEFFECT_FIZZLEFIRE = 0x1B, + SPECIALEFFECT_FIZZLENATURE = 0x1C, + SPECIALEFFECT_FIZZLEFROST = 0x1D, + SPECIALEFFECT_FIZZLESHADOW = 0x1E, + SPECIALEFFECT_COMBATBLOODSPURTGREENFRONT = 0x1F, + SPECIALEFFECT_COMBATBLOODSPURTGREENFRONTLARGE = 0x20, + SPECIALEFFECT_COMBATBLOODSPURTGREENBACK = 0x21, + SPECIALEFFECT_COMBATBLOODSPURTGREENBACKLARGE = 0x22, + SPECIALEFFECT_FOOTSTEPSPRAYWATER = 0x23, + SPECIALEFFECT_FOOTSTEPSPRAYWATERWALK = 0x24, + SPECIALEFFECT_CHARACTERSHAPESHIFT = 0x25, + SPECIALEFFECT_COMBATBLOODSPURTBLACKFRONT = 0x26, + SPECIALEFFECT_COMBATBLOODSPURTBLACKFRONTLARGE = 0x27, + SPECIALEFFECT_COMBATBLOODSPURTBLACKBACK = 0x28, + SPECIALEFFECT_COMBATBLOODSPURTBLACKBACKLARGE = 0x29, + SPECIALEFFECT_RES_EFFECT = 0x2A, + NUM_UNITEFFECTSPECIALS = 0x2B, + SPECIALEFFECT_NONE = 0xFFFFFFFF, + }; } } diff --git a/DBCD.sln b/DBCD.sln index 3d369b9..1f21f5c 100644 --- a/DBCD.sln +++ b/DBCD.sln @@ -5,9 +5,9 @@ VisualStudioVersion = 15.0.28307.645 MinimumVisualStudioVersion = 10.0.40219.1 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DBCD", "DBCD\DBCD.csproj", "{8267DA23-F629-4756-8D14-F532617FBCE4}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DBFileReaderLib", "DBFileReaderLib\DBFileReaderLib.csproj", "{3172C02B-502C-449F-81D3-47BBF47297BD}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DBCD.IO", "DBCD.IO\DBCD.IO.csproj", "{3172C02B-502C-449F-81D3-47BBF47297BD}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DBCD.Tests", "DBCD.Tests\DBCD.Tests.csproj", "{96CFC512-3818-487F-8FB6-7632E340ABB9}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DBCD.Tests", 
"DBCD.Tests\DBCD.Tests.csproj", "{96CFC512-3818-487F-8FB6-7632E340ABB9}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution diff --git a/DBCD/DBCD.cs b/DBCD/DBCD.cs index 1066028..e0dab9e 100644 --- a/DBCD/DBCD.cs +++ b/DBCD/DBCD.cs @@ -1,5 +1,5 @@ using DBCD.Providers; -using DBFileReaderLib; +using DBCD.IO; using System; namespace DBCD diff --git a/DBCD/DBCD.csproj b/DBCD/DBCD.csproj index a476177..c5175bb 100644 --- a/DBCD/DBCD.csproj +++ b/DBCD/DBCD.csproj @@ -17,7 +17,7 @@ - + diff --git a/DBCD/DBCDBuilder.cs b/DBCD/DBCDBuilder.cs index 48c6f6a..e8d46f0 100644 --- a/DBCD/DBCDBuilder.cs +++ b/DBCD/DBCDBuilder.cs @@ -1,6 +1,6 @@ using DBDefsLib; -using DBFileReaderLib; -using DBFileReaderLib.Attributes; +using DBCD.IO; +using DBCD.IO.Attributes; using System; using System.Collections.Generic; using System.IO; diff --git a/DBCD/DBCDStorage.cs b/DBCD/DBCDStorage.cs index 8d4204c..3f9d842 100644 --- a/DBCD/DBCDStorage.cs +++ b/DBCD/DBCDStorage.cs @@ -1,5 +1,5 @@ using DBCD.Helpers; -using DBFileReaderLib; +using DBCD.IO; using System.Collections; using System.Collections.Generic; using System.Collections.ObjectModel; From dceb12ef9c8296e99d24c6adfe982051ee045c7e Mon Sep 17 00:00:00 2001 From: barncastle Date: Fri, 9 Aug 2019 15:44:09 +0100 Subject: [PATCH 02/40] fix projects --- DBCD/DBCD.csproj | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/DBCD/DBCD.csproj b/DBCD/DBCD.csproj index c5175bb..f09d455 100644 --- a/DBCD/DBCD.csproj +++ b/DBCD/DBCD.csproj @@ -2,6 +2,7 @@ netstandard2.0 + 7.3 @@ -17,7 +18,7 @@ - + From 0a9bbf5ba77cb3ba8bd2f2f6b92e781bdb329189 Mon Sep 17 00:00:00 2001 From: barncastle Date: Fri, 9 Aug 2019 15:46:10 +0100 Subject: [PATCH 03/40] updae other projects --- DBCD.IO/DBCD.IO.csproj | 1 + DBCD.Tests/DBCD.Tests.csproj | 3 +++ DBCD.Tests/UnitTest1.cs | 3 ++- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/DBCD.IO/DBCD.IO.csproj b/DBCD.IO/DBCD.IO.csproj index b2887fe..7314a0f 100644 --- a/DBCD.IO/DBCD.IO.csproj +++ b/DBCD.IO/DBCD.IO.csproj @@ -2,6 +2,7 @@ netstandard2.0 + 7.3 diff --git a/DBCD.Tests/DBCD.Tests.csproj b/DBCD.Tests/DBCD.Tests.csproj index c228674..80ff2a5 100644 --- a/DBCD.Tests/DBCD.Tests.csproj +++ b/DBCD.Tests/DBCD.Tests.csproj @@ -4,6 +4,8 @@ netcoreapp2.2 false + + 7.3 @@ -13,6 +15,7 @@ + diff --git a/DBCD.Tests/UnitTest1.cs b/DBCD.Tests/UnitTest1.cs index 9b95424..5b07451 100644 --- a/DBCD.Tests/UnitTest1.cs +++ b/DBCD.Tests/UnitTest1.cs @@ -3,6 +3,7 @@ using Microsoft.VisualStudio.TestTools.UnitTesting; using System; using System.IO; +using DBCD.IO; using System.Linq; namespace DBCD.Tests @@ -48,7 +49,7 @@ public void TestMethod1() var fields = typeof(SpellVisualEffectNameRec).GetFields(); - DBCD.IO.DBReader reader = new DBCD.IO.DBReader("SpellVisualEffectName.dbc"); + var reader = new DBReader("SpellVisualEffectName.dbc"); var recs = reader.GetRecords(); var val = recs.Values.Where(x => x.Flags > 0).ToArray(); From 63799aa4a8159b19fdb691314e5039a6a2e43b56 Mon Sep 17 00:00:00 2001 From: barncastle Date: Fri, 9 Aug 2019 16:17:34 +0100 Subject: [PATCH 04/40] expose additional fields --- DBCD.IO/Readers/BaseReader.cs | 3 +++ DBCD.IO/Readers/WDB2Reader.cs | 4 ++-- DBCD.IO/Readers/WDB3Reader.cs | 4 ++-- DBCD.IO/Readers/WDB4Reader.cs | 4 ++-- DBCD.IO/Readers/WDB5Reader.cs | 4 ++-- DBCD.IO/Readers/WDB6Reader.cs | 18 ++++++++++++------ DBCD.IO/Readers/WDC1Reader.cs | 4 ++-- DBCD.IO/Readers/WDC2Reader.cs | 8 +++++--- DBCD.IO/Readers/WDC3Reader.cs | 4 ++-- 9 files changed, 32 insertions(+), 21 
deletions(-) diff --git a/DBCD.IO/Readers/BaseReader.cs b/DBCD.IO/Readers/BaseReader.cs index 54ca117..99c2643 100644 --- a/DBCD.IO/Readers/BaseReader.cs +++ b/DBCD.IO/Readers/BaseReader.cs @@ -18,6 +18,9 @@ abstract class BaseReader public int MaxIndex { get; protected set; } public int IdFieldIndex { get; protected set; } public DB2Flags Flags { get; protected set; } + public int Locale { get; protected set; } + public uint Build { get; protected set; } + public int PackedDataOffset { get; protected set; } #region Data diff --git a/DBCD.IO/Readers/WDB2Reader.cs b/DBCD.IO/Readers/WDB2Reader.cs index 53866be..4263332 100644 --- a/DBCD.IO/Readers/WDB2Reader.cs +++ b/DBCD.IO/Readers/WDB2Reader.cs @@ -138,14 +138,14 @@ public WDB2Reader(Stream stream) RecordSize = reader.ReadInt32(); StringTableSize = reader.ReadInt32(); TableHash = reader.ReadUInt32(); - uint build = reader.ReadUInt32(); + Build = reader.ReadUInt32(); uint timestamp = reader.ReadUInt32(); if (RecordsCount == 0) return; // Extended header - if (build > 12880) + if (Build > 12880) { if (reader.BaseStream.Length < ExtendedHeaderSize) throw new InvalidDataException("WDB2 file is corrupted!"); diff --git a/DBCD.IO/Readers/WDB3Reader.cs b/DBCD.IO/Readers/WDB3Reader.cs index f3d7e5d..69d1189 100644 --- a/DBCD.IO/Readers/WDB3Reader.cs +++ b/DBCD.IO/Readers/WDB3Reader.cs @@ -162,11 +162,11 @@ public WDB3Reader(Stream stream) RecordSize = reader.ReadInt32(); StringTableSize = reader.ReadInt32(); TableHash = reader.ReadUInt32(); - uint build = reader.ReadUInt32(); + Build = reader.ReadUInt32(); uint timestamp = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); int copyTableSize = reader.ReadInt32(); if (RecordsCount == 0) diff --git a/DBCD.IO/Readers/WDB4Reader.cs b/DBCD.IO/Readers/WDB4Reader.cs index 2136d73..d7d145d 100644 --- a/DBCD.IO/Readers/WDB4Reader.cs +++ b/DBCD.IO/Readers/WDB4Reader.cs @@ -153,11 +153,11 @@ public WDB4Reader(Stream stream) RecordSize = reader.ReadInt32(); StringTableSize = reader.ReadInt32(); TableHash = reader.ReadUInt32(); - uint build = reader.ReadUInt32(); + Build = reader.ReadUInt32(); uint timestamp = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); int copyTableSize = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt32(); diff --git a/DBCD.IO/Readers/WDB5Reader.cs b/DBCD.IO/Readers/WDB5Reader.cs index 6d87c1a..6c6abb1 100644 --- a/DBCD.IO/Readers/WDB5Reader.cs +++ b/DBCD.IO/Readers/WDB5Reader.cs @@ -158,7 +158,7 @@ public IDBRow Clone() class WDB5Reader : BaseReader { - private const int HeaderSize = 52; + private const int HeaderSize = 48; private const uint WDB5FmtSig = 0x35424457; // WDB5 public WDB5Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) { } @@ -183,7 +183,7 @@ public WDB5Reader(Stream stream) LayoutHash = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); int copyTableSize = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt16(); IdFieldIndex = reader.ReadUInt16(); diff --git a/DBCD.IO/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs index b76fd7f..3553d14 100644 --- a/DBCD.IO/Readers/WDB6Reader.cs +++ b/DBCD.IO/Readers/WDB6Reader.cs @@ -170,11 +170,14 @@ public IDBRow Clone() class WDB6Reader : BaseReader { + public readonly bool CommonDataIsAligned; + public 
readonly byte[] CommonDataTypes; + private const int HeaderSize = 56; private const uint WDB6FmtSig = 0x36424457; // WDB6 // CommonData type enum to bit size - private readonly Dictionary CommonTypeBits = new Dictionary + private readonly Dictionary CommonDataTypeBits = new Dictionary { { 0, 0 }, // string { 1, 16 }, // short @@ -185,7 +188,7 @@ class WDB6Reader : BaseReader public WDB6Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) { } - public unsafe WDB6Reader(Stream stream) + public WDB6Reader(Stream stream) { using (var reader = new BinaryReader(stream, Encoding.UTF8)) { @@ -205,7 +208,7 @@ public unsafe WDB6Reader(Stream stream) LayoutHash = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); int copyTableSize = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt16(); IdFieldIndex = reader.ReadUInt16(); @@ -286,20 +289,23 @@ public unsafe WDB6Reader(Stream stream) // HACK as of 24473 values are 4 byte aligned // try to calculate this by seeing if all tuples are 8 bytes - bool aligned = (commonDataSize - 4 - (fieldCount * 5)) % 8 == 0; + CommonDataIsAligned = (commonDataSize - 4 - (fieldCount * 5)) % 8 == 0; + CommonDataTypes = new byte[totalFieldCount - FieldsCount]; for (int i = 0; i < fieldCount; i++) { int count = reader.ReadInt32(); byte type = reader.ReadByte(); - int size = aligned ? 4 : (32 - CommonTypeBits[type]) >> 3; + int size = CommonDataIsAligned ? 4 : (32 - CommonDataTypeBits[type]) >> 3; // add the new meta entry if (i > FieldsCount) { + CommonDataTypes[i - FieldsCount] = type; + m_meta[i] = new FieldMetaData() { - Bits = CommonTypeBits[type], + Bits = CommonDataTypeBits[type], Offset = (short)(m_meta[i - 1].Offset + ((32 - m_meta[i - 1].Bits) >> 3)) }; } diff --git a/DBCD.IO/Readers/WDC1Reader.cs b/DBCD.IO/Readers/WDC1Reader.cs index cfd69c3..a54cacf 100644 --- a/DBCD.IO/Readers/WDC1Reader.cs +++ b/DBCD.IO/Readers/WDC1Reader.cs @@ -231,13 +231,13 @@ public WDC1Reader(Stream stream) LayoutHash = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); int copyTableSize = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt16(); IdFieldIndex = reader.ReadUInt16(); int totalFieldsCount = reader.ReadInt32(); - int packedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts + PackedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts int lookupColumnCount = reader.ReadInt32(); // count of lookup columns int sparseTableOffset = reader.ReadInt32(); // absolute value, {uint offset, ushort size}[MaxId - MinId + 1] int indexDataSize = reader.ReadInt32(); // int indexData[IndexDataSize / 4] diff --git a/DBCD.IO/Readers/WDC2Reader.cs b/DBCD.IO/Readers/WDC2Reader.cs index b24fcc9..115572f 100644 --- a/DBCD.IO/Readers/WDC2Reader.cs +++ b/DBCD.IO/Readers/WDC2Reader.cs @@ -248,6 +248,8 @@ public IDBRow Clone() class WDC2Reader : BaseReader { + public uint Signature { get; } + private const int HeaderSize = 72; private const uint WDC2FmtSig = 0x32434457; // WDC2 private const uint CLS1FmtSig = 0x434C5331; // CLS1 @@ -262,10 +264,10 @@ public WDC2Reader(Stream stream) throw new InvalidDataException("WDC2 file is corrupted!"); uint magic = reader.ReadUInt32(); - if (magic != WDC2FmtSig && magic != CLS1FmtSig) throw new InvalidDataException("WDC2 file is corrupted!"); + Signature = magic; RecordsCount = 
reader.ReadInt32(); FieldsCount = reader.ReadInt32(); RecordSize = reader.ReadInt32(); @@ -274,11 +276,11 @@ public WDC2Reader(Stream stream) LayoutHash = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt16(); IdFieldIndex = reader.ReadUInt16(); int totalFieldsCount = reader.ReadInt32(); - int packedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts + PackedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts int lookupColumnCount = reader.ReadInt32(); // count of lookup columns int columnMetaDataSize = reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 int commonDataSize = reader.ReadInt32(); diff --git a/DBCD.IO/Readers/WDC3Reader.cs b/DBCD.IO/Readers/WDC3Reader.cs index e8a6483..dfa977b 100644 --- a/DBCD.IO/Readers/WDC3Reader.cs +++ b/DBCD.IO/Readers/WDC3Reader.cs @@ -254,11 +254,11 @@ public WDC3Reader(Stream stream) LayoutHash = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt16(); IdFieldIndex = reader.ReadUInt16(); int totalFieldsCount = reader.ReadInt32(); - int packedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts + PackedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts int lookupColumnCount = reader.ReadInt32(); // count of lookup columns int columnMetaDataSize = reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 int commonDataSize = reader.ReadInt32(); From dccc9c86f170b1a37c48e54ed58f4be7c02ca44e Mon Sep 17 00:00:00 2001 From: barncastle Date: Fri, 9 Aug 2019 16:58:26 +0100 Subject: [PATCH 05/40] WIP db writing --- DBCD.IO/Common/BitWriter.cs | 191 ++++++++++++ DBCD.IO/Common/DBStructs.cs | 29 ++ DBCD.IO/Common/IDBRowSerializer.cs | 15 + DBCD.IO/Common/Value32Comparer.cs | 35 +++ DBCD.IO/Extensions.cs | 37 ++- DBCD.IO/FieldCache.cs | 2 + DBCD.IO/Readers/WDB6Reader.cs | 4 +- DBCD.IO/Writers/BaseWriter.cs | 134 +++++++++ DBCD.IO/Writers/WDB2Writer.cs | 197 ++++++++++++ DBCD.IO/Writers/WDB3Writer.cs | 205 +++++++++++++ DBCD.IO/Writers/WDB4Writer.cs | 208 +++++++++++++ DBCD.IO/Writers/WDB5Writer.cs | 220 ++++++++++++++ DBCD.IO/Writers/WDB6Writer.cs | 275 +++++++++++++++++ DBCD.IO/Writers/WDBCWriter.cs | 136 +++++++++ DBCD.IO/Writers/WDC1Writer.cs | 379 +++++++++++++++++++++++ DBCD.IO/Writers/WDC2Writer.cs | 452 ++++++++++++++++++++++++++++ DBCD.IO/Writers/WDC3Writer.cs | 465 +++++++++++++++++++++++++++++ 17 files changed, 2975 insertions(+), 9 deletions(-) create mode 100644 DBCD.IO/Common/BitWriter.cs create mode 100644 DBCD.IO/Common/IDBRowSerializer.cs create mode 100644 DBCD.IO/Common/Value32Comparer.cs create mode 100644 DBCD.IO/Writers/BaseWriter.cs create mode 100644 DBCD.IO/Writers/WDB2Writer.cs create mode 100644 DBCD.IO/Writers/WDB3Writer.cs create mode 100644 DBCD.IO/Writers/WDB4Writer.cs create mode 100644 DBCD.IO/Writers/WDB5Writer.cs create 
mode 100644 DBCD.IO/Writers/WDB6Writer.cs create mode 100644 DBCD.IO/Writers/WDBCWriter.cs create mode 100644 DBCD.IO/Writers/WDC1Writer.cs create mode 100644 DBCD.IO/Writers/WDC2Writer.cs create mode 100644 DBCD.IO/Writers/WDC3Writer.cs diff --git a/DBCD.IO/Common/BitWriter.cs b/DBCD.IO/Common/BitWriter.cs new file mode 100644 index 0000000..ce0e747 --- /dev/null +++ b/DBCD.IO/Common/BitWriter.cs @@ -0,0 +1,191 @@ +using System; +using System.IO; +using System.Runtime.CompilerServices; +using System.Text; + +namespace DBCD.IO.Common +{ + class BitWriter : IEquatable + { + private byte nAccumulatedBits; + private byte[] buffer; + + private readonly byte[] _pool; + + public BitWriter(int capacity) + { + buffer = new byte[capacity]; + _pool = new byte[0x10]; + } + + public byte this[int i] => buffer[i]; + public int TotalBytesWrittenOut { get; private set; } + + + public void WriteAligned(T value) where T : struct + { + EnsureSize(); + Unsafe.WriteUnaligned(ref buffer[TotalBytesWrittenOut], value); + TotalBytesWrittenOut += Unsafe.SizeOf(); + } + + public void WriteCStringAligned(string value) + { + byte[] data = Encoding.UTF8.GetBytes(value); + + Resize(data.Length); + Array.Copy(data, 0, buffer, TotalBytesWrittenOut, data.Length); + TotalBytesWrittenOut += data.Length + 1; + } + + public void Write(T value, int nbits) where T : struct + { + if (nAccumulatedBits == 0 && (nbits & 7) == 0) + { + EnsureSize(); + Unsafe.WriteUnaligned(ref buffer[TotalBytesWrittenOut], value); + TotalBytesWrittenOut += nbits / 8; + } + else + { + Unsafe.WriteUnaligned(ref _pool[0], value); + for (int i = 0; nbits > 0; i++) + { + WriteBits(Math.Min(nbits, 8), _pool[i]); + nbits -= 8; + } + } + } + + public void Write(T value, int nbits, int offset) where T : struct + { + Unsafe.WriteUnaligned(ref _pool[0], value); + + int byteOffset = offset >> 3; + int lowLen = offset & 7; + int highLen = 8 - lowLen; + + int i = 0; + while ((nbits -= 8) >= 0) + { + // write last part of this byte + buffer[byteOffset] = (byte)((buffer[byteOffset] & (0xFF >> highLen)) | (_pool[i] << lowLen)); + + // write first part of next byte + byteOffset++; + buffer[byteOffset] = (byte)((buffer[byteOffset] & (0xFF << lowLen)) | (_pool[i] >> highLen)); + i++; + } + + // write final bits + if ((nbits &= 7) > 0) + { + lowLen = nbits; + highLen = 8 - nbits; + + buffer[byteOffset] = (byte)((buffer[byteOffset] & (0xFF >> highLen)) | (_pool[i] << lowLen)); + } + } + + public void WriteCString(string value) + { + // Note: cstrings are always aligned to 8 bytes + if (nAccumulatedBits == 0) + { + WriteCStringAligned(value); + } + else + { + byte[] data = Encoding.UTF8.GetBytes(value); + for (int i = 0; i < data.Length; i++) + WriteBits(8, data[i]); + + WriteBits(8, 0); + } + } + + + private void WriteBits(int bitCount, uint value) + { + EnsureSize(); + + for (int i = 0; i < bitCount; i++) + { + buffer[TotalBytesWrittenOut] |= (byte)(((value >> i) & 0x1) << nAccumulatedBits); + nAccumulatedBits++; + + if (nAccumulatedBits > 7) + { + TotalBytesWrittenOut++; + nAccumulatedBits = 0; + } + } + } + + private void EnsureSize(int size = 8) + { + if (TotalBytesWrittenOut + size >= buffer.Length) + Array.Resize(ref buffer, buffer.Length + size + 0x10); + } + + + public void Resize(int size) + { + if (TotalBytesWrittenOut < size) + { + EnsureSize(size - TotalBytesWrittenOut); + TotalBytesWrittenOut = size; + } + } + + public void ResizeToMultiple(int divisor) + { + int remainder = TotalBytesWrittenOut % divisor; + if (remainder != 0) + { + EnsureSize(); + 
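+ // rounds the write position up to the next 4-byte boundary; note the
+ // increment below hardcodes 4 rather than using divisor, so it is only
+ // correct for the ResizeToMultiple(4) calls the sparse record
+ // serializers in this patch make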
TotalBytesWrittenOut += 4 - remainder; + } + } + + public void CopyTo(Stream stream) + { + stream.Write(buffer, 0, TotalBytesWrittenOut); + } + + + public bool Equals(BitWriter other) + { + if (TotalBytesWrittenOut != other.TotalBytesWrittenOut) + return false; + if (ReferenceEquals(this, other)) + return true; + + for (int i = 0; i < TotalBytesWrittenOut; i++) + if (this[i] != other[i]) + return false; + + return true; + } + + public override int GetHashCode() + { + unchecked + { + // jenkins one-at-a-time + int hashcode = 0; + for (int i = 0; i < TotalBytesWrittenOut; i++) + { + hashcode += buffer[i]; + hashcode += hashcode << 10; + hashcode ^= hashcode >> 6; + } + + hashcode += hashcode << 3; + hashcode ^= hashcode >> 11; + hashcode += hashcode << 15; + return hashcode; + } + } + } +} diff --git a/DBCD.IO/Common/DBStructs.cs b/DBCD.IO/Common/DBStructs.cs index 8004ccd..a82f936 100644 --- a/DBCD.IO/Common/DBStructs.cs +++ b/DBCD.IO/Common/DBStructs.cs @@ -64,6 +64,35 @@ public T GetValue() where T : struct return Unsafe.ReadUnaligned(ptr); } } + + public static unsafe Value32 Create(T obj) where T : unmanaged + { + return *(Value32*)&obj; + } + + public static unsafe Value32 Create(object obj) + { + if (obj is byte b) + return *(Value32*)&b; + else if (obj is sbyte sb) + return *(Value32*)&sb; + else if (obj is short s) + return *(Value32*)&s; + else if (obj is ushort us) + return *(Value32*)&us; + else if (obj is int i) + return *(Value32*)&i; + else if (obj is uint ui) + return *(Value32*)&ui; + else if (obj is long l) + return *(Value32*)&l; + else if (obj is ulong ul) + return *(Value32*)&ul; + else if (obj is float f) + return *(Value32*)&f; + else + throw new System.Exception("Invalid type"); + } } struct Value64 diff --git a/DBCD.IO/Common/IDBRowSerializer.cs b/DBCD.IO/Common/IDBRowSerializer.cs new file mode 100644 index 0000000..53c7754 --- /dev/null +++ b/DBCD.IO/Common/IDBRowSerializer.cs @@ -0,0 +1,15 @@ +using System.Collections.Generic; + +namespace DBCD.IO.Common +{ + interface IDBRowSerializer where T : class + { + IDictionary Records { get; } + + void Serialize(IDictionary rows); + + void Serialize(int id, T row); + + void GetCopyRows(); + } +} diff --git a/DBCD.IO/Common/Value32Comparer.cs b/DBCD.IO/Common/Value32Comparer.cs new file mode 100644 index 0000000..a5f0c14 --- /dev/null +++ b/DBCD.IO/Common/Value32Comparer.cs @@ -0,0 +1,35 @@ +using System.Collections.Generic; + +namespace DBCD.IO.Common +{ + class Value32Comparer : IEqualityComparer + { + public bool Equals(Value32[] x, Value32[] y) + { + if (ReferenceEquals(x, y)) + return true; + if (x.Length != y.Length) + return false; + + for (int i = 0; i < x.Length; i++) + if (x[i].GetValue() != y[i].GetValue()) + return false; + + return true; + } + + public int GetHashCode(Value32[] obj) + { + unchecked + { + int s = 314, t = 159, hashCode = 0; + for (int i = 0; i < obj.Length; i++) + { + hashCode = hashCode * s + obj[i].GetValue(); + s *= t; + } + return hashCode; + } + } + } +} diff --git a/DBCD.IO/Extensions.cs b/DBCD.IO/Extensions.cs index bc12e11..8160651 100644 --- a/DBCD.IO/Extensions.cs +++ b/DBCD.IO/Extensions.cs @@ -21,6 +21,15 @@ public static Action GetSetter(this FieldInfo fieldInfo) return Expression.Lambda>(assignExpression, paramExpression, valueExpression).Compile(); } + public static Func GetGetter(this FieldInfo fieldInfo) + { + var paramExpression = Expression.Parameter(typeof(T)); + var propertyExpression = Expression.Field(paramExpression, fieldInfo); + var convertExpression = 
Expression.Convert(propertyExpression, typeof(object)); + + return Expression.Lambda>(convertExpression, paramExpression).Compile(); + } + public static T GetAttribute(this FieldInfo fieldInfo) where T : Attribute { return Attribute.GetCustomAttribute(fieldInfo, typeof(T)) as T; @@ -32,14 +41,11 @@ public static T Read(this BinaryReader reader) where T : struct return Unsafe.ReadUnaligned(ref result[0]); } - public static T[] ReadArray(this BinaryReader reader) where T : struct + public static void Write(this BinaryWriter writer, T value) where T : struct { - int numBytes = (int)reader.ReadInt64(); - - byte[] result = reader.ReadBytes(numBytes); - - reader.BaseStream.Position += (0 - numBytes) & 0x07; - return result.CopyTo(); + byte[] buffer = new byte[Unsafe.SizeOf()]; + Unsafe.WriteUnaligned(ref buffer[0], value); + writer.Write(buffer); } public static T[] ReadArray(this BinaryReader reader, int size) where T : struct @@ -50,6 +56,23 @@ public static T[] ReadArray(this BinaryReader reader, int size) where T : str return result.CopyTo(); } + public static unsafe void WriteArray(this BinaryWriter writer, T[] value) where T : struct + { + if (value.Length == 0) + return; + + if (value is byte[] arr) + { + writer.Write(arr); + } + else + { + byte[] result = new byte[value.Length * Unsafe.SizeOf()]; + Unsafe.CopyBlockUnaligned(Unsafe.AsPointer(ref result[0]), Unsafe.AsPointer(ref value[0]), (uint)result.Length); + writer.Write(result); + } + } + public static unsafe T[] CopyTo(this byte[] src) where T : struct { T[] result = new T[src.Length / Unsafe.SizeOf()]; diff --git a/DBCD.IO/FieldCache.cs b/DBCD.IO/FieldCache.cs index a9225ca..848826f 100644 --- a/DBCD.IO/FieldCache.cs +++ b/DBCD.IO/FieldCache.cs @@ -10,6 +10,7 @@ class FieldCache public readonly bool IsArray = false; public readonly bool IsLocalisedString = false; public readonly Action Setter; + public readonly Func Getter; public readonly LocaleAttribute LocaleInfo; public bool IndexMapField { get; set; } = false; @@ -21,6 +22,7 @@ public FieldCache(FieldInfo field) IsArray = field.FieldType.IsArray; IsLocalisedString = GetStringInfo(field, out LocaleInfo); Setter = field.GetSetter(); + Getter = field.GetGetter(); IndexMapField = Attribute.IsDefined(field, typeof(IndexAttribute)); Cardinality = GetCardinality(field); } diff --git a/DBCD.IO/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs index 3553d14..56802ab 100644 --- a/DBCD.IO/Readers/WDB6Reader.cs +++ b/DBCD.IO/Readers/WDB6Reader.cs @@ -170,8 +170,8 @@ public IDBRow Clone() class WDB6Reader : BaseReader { - public readonly bool CommonDataIsAligned; - public readonly byte[] CommonDataTypes; + public bool CommonDataIsAligned { get; } + public byte[] CommonDataTypes { get; } private const int HeaderSize = 56; private const uint WDB6FmtSig = 0x36424457; // WDB6 diff --git a/DBCD.IO/Writers/BaseWriter.cs b/DBCD.IO/Writers/BaseWriter.cs new file mode 100644 index 0000000..d937d86 --- /dev/null +++ b/DBCD.IO/Writers/BaseWriter.cs @@ -0,0 +1,134 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System.Collections.Generic; +using System.IO; +using System.Linq; + +namespace DBCD.IO.Writers +{ + abstract class BaseWriter where T : class + { + public FieldCache[] FieldCache { get; protected set; } + public int RecordsCount { get; protected set; } + public int StringTableSize { get; protected set; } + public int FieldsCount { get; } + public int RecordSize { get; } + public int IdFieldIndex { get; } + public DB2Flags Flags { get; } + + public BaseWriter(BaseReader reader) + { 
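+ // cache a FieldCache entry per public field of T; each entry holds the
+ // compiled expression-tree Getter/Setter built in Extensions.cs plus the
+ // array/localised-string/index-map metadata the row serializers need,
+ // so serializing a record never falls back to reflection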
+ FieldCache = typeof(T).GetFields().Select(x => new FieldCache(x)).ToArray(); + + FieldsCount = reader.FieldsCount; + RecordSize = reader.RecordSize; + IdFieldIndex = reader.IdFieldIndex; + Flags = reader.Flags; + + m_stringsTable = new Dictionary(); + m_copyData = new SortedDictionary(); + m_meta = reader.Meta; + m_columnMeta = reader.ColumnMeta; + + if (m_columnMeta != null) + { + m_commonData = new Dictionary[m_columnMeta.Length]; + m_palletData = new List[m_columnMeta.Length]; + m_referenceData = new List(); + + // create the lookup collections + for (int i = 0; i < m_columnMeta.Length; i++) + { + m_commonData[i] = new Dictionary(); + m_palletData[i] = new List(); + } + } + + InternString(""); + } + + + #region Data + + protected FieldMetaData[] m_meta; + public FieldMetaData[] Meta => m_meta; + + protected ColumnMetaData[] m_columnMeta; + public ColumnMetaData[] ColumnMeta => m_columnMeta; + + protected List[] m_palletData; + public List[] PalletData => m_palletData; + + protected Dictionary[] m_commonData; + public Dictionary[] CommonData => m_commonData; + + protected Dictionary m_stringsTable; + public Dictionary StringTable => m_stringsTable; + + protected SortedDictionary m_copyData; + public SortedDictionary CopyData => m_copyData; + + protected List m_referenceData; + public List ReferenceData => m_referenceData; + + #endregion + + #region Methods + + public int InternString(string value) + { + if (m_stringsTable.TryGetValue(value, out int index)) + return index; + + m_stringsTable.Add(value, StringTableSize); + + int offset = StringTableSize; + StringTableSize += value.Length + 1; + return offset; + } + + public void WriteOffsetRecords(BinaryWriter writer, IDBRowSerializer serializer, uint recordOffset, int sparseCount) + { + var sparseIdLookup = new Dictionary(sparseCount); + + for (int i = 0; i < sparseCount; i++) + { + if (serializer.Records.TryGetValue(i, out var record)) + { + if (m_copyData.TryGetValue(i, out int copyid)) + { + // copy records use their parent's offset + writer.Write(sparseIdLookup[copyid]); + writer.Write(record.TotalBytesWrittenOut); + } + else + { + writer.Write(sparseIdLookup[i] = recordOffset); + writer.Write(record.TotalBytesWrittenOut); + recordOffset += (uint)record.TotalBytesWrittenOut; + } + } + else + { + // unused ids are empty records + writer.BaseStream.Position += 6; + } + } + } + + public void WriteSecondaryKeyData(BinaryWriter writer, IDictionary storage, int sparseCount) + { + // this was always the final field of wmominimaptexture.db2 + var fieldInfo = FieldCache[FieldCache.Length - 1]; + for (int i = 0; i < sparseCount; i++) + { + if (storage.TryGetValue(i, out var record)) + writer.Write((int)fieldInfo.Getter(record)); + else + writer.BaseStream.Position += 4; + } + } + + #endregion + } +} diff --git a/DBCD.IO/Writers/WDB2Writer.cs b/DBCD.IO/Writers/WDB2Writer.cs new file mode 100644 index 0000000..1047c8b --- /dev/null +++ b/DBCD.IO/Writers/WDB2Writer.cs @@ -0,0 +1,197 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; + +namespace DBCD.IO.Writers +{ + class WDB2RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + public IDictionary StringLengths { get; private set; } + + private readonly BaseWriter m_writer; + + + public WDB2RowSerializer(BaseWriter writer) + { + m_writer = writer; + + Records = new Dictionary(); + StringLengths = new Dictionary(); + } + + public void 
Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + StringLengths[id] = 0; + + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + { + Array array = (Array)info.Getter(row); + writer(bitWriter, m_writer, array); + + if (array is string[] strings) + StringLengths[id] = (ushort)strings.Sum(x => x.Length == 0 ? 0 : x.Length + 1); + } + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + { + object value = info.Getter(row); + writer(bitWriter, m_writer, value); + + if (value is string strings) + StringLengths[id] = (ushort)(strings.Length == 0 ? 0 : strings.Length + 1); + } + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + bitWriter.Resize(m_writer.RecordSize); + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + throw new NotImplementedException(); + } + + + private static Dictionary, object>> simpleWriters = new Dictionary, object>> + { + [typeof(long)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(float)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(int)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(uint)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(short)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(ushort)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(sbyte)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(byte)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(string)] = (data, writer, value) => WriteFieldValue(data, writer.InternString((string)value)), + }; + + private readonly Dictionary, Array>> arrayWriters = new Dictionary, Array>> + { + [typeof(ulong[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(long[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(float[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(int[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(uint[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(ulong[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(ushort[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(short[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(byte[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(sbyte[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(string[])] = (data, writer, array) => WriteFieldValueArray(data, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(BitWriter r, object value) where TType : struct + { + r.WriteAligned((TType)value); + } + + private static void WriteFieldValueArray(BitWriter r, Array value) where TType : struct + { + for (int i = 0; i < value.Length; i++) + r.WriteAligned((TType)value.GetValue(i)); + } + + } + + class WDB2Writer : BaseWriter where T : class + { + private const uint WDB2FmtSig = 0x32424457; // WDB2 + + public WDB2Writer(WDB2Reader reader, 
IDictionary storage, Stream stream) : base(reader) + { + WDB2RowSerializer serializer = new WDB2RowSerializer(this); + serializer.Serialize(storage); + + RecordsCount = storage.Count; + + using (var writer = new BinaryWriter(stream)) + { + writer.Write(WDB2FmtSig); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(StringTableSize); + writer.Write(reader.TableHash); + writer.Write(reader.Build); + writer.Write((uint)DateTimeOffset.UtcNow.ToUnixTimeSeconds()); + + if (storage.Count == 0) + return; + + // Extended header + if (reader.Build > 12880) + { + if (reader.MaxIndex == 0) + { + writer.Write(0); + writer.Write(0); + writer.Write(reader.LayoutHash); + writer.Write(0); // CopyTableSize + } + else + { + WriteIndices(writer, serializer, reader.LayoutHash); + } + } + + foreach (var record in serializer.Records) + record.Value.CopyTo(writer.BaseStream); + + foreach (var str in m_stringsTable) + writer.WriteCString(str.Key); + } + } + + private void WriteIndices(BinaryWriter writer, WDB2RowSerializer serializer, uint layoutHash) + { + int min = serializer.Records.Keys.Min(); + int max = serializer.Records.Keys.Max(); + + writer.Write(min); + writer.Write(max); + writer.Write(layoutHash); + writer.Write(0); // CopyTableSize + + int index = 0; + for (int i = min; i <= max; i++) + { + if (serializer.StringLengths.ContainsKey(i)) + { + writer.Write(++index); + writer.Write(serializer.StringLengths[i]); + } + else + { + writer.Write(0); + writer.Write((ushort)0); + } + } + } + } +} diff --git a/DBCD.IO/Writers/WDB3Writer.cs b/DBCD.IO/Writers/WDB3Writer.cs new file mode 100644 index 0000000..58f76b4 --- /dev/null +++ b/DBCD.IO/Writers/WDB3Writer.cs @@ -0,0 +1,205 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; + +namespace DBCD.IO.Writers +{ + class WDB3RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + + + public WDB3RowSerializer(BaseWriter writer) + { + m_writer = writer; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (info.IndexMapField && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + continue; + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + + private static Dictionary, object>> simpleWriters = new 
Dictionary, object>> + { + [typeof(long)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(float)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(int)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(uint)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(short)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(ushort)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(sbyte)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(byte)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(string)] = (data, writer, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCStringAligned((string)value); + else + WriteFieldValue(data, writer.InternString((string)value)); + } + }; + + private readonly Dictionary, Array>> arrayWriters = new Dictionary, Array>> + { + [typeof(ulong[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(long[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(float[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(int[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(uint[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(ulong[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(ushort[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(short[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(byte[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(sbyte[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(string[])] = (data, writer, array) => WriteFieldValueArray(data, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(BitWriter r, object value) where TType : struct + { + r.WriteAligned((TType)value); + } + + private static void WriteFieldValueArray(BitWriter r, Array value) where TType : struct + { + for (int i = 0; i < value.Length; i++) + r.WriteAligned((TType)value.GetValue(i)); + } + } + + + class WDB3Writer : BaseWriter where T : class + { + private const uint WDB3FmtSig = 0x33424457; // WDB3 + + public WDB3Writer(WDB3Reader reader, IDictionary storage, Stream stream) : base(reader) + { + // always 2 empties + StringTableSize++; + + WDB3RowSerializer serializer = new WDB3RowSerializer(this); + serializer.Serialize(storage); + serializer.GetCopyRows(); + + RecordsCount = serializer.Records.Count - m_copyData.Count; + if (Flags.HasFlagExt(DB2Flags.Sparse)) + StringTableSize = 0; + + using (var writer = new BinaryWriter(stream)) + { + int minIndex = storage.Keys.Min(); + int maxIndex = storage.Keys.Max(); + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 
0 : m_copyData.Count * 8; + + writer.Write(WDB3FmtSig); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(StringTableSize); + writer.Write(reader.TableHash); + writer.Write(reader.Build); + writer.Write((uint)DateTimeOffset.UtcNow.ToUnixTimeSeconds()); + writer.Write(minIndex); + writer.Write(maxIndex); + writer.Write(reader.Locale); + writer.Write(copyTableSize); + + if (storage.Count == 0) + return; + + // sparse data + if (Flags.HasFlagExt(DB2Flags.Sparse)) + { + int sparseCount = maxIndex - minIndex + 1; + uint recordsOffset = (uint)(writer.BaseStream.Position + (sparseCount * 6)); + WriteOffsetRecords(writer, serializer, recordsOffset, sparseCount); + } + + // secondary key + if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) + WriteSecondaryKeyData(writer, storage, maxIndex - minIndex + 1); + + // record data + foreach (var record in serializer.Records) + if (!m_copyData.ContainsKey(record.Key)) + record.Value.CopyTo(writer.BaseStream); + + // string table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + writer.WriteCString(""); + foreach (var str in m_stringsTable) + writer.WriteCString(str.Key); + } + + // index table + if (Flags.HasFlagExt(DB2Flags.Index)) + writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + + // copy table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + foreach (var copyRecord in m_copyData) + { + writer.Write(copyRecord.Key); + writer.Write(copyRecord.Value); + } + } + } + } + } +} diff --git a/DBCD.IO/Writers/WDB4Writer.cs b/DBCD.IO/Writers/WDB4Writer.cs new file mode 100644 index 0000000..f5f2217 --- /dev/null +++ b/DBCD.IO/Writers/WDB4Writer.cs @@ -0,0 +1,208 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; + +namespace DBCD.IO.Writers +{ + class WDB4RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + + + public WDB4RowSerializer(BaseWriter writer) + { + m_writer = writer; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (info.IndexMapField && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + continue; + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + + private static Dictionary, object>> simpleWriters = new Dictionary, object>> + { + [typeof(long)] = (data, 
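// NOTE: the `sparseCount * 6` in WDB3Writer above is the size of the offset
// map: one {uint offset, ushort size} entry per id in [minIndex, maxIndex],
// 6 bytes each, emitted directly after the header. Illustrative layout (not
// a DBCD.IO type):
//
//     [StructLayout(LayoutKind.Sequential, Pack = 2)]
//     struct SparseEntry
//     {
//         public uint Offset;    // absolute file offset of the record, 0 = absent
//         public ushort Size;    // record size in bytes
//     }
//
// Missing ids are encoded as {0, 0}, which is why the map must span the full
// contiguous id range rather than only the rows that exist.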
writer, value) => WriteFieldValue(data, value), + [typeof(float)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(int)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(uint)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(short)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(ushort)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(sbyte)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(byte)] = (data, writer, value) => WriteFieldValue(data, value), + [typeof(string)] = (data, writer, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCStringAligned((string)value); + else + WriteFieldValue(data, writer.InternString((string)value)); + } + }; + + private readonly Dictionary, Array>> arrayWriters = new Dictionary, Array>> + { + [typeof(ulong[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(long[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(float[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(int[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(uint[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(ulong[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(ushort[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(short[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(byte[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(sbyte[])] = (data, writer, array) => WriteFieldValueArray(data, array), + [typeof(string[])] = (data, writer, array) => WriteFieldValueArray(data, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(BitWriter r, object value) where TType : struct + { + r.WriteAligned((TType)value); + } + + private static void WriteFieldValueArray(BitWriter r, Array value) where TType : struct + { + for (int i = 0; i < value.Length; i++) + r.WriteAligned((TType)value.GetValue(i)); + } + } + + class WDB4Writer : BaseWriter where T : class + { + private const uint WDB4FmtSig = 0x34424457; // WDB4 + + public WDB4Writer(WDB4Reader reader, IDictionary storage, Stream stream) : base(reader) + { + // always 2 empties + StringTableSize++; + + WDB4RowSerializer serializer = new WDB4RowSerializer(this); + serializer.Serialize(storage); + serializer.GetCopyRows(); + + RecordsCount = serializer.Records.Count - m_copyData.Count; + + using (var writer = new BinaryWriter(stream)) + { + int minIndex = storage.Keys.Min(); + int maxIndex = storage.Keys.Max(); + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 
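// NOTE: HasFlagExt is used throughout in place of Enum.HasFlag; presumably a
// non-boxing extension with the usual bitmask semantics, along these lines
// (sketch, the real method lives in DBCD.IO's extension helpers):
//
//     public static class DB2FlagsExtensions
//     {
//         public static bool HasFlagExt(this DB2Flags flags, DB2Flags flag)
//             => (flags & flag) == flag;
//     }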
0 : m_copyData.Count * 8; + + writer.Write(WDB4FmtSig); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(StringTableSize); // if flags & 0x01 != 0, offset to the offset_map + writer.Write(reader.TableHash); + writer.Write(reader.Build); + writer.Write((uint)DateTimeOffset.UtcNow.ToUnixTimeSeconds()); + writer.Write(minIndex); + writer.Write(maxIndex); + writer.Write(reader.Locale); + writer.Write(copyTableSize); + writer.Write((uint)Flags); + + if (storage.Count == 0) + return; + + // record data + uint recordsOffset = (uint)writer.BaseStream.Position; + foreach (var record in serializer.Records) + if (!m_copyData.ContainsKey(record.Key)) + record.Value.CopyTo(writer.BaseStream); + + // string table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + writer.WriteCString(""); + foreach (var str in m_stringsTable) + writer.WriteCString(str.Key); + } + + // sparse data + if (Flags.HasFlagExt(DB2Flags.Sparse)) + { + // change the StringTableSize to the offset_map position + long oldPos = writer.BaseStream.Position; + writer.BaseStream.Position = 16; + writer.Write((uint)oldPos); + writer.BaseStream.Position = oldPos; + + WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1); + } + + // secondary key + if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) + WriteSecondaryKeyData(writer, storage, maxIndex - minIndex + 1); + + // index table + if (Flags.HasFlagExt(DB2Flags.Index)) + writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + + // copy table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + foreach (var copyRecord in m_copyData) + { + writer.Write(copyRecord.Key); + writer.Write(copyRecord.Value); + } + } + } + } + } +} diff --git a/DBCD.IO/Writers/WDB5Writer.cs b/DBCD.IO/Writers/WDB5Writer.cs new file mode 100644 index 0000000..0004975 --- /dev/null +++ b/DBCD.IO/Writers/WDB5Writer.cs @@ -0,0 +1,220 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; + +namespace DBCD.IO.Writers +{ + class WDB5RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + private readonly FieldMetaData[] m_fieldMeta; + + + public WDB5RowSerializer(BaseWriter writer) + { + m_writer = writer; + m_fieldMeta = m_writer.Meta; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + int indexFieldOffSet = 0; + + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (info.IndexMapField && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + { + indexFieldOffSet++; + continue; + } + + int fieldIndex = i - indexFieldOffSet; + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + 
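// NOTE: the sparse branch above back-patches a header field after the fact:
// save the position, seek to the field's fixed offset (16 = StringTableSize
// in WDB4, reused as the offset-map position), write the real value, restore
// the cursor. As a reusable sketch (helper name is illustrative):
//
//     static void BackPatch(BinaryWriter w, long fieldOffset, uint value)
//     {
//         long saved = w.BaseStream.Position;
//         w.BaseStream.Position = fieldOffset;
//         w.Write(value);
//         w.BaseStream.Position = saved;
//     }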
bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + + private static Dictionary, FieldMetaData, object>> simpleWriters = new Dictionary, FieldMetaData, object>> + { + [typeof(long)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(float)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(int)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(uint)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(short)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(ushort)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(sbyte)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(byte)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(string)] = (data, writer, fieldMeta, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCString((string)value); + else + WriteFieldValue(data, fieldMeta, writer.InternString((string)value)); + } + }; + + private static Dictionary, FieldMetaData, Array>> arrayWriters = new Dictionary, FieldMetaData, Array>> + { + [typeof(ulong[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(long[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(float[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(int[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(uint[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(ulong[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(ushort[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(short[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(byte[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(sbyte[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(string[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(BitWriter r, FieldMetaData fieldMeta, object value) where TType : struct + { + r.Write((TType)value, 32 - fieldMeta.Bits); + } + + private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, Array value) where TType : struct + { + for (int i = 0; i < value.Length; i++) + r.Write((TType)value.GetValue(i), 32 - fieldMeta.Bits); + } + } + + class WDB5Writer : BaseWriter where T : class + { + private const uint WDB5FmtSig = 0x35424457; // WDB5 + + public WDB5Writer(WDB5Reader reader, IDictionary storage, Stream stream) : base(reader) + { + // always 2 empties + StringTableSize++; + + WDB5RowSerializer serializer = new WDB5RowSerializer(this); + 
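// NOTE: from WDB5 onwards FieldMetaData.Bits stores (32 - bit width), which
// is why every packed write above uses `32 - fieldMeta.Bits`. A 16-bit column
// is stored as Bits = 16, an 8-bit column as Bits = 24:
//
//     static int FieldBitWidth(FieldMetaData meta) => 32 - meta.Bits;
//     static int FieldByteSize(FieldMetaData meta) => (32 - meta.Bits) / 8;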
serializer.Serialize(storage); + serializer.GetCopyRows(); + + RecordsCount = serializer.Records.Count - m_copyData.Count; + + using (var writer = new BinaryWriter(stream)) + { + int minIndex = storage.Keys.Min(); + int maxIndex = storage.Keys.Max(); + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + + writer.Write(WDB5FmtSig); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(StringTableSize); // if flags & 0x01 != 0, offset to the offset_map + writer.Write(reader.TableHash); + writer.Write(reader.LayoutHash); + writer.Write(minIndex); + writer.Write(maxIndex); + writer.Write(reader.Locale); + writer.Write(copyTableSize); // copytablesize + writer.Write((uint)Flags); + writer.Write((ushort)IdFieldIndex); + + if (storage.Count == 0) + return; + + // field meta + writer.WriteArray(m_meta); + + // record data + uint recordsOffset = (uint)writer.BaseStream.Position; + foreach (var record in serializer.Records) + if (!m_copyData.ContainsKey(record.Key)) + record.Value.CopyTo(writer.BaseStream); + + // string table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + writer.WriteCString(""); + foreach (var str in m_stringsTable) + writer.WriteCString(str.Key); + } + + // sparse data + if (Flags.HasFlagExt(DB2Flags.Sparse)) + { + // change the StringTableSize to the offset_map position + long oldPos = writer.BaseStream.Position; + writer.BaseStream.Position = 16; + writer.Write((uint)oldPos); + writer.BaseStream.Position = oldPos; + + WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1); + } + + // secondary key + if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) + WriteSecondaryKeyData(writer, storage, maxIndex - minIndex + 1); + + // index table + if (Flags.HasFlagExt(DB2Flags.Index)) + writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + + // copy table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + foreach (var copyRecord in m_copyData) + { + writer.Write(copyRecord.Key); + writer.Write(copyRecord.Value); + } + } + } + } + } +} diff --git a/DBCD.IO/Writers/WDB6Writer.cs b/DBCD.IO/Writers/WDB6Writer.cs new file mode 100644 index 0000000..30747e2 --- /dev/null +++ b/DBCD.IO/Writers/WDB6Writer.cs @@ -0,0 +1,275 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; + +namespace DBCD.IO.Writers +{ + class WDB6RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + private readonly FieldMetaData[] m_fieldMeta; + + + public WDB6RowSerializer(BaseWriter writer) + { + m_writer = writer; + m_fieldMeta = m_writer.Meta; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + int indexFieldOffSet = 0; + + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + { + indexFieldOffSet++; + continue; + } + + int fieldIndex = i - indexFieldOffSet; + + // common data fields + if (fieldIndex >= m_writer.FieldsCount) + { + m_writer.CommonData[fieldIndex - m_writer.FieldsCount].Add(id, Value32.Create(info.Getter(row))); + continue; + } + + if (info.IsArray) + { + if 
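// NOTE: per the commonly documented WDB5 layout, flags and id_index are two
// consecutive uint16 header fields, so the `(uint)Flags` write above likely
// widens the header by two bytes; WDC1 further down writes `(ushort)Flags`
// for the equivalent pair. Sketch of the documented header tail:
//
//     // ... copy_table_size : uint32
//     // flags               : uint16
//     // id_index            : uint16
//     writer.Write((ushort)Flags);
//     writer.Write((ushort)IdFieldIndex);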
(arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + + private static Dictionary, FieldMetaData, object>> simpleWriters = new Dictionary, FieldMetaData, object>> + { + [typeof(long)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(float)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(int)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(uint)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(short)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(ushort)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(sbyte)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(byte)] = (data, writer, fieldMeta, value) => WriteFieldValue(data, fieldMeta, value), + [typeof(string)] = (data, writer, fieldMeta, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCString((string)value); + else + WriteFieldValue(data, fieldMeta, writer.InternString((string)value)); + } + }; + + private static Dictionary, FieldMetaData, Array>> arrayWriters = new Dictionary, FieldMetaData, Array>> + { + [typeof(ulong[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(long[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(float[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(int[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(uint[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(ulong[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(ushort[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(short[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(byte[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(sbyte[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, array), + [typeof(string[])] = (data, writer, fieldMeta, array) => WriteFieldValueArray(data, fieldMeta, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(BitWriter r, FieldMetaData fieldMeta, object value) where TType : struct + { + r.Write((TType)value, 32 - fieldMeta.Bits); + } + + private static void 
WriteFieldValueArray<TType>(BitWriter r, FieldMetaData fieldMeta, Array value) where TType : struct
+        {
+            for (int i = 0; i < value.Length; i++)
+                r.Write((TType)value.GetValue(i), 32 - fieldMeta.Bits);
+        }
+    }
+
+    class WDB6Writer<T> : BaseWriter<T> where T : class
+    {
+        private const uint WDB6FmtSig = 0x36424457; // WDB6
+
+        public WDB6Writer(WDB6Reader reader, IDictionary<int, T> storage, Stream stream) : base(reader)
+        {
+            // always 2 empties
+            StringTableSize++;
+
+            m_commonData = new Dictionary<int, Value32>[m_meta.Length - FieldsCount];
+            // initialise each slot directly; Array.ForEach(m_commonData, x => x = new ...)
+            // only reassigns the lambda parameter and leaves every element null
+            for (int i = 0; i < m_commonData.Length; i++)
+                m_commonData[i] = new Dictionary<int, Value32>();
+
+            WDB6RowSerializer<T> serializer = new WDB6RowSerializer<T>(this);
+            serializer.Serialize(storage);
+            serializer.GetCopyRows();
+
+            RecordsCount = serializer.Records.Count - m_copyData.Count;
+
+            using (var writer = new BinaryWriter(stream))
+            {
+                int minIndex = storage.Keys.Min();
+                int maxIndex = storage.Keys.Max();
+                int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8;
+
+                writer.Write(WDB6FmtSig);
+                writer.Write(RecordsCount);
+                writer.Write(FieldsCount);
+                writer.Write(RecordSize);
+                writer.Write(StringTableSize); // if flags & 0x01 != 0, offset to the offset_map
+                writer.Write(reader.TableHash);
+                writer.Write(reader.LayoutHash);
+                writer.Write(minIndex);
+                writer.Write(maxIndex);
+                writer.Write(reader.Locale);
+                writer.Write(copyTableSize);
+                writer.Write((ushort)Flags);
+                writer.Write((ushort)IdFieldIndex);
+                writer.Write(m_meta.Length); // totalFieldCount
+                writer.Write(0); // commonDataSize
+
+                if (storage.Count == 0)
+                    return;
+
+                // field meta
+                for (int i = 0; i < FieldsCount; i++)
+                    writer.Write(m_meta[i]);
+
+                // record data
+                uint recordsOffset = (uint)writer.BaseStream.Position;
+                foreach (var record in serializer.Records)
+                    if (!m_copyData.TryGetValue(record.Key, out int parent))
+                        record.Value.CopyTo(writer.BaseStream);
+
+                // string table
+                if (!Flags.HasFlagExt(DB2Flags.Sparse))
+                {
+                    writer.WriteCString("");
+                    foreach (var str in m_stringsTable)
+                        writer.WriteCString(str.Key);
+                }
+
+                // sparse data
+                if (Flags.HasFlagExt(DB2Flags.Sparse))
+                {
+                    // change the StringTableSize to the offset_map position
+                    long oldPos = writer.BaseStream.Position;
+                    writer.BaseStream.Position = 16;
+                    writer.Write((uint)oldPos);
+                    writer.BaseStream.Position = oldPos;
+
+                    WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1);
+                }
+
+                // secondary key
+                if (Flags.HasFlagExt(DB2Flags.SecondaryKey))
+                    WriteSecondaryKeyData(writer, storage, maxIndex - minIndex + 1);
+
+                // index table
+                if (Flags.HasFlagExt(DB2Flags.Index))
+                    writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray());
+
+                // copy table
+                if (!Flags.HasFlagExt(DB2Flags.Sparse))
+                {
+                    foreach (var copyRecord in m_copyData)
+                    {
+                        writer.Write(copyRecord.Key);
+                        writer.Write(copyRecord.Value);
+                    }
+                }
+
+                // common data
+                // HACK this is bodged together
+                // - it only writes common data columns and all values including common ones
+                if (m_commonData.Length > 0)
+                {
+                    long startPos = writer.BaseStream.Position;
+
+                    writer.Write(m_meta.Length - FieldsCount);
+
+                    for (int i = 0; i < m_commonData.Length; i++)
+                    {
+                        writer.Write(m_commonData[i].Count);
+                        writer.Write(reader.CommonDataTypes[i]); // type
+
+                        foreach (var record in m_commonData[i])
+                        {
+                            writer.Write(record.Key);
+
+                            switch (reader.CommonDataIsAligned)
+                            {
+                                // ushort
+                                case false when reader.CommonDataTypes[i] == 1:
+                                    writer.Write(record.Value.GetValue<ushort>());
+                                    break;
+                                // byte
+                                case false when reader.CommonDataTypes[i] == 2:
+                                    writer.Write(record.Value.GetValue<byte>());
+                                    break;
+                                default:
+                                    writer.Write(record.Value.GetValue<uint>());
+                                    break;
+                            }
+                        }
+                    }
+
+                    // set the CommonDataSize; capture the end position before seeking,
+                    // otherwise the value written is (52 - startPos) rather than the size
+                    long endPos = writer.BaseStream.Position;
+                    writer.BaseStream.Position = 52;
+                    writer.Write((uint)(endPos - startPos));
+                    writer.BaseStream.Position = writer.BaseStream.Length;
+                }
+            }
+        }
+    }
+}
diff --git a/DBCD.IO/Writers/WDBCWriter.cs b/DBCD.IO/Writers/WDBCWriter.cs
new file mode 100644
index 0000000..c7d952c
--- /dev/null
+++ b/DBCD.IO/Writers/WDBCWriter.cs
@@ -0,0 +1,136 @@
+using DBCD.IO.Common;
+using DBCD.IO.Readers;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Text;
+
+namespace DBCD.IO.Writers
+{
+    class WDBCRowSerializer<T> : IDBRowSerializer<T> where T : class
+    {
+        public IDictionary<int, BitWriter> Records { get; private set; }
+
+        private readonly BaseWriter<T> m_writer;
+
+
+        public WDBCRowSerializer(BaseWriter<T> writer)
+        {
+            m_writer = writer;
+            Records = new Dictionary<int, BitWriter>();
+        }
+
+        public void Serialize(IDictionary<int, T> rows)
+        {
+            foreach (var row in rows)
+                Serialize(row.Key, row.Value);
+        }
+
+        public void Serialize(int id, T row)
+        {
+            BitWriter bitWriter = new BitWriter(m_writer.RecordSize);
+
+            for (int i = 0; i < m_writer.FieldCache.Length; i++)
+            {
+                FieldCache<T> info = m_writer.FieldCache[i];
+
+                if (info.IsArray)
+                {
+                    if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer))
+                        writer(bitWriter, m_writer, (Array)info.Getter(row));
+                    else
+                        throw new Exception("Unhandled array type: " + typeof(T).Name);
+                }
+                else
+                {
+                    if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer))
+                        writer(bitWriter, m_writer, info.Getter(row));
+                    else
+                        throw new Exception("Unhandled field type: " + typeof(T).Name);
+                }
+            }
+
+            // pad to record size
+            bitWriter.Resize(m_writer.RecordSize);
+            Records[id] = bitWriter;
+        }
+
+        public void GetCopyRows()
+        {
+            throw new NotImplementedException();
+        }
+
+
+        private static Dictionary<Type, Action<BitWriter, BaseWriter<T>, object>> simpleWriters = new Dictionary<Type, Action<BitWriter, BaseWriter<T>, object>>
+        {
+            [typeof(long)] = (data, writer, value) => WriteFieldValue<long>(data, value),
+            [typeof(float)] = (data, writer, value) => WriteFieldValue<float>(data, value),
+            [typeof(int)] = (data, writer, value) => WriteFieldValue<int>(data, value),
+            [typeof(uint)] = (data, writer, value) => WriteFieldValue<uint>(data, value),
+            [typeof(short)] = (data, writer, value) => WriteFieldValue<short>(data, value),
+            [typeof(ushort)] = (data, writer, value) => WriteFieldValue<ushort>(data, value),
+            [typeof(sbyte)] = (data, writer, value) => WriteFieldValue<sbyte>(data, value),
+            [typeof(byte)] = (data, writer, value) => WriteFieldValue<byte>(data, value),
+            [typeof(string)] = (data, writer, value) => WriteFieldValue<int>(data, writer.InternString((string)value)),
+        };
+
+        private readonly Dictionary<Type, Action<BitWriter, BaseWriter<T>, Array>> arrayWriters = new Dictionary<Type, Action<BitWriter, BaseWriter<T>, Array>>
+        {
+            [typeof(ulong[])] = (data, writer, array) => WriteFieldValueArray<ulong>(data, array),
+            [typeof(long[])] = (data, writer, array) => WriteFieldValueArray<long>(data, array),
+            [typeof(float[])] = (data, writer, array) => WriteFieldValueArray<float>(data, array),
+            [typeof(int[])] = (data, writer, array) => WriteFieldValueArray<int>(data, array),
+            [typeof(uint[])] = (data, writer, array) => WriteFieldValueArray<uint>(data, array),
+            [typeof(ushort[])] = (data, writer, array) => WriteFieldValueArray<ushort>(data, array),
+            [typeof(short[])] = (data, writer, array) => WriteFieldValueArray<short>(data, array),
+            [typeof(byte[])] = (data, writer, array) => WriteFieldValueArray<byte>(data, array),
+            [typeof(sbyte[])] = (data, writer,
array) => WriteFieldValueArray(data, array), + [typeof(string[])] = (data, writer, array) => WriteFieldValueArray(data, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(BitWriter r, object value) where TType : struct + { + r.WriteAligned((TType)value); + } + + private static void WriteFieldValueArray(BitWriter r, Array value) where TType : struct + { + for (int i = 0; i < value.Length; i++) + r.WriteAligned((TType)value.GetValue(i)); + } + } + + class WDBCWriter : BaseWriter where T : class + { + private const int HeaderSize = 20; + private const uint WDBCFmtSig = 0x43424457; // WDBC + + public WDBCWriter(WDBCReader reader, IDictionary storage, Stream stream) : base(reader) + { + WDBCRowSerializer serializer = new WDBCRowSerializer(this); + serializer.Serialize(storage); + + RecordsCount = storage.Count; + + using (var writer = new BinaryWriter(stream)) + { + writer.Write(WDBCFmtSig); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(StringTableSize); + + if (RecordsCount == 0) + return; + + foreach (var record in serializer.Records) + record.Value.CopyTo(writer.BaseStream); + + foreach (var str in m_stringsTable) + writer.WriteCString(str.Key); + } + } + } +} diff --git a/DBCD.IO/Writers/WDC1Writer.cs b/DBCD.IO/Writers/WDC1Writer.cs new file mode 100644 index 0000000..650f25c --- /dev/null +++ b/DBCD.IO/Writers/WDC1Writer.cs @@ -0,0 +1,379 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; + +namespace DBCD.IO.Writers +{ + class WDC1RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + private readonly FieldMetaData[] m_fieldMeta; + private readonly ColumnMetaData[] m_columnMeta; + private readonly List[] m_palletData; + private readonly Dictionary[] m_commonData; + + private static readonly Value32Comparer Value32Comparer = new Value32Comparer(); + + + public WDC1RowSerializer(BaseWriter writer) + { + m_writer = writer; + m_fieldMeta = m_writer.Meta; + m_columnMeta = m_writer.ColumnMeta; + m_palletData = m_writer.PalletData; + m_commonData = m_writer.CommonData; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + int indexFieldOffSet = 0; + + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + { + indexFieldOffSet++; + continue; + } + + int fieldIndex = i - indexFieldOffSet; + + // reference data field + if (fieldIndex >= m_writer.Meta.Length) + { + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); + continue; + } + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], 
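// NOTE: in WDC1's serializer above, fields past Meta.Length are relationship
// (reference) columns: their value is not bit-packed into the record but
// collected per row and emitted as a separate block. GetDataSizes later sizes
// that block as 12 + 8 * count, matching this layout (illustrative struct):
//
//     struct ReferenceDataHeader
//     {
//         public int NumRecords;  // 4 bytes
//         public int MinId;       // 4 bytes
//         public int MaxId;       // 4 bytes => 12-byte header
//         // followed by NumRecords entries of {int foreignId, int rowIndex}, 8 bytes each
//     }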
m_commonData[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + + private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> + { + [typeof(long)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(float)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(int)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(uint)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(short)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(ushort)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(sbyte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(byte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(string)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCString((string)value); + else + WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, writer.InternString((string)value)); + } + }; + + private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> + { + [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(long[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(float[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(int[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(uint[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), 
+ [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(ushort[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(short[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(byte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(sbyte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(string[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, object value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + r.Write((TType)value, bitSize); + break; + } + case CompressionType.Immediate: + { + r.Write((TType)value, columnMeta.Immediate.BitWidth); + break; + } + case CompressionType.Common: + { + if (!columnMeta.Common.DefaultValue.GetValue().Equals(value)) + commonData.Add(Id, Value32.Create((TType)value)); + break; + } + case CompressionType.Pallet: + { + Value32[] array = new[] { Value32.Create((TType)value) }; + + int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + + private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, Array value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + for (int i = 0; i < value.Length; i++) + r.Write((TType)value.GetValue(i), bitSize); + + break; + } + case CompressionType.PalletArray: + { + // get data + Value32[] array = new Value32[value.Length]; + for (int i = 0; i < value.Length; i++) + array[i] = Value32.Create((TType)value.GetValue(i)); + + int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + } + + class WDC1Writer : BaseWriter where T : class + { + private const int HeaderSize = 84; + private const uint WDC1FmtSig = 0x31434457; // WDC1 + + public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) : base(reader) + { + // always 2 empties + StringTableSize++; + + WDC1RowSerializer serializer = new WDC1RowSerializer(this); + serializer.Serialize(storage); + serializer.GetCopyRows(); + + RecordsCount = serializer.Records.Count - m_copyData.Count; + + 
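// NOTE: the Pallet case in WriteFieldValue above de-duplicates values into a
// per-column palette and stores only the palette index, Pallet.BitWidth bits
// wide, in the record. The same idea in isolation (plain uints for clarity):
//
//     static int PalletIndex(List<uint> palette, uint value)
//     {
//         int index = palette.IndexOf(value);   // linear scan, like FindIndex above
//         if (index == -1)
//         {
//             index = palette.Count;
//             palette.Add(value);               // first sighting extends the palette
//         }
//         return index;                         // written with Pallet.BitWidth bits
//     }
//
// PalletArray works the same way but compares whole Value32[] rows, hence the
// custom Value32Comparer.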
var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); + + using (var writer = new BinaryWriter(stream)) + { + int minIndex = storage.Keys.Min(); + int maxIndex = storage.Keys.Max(); + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + + writer.Write(WDC1FmtSig); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(StringTableSize); + writer.Write(reader.TableHash); + writer.Write(reader.LayoutHash); + writer.Write(minIndex); + writer.Write(maxIndex); + writer.Write(reader.Locale); + writer.Write(copyTableSize); + writer.Write((ushort)Flags); + writer.Write((ushort)IdFieldIndex); + + writer.Write(FieldsCount); // totalFieldCount + writer.Write(reader.PackedDataOffset); + writer.Write(m_referenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount + writer.Write(0); // sparseTableOffset + writer.Write(RecordsCount * 4); // indexTableSize + writer.Write(m_columnMeta.Length * 24); // ColumnMetaDataSize + writer.Write(commonDataSize); + writer.Write(palletDataSize); + writer.Write(referenceDataSize); + + if (storage.Count == 0) + return; + + // field meta + writer.WriteArray(m_meta); + + // record data + uint recordsOffset = (uint)writer.BaseStream.Position; + foreach (var record in serializer.Records) + if (!m_copyData.TryGetValue(record.Key, out int parent)) + record.Value.CopyTo(writer.BaseStream); + + // string table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + writer.WriteCString(""); + foreach (var str in m_stringsTable) + writer.WriteCString(str.Key); + } + + // sparse data + if (Flags.HasFlagExt(DB2Flags.Sparse)) + { + // set the sparseTableOffset + long oldPos = writer.BaseStream.Position; + writer.BaseStream.Position = 60; + writer.Write((uint)oldPos); + writer.BaseStream.Position = oldPos; + + WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1); + } + + // index table + if (Flags.HasFlagExt(DB2Flags.Index)) + writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + + // copy table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + foreach (var copyRecord in m_copyData) + { + writer.Write(copyRecord.Key); + writer.Write(copyRecord.Value); + } + } + + // column meta data + writer.WriteArray(m_columnMeta); + + // pallet data + for (int i = 0; i < m_columnMeta.Length; i++) + { + if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + { + foreach (var palletData in m_palletData[i]) + writer.WriteArray(palletData); + } + } + + // common data + for (int i = 0; i < m_columnMeta.Length; i++) + { + if (m_columnMeta[i].CompressionType == CompressionType.Common) + { + foreach (var commondata in m_commonData[i]) + { + writer.Write(commondata.Key); + writer.Write(commondata.Value.GetValue()); + } + } + } + + // reference data + if (m_referenceData.Count > 0) + { + writer.Write(m_referenceData.Count); + writer.Write(m_referenceData.Min()); + writer.Write(m_referenceData.Max()); + + for (int i = 0; i < m_referenceData.Count; i++) + { + writer.Write(m_referenceData[i]); + writer.Write(i); + } + } + } + } + + private (int CommonDataSize, int PalletDataSize, int RefDataSize) GetDataSizes() + { + // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords] + int refSize = 0; + if (m_referenceData.Count > 0) + refSize = 12 + (m_referenceData.Count * 8); + + int commonSize = 0, palletSize = 0; + for (int i = 0; i < m_columnMeta.Length; i++) + { + switch 
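// NOTE: Common compression stores nothing in the record itself: rows whose
// value equals the column default are skipped at serialization time, and only
// the exceptions survive as {int id, Value32 value} pairs, 8 bytes each,
// hence `m_commonData[i].Count * 8` in the switch that follows. In sketch:
//
//     // during serialization
//     if (!defaultValue.Equals(value))
//         overrides[id] = value;        // Dictionary<int, uint>
//
//     // during writing
//     foreach (var kv in overrides)
//     {
//         writer.Write(kv.Key);         // row id
//         writer.Write(kv.Value);       // 4-byte payload
//     }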
(m_columnMeta[i].CompressionType) + { + // {uint id, uint copyid}[] + case CompressionType.Common: + m_columnMeta[i].AdditionalDataSize = (uint)(m_commonData[i].Count * 8); + commonSize += (int)m_columnMeta[i].AdditionalDataSize; + break; + + // {uint values[Cardinality]}[] + case CompressionType.Pallet: + case CompressionType.PalletArray: + m_columnMeta[i].AdditionalDataSize = (uint)m_palletData[i].Sum(x => x.Length * 4); + palletSize += (int)m_columnMeta[i].AdditionalDataSize; + break; + } + } + + return (commonSize, palletSize, refSize); + } + } +} diff --git a/DBCD.IO/Writers/WDC2Writer.cs b/DBCD.IO/Writers/WDC2Writer.cs new file mode 100644 index 0000000..eb8a791 --- /dev/null +++ b/DBCD.IO/Writers/WDC2Writer.cs @@ -0,0 +1,452 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Runtime.CompilerServices; + +namespace DBCD.IO.Writers +{ + class WDC2RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + private readonly FieldMetaData[] m_fieldMeta; + private readonly ColumnMetaData[] m_columnMeta; + private readonly List[] m_palletData; + private readonly Dictionary[] m_commonData; + + private static readonly Value32Comparer Value32Comparer = new Value32Comparer(); + + + public WDC2RowSerializer(BaseWriter writer) + { + m_writer = writer; + m_fieldMeta = m_writer.Meta; + m_columnMeta = m_writer.ColumnMeta; + m_palletData = m_writer.PalletData; + m_commonData = m_writer.CommonData; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + int indexFieldOffSet = 0; + + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + { + indexFieldOffSet++; + continue; + } + + int fieldIndex = i - indexFieldOffSet; + + // reference data field + if (fieldIndex >= m_writer.Meta.Length) + { + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); + continue; + } + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + public void UpdateStringOffsets(IDictionary rows) + { + if (m_writer.Flags.HasFlagExt(DB2Flags.Sparse) || 
m_writer.StringTableSize <= 1) + return; + + int indexFieldOffSet = 0; + var fieldInfos = new Dictionary>(); + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + indexFieldOffSet++; + else if (m_writer.FieldCache[i].Field.FieldType == typeof(string)) + fieldInfos[i - indexFieldOffSet] = m_writer.FieldCache[i]; + else if (m_writer.FieldCache[i].Field.FieldType == typeof(string[])) + fieldInfos[i - indexFieldOffSet] = m_writer.FieldCache[i]; + } + + if (fieldInfos.Count == 0) + return; + + int recordOffset = (Records.Count - m_writer.CopyData.Count) * m_writer.RecordSize; + int fieldOffset = 0; + + foreach (var record in Records) + { + // skip copy records + if (m_writer.CopyData.ContainsKey(record.Key)) + continue; + + foreach (var fieldInfo in fieldInfos) + { + int index = fieldInfo.Key; + var info = fieldInfo.Value; + + var columnMeta = m_columnMeta[index]; + if (columnMeta.CompressionType != CompressionType.None) + throw new Exception("CompressionType != CompressionType.None"); + + int bitSize = 32 - m_fieldMeta[index].Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + if (info.IsArray) + { + var array = (string[])info.Getter(rows[record.Key]); + for (int i = 0; i < array.Length; i++) + { + fieldOffset = m_writer.StringTable[array[i]] + recordOffset - (columnMeta.RecordOffset / 8 * i); + record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset + (i * bitSize)); + } + } + else + { + fieldOffset = m_writer.StringTable[(string)info.Getter(rows[record.Key])] + recordOffset - (columnMeta.RecordOffset / 8); + record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset); + } + } + + recordOffset -= m_writer.RecordSize; + } + } + + + private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> + { + [typeof(long)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(float)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(int)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(uint)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(short)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(ushort)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(sbyte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(byte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(string)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCString((string)value); 
+ else + WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, writer.InternString((string)value)); + } + }; + + private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> + { + [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(long[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(float[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(int[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(uint[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(ushort[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(short[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(byte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(sbyte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(string[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, object value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + r.Write((TType)value, bitSize); + break; + } + case CompressionType.Immediate: + case CompressionType.SignedImmediate: + { + r.Write((TType)value, columnMeta.Immediate.BitWidth); + break; + } + case CompressionType.Common: + { + if (!columnMeta.Common.DefaultValue.GetValue().Equals(value)) + commonData.Add(Id, Value32.Create((TType)value)); + break; + } + case CompressionType.Pallet: + { + Value32[] array = new[] { Value32.Create((TType)value) }; + + int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + + private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, Array value) where 
TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + for (int i = 0; i < value.Length; i++) + r.Write((TType)value.GetValue(i), bitSize); + + break; + } + case CompressionType.PalletArray: + { + // get data + Value32[] array = new Value32[value.Length]; + for (int i = 0; i < value.Length; i++) + array[i] = Value32.Create((TType)value.GetValue(i)); + + int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + } + + class WDC2Writer : BaseWriter where T : class + { + private const int HeaderSize = 72; + + public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) : base(reader) + { + // always 2 empties + StringTableSize++; + + WDC2RowSerializer serializer = new WDC2RowSerializer(this); + serializer.Serialize(storage); + serializer.GetCopyRows(); + serializer.UpdateStringOffsets(storage); + + RecordsCount = serializer.Records.Count - m_copyData.Count; + + var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); + + using (var writer = new BinaryWriter(stream)) + { + int minIndex = storage.Keys.Min(); + int maxIndex = storage.Keys.Max(); + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + + writer.Write(reader.Signature); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(StringTableSize); + writer.Write(reader.TableHash); + writer.Write(reader.LayoutHash); + writer.Write(minIndex); + writer.Write(maxIndex); + writer.Write(reader.Locale); + writer.Write((ushort)Flags); + writer.Write((ushort)IdFieldIndex); + + writer.Write(FieldsCount); // totalFieldCount + writer.Write(reader.PackedDataOffset); + writer.Write(m_referenceData.Count > 0 ? 
1 : 0); // RelationshipColumnCount + writer.Write(m_columnMeta.Length * 24); // ColumnMetaDataSize + writer.Write(commonDataSize); + writer.Write(palletDataSize); + writer.Write(1); // sections count + + if (storage.Count == 0) + return; + + // section header + int fileOffset = HeaderSize + (m_meta.Length * 4) + (m_columnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; + + writer.Write(0UL); // TactKeyLookup + writer.Write(fileOffset); // FileOffset + writer.Write(RecordsCount); // NumRecords + writer.Write(StringTableSize); + writer.Write(copyTableSize); + writer.Write(0); // sparseTableOffset + writer.Write(RecordsCount * 4); // indexTableSize + writer.Write(referenceDataSize); + + // field meta + writer.WriteArray(m_meta); + + // column meta data + writer.WriteArray(m_columnMeta); + + // pallet data + for (int i = 0; i < m_columnMeta.Length; i++) + { + if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + { + foreach (var palletData in m_palletData[i]) + writer.WriteArray(palletData); + } + } + + // common data + for (int i = 0; i < m_columnMeta.Length; i++) + { + if (m_columnMeta[i].CompressionType == CompressionType.Common) + { + foreach (var commondata in m_commonData[i]) + { + writer.Write(commondata.Key); + writer.Write(commondata.Value.GetValue()); + } + } + } + + // record data + uint recordsOffset = (uint)writer.BaseStream.Position; + foreach (var record in serializer.Records) + if (!m_copyData.TryGetValue(record.Key, out int parent)) + record.Value.CopyTo(writer.BaseStream); + + // string table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + writer.WriteCString(""); + foreach (var str in m_stringsTable) + writer.WriteCString(str.Key); + } + + // sparse data + if (Flags.HasFlagExt(DB2Flags.Sparse)) + { + // set the sparseTableOffset + long oldPos = writer.BaseStream.Position; + writer.BaseStream.Position = 96; + writer.Write((uint)oldPos); + writer.BaseStream.Position = oldPos; + + WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1); + } + + // index table + if (Flags.HasFlagExt(DB2Flags.Index)) + writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + + // copy table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + foreach (var copyRecord in m_copyData) + { + writer.Write(copyRecord.Key); + writer.Write(copyRecord.Value); + } + } + + // reference data + if (m_referenceData.Count > 0) + { + writer.Write(m_referenceData.Count); + writer.Write(m_referenceData.Min()); + writer.Write(m_referenceData.Max()); + + for (int i = 0; i < m_referenceData.Count; i++) + { + writer.Write(m_referenceData[i]); + writer.Write(i); + } + } + } + } + + private (int CommonDataSize, int PalletDataSize, int RefDataSize) GetDataSizes() + { + // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords] + int refSize = 0; + if (m_referenceData.Count > 0) + refSize = 12 + (m_referenceData.Count * 8); + + int commonSize = 0, palletSize = 0; + for (int i = 0; i < m_columnMeta.Length; i++) + { + switch (m_columnMeta[i].CompressionType) + { + // {uint id, uint copyid}[] + case CompressionType.Common: + m_columnMeta[i].AdditionalDataSize = (uint)(m_commonData[i].Count * 8); + commonSize += (int)m_columnMeta[i].AdditionalDataSize; + break; + + // {uint values[cardinality]}[] + case CompressionType.Pallet: + case CompressionType.PalletArray: + m_columnMeta[i].AdditionalDataSize = (uint)m_palletData[i].Sum(x => x.Length * 4); + palletSize += 
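// NOTE: WDC2 introduces sections and this writer emits exactly one. A mirror
// of the 36 bytes written for the section header above (illustrative struct;
// the real type behind Unsafe.SizeOf is DBCD.IO's internal section header):
//
//     struct SectionHeaderSketch
//     {
//         public ulong TactKeyLookup;        // 0 for unencrypted data
//         public int FileOffset;             // where record data begins
//         public int NumRecords;
//         public int StringTableSize;
//         public int CopyTableSize;
//         public int SparseTableOffset;      // back-patched later for sparse tables
//         public int IndexDataSize;          // RecordsCount * 4
//         public int ParentLookupDataSize;   // reference data size
//     }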
(int)m_columnMeta[i].AdditionalDataSize; + break; + } + } + + return (commonSize, palletSize, refSize); + } + } +} diff --git a/DBCD.IO/Writers/WDC3Writer.cs b/DBCD.IO/Writers/WDC3Writer.cs new file mode 100644 index 0000000..f2460e6 --- /dev/null +++ b/DBCD.IO/Writers/WDC3Writer.cs @@ -0,0 +1,465 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text; + +namespace DBCD.IO.Writers +{ + class WDC3RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + private readonly FieldMetaData[] m_fieldMeta; + private readonly ColumnMetaData[] m_columnMeta; + private readonly List[] m_palletData; + private readonly Dictionary[] m_commonData; + + private static readonly Value32Comparer Value32Comparer = new Value32Comparer(); + + + public WDC3RowSerializer(BaseWriter writer) + { + m_writer = writer; + m_fieldMeta = m_writer.Meta; + m_columnMeta = m_writer.ColumnMeta; + m_palletData = m_writer.PalletData; + m_commonData = m_writer.CommonData; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + int indexFieldOffSet = 0; + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + { + indexFieldOffSet++; + continue; + } + + int fieldIndex = i - indexFieldOffSet; + + // reference data field + if (fieldIndex >= m_writer.Meta.Length) + { + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); + continue; + } + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1).ToArray(); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + public void UpdateStringOffsets(IDictionary rows) + { + if (m_writer.Flags.HasFlagExt(DB2Flags.Sparse) || m_writer.StringTableSize <= 1) + return; + + int indexFieldOffSet = 0; + var fieldInfos = new Dictionary>(); + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + indexFieldOffSet++; + else if (m_writer.FieldCache[i].Field.FieldType == typeof(string)) + fieldInfos[i - indexFieldOffSet] = m_writer.FieldCache[i]; + else if 
(m_writer.FieldCache[i].Field.FieldType == typeof(string[])) + fieldInfos[i - indexFieldOffSet] = m_writer.FieldCache[i]; + } + + if (fieldInfos.Count == 0) + return; + + int recordOffset = (Records.Count - m_writer.CopyData.Count) * m_writer.RecordSize; + int fieldOffset = 0; + + foreach (var record in Records) + { + // skip copy records + if (m_writer.CopyData.ContainsKey(record.Key)) + continue; + + foreach (var fieldInfo in fieldInfos) + { + int index = fieldInfo.Key; + var info = fieldInfo.Value; + + var columnMeta = m_columnMeta[index]; + if (columnMeta.CompressionType != CompressionType.None) + throw new Exception("CompressionType != CompressionType.None"); + + int bitSize = 32 - m_fieldMeta[index].Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + if (info.IsArray) + { + var array = (string[])info.Getter(rows[record.Key]); + for (int i = 0; i < array.Length; i++) + { + fieldOffset = m_writer.StringTable[array[i]] + recordOffset - (columnMeta.RecordOffset / 8 * i); + record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset + (i * bitSize)); + } + } + else + { + fieldOffset = m_writer.StringTable[(string)info.Getter(rows[record.Key])] + recordOffset - (columnMeta.RecordOffset / 8); + record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset); + } + } + + recordOffset -= m_writer.RecordSize; + } + } + + + private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> + { + [typeof(long)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(float)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(int)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(uint)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(short)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(ushort)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(sbyte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(byte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(string)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCString((string)value); + else + WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, writer.InternString((string)value)); + } + }; + + private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> + { + [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => 
WriteFieldValueArray<ulong>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(long[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<long>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(float[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<float>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(int[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<int>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(uint[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<uint>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(ushort[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<ushort>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(short[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<short>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(byte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<byte>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(sbyte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<sbyte>(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(string[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray<int>(data, fieldMeta, columnMeta, palletData, commonData, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue<TType>(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List<Value32[]> palletData, Dictionary<int, Value32> commonData, object value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + r.Write((TType)value, bitSize); + break; + } + case CompressionType.Immediate: + case CompressionType.SignedImmediate: + { + r.Write((TType)value, columnMeta.Immediate.BitWidth); + break; + } + case CompressionType.Common: + { + if (!columnMeta.Common.DefaultValue.GetValue<TType>().Equals(value)) + commonData.Add(Id, Value32.Create((TType)value)); + break; + } + case CompressionType.Pallet: + { + Value32[] array = new[] { Value32.Create((TType)value) }; + + int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + + private static void WriteFieldValueArray<TType>(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List<Value32[]> palletData, Dictionary<int, Value32> commonData, Array value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + for (int i = 0; i < value.Length; i++) + r.Write((TType)value.GetValue(i), bitSize); + + break; + } + case CompressionType.PalletArray: + { + // get data + Value32[] array = new Value32[value.Length]; +
for (int i = 0; i < value.Length; i++) + array[i] = Value32.Create((TType)value.GetValue(i)); + + int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + } + + class WDC3Writer : BaseWriter where T : class + { + private const int HeaderSize = 72; + private const uint WDC3FmtSig = 0x33434457; // WDC3 + + public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) : base(reader) + { + // always 2 empties + StringTableSize++; + + WDC3RowSerializer serializer = new WDC3RowSerializer(this); + serializer.Serialize(storage); + serializer.GetCopyRows(); + serializer.UpdateStringOffsets(storage); + + RecordsCount = serializer.Records.Count - m_copyData.Count; + + var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); + + using (var writer = new BinaryWriter(stream)) + { + int minIndex = storage.Keys.Min(); + int maxIndex = storage.Keys.Max(); + + writer.Write(WDC3FmtSig); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(StringTableSize); + writer.Write(reader.TableHash); + writer.Write(reader.LayoutHash); + writer.Write(minIndex); + writer.Write(maxIndex); + writer.Write(reader.Locale); + writer.Write((ushort)Flags); + writer.Write((ushort)IdFieldIndex); + + writer.Write(FieldsCount); // totalFieldCount + writer.Write(reader.PackedDataOffset); + writer.Write(m_referenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount + writer.Write(m_columnMeta.Length * 24); // ColumnMetaDataSize + writer.Write(commonDataSize); + writer.Write(palletDataSize); + writer.Write(1); // sections count + + if (storage.Count == 0) + return; + + // section header + int fileOffset = HeaderSize + (m_meta.Length * 4) + (m_columnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; + + writer.Write(0UL); // TactKeyLookup + writer.Write(fileOffset); // FileOffset + writer.Write(RecordsCount); // NumRecords + writer.Write(StringTableSize); + writer.Write(0); // OffsetRecordsEndOffset + writer.Write(RecordsCount * 4); // IndexDataSize + writer.Write(referenceDataSize); // ParentLookupDataSize + writer.Write(Flags.HasFlagExt(DB2Flags.Sparse) ? 
RecordsCount : 0); // OffsetMapIDCount + writer.Write(m_copyData.Count); // CopyTableCount + + // field meta + writer.WriteArray(m_meta); + + // column meta data + writer.WriteArray(m_columnMeta); + + // pallet data + for (int i = 0; i < m_columnMeta.Length; i++) + { + if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + { + foreach (var palletData in m_palletData[i]) + writer.WriteArray(palletData); + } + } + + // common data + for (int i = 0; i < m_columnMeta.Length; i++) + { + if (m_columnMeta[i].CompressionType == CompressionType.Common) + { + foreach (var commondata in m_commonData[i]) + { + writer.Write(commondata.Key); + writer.Write(commondata.Value.GetValue<int>()); + } + } + } + + // record data + var m_sparseEntries = new Dictionary<int, SparseEntry>(storage.Count); + foreach (var record in serializer.Records) + { + if (!m_copyData.TryGetValue(record.Key, out int parent)) + { + m_sparseEntries.Add(record.Key, new SparseEntry() + { + Offset = (uint)writer.BaseStream.Position, + Size = (ushort)record.Value.TotalBytesWrittenOut + }); + + record.Value.CopyTo(writer.BaseStream); + } + } + + // string table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + writer.WriteCString(""); + foreach (var str in m_stringsTable) + writer.WriteCString(str.Key); + } + + // set the OffsetRecordsEndOffset + if (Flags.HasFlagExt(DB2Flags.Sparse)) + { + long oldPos = writer.BaseStream.Position; + writer.BaseStream.Position = 92; + writer.Write((uint)oldPos); + writer.BaseStream.Position = oldPos; + } + + // index table + if (Flags.HasFlagExt(DB2Flags.Index)) + writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + + // copy table + foreach (var copyRecord in m_copyData) + { + writer.Write(copyRecord.Key); + writer.Write(copyRecord.Value); + } + + // sparse data + if (Flags.HasFlagExt(DB2Flags.Sparse)) + writer.WriteArray(m_sparseEntries.Values.ToArray()); + + // reference data + if (m_referenceData.Count > 0) + { + writer.Write(m_referenceData.Count); + writer.Write(m_referenceData.Min()); + writer.Write(m_referenceData.Max()); + + for (int i = 0; i < m_referenceData.Count; i++) + { + writer.Write(m_referenceData[i]); + writer.Write(i); + } + } + + // sparse data ids + if (Flags.HasFlagExt(DB2Flags.Sparse)) + writer.WriteArray(m_sparseEntries.Keys.ToArray()); + } + } + + private (int CommonDataSize, int PalletDataSize, int RefDataSize) GetDataSizes() + { + // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords] + int refSize = 0; + if (m_referenceData.Count > 0) + refSize = 12 + (m_referenceData.Count * 8); + + int commonSize = 0, palletSize = 0; + for (int i = 0; i < m_columnMeta.Length; i++) + { + switch (m_columnMeta[i].CompressionType) + { + // {uint id, uint copyid}[] + case CompressionType.Common: + m_columnMeta[i].AdditionalDataSize = (uint)(m_commonData[i].Count * 8); + commonSize += (int)m_columnMeta[i].AdditionalDataSize; + break; + + // {uint values[Cardinality]}[] + case CompressionType.Pallet: + case CompressionType.PalletArray: + m_columnMeta[i].AdditionalDataSize = (uint)m_palletData[i].Sum(x => x.Length * 4); + palletSize += (int)m_columnMeta[i].AdditionalDataSize; + break; + } + } + + return (commonSize, palletSize, refSize); + } + } +} From 6f29aafa9e59a5fa0092d2d280c9459d7f6ba314 Mon Sep 17 00:00:00 2001 From: barncastle Date: Wed, 11 Sep 2019 17:18:28 +0100 Subject: [PATCH 06/40] formatting, extensions + code cleanup --- DBCD.IO/Attributes/LocaleAttribute.cs | 2 -
DBCD.IO/Common/IDBRow.cs | 6 +-- DBCD.IO/DBReader.cs | 7 ++-- DBCD.IO/Extensions.cs | 58 +++++++++++---------------- DBCD.IO/FieldCache.cs | 2 +- DBCD.IO/HotfixReader.cs | 4 +- DBCD.IO/Readers/BaseReader.cs | 16 +++++++- DBCD.IO/Readers/HTFXReader.cs | 2 +- DBCD.IO/Readers/WDB2Reader.cs | 4 +- DBCD.IO/Readers/WDC1Reader.cs | 3 +- DBCD.IO/Writers/BaseWriter.cs | 3 +- DBCD.IO/Writers/WDB2Writer.cs | 1 - DBCD.IO/Writers/WDB3Writer.cs | 2 +- DBCD.IO/Writers/WDB6Writer.cs | 5 +-- DBCD.IO/Writers/WDBCWriter.cs | 1 - DBCD.IO/Writers/WDC3Writer.cs | 1 - 16 files changed, 53 insertions(+), 64 deletions(-) diff --git a/DBCD.IO/Attributes/LocaleAttribute.cs b/DBCD.IO/Attributes/LocaleAttribute.cs index e9a6d97..9d6da2b 100644 --- a/DBCD.IO/Attributes/LocaleAttribute.cs +++ b/DBCD.IO/Attributes/LocaleAttribute.cs @@ -1,6 +1,4 @@ using System; -using System.Collections.Generic; -using System.Text; namespace DBCD.IO.Attributes { diff --git a/DBCD.IO/Common/IDBRow.cs b/DBCD.IO/Common/IDBRow.cs index ea6d10e..376d48d 100644 --- a/DBCD.IO/Common/IDBRow.cs +++ b/DBCD.IO/Common/IDBRow.cs @@ -1,8 +1,4 @@ -using System; -using System.Collections.Generic; -using System.Text; - -namespace DBCD.IO.Common +namespace DBCD.IO.Common { interface IDBRow { diff --git a/DBCD.IO/DBReader.cs b/DBCD.IO/DBReader.cs index e5d076f..01e8a82 100644 --- a/DBCD.IO/DBReader.cs +++ b/DBCD.IO/DBReader.cs @@ -1,8 +1,7 @@ -using System; +using DBCD.IO.Readers; +using System; using System.Collections.Generic; using System.IO; -using System.Linq; -using DBCD.IO.Readers; namespace DBCD.IO { @@ -75,7 +74,7 @@ public DBReader(Stream stream) protected virtual void ReadRecords(IDictionary storage) where T : class, new() { - var fieldCache = typeof(T).GetFields().Select(x => new FieldCache(x)).ToArray(); + var fieldCache = typeof(T).ToFieldCache(); _reader.Enumerate((row) => { diff --git a/DBCD.IO/Extensions.cs b/DBCD.IO/Extensions.cs index 8160651..06d8734 100644 --- a/DBCD.IO/Extensions.cs +++ b/DBCD.IO/Extensions.cs @@ -3,7 +3,6 @@ using System.Linq.Expressions; using System.Reflection; using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; using System.Text; namespace DBCD.IO @@ -30,6 +29,17 @@ public static Func GetGetter(this FieldInfo fieldInfo) return Expression.Lambda>(convertExpression, paramExpression).Compile(); } + public static FieldCache[] ToFieldCache(this Type type) + { + var fields = type.GetFields(); + + var cache = new FieldCache[fields.Length]; + for (int i = 0; i < fields.Length; i++) + cache[i] = new FieldCache(fields[i]); + + return cache; + } + public static T GetAttribute(this FieldInfo fieldInfo) where T : Attribute { return Attribute.GetCustomAttribute(fieldInfo, typeof(T)) as T; @@ -48,12 +58,17 @@ public static void Write(this BinaryWriter writer, T value) where T : struct writer.Write(buffer); } - public static T[] ReadArray(this BinaryReader reader, int size) where T : struct + public static unsafe T[] ReadArray(this BinaryReader reader, int size) where T : struct { - int numBytes = Marshal.SizeOf() * size; + int sizeOf = Unsafe.SizeOf(); + + byte[] src = reader.ReadBytes(sizeOf * size); + if (src.Length == 0) + return new T[0]; - byte[] result = reader.ReadBytes(numBytes); - return result.CopyTo(); + T[] result = new T[src.Length / sizeOf]; + Unsafe.CopyBlockUnaligned(Unsafe.AsPointer(ref result[0]), Unsafe.AsPointer(ref src[0]), (uint)src.Length); + return result; } public static unsafe void WriteArray(this BinaryWriter writer, T[] value) where T : struct @@ -61,26 +76,13 @@ public 
static unsafe void WriteArray(this BinaryWriter writer, T[] value) whe if (value.Length == 0) return; - if (value is byte[] arr) - { - writer.Write(arr); - } - else + if (!(value is byte[] buffer)) { - byte[] result = new byte[value.Length * Unsafe.SizeOf()]; - Unsafe.CopyBlockUnaligned(Unsafe.AsPointer(ref result[0]), Unsafe.AsPointer(ref value[0]), (uint)result.Length); - writer.Write(result); + buffer = new byte[value.Length * Unsafe.SizeOf()]; + Unsafe.CopyBlockUnaligned(Unsafe.AsPointer(ref buffer[0]), Unsafe.AsPointer(ref value[0]), (uint)buffer.Length); } - } - public static unsafe T[] CopyTo(this byte[] src) where T : struct - { - T[] result = new T[src.Length / Unsafe.SizeOf()]; - - if (src.Length > 0) - Unsafe.CopyBlockUnaligned(Unsafe.AsPointer(ref result[0]), Unsafe.AsPointer(ref src[0]), (uint)src.Length); - - return result; + writer.Write(buffer); } public static bool HasFlagExt(this DB2Flags flag, DB2Flags valueToCheck) @@ -119,17 +121,5 @@ public static void WriteCString(this BinaryWriter writer, string str) writer.Write(bytes); writer.Write((byte)0); } - - public static byte[] ToByteArray(this string str) - { - str = str.Replace(" ", string.Empty); - - var res = new byte[str.Length / 2]; - for (int i = 0; i < res.Length; i++) - { - res[i] = Convert.ToByte(str.Substring(i * 2, 2), 16); - } - return res; - } } } diff --git a/DBCD.IO/FieldCache.cs b/DBCD.IO/FieldCache.cs index 848826f..04cea29 100644 --- a/DBCD.IO/FieldCache.cs +++ b/DBCD.IO/FieldCache.cs @@ -30,7 +30,7 @@ public FieldCache(FieldInfo field) private int GetCardinality(FieldInfo field) { var cardinality = field.GetAttribute()?.Count; - return cardinality.HasValue && cardinality > 0 ? cardinality.Value : 1; + return cardinality > 0 ? cardinality.Value : 1; } private bool GetStringInfo(FieldInfo field, out LocaleAttribute attribute) diff --git a/DBCD.IO/HotfixReader.cs b/DBCD.IO/HotfixReader.cs index 60cd827..483cae3 100644 --- a/DBCD.IO/HotfixReader.cs +++ b/DBCD.IO/HotfixReader.cs @@ -41,7 +41,7 @@ public HotfixReader(Stream stream) public void CombineCaches(params string[] files) { - foreach(var file in files) + foreach (var file in files) { if (!File.Exists(file)) continue; @@ -59,7 +59,7 @@ public void CombineCaches(params string[] files) protected virtual void ReadHotfixes(IDictionary storage, DBReader dbReader) where T : class, new() { - var fieldCache = typeof(T).GetFields().Select(x => new FieldCache(x)).ToArray(); + var fieldCache = typeof(T).ToFieldCache(); // Id fields need to be excluded if not inline if (dbReader.Flags.HasFlagExt(DB2Flags.Index)) diff --git a/DBCD.IO/Readers/BaseReader.cs b/DBCD.IO/Readers/BaseReader.cs index 99c2643..9ea837d 100644 --- a/DBCD.IO/Readers/BaseReader.cs +++ b/DBCD.IO/Readers/BaseReader.cs @@ -1,8 +1,8 @@ -using System; +using DBCD.IO.Common; +using System; using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; -using DBCD.IO.Common; namespace DBCD.IO.Readers { @@ -62,6 +62,18 @@ public void Enumerate(Action action) Parallel.ForEach(GetCopyRows(), action); } + public void Clear() + { + _Records.Clear(); + Array.Resize(ref m_indexData, 0); + Array.Resize(ref m_palletData, 0); + Array.Resize(ref m_columnMeta, 0); + Array.Resize(ref recordsData, 0); + Array.Resize(ref m_foreignKeyData, 0); + m_stringsTable.Clear(); + m_sparseEntries.Clear(); + } + private IEnumerable GetCopyRows() { if (m_copyData == null || m_copyData.Count == 0) diff --git a/DBCD.IO/Readers/HTFXReader.cs b/DBCD.IO/Readers/HTFXReader.cs index 3f631d9..7c2e493 100644 ---
a/DBCD.IO/Readers/HTFXReader.cs +++ b/DBCD.IO/Readers/HTFXReader.cs @@ -114,7 +114,7 @@ public IDBRow Clone() { return (IDBRow)MemberwiseClone(); } - + public override int GetHashCode() { unchecked diff --git a/DBCD.IO/Readers/WDB2Reader.cs b/DBCD.IO/Readers/WDB2Reader.cs index 4263332..fee8a82 100644 --- a/DBCD.IO/Readers/WDB2Reader.cs +++ b/DBCD.IO/Readers/WDB2Reader.cs @@ -1,10 +1,10 @@ -using System; +using DBCD.IO.Common; +using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Runtime.CompilerServices; using System.Text; -using DBCD.IO.Common; namespace DBCD.IO.Readers { diff --git a/DBCD.IO/Readers/WDC1Reader.cs b/DBCD.IO/Readers/WDC1Reader.cs index a54cacf..39ec783 100644 --- a/DBCD.IO/Readers/WDC1Reader.cs +++ b/DBCD.IO/Readers/WDC1Reader.cs @@ -4,7 +4,6 @@ using System.IO; using System.Linq; using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; using System.Text; namespace DBCD.IO.Readers @@ -271,7 +270,7 @@ public WDC1Reader(Stream stream) else { // sparse data with inlined strings - recordsData = reader.ReadBytes(sparseTableOffset - HeaderSize - Marshal.SizeOf() * FieldsCount); + recordsData = reader.ReadBytes(sparseTableOffset - HeaderSize - Unsafe.SizeOf() * FieldsCount); if (reader.BaseStream.Position != sparseTableOffset) throw new Exception("r.BaseStream.Position != sparseTableOffset"); diff --git a/DBCD.IO/Writers/BaseWriter.cs b/DBCD.IO/Writers/BaseWriter.cs index d937d86..31f4ede 100644 --- a/DBCD.IO/Writers/BaseWriter.cs +++ b/DBCD.IO/Writers/BaseWriter.cs @@ -2,7 +2,6 @@ using DBCD.IO.Readers; using System.Collections.Generic; using System.IO; -using System.Linq; namespace DBCD.IO.Writers { @@ -18,7 +17,7 @@ abstract class BaseWriter where T : class public BaseWriter(BaseReader reader) { - FieldCache = typeof(T).GetFields().Select(x => new FieldCache(x)).ToArray(); + FieldCache = typeof(T).ToFieldCache(); FieldsCount = reader.FieldsCount; RecordSize = reader.RecordSize; diff --git a/DBCD.IO/Writers/WDB2Writer.cs b/DBCD.IO/Writers/WDB2Writer.cs index 1047c8b..3c6b0e8 100644 --- a/DBCD.IO/Writers/WDB2Writer.cs +++ b/DBCD.IO/Writers/WDB2Writer.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { diff --git a/DBCD.IO/Writers/WDB3Writer.cs b/DBCD.IO/Writers/WDB3Writer.cs index 58f76b4..d5c5cd7 100644 --- a/DBCD.IO/Writers/WDB3Writer.cs +++ b/DBCD.IO/Writers/WDB3Writer.cs @@ -167,7 +167,7 @@ public WDB3Writer(WDB3Reader reader, IDictionary storage, Stream stream) int sparseCount = maxIndex - minIndex + 1; uint recordsOffset = (uint)(writer.BaseStream.Position + (sparseCount * 6)); WriteOffsetRecords(writer, serializer, recordsOffset, sparseCount); - } + } // secondary key if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) diff --git a/DBCD.IO/Writers/WDB6Writer.cs b/DBCD.IO/Writers/WDB6Writer.cs index 30747e2..89e9436 100644 --- a/DBCD.IO/Writers/WDB6Writer.cs +++ b/DBCD.IO/Writers/WDB6Writer.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { @@ -220,14 +219,14 @@ public WDB6Writer(WDB6Reader reader, IDictionary storage, Stream stream) writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); // copy table - if(!Flags.HasFlagExt(DB2Flags.Sparse)) + if (!Flags.HasFlagExt(DB2Flags.Sparse)) { foreach (var copyRecord in m_copyData) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); } - } + } // common data // HACK this 
is bodged together diff --git a/DBCD.IO/Writers/WDBCWriter.cs b/DBCD.IO/Writers/WDBCWriter.cs index c7d952c..105f9c4 100644 --- a/DBCD.IO/Writers/WDBCWriter.cs +++ b/DBCD.IO/Writers/WDBCWriter.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { diff --git a/DBCD.IO/Writers/WDC3Writer.cs b/DBCD.IO/Writers/WDC3Writer.cs index f2460e6..466d061 100644 --- a/DBCD.IO/Writers/WDC3Writer.cs +++ b/DBCD.IO/Writers/WDC3Writer.cs @@ -5,7 +5,6 @@ using System.IO; using System.Linq; using System.Runtime.CompilerServices; -using System.Text; namespace DBCD.IO.Writers { From 735c49b23fa56f1a049a2648df2ed2f6bada0143 Mon Sep 17 00:00:00 2001 From: barncastle Date: Thu, 12 Sep 2019 09:58:19 +0100 Subject: [PATCH 07/40] fixes --- DBCD.IO/Common/BitWriter.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DBCD.IO/Common/BitWriter.cs b/DBCD.IO/Common/BitWriter.cs index ce0e747..e736945 100644 --- a/DBCD.IO/Common/BitWriter.cs +++ b/DBCD.IO/Common/BitWriter.cs @@ -7,6 +7,8 @@ namespace DBCD.IO.Common { class BitWriter : IEquatable { + public int TotalBytesWrittenOut { get; private set; } + private byte nAccumulatedBits; private byte[] buffer; @@ -19,8 +21,6 @@ public BitWriter(int capacity) } public byte this[int i] => buffer[i]; - public int TotalBytesWrittenOut { get; private set; } - public void WriteAligned(T value) where T : struct { @@ -143,8 +143,8 @@ public void ResizeToMultiple(int divisor) int remainder = TotalBytesWrittenOut % divisor; if (remainder != 0) { - EnsureSize(); - TotalBytesWrittenOut += 4 - remainder; + EnsureSize(divisor); + TotalBytesWrittenOut += divisor - remainder; } } From ffbe68176a1893c8216062b978006e1631f18f3a Mon Sep 17 00:00:00 2001 From: barncastle Date: Thu, 12 Sep 2019 09:58:46 +0100 Subject: [PATCH 08/40] bubble Enumerate errors --- DBCD.IO/Readers/BaseReader.cs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/DBCD.IO/Readers/BaseReader.cs b/DBCD.IO/Readers/BaseReader.cs index 9ea837d..d97fedc 100644 --- a/DBCD.IO/Readers/BaseReader.cs +++ b/DBCD.IO/Readers/BaseReader.cs @@ -58,8 +58,15 @@ abstract class BaseReader public void Enumerate(Action action) { - Parallel.ForEach(_Records.Values, action); - Parallel.ForEach(GetCopyRows(), action); + try + { + Parallel.ForEach(_Records.Values, action); + Parallel.ForEach(GetCopyRows(), action); + } + catch (AggregateException ex) + { + throw ex.InnerException; + } } public void Clear() From da5ac49f0ca3b4235775c7dd88e1ed9371840709 Mon Sep 17 00:00:00 2001 From: barncastle Date: Thu, 12 Sep 2019 10:45:08 +0100 Subject: [PATCH 09/40] revert last unittest changes push --- DBCD.Tests/UnitTest1.cs | 107 ++-------------------------------------- 1 file changed, 3 insertions(+), 104 deletions(-) diff --git a/DBCD.Tests/UnitTest1.cs b/DBCD.Tests/UnitTest1.cs index 5b07451..896661d 100644 --- a/DBCD.Tests/UnitTest1.cs +++ b/DBCD.Tests/UnitTest1.cs @@ -15,113 +15,12 @@ public class UnitTest1 public void TestMethod1() { var githubDBDProvider = new GithubDBDProvider(); - var testDBDProvider = new TestDBDProvider(); - var dbcProvider = new TestDBCProvider(@"C:\Users\TomSpearman\Downloads\dbfilesclient"); + var dbcProvider = new TestDBCProvider(@"E:\"); - /* - "C:\\Users\\TomSpearman\\Downloads\\dbfilesclient\\item.db2" - "C:\\Users\\TomSpearman\\Downloads\\dbfilesclient\\spell.db2" - "C:\\Users\\TomSpearman\\Downloads\\dbfilesclient\\spelleffect.db2" - 
"C:\\Users\\TomSpearman\\Downloads\\dbfilesclient\\spellname.db2" - */ - - //DBCD dbcd = new DBCD(dbcProvider, githubDBDProvider); - //IDBCDStorage storage = dbcd.Load("Map"); - - //var fucked = new System.Collections.Generic.List(); - //foreach (var file in Directory.EnumerateFiles(@"C:\Users\TomSpearman\Downloads\dbfilesclient")) - //{ - // try - // { - // DBCD dbcd = new DBCD(dbcProvider, testDBDProvider); - // IDBCDStorage storage = dbcd.Load(Path.GetFileNameWithoutExtension(file)); - // } - // catch - // { - // fucked.Add(file); - // } - //} - - //DBCD dbcd = new DBCD(dbcProvider, githubDBDProvider); - //IDBCDStorage storage = dbcd.Load("Creature"); + DBCD dbcd = new DBCD(dbcProvider, githubDBDProvider); + IDBCDStorage storage = dbcd.Load("Creature"); //IDBCDStorage storage = dbcd.Load("LockType", "1.12.1.5875", Locale.EnUS); - - var fields = typeof(SpellVisualEffectNameRec).GetFields(); - - var reader = new DBReader("SpellVisualEffectName.dbc"); - var recs = reader.GetRecords(); - var val = recs.Values.Where(x => x.Flags > 0).ToArray(); - - } - - class SpellVisualEffectNameRec - { - [Index] - public int Id; - public string FileName; - public UNITEFFECTSPECIALS[] SpecialID; - public int SpecialAttachPoint; - public float AreaEffectSize; - public Flags Flags; } - - [Flags] - enum Flags : uint - { - ReleaseDeathHolds = 1, - Unknown = 2, - OneShotEndHandler = 4, - UnitEffectIsAuraWorldObject = 8 - } - - enum UNITEFFECTSPECIALS : uint - { - SPECIALEFFECT_LOOTART = 0x0, - SPECIALEFFECT_LEVELUP = 0x1, - SPECIALEFFECT_FOOTSTEPSPRAYSNOW = 0x2, - SPECIALEFFECT_FOOTSTEPSPRAYSNOWWALK = 0x3, - SPECIALEFFECT_FOOTSTEPDIRT = 0x4, - SPECIALEFFECT_FOOTSTEPDIRTWALK = 0x5, - SPECIALEFFECT_COLDBREATH = 0x6, - SPECIALEFFECT_UNDERWATERBUBBLES = 0x7, - SPECIALEFFECT_COMBATBLOODSPURTFRONT = 0x8, - SPECIALEFFECT_UNUSED = 0x9, - SPECIALEFFECT_COMBATBLOODSPURTBACK = 0xA, - SPECIALEFFECT_HITSPLATPHYSICALSMALL = 0xB, - SPECIALEFFECT_HITSPLATPHYSICALBIG = 0xC, - SPECIALEFFECT_HITSPLATHOLYSMALL = 0xD, - SPECIALEFFECT_HITSPLATHOLYBIG = 0xE, - SPECIALEFFECT_HITSPLATFIRESMALL = 0xF, - SPECIALEFFECT_HITSPLATFIREBIG = 0x10, - SPECIALEFFECT_HITSPLATNATURESMALL = 0x11, - SPECIALEFFECT_HITSPLATNATUREBIG = 0x12, - SPECIALEFFECT_HITSPLATFROSTSMALL = 0x13, - SPECIALEFFECT_HITSPLATFROSTBIG = 0x14, - SPECIALEFFECT_HITSPLATSHADOWSMALL = 0x15, - SPECIALEFFECT_HITSPLATSHADOWBIG = 0x16, - SPECIALEFFECT_COMBATBLOODSPURTFRONTLARGE = 0x17, - SPECIALEFFECT_COMBATBLOODSPURTBACKLARGE = 0x18, - SPECIALEFFECT_FIZZLEPHYSICAL = 0x19, - SPECIALEFFECT_FIZZLEHOLY = 0x1A, - SPECIALEFFECT_FIZZLEFIRE = 0x1B, - SPECIALEFFECT_FIZZLENATURE = 0x1C, - SPECIALEFFECT_FIZZLEFROST = 0x1D, - SPECIALEFFECT_FIZZLESHADOW = 0x1E, - SPECIALEFFECT_COMBATBLOODSPURTGREENFRONT = 0x1F, - SPECIALEFFECT_COMBATBLOODSPURTGREENFRONTLARGE = 0x20, - SPECIALEFFECT_COMBATBLOODSPURTGREENBACK = 0x21, - SPECIALEFFECT_COMBATBLOODSPURTGREENBACKLARGE = 0x22, - SPECIALEFFECT_FOOTSTEPSPRAYWATER = 0x23, - SPECIALEFFECT_FOOTSTEPSPRAYWATERWALK = 0x24, - SPECIALEFFECT_CHARACTERSHAPESHIFT = 0x25, - SPECIALEFFECT_COMBATBLOODSPURTBLACKFRONT = 0x26, - SPECIALEFFECT_COMBATBLOODSPURTBLACKFRONTLARGE = 0x27, - SPECIALEFFECT_COMBATBLOODSPURTBLACKBACK = 0x28, - SPECIALEFFECT_COMBATBLOODSPURTBLACKBACKLARGE = 0x29, - SPECIALEFFECT_RES_EFFECT = 0x2A, - NUM_UNITEFFECTSPECIALS = 0x2B, - SPECIALEFFECT_NONE = 0xFFFFFFFF, - }; } } From f32e25acb898c7851fabb87d57975121ebb887da Mon Sep 17 00:00:00 2001 From: barncastle Date: Sun, 29 Sep 2019 17:34:49 +0100 Subject: [PATCH 10/40] big refactor --- 
DBCD.IO/DBParser.cs | 148 ++++++++++++++++++++++++++++++++++ DBCD.IO/DBReader.cs | 88 -------------------- DBCD.IO/DBStorage.cs | 49 +++++++++++ DBCD.IO/HotfixReader.cs | 4 +- DBCD.IO/Readers/BaseReader.cs | 66 +++++++-------- DBCD.IO/Readers/WDB2Reader.cs | 13 +-- DBCD.IO/Readers/WDB3Reader.cs | 39 ++++----- DBCD.IO/Readers/WDB4Reader.cs | 37 ++++----- DBCD.IO/Readers/WDB5Reader.cs | 39 ++++----- DBCD.IO/Readers/WDB6Reader.cs | 51 ++++++------ DBCD.IO/Readers/WDBCReader.cs | 13 +-- DBCD.IO/Readers/WDC1Reader.cs | 60 +++++++------- DBCD.IO/Readers/WDC2Reader.cs | 64 +++++++-------- DBCD.IO/Readers/WDC3Reader.cs | 66 +++++++-------- DBCD.IO/Storage.cs | 14 ---- DBCD.IO/Writers/BaseWriter.cs | 69 +++++++--------- DBCD.IO/Writers/WDB2Writer.cs | 3 +- DBCD.IO/Writers/WDB3Writer.cs | 13 +-- DBCD.IO/Writers/WDB4Writer.cs | 13 +-- DBCD.IO/Writers/WDB5Writer.cs | 15 ++-- DBCD.IO/Writers/WDB6Writer.cs | 31 +++---- DBCD.IO/Writers/WDBCWriter.cs | 3 +- DBCD.IO/Writers/WDC1Writer.cs | 61 +++++++------- DBCD.IO/Writers/WDC2Writer.cs | 63 ++++++++------- DBCD.IO/Writers/WDC3Writer.cs | 63 ++++++++------- DBCD/DBCD.cs | 2 +- DBCD/DBCDBuilder.cs | 2 +- DBCD/DBCDStorage.cs | 9 ++- 28 files changed, 593 insertions(+), 505 deletions(-) create mode 100644 DBCD.IO/DBParser.cs delete mode 100644 DBCD.IO/DBReader.cs create mode 100644 DBCD.IO/DBStorage.cs delete mode 100644 DBCD.IO/Storage.cs diff --git a/DBCD.IO/DBParser.cs b/DBCD.IO/DBParser.cs new file mode 100644 index 0000000..a2c29aa --- /dev/null +++ b/DBCD.IO/DBParser.cs @@ -0,0 +1,148 @@ +using DBCD.IO.Readers; +using DBCD.IO.Writers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Text; + +namespace DBCD.IO +{ + public class DBParser + { + private readonly BaseReader _reader; + + #region Fields + + public Type RecordType { get; private set; } + public string Identifier { get; } + public int RecordsCount => _reader.RecordsCount; + public int FieldsCount => _reader.FieldsCount; + public int RecordSize => _reader.RecordSize; + public int StringTableSize => _reader.StringTableSize; + public uint TableHash => _reader.TableHash; + public uint LayoutHash => _reader.LayoutHash; + public int IdFieldIndex => _reader.IdFieldIndex; + public DB2Flags Flags => _reader.Flags; + public int Locale => _reader.Locale; + + #endregion + + #region Constructors + + public DBParser(string fileName) : this(File.Open(fileName, FileMode.Open, FileAccess.Read, FileShare.Read)) { } + + public DBParser(Stream stream) + { + using (var bin = new BinaryReader(stream)) + { + Identifier = new string(bin.ReadChars(4)); + stream.Position = 0; + switch (Identifier) + { + case "WDC3": + _reader = new WDC3Reader(stream); + break; + case "WDC2": + case "1SLC": + _reader = new WDC2Reader(stream); + break; + case "WDC1": + _reader = new WDC1Reader(stream); + break; + case "WDB6": + _reader = new WDB6Reader(stream); + break; + case "WDB5": + _reader = new WDB5Reader(stream); + break; + case "WDB4": + _reader = new WDB4Reader(stream); + break; + case "WDB3": + _reader = new WDB3Reader(stream); + break; + case "WDB2": + _reader = new WDB2Reader(stream); + break; + case "WDBC": + _reader = new WDBCReader(stream); + break; + default: + throw new Exception("DB type " + Identifier + " is not supported!"); + } + } + } + + #endregion + + #region Methods + + public DBStorage ReadRecords() where T : class, new() => new DBStorage(this); + + public void ReadRecords(IDictionary storage) where T : class, new() + { + var fieldCache = (RecordType = typeof(T)).ToFieldCache(); + 
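+ // NB: Enumerate invokes this callback from Parallel.ForEach (see BaseReader.Enumerate), so the write into the shared dictionary below is serialised with a lock.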
+ _reader.Enumerate((row) => + { + T entry = new T(); + row.GetFields(fieldCache, entry); + lock (storage) + storage[row.Id] = entry; + }); + } + + + public void WriteRecords(IDictionary storage, string fileName) where T : class, new() + { + WriteRecords(storage, File.Open(fileName, FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite)); + } + + public void WriteRecords(IDictionary storage, Stream stream) where T : class, new() + { + if (typeof(T) != RecordType) + throw new FormatException($"Invalid record type, expected {RecordType.Name}"); + + BaseWriter writer; + switch (Identifier) + { + case "WDC3": + writer = new WDC3Writer((WDC3Reader)_reader, storage, stream); + break; + case "WDC2": + case "1SLC": + writer = new WDC2Writer((WDC2Reader)_reader, storage, stream); + break; + case "WDC1": + writer = new WDC1Writer((WDC1Reader)_reader, storage, stream); + break; + case "WDB6": + writer = new WDB6Writer((WDB6Reader)_reader, storage, stream); + break; + case "WDB5": + writer = new WDB5Writer((WDB5Reader)_reader, storage, stream); + break; + case "WDB4": + writer = new WDB4Writer((WDB4Reader)_reader, storage, stream); + break; + case "WDB3": + writer = new WDB3Writer((WDB3Reader)_reader, storage, stream); + break; + case "WDB2": + writer = new WDB2Writer((WDB2Reader)_reader, storage, stream); + break; + case "WDBC": + writer = new WDBCWriter((WDBCReader)_reader, storage, stream); + break; + } + } + + + /// + /// Clears temporary data however prevents further calls + /// + public void ClearCache() => _reader.Clear(); + + #endregion + } +} diff --git a/DBCD.IO/DBReader.cs b/DBCD.IO/DBReader.cs deleted file mode 100644 index 01e8a82..0000000 --- a/DBCD.IO/DBReader.cs +++ /dev/null @@ -1,88 +0,0 @@ -using DBCD.IO.Readers; -using System; -using System.Collections.Generic; -using System.IO; - -namespace DBCD.IO -{ - public class DBReader - { - private readonly BaseReader _reader; - - #region Header - - public int RecordsCount => _reader.RecordsCount; - public int FieldsCount => _reader.FieldsCount; - public int RecordSize => _reader.RecordSize; - public int StringTableSize => _reader.StringTableSize; - public uint TableHash => _reader.TableHash; - public uint LayoutHash => _reader.LayoutHash; - public int IdFieldIndex => _reader.IdFieldIndex; - public DB2Flags Flags => _reader.Flags; - - #endregion - - public DBReader(string fileName) : this(File.Open(fileName, FileMode.Open, FileAccess.Read, FileShare.Read)) { } - - public DBReader(Stream stream) - { - using (var bin = new BinaryReader(stream)) - { - var identifier = new string(bin.ReadChars(4)); - stream.Position = 0; - switch (identifier) - { - case "WDC3": - _reader = new WDC3Reader(stream); - break; - case "WDC2": - case "1SLC": - _reader = new WDC2Reader(stream); - break; - case "WDC1": - _reader = new WDC1Reader(stream); - break; - case "WDB6": - _reader = new WDB6Reader(stream); - break; - case "WDB5": - _reader = new WDB5Reader(stream); - break; - case "WDB4": - _reader = new WDB4Reader(stream); - break; - case "WDB3": - _reader = new WDB3Reader(stream); - break; - case "WDB2": - _reader = new WDB2Reader(stream); - break; - case "WDBC": - _reader = new WDBCReader(stream); - break; - default: - throw new Exception("DB type " + identifier + " is not supported!"); - } - } - } - - - public Storage GetRecords() where T : class, new() => new Storage(this); - - public void PopulateRecords(IDictionary storage) where T : class, new() => ReadRecords(storage); - - - protected virtual void ReadRecords(IDictionary storage) where T : class, 
new() - { - var fieldCache = typeof(T).ToFieldCache(); - - _reader.Enumerate((row) => - { - T entry = new T(); - row.GetFields(fieldCache, entry); - lock (storage) - storage.Add(row.Id, entry); - }); - } - } -} diff --git a/DBCD.IO/DBStorage.cs b/DBCD.IO/DBStorage.cs new file mode 100644 index 0000000..ba0178e --- /dev/null +++ b/DBCD.IO/DBStorage.cs @@ -0,0 +1,49 @@ +using DBCD.IO.Readers; +using DBCD.IO.Writers; +using System; +using System.Collections.Generic; +using System.IO; + +namespace DBCD.IO +{ + public class DBStorage : SortedDictionary where T : class, new() + { + private readonly DBParser _reader; + + #region Header + + public string Identifier => _reader.Identifier; + public int RecordsCount => _reader.RecordsCount; + public int FieldsCount => _reader.FieldsCount; + public int RecordSize => _reader.RecordSize; + public uint TableHash => _reader.TableHash; + public uint LayoutHash => _reader.LayoutHash; + public int IdFieldIndex => _reader.IdFieldIndex; + public DB2Flags Flags => _reader.Flags; + public int Locale => _reader.Locale; + + #endregion + + #region Constructors + + public DBStorage(string fileName) : this(File.Open(fileName, FileMode.Open, FileAccess.Read, FileShare.Read)) { } + + public DBStorage(Stream stream) : this(new DBParser(stream)) => _reader.ClearCache(); + + public DBStorage(DBParser dbReader) + { + _reader = dbReader; + _reader.ReadRecords(this); + } + + #endregion + + #region Methods + + public void Save(string fileName) => _reader.WriteRecords(this, fileName); + + public void Save(Stream stream) => _reader.WriteRecords(this, stream); + + #endregion + } +} diff --git a/DBCD.IO/HotfixReader.cs b/DBCD.IO/HotfixReader.cs index 483cae3..8be2821 100644 --- a/DBCD.IO/HotfixReader.cs +++ b/DBCD.IO/HotfixReader.cs @@ -37,7 +37,7 @@ public HotfixReader(Stream stream) } - public void ApplyHotfixes(IDictionary storage, DBReader dbReader) where T : class, new() => ReadHotfixes(storage, dbReader); + public void ApplyHotfixes(IDictionary storage, DBParser dbReader) where T : class, new() => ReadHotfixes(storage, dbReader); public void CombineCaches(params string[] files) { @@ -57,7 +57,7 @@ public void CombineCaches(params string[] files) } - protected virtual void ReadHotfixes(IDictionary storage, DBReader dbReader) where T : class, new() + protected virtual void ReadHotfixes(IDictionary storage, DBParser dbReader) where T : class, new() { var fieldCache = typeof(T).ToFieldCache(); diff --git a/DBCD.IO/Readers/BaseReader.cs b/DBCD.IO/Readers/BaseReader.cs index d97fedc..f514fee 100644 --- a/DBCD.IO/Readers/BaseReader.cs +++ b/DBCD.IO/Readers/BaseReader.cs @@ -24,37 +24,22 @@ abstract class BaseReader #region Data - protected FieldMetaData[] m_meta; - public FieldMetaData[] Meta => m_meta; - - protected int[] m_indexData; - public int[] IndexData => m_indexData; - - protected ColumnMetaData[] m_columnMeta; - public ColumnMetaData[] ColumnMeta => m_columnMeta; - - protected Value32[][] m_palletData; - public Value32[][] PalletData => m_palletData; - - protected Dictionary[] m_commonData; - public Dictionary[] CommonData => m_commonData; - - protected Dictionary m_stringsTable; - public Dictionary StringTable => m_stringsTable; - - protected Dictionary m_copyData; - - protected byte[] recordsData; - protected Dictionary _Records = new Dictionary(); - - protected List m_sparseEntries; - - protected int[] m_foreignKeyData; - public int[] ForeignKeyData => m_foreignKeyData; + public FieldMetaData[] Meta { get; protected set; } + public int[] IndexData { get; protected 
set; } + public ColumnMetaData[] ColumnMeta { get; protected set; } + public Value32[][] PalletData { get; protected set; } + public Dictionary[] CommonData { get; protected set; } + public Dictionary StringTable { get; protected set; } + public int[] ForeignKeyData { get; protected set; } + + protected Dictionary CopyData { get; set; } + protected byte[] RecordsData { get; set; } + protected Dictionary _Records { get; set; } = new Dictionary(); + protected List SparseEntries { get; set; } #endregion - #region Helpers + #region Methods public void Enumerate(Action action) { @@ -71,25 +56,28 @@ public void Enumerate(Action action) public void Clear() { - _Records.Clear(); - Array.Resize(ref m_indexData, 0); - Array.Resize(ref m_palletData, 0); - Array.Resize(ref m_palletData, 0); - Array.Resize(ref recordsData, 0); - Array.Resize(ref m_foreignKeyData, 0); - m_stringsTable.Clear(); - m_sparseEntries.Clear(); + IndexData = null; + PalletData = null; + ColumnMeta = null; + RecordsData = null; + ForeignKeyData = null; + CommonData = null; + + _Records?.Clear(); + StringTable?.Clear(); + SparseEntries?.Clear(); + CopyData?.Clear(); } private IEnumerable GetCopyRows() { - if (m_copyData == null || m_copyData.Count == 0) + if (CopyData == null || CopyData.Count == 0) yield break; // fix temp ids _Records = _Records.ToDictionary(x => x.Value.Id, x => x.Value); - foreach (var copyRow in m_copyData) + foreach (var copyRow in CopyData) { IDBRow rec = _Records[copyRow.Value].Clone(); rec.Data = rec.Data.Clone(); @@ -98,7 +86,7 @@ private IEnumerable GetCopyRows() yield return rec; } - m_copyData.Clear(); + CopyData.Clear(); } #endregion diff --git a/DBCD.IO/Readers/WDB2Reader.cs b/DBCD.IO/Readers/WDB2Reader.cs index fee8a82..fdbe14d 100644 --- a/DBCD.IO/Readers/WDB2Reader.cs +++ b/DBCD.IO/Readers/WDB2Reader.cs @@ -123,7 +123,7 @@ public WDB2Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB2Reader(Stream stream) { - using (var reader = new BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB2 file is corrupted!"); @@ -163,21 +163,22 @@ public WDB2Reader(Stream stream) } } - recordsData = reader.ReadBytes(RecordsCount * RecordSize); - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; for (int i = 0; i < RecordsCount; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = i * RecordSize * 8 }; + BitReader bitReader = new BitReader(RecordsData) { Position = i * RecordSize * 8 }; IDBRow rec = new WDB2Row(this, bitReader, i); _Records.Add(i, rec); } - m_stringsTable = new Dictionary(StringTableSize / 0x20); + StringTable = new Dictionary(StringTableSize / 0x20); for (int i = 0; i < StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i] = reader.ReadCString(); + StringTable[i] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } } diff --git a/DBCD.IO/Readers/WDB3Reader.cs b/DBCD.IO/Readers/WDB3Reader.cs index 69d1189..7ee2f6a 100644 --- a/DBCD.IO/Readers/WDB3Reader.cs +++ b/DBCD.IO/Readers/WDB3Reader.cs @@ -147,7 +147,7 @@ public WDB3Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB3Reader(Stream stream) { - using (var reader = new 
BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB3 file is corrupted!"); @@ -181,8 +181,8 @@ public WDB3Reader(Stream stream) { int sparseCount = MaxIndex - MinIndex + 1; - m_sparseEntries = new List(sparseCount); - m_copyData = new Dictionary(sparseCount); + SparseEntries = new List(sparseCount); + CopyData = new Dictionary(sparseCount); var sparseIdLookup = new Dictionary(sparseCount); for (int i = 0; i < sparseCount; i++) @@ -193,71 +193,72 @@ public WDB3Reader(Stream stream) if (sparseIdLookup.TryGetValue(sparse.Offset, out int copyId)) { - m_copyData[MinIndex + i] = copyId; + CopyData[MinIndex + i] = copyId; } else { - m_sparseEntries.Add(sparse); + SparseEntries.Add(sparse); sparseIdLookup.Add(sparse.Offset, MinIndex + i); } } // secondary key if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) - m_foreignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); + ForeignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); - recordsData = reader.ReadBytes(m_sparseEntries.Sum(x => x.Size)); + RecordsData = reader.ReadBytes(SparseEntries.Sum(x => x.Size)); } else { // secondary key if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) - m_foreignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); + ForeignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); // record data - recordsData = reader.ReadBytes(RecordsCount * RecordSize); - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; } // string table - m_stringsTable = new Dictionary(StringTableSize / 0x20); + StringTable = new Dictionary(StringTableSize / 0x20); for (int i = 0; i < StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i] = reader.ReadCString(); + StringTable[i] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } // index table if ((reader.BaseStream.Position + copyTableSize) < reader.BaseStream.Length) { - m_indexData = reader.ReadArray(RecordsCount); + IndexData = reader.ReadArray(RecordsCount); Flags |= DB2Flags.Index; } // duplicate rows data - if (m_copyData == null) - m_copyData = new Dictionary(copyTableSize / 8); + if (CopyData == null) + CopyData = new Dictionary(copyTableSize / 8); for (int i = 0; i < copyTableSize / 8; i++) - m_copyData[reader.ReadInt32()] = reader.ReadInt32(); + CopyData[reader.ReadInt32()] = reader.ReadInt32(); int position = 0; for (int i = 0; i < RecordsCount; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else { bitReader.Offset = i * RecordSize; } - IDBRow rec = new WDB3Row(this, bitReader, Flags.HasFlagExt(DB2Flags.Index) ? m_indexData[i] : -1, i); + IDBRow rec = new WDB3Row(this, bitReader, Flags.HasFlagExt(DB2Flags.Index) ? 
IndexData[i] : -1, i); _Records.Add(i, rec); } } diff --git a/DBCD.IO/Readers/WDB4Reader.cs b/DBCD.IO/Readers/WDB4Reader.cs index d7d145d..a5f20d8 100644 --- a/DBCD.IO/Readers/WDB4Reader.cs +++ b/DBCD.IO/Readers/WDB4Reader.cs @@ -138,7 +138,7 @@ public WDB4Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB4Reader(Stream stream) { - using (var reader = new BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB4 file is corrupted!"); @@ -167,27 +167,28 @@ public WDB4Reader(Stream stream) if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(RecordsCount * RecordSize); - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string table - m_stringsTable = new Dictionary(StringTableSize / 0x20); + StringTable = new Dictionary(StringTableSize / 0x20); for (int i = 0; i < StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i] = reader.ReadCString(); + StringTable[i] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } } else { // sparse data with inlined strings - recordsData = reader.ReadBytes(StringTableSize - HeaderSize); + RecordsData = reader.ReadBytes(StringTableSize - HeaderSize); int sparseCount = MaxIndex - MinIndex + 1; - m_sparseEntries = new List(sparseCount); - m_copyData = new Dictionary(sparseCount); + SparseEntries = new List(sparseCount); + CopyData = new Dictionary(sparseCount); var sparseIdLookup = new Dictionary(sparseCount); for (int i = 0; i < sparseCount; i++) @@ -198,11 +199,11 @@ public WDB4Reader(Stream stream) if (sparseIdLookup.TryGetValue(sparse.Offset, out int copyId)) { - m_copyData[MinIndex + i] = copyId; + CopyData[MinIndex + i] = copyId; } else { - m_sparseEntries.Add(sparse); + SparseEntries.Add(sparse); sparseIdLookup.Add(sparse.Offset, MinIndex + i); } } @@ -210,35 +211,35 @@ public WDB4Reader(Stream stream) // secondary key if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) - m_foreignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); + ForeignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); // index table if (Flags.HasFlagExt(DB2Flags.Index)) - m_indexData = reader.ReadArray(RecordsCount); + IndexData = reader.ReadArray(RecordsCount); // duplicate rows data - if (m_copyData == null) - m_copyData = new Dictionary(copyTableSize / 8); + if (CopyData == null) + CopyData = new Dictionary(copyTableSize / 8); for (int i = 0; i < copyTableSize / 8; i++) - m_copyData[reader.ReadInt32()] = reader.ReadInt32(); + CopyData[reader.ReadInt32()] = reader.ReadInt32(); int position = 0; for (int i = 0; i < RecordsCount; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else { bitReader.Offset = i * RecordSize; } - IDBRow rec = new WDB4Row(this, bitReader, Flags.HasFlagExt(DB2Flags.Index) ? m_indexData[i] : -1, i); + IDBRow rec = new WDB4Row(this, bitReader, Flags.HasFlagExt(DB2Flags.Index) ? 
IndexData[i] : -1, i); _Records.Add(i, rec); } } diff --git a/DBCD.IO/Readers/WDB5Reader.cs b/DBCD.IO/Readers/WDB5Reader.cs index 6c6abb1..1829552 100644 --- a/DBCD.IO/Readers/WDB5Reader.cs +++ b/DBCD.IO/Readers/WDB5Reader.cs @@ -165,7 +165,7 @@ public WDB5Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB5Reader(Stream stream) { - using (var reader = new BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB5 file is corrupted!"); @@ -192,32 +192,33 @@ public WDB5Reader(Stream stream) return; // field meta data - m_meta = reader.ReadArray(FieldsCount); + Meta = reader.ReadArray(FieldsCount); if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(RecordsCount * RecordSize); - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string table - m_stringsTable = new Dictionary(StringTableSize / 0x20); + StringTable = new Dictionary(StringTableSize / 0x20); for (int i = 0; i < StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i] = reader.ReadCString(); + StringTable[i] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } } else { // sparse data with inlined strings - recordsData = reader.ReadBytes(StringTableSize - (int)reader.BaseStream.Position); + RecordsData = reader.ReadBytes(StringTableSize - (int)reader.BaseStream.Position); int sparseCount = MaxIndex - MinIndex + 1; - m_sparseEntries = new List(sparseCount); - m_copyData = new Dictionary(sparseCount); + SparseEntries = new List(sparseCount); + CopyData = new Dictionary(sparseCount); var sparseIdLookup = new Dictionary(sparseCount); for (int i = 0; i < sparseCount; i++) @@ -228,11 +229,11 @@ public WDB5Reader(Stream stream) if (sparseIdLookup.TryGetValue(sparse.Offset, out int copyId)) { - m_copyData[MinIndex + i] = copyId; + CopyData[MinIndex + i] = copyId; } else { - m_sparseEntries.Add(sparse); + SparseEntries.Add(sparse); sparseIdLookup.Add(sparse.Offset, MinIndex + i); } } @@ -240,35 +241,35 @@ public WDB5Reader(Stream stream) // secondary key if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) - m_foreignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); + ForeignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); // index table if (Flags.HasFlagExt(DB2Flags.Index)) - m_indexData = reader.ReadArray(RecordsCount); + IndexData = reader.ReadArray(RecordsCount); // duplicate rows data - if (m_copyData == null) - m_copyData = new Dictionary(copyTableSize / 8); + if (CopyData == null) + CopyData = new Dictionary(copyTableSize / 8); for (int i = 0; i < copyTableSize / 8; i++) - m_copyData[reader.ReadInt32()] = reader.ReadInt32(); + CopyData[reader.ReadInt32()] = reader.ReadInt32(); int position = 0; for (int i = 0; i < RecordsCount; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else { bitReader.Offset = i * RecordSize; } - IDBRow rec = new WDB5Row(this, bitReader, Flags.HasFlagExt(DB2Flags.Index) ? 
m_indexData[i] : -1, i); + IDBRow rec = new WDB5Row(this, bitReader, Flags.HasFlagExt(DB2Flags.Index) ? IndexData[i] : -1, i); _Records.Add(i, rec); } } diff --git a/DBCD.IO/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs index 56802ab..9441ce4 100644 --- a/DBCD.IO/Readers/WDB6Reader.cs +++ b/DBCD.IO/Readers/WDB6Reader.cs @@ -190,7 +190,7 @@ public WDB6Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB6Reader(Stream stream) { - using (var reader = new BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB6 file is corrupted!"); @@ -219,32 +219,35 @@ public WDB6Reader(Stream stream) return; // field meta data - m_meta = reader.ReadArray(FieldsCount); + var meta = reader.ReadArray(FieldsCount); + Array.Resize(ref meta, totalFieldCount); + Meta = meta; if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(RecordsCount * RecordSize); - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string table - m_stringsTable = new Dictionary(StringTableSize / 0x20); + StringTable = new Dictionary(StringTableSize / 0x20); for (int i = 0; i < StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i] = reader.ReadCString(); + StringTable[i] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } } else { // sparse data with inlined strings - recordsData = reader.ReadBytes(StringTableSize - (int)reader.BaseStream.Position); + RecordsData = reader.ReadBytes(StringTableSize - (int)reader.BaseStream.Position); int sparseCount = MaxIndex - MinIndex + 1; - m_sparseEntries = new List(sparseCount); - m_copyData = new Dictionary(sparseCount); + SparseEntries = new List(sparseCount); + CopyData = new Dictionary(sparseCount); var sparseIdLookup = new Dictionary(sparseCount); for (int i = 0; i < sparseCount; i++) @@ -255,11 +258,11 @@ public WDB6Reader(Stream stream) if (sparseIdLookup.TryGetValue(sparse.Offset, out int copyId)) { - m_copyData[MinIndex + i] = copyId; + CopyData[MinIndex + i] = copyId; } else { - m_sparseEntries.Add(sparse); + SparseEntries.Add(sparse); sparseIdLookup.Add(sparse.Offset, MinIndex + i); } } @@ -267,25 +270,23 @@ public WDB6Reader(Stream stream) // secondary key if (Flags.HasFlagExt(DB2Flags.SecondaryKey)) - m_foreignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); + ForeignKeyData = reader.ReadArray(MaxIndex - MinIndex + 1); // index table if (Flags.HasFlagExt(DB2Flags.Index)) - m_indexData = reader.ReadArray(RecordsCount); + IndexData = reader.ReadArray(RecordsCount); // duplicate rows data - if (m_copyData == null) - m_copyData = new Dictionary(copyTableSize / 8); + if (CopyData == null) + CopyData = new Dictionary(copyTableSize / 8); for (int i = 0; i < copyTableSize / 8; i++) - m_copyData[reader.ReadInt32()] = reader.ReadInt32(); + CopyData[reader.ReadInt32()] = reader.ReadInt32(); if (commonDataSize > 0) { - Array.Resize(ref m_meta, totalFieldCount); - int fieldCount = reader.ReadInt32(); - m_commonData = new Dictionary[fieldCount]; + CommonData = new Dictionary[fieldCount]; // HACK as of 24473 values are 4 byte aligned // try to calculate this by seeing if all tuples are 8 bytes @@ -303,10 +304,10 @@ public 
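// The sparseIdLookup pass above, as a standalone sketch: offset-map rows that
// point at the same file offset are duplicates, so only the first becomes a
// real record and the rest are recorded as id -> parent-id copy entries.
using System.Collections.Generic;

struct OffsetEntry { public uint Offset; public ushort Size; } // stand-in for a sparse entry

static class SparseDedupSketch
{
    public static (List<OffsetEntry> Unique, Dictionary<int, int> Copies)
        Deduplicate(OffsetEntry[] entries, int minIndex)
    {
        var unique = new List<OffsetEntry>(entries.Length);
        var copies = new Dictionary<int, int>();
        var firstIdAtOffset = new Dictionary<uint, int>(entries.Length);

        for (int i = 0; i < entries.Length; i++)
        {
            if (firstIdAtOffset.TryGetValue(entries[i].Offset, out int parentId))
                copies[minIndex + i] = parentId; // duplicate of an earlier row
            else
            {
                unique.Add(entries[i]);
                firstIdAtOffset.Add(entries[i].Offset, minIndex + i);
            }
        }
        return (unique, copies);
    }
}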
WDB6Reader(Stream stream) { CommonDataTypes[i - FieldsCount] = type; - m_meta[i] = new FieldMetaData() + Meta[i] = new FieldMetaData() { Bits = CommonDataTypeBits[type], - Offset = (short)(m_meta[i - 1].Offset + ((32 - m_meta[i - 1].Bits) >> 3)) + Offset = (short)(Meta[i - 1].Offset + ((32 - Meta[i - 1].Bits) >> 3)) }; } @@ -320,26 +321,26 @@ public WDB6Reader(Stream stream) commonValues.Add(id, value); } - m_commonData[i] = commonValues; + CommonData[i] = commonValues; } } int position = 0; for (int i = 0; i < RecordsCount; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else { bitReader.Offset = i * RecordSize; } - IDBRow rec = new WDB6Row(this, bitReader, Flags.HasFlagExt(DB2Flags.Index) ? m_indexData[i] : -1, i); + IDBRow rec = new WDB6Row(this, bitReader, Flags.HasFlagExt(DB2Flags.Index) ? IndexData[i] : -1, i); _Records.Add(i, rec); } } diff --git a/DBCD.IO/Readers/WDBCReader.cs b/DBCD.IO/Readers/WDBCReader.cs index 940a48d..72cda5c 100644 --- a/DBCD.IO/Readers/WDBCReader.cs +++ b/DBCD.IO/Readers/WDBCReader.cs @@ -122,7 +122,7 @@ public WDBCReader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDBCReader(Stream stream) { - using (var reader = new BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDBC file is corrupted!"); @@ -140,21 +140,22 @@ public WDBCReader(Stream stream) if (RecordsCount == 0) return; - recordsData = reader.ReadBytes(RecordsCount * RecordSize); - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; for (int i = 0; i < RecordsCount; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = i * RecordSize * 8 }; + BitReader bitReader = new BitReader(RecordsData) { Position = i * RecordSize * 8 }; IDBRow rec = new WDBCRow(this, bitReader, i); _Records.Add(i, rec); } - m_stringsTable = new Dictionary(StringTableSize / 0x20); + StringTable = new Dictionary(StringTableSize / 0x20); for (int i = 0; i < StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i] = reader.ReadCString(); + StringTable[i] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } } diff --git a/DBCD.IO/Readers/WDC1Reader.cs b/DBCD.IO/Readers/WDC1Reader.cs index 39ec783..c5db88b 100644 --- a/DBCD.IO/Readers/WDC1Reader.cs +++ b/DBCD.IO/Readers/WDC1Reader.cs @@ -211,7 +211,7 @@ public WDC1Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDC1Reader(Stream stream) { - using (var reader = new BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDC1 file is corrupted!"); @@ -249,36 +249,36 @@ public WDC1Reader(Stream stream) return; // field meta data - m_meta = reader.ReadArray(FieldsCount); + Meta = reader.ReadArray(FieldsCount); if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(RecordsCount * RecordSize); - - Array.Resize(ref recordsData, 
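// The Offset arithmetic in the WDB6 common-data hunk above, isolated: field
// widths are stored as (32 - bitSize), so a field occupies (32 - Bits) >> 3
// bytes and the appended common-data field starts that far past its
// predecessor. Bits == 0 means a 4-byte field, 16 means 2 bytes, 24 means 1.
static class FieldOffsetSketch
{
    public static short NextOffset(short prevOffset, short prevBits)
        => (short)(prevOffset + ((32 - prevBits) >> 3));
}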
recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string data - m_stringsTable = new Dictionary(StringTableSize / 0x20); + StringTable = new Dictionary(StringTableSize / 0x20); for (int i = 0; i < StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i] = reader.ReadCString(); + StringTable[i] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } } else { // sparse data with inlined strings - recordsData = reader.ReadBytes(sparseTableOffset - HeaderSize - Unsafe.SizeOf() * FieldsCount); + RecordsData = reader.ReadBytes(sparseTableOffset - HeaderSize - Unsafe.SizeOf() * FieldsCount); if (reader.BaseStream.Position != sparseTableOffset) throw new Exception("r.BaseStream.Position != sparseTableOffset"); int sparseCount = MaxIndex - MinIndex + 1; - m_sparseEntries = new List(sparseCount); - m_copyData = new Dictionary(sparseCount); + SparseEntries = new List(sparseCount); + CopyData = new Dictionary(sparseCount); var sparseIdLookup = new Dictionary(sparseCount); for (int i = 0; i < sparseCount; i++) @@ -289,49 +289,49 @@ public WDC1Reader(Stream stream) if (sparseIdLookup.TryGetValue(sparse.Offset, out int copyId)) { - m_copyData[MinIndex + i] = copyId; + CopyData[MinIndex + i] = copyId; } else { - m_sparseEntries.Add(sparse); + SparseEntries.Add(sparse); sparseIdLookup.Add(sparse.Offset, MinIndex + i); } } } // index data - m_indexData = reader.ReadArray(indexDataSize / 4); + IndexData = reader.ReadArray(indexDataSize / 4); // duplicate rows data - if (m_copyData == null) - m_copyData = new Dictionary(copyTableSize / 8); + if (CopyData == null) + CopyData = new Dictionary(copyTableSize / 8); for (int i = 0; i < copyTableSize / 8; i++) - m_copyData[reader.ReadInt32()] = reader.ReadInt32(); + CopyData[reader.ReadInt32()] = reader.ReadInt32(); // column meta data - m_columnMeta = reader.ReadArray(FieldsCount); + ColumnMeta = reader.ReadArray(FieldsCount); // pallet data - m_palletData = new Value32[m_columnMeta.Length][]; - for (int i = 0; i < m_columnMeta.Length; i++) + PalletData = new Value32[ColumnMeta.Length][]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) { - m_palletData[i] = reader.ReadArray((int)m_columnMeta[i].AdditionalDataSize / 4); + PalletData[i] = reader.ReadArray((int)ColumnMeta[i].AdditionalDataSize / 4); } } // common data - m_commonData = new Dictionary[m_columnMeta.Length]; - for (int i = 0; i < m_columnMeta.Length; i++) + CommonData = new Dictionary[ColumnMeta.Length]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Common) + if (ColumnMeta[i].CompressionType == CompressionType.Common) { - var commonValues = new Dictionary((int)m_columnMeta[i].AdditionalDataSize / 8); - m_commonData[i] = commonValues; + var commonValues = new Dictionary((int)ColumnMeta[i].AdditionalDataSize / 8); + CommonData[i] = commonValues; - for (int j = 0; j < m_columnMeta[i].AdditionalDataSize / 8; j++) + for (int j = 0; j < ColumnMeta[i].AdditionalDataSize / 8; j++) commonValues[reader.ReadInt32()] = reader.Read(); } } @@ -353,17 
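// WDBC's layout is the simple case the loop above handles: fixed-size records
// back to back, so record i starts at byte i * recordSize (the reader states
// the same position in bits, i * RecordSize * 8). A sketch:
using System;

static class FixedRecordSketch
{
    public static ReadOnlySpan<byte> Record(byte[] recordsData, int i, int recordSize)
        => new ReadOnlySpan<byte>(recordsData, i * recordSize, recordSize);
}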
+353,17 @@ public WDC1Reader(Stream stream) int position = 0; for (int i = 0; i < RecordsCount; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else bitReader.Offset = i * RecordSize; - IDBRow rec = new WDC1Row(this, bitReader, indexDataSize != 0 ? m_indexData[i] : -1, refData?.Entries.ElementAtOrDefault(i), i); + IDBRow rec = new WDC1Row(this, bitReader, indexDataSize != 0 ? IndexData[i] : -1, refData?.Entries.ElementAtOrDefault(i), i); _Records.Add(i, rec); } } diff --git a/DBCD.IO/Readers/WDC2Reader.cs b/DBCD.IO/Readers/WDC2Reader.cs index 115572f..10e24ec 100644 --- a/DBCD.IO/Readers/WDC2Reader.cs +++ b/DBCD.IO/Readers/WDC2Reader.cs @@ -258,7 +258,7 @@ public WDC2Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDC2Reader(Stream stream) { - using (var reader = new BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDC2 file is corrupted!"); @@ -296,31 +296,31 @@ public WDC2Reader(Stream stream) SectionHeader[] sections = reader.ReadArray(sectionsCount); // field meta data - m_meta = reader.ReadArray(FieldsCount); + Meta = reader.ReadArray(FieldsCount); // column meta data - m_columnMeta = reader.ReadArray(FieldsCount); + ColumnMeta = reader.ReadArray(FieldsCount); // pallet data - m_palletData = new Value32[m_columnMeta.Length][]; - for (int i = 0; i < m_columnMeta.Length; i++) + PalletData = new Value32[ColumnMeta.Length][]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) { - m_palletData[i] = reader.ReadArray((int)m_columnMeta[i].AdditionalDataSize / 4); + PalletData[i] = reader.ReadArray((int)ColumnMeta[i].AdditionalDataSize / 4); } } // common data - m_commonData = new Dictionary[m_columnMeta.Length]; - for (int i = 0; i < m_columnMeta.Length; i++) + CommonData = new Dictionary[ColumnMeta.Length]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Common) + if (ColumnMeta[i].CompressionType == CompressionType.Common) { - var commonValues = new Dictionary((int)m_columnMeta[i].AdditionalDataSize / 8); - m_commonData[i] = commonValues; + var commonValues = new Dictionary((int)ColumnMeta[i].AdditionalDataSize / 8); + CommonData[i] = commonValues; - for (int j = 0; j < m_columnMeta[i].AdditionalDataSize / 8; j++) + for (int j = 0; j < ColumnMeta[i].AdditionalDataSize / 8; j++) commonValues[reader.ReadInt32()] = reader.Read(); } } @@ -332,31 +332,31 @@ public WDC2Reader(Stream stream) if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(sections[sectionIndex].NumRecords * RecordSize); - - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string data - m_stringsTable = new 
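// What the two compression types read above mean at lookup time, in sketch
// form: a Pallet column stores each distinct value once and records carry only
// a small bit-packed index into that array, while a Common column stores a
// per-column default plus an id -> value map holding just the rows that differ.
using System.Collections.Generic;

static class CompressionLookupSketch
{
    public static uint PalletValue(uint[] pallet, int palletIndex)
        => pallet[palletIndex];

    public static uint CommonValue(Dictionary<int, uint> overrides, int id, uint defaultValue)
        => overrides.TryGetValue(id, out uint v) ? v : defaultValue;
}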
Dictionary(sections[sectionIndex].StringTableSize / 0x20); + StringTable = new Dictionary(sections[sectionIndex].StringTableSize / 0x20); for (int i = 0; i < sections[sectionIndex].StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[oldPos] = reader.ReadCString(); + StringTable[oldPos] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } } else { // sparse data with inlined strings - recordsData = reader.ReadBytes(sections[sectionIndex].SparseTableOffset - sections[sectionIndex].FileOffset); + RecordsData = reader.ReadBytes(sections[sectionIndex].SparseTableOffset - sections[sectionIndex].FileOffset); if (reader.BaseStream.Position != sections[sectionIndex].SparseTableOffset) throw new Exception("reader.BaseStream.Position != sections[sectionIndex].SparseTableOffset"); int sparseCount = MaxIndex - MinIndex + 1; - m_sparseEntries = new List(sparseCount); - m_copyData = new Dictionary(sparseCount); + SparseEntries = new List(sparseCount); + CopyData = new Dictionary(sparseCount); var sparseIdLookup = new Dictionary(sparseCount); for (int i = 0; i < sparseCount; i++) @@ -367,29 +367,29 @@ public WDC2Reader(Stream stream) if (sparseIdLookup.TryGetValue(sparse.Offset, out int copyId)) { - m_copyData[MinIndex + i] = copyId; + CopyData[MinIndex + i] = copyId; } else { - m_sparseEntries.Add(sparse); + SparseEntries.Add(sparse); sparseIdLookup.Add(sparse.Offset, MinIndex + i); } } } // index data - m_indexData = reader.ReadArray(sections[sectionIndex].IndexDataSize / 4); + IndexData = reader.ReadArray(sections[sectionIndex].IndexDataSize / 4); // fix zero-filled index data - if (m_indexData.Length > 0 && m_indexData.All(x => x == 0)) - m_indexData = Enumerable.Range(MinIndex, MaxIndex - MinIndex + 1).ToArray(); + if (IndexData.Length > 0 && IndexData.All(x => x == 0)) + IndexData = Enumerable.Range(MinIndex, MaxIndex - MinIndex + 1).ToArray(); // duplicate rows data - if (m_copyData == null) - m_copyData = new Dictionary(sections[sectionIndex].CopyTableSize / 8); + if (CopyData == null) + CopyData = new Dictionary(sections[sectionIndex].CopyTableSize / 8); for (int i = 0; i < sections[sectionIndex].CopyTableSize / 8; i++) - m_copyData[reader.ReadInt32()] = reader.ReadInt32(); + CopyData[reader.ReadInt32()] = reader.ReadInt32(); // reference data ReferenceData refData = null; @@ -408,17 +408,17 @@ public WDC2Reader(Stream stream) int position = 0; for (int i = 0; i < RecordsCount; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else bitReader.Offset = i * RecordSize; - IDBRow rec = new WDC2Row(this, bitReader, sections[sectionIndex].FileOffset, sections[sectionIndex].IndexDataSize != 0 ? m_indexData[i] : -1, refData?.Entries.ElementAtOrDefault(i), i); + IDBRow rec = new WDC2Row(this, bitReader, sections[sectionIndex].FileOffset, sections[sectionIndex].IndexDataSize != 0 ? 
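// The zero-filled index fix above, isolated: when an index table reads back as
// all zeros (typically because the section was encrypted), the ids can still
// be regenerated as the contiguous range starting at MinIndex, since these
// tables are contiguous per section.
using System.Linq;

static class IndexFixupSketch
{
    public static int[] FixZeroFilled(int[] indexData, int minIndex, int count)
        => indexData.Length > 0 && indexData.All(x => x == 0)
            ? Enumerable.Range(minIndex, count).ToArray()
            : indexData;
}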
IndexData[i] : -1, refData?.Entries.ElementAtOrDefault(i), i); _Records.Add(i, rec); } } diff --git a/DBCD.IO/Readers/WDC3Reader.cs b/DBCD.IO/Readers/WDC3Reader.cs index dfa977b..4041c86 100644 --- a/DBCD.IO/Readers/WDC3Reader.cs +++ b/DBCD.IO/Readers/WDC3Reader.cs @@ -236,7 +236,7 @@ public WDC3Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDC3Reader(Stream stream) { - using (var reader = new BinaryReader(stream, Encoding.UTF8)) + using (var reader = new BinaryReader(stream)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDC3 file is corrupted!"); @@ -271,31 +271,31 @@ public WDC3Reader(Stream stream) SectionHeaderWDC3[] sections = reader.ReadArray(sectionsCount); // field meta data - m_meta = reader.ReadArray(FieldsCount); + Meta = reader.ReadArray(FieldsCount); // column meta data - m_columnMeta = reader.ReadArray(FieldsCount); + ColumnMeta = reader.ReadArray(FieldsCount); // pallet data - m_palletData = new Value32[m_columnMeta.Length][]; - for (int i = 0; i < m_columnMeta.Length; i++) + PalletData = new Value32[ColumnMeta.Length][]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) { - m_palletData[i] = reader.ReadArray((int)m_columnMeta[i].AdditionalDataSize / 4); + PalletData[i] = reader.ReadArray((int)ColumnMeta[i].AdditionalDataSize / 4); } } // common data - m_commonData = new Dictionary[m_columnMeta.Length]; - for (int i = 0; i < m_columnMeta.Length; i++) + CommonData = new Dictionary[ColumnMeta.Length]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Common) + if (ColumnMeta[i].CompressionType == CompressionType.Common) { - var commonValues = new Dictionary((int)m_columnMeta[i].AdditionalDataSize / 8); - m_commonData[i] = commonValues; + var commonValues = new Dictionary((int)ColumnMeta[i].AdditionalDataSize / 8); + CommonData[i] = commonValues; - for (int j = 0; j < m_columnMeta[i].AdditionalDataSize / 8; j++) + for (int j = 0; j < ColumnMeta[i].AdditionalDataSize / 8; j++) commonValues[reader.ReadInt32()] = reader.Read(); } } @@ -308,18 +308,18 @@ public WDC3Reader(Stream stream) if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(sections[sectionIndex].NumRecords * RecordSize); - - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(RecordsCount * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string data - if (m_stringsTable == null) - m_stringsTable = new Dictionary(sections[sectionIndex].StringTableSize / 0x20); + if (StringTable == null) + StringTable = new Dictionary(sections[sectionIndex].StringTableSize / 0x20); for (int i = 0; i < sections[sectionIndex].StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i + previousStringTableSize] = reader.ReadCString(); + StringTable[i + previousStringTableSize] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } @@ -328,34 +328,34 @@ public WDC3Reader(Stream stream) else { // sparse data with inlined strings - recordsData = reader.ReadBytes(sections[sectionIndex].OffsetRecordsEndOffset - 
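// Sketch of the untyped 32-bit cell idea behind the pallet/common reads above
// (the library's Value32 plays this role): the file stores raw 4-byte values,
// and a typed column accessor reinterprets the bits on demand.
using System.Runtime.CompilerServices;

struct Raw32
{
    public uint Bits;

    // Only meaningful for 4-byte unmanaged types (int, uint, float).
    public T As<T>() where T : unmanaged => Unsafe.As<uint, T>(ref Bits);
}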
sections[sectionIndex].FileOffset); + RecordsData = reader.ReadBytes(sections[sectionIndex].OffsetRecordsEndOffset - sections[sectionIndex].FileOffset); if (reader.BaseStream.Position != sections[sectionIndex].OffsetRecordsEndOffset) throw new Exception("reader.BaseStream.Position != sections[sectionIndex].OffsetRecordsEndOffset"); } // skip encrypted sections => has tact key + record data is zero filled - if (sections[sectionIndex].TactKeyLookup != 0 && Array.TrueForAll(recordsData, x => x == 0)) + if (sections[sectionIndex].TactKeyLookup != 0 && Array.TrueForAll(RecordsData, x => x == 0)) { previousRecordCount += sections[sectionIndex].NumRecords; continue; } // index data - m_indexData = reader.ReadArray(sections[sectionIndex].IndexDataSize / 4); + IndexData = reader.ReadArray(sections[sectionIndex].IndexDataSize / 4); // fix zero-filled index data - if (m_indexData.Length > 0 && m_indexData.All(x => x == 0)) - m_indexData = Enumerable.Range(MinIndex + previousRecordCount, sections[sectionIndex].NumRecords).ToArray(); + if (IndexData.Length > 0 && IndexData.All(x => x == 0)) + IndexData = Enumerable.Range(MinIndex + previousRecordCount, sections[sectionIndex].NumRecords).ToArray(); // duplicate rows data if (sections[sectionIndex].CopyTableCount > 0) { - if (m_copyData == null) - m_copyData = new Dictionary(); + if (CopyData == null) + CopyData = new Dictionary(); for (int i = 0; i < sections[sectionIndex].CopyTableCount; i++) - m_copyData[reader.ReadInt32()] = reader.ReadInt32(); + CopyData[reader.ReadInt32()] = reader.ReadInt32(); } if (sections[sectionIndex].OffsetMapIDCount > 0) @@ -364,7 +364,7 @@ public WDC3Reader(Stream stream) if (TableHash == 145293629) reader.BaseStream.Position += 4 * sections[sectionIndex].OffsetMapIDCount; - m_sparseEntries = reader.ReadArray(sections[sectionIndex].OffsetMapIDCount).ToList(); + SparseEntries = reader.ReadArray(sections[sectionIndex].OffsetMapIDCount).ToList(); } // reference data @@ -385,26 +385,26 @@ public WDC3Reader(Stream stream) { int[] sparseIndexData = reader.ReadArray(sections[sectionIndex].OffsetMapIDCount); - if (sections[sectionIndex].IndexDataSize > 0 && m_indexData.Length != sparseIndexData.Length) + if (sections[sectionIndex].IndexDataSize > 0 && IndexData.Length != sparseIndexData.Length) throw new Exception("m_indexData.Length != sparseIndexData.Length"); - m_indexData = sparseIndexData; + IndexData = sparseIndexData; } int position = 0; for (int i = 0; i < sections[sectionIndex].NumRecords; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else bitReader.Offset = i * RecordSize; - IDBRow rec = new WDC3Row(this, bitReader, sections[sectionIndex].IndexDataSize != 0 ? m_indexData[i] : -1, refData?.Entries.ElementAtOrDefault(i), i + previousRecordCount); + IDBRow rec = new WDC3Row(this, bitReader, sections[sectionIndex].IndexDataSize != 0 ? 
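// The encrypted-section guard above, isolated: a section that carries a TACT
// key id but whose record block is all zeros could not be decrypted, so it is
// skipped - while still advancing the running record count so the row ids of
// later sections stay aligned.
using System;

static class EncryptedSectionSketch
{
    public static bool IsUnreadable(ulong tactKeyLookup, byte[] recordsData)
        => tactKeyLookup != 0 && Array.TrueForAll(recordsData, b => b == 0);
}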
IndexData[i] : -1, refData?.Entries.ElementAtOrDefault(i), i + previousRecordCount); _Records.Add(_Records.Count, rec); } diff --git a/DBCD.IO/Storage.cs b/DBCD.IO/Storage.cs deleted file mode 100644 index 6e77109..0000000 --- a/DBCD.IO/Storage.cs +++ /dev/null @@ -1,14 +0,0 @@ -using System.Collections.Generic; -using System.IO; - -namespace DBCD.IO -{ - public class Storage : SortedDictionary where T : class, new() - { - public Storage(string fileName) : this(File.Open(fileName, FileMode.Open, FileAccess.Read, FileShare.Read)) { } - - public Storage(Stream stream) : this(new DBReader(stream)) { } - - public Storage(DBReader dbReader) => dbReader.PopulateRecords(this); - } -} diff --git a/DBCD.IO/Writers/BaseWriter.cs b/DBCD.IO/Writers/BaseWriter.cs index 31f4ede..65c545a 100644 --- a/DBCD.IO/Writers/BaseWriter.cs +++ b/DBCD.IO/Writers/BaseWriter.cs @@ -15,6 +15,18 @@ abstract class BaseWriter where T : class public int IdFieldIndex { get; } public DB2Flags Flags { get; } + #region Data + + public FieldMetaData[] Meta { get; protected set; } + public ColumnMetaData[] ColumnMeta { get; protected set; } + public List[] PalletData { get; protected set; } + public Dictionary[] CommonData { get; protected set; } + public Dictionary StringTable { get; protected set; } + public SortedDictionary CopyData { get; protected set; } + public List ReferenceData { get; protected set; } + + #endregion + public BaseWriter(BaseReader reader) { FieldCache = typeof(T).ToFieldCache(); @@ -24,62 +36,37 @@ public BaseWriter(BaseReader reader) IdFieldIndex = reader.IdFieldIndex; Flags = reader.Flags; - m_stringsTable = new Dictionary(); - m_copyData = new SortedDictionary(); - m_meta = reader.Meta; - m_columnMeta = reader.ColumnMeta; + StringTable = new Dictionary(); + CopyData = new SortedDictionary(); + Meta = reader.Meta; + ColumnMeta = reader.ColumnMeta; - if (m_columnMeta != null) + if (ColumnMeta != null) { - m_commonData = new Dictionary[m_columnMeta.Length]; - m_palletData = new List[m_columnMeta.Length]; - m_referenceData = new List(); + CommonData = new Dictionary[ColumnMeta.Length]; + PalletData = new List[ColumnMeta.Length]; + ReferenceData = new List(); // create the lookup collections - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - m_commonData[i] = new Dictionary(); - m_palletData[i] = new List(); + CommonData[i] = new Dictionary(); + PalletData[i] = new List(); } } + // add an empty string at the first index InternString(""); - } - - - #region Data - - protected FieldMetaData[] m_meta; - public FieldMetaData[] Meta => m_meta; - - protected ColumnMetaData[] m_columnMeta; - public ColumnMetaData[] ColumnMeta => m_columnMeta; - - protected List[] m_palletData; - public List[] PalletData => m_palletData; - - protected Dictionary[] m_commonData; - public Dictionary[] CommonData => m_commonData; - - protected Dictionary m_stringsTable; - public Dictionary StringTable => m_stringsTable; - - protected SortedDictionary m_copyData; - public SortedDictionary CopyData => m_copyData; - - protected List m_referenceData; - public List ReferenceData => m_referenceData; - - #endregion + } #region Methods public int InternString(string value) { - if (m_stringsTable.TryGetValue(value, out int index)) + if (StringTable.TryGetValue(value, out int index)) return index; - m_stringsTable.Add(value, StringTableSize); + StringTable.Add(value, StringTableSize); int offset = StringTableSize; StringTableSize += value.Length + 1; @@ -94,7 +81,7 @@ public void 
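// The writer-side string table that InternString above maintains, as a
// self-contained sketch: every distinct string is stored once, keyed by the
// byte offset it will occupy, and the running size advances by length + 1 for
// the NUL terminator (string length stands in for byte length, as in the
// patch, which holds for ASCII content).
using System.Collections.Generic;

class StringInternSketch
{
    private readonly Dictionary<string, int> _table = new Dictionary<string, int>();
    private int _size;

    public int Intern(string value)
    {
        if (_table.TryGetValue(value, out int offset))
            return offset;

        _table.Add(value, _size);
        offset = _size;
        _size += value.Length + 1; // + NUL
        return offset;
    }
}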
WriteOffsetRecords(BinaryWriter writer, IDBRowSerializer serializ { if (serializer.Records.TryGetValue(i, out var record)) { - if (m_copyData.TryGetValue(i, out int copyid)) + if (CopyData.TryGetValue(i, out int copyid)) { // copy records use their parent's offset writer.Write(sparseIdLookup[copyid]); diff --git a/DBCD.IO/Writers/WDB2Writer.cs b/DBCD.IO/Writers/WDB2Writer.cs index 3c6b0e8..1ab5dd5 100644 --- a/DBCD.IO/Writers/WDB2Writer.cs +++ b/DBCD.IO/Writers/WDB2Writer.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Text; namespace DBCD.IO.Writers { @@ -162,7 +163,7 @@ public WDB2Writer(WDB2Reader reader, IDictionary storage, Stream stream) foreach (var record in serializer.Records) record.Value.CopyTo(writer.BaseStream); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } } diff --git a/DBCD.IO/Writers/WDB3Writer.cs b/DBCD.IO/Writers/WDB3Writer.cs index d5c5cd7..798d5a4 100644 --- a/DBCD.IO/Writers/WDB3Writer.cs +++ b/DBCD.IO/Writers/WDB3Writer.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Text; namespace DBCD.IO.Writers { @@ -135,7 +136,7 @@ public WDB3Writer(WDB3Reader reader, IDictionary storage, Stream stream) serializer.Serialize(storage); serializer.GetCopyRows(); - RecordsCount = serializer.Records.Count - m_copyData.Count; + RecordsCount = serializer.Records.Count - CopyData.Count; if (Flags.HasFlagExt(DB2Flags.Sparse)) StringTableSize = 0; @@ -143,7 +144,7 @@ public WDB3Writer(WDB3Reader reader, IDictionary storage, Stream stream) { int minIndex = storage.Keys.Min(); int maxIndex = storage.Keys.Max(); - int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 
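// What WriteOffsetRecords above does with copy rows, sketched with assumed
// collection shapes: a copy row owns no record bytes, so its offset-map entry
// repeats the file offset of the parent row it duplicates.
using System.Collections.Generic;
using System.IO;

static class OffsetMapWriteSketch
{
    public static void WriteOffsets(BinaryWriter writer, IEnumerable<int> ids,
        IReadOnlyDictionary<int, int> copyOf, IReadOnlyDictionary<int, uint> offsetOf)
    {
        foreach (int id in ids)
        {
            uint offset = copyOf.TryGetValue(id, out int parent)
                ? offsetOf[parent] // copies reuse their parent's offset
                : offsetOf[id];
            writer.Write(offset);
        }
    }
}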
0 : CopyData.Count * 8; writer.Write(WDB3FmtSig); writer.Write(RecordsCount); @@ -175,25 +176,25 @@ public WDB3Writer(WDB3Reader reader, IDictionary storage, Stream stream) // record data foreach (var record in serializer.Records) - if (!m_copyData.ContainsKey(record.Key)) + if (!CopyData.ContainsKey(record.Key)) record.Value.CopyTo(writer.BaseStream); // string table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { writer.WriteCString(""); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } // index table if (Flags.HasFlagExt(DB2Flags.Index)) - writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); // copy table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { - foreach (var copyRecord in m_copyData) + foreach (var copyRecord in CopyData) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); diff --git a/DBCD.IO/Writers/WDB4Writer.cs b/DBCD.IO/Writers/WDB4Writer.cs index f5f2217..d7ecaa8 100644 --- a/DBCD.IO/Writers/WDB4Writer.cs +++ b/DBCD.IO/Writers/WDB4Writer.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Text; namespace DBCD.IO.Writers { @@ -134,13 +135,13 @@ public WDB4Writer(WDB4Reader reader, IDictionary storage, Stream stream) serializer.Serialize(storage); serializer.GetCopyRows(); - RecordsCount = serializer.Records.Count - m_copyData.Count; + RecordsCount = serializer.Records.Count - CopyData.Count; using (var writer = new BinaryWriter(stream)) { int minIndex = storage.Keys.Min(); int maxIndex = storage.Keys.Max(); - int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 
0 : CopyData.Count * 8; writer.Write(WDB4FmtSig); writer.Write(RecordsCount); @@ -162,14 +163,14 @@ public WDB4Writer(WDB4Reader reader, IDictionary storage, Stream stream) // record data uint recordsOffset = (uint)writer.BaseStream.Position; foreach (var record in serializer.Records) - if (!m_copyData.ContainsKey(record.Key)) + if (!CopyData.ContainsKey(record.Key)) record.Value.CopyTo(writer.BaseStream); // string table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { writer.WriteCString(""); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } @@ -191,12 +192,12 @@ public WDB4Writer(WDB4Reader reader, IDictionary storage, Stream stream) // index table if (Flags.HasFlagExt(DB2Flags.Index)) - writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); // copy table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { - foreach (var copyRecord in m_copyData) + foreach (var copyRecord in CopyData) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); diff --git a/DBCD.IO/Writers/WDB5Writer.cs b/DBCD.IO/Writers/WDB5Writer.cs index 0004975..e96ba0c 100644 --- a/DBCD.IO/Writers/WDB5Writer.cs +++ b/DBCD.IO/Writers/WDB5Writer.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Text; namespace DBCD.IO.Writers { @@ -143,13 +144,13 @@ public WDB5Writer(WDB5Reader reader, IDictionary storage, Stream stream) serializer.Serialize(storage); serializer.GetCopyRows(); - RecordsCount = serializer.Records.Count - m_copyData.Count; + RecordsCount = serializer.Records.Count - CopyData.Count; using (var writer = new BinaryWriter(stream)) { int minIndex = storage.Keys.Min(); int maxIndex = storage.Keys.Max(); - int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 
0 : CopyData.Count * 8; writer.Write(WDB5FmtSig); writer.Write(RecordsCount); @@ -169,19 +170,19 @@ public WDB5Writer(WDB5Reader reader, IDictionary storage, Stream stream) return; // field meta - writer.WriteArray(m_meta); + writer.WriteArray(Meta); // record data uint recordsOffset = (uint)writer.BaseStream.Position; foreach (var record in serializer.Records) - if (!m_copyData.ContainsKey(record.Key)) + if (!CopyData.ContainsKey(record.Key)) record.Value.CopyTo(writer.BaseStream); // string table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { writer.WriteCString(""); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } @@ -203,12 +204,12 @@ public WDB5Writer(WDB5Reader reader, IDictionary storage, Stream stream) // index table if (Flags.HasFlagExt(DB2Flags.Index)) - writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); // copy table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { - foreach (var copyRecord in m_copyData) + foreach (var copyRecord in CopyData) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); diff --git a/DBCD.IO/Writers/WDB6Writer.cs b/DBCD.IO/Writers/WDB6Writer.cs index 89e9436..acff522 100644 --- a/DBCD.IO/Writers/WDB6Writer.cs +++ b/DBCD.IO/Writers/WDB6Writer.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Text; namespace DBCD.IO.Writers { @@ -146,20 +147,20 @@ public WDB6Writer(WDB6Reader reader, IDictionary storage, Stream stream) // always 2 empties StringTableSize++; - m_commonData = new Dictionary[m_meta.Length - FieldsCount]; - Array.ForEach(m_commonData, x => x = new Dictionary()); + CommonData = new Dictionary[Meta.Length - FieldsCount]; + Array.ForEach(CommonData, x => x = new Dictionary()); WDB6RowSerializer serializer = new WDB6RowSerializer(this); serializer.Serialize(storage); serializer.GetCopyRows(); - RecordsCount = serializer.Records.Count - m_copyData.Count; + RecordsCount = serializer.Records.Count - CopyData.Count; using (var writer = new BinaryWriter(stream)) { int minIndex = storage.Keys.Min(); int maxIndex = storage.Keys.Max(); - int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 
0 : CopyData.Count * 8; writer.Write(WDB6FmtSig); writer.Write(RecordsCount); @@ -174,7 +175,7 @@ public WDB6Writer(WDB6Reader reader, IDictionary storage, Stream stream) writer.Write(copyTableSize); writer.Write((ushort)Flags); writer.Write((ushort)IdFieldIndex); - writer.Write(m_meta.Length); // totalFieldCount + writer.Write(Meta.Length); // totalFieldCount writer.Write(0); // commonDataSize if (storage.Count == 0) @@ -182,19 +183,19 @@ public WDB6Writer(WDB6Reader reader, IDictionary storage, Stream stream) // field meta for (int i = 0; i < FieldsCount; i++) - writer.Write(m_meta[i]); + writer.Write(Meta[i]); // record data uint recordsOffset = (uint)writer.BaseStream.Position; foreach (var record in serializer.Records) - if (!m_copyData.TryGetValue(record.Key, out int parent)) + if (!CopyData.TryGetValue(record.Key, out int parent)) record.Value.CopyTo(writer.BaseStream); // string table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { writer.WriteCString(""); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } @@ -216,12 +217,12 @@ public WDB6Writer(WDB6Reader reader, IDictionary storage, Stream stream) // index table if (Flags.HasFlagExt(DB2Flags.Index)) - writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); // copy table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { - foreach (var copyRecord in m_copyData) + foreach (var copyRecord in CopyData) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); @@ -231,18 +232,18 @@ public WDB6Writer(WDB6Reader reader, IDictionary storage, Stream stream) // common data // HACK this is bodged together // - it only writes common data columns and all values including common ones - if (m_commonData.Length > 0) + if (CommonData.Length > 0) { long startPos = writer.BaseStream.Position; - writer.Write(m_meta.Length - FieldsCount); + writer.Write(Meta.Length - FieldsCount); - for (int i = 0; i < m_commonData.Length; i++) + for (int i = 0; i < CommonData.Length; i++) { - writer.Write(m_commonData[i].Count); + writer.Write(CommonData[i].Count); writer.Write(reader.CommonDataTypes[i]); // type - foreach (var record in m_commonData[i]) + foreach (var record in CommonData[i]) { writer.Write(record.Key); diff --git a/DBCD.IO/Writers/WDBCWriter.cs b/DBCD.IO/Writers/WDBCWriter.cs index 105f9c4..1f70063 100644 --- a/DBCD.IO/Writers/WDBCWriter.cs +++ b/DBCD.IO/Writers/WDBCWriter.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Text; namespace DBCD.IO.Writers { @@ -127,7 +128,7 @@ public WDBCWriter(WDBCReader reader, IDictionary storage, Stream stream) foreach (var record in serializer.Records) record.Value.CopyTo(writer.BaseStream); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } } diff --git a/DBCD.IO/Writers/WDC1Writer.cs b/DBCD.IO/Writers/WDC1Writer.cs index 650f25c..0e5c96c 100644 --- a/DBCD.IO/Writers/WDC1Writer.cs +++ b/DBCD.IO/Writers/WDC1Writer.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.IO; using System.Linq; +using System.Text; namespace DBCD.IO.Writers { @@ -224,7 +225,7 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) serializer.Serialize(storage); serializer.GetCopyRows(); - RecordsCount = serializer.Records.Count - m_copyData.Count; + RecordsCount = serializer.Records.Count - CopyData.Count; var (commonDataSize, palletDataSize, 
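// The copy-table bookkeeping every writer above repeats, as one helper:
// duplicate rows are never serialized as records, so the header's record count
// subtracts them, and the non-sparse copy table is a flat list of
// {int id, int parentId} pairs at 8 bytes each.
static class CopyTableMathSketch
{
    public static (int RecordsCount, int CopyTableSize) Compute(
        int totalRows, int copyRows, bool sparse)
        => (totalRows - copyRows, sparse ? 0 : copyRows * 8);
}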
referenceDataSize) = GetDataSizes(); @@ -232,7 +233,7 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) { int minIndex = storage.Keys.Min(); int maxIndex = storage.Keys.Max(); - int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : CopyData.Count * 8; writer.Write(WDC1FmtSig); writer.Write(RecordsCount); @@ -250,10 +251,10 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) writer.Write(FieldsCount); // totalFieldCount writer.Write(reader.PackedDataOffset); - writer.Write(m_referenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount + writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount writer.Write(0); // sparseTableOffset writer.Write(RecordsCount * 4); // indexTableSize - writer.Write(m_columnMeta.Length * 24); // ColumnMetaDataSize + writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize writer.Write(commonDataSize); writer.Write(palletDataSize); writer.Write(referenceDataSize); @@ -262,19 +263,19 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) return; // field meta - writer.WriteArray(m_meta); + writer.WriteArray(Meta); // record data uint recordsOffset = (uint)writer.BaseStream.Position; foreach (var record in serializer.Records) - if (!m_copyData.TryGetValue(record.Key, out int parent)) + if (!CopyData.TryGetValue(record.Key, out int parent)) record.Value.CopyTo(writer.BaseStream); // string table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { writer.WriteCString(""); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } @@ -292,12 +293,12 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) // index table if (Flags.HasFlagExt(DB2Flags.Index)) - writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); // copy table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { - foreach (var copyRecord in m_copyData) + foreach (var copyRecord in CopyData) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); @@ -305,24 +306,24 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) } // column meta data - writer.WriteArray(m_columnMeta); + writer.WriteArray(ColumnMeta); // pallet data - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) { - foreach (var palletData in m_palletData[i]) + foreach (var palletData in PalletData[i]) writer.WriteArray(palletData); } } // common data - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Common) + if (ColumnMeta[i].CompressionType == CompressionType.Common) { - foreach (var commondata in m_commonData[i]) + foreach (var commondata in CommonData[i]) { writer.Write(commondata.Key); writer.Write(commondata.Value.GetValue()); @@ -331,15 +332,15 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) } // reference data - if (m_referenceData.Count > 0) + if (ReferenceData.Count > 0) { - writer.Write(m_referenceData.Count); - writer.Write(m_referenceData.Min()); - 
writer.Write(m_referenceData.Max()); + writer.Write(ReferenceData.Count); + writer.Write(ReferenceData.Min()); + writer.Write(ReferenceData.Max()); - for (int i = 0; i < m_referenceData.Count; i++) + for (int i = 0; i < ReferenceData.Count; i++) { - writer.Write(m_referenceData[i]); + writer.Write(ReferenceData[i]); writer.Write(i); } } @@ -350,25 +351,25 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) { // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords] int refSize = 0; - if (m_referenceData.Count > 0) - refSize = 12 + (m_referenceData.Count * 8); + if (ReferenceData.Count > 0) + refSize = 12 + (ReferenceData.Count * 8); int commonSize = 0, palletSize = 0; - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - switch (m_columnMeta[i].CompressionType) + switch (ColumnMeta[i].CompressionType) { // {uint id, uint copyid}[] case CompressionType.Common: - m_columnMeta[i].AdditionalDataSize = (uint)(m_commonData[i].Count * 8); - commonSize += (int)m_columnMeta[i].AdditionalDataSize; + ColumnMeta[i].AdditionalDataSize = (uint)(CommonData[i].Count * 8); + commonSize += (int)ColumnMeta[i].AdditionalDataSize; break; // {uint values[Cardinality]}[] case CompressionType.Pallet: case CompressionType.PalletArray: - m_columnMeta[i].AdditionalDataSize = (uint)m_palletData[i].Sum(x => x.Length * 4); - palletSize += (int)m_columnMeta[i].AdditionalDataSize; + ColumnMeta[i].AdditionalDataSize = (uint)PalletData[i].Sum(x => x.Length * 4); + palletSize += (int)ColumnMeta[i].AdditionalDataSize; break; } } diff --git a/DBCD.IO/Writers/WDC2Writer.cs b/DBCD.IO/Writers/WDC2Writer.cs index eb8a791..5128156 100644 --- a/DBCD.IO/Writers/WDC2Writer.cs +++ b/DBCD.IO/Writers/WDC2Writer.cs @@ -5,6 +5,7 @@ using System.IO; using System.Linq; using System.Runtime.CompilerServices; +using System.Text; namespace DBCD.IO.Writers { @@ -288,7 +289,7 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) serializer.GetCopyRows(); serializer.UpdateStringOffsets(storage); - RecordsCount = serializer.Records.Count - m_copyData.Count; + RecordsCount = serializer.Records.Count - CopyData.Count; var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); @@ -296,7 +297,7 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) { int minIndex = storage.Keys.Min(); int maxIndex = storage.Keys.Max(); - int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : m_copyData.Count * 8; + int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : CopyData.Count * 8; writer.Write(reader.Signature); writer.Write(RecordsCount); @@ -313,8 +314,8 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) writer.Write(FieldsCount); // totalFieldCount writer.Write(reader.PackedDataOffset); - writer.Write(m_referenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount - writer.Write(m_columnMeta.Length * 24); // ColumnMetaDataSize + writer.Write(ReferenceData.Count > 0 ? 
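// The relationship block the WDC writers emit, sketched: int count, int min,
// int max, then one {id, recordIndex} pair per entry - 12 + count * 8 bytes,
// which is exactly the refSize that GetDataSizes accounts for.
using System.Collections.Generic;
using System.IO;
using System.Linq;

static class ReferenceDataSketch
{
    public static void Write(BinaryWriter writer, List<int> referenceIds)
    {
        if (referenceIds.Count == 0)
            return;

        writer.Write(referenceIds.Count);
        writer.Write(referenceIds.Min());
        writer.Write(referenceIds.Max());

        for (int i = 0; i < referenceIds.Count; i++)
        {
            writer.Write(referenceIds[i]); // foreign key value
            writer.Write(i);               // index of the owning record
        }
    }
}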
1 : 0); // RelationshipColumnCount + writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize writer.Write(commonDataSize); writer.Write(palletDataSize); writer.Write(1); // sections count @@ -323,7 +324,7 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) return; // section header - int fileOffset = HeaderSize + (m_meta.Length * 4) + (m_columnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; + int fileOffset = HeaderSize + (Meta.Length * 4) + (ColumnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; writer.Write(0UL); // TactKeyLookup writer.Write(fileOffset); // FileOffset @@ -335,27 +336,27 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) writer.Write(referenceDataSize); // field meta - writer.WriteArray(m_meta); + writer.WriteArray(Meta); // column meta data - writer.WriteArray(m_columnMeta); + writer.WriteArray(ColumnMeta); // pallet data - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) { - foreach (var palletData in m_palletData[i]) + foreach (var palletData in PalletData[i]) writer.WriteArray(palletData); } } // common data - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Common) + if (ColumnMeta[i].CompressionType == CompressionType.Common) { - foreach (var commondata in m_commonData[i]) + foreach (var commondata in CommonData[i]) { writer.Write(commondata.Key); writer.Write(commondata.Value.GetValue()); @@ -366,14 +367,14 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) // record data uint recordsOffset = (uint)writer.BaseStream.Position; foreach (var record in serializer.Records) - if (!m_copyData.TryGetValue(record.Key, out int parent)) + if (!CopyData.TryGetValue(record.Key, out int parent)) record.Value.CopyTo(writer.BaseStream); // string table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { writer.WriteCString(""); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } @@ -391,12 +392,12 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) // index table if (Flags.HasFlagExt(DB2Flags.Index)) - writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); // copy table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { - foreach (var copyRecord in m_copyData) + foreach (var copyRecord in CopyData) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); @@ -404,15 +405,15 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) } // reference data - if (m_referenceData.Count > 0) + if (ReferenceData.Count > 0) { - writer.Write(m_referenceData.Count); - writer.Write(m_referenceData.Min()); - writer.Write(m_referenceData.Max()); + writer.Write(ReferenceData.Count); + writer.Write(ReferenceData.Min()); + writer.Write(ReferenceData.Max()); - for (int i = 0; i < m_referenceData.Count; i++) + for (int i = 0; i < ReferenceData.Count; i++) { - writer.Write(m_referenceData[i]); + writer.Write(ReferenceData[i]); writer.Write(i); } } @@ -423,25 +424,25 @@ public WDC2Writer(WDC2Reader 
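// The fileOffset expression above, unpacked: the section's record data starts
// after everything that precedes it - the file header, field meta (4 bytes per
// field), column meta (24 bytes per field), the section header struct itself
// (whatever Unsafe.SizeOf reports for it), then the pallet and common blocks.
static class SectionOffsetSketch
{
    public static int FileOffset(int headerSize, int fieldCount,
        int sectionHeaderSize, int palletDataSize, int commonDataSize)
        => headerSize + fieldCount * 4 + fieldCount * 24
           + sectionHeaderSize + palletDataSize + commonDataSize;
}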
reader, IDictionary storage, Stream stream) { // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords] int refSize = 0; - if (m_referenceData.Count > 0) - refSize = 12 + (m_referenceData.Count * 8); + if (ReferenceData.Count > 0) + refSize = 12 + (ReferenceData.Count * 8); int commonSize = 0, palletSize = 0; - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - switch (m_columnMeta[i].CompressionType) + switch (ColumnMeta[i].CompressionType) { // {uint id, uint copyid}[] case CompressionType.Common: - m_columnMeta[i].AdditionalDataSize = (uint)(m_commonData[i].Count * 8); - commonSize += (int)m_columnMeta[i].AdditionalDataSize; + ColumnMeta[i].AdditionalDataSize = (uint)(CommonData[i].Count * 8); + commonSize += (int)ColumnMeta[i].AdditionalDataSize; break; // {uint values[cardinality]}[] case CompressionType.Pallet: case CompressionType.PalletArray: - m_columnMeta[i].AdditionalDataSize = (uint)m_palletData[i].Sum(x => x.Length * 4); - palletSize += (int)m_columnMeta[i].AdditionalDataSize; + ColumnMeta[i].AdditionalDataSize = (uint)PalletData[i].Sum(x => x.Length * 4); + palletSize += (int)ColumnMeta[i].AdditionalDataSize; break; } } diff --git a/DBCD.IO/Writers/WDC3Writer.cs b/DBCD.IO/Writers/WDC3Writer.cs index 466d061..520e35c 100644 --- a/DBCD.IO/Writers/WDC3Writer.cs +++ b/DBCD.IO/Writers/WDC3Writer.cs @@ -5,6 +5,7 @@ using System.IO; using System.Linq; using System.Runtime.CompilerServices; +using System.Text; namespace DBCD.IO.Writers { @@ -288,7 +289,7 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) serializer.GetCopyRows(); serializer.UpdateStringOffsets(storage); - RecordsCount = serializer.Records.Count - m_copyData.Count; + RecordsCount = serializer.Records.Count - CopyData.Count; var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); @@ -312,8 +313,8 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) writer.Write(FieldsCount); // totalFieldCount writer.Write(reader.PackedDataOffset); - writer.Write(m_referenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount - writer.Write(m_columnMeta.Length * 24); // ColumnMetaDataSize + writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount + writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize writer.Write(commonDataSize); writer.Write(palletDataSize); writer.Write(1); // sections count @@ -322,7 +323,7 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) return; // section header - int fileOffset = HeaderSize + (m_meta.Length * 4) + (m_columnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; + int fileOffset = HeaderSize + (Meta.Length * 4) + (ColumnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; writer.Write(0UL); // TactKeyLookup writer.Write(fileOffset); // FileOffset @@ -332,30 +333,30 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) writer.Write(RecordsCount * 4); // IndexDataSize writer.Write(referenceDataSize); // ParentLookupDataSize writer.Write(Flags.HasFlagExt(DB2Flags.Sparse) ? 
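// The AdditionalDataSize arithmetic in GetDataSizes, isolated: a Common
// column's side block is 8 bytes per overriding row ({int id, uint value}),
// while a Pallet column's is 4 bytes per stored 32-bit cell, summed over every
// pallet entry (a pallet-array entry contributes one cell per array element).
using System.Collections.Generic;
using System.Linq;

static class DataSizeSketch
{
    public static uint CommonSize(int overrideCount) => (uint)(overrideCount * 8);

    public static uint PalletSize(IEnumerable<uint[]> palletEntries)
        => (uint)palletEntries.Sum(x => x.Length * 4);
}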
RecordsCount : 0); // OffsetMapIDCount - writer.Write(m_copyData.Count); // CopyTableCount + writer.Write(CopyData.Count); // CopyTableCount // field meta - writer.WriteArray(m_meta); + writer.WriteArray(Meta); // column meta data - writer.WriteArray(m_columnMeta); + writer.WriteArray(ColumnMeta); // pallet data - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) { - foreach (var palletData in m_palletData[i]) + foreach (var palletData in PalletData[i]) writer.WriteArray(palletData); } } // common data - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Common) + if (ColumnMeta[i].CompressionType == CompressionType.Common) { - foreach (var commondata in m_commonData[i]) + foreach (var commondata in CommonData[i]) { writer.Write(commondata.Key); writer.Write(commondata.Value.GetValue()); @@ -367,7 +368,7 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) var m_sparseEntries = new Dictionary(storage.Count); foreach (var record in serializer.Records) { - if (!m_copyData.TryGetValue(record.Key, out int parent)) + if (!CopyData.TryGetValue(record.Key, out int parent)) { m_sparseEntries.Add(record.Key, new SparseEntry() { @@ -383,7 +384,7 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) if (!Flags.HasFlagExt(DB2Flags.Sparse)) { writer.WriteCString(""); - foreach (var str in m_stringsTable) + foreach (var str in StringTable) writer.WriteCString(str.Key); } @@ -398,10 +399,10 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) // index table if (Flags.HasFlagExt(DB2Flags.Index)) - writer.WriteArray(serializer.Records.Keys.Except(m_copyData.Keys).ToArray()); + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); // copy table - foreach (var copyRecord in m_copyData) + foreach (var copyRecord in CopyData) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); @@ -412,15 +413,15 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) writer.WriteArray(m_sparseEntries.Values.ToArray()); // reference data - if (m_referenceData.Count > 0) + if (ReferenceData.Count > 0) { - writer.Write(m_referenceData.Count); - writer.Write(m_referenceData.Min()); - writer.Write(m_referenceData.Max()); + writer.Write(ReferenceData.Count); + writer.Write(ReferenceData.Min()); + writer.Write(ReferenceData.Max()); - for (int i = 0; i < m_referenceData.Count; i++) + for (int i = 0; i < ReferenceData.Count; i++) { - writer.Write(m_referenceData[i]); + writer.Write(ReferenceData[i]); writer.Write(i); } } @@ -435,25 +436,25 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) { // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords] int refSize = 0; - if (m_referenceData.Count > 0) - refSize = 12 + (m_referenceData.Count * 8); + if (ReferenceData.Count > 0) + refSize = 12 + (ReferenceData.Count * 8); int commonSize = 0, palletSize = 0; - for (int i = 0; i < m_columnMeta.Length; i++) + for (int i = 0; i < ColumnMeta.Length; i++) { - switch (m_columnMeta[i].CompressionType) + switch (ColumnMeta[i].CompressionType) { // {uint id, uint copyid}[] case 
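// How the WDC3 writer builds its offset map, sketched with assumed types: as
// each non-copy record is streamed out, its file position and byte length are
// captured, and that list is later written back as the offset map.
using System.Collections.Generic;
using System.IO;

static class SparseWriteSketch
{
    public struct Entry { public uint Offset; public ushort Size; }

    public static List<Entry> WriteRecords(Stream output,
        IEnumerable<KeyValuePair<int, byte[]>> records,
        IReadOnlyDictionary<int, int> copyOf)
    {
        var entries = new List<Entry>();
        foreach (var record in records)
        {
            if (copyOf.ContainsKey(record.Key))
                continue; // copy rows own no bytes

            entries.Add(new Entry { Offset = (uint)output.Position, Size = (ushort)record.Value.Length });
            output.Write(record.Value, 0, record.Value.Length);
        }
        return entries;
    }
}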
CompressionType.Common: - m_columnMeta[i].AdditionalDataSize = (uint)(m_commonData[i].Count * 8); - commonSize += (int)m_columnMeta[i].AdditionalDataSize; + ColumnMeta[i].AdditionalDataSize = (uint)(CommonData[i].Count * 8); + commonSize += (int)ColumnMeta[i].AdditionalDataSize; break; // {uint values[Cardinality]}[] case CompressionType.Pallet: case CompressionType.PalletArray: - m_columnMeta[i].AdditionalDataSize = (uint)m_palletData[i].Sum(x => x.Length * 4); - palletSize += (int)m_columnMeta[i].AdditionalDataSize; + ColumnMeta[i].AdditionalDataSize = (uint)PalletData[i].Sum(x => x.Length * 4); + palletSize += (int)ColumnMeta[i].AdditionalDataSize; break; } } diff --git a/DBCD/DBCD.cs b/DBCD/DBCD.cs index e0dab9e..bac37fb 100644 --- a/DBCD/DBCD.cs +++ b/DBCD/DBCD.cs @@ -22,7 +22,7 @@ public IDBCDStorage Load(string tableName, string build = null, Locale locale = var builder = new DBCDBuilder(locale); - var dbReader = new DBReader(dbcStream); + var dbReader = new DBParser(dbcStream); var definition = builder.Build(dbReader, dbdStream, tableName, build); var type = typeof(DBCDStorage<>).MakeGenericType(definition.Item1); diff --git a/DBCD/DBCDBuilder.cs b/DBCD/DBCDBuilder.cs index e8d46f0..c870f77 100644 --- a/DBCD/DBCDBuilder.cs +++ b/DBCD/DBCDBuilder.cs @@ -35,7 +35,7 @@ internal DBCDBuilder(Locale locale = Locale.None) this.locale = locale; } - internal Tuple Build(DBReader dbcReader, Stream dbd, string name, string build) + internal Tuple Build(DBParser dbcReader, Stream dbd, string name, string build) { var dbdReader = new DBDReader(); diff --git a/DBCD/DBCDStorage.cs b/DBCD/DBCDStorage.cs index 3f9d842..43762c8 100644 --- a/DBCD/DBCDStorage.cs +++ b/DBCD/DBCDStorage.cs @@ -65,18 +65,21 @@ public interface IDBCDStorage : IEnumerable>, IDictiona string[] IDBCDStorage.AvailableColumns => this.availableColumns; - public DBCDStorage(Stream stream, DBCDInfo info) : this(new DBReader(stream), info) { } + public DBCDStorage(Stream stream, DBCDInfo info) : this(new DBParser(stream), info) { } - public DBCDStorage(DBReader dbReader, DBCDInfo info) : base(new Dictionary()) + public DBCDStorage(DBParser dbReader, DBCDInfo info) : base(new Dictionary()) { this.availableColumns = info.availableColumns; this.tableName = info.tableName; this.fieldAccessor = new FieldAccessor(typeof(T)); // populate the collection so we don't iterate all values and create new rows each time - storage = new ReadOnlyDictionary(dbReader.GetRecords()); + storage = new ReadOnlyDictionary(dbReader.ReadRecords()); foreach (var record in storage) base.Dictionary.Add(record.Key, new DBCDRow(record.Key, record.Value, fieldAccessor)); + + // clear temp data from memory + dbReader.ClearCache(); } IEnumerator> IEnumerable>.GetEnumerator() From cbbd7188df23392f67de3cc1fa3d32d6beb6b532 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:16:51 +0200 Subject: [PATCH 11/40] Merge changes between repos Original db_writing branch of wowdev/DBCD with changes from ModernWowTools/DBCD on top Co-Authored-By: barncastle <1619104+barncastle@users.noreply.github.com> Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> --- DBCD.IO/Attributes/ForeignAttribute.cs | 11 + DBCD.IO/Attributes/RelationAttribute.cs | 4 +- DBCD.IO/Common/BitWriter.cs | 108 +- DBCD.IO/Common/DBStructs.cs | 26 +- DBCD.IO/Common/HTFXStructs.cs | 2 + DBCD.IO/Common/OrderedHashSet.cs | 1328 +++++++++++++++++ DBCD.IO/DBCD.IO.csproj | 3 + DBCD.IO/DBParser.cs | 81 +- DBCD.IO/DBStorage.cs | 49 - DBCD.IO/Extensions.cs | 101 
+- DBCD.IO/FieldCache.cs | 8 +- DBCD.IO/HotfixReader.cs | 25 +- .../Readers/BaseEncryptionSupportingReader.cs | 4 +- DBCD.IO/Readers/WDB6Reader.cs | 10 +- DBCD.IO/Readers/WDC1Reader.cs | 20 +- DBCD.IO/Readers/WDC2Reader.cs | 28 +- DBCD.IO/Readers/WDC3Reader.cs | 66 +- DBCD.IO/Readers/WDC4Reader.cs | 104 +- DBCD.IO/Readers/WDC5Reader.cs | 104 +- DBCD.IO/Storage.cs | 43 + DBCD.IO/Writers/BaseWriter.cs | 153 +- DBCD.IO/Writers/WDB2Writer.cs | 5 +- DBCD.IO/Writers/WDB3Writer.cs | 9 +- DBCD.IO/Writers/WDB4Writer.cs | 9 +- DBCD.IO/Writers/WDB5Writer.cs | 15 +- DBCD.IO/Writers/WDB6Writer.cs | 15 +- DBCD.IO/Writers/WDBCWriter.cs | 5 +- DBCD.IO/Writers/WDC1Writer.cs | 71 +- DBCD.IO/Writers/WDC2Writer.cs | 68 +- DBCD.IO/Writers/WDC3Writer.cs | 114 +- DBCD.IO/Writers/WDC4Writer.cs | 477 ++++++ DBCD.IO/Writers/WDC5Writer.cs | 483 ++++++ DBCD.Tests/DBCD.Tests.csproj | 6 +- DBCD/DBCDStorage.cs | 130 +- DBCD/Helpers/FieldAccessor.cs | 43 +- 35 files changed, 3202 insertions(+), 526 deletions(-) create mode 100644 DBCD.IO/Attributes/ForeignAttribute.cs create mode 100644 DBCD.IO/Common/OrderedHashSet.cs delete mode 100644 DBCD.IO/DBStorage.cs create mode 100644 DBCD.IO/Storage.cs create mode 100644 DBCD.IO/Writers/WDC4Writer.cs create mode 100644 DBCD.IO/Writers/WDC5Writer.cs diff --git a/DBCD.IO/Attributes/ForeignAttribute.cs b/DBCD.IO/Attributes/ForeignAttribute.cs new file mode 100644 index 0000000..8fc88f3 --- /dev/null +++ b/DBCD.IO/Attributes/ForeignAttribute.cs @@ -0,0 +1,11 @@ +using System; + +namespace DBCD.IO.Attributes +{ + public class ForeignAttribute : Attribute + { + public readonly bool IsForeign; + + public ForeignAttribute(bool isForeign) => IsForeign = isForeign; + } +} diff --git a/DBCD.IO/Attributes/RelationAttribute.cs b/DBCD.IO/Attributes/RelationAttribute.cs index aa8ba04..33c59fa 100644 --- a/DBCD.IO/Attributes/RelationAttribute.cs +++ b/DBCD.IO/Attributes/RelationAttribute.cs @@ -1,6 +1,6 @@ -using System; +using System; -namespace DBFileReaderLib.Attributes +namespace DBCD.IO.Attributes { public class RelationAttribute : Attribute { diff --git a/DBCD.IO/Common/BitWriter.cs b/DBCD.IO/Common/BitWriter.cs index e736945..e2932be 100644 --- a/DBCD.IO/Common/BitWriter.cs +++ b/DBCD.IO/Common/BitWriter.cs @@ -1,65 +1,70 @@ using System; +using System.Buffers; using System.IO; using System.Runtime.CompilerServices; using System.Text; namespace DBCD.IO.Common { - class BitWriter : IEquatable + class BitWriter : IEquatable, IDisposable { + private static readonly ArrayPool SharedPool = ArrayPool.Create(); + public int TotalBytesWrittenOut { get; private set; } - private byte nAccumulatedBits; - private byte[] buffer; + private byte AccumulatedBitsCount; + private byte[] Buffer; - private readonly byte[] _pool; + public BitWriter(int capacity) => Buffer = SharedPool.Rent(capacity); - public BitWriter(int capacity) - { - buffer = new byte[capacity]; - _pool = new byte[0x10]; - } - - public byte this[int i] => buffer[i]; + public byte this[int i] => Buffer[i]; + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteAligned(T value) where T : struct { EnsureSize(); - Unsafe.WriteUnaligned(ref buffer[TotalBytesWrittenOut], value); + Unsafe.WriteUnaligned(ref Buffer[TotalBytesWrittenOut], value); TotalBytesWrittenOut += Unsafe.SizeOf(); } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteCStringAligned(string value) { byte[] data = Encoding.UTF8.GetBytes(value); + Array.Resize(ref data, data.Length + 1); + + EnsureSize(data.Length); + 
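
// Note on the layout WriteCStringAligned produces: the Array.Resize above grows the
// UTF-8 payload by one zero byte, so `data` already ends with the NUL terminator.
// Writing "AB", for example, emits 0x41 0x42 0x00 and TotalBytesWrittenOut advances
// by data.Length == 3.
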
Unsafe.CopyBlockUnaligned(ref Buffer[TotalBytesWrittenOut], ref data[0], (uint)data.Length); - Resize(data.Length); - Array.Copy(data, 0, buffer, TotalBytesWrittenOut, data.Length); - TotalBytesWrittenOut += data.Length + 1; + TotalBytesWrittenOut += data.Length; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void Write(T value, int nbits) where T : struct { - if (nAccumulatedBits == 0 && (nbits & 7) == 0) + Span pool = stackalloc byte[0x10]; + if (AccumulatedBitsCount == 0 && (nbits & 7) == 0) { EnsureSize(); - Unsafe.WriteUnaligned(ref buffer[TotalBytesWrittenOut], value); + Unsafe.WriteUnaligned(ref Buffer[TotalBytesWrittenOut], value); TotalBytesWrittenOut += nbits / 8; } else { - Unsafe.WriteUnaligned(ref _pool[0], value); + Unsafe.WriteUnaligned(ref pool[0], value); for (int i = 0; nbits > 0; i++) { - WriteBits(Math.Min(nbits, 8), _pool[i]); + WriteBits(Math.Min(nbits, 8), pool[i]); nbits -= 8; } } } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void Write(T value, int nbits, int offset) where T : struct { - Unsafe.WriteUnaligned(ref _pool[0], value); + Span pool = stackalloc byte[0x10]; + Unsafe.WriteUnaligned(ref pool[0], value); int byteOffset = offset >> 3; int lowLen = offset & 7; @@ -69,11 +74,11 @@ public void Write(T value, int nbits, int offset) where T : struct while ((nbits -= 8) >= 0) { // write last part of this byte - buffer[byteOffset] = (byte)((buffer[byteOffset] & (0xFF >> highLen)) | (_pool[i] << lowLen)); + Buffer[byteOffset] = (byte)((Buffer[byteOffset] & (0xFF >> highLen)) | (pool[i] << lowLen)); // write first part of next byte byteOffset++; - buffer[byteOffset] = (byte)((buffer[byteOffset] & (0xFF << lowLen)) | (_pool[i] >> highLen)); + Buffer[byteOffset] = (byte)((Buffer[byteOffset] & (0xFF << lowLen)) | (pool[i] >> highLen)); i++; } @@ -83,14 +88,15 @@ public void Write(T value, int nbits, int offset) where T : struct lowLen = nbits; highLen = 8 - nbits; - buffer[byteOffset] = (byte)((buffer[byteOffset] & (0xFF >> highLen)) | (_pool[i] << lowLen)); + Buffer[byteOffset] = (byte)((Buffer[byteOffset] & (0xFF >> highLen)) | (pool[i] << lowLen)); } } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void WriteCString(string value) { // Note: cstrings are always aligned to 8 bytes - if (nAccumulatedBits == 0) + if (AccumulatedBitsCount == 0) { WriteCStringAligned(value); } @@ -104,30 +110,38 @@ public void WriteCString(string value) } } - + [MethodImpl(MethodImplOptions.AggressiveInlining)] private void WriteBits(int bitCount, uint value) { EnsureSize(); for (int i = 0; i < bitCount; i++) { - buffer[TotalBytesWrittenOut] |= (byte)(((value >> i) & 0x1) << nAccumulatedBits); - nAccumulatedBits++; + Buffer[TotalBytesWrittenOut] |= (byte)(((value >> i) & 0x1) << AccumulatedBitsCount); + AccumulatedBitsCount++; - if (nAccumulatedBits > 7) + if (AccumulatedBitsCount > 7) { TotalBytesWrittenOut++; - nAccumulatedBits = 0; + AccumulatedBitsCount = 0; } } } + [MethodImpl(MethodImplOptions.AggressiveInlining)] private void EnsureSize(int size = 8) { - if (TotalBytesWrittenOut + size >= buffer.Length) - Array.Resize(ref buffer, buffer.Length + size + 0x10); - } + if (TotalBytesWrittenOut + size >= Buffer.Length) + { + byte[] rent = SharedPool.Rent(Buffer.Length + size); + + Unsafe.CopyBlockUnaligned(ref rent[0], ref Buffer[0], (uint)rent.Length); + SharedPool.Return(Buffer, true); + + Buffer = rent; + } + } public void Resize(int size) { @@ -150,10 +164,9 @@ public void ResizeToMultiple(int divisor) public void CopyTo(Stream stream) { - 
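
// The reworked EnsureSize above grows by renting a larger array from the shared pool
// rather than Array.Resize. A minimal sketch of that pooled-growth pattern (method and
// parameter names here are illustrative, not part of the patch); note that only the
// old buffer's valid bytes need copying before it goes back to the pool:
static byte[] GrowRented(System.Buffers.ArrayPool<byte> pool, byte[] old, int extra)
{
    byte[] bigger = pool.Rent(old.Length + extra);            // may rent more than asked
    System.Buffer.BlockCopy(old, 0, bigger, 0, old.Length);   // copy only what old holds
    pool.Return(old, clearArray: true);                       // scrub before reuse
    return bigger;
}
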
stream.Write(buffer, 0, TotalBytesWrittenOut); + stream.Write(Buffer, 0, TotalBytesWrittenOut); } - public bool Equals(BitWriter other) { if (TotalBytesWrittenOut != other.TotalBytesWrittenOut) @@ -172,20 +185,21 @@ public override int GetHashCode() { unchecked { - // jenkins one-at-a-time - int hashcode = 0; - for (int i = 0; i < TotalBytesWrittenOut; i++) - { - hashcode += buffer[i]; - hashcode += hashcode << 10; - hashcode ^= hashcode >> 6; - } + const int p = 16777619; + int hash = (int)2166136261; - hashcode += hashcode << 3; - hashcode ^= hashcode >> 11; - hashcode += hashcode << 15; - return hashcode; + for (int i = 0; i < TotalBytesWrittenOut; i++) + hash = (hash ^ Buffer[i]) * p; + + hash += hash << 13; + hash ^= hash >> 7; + hash += hash << 3; + hash ^= hash >> 17; + hash += hash << 5; + return hash; } } + + public void Dispose() => SharedPool.Return(Buffer); } -} +} \ No newline at end of file diff --git a/DBCD.IO/Common/DBStructs.cs b/DBCD.IO/Common/DBStructs.cs index 5e51fde..bd1e3ee 100644 --- a/DBCD.IO/Common/DBStructs.cs +++ b/DBCD.IO/Common/DBStructs.cs @@ -79,11 +79,13 @@ public T GetValue() where T : struct } } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static unsafe Value32 Create(T obj) where T : unmanaged { return *(Value32*)&obj; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static unsafe Value32 Create(object obj) { if (obj is byte b) @@ -167,8 +169,8 @@ struct SectionHeader : IEncryptableDatabaseSection public int IndexDataSize; // int indexData[IndexDataSize / 4] public int ParentLookupDataSize; // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords], questionable usefulness... - ulong IEncryptableDatabaseSection.TactKeyLookup => this.TactKeyLookup; - int IEncryptableDatabaseSection.NumRecords => this.NumRecords; + ulong IEncryptableDatabaseSection.TactKeyLookup => TactKeyLookup; + int IEncryptableDatabaseSection.NumRecords => NumRecords; } [StructLayout(LayoutKind.Sequential, Pack = 2)] @@ -184,8 +186,8 @@ struct SectionHeaderWDC3 : IEncryptableDatabaseSection public int OffsetMapIDCount; public int CopyTableCount; - ulong IEncryptableDatabaseSection.TactKeyLookup => this.TactKeyLookup; - int IEncryptableDatabaseSection.NumRecords => this.NumRecords; + ulong IEncryptableDatabaseSection.TactKeyLookup => TactKeyLookup; + int IEncryptableDatabaseSection.NumRecords => NumRecords; } [StructLayout(LayoutKind.Sequential, Pack = 2)] @@ -205,4 +207,20 @@ struct SectionHeaderWDC4 : IEncryptableDatabaseSection int IEncryptableDatabaseSection.NumRecords => this.NumRecords; } + [StructLayout(LayoutKind.Sequential, Pack = 2)] + struct SectionHeaderWDC5 : IEncryptableDatabaseSection + { + public ulong TactKeyLookup; + public int FileOffset; + public int NumRecords; + public int StringTableSize; + public int OffsetRecordsEndOffset; // CatalogDataOffset, absolute value, {uint offset, ushort size}[MaxId - MinId + 1] + public int IndexDataSize; // int indexData[IndexDataSize / 4] + public int ParentLookupDataSize; // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords], questionable usefulness... 
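
// Aside on the GetHashCode rewrite earlier in this hunk: the Jenkins one-at-a-time
// loop is replaced by 32-bit FNV-1a (offset basis 2166136261, prime 16777619) plus a
// final avalanche mix. A self-contained sketch of the core FNV-1a loop (the method
// name is illustrative):
static int Fnv1a32(byte[] data, int length)
{
    const int prime = 16777619;
    int hash = unchecked((int)2166136261);          // FNV offset basis
    for (int i = 0; i < length; i++)
        hash = unchecked((hash ^ data[i]) * prime); // xor byte, then multiply
    return hash;
}
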
+ public int OffsetMapIDCount; + public int CopyTableCount; + + ulong IEncryptableDatabaseSection.TactKeyLookup => this.TactKeyLookup; + int IEncryptableDatabaseSection.NumRecords => this.NumRecords; + } } diff --git a/DBCD.IO/Common/HTFXStructs.cs b/DBCD.IO/Common/HTFXStructs.cs index c76d788..3b81a87 100644 --- a/DBCD.IO/Common/HTFXStructs.cs +++ b/DBCD.IO/Common/HTFXStructs.cs @@ -1,5 +1,7 @@ #pragma warning disable CS0169 +using System.Runtime.InteropServices; + namespace DBCD.IO.Common { public interface IHotfixEntry diff --git a/DBCD.IO/Common/OrderedHashSet.cs b/DBCD.IO/Common/OrderedHashSet.cs new file mode 100644 index 0000000..6149ac8 --- /dev/null +++ b/DBCD.IO/Common/OrderedHashSet.cs @@ -0,0 +1,1328 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Diagnostics.Contracts; +using System.Runtime.CompilerServices; +using System.Runtime.Serialization; +using System.Threading; + +namespace DBCD.IO.Common +{ + + /// + /// Purpose: Hash table implementation + /// + public static class HashHelpers + { + // This is the maximum prime smaller than Array.MaxArrayLength + public const int MaxPrimeArrayLength = 0x7FEFFFFD; + + // Table of prime numbers to use as hash table sizes. + // A typical resize algorithm would pick the smallest prime number in this array + // that is larger than twice the previous capacity. + // Suppose our Hashtable currently has capacity x and enough elements are added + // such that a resize needs to occur. Resizing first computes 2x then finds the + // first prime in the table greater than 2x, i.e. if primes are ordered + // p_1, p_2, ..., p_i, ..., it finds p_n such that p_n-1 < 2x < p_n. + // Doubling is important for preserving the asymptotic complexity of the + // hashtable operations such as add. Having a prime guarantees that double + // hashing does not lead to infinite loops. IE, your hash function will be + // h1(key) + i*h2(key), 0 <= i < size. h2 and the size must be relatively prime. + private static readonly int[] primes = { + 3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919, + 1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591, + 17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437, + 187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263, + 1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369, 8639249, 10367101, + 12440537, 14928671, 17914409, 21497293, 25796759, 30956117, 37147349, 44576837, 53492207, 64190669, + 77028803, 92434613, 110921543, 133105859, 159727031, 191672443, 230006941, 276008387, 331210079, + 397452101, 476942527, 572331049, 686797261, 824156741, 988988137, 1186785773, 1424142949, 1708971541, + 2050765853, MaxPrimeArrayLength }; + + public static int GetPrime(int min) + { + if (min < 0) + throw new ArgumentException("Arg_HTCapacityOverflow"); + Contract.EndContractBlock(); + + for (int i = 0; i < primes.Length; i++) + { + int prime = primes[i]; + if (prime >= min) return prime; + } + + return min; + } + + // Returns size of hashtable to grow to. 
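
// Worked example of the growth policy implemented by ExpandPrime below: a table of
// 89 buckets doubles to 178, and GetPrime returns the first prime in the table above
// that is >= 178, namely 197:
//
//     HashHelpers.ExpandPrime(89);   // == 197
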
+ public static int ExpandPrime(int oldSize) + { + int newSize = 2 * oldSize; + + // Allow the hashtables to grow to maximum possible size (~2G elements) before encoutering capacity overflow. + // Note that this check works even when _items.Length overflowed thanks to the (uint) cast + if ((uint)newSize > MaxPrimeArrayLength && MaxPrimeArrayLength > oldSize) + { + Debug.Assert(MaxPrimeArrayLength == GetPrime(MaxPrimeArrayLength), "Invalid MaxPrimeArrayLength"); + return MaxPrimeArrayLength; + } + + return GetPrime(newSize); + } + + private static ConditionalWeakTable s_serializationInfoTable; + + internal static ConditionalWeakTable SerializationInfoTable => LazyInitializer.EnsureInitialized(ref s_serializationInfoTable); + + internal static object GetEqualityComparerForSerialization(object comparer) => comparer; + } + + public sealed class CollectionDebugView + { + private readonly ICollection m_collection; + + public CollectionDebugView(ICollection collection) + { + if (collection == null) + { + throw new ArgumentNullException(nameof(collection)); + } + m_collection = collection; + } + + [DebuggerBrowsable(DebuggerBrowsableState.RootHidden)] + public T[] Items + { + get + { + T[] items = new T[m_collection.Count]; + m_collection.CopyTo(items, 0); + return items; + } + } + } + + /// + /// Represents an ordered set of values. + /// + /// + /// Values are kept in order in which they are added. + /// Order can be modified by , , , . + /// + [Serializable] + [DebuggerTypeProxy(typeof(CollectionDebugView<>))] + [DebuggerDisplay("Count = {Count}")] + [SuppressMessage("Microsoft.Naming", "CA1710:IdentifiersShouldHaveCorrectSuffix", Justification = "By design")] + public class OrderedHashSet : ICollection, ISerializable, IDeserializationCallback + { + private static bool IsValueType = typeof(T).IsValueType; + private static bool IsNullable = typeof(T).IsValueType && typeof(T).IsGenericType && typeof(T).GetGenericTypeDefinition() == typeof(Nullable<>); + + // store lower 31 bits of hash code + private const int Lower31BitMask = 0x7FFFFFFF; + + // factor used to increase hashset capacity + private const int GrowthFactor = 2; + + // when constructing a hashset from an existing collection, it may contain duplicates, + // so this is used as the max acceptable excess ratio of capacity to count. Note that + // this is only used on the ctor and not to automatically shrink if the hashset has, e.g, + // a lot of adds followed by removes. Users must explicitly shrink by calling TrimExcess. + // This is set to 3 because capacity is acceptable as 2x rounded up to nearest prime. + private const int ShrinkThreshold = 3; + + // constants for serialization + private const String CapacityName = "Capacity"; + private const String ElementsName = "Elements"; + private const String ComparerName = "Comparer"; + private const String VersionName = "Version"; + + private int[] m_buckets; + private Slot[] m_slots; + private int m_count; + private int m_lastIndex; + private int m_freeList; + private IEqualityComparer m_comparer; + private int m_version; + private int m_firstOrderIndex; // Index of first entry by order + private int m_lastOrderIndex; // Index of last entry by order + + // temporary variable needed during deserialization + private SerializationInfo m_siInfo; + + /// + /// Number of elements in this hashset + /// + public int Count + { + get { return m_count; } + } + + /// + /// Gets collection reader. 
+ /// + public Reader Items + { + get { return new Reader(this); } + } + + /// + /// Gets reversed collection reader. + /// + public ReverseReader Reversed + { + get { return new ReverseReader(this); } + } + + #region Constructors + + public OrderedHashSet() + : this(EqualityComparer.Default) + { + } + + public OrderedHashSet(int capacity) + : this(capacity, EqualityComparer.Default) + { + } + + public OrderedHashSet(IEqualityComparer comparer) + : this(0, comparer) + { + } + + public OrderedHashSet(int capacity, IEqualityComparer comparer) + { + if (comparer == null) + { + comparer = EqualityComparer.Default; + } + + m_comparer = comparer; + m_lastIndex = 0; + m_count = 0; + m_freeList = -1; + m_version = 0; + m_firstOrderIndex = -1; + m_lastOrderIndex = -1; + + if (capacity > 0) + { + Initialize(capacity); + } + } + + public OrderedHashSet(IEnumerable collection) + : this(collection, EqualityComparer.Default) + { + } + + /// + /// Implementation Notes: + /// Since resizes are relatively expensive (require rehashing), this attempts to minimize + /// the need to resize by setting the initial capacity based on size of collection. + /// + /// + /// + public OrderedHashSet(IEnumerable collection, IEqualityComparer comparer) + : this(comparer) + { + if (collection == null) + { + throw new ArgumentNullException("collection"); + } + + // to avoid excess resizes, first set size based on collection's count. Collection + // may contain duplicates, so call TrimExcess if resulting hashset is larger than + // threshold + int suggestedCapacity = 0; + ICollection coll = collection as ICollection; + if (coll != null) + { + suggestedCapacity = coll.Count; + } + Initialize(suggestedCapacity); + + UnionWith(collection); + if ((m_count == 0 && m_slots.Length > HashHelpers.GetPrime(0)) || + (m_count > 0 && m_slots.Length / m_count > ShrinkThreshold)) + { + TrimExcess(); + } + } + + protected OrderedHashSet(SerializationInfo info, StreamingContext context) + { + // We can't do anything with the keys and values until the entire graph has been + // deserialized and we have a reasonable estimate that GetHashCode is not going to + // fail. For the time being, we'll just cache this. The graph is not valid until + // OnDeserialization has been called. + m_siInfo = info; + } + + #endregion + + #region ISerializable methods + + public virtual void GetObjectData(SerializationInfo info, StreamingContext context) + { + if (info == null) + { + throw new ArgumentNullException(nameof(info)); + } + + // need to serialize version to avoid problems with serializing while enumerating + info.AddValue(VersionName, m_version); + info.AddValue(ComparerName, m_comparer, typeof(IEqualityComparer)); + info.AddValue(CapacityName, m_buckets == null ? 0 : m_buckets.Length); + if (m_buckets != null) + { + T[] array = new T[m_count]; + CopyTo(array); // Copies ordered data + info.AddValue(ElementsName, array, typeof(T[])); + } + } + + #endregion + + #region IDeserializationCallback methods + + public virtual void OnDeserialization(Object sender) + { + + if (m_siInfo == null) + { + // It might be necessary to call OnDeserialization from a container if the + // container object also implements OnDeserialization. However, remoting will + // call OnDeserialization again. We can return immediately if this function is + // called twice. Note we set m_siInfo to null at the end of this method. 
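
// Serialization round-trip note: GetObjectData above snapshots the set with CopyTo,
// which walks the m_firstOrderIndex/nextOrder chain, so the element array is stored
// in logical order. Re-adding those elements sequentially below therefore rebuilds
// both the hash buckets and the original insertion order.
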
+ return; + } + + int capacity = m_siInfo.GetInt32(CapacityName); + m_comparer = (IEqualityComparer)m_siInfo.GetValue(ComparerName, typeof(IEqualityComparer)); + m_freeList = -1; + m_firstOrderIndex = -1; + m_lastOrderIndex = -1; + + if (capacity != 0) + { + m_buckets = new int[capacity]; + m_slots = new Slot[capacity]; + + T[] array = (T[])m_siInfo.GetValue(ElementsName, typeof(T[])); + + if (array == null) + { + throw new SerializationException("Serialization_MissingKeys"); + } + + // there are no resizes here because we already set capacity above + for (int i = 0; i < array.Length; i++) + { + Add(array[i]); + } + } + else + { + m_buckets = null; + } + + m_version = m_siInfo.GetInt32(VersionName); + m_siInfo = null; + } + + #endregion + + #region ICollection methods + + /// + /// Whether this is readonly + /// + bool ICollection.IsReadOnly + { + get { return false; } + } + + /// + /// Add item to this hashset. This is the explicit implementation of the ICollection + /// interface. The other Add method returns bool indicating whether item was added. + /// + /// item to add + void ICollection.Add(T item) + { + Add(item); + } + + /// + /// Remove all items from this set. This clears the elements but not the underlying + /// buckets and slots array. Follow this call by TrimExcess to release these. + /// + public void Clear() + { + if (m_lastIndex > 0) + { + Debug.Assert(m_buckets != null, "m_buckets was null but m_lastIndex > 0"); + + // clear the elements so that the gc can reclaim the references. + // clear only up to m_lastIndex for m_slots + Array.Clear(m_slots, 0, m_lastIndex); + Array.Clear(m_buckets, 0, m_buckets.Length); + m_lastIndex = 0; + m_count = 0; + m_freeList = -1; + m_firstOrderIndex = -1; + m_lastOrderIndex = -1; + } + m_version++; + } + + /// + /// Checks if this hashset contains the item + /// + /// item to check for containment + /// true if item contained; false if not + public bool Contains(T item) + { + if (m_buckets != null) + { + int hashCode = InternalGetHashCode(item); + // see note at "HashSet" level describing why "- 1" appears in for loop + for (int i = m_buckets[hashCode % m_buckets.Length] - 1; i >= 0; i = m_slots[i].next) + { + if (m_slots[i].hashCode == hashCode && m_comparer.Equals(m_slots[i].value, item)) + { + return true; + } + } + } + // either m_buckets is null or wasn't found + return false; + } + + /// + /// Copy items in this hashset to array, starting at arrayIndex + /// + /// array to add items to + /// index to start at + public void CopyTo(T[] array, int arrayIndex) + { + CopyTo(array, arrayIndex, m_count); + } + + /// + /// Take the union of this HashSet with other. Modifies this set. + /// + /// Implementation note: GetSuggestedCapacity (to increase capacity in advance avoiding + /// multiple resizes ended up not being useful in practice; quickly gets to the + /// point where it's a wasteful check. 
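
// Lookup convention used throughout this class (visible in Contains above): m_buckets
// stores slot-index-plus-one, so a freshly zeroed bucket array reads as "empty chain".
// A probe therefore looks like:
//
//     int bucket = (comparer.GetHashCode(item) & 0x7FFFFFFF) % m_buckets.Length;
//     for (int i = m_buckets[bucket] - 1; i >= 0; i = m_slots[i].next) { ... }
//
// which is also why every store writes m_buckets[bucket] = index + 1.
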
+ /// + /// enumerable with items to add + public void UnionWith(IEnumerable other) + { + if (other == null) + { + throw new ArgumentNullException(nameof(other)); + } + + foreach (T item in other) + { + Add(item); + } + } + + private int InternalIndexOf(T item) + { + int num = this.InternalGetHashCode(item); + if(this.m_buckets != null) + { + for (int i = this.m_buckets[num % this.m_buckets.Length] - 1; i >= 0; i = this.m_slots[i].next) + { + if (this.m_slots[i].hashCode == num && this.m_comparer.Equals(this.m_slots[i].value, item)) + { + return i; + } + } + } + return -1; + } + + /// + /// Returns the zero-based index of the first occurrence of a value in the or in a portion of it. + /// + /// The object to locate in the . + /// The zero-based index of the first occurrence of item within the range of elements in the that extends from index to the last element, if found; otherwise, -1. + public int IndexOf(T item) => InternalIndexOf(item); + + /// + /// Remove item from this hashset + /// + /// item to remove + /// true if removed; false if not (i.e. if the item wasn't in the HashSet) + public bool Remove(T item) + { + if (m_buckets != null) + { + int hashCode = InternalGetHashCode(item); + int bucket = hashCode % m_buckets.Length; + int last = -1; + for (int i = m_buckets[bucket] - 1; i >= 0; last = i, i = m_slots[i].next) + { + if (m_slots[i].hashCode == hashCode && m_comparer.Equals(m_slots[i].value, item)) + { + if (last < 0) + { + // first iteration; update buckets + m_buckets[bucket] = m_slots[i].next + 1; + } + else + { + // subsequent iterations; update 'next' pointers + m_slots[last].next = m_slots[i].next; + } + + // Connect linked list + if (m_firstOrderIndex == i) // Is first + { + m_firstOrderIndex = m_slots[i].nextOrder; + } + if (m_lastOrderIndex == i) // Is last + { + m_lastOrderIndex = m_slots[i].previousOrder; + } + + var next = m_slots[i].nextOrder; + var prev = m_slots[i].previousOrder; + if (next != -1) + { + m_slots[next].previousOrder = prev; + } + if (prev != -1) + { + m_slots[prev].nextOrder = next; + } + + m_slots[i].hashCode = -1; + m_slots[i].value = default(T); + m_slots[i].next = m_freeList; + m_slots[i].previousOrder = -1; + m_slots[i].nextOrder = -1; + + m_count--; + m_version++; + if (m_count == 0) + { + m_lastIndex = 0; + m_freeList = -1; + } + else + { + m_freeList = i; + } + return true; + } + } + } + // either m_buckets is null or wasn't found + return false; + } + + #endregion + + #region IEnumerable methods + + public Enumerator GetEnumerator() + { + return new Enumerator(this); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return new Enumerator(this); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return new Enumerator(this); + } + + #endregion + + #region HashSet methods + + /// + /// Copies the elements to an array. + /// + public void CopyTo(T[] array) + { + CopyTo(array, 0, m_count); + } + + /// + /// Copies the specified number of elements to an array, starting at the specified array index. + /// + public void CopyTo(T[] array, int arrayIndex, int count) + { + if (array == null) + { + throw new ArgumentNullException(nameof(array)); + } + + // check array index valid index into array + if (arrayIndex < 0) + { + throw new ArgumentOutOfRangeException(nameof(arrayIndex), "ArgumentOutOfRange_NeedNonNegNum"); + } + + // also throw if count less than 0 + if (count < 0) + { + throw new ArgumentOutOfRangeException(nameof(count), "ArgumentOutOfRange_NeedNonNegNum"); + } + + // will array, starting at arrayIndex, be able to hold elements? 
Note: not + // checking arrayIndex >= array.Length (consistency with list of allowing + // count of 0; subsequent check takes care of the rest) + if (arrayIndex > array.Length || count > array.Length - arrayIndex) + { + throw new ArgumentException("Arg_ArrayPlusOffTooSmall"); + } + + int numCopied = 0; + for (int i = m_firstOrderIndex; i != -1 && numCopied < count; i = m_slots[i].nextOrder) + { + array[arrayIndex + numCopied] = m_slots[i].value; + numCopied++; + } + } + + public bool MoveFirst(T item) + { + int index = InternalIndexOf(item); + if (index != -1) + { + var prev = m_slots[index].previousOrder; + if (prev != -1) // Not first + { + // Disconnect + var next = m_slots[index].nextOrder; + if (next == -1) // Last + { + m_lastOrderIndex = prev; + } + else + { + m_slots[next].previousOrder = prev; + } + m_slots[prev].nextOrder = next; + + // Reconnect + m_slots[index].previousOrder = -1; + m_slots[index].nextOrder = m_firstOrderIndex; + m_slots[m_firstOrderIndex].previousOrder = index; + m_firstOrderIndex = index; + } + return true; + } + return false; + } + + public bool MoveLast(T item) + { + int index = InternalIndexOf(item); + if (index != -1) + { + var next = m_slots[index].nextOrder; + if (next != -1) // Not last + { + // Disconnect + var prev = m_slots[index].previousOrder; + if (prev == -1) // First + { + m_firstOrderIndex = next; + } + else + { + m_slots[prev].nextOrder = next; + } + m_slots[next].previousOrder = prev; + + // Reconnect + m_slots[index].nextOrder = -1; + m_slots[index].previousOrder = m_lastOrderIndex; + m_slots[m_lastOrderIndex].nextOrder = index; + m_lastOrderIndex = index; + } + return true; + } + return false; + } + + public bool MoveBefore(T itemToMove, T mark) + { + int index = InternalIndexOf(itemToMove); + int markIndex = InternalIndexOf(mark); + if (index != -1 && markIndex != -1 && index != markIndex) + { + // Disconnect + var next = m_slots[index].nextOrder; + var prev = m_slots[index].previousOrder; + if (prev == -1) // First + { + m_firstOrderIndex = next; + } + else + { + m_slots[prev].nextOrder = next; + } + if (next == -1) // Last + { + m_lastOrderIndex = prev; + } + else + { + m_slots[next].previousOrder = prev; + } + + // Reconnect + var preMark = m_slots[markIndex].previousOrder; + m_slots[index].nextOrder = markIndex; + m_slots[index].previousOrder = preMark; + m_slots[markIndex].previousOrder = index; + if (preMark == -1) + { + m_firstOrderIndex = index; + } + else + { + m_slots[preMark].nextOrder = index; + } + return true; + } + return false; + } + + public bool MoveAfter(T itemToMove, T mark) + { + int index = InternalIndexOf(itemToMove); + int markIndex = InternalIndexOf(mark); + if (index != -1 && markIndex != -1 && index != markIndex) + { + // Disconnect + var next = m_slots[index].nextOrder; + var prev = m_slots[index].previousOrder; + if (prev == -1) // First + { + m_firstOrderIndex = next; + } + else + { + m_slots[prev].nextOrder = next; + } + if (next == -1) // Last + { + m_lastOrderIndex = prev; + } + else + { + m_slots[next].previousOrder = prev; + } + + // Reconnect + var postMark = m_slots[markIndex].nextOrder; + m_slots[index].previousOrder = markIndex; + m_slots[index].nextOrder = postMark; + m_slots[markIndex].nextOrder = index; + if (postMark == -1) + { + m_lastOrderIndex = index; + } + else + { + m_slots[postMark].previousOrder = index; + } + return true; + } + return false; + } + + /// + /// Returns enumeration which goes from to last element in the set (including both). 
+ /// When is not found, returns empty enumeration. + /// + public Range StartWith(T item) + { + return new Range(this, item); + } + + /// + /// Returns enumeration which goes from to first element in the set (including both). + /// When is not found, returns empty enumeration. + /// + public ReverseRange StartWithReversed(T item) + { + return new ReverseRange(this, item); + } + + /// + /// Remove elements that match specified predicate. Returns the number of elements removed + /// + /// + /// + public int RemoveWhere(Predicate match) + { + if (match == null) + { + throw new ArgumentNullException(nameof(match)); + } + + int numRemoved = 0; + for (int i = 0; i < m_lastIndex; i++) + { + if (m_slots[i].hashCode >= 0) + { + // cache value in case delegate removes it + T value = m_slots[i].value; + if (match(value)) + { + // check again that remove actually removed it + if (Remove(value)) + { + numRemoved++; + } + } + } + } + return numRemoved; + } + + /// + /// Gets the IEqualityComparer that is used to determine equality of keys for + /// the HashSet. + /// + public IEqualityComparer Comparer + { + get + { + return m_comparer; + } + } + + /// + /// Sets the capacity of this list to the size of the list (rounded up to nearest prime), + /// unless count is 0, in which case we release references. + /// + /// This method can be used to minimize a list's memory overhead once it is known that no + /// new elements will be added to the list. To completely clear a list and release all + /// memory referenced by the list, execute the following statements: + /// + /// Clear(); + /// TrimExcess(); + /// + public void TrimExcess() + { + Debug.Assert(m_count >= 0, "m_count is negative"); + + if (m_count == 0) + { + // if count is zero, clear references + m_buckets = null; + m_slots = null; + m_version++; + } + else + { + Debug.Assert(m_buckets != null, "m_buckets was null but m_count > 0"); + + // similar to IncreaseCapacity but moves down elements in case add/remove/etc + // caused fragmentation + int newSize = HashHelpers.GetPrime(m_count); + Slot[] newSlots = new Slot[newSize]; + int[] newBuckets = new int[newSize]; + + // move down slots and rehash at the same time. newIndex keeps track of current + // position in newSlots array + int newIndex = 0; + for (int i = 0; i < m_lastIndex; i++) + { + if (m_slots[i].hashCode >= 0) + { + newSlots[newIndex] = m_slots[i]; + + // rehash + int bucket = newSlots[newIndex].hashCode % newSize; + newSlots[newIndex].next = newBuckets[bucket] - 1; + newBuckets[bucket] = newIndex + 1; + + // temporarily store new index in m_slots[i].next + m_slots[i].next = newIndex; + + newIndex++; + } + } + + newIndex = 0; + for (int i = 0; i < m_lastIndex; i++) + { + if (m_slots[i].hashCode >= 0) + { + var next = m_slots[i].nextOrder; + var prev = m_slots[i].previousOrder; + + // Use temporarily stored index + if (next != -1) + { + newSlots[newIndex].nextOrder = m_slots[next].next; + } + else + { + m_lastOrderIndex = newIndex; + } + if (prev != -1) + { + newSlots[newIndex].previousOrder = m_slots[prev].next; + } + else + { + m_firstOrderIndex = newIndex; + } + newIndex++; + } + } + + Debug.Assert(newSlots.Length <= m_slots.Length, "capacity increased after TrimExcess"); + + m_lastIndex = newIndex; + m_slots = newSlots; + m_buckets = newBuckets; + m_freeList = -1; + } + } + + #endregion + + #region Helper methods + + /// + /// Initializes buckets and slots arrays. Uses suggested capacity by finding next prime + /// greater than or equal to capacity. 
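
// TrimExcess above compacts in two passes: pass one moves live slots down, rehashes
// them into the smaller bucket array, and stashes each slot's *new* index in the old
// slot's `next` field; pass two then rewrites nextOrder/previousOrder through those
// stashed indices so the logical order chain survives the compaction.
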
+ /// + /// + private void Initialize(int capacity) + { + Debug.Assert(m_buckets == null, "Initialize was called but m_buckets was non-null"); + + int size = HashHelpers.GetPrime(capacity); + + m_buckets = new int[size]; + m_slots = new Slot[size]; + } + + /// + /// Expand to new capacity. New capacity is next prime greater than or equal to suggested + /// size. This is called when the underlying array is filled. This performs no + /// defragmentation, allowing faster execution; note that this is reasonable since + /// AddIfNotPresent attempts to insert new elements in re-opened spots. + /// + private void IncreaseCapacity() + { + Debug.Assert(m_buckets != null, "IncreaseCapacity called on a set with no elements"); + + // Handle overflow conditions. Try to expand capacity by GrowthFactor. If that causes + // overflow, use size suggestion of m_count and see if HashHelpers returns a value + // greater than that. If not, capacity can't be increased so throw capacity overflow + // exception. + int sizeSuggestion = unchecked(m_count * GrowthFactor); + if (sizeSuggestion < 0) + { + sizeSuggestion = m_count; + } + int newSize = HashHelpers.GetPrime(sizeSuggestion); + if (newSize <= m_count) + { + throw new ArgumentException("Arg_HSCapacityOverflow"); + } + + // Able to increase capacity; copy elements to larger array and rehash + Slot[] newSlots = new Slot[newSize]; + if (m_slots != null) + { + Array.Copy(m_slots, 0, newSlots, 0, m_lastIndex); + } + + int[] newBuckets = new int[newSize]; + for (int i = 0; i < m_lastIndex; i++) + { + int bucket = newSlots[i].hashCode % newSize; + newSlots[i].next = newBuckets[bucket] - 1; + newBuckets[bucket] = i + 1; + } + m_slots = newSlots; + m_buckets = newBuckets; + + } + + /// + /// Add item to this HashSet. Returns bool indicating whether item was added (won't be + /// added if already present) + /// + /// + /// true if added, false if already present + public bool Add(T value) + { + if (m_buckets == null) + { + Initialize(0); + } + + int hashCode = InternalGetHashCode(value); + int bucket = hashCode % m_buckets.Length; + for (int i = m_buckets[hashCode % m_buckets.Length] - 1; i >= 0; i = m_slots[i].next) + { + if (m_slots[i].hashCode == hashCode && m_comparer.Equals(m_slots[i].value, value)) + { + return false; + } + } + int index; + if (m_freeList >= 0) + { + index = m_freeList; + m_freeList = m_slots[index].next; + } + else + { + if (m_lastIndex == m_slots.Length) + { + IncreaseCapacity(); + // this will change during resize + bucket = hashCode % m_buckets.Length; + } + index = m_lastIndex; + m_lastIndex++; + } + m_slots[index].hashCode = hashCode; + m_slots[index].value = value; + m_slots[index].next = m_buckets[bucket] - 1; + + // Append to linked list + if (m_lastOrderIndex != -1) + { + m_slots[m_lastOrderIndex].nextOrder = index; + } + if (m_firstOrderIndex == -1) + { + m_firstOrderIndex = index; + } + m_slots[index].nextOrder = -1; + m_slots[index].previousOrder = m_lastOrderIndex; + m_lastOrderIndex = index; + + m_buckets[bucket] = index + 1; + m_count++; + m_version++; + return true; + } + + /// + /// Workaround Comparers that throw ArgumentNullException for GetHashCode(null). + /// + /// + /// hash code + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private int InternalGetHashCode(T item) + { + // This check prevents boxing of value types to compare to null + if (IsValueType ? 
IsNullable && item.Equals(null) : item == null) + { + return 0; + } + return m_comparer.GetHashCode(item) & Lower31BitMask; + } + + #endregion + + internal struct Slot + { + internal int hashCode; // Lower 31 bits of hash code, -1 if unused + internal T value; + internal int next; // Index of next entry, -1 if last + internal int nextOrder; // Index of next entry by order, -1 if last + internal int previousOrder; // Index of previous entry by order, -1 if first + } + + public struct Reader : IReadOnlyCollection + { + private OrderedHashSet m_set; + + public int Count { get { return m_set.Count; } } + + public Reader(OrderedHashSet set) + { + this.m_set = set; + } + + public bool Contains(T item) + { + return m_set.Contains(item); + } + + public Range StartWith(T item) + { + return new Range(m_set, item); + } + + public Enumerator GetEnumerator() + { + return new Enumerator(m_set); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + } + + public struct ReverseReader : IReadOnlyCollection + { + private OrderedHashSet m_set; + + public int Count { get { return m_set.Count; } } + + public ReverseReader(OrderedHashSet set) + { + this.m_set = set; + } + + public bool Contains(T item) + { + return m_set.Contains(item); + } + + public ReverseRange StartWith(T item) + { + return new ReverseRange(m_set, item); + } + + public ReverseEnumerator GetEnumerator() + { + return new ReverseEnumerator(m_set); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + } + + /// + /// Part of starting with specified element. + /// Enumeration goes from specified element to last element in collection. + /// Returns empty enumeration when specified item is not in collection. + /// + public struct Range : IEnumerable + { + OrderedHashSet m_set; + T m_startingItem; + + public Range(OrderedHashSet set, T startingItem) + { + m_set = set; + m_startingItem = startingItem; + } + + public Enumerator GetEnumerator() + { + return new Enumerator(m_set, m_set.InternalIndexOf(m_startingItem)); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + } + + /// + /// Part of in reversed order starting with specified element. + /// Enumeration goes from specified element to first element in collection. + /// Returns empty enumeration when specified item is not in collection. 
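
// A hedged usage sketch of the ordered-set surface defined in this file (`set` and
// the values are illustrative, not part of the patch):
var set = new OrderedHashSet<int> { 10, 20, 30 };   // insertion order: 10, 20, 30
set.MoveFirst(30);                                  // order is now: 30, 10, 20
set.MoveBefore(20, 10);                             // order is now: 30, 20, 10
foreach (int id in set.StartWith(20)) { }           // yields 20, then 10
foreach (int id in set.Reversed) { }                // yields 10, 20, 30
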
+ /// + public struct ReverseRange : IEnumerable + { + OrderedHashSet m_set; + T m_startingItem; + + public ReverseRange(OrderedHashSet set, T startingItem) + { + m_set = set; + m_startingItem = startingItem; + } + + public ReverseEnumerator GetEnumerator() + { + return new ReverseEnumerator(m_set, m_set.InternalIndexOf(m_startingItem)); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + } + + [Serializable] + public struct Enumerator : IEnumerator, IEnumerator + { + private OrderedHashSet m_set; + private int m_index; + private int m_version; + private T m_current; + + public T Current + { + get { return m_current; } + } + + internal Enumerator(OrderedHashSet set) + : this(set, set.m_firstOrderIndex) + { + } + + internal Enumerator(OrderedHashSet set, int startIndex) + { + m_set = set; + m_index = startIndex; + m_version = set.m_version; + m_current = default(T); + } + + public bool MoveNext() + { + if (m_version != m_set.m_version) + { + throw new InvalidOperationException("InvalidOperation_EnumFailedVersion"); + } + + while (m_index != -1) + { + m_current = m_set.m_slots[m_index].value; + m_index = m_set.m_slots[m_index].nextOrder; + return true; + } + m_current = default(T); + return false; + } + + void IDisposable.Dispose() + { + } + + Object IEnumerator.Current + { + get + { + if (m_index == m_set.m_firstOrderIndex || m_index == -1) + { + throw new InvalidOperationException("InvalidOperation_EnumOpCantHappen"); + } + return Current; + } + } + + void IEnumerator.Reset() + { + throw new NotSupportedException(); + } + } + + [Serializable] + public struct ReverseEnumerator : IEnumerator, IEnumerator + { + private OrderedHashSet m_set; + private int m_index; + private int m_version; + private T m_current; + + public T Current + { + get { return m_current; } + } + + internal ReverseEnumerator(OrderedHashSet set) + : this(set, set.m_lastOrderIndex) + { + } + + internal ReverseEnumerator(OrderedHashSet set, int startIndex) + { + m_set = set; + m_index = startIndex; + m_version = set.m_version; + m_current = default(T); + } + + public bool MoveNext() + { + if (m_version != m_set.m_version) + { + throw new InvalidOperationException("InvalidOperation_EnumFailedVersion"); + } + + while (m_index != -1) + { + m_current = m_set.m_slots[m_index].value; + m_index = m_set.m_slots[m_index].previousOrder; + return true; + } + m_current = default(T); + return false; + } + + Object IEnumerator.Current + { + get + { + if (m_index == m_set.m_lastOrderIndex || m_index == -1) + { + throw new InvalidOperationException("InvalidOperation_EnumOpCantHappen"); + } + return Current; + } + } + + void IDisposable.Dispose() + { + } + + void IEnumerator.Reset() + { + throw new NotSupportedException(); + } + } + } +} diff --git a/DBCD.IO/DBCD.IO.csproj b/DBCD.IO/DBCD.IO.csproj index d494a86..7a4e88d 100644 --- a/DBCD.IO/DBCD.IO.csproj +++ b/DBCD.IO/DBCD.IO.csproj @@ -2,6 +2,7 @@ netstandard2.0;net6.0;net7.0;net8.0 + 8.0 @@ -13,6 +14,8 @@ + + diff --git a/DBCD.IO/DBParser.cs b/DBCD.IO/DBParser.cs index a2c29aa..f01f1d9 100644 --- a/DBCD.IO/DBParser.cs +++ b/DBCD.IO/DBParser.cs @@ -1,9 +1,10 @@ -using DBCD.IO.Readers; +using DBCD.IO.Common; +using DBCD.IO.Readers; using DBCD.IO.Writers; using System; using System.Collections.Generic; using System.IO; -using System.Text; +using System.Linq; namespace DBCD.IO { @@ -11,9 +12,8 @@ public class DBParser { private readonly BaseReader _reader; - #region Fields - - public 
Type RecordType { get; private set; } + #region Header + public Type RecordType { get; protected set; } public string Identifier { get; } public int RecordsCount => _reader.RecordsCount; public int FieldsCount => _reader.FieldsCount; @@ -23,12 +23,8 @@ public class DBParser public uint LayoutHash => _reader.LayoutHash; public int IdFieldIndex => _reader.IdFieldIndex; public DB2Flags Flags => _reader.Flags; - public int Locale => _reader.Locale; - #endregion - #region Constructors - public DBParser(string fileName) : this(File.Open(fileName, FileMode.Open, FileAccess.Read, FileShare.Read)) { } public DBParser(Stream stream) @@ -39,6 +35,12 @@ public DBParser(Stream stream) stream.Position = 0; switch (Identifier) { + case "WDC5": + _reader = new WDC5Reader(stream); + break; + case "WDC4": + _reader = new WDC4Reader(stream); + break; case "WDC3": _reader = new WDC3Reader(stream); break; @@ -73,13 +75,17 @@ public DBParser(Stream stream) } } - #endregion - - #region Methods + /// + /// Get all records for + /// + public Storage GetRecords() where T : class, new() => new Storage(this); - public DBStorage ReadRecords() where T : class, new() => new DBStorage(this); + /// + /// Populate the storage with values. + /// + public void PopulateRecords(IDictionary storage) where T : class, new() => ReadRecords(storage); - public void ReadRecords(IDictionary storage) where T : class, new() + protected virtual void ReadRecords(IDictionary storage) where T : class, new() { var fieldCache = (RecordType = typeof(T)).ToFieldCache(); @@ -88,16 +94,46 @@ public DBParser(Stream stream) T entry = new T(); row.GetFields(fieldCache, entry); lock (storage) - storage[row.Id] = entry; + storage.Add(row.Id, entry); }); } + /// + /// Get's all encrypted DB2 Sections. + /// + public Dictionary GetEncryptedSections() + { + var reader = _reader as IEncryptionSupportingReader; + + if (reader == null || reader.GetEncryptedSections() == null) + { + return new Dictionary(); + } - public void WriteRecords(IDictionary storage, string fileName) where T : class, new() + return reader.GetEncryptedSections().Where(s => s != null).ToDictionary(s => s.TactKeyLookup, s => s.NumRecords); + } + + public Dictionary GetEncryptedIDs() { - WriteRecords(storage, File.Open(fileName, FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite)); + var reader = this._reader as IEncryptionSupportingReader; + + if (reader == null || reader.GetEncryptedIDs() == null) + { + return new Dictionary(); + } + + return reader.GetEncryptedIDs(); } + /// + /// Write records to a new .db2 file. + /// + public void WriteRecords(IDictionary storage, string filename) where T : class, new() => + WriteRecords(storage, File.Open(filename, FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite)); + + /// + /// Write records to a new .db2 file. 
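
// End-to-end sketch of the parser surface above ("MapRow" is a hypothetical row class
// whose fields mirror the table layout; file names are illustrative):
var parser = new DBParser("Map.db2");            // file magic selects the WDBC..WDC5 reader
var rows = new Dictionary<int, MapRow>();        // or: var storage = parser.GetRecords<MapRow>();
parser.PopulateRecords(rows);                    // fills id -> record and sets RecordType
parser.WriteRecords(rows, "Map.edited.db2");     // round-trips through the matching writer
parser.ClearCache();                             // frees reader temp data; no further reads
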
+ /// public void WriteRecords(IDictionary storage, Stream stream) where T : class, new() { if (typeof(T) != RecordType) @@ -106,6 +142,12 @@ public DBParser(Stream stream) BaseWriter writer; switch (Identifier) { + case "WDC5": + writer = new WDC5Writer((WDC5Reader)_reader, storage, stream); + break; + case "WDC4": + writer = new WDC4Writer((WDC4Reader)_reader, storage, stream); + break; case "WDC3": writer = new WDC3Writer((WDC3Reader)_reader, storage, stream); break; @@ -137,12 +179,9 @@ public DBParser(Stream stream) } } - /// /// Clears temporary data however prevents further calls /// public void ClearCache() => _reader.Clear(); - - #endregion } -} +} \ No newline at end of file diff --git a/DBCD.IO/DBStorage.cs b/DBCD.IO/DBStorage.cs deleted file mode 100644 index ba0178e..0000000 --- a/DBCD.IO/DBStorage.cs +++ /dev/null @@ -1,49 +0,0 @@ -using DBCD.IO.Readers; -using DBCD.IO.Writers; -using System; -using System.Collections.Generic; -using System.IO; - -namespace DBCD.IO -{ - public class DBStorage : SortedDictionary where T : class, new() - { - private readonly DBParser _reader; - - #region Header - - public string Identifier => _reader.Identifier; - public int RecordsCount => _reader.RecordsCount; - public int FieldsCount => _reader.FieldsCount; - public int RecordSize => _reader.RecordSize; - public uint TableHash => _reader.TableHash; - public uint LayoutHash => _reader.LayoutHash; - public int IdFieldIndex => _reader.IdFieldIndex; - public DB2Flags Flags => _reader.Flags; - public int Locale => _reader.Locale; - - #endregion - - #region Constructors - - public DBStorage(string fileName) : this(File.Open(fileName, FileMode.Open, FileAccess.Read, FileShare.Read)) { } - - public DBStorage(Stream stream) : this(new DBParser(stream)) => _reader.ClearCache(); - - public DBStorage(DBParser dbReader) - { - _reader = dbReader; - _reader.ReadRecords(this); - } - - #endregion - - #region Methods - - public void Save(string fileName) => _reader.WriteRecords(this, fileName); - - public void Save(Stream stream) => _reader.WriteRecords(this, stream); - - #endregion - } -} diff --git a/DBCD.IO/Extensions.cs b/DBCD.IO/Extensions.cs index 06d8734..d7d3114 100644 --- a/DBCD.IO/Extensions.cs +++ b/DBCD.IO/Extensions.cs @@ -1,8 +1,12 @@ using System; +using System.Collections.Generic; +using System.Diagnostics.Contracts; using System.IO; +using System.Linq; using System.Linq.Expressions; using System.Reflection; using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; using System.Text; namespace DBCD.IO @@ -29,6 +33,11 @@ public static Func GetGetter(this FieldInfo fieldInfo) return Expression.Lambda>(convertExpression, paramExpression).Compile(); } + public static T GetAttribute(this FieldInfo fieldInfo) where T : Attribute + { + return Attribute.GetCustomAttribute(fieldInfo, typeof(T)) as T; + } + public static FieldCache[] ToFieldCache(this Type type) { var fields = type.GetFields(); @@ -40,34 +49,37 @@ public static FieldCache[] ToFieldCache(this Type type) return cache; } - public static T GetAttribute(this FieldInfo fieldInfo) where T : Attribute - { - return Attribute.GetCustomAttribute(fieldInfo, typeof(T)) as T; - } - public static T Read(this BinaryReader reader) where T : struct { byte[] result = reader.ReadBytes(Unsafe.SizeOf()); return Unsafe.ReadUnaligned(ref result[0]); } - public static void Write(this BinaryWriter writer, T value) where T : struct + public static T[] ReadArray(this BinaryReader reader) where T : struct { - byte[] buffer = new 
byte[Unsafe.SizeOf()]; - Unsafe.WriteUnaligned(ref buffer[0], value); - writer.Write(buffer); + int numBytes = (int)reader.ReadInt64(); + + byte[] result = reader.ReadBytes(numBytes); + + reader.BaseStream.Position += (0 - numBytes) & 0x07; + return result.CopyTo(); + } + + public static T[] ReadArray(this BinaryReader reader, int size) where T : struct + { + int numBytes = Marshal.SizeOf() * size; + + byte[] result = reader.ReadBytes(numBytes); + return result.CopyTo(); } - public static unsafe T[] ReadArray(this BinaryReader reader, int size) where T : struct + public static unsafe T[] CopyTo(this byte[] src) where T : struct { - int sizeOf = Unsafe.SizeOf(); + T[] result = new T[src.Length / Unsafe.SizeOf()]; - byte[] src = reader.ReadBytes(sizeOf * size); - if (src.Length == 0) - return new T[0]; + if (src.Length > 0) + Unsafe.CopyBlockUnaligned(Unsafe.AsPointer(ref result[0]), Unsafe.AsPointer(ref src[0]), (uint)src.Length); - T[] result = new T[src.Length / sizeOf]; - Unsafe.CopyBlockUnaligned(Unsafe.AsPointer(ref result[0]), Unsafe.AsPointer(ref src[0]), (uint)src.Length); return result; } @@ -85,10 +97,27 @@ public static unsafe void WriteArray(this BinaryWriter writer, T[] value) whe writer.Write(buffer); } + public static void Write(this BinaryWriter writer, T value) where T : struct + { + byte[] buffer = new byte[Unsafe.SizeOf()]; + Unsafe.WriteUnaligned(ref buffer[0], value); + writer.Write(buffer); + } + public static bool HasFlagExt(this DB2Flags flag, DB2Flags valueToCheck) { return (flag & valueToCheck) == valueToCheck; } + + public static T MaxOrDefault(this ICollection source) + { + return source.DefaultIfEmpty().Max(); + } + + public static T MinOrDefault(this ICollection source) + { + return source.DefaultIfEmpty().Min(); + } } static class CStringExtensions @@ -121,5 +150,45 @@ public static void WriteCString(this BinaryWriter writer, string str) writer.Write(bytes); writer.Write((byte)0); } + + public static byte[] ToByteArray(this string str) + { + str = str.Replace(" ", string.Empty); + + var res = new byte[str.Length / 2]; + for (int i = 0; i < res.Length; i++) + { + res[i] = Convert.ToByte(str.Substring(i * 2, 2), 16); + } + return res; + } + } + + /// + /// A that provides extension methods for numeric types + /// + public static class NumericExtensions + { + [Pure] + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int MostSignificantBit(this int n) + { + if (n == 0) return 1; + else return ((int)(BitConverter.DoubleToInt64Bits(n) >> 52) & 0x7ff) - 1022; + } + + [Pure] + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int MostSignificantBit(this uint n) + { + if (n == 0) return 1; + else return ((int)(BitConverter.DoubleToInt64Bits(n) >> 52) & 0x7ff) - 1022; + } + + /// + /// Calculates the upper bound of the log base 2 of the input value + /// + /// The input value to compute the bound for (with n > 0) + public static int UpperBoundLog2(this int n) => 1 << MostSignificantBit(n); } } diff --git a/DBCD.IO/FieldCache.cs b/DBCD.IO/FieldCache.cs index 7ae9efe..f8a08dd 100644 --- a/DBCD.IO/FieldCache.cs +++ b/DBCD.IO/FieldCache.cs @@ -6,13 +6,14 @@ namespace DBCD.IO { class FieldCache { - private readonly FieldInfo Field; + public readonly FieldInfo Field; public readonly bool IsArray = false; public readonly bool IsLocalisedString = false; public readonly Action Setter; public readonly Func Getter; public readonly LocaleAttribute LocaleInfo; + public bool IsForeign { get; set; } = false; public bool IsNonInlineRelation { get; 
set; } = false; public bool IsRelation { get; set; } = false; public bool IndexMapField { get; set; } = false; @@ -42,12 +43,15 @@ public FieldCache(FieldInfo field) IsNonInlineRelation = IsRelation && relationAttribute.IsNonInline; FieldType = field.FieldType; MetaDataFieldType = IsNonInlineRelation ? relationAttribute.FieldType : FieldType; + + ForeignAttribute foreignAttribute = (ForeignAttribute)Attribute.GetCustomAttribute(field, typeof(ForeignAttribute)); + IsForeign = (foreignAttribute != null) ? foreignAttribute.IsForeign : false; } private int GetCardinality(FieldInfo field) { var cardinality = field.GetAttribute()?.Count; - return cardinality > 0 ? cardinality.Value : 1; + return cardinality.HasValue && cardinality > 0 ? cardinality.Value : 1; } private bool GetStringInfo(FieldInfo field, out LocaleAttribute attribute) diff --git a/DBCD.IO/HotfixReader.cs b/DBCD.IO/HotfixReader.cs index 12d8c97..f157001 100644 --- a/DBCD.IO/HotfixReader.cs +++ b/DBCD.IO/HotfixReader.cs @@ -1,4 +1,5 @@ -using DBCD.IO.Readers; +using DBCD.IO.Common; +using DBCD.IO.Readers; using System; using System.Collections.Generic; using System.IO; @@ -39,10 +40,10 @@ public HotfixReader(Stream stream) } - public void ApplyHotfixes(IDictionary storage, DBParser dbReader) where T : class, new() => ReadHotfixes(storage, dbReader); + public void ApplyHotfixes(IDictionary storage, DBParser parser) where T : class, new() => ReadHotfixes(storage, parser); - public void ApplyHotfixes(IDictionary storage, DBReader dbReader, RowProcessor processor) where T : class, new() - => ReadHotfixes(storage, dbReader, processor); + public void ApplyHotfixes(IDictionary storage, DBParser parser, RowProcessor processor) where T : class, new() + => ReadHotfixes(storage, parser, processor); public void CombineCaches(params string[] files) { @@ -51,7 +52,7 @@ public void CombineCaches(params string[] files) CombineCache(file); } } - + public void CombineCache(string file) { if (!File.Exists(file)) @@ -66,7 +67,7 @@ public void CombineCache(string file) _reader.Combine(reader); } - protected virtual void ReadHotfixes(IDictionary storage, DBReader dbReader, RowProcessor processor = null) where T : class, new() + protected virtual void ReadHotfixes(IDictionary storage, DBParser parser, RowProcessor processor = null) where T : class, new() { var fieldCache = typeof(T).ToFieldCache(); @@ -74,18 +75,18 @@ public void CombineCache(string file) processor = DefaultProcessor; // Id fields need to be excluded if not inline - if (dbReader.Flags.HasFlagExt(DB2Flags.Index)) - fieldCache[dbReader.IdFieldIndex].IndexMapField = true; + if (parser.Flags.HasFlagExt(DB2Flags.Index)) + fieldCache[parser.IdFieldIndex].IndexMapField = true; // TODO verify hotfixes need to be applied sequentially - var records = _reader.GetRecords(dbReader.TableHash).OrderBy(x => x.PushId); + var records = _reader.GetRecords(parser.TableHash).OrderBy(x => x.PushId); // Check if there are any valid cached records with data, don't remove row if so. // Example situation: Blizzard has invalidated TACTKey records in the same DBCache as valid ones. // Without the below check, valid cached TACTKey records would be removed by the invalidated records afterwards. // This only seems to be relevant for cached tables and specifically TACTKey, BroadcastText/ItemSparse only show up single times it seems. 
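
// Typical flow for the hotfix surface above (paths are illustrative; `rows` and
// `parser` are the dictionary and DBParser from the earlier read):
var hotfixes = new HotfixReader(File.OpenRead(@"Cache\ADB\enUS\DBCache.bin"));
hotfixes.CombineCaches(@"alt\DBCache.bin");      // merge additional caches, newest last
hotfixes.ApplyHotfixes(rows, parser);            // overlay pushed rows onto the base table
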
- var shouldDelete = (dbReader.TableHash != 3744420815 && dbReader.TableHash != 35137211) || !records.Any(r => r.IsValid && r.PushId == -1 && r.DataSize > 0); - + var shouldDelete = (parser.TableHash != 3744420815 && parser.TableHash != 35137211) || !records.Any(r => r.IsValid && r.PushId == -1 && r.DataSize > 0); + foreach (var row in records) { var operation = processor(row, shouldDelete); @@ -96,7 +97,7 @@ public void CombineCache(string file) row.GetFields(fieldCache, entry); storage[row.RecordId] = entry; } - else if(operation == RowOp.Delete) + else if (operation == RowOp.Delete) { storage.Remove(row.RecordId); } diff --git a/DBCD.IO/Readers/BaseEncryptionSupportingReader.cs b/DBCD.IO/Readers/BaseEncryptionSupportingReader.cs index 3663a2b..0a4ad3a 100644 --- a/DBCD.IO/Readers/BaseEncryptionSupportingReader.cs +++ b/DBCD.IO/Readers/BaseEncryptionSupportingReader.cs @@ -1,8 +1,8 @@ using System.Collections.Generic; using System.Linq; -using DBFileReaderLib.Common; +using DBCD.IO.Common; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { abstract class BaseEncryptionSupportingReader : BaseReader, IEncryptionSupportingReader { diff --git a/DBCD.IO/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs index 2cb9ca4..ded7a87 100644 --- a/DBCD.IO/Readers/WDB6Reader.cs +++ b/DBCD.IO/Readers/WDB6Reader.cs @@ -20,7 +20,7 @@ class WDB6Row : IDBRow public BitReader Data { get => m_data; set => m_data = value; } private readonly FieldMetaData[] m_fieldMeta; - private readonly Dictionary[] m_commonData; + private readonly Dictionary[] CommonData; public WDB6Row(BaseReader reader, BitReader data, int id, int recordIndex) { @@ -32,7 +32,7 @@ public WDB6Row(BaseReader reader, BitReader data, int id, int recordIndex) m_dataPosition = m_data.Position; m_fieldMeta = reader.Meta; - m_commonData = reader.CommonData; + CommonData = reader.CommonData; Id = id; } @@ -80,7 +80,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], m_commonData?[i]); + Id = GetFieldValue(0, m_data, m_fieldMeta[i], CommonData?[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -102,14 +102,14 @@ public void GetFields(FieldCache[] fields, T entry) SetCardinality(info, fieldIndex); if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_fieldMeta[fieldIndex], m_commonData?[fieldIndex], m_reader.StringTable, info.Cardinality); + value = reader(Id, m_data, m_fieldMeta[fieldIndex], CommonData?[fieldIndex], m_reader.StringTable, info.Cardinality); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_fieldMeta[fieldIndex], m_commonData?[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, m_data, m_fieldMeta[fieldIndex], CommonData?[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } diff --git a/DBCD.IO/Readers/WDC1Reader.cs b/DBCD.IO/Readers/WDC1Reader.cs index 6627d45..fac1142 100644 --- a/DBCD.IO/Readers/WDC1Reader.cs +++ b/DBCD.IO/Readers/WDC1Reader.cs @@ -20,9 +20,9 @@ class WDC1Row : IDBRow public BitReader Data { get => m_data; set => m_data = value; } private readonly FieldMetaData[] m_fieldMeta; - private readonly ColumnMetaData[] m_columnMeta; - private readonly Value32[][] m_palletData; - private readonly Dictionary[] m_commonData; + private readonly 
ColumnMetaData[] ColumnMeta; + private readonly Value32[][] PalletData; + private readonly Dictionary[] CommonData; private readonly int m_refID; public WDC1Row(BaseReader reader, BitReader data, int id, int refID, int recordIndex) @@ -35,9 +35,9 @@ public WDC1Row(BaseReader reader, BitReader data, int id, int refID, int recordI m_dataPosition = m_data.Position; m_fieldMeta = reader.Meta; - m_columnMeta = reader.ColumnMeta; - m_palletData = reader.PalletData; - m_commonData = reader.CommonData; + ColumnMeta = reader.ColumnMeta; + PalletData = reader.PalletData; + CommonData = reader.CommonData; m_refID = refID; Id = id; @@ -86,7 +86,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], m_columnMeta[i], m_palletData[i], m_commonData[i]); + Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -104,14 +104,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable); + value = reader(m_data, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, m_data, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -367,7 +367,7 @@ public WDC1Reader(Stream stream) refData.Entries.TryGetValue(i, out int refId); - IDBRow rec = new WDC1Row(this, bitReader, indexDataSize != 0 ? m_indexData[i] : -1, refId, i); + IDBRow rec = new WDC1Row(this, bitReader, indexDataSize != 0 ? 
IndexData[i] : -1, refId, i); _Records.Add(i, rec); } } diff --git a/DBCD.IO/Readers/WDC2Reader.cs b/DBCD.IO/Readers/WDC2Reader.cs index 4f29d11..b69a138 100644 --- a/DBCD.IO/Readers/WDC2Reader.cs +++ b/DBCD.IO/Readers/WDC2Reader.cs @@ -21,9 +21,9 @@ class WDC2Row : IDBRow public BitReader Data { get => m_data; set => m_data = value; } private readonly FieldMetaData[] m_fieldMeta; - private readonly ColumnMetaData[] m_columnMeta; - private readonly Value32[][] m_palletData; - private readonly Dictionary[] m_commonData; + private readonly ColumnMetaData[] ColumnMeta; + private readonly Value32[][] PalletData; + private readonly Dictionary[] CommonData; private readonly int m_refID; public WDC2Row(BaseReader reader, BitReader data, int recordsOffset, int id, int refID, int recordIndex) @@ -37,9 +37,9 @@ public WDC2Row(BaseReader reader, BitReader data, int recordsOffset, int id, int m_dataPosition = m_data.Position; m_fieldMeta = reader.Meta; - m_columnMeta = reader.ColumnMeta; - m_palletData = reader.PalletData; - m_commonData = reader.CommonData; + ColumnMeta = reader.ColumnMeta; + PalletData = reader.PalletData; + CommonData = reader.CommonData; m_refID = refID; Id = id; @@ -95,11 +95,11 @@ public void GetFields(FieldCache[] fields, T entry) { if (!m_reader.Flags.HasFlagExt(DB2Flags.Sparse)) { - m_data.Position = m_columnMeta[i].RecordOffset; + m_data.Position = ColumnMeta[i].RecordOffset; m_data.Offset = m_dataOffset; } - Id = GetFieldValue(0, m_data, m_fieldMeta[i], m_columnMeta[i], m_palletData[i], m_commonData[i]); + Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); } info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); @@ -117,21 +117,21 @@ public void GetFields(FieldCache[] fields, T entry) if (!m_reader.Flags.HasFlagExt(DB2Flags.Sparse)) { - m_data.Position = m_columnMeta[fieldIndex].RecordOffset; + m_data.Position = ColumnMeta[fieldIndex].RecordOffset; m_data.Offset = m_dataOffset; } if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_recordsOffset, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable); + value = reader(m_data, m_recordsOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_recordsOffset, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, m_data, m_recordsOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -257,7 +257,7 @@ public WDC2Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDC2Reader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDC2 file is corrupted!"); @@ -421,11 +421,11 @@ public WDC2Reader(Stream stream) refData.Entries.TryGetValue(i, out int refId); - IDBRow rec = new WDC2Row(this, bitReader, sections[sectionIndex].FileOffset, sections[sectionIndex].IndexDataSize != 0 ? 
m_indexData[i] : -1, refId, i); + IDBRow rec = new WDC2Row(this, bitReader, sections[sectionIndex].FileOffset, sections[sectionIndex].IndexDataSize != 0 ? IndexData[i] : -1, refId, i); _Records.Add(i, rec); } } } } } -} +} \ No newline at end of file diff --git a/DBCD.IO/Readers/WDC3Reader.cs b/DBCD.IO/Readers/WDC3Reader.cs index 144c3a0..51db394 100644 --- a/DBCD.IO/Readers/WDC3Reader.cs +++ b/DBCD.IO/Readers/WDC3Reader.cs @@ -21,9 +21,9 @@ class WDC3Row : IDBRow public BitReader Data { get => m_data; set => m_data = value; } private readonly FieldMetaData[] m_fieldMeta; - private readonly ColumnMetaData[] m_columnMeta; - private readonly Value32[][] m_palletData; - private readonly Dictionary[] m_commonData; + private readonly ColumnMetaData[] ColumnMeta; + private readonly Value32[][] PalletData; + private readonly Dictionary[] CommonData; private readonly int m_refID; public WDC3Row(BaseReader reader, BitReader data, int id, int refID, int recordIndex) @@ -37,9 +37,9 @@ public WDC3Row(BaseReader reader, BitReader data, int id, int refID, int recordI m_dataPosition = m_data.Position; m_fieldMeta = reader.Meta; - m_columnMeta = reader.ColumnMeta; - m_palletData = reader.PalletData; - m_commonData = reader.CommonData; + ColumnMeta = reader.ColumnMeta; + PalletData = reader.PalletData; + CommonData = reader.CommonData; m_refID = refID; Id = id; @@ -100,7 +100,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], m_columnMeta[i], m_palletData[i], m_commonData[i]); + Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -118,14 +118,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_recordOffset, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable); + value = reader(m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_recordOffset, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -224,7 +224,7 @@ private static string[] GetFieldValueStringArray(BitReader r, FieldMetaData fiel for (int i = 0; i < array.Length; i++) { var index = (r.Position >> 3) + recordOffset + r.ReadValue64(bitSize).GetValue(); - if(stringTable.TryGetValue(index, out string result)) + if (stringTable.TryGetValue(index, out string result)) { array[i] = result; } @@ -255,7 +255,7 @@ public WDC3Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDC3Reader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDC3 file is corrupted!"); @@ -273,11 +273,11 @@ public WDC3Reader(Stream 
stream) LayoutHash = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - Locale = reader.ReadInt32(); + int locale = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt16(); IdFieldIndex = reader.ReadUInt16(); int totalFieldsCount = reader.ReadInt32(); - PackedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts + int packedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts int lookupColumnCount = reader.ReadInt32(); // count of lookup columns int columnMetaDataSize = reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 int commonDataSize = reader.ReadInt32(); @@ -328,13 +328,13 @@ public WDC3Reader(Stream stream) if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(section.NumRecords * RecordSize); - - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(section.NumRecords * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string data - if (m_stringsTable == null) - m_stringsTable = new Dictionary(section.StringTableSize / 0x20); + if (StringTable == null) + StringTable = new Dictionary(section.StringTableSize / 0x20); for (int i = 0; i < section.StringTableSize;) { @@ -348,33 +348,33 @@ public WDC3Reader(Stream stream) else { // sparse data with inlined strings - recordsData = reader.ReadBytes(section.OffsetRecordsEndOffset - section.FileOffset); + RecordsData = reader.ReadBytes(section.OffsetRecordsEndOffset - section.FileOffset); if (reader.BaseStream.Position != section.OffsetRecordsEndOffset) throw new Exception("reader.BaseStream.Position != section.OffsetRecordsEndOffset"); } // skip encrypted sections => has tact key + record data is zero filled - if (section.TactKeyLookup != 0 && Array.TrueForAll(recordsData, x => x == 0)) + if (section.TactKeyLookup != 0 && Array.TrueForAll(RecordsData, x => x == 0)) { bool completelyZero = false; if (section.IndexDataSize > 0 || section.CopyTableCount > 0) { - // this will be the record id from m_indexData or m_copyData + // this will be the record id from IndexData or CopyData // if this is zero then the id for this record will be zero which is invalid completelyZero = reader.ReadInt32() == 0; reader.BaseStream.Position -= 4; } else if (section.OffsetMapIDCount > 0) { - // this will be the first m_sparseEntries entry + // this will be the first SparseEntries entry // confirm its size is not zero, otherwise it is invalid completelyZero = reader.Read().Size == 0; reader.BaseStream.Position -= 6; } else { - // there is no additional data and recordsData is already known to be zeroed + // there is no additional data and RecordsData is already known to be zeroed // therefore the record will have an id of zero which is invalid completelyZero = true; } @@ -386,11 +386,11 @@ public WDC3Reader(Stream stream) } } // index data - m_indexData = reader.ReadArray(section.IndexDataSize / 4); + IndexData = reader.ReadArray(section.IndexDataSize / 4); // fix zero-filled index data - if (m_indexData.Length > 0 && m_indexData.All(x => x == 0)) - m_indexData = Enumerable.Range(MinIndex + previousRecordCount, section.NumRecords).ToArray(); +
if (IndexData.Length > 0 && IndexData.All(x => x == 0)) + IndexData = Enumerable.Range(MinIndex + previousRecordCount, section.NumRecords).ToArray(); // duplicate rows data if (section.CopyTableCount > 0) @@ -402,9 +402,9 @@ public WDC3Reader(Stream stream) { var destinationRowID = reader.ReadInt32(); var sourceRowID = reader.ReadInt32(); - if(destinationRowID != sourceRowID) + if (destinationRowID != sourceRowID) { - m_copyData[destinationRowID] = sourceRowID; + CopyData[destinationRowID] = sourceRowID; } } } @@ -415,7 +415,7 @@ public WDC3Reader(Stream stream) if (TableHash == 145293629) reader.BaseStream.Position += 4 * section.OffsetMapIDCount; - m_sparseEntries = reader.ReadArray(section.OffsetMapIDCount).ToList(); + SparseEntries = reader.ReadArray(section.OffsetMapIDCount).ToList(); } // reference data @@ -435,8 +435,8 @@ public WDC3Reader(Stream stream) { int[] sparseIndexData = reader.ReadArray(section.OffsetMapIDCount); - if (section.IndexDataSize > 0 && m_indexData.Length != sparseIndexData.Length) - throw new Exception("m_indexData.Length != sparseIndexData.Length"); + if (section.IndexDataSize > 0 && IndexData.Length != sparseIndexData.Length) + throw new Exception("IndexData.Length != sparseIndexData.Length"); IndexData = sparseIndexData; } @@ -458,7 +458,7 @@ public WDC3Reader(Stream stream) refData.Entries.TryGetValue(i, out int refId); - IDBRow rec = new WDC3Row(this, bitReader, section.IndexDataSize != 0 ? m_indexData[i] : -1, refId, i + previousRecordCount); + IDBRow rec = new WDC3Row(this, bitReader, section.IndexDataSize != 0 ? IndexData[i] : -1, refId, i + previousRecordCount); _Records.Add(_Records.Count, rec); } @@ -467,4 +467,4 @@ public WDC3Reader(Stream stream) } } } -} +} \ No newline at end of file diff --git a/DBCD.IO/Readers/WDC4Reader.cs b/DBCD.IO/Readers/WDC4Reader.cs index 0cdf7c2..9791ce5 100644 --- a/DBCD.IO/Readers/WDC4Reader.cs +++ b/DBCD.IO/Readers/WDC4Reader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -6,7 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDC4Row : IDBRow { @@ -21,9 +21,9 @@ class WDC4Row : IDBRow public BitReader Data { get => m_data; set => m_data = value; } private readonly FieldMetaData[] m_fieldMeta; - private readonly ColumnMetaData[] m_columnMeta; - private readonly Value32[][] m_palletData; - private readonly Dictionary[] m_commonData; + private readonly ColumnMetaData[] ColumnMeta; + private readonly Value32[][] PalletData; + private readonly Dictionary[] CommonData; private readonly int m_refID; public WDC4Row(BaseReader reader, BitReader data, int id, int refID, int recordIndex) @@ -37,9 +37,9 @@ public WDC4Row(BaseReader reader, BitReader data, int id, int refID, int recordI m_dataPosition = m_data.Position; m_fieldMeta = reader.Meta; - m_columnMeta = reader.ColumnMeta; - m_palletData = reader.PalletData; - m_commonData = reader.CommonData; + ColumnMeta = reader.ColumnMeta; + PalletData = reader.PalletData; + CommonData = reader.CommonData; m_refID = refID; Id = id; @@ -100,7 +100,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], m_columnMeta[i], m_palletData[i], m_commonData[i]); + Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ 
-118,14 +118,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_recordOffset, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable); + value = reader(m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_recordOffset, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -292,31 +292,31 @@ public WDC4Reader(Stream stream) this.m_encryptedIDs = new Dictionary(); // field meta data - m_meta = reader.ReadArray(FieldsCount); + Meta = reader.ReadArray(FieldsCount); // column meta data - m_columnMeta = reader.ReadArray(FieldsCount); + ColumnMeta = reader.ReadArray(FieldsCount); // pallet data - m_palletData = new Value32[m_columnMeta.Length][]; - for (int i = 0; i < m_columnMeta.Length; i++) + PalletData = new Value32[ColumnMeta.Length][]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) { - m_palletData[i] = reader.ReadArray((int)m_columnMeta[i].AdditionalDataSize / 4); + PalletData[i] = reader.ReadArray((int)ColumnMeta[i].AdditionalDataSize / 4); } } // common data - m_commonData = new Dictionary[m_columnMeta.Length]; - for (int i = 0; i < m_columnMeta.Length; i++) + CommonData = new Dictionary[ColumnMeta.Length]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Common) + if (ColumnMeta[i].CompressionType == CompressionType.Common) { - var commonValues = new Dictionary((int)m_columnMeta[i].AdditionalDataSize / 8); - m_commonData[i] = commonValues; + var commonValues = new Dictionary((int)ColumnMeta[i].AdditionalDataSize / 8); + CommonData[i] = commonValues; - for (int j = 0; j < m_columnMeta[i].AdditionalDataSize / 8; j++) + for (int j = 0; j < ColumnMeta[i].AdditionalDataSize / 8; j++) commonValues[reader.ReadInt32()] = reader.Read(); } } @@ -344,18 +344,18 @@ public WDC4Reader(Stream stream) if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(section.NumRecords * RecordSize); - - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(section.NumRecords * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string data - if (m_stringsTable == null) - m_stringsTable = new Dictionary(section.StringTableSize / 0x20); + if (StringTable == null) + StringTable = new Dictionary(section.StringTableSize / 0x20); for (int i = 0; i < section.StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i + 
previousStringTableSize] = reader.ReadCString(); + StringTable[i + previousStringTableSize] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } @@ -364,33 +364,33 @@ public WDC4Reader(Stream stream) else { // sparse data with inlined strings - recordsData = reader.ReadBytes(section.OffsetRecordsEndOffset - section.FileOffset); + RecordsData = reader.ReadBytes(section.OffsetRecordsEndOffset - section.FileOffset); if (reader.BaseStream.Position != section.OffsetRecordsEndOffset) throw new Exception("reader.BaseStream.Position != section.OffsetRecordsEndOffset"); } // skip encrypted sections => has tact key + record data is zero filled - if (section.TactKeyLookup != 0 && Array.TrueForAll(recordsData, x => x == 0)) + if (section.TactKeyLookup != 0 && Array.TrueForAll(RecordsData, x => x == 0)) { bool completelyZero = false; if (section.IndexDataSize > 0 || section.CopyTableCount > 0) { - // this will be the record id from m_indexData or m_copyData + // this will be the record id from IndexData or CopyData // if this is zero then the id for this record will be zero which is invalid completelyZero = reader.ReadInt32() == 0; reader.BaseStream.Position -= 4; } else if (section.OffsetMapIDCount > 0) { - // this will be the first m_sparseEntries entry + // this will be the first SparseEntries entry // confirm its size is not zero, otherwise it is invalid completelyZero = reader.Read().Size == 0; reader.BaseStream.Position -= 6; } else { - // there is no additional data and recordsData is already known to be zeroed + // there is no additional data and RecordsData is already known to be zeroed // therefore the record will have an id of zero which is invalid completelyZero = true; } @@ -402,17 +402,17 @@ public WDC4Reader(Stream stream) } } // index data - m_indexData = reader.ReadArray(section.IndexDataSize / 4); + IndexData = reader.ReadArray(section.IndexDataSize / 4); // fix zero-filled index data - if (m_indexData.Length > 0 && m_indexData.All(x => x == 0)) - m_indexData = Enumerable.Range(MinIndex + previousRecordCount, section.NumRecords).ToArray(); + if (IndexData.Length > 0 && IndexData.All(x => x == 0)) + IndexData = Enumerable.Range(MinIndex + previousRecordCount, section.NumRecords).ToArray(); // duplicate rows data if (section.CopyTableCount > 0) { - if (m_copyData == null) - m_copyData = new Dictionary(); + if (CopyData == null) + CopyData = new Dictionary(); for (int i = 0; i < section.CopyTableCount; i++) { @@ -420,7 +420,7 @@ public WDC4Reader(Stream stream) var sourceRowID = reader.ReadInt32(); if (destinationRowID != sourceRowID) { - m_copyData[destinationRowID] = sourceRowID; + CopyData[destinationRowID] = sourceRowID; } } } @@ -431,17 +431,17 @@ public WDC4Reader(Stream stream) if (TableHash == 145293629) reader.BaseStream.Position += 4 * section.OffsetMapIDCount; - m_sparseEntries = reader.ReadArray(section.OffsetMapIDCount).ToList(); + SparseEntries = reader.ReadArray(section.OffsetMapIDCount).ToList(); } if (section.OffsetMapIDCount > 0 && Flags.HasFlag(DB2Flags.SecondaryKey)) { int[] sparseIndexData = reader.ReadArray(section.OffsetMapIDCount); - if (section.IndexDataSize > 0 && m_indexData.Length != sparseIndexData.Length) - throw new Exception("m_indexData.Length != sparseIndexData.Length"); + if (section.IndexDataSize > 0 && IndexData.Length != sparseIndexData.Length) + throw new Exception("IndexData.Length != sparseIndexData.Length"); - m_indexData = sparseIndexData; + IndexData = sparseIndexData; } // reference data @@ -461,21 +461,21 @@ public
WDC4Reader(Stream stream) { int[] sparseIndexData = reader.ReadArray(section.OffsetMapIDCount); - if (section.IndexDataSize > 0 && m_indexData.Length != sparseIndexData.Length) - throw new Exception("m_indexData.Length != sparseIndexData.Length"); + if (section.IndexDataSize > 0 && IndexData.Length != sparseIndexData.Length) + throw new Exception("IndexData.Length != sparseIndexData.Length"); - m_indexData = sparseIndexData; + IndexData = sparseIndexData; } int position = 0; for (int i = 0; i < section.NumRecords; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else { @@ -485,11 +485,11 @@ public WDC4Reader(Stream stream) int refId; if (Flags.HasFlag(DB2Flags.SecondaryKey)) - refData.Entries.TryGetValue(m_indexData[i], out refId); + refData.Entries.TryGetValue(IndexData[i], out refId); else refData.Entries.TryGetValue(i, out refId); - IDBRow rec = new WDC4Row(this, bitReader, section.IndexDataSize != 0 ? m_indexData[i] : -1, refId, i + previousRecordCount); + IDBRow rec = new WDC4Row(this, bitReader, section.IndexDataSize != 0 ? IndexData[i] : -1, refId, i + previousRecordCount); _Records.Add(_Records.Count, rec); } @@ -498,4 +498,4 @@ public WDC4Reader(Stream stream) } } } -} +} \ No newline at end of file diff --git a/DBCD.IO/Readers/WDC5Reader.cs b/DBCD.IO/Readers/WDC5Reader.cs index 89b0c98..dc411ec 100644 --- a/DBCD.IO/Readers/WDC5Reader.cs +++ b/DBCD.IO/Readers/WDC5Reader.cs @@ -1,4 +1,4 @@ -using DBFileReaderLib.Common; +using DBCD.IO.Common; using System; using System.Collections.Generic; using System.IO; @@ -6,7 +6,7 @@ using System.Runtime.CompilerServices; using System.Text; -namespace DBFileReaderLib.Readers +namespace DBCD.IO.Readers { class WDC5Row : IDBRow { @@ -21,9 +21,9 @@ class WDC5Row : IDBRow public BitReader Data { get => m_data; set => m_data = value; } private readonly FieldMetaData[] m_fieldMeta; - private readonly ColumnMetaData[] m_columnMeta; - private readonly Value32[][] m_palletData; - private readonly Dictionary[] m_commonData; + private readonly ColumnMetaData[] ColumnMeta; + private readonly Value32[][] PalletData; + private readonly Dictionary[] CommonData; private readonly int m_refID; public WDC5Row(BaseReader reader, BitReader data, int id, int refID, int recordIndex) @@ -37,9 +37,9 @@ public WDC5Row(BaseReader reader, BitReader data, int id, int refID, int recordI m_dataPosition = m_data.Position; m_fieldMeta = reader.Meta; - m_columnMeta = reader.ColumnMeta; - m_palletData = reader.PalletData; - m_commonData = reader.CommonData; + ColumnMeta = reader.ColumnMeta; + PalletData = reader.PalletData; + CommonData = reader.CommonData; m_refID = refID; Id = id; @@ -100,7 +100,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], m_columnMeta[i], m_palletData[i], m_commonData[i]); + Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -118,14 +118,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_recordOffset, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], 
m_commonData[fieldIndex], m_reader.StringTable); + value = reader(m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_recordOffset, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -295,31 +295,31 @@ public WDC5Reader(Stream stream) this.m_encryptedIDs = new Dictionary(); // field meta data - m_meta = reader.ReadArray(FieldsCount); + Meta = reader.ReadArray(FieldsCount); // column meta data - m_columnMeta = reader.ReadArray(FieldsCount); + ColumnMeta = reader.ReadArray(FieldsCount); // pallet data - m_palletData = new Value32[m_columnMeta.Length][]; - for (int i = 0; i < m_columnMeta.Length; i++) + PalletData = new Value32[ColumnMeta.Length][]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Pallet || m_columnMeta[i].CompressionType == CompressionType.PalletArray) + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) { - m_palletData[i] = reader.ReadArray((int)m_columnMeta[i].AdditionalDataSize / 4); + PalletData[i] = reader.ReadArray((int)ColumnMeta[i].AdditionalDataSize / 4); } } // common data - m_commonData = new Dictionary[m_columnMeta.Length]; - for (int i = 0; i < m_columnMeta.Length; i++) + CommonData = new Dictionary[ColumnMeta.Length]; + for (int i = 0; i < ColumnMeta.Length; i++) { - if (m_columnMeta[i].CompressionType == CompressionType.Common) + if (ColumnMeta[i].CompressionType == CompressionType.Common) { - var commonValues = new Dictionary((int)m_columnMeta[i].AdditionalDataSize / 8); - m_commonData[i] = commonValues; + var commonValues = new Dictionary((int)ColumnMeta[i].AdditionalDataSize / 8); + CommonData[i] = commonValues; - for (int j = 0; j < m_columnMeta[i].AdditionalDataSize / 8; j++) + for (int j = 0; j < ColumnMeta[i].AdditionalDataSize / 8; j++) commonValues[reader.ReadInt32()] = reader.Read(); } } @@ -347,18 +347,18 @@ public WDC5Reader(Stream stream) if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data - recordsData = reader.ReadBytes(section.NumRecords * RecordSize); - - Array.Resize(ref recordsData, recordsData.Length + 8); // pad with extra zeros so we don't crash when reading + byte[] data = reader.ReadBytes(section.NumRecords * RecordSize); + Array.Resize(ref data, data.Length + 8); // pad with extra zeros so we don't crash when reading + RecordsData = data; // string data - if (m_stringsTable == null) - m_stringsTable = new Dictionary(section.StringTableSize / 0x20); + if (StringTable == null) + StringTable = new Dictionary(section.StringTableSize / 0x20); for (int i = 0; i < section.StringTableSize;) { long oldPos = reader.BaseStream.Position; - m_stringsTable[i + previousStringTableSize] = reader.ReadCString(); + StringTable[i + previousStringTableSize] = reader.ReadCString(); i += (int)(reader.BaseStream.Position - oldPos); } @@ -367,33 +367,33 @@ public WDC5Reader(Stream stream) else { // sparse data with inlined strings - 
recordsData = reader.ReadBytes(section.OffsetRecordsEndOffset - section.FileOffset); + RecordsData = reader.ReadBytes(section.OffsetRecordsEndOffset - section.FileOffset); if (reader.BaseStream.Position != section.OffsetRecordsEndOffset) throw new Exception("reader.BaseStream.Position != section.OffsetRecordsEndOffset"); } // skip encrypted sections => has tact key + record data is zero filled - if (section.TactKeyLookup != 0 && Array.TrueForAll(recordsData, x => x == 0)) + if (section.TactKeyLookup != 0 && Array.TrueForAll(RecordsData, x => x == 0)) { bool completelyZero = false; if (section.IndexDataSize > 0 || section.CopyTableCount > 0) { - // this will be the record id from m_indexData or m_copyData + // this will be the record id from IndexData or CopyData // if this is zero then the id for this record will be zero which is invalid completelyZero = reader.ReadInt32() == 0; reader.BaseStream.Position -= 4; } else if (section.OffsetMapIDCount > 0) { - // this will be the first m_sparseEntries entry + // this will be the first SparseEntries entry // confirm its size is not zero, otherwise it is invalid completelyZero = reader.Read().Size == 0; reader.BaseStream.Position -= 6; } else { - // there is no additional data and recordsData is already known to be zeroed + // there is no additional data and RecordsData is already known to be zeroed // therefore the record will have an id of zero which is invalid completelyZero = true; } @@ -405,17 +405,17 @@ public WDC5Reader(Stream stream) } } // index data - m_indexData = reader.ReadArray(section.IndexDataSize / 4); + IndexData = reader.ReadArray(section.IndexDataSize / 4); // fix zero-filled index data - if (m_indexData.Length > 0 && m_indexData.All(x => x == 0)) - m_indexData = Enumerable.Range(MinIndex + previousRecordCount, section.NumRecords).ToArray(); + if (IndexData.Length > 0 && IndexData.All(x => x == 0)) + IndexData = Enumerable.Range(MinIndex + previousRecordCount, section.NumRecords).ToArray(); // duplicate rows data if (section.CopyTableCount > 0) { - if (m_copyData == null) - m_copyData = new Dictionary(); + if (CopyData == null) + CopyData = new Dictionary(); for (int i = 0; i < section.CopyTableCount; i++) { @@ -423,7 +423,7 @@ public WDC5Reader(Stream stream) var sourceRowID = reader.ReadInt32(); if (destinationRowID != sourceRowID) { - m_copyData[destinationRowID] = sourceRowID; + CopyData[destinationRowID] = sourceRowID; } } } @@ -434,17 +434,17 @@ public WDC5Reader(Stream stream) if (TableHash == 145293629) reader.BaseStream.Position += 4 * section.OffsetMapIDCount; - m_sparseEntries = reader.ReadArray(section.OffsetMapIDCount).ToList(); + SparseEntries = reader.ReadArray(section.OffsetMapIDCount).ToList(); } if (section.OffsetMapIDCount > 0 && Flags.HasFlag(DB2Flags.SecondaryKey)) { int[] sparseIndexData = reader.ReadArray(section.OffsetMapIDCount); - if (section.IndexDataSize > 0 && m_indexData.Length != sparseIndexData.Length) - throw new Exception("m_indexData.Length != sparseIndexData.Length"); + if (section.IndexDataSize > 0 && IndexData.Length != sparseIndexData.Length) - throw new Exception("m_indexData.Length != sparseIndexData.Length"); + throw new Exception("IndexData.Length != sparseIndexData.Length"); - m_indexData = sparseIndexData; + IndexData = sparseIndexData; } // reference data @@ -464,21 +464,21 @@ public WDC5Reader(Stream stream) { int[] sparseIndexData = reader.ReadArray(section.OffsetMapIDCount); - if (section.IndexDataSize > 0 && m_indexData.Length != sparseIndexData.Length) - throw new Exception("m_indexData.Length != sparseIndexData.Length"); + if
(section.IndexDataSize > 0 && IndexData.Length != sparseIndexData.Length) + throw new Exception("IndexData.Length != sparseIndexData.Length"); - m_indexData = sparseIndexData; + IndexData = sparseIndexData; } int position = 0; for (int i = 0; i < section.NumRecords; i++) { - BitReader bitReader = new BitReader(recordsData) { Position = 0 }; + BitReader bitReader = new BitReader(RecordsData) { Position = 0 }; if (Flags.HasFlagExt(DB2Flags.Sparse)) { bitReader.Position = position; - position += m_sparseEntries[i].Size * 8; + position += SparseEntries[i].Size * 8; } else { @@ -488,11 +488,11 @@ public WDC5Reader(Stream stream) int refId; if (Flags.HasFlag(DB2Flags.SecondaryKey)) - refData.Entries.TryGetValue(m_indexData[i], out refId); + refData.Entries.TryGetValue(IndexData[i], out refId); else refData.Entries.TryGetValue(i, out refId); - IDBRow rec = new WDC4Row(this, bitReader, section.IndexDataSize != 0 ? m_indexData[i] : -1, refId, i + previousRecordCount); + IDBRow rec = new WDC4Row(this, bitReader, section.IndexDataSize != 0 ? IndexData[i] : -1, refId, i + previousRecordCount); _Records.Add(_Records.Count, rec); } @@ -501,4 +501,4 @@ public WDC5Reader(Stream stream) } } } -} +} \ No newline at end of file diff --git a/DBCD.IO/Storage.cs b/DBCD.IO/Storage.cs new file mode 100644 index 0000000..34538f7 --- /dev/null +++ b/DBCD.IO/Storage.cs @@ -0,0 +1,43 @@ +using System.Collections.Generic; +using System.IO; + +namespace DBCD.IO +{ + public class Storage : SortedDictionary where T : class, new() + { + private readonly DBParser parser; + + #region Header + + public string Identifier => parser.Identifier; + public int RecordsCount => parser.RecordsCount; + public int FieldsCount => parser.FieldsCount; + public int RecordSize => parser.RecordSize; + public uint TableHash => parser.TableHash; + public uint LayoutHash => parser.LayoutHash; + public int IdFieldIndex => parser.IdFieldIndex; + public DB2Flags Flags => parser.Flags; + + #endregion + + #region Constructors + public Storage(string fileName) : this(File.Open(fileName, FileMode.Open, FileAccess.Read, FileShare.Read)) { } + + public Storage(Stream stream) : this(new DBParser(stream)) => parser.ClearCache(); + + public Storage(DBParser dbParser) + { + parser = dbParser; + parser.PopulateRecords(this); + } + #endregion + + #region Methods + + public void Save(string fileName) => parser.WriteRecords(this, fileName); + + public void Save(Stream stream) => parser.WriteRecords(this, stream); + + #endregion + } +} \ No newline at end of file
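// The Storage class above is the new high-level entry point of the library; a minimal
// load-edit-save sketch (generic parameters are elided in this patch, so Storage is
// assumed to be Storage<T> keyed by the int record id; MapEntry and the file names are
// hypothetical):
var maps = new Storage<MapEntry>("Map.db2");  // File.Open + DBParser + PopulateRecords
Console.WriteLine($"{maps.Identifier}: {maps.RecordsCount} rows, layout 0x{maps.LayoutHash:X8}");
if (maps.TryGetValue(1, out var firstMap))    // SortedDictionary lookup by record id
    maps.Remove(1);                           // drop a row...
maps.Save("Map.edited.db2");                  // ...and persist via DBParser.WriteRecords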
diff --git a/DBCD.IO/Writers/BaseWriter.cs b/DBCD.IO/Writers/BaseWriter.cs index 65c545a..1c1749c 100644 --- a/DBCD.IO/Writers/BaseWriter.cs +++ b/DBCD.IO/Writers/BaseWriter.cs @@ -1,30 +1,37 @@ using DBCD.IO.Common; using DBCD.IO.Readers; +using System; +using System.Collections.Concurrent; using System.Collections.Generic; +using System.Diagnostics; using System.IO; +using System.Linq; +using System.Text; +using System.Threading.Tasks; namespace DBCD.IO.Writers { abstract class BaseWriter where T : class { + private static readonly Value32Comparer Value32Comparer = new Value32Comparer(); + public FieldCache[] FieldCache { get; protected set; } public int RecordsCount { get; protected set; } - public int StringTableSize { get; protected set; } + public int StringTableSize { get; set; } public int FieldsCount { get; } - public int RecordSize { get; } + public int RecordSize { get; set; } public int IdFieldIndex { get; } public DB2Flags Flags { get; } + public int PackedDataOffset { get; set; } #region Data - public FieldMetaData[] Meta { get; protected set; } public ColumnMetaData[] ColumnMeta { get; protected set; } - public List[] PalletData { get; protected set; } + public OrderedHashSet[] PalletData { get; protected set; } public Dictionary[] CommonData { get; protected set; } public Dictionary StringTable { get; protected set; } public SortedDictionary CopyData { get; protected set; } public List ReferenceData { get; protected set; } - #endregion public BaseWriter(BaseReader reader) @@ -44,14 +51,13 @@ public BaseWriter(BaseReader reader) if (ColumnMeta != null) { CommonData = new Dictionary[ColumnMeta.Length]; - PalletData = new List[ColumnMeta.Length]; + PalletData = new OrderedHashSet[ColumnMeta.Length]; ReferenceData = new List(); - // create the lookup collections for (int i = 0; i < ColumnMeta.Length; i++) { CommonData[i] = new Dictionary(); - PalletData[i] = new List(); + PalletData[i] = new OrderedHashSet(Value32Comparer); } } @@ -69,7 +75,7 @@ public int InternString(string value) StringTable.Add(value, StringTableSize); int offset = StringTableSize; - StringTableSize += value.Length + 1; + StringTableSize += Encoding.UTF8.GetByteCount(value) + 1; return offset; } @@ -115,6 +121,135 @@ public void WriteSecondaryKeyData(BinaryWriter writer, IDictionary stora } } + public void HandleCompression(IDictionary storage) + { + var externalCompressions = new HashSet(new[] { CompressionType.None, CompressionType.Common }); + var valueComparer = new Value32Comparer(); + var indexFieldOffset = 0; + var bitpackedOffset = 0; + + RecordSize = 0; + PackedDataOffset = -1; + + for (int i = 0; i < FieldCache.Length; i++) + { + FieldCache info = FieldCache[i]; + + if (i == IdFieldIndex && Flags.HasFlagExt(DB2Flags.Index)) + { + indexFieldOffset++; + continue; + } + + int fieldIndex = i - indexFieldOffset; + + if (fieldIndex >= ColumnMeta.Length) + break; + + var meta = ColumnMeta[fieldIndex]; + var compressionType = meta.CompressionType; + int compressionSize = meta.Immediate.BitWidth; + + var newCompressedSize = compressionSize; + + var palletData = new ConcurrentBag(); + + if (!externalCompressions.Contains(compressionType)) + { + if (PackedDataOffset == -1) + PackedDataOffset = ((meta.RecordOffset + 8 - 1) / 8); + } + + switch (compressionType) + { + case CompressionType.SignedImmediate: + { + var largestMSB = storage.Values.Count switch + { + 0 => 0, + _ => storage.Values.AsParallel().Max(row => + { + var value32 = Value32.Create(info.Getter(row)); + return value32.GetValue().MostSignificantBit(); + }), + }; + + newCompressedSize = largestMSB + 1; + break; + } + case CompressionType.Immediate: + { + if (info.FieldType == typeof(float)) + newCompressedSize = 32; + else + { + var maxValue = storage.Values.Count switch + { + 0 => 0U, + _ => storage.Values.AsParallel().Max(row => + { + var value32 = Value32.Create(info.Getter(row)); + return value32.GetValue(); + }), + }; + + newCompressedSize = maxValue.MostSignificantBit(); + } + break; + } + case CompressionType.Pallet: + { + Parallel.ForEach(storage.Values, row => palletData.Add(new[] { Value32.Create(info.Getter(row)) })); + var fieldMaxSize = palletData.AsParallel().Distinct(valueComparer).Count(); + newCompressedSize = fieldMaxSize.MostSignificantBit(); + break; + } + case CompressionType.PalletArray: + { + Parallel.ForEach(storage.Values, row => + { + var baseArray = (Array)info.Getter(row); + Value32[] array = new Value32[baseArray.Length]; + for (int j = 0; j < baseArray.Length; j++) + array[j] = Value32.Create(baseArray.GetValue(j)); + palletData.Add(array); + }); + + var fieldMaxSize = palletData.AsParallel().Distinct(valueComparer).Count(); + newCompressedSize = fieldMaxSize.MostSignificantBit(); + break; + } + case CompressionType.Common: + ColumnMeta[fieldIndex].Size = 0; + break; + case CompressionType.None: + break; + default: + throw new NotImplementedException("This compression type is not yet supported"); + } + + if (!externalCompressions.Contains(compressionType)) + { + ColumnMeta[fieldIndex].Immediate.BitWidth = ColumnMeta[fieldIndex].Size = (ushort)newCompressedSize; + ColumnMeta[fieldIndex].Immediate.BitOffset = bitpackedOffset; + ColumnMeta[fieldIndex].RecordOffset = (ushort)RecordSize; + + RecordSize += ColumnMeta[fieldIndex].Size; + + bitpackedOffset += ColumnMeta[fieldIndex].Immediate.BitWidth; + } + else + { + ColumnMeta[fieldIndex].RecordOffset = (ushort)RecordSize; + RecordSize += ColumnMeta[fieldIndex].Size; + } + + } + + PackedDataOffset = Math.Max(0, PackedDataOffset); + RecordSize = ((RecordSize + 8 - 1) / 8); + } + #endregion } }
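// A worked example of the bit-width recalculation in HandleCompression above, assuming the
// MostSignificantBit extension returns the 1-based position of the highest set bit (the
// extension itself is not shown in this patch):
//
//   Immediate:       largest field value 1000 (0b1111101000) -> 10 bits
//   SignedImmediate: largest MSB 10 -> 10 + 1 = 11 bits, one extra bit for the sign
//   Pallet:          37 distinct values -> MostSignificantBit(37) = 6 bits per pallet index
//
// Widths accumulate in bits (bitpackedOffset, RecordSize), and only the final
// RecordSize = (RecordSize + 8 - 1) / 8 rounds up to whole bytes, e.g. 43 bits -> 6 bytes.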
diff --git a/DBCD.IO/Writers/WDB2Writer.cs b/DBCD.IO/Writers/WDB2Writer.cs index 1ab5dd5..ff46ec3 100644 --- a/DBCD.IO/Writers/WDB2Writer.cs +++ b/DBCD.IO/Writers/WDB2Writer.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { @@ -41,7 +40,7 @@ public void Serialize(int id, T row) if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) { Array array = (Array)info.Getter(row); writer(bitWriter, m_writer, array); @@ -54,7 +53,7 @@ public void Serialize(int id, T row) } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) { object value = info.Getter(row); writer(bitWriter, m_writer, value); diff --git a/DBCD.IO/Writers/WDB3Writer.cs b/DBCD.IO/Writers/WDB3Writer.cs index 798d5a4..e50e4e7 100644 --- a/DBCD.IO/Writers/WDB3Writer.cs +++ b/DBCD.IO/Writers/WDB3Writer.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { @@ -41,14 +40,14 @@ public void Serialize(int id, T row) if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, (Array)info.Getter(row)); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, info.Getter(row)); else throw new Exception("Unhandled field type: " + typeof(T).Name); @@ -142,8 +141,8 @@ public WDB3Writer(WDB3Reader reader, IDictionary storage, Stream stream) using (var writer = new BinaryWriter(stream)) { - int minIndex = storage.Keys.Min(); - int maxIndex = storage.Keys.Max(); + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ?
0 : CopyData.Count * 8; writer.Write(WDB3FmtSig); diff --git a/DBCD.IO/Writers/WDB4Writer.cs b/DBCD.IO/Writers/WDB4Writer.cs index d7ecaa8..6fa99eb 100644 --- a/DBCD.IO/Writers/WDB4Writer.cs +++ b/DBCD.IO/Writers/WDB4Writer.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { @@ -41,14 +40,14 @@ public void Serialize(int id, T row) if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, (Array)info.Getter(row)); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, info.Getter(row)); else throw new Exception("Unhandled field type: " + typeof(T).Name); @@ -139,8 +138,8 @@ public WDB4Writer(WDB4Reader reader, IDictionary storage, Stream stream) using (var writer = new BinaryWriter(stream)) { - int minIndex = storage.Keys.Min(); - int maxIndex = storage.Keys.Max(); + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : CopyData.Count * 8; writer.Write(WDB4FmtSig); diff --git a/DBCD.IO/Writers/WDB5Writer.cs b/DBCD.IO/Writers/WDB5Writer.cs index e96ba0c..30f33ed 100644 --- a/DBCD.IO/Writers/WDB5Writer.cs +++ b/DBCD.IO/Writers/WDB5Writer.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { @@ -50,14 +49,14 @@ public void Serialize(int id, T row) if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], (Array)info.Getter(row)); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], info.Getter(row)); else throw new Exception("Unhandled field type: " + typeof(T).Name); @@ -148,8 +147,8 @@ public WDB5Writer(WDB5Reader reader, IDictionary storage, Stream stream) using (var writer = new BinaryWriter(stream)) { - int minIndex = storage.Keys.Min(); - int maxIndex = storage.Keys.Max(); + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 
0 : CopyData.Count * 8; writer.Write(WDB5FmtSig); @@ -166,12 +165,12 @@ public WDB5Writer(WDB5Reader reader, IDictionary storage, Stream stream) writer.Write((uint)Flags); writer.Write((ushort)IdFieldIndex); - if (storage.Count == 0) - return; - // field meta writer.WriteArray(Meta); + if (storage.Count == 0) + return; + // record data uint recordsOffset = (uint)writer.BaseStream.Position; foreach (var record in serializer.Records) diff --git a/DBCD.IO/Writers/WDB6Writer.cs b/DBCD.IO/Writers/WDB6Writer.cs index acff522..0f07cf8 100644 --- a/DBCD.IO/Writers/WDB6Writer.cs +++ b/DBCD.IO/Writers/WDB6Writer.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { @@ -57,14 +56,14 @@ public void Serialize(int id, T row) if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], (Array)info.Getter(row)); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], info.Getter(row)); else throw new Exception("Unhandled field type: " + typeof(T).Name); @@ -158,8 +157,8 @@ public WDB6Writer(WDB6Reader reader, IDictionary storage, Stream stream) using (var writer = new BinaryWriter(stream)) { - int minIndex = storage.Keys.Min(); - int maxIndex = storage.Keys.Max(); + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : CopyData.Count * 8; writer.Write(WDB6FmtSig); @@ -178,13 +177,13 @@ public WDB6Writer(WDB6Reader reader, IDictionary storage, Stream stream) writer.Write(Meta.Length); // totalFieldCount writer.Write(0); // commonDataSize - if (storage.Count == 0) - return; - // field meta for (int i = 0; i < FieldsCount; i++) writer.Write(Meta[i]); + if (storage.Count == 0) + return; + // record data uint recordsOffset = (uint)writer.BaseStream.Position; foreach (var record in serializer.Records) diff --git a/DBCD.IO/Writers/WDBCWriter.cs b/DBCD.IO/Writers/WDBCWriter.cs index 1f70063..9073216 100644 --- a/DBCD.IO/Writers/WDBCWriter.cs +++ b/DBCD.IO/Writers/WDBCWriter.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { @@ -37,14 +36,14 @@ public void Serialize(int id, T row) if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, (Array)info.Getter(row)); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) writer(bitWriter, m_writer, info.Getter(row)); else throw new Exception("Unhandled field type: " + typeof(T).Name); diff --git a/DBCD.IO/Writers/WDC1Writer.cs b/DBCD.IO/Writers/WDC1Writer.cs index 0e5c96c..0f85b49 100644 --- a/DBCD.IO/Writers/WDC1Writer.cs +++ b/DBCD.IO/Writers/WDC1Writer.cs @@ -4,7 +4,6 @@ using System.Collections.Generic; using System.IO; using System.Linq; -using System.Text; namespace DBCD.IO.Writers { @@ -14,9 +13,9 @@ class WDC1RowSerializer : IDBRowSerializer where T : class private 
readonly BaseWriter m_writer; private readonly FieldMetaData[] m_fieldMeta; - private readonly ColumnMetaData[] m_columnMeta; - private readonly List[] m_palletData; - private readonly Dictionary[] m_commonData; + private readonly ColumnMetaData[] ColumnMeta; + private readonly OrderedHashSet[] PalletData; + private readonly Dictionary[] CommonData; private static readonly Value32Comparer Value32Comparer = new Value32Comparer(); @@ -25,9 +24,9 @@ public WDC1RowSerializer(BaseWriter writer) { m_writer = writer; m_fieldMeta = m_writer.Meta; - m_columnMeta = m_writer.ColumnMeta; - m_palletData = m_writer.PalletData; - m_commonData = m_writer.CommonData; + ColumnMeta = m_writer.ColumnMeta; + PalletData = m_writer.PalletData; + CommonData = m_writer.CommonData; Records = new Dictionary(); } @@ -56,24 +55,21 @@ public void Serialize(int id, T row) int fieldIndex = i - indexFieldOffSet; - // reference data field - if (fieldIndex >= m_writer.Meta.Length) - { + // relationship field, used for faster lookup on IDs + if (info.IsRelation) m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); - continue; - } if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) - writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], (Array)info.Getter(row)); + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) - writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], info.Getter(row)); + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], info.Getter(row)); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -100,8 +96,9 @@ public void GetCopyRows() } - private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> { + [typeof(ulong)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(long)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(float)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(int)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), @@ -119,7 +116,7 @@ public void GetCopyRows() } }; - private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> + 
private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> { [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), [typeof(long[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), @@ -134,7 +131,7 @@ public void GetCopyRows() [typeof(string[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, (array as string[]).Select(x => writer.InternString(x)).ToArray()), }; - private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, object value) where TType : unmanaged + private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, object value) where TType : unmanaged { switch (columnMeta.CompressionType) { @@ -160,9 +157,9 @@ private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fi } case CompressionType.Pallet: { - Value32[] array = new[] { Value32.Create((TType)value) }; + Value32[] array = new[] { Value32.Create(value) }; - int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + int palletIndex = palletData.IndexOf(array); if (palletIndex == -1) { palletIndex = palletData.Count; @@ -175,7 +172,7 @@ private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fi } } - private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, Array value) where TType : unmanaged + private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, Array value) where TType : unmanaged { switch (columnMeta.CompressionType) { @@ -195,9 +192,9 @@ private static void WriteFieldValueArray(BitWriter r, FieldMetaData field // get data Value32[] array = new Value32[value.Length]; for (int i = 0; i < value.Length; i++) - array[i] = Value32.Create((TType)value.GetValue(i)); + array[i] = Value32.Create(value.GetValue(i)); - int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + int palletIndex = palletData.IndexOf(array); if (palletIndex == -1) { palletIndex = palletData.Count; @@ -221,9 +218,15 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) // always 2 empties StringTableSize++; + PackedDataOffset = reader.PackedDataOffset; + HandleCompression(storage); + WDC1RowSerializer serializer = new WDC1RowSerializer(this); serializer.Serialize(storage); - serializer.GetCopyRows(); + + // We write the copy rows if and only if it saves space and the table hasn't any reference rows. 
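// In numbers: a copy-table entry is two int32 ids (destination, source), i.e.
// sizeof(int) * 2 = 8 bytes, while writing the duplicate row inline costs RecordSize bytes
// (RecordSize is in bytes here, HandleCompression having rounded it up from bits):
//
//   RecordSize = 6  -> inline row: 6 bytes  < copy entry: 8 bytes  => keep duplicates inline
//   RecordSize = 20 -> inline row: 20 bytes > copy entry: 8 bytes  => use the copy table
//
// The ReferenceData.Count == 0 guard appears to be needed because relationship values are
// collected per serialized record, and a copy-table entry has no serialized record of its own.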
+ if ((RecordSize) >= sizeof(int) * 2 && ReferenceData.Count == 0) + serializer.GetCopyRows(); RecordsCount = serializer.Records.Count - CopyData.Count; @@ -231,8 +234,8 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) using (var writer = new BinaryWriter(stream)) { - int minIndex = storage.Keys.Min(); - int maxIndex = storage.Keys.Max(); + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : CopyData.Count * 8; writer.Write(WDC1FmtSig); @@ -250,21 +253,21 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) writer.Write((ushort)IdFieldIndex); writer.Write(FieldsCount); // totalFieldCount - writer.Write(reader.PackedDataOffset); - writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount + writer.Write(PackedDataOffset); + writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount writer.Write(0); // sparseTableOffset - writer.Write(RecordsCount * 4); // indexTableSize + writer.Write(Flags.HasFlagExt(DB2Flags.Index) ? RecordsCount * 4 : 0); // IndexDataSize writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize writer.Write(commonDataSize); writer.Write(palletDataSize); writer.Write(referenceDataSize); - if (storage.Count == 0) - return; - // field meta writer.WriteArray(Meta); + if (storage.Count == 0) + return; + // record data uint recordsOffset = (uint)writer.BaseStream.Position; foreach (var record in serializer.Records) @@ -298,7 +301,7 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) // copy table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { - foreach (var copyRecord in CopyData) + foreach (var copyRecord in CopyData.OrderBy(r => r.Value)) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); diff --git a/DBCD.IO/Writers/WDC2Writer.cs b/DBCD.IO/Writers/WDC2Writer.cs index 5128156..87df27b 100644 --- a/DBCD.IO/Writers/WDC2Writer.cs +++ b/DBCD.IO/Writers/WDC2Writer.cs @@ -5,7 +5,6 @@ using System.IO; using System.Linq; using System.Runtime.CompilerServices; -using System.Text; namespace DBCD.IO.Writers { @@ -15,9 +14,9 @@ class WDC2RowSerializer : IDBRowSerializer where T : class private readonly BaseWriter m_writer; private readonly FieldMetaData[] m_fieldMeta; - private readonly ColumnMetaData[] m_columnMeta; - private readonly List[] m_palletData; - private readonly Dictionary[] m_commonData; + private readonly ColumnMetaData[] ColumnMeta; + private readonly OrderedHashSet[] PalletData; + private readonly Dictionary[] CommonData; private static readonly Value32Comparer Value32Comparer = new Value32Comparer(); @@ -26,9 +25,9 @@ public WDC2RowSerializer(BaseWriter writer) { m_writer = writer; m_fieldMeta = m_writer.Meta; - m_columnMeta = m_writer.ColumnMeta; - m_palletData = m_writer.PalletData; - m_commonData = m_writer.CommonData; + ColumnMeta = m_writer.ColumnMeta; + PalletData = m_writer.PalletData; + CommonData = m_writer.CommonData; Records = new Dictionary(); } @@ -57,24 +56,21 @@ public void Serialize(int id, T row) int fieldIndex = i - indexFieldOffSet; - // reference data field - if (fieldIndex >= m_writer.Meta.Length) - { + // relationship field, used for faster lookup on IDs + if (info.IsRelation) m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); - continue; - } if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) - writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], 
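[For context on the new copy-rows guard: a copy-table entry costs two ints (new ID plus source ID), so deduplicating identical rows only saves space when a record is at least sizeof(int) * 2 = 8 bytes, and it must be skipped when relationship data is present because reference entries are keyed per physical record. A hedged restatement of that check:]

static bool ShouldEmitCopyRows(int recordSize, int referenceRowCount)
{
    const int CopyEntrySize = sizeof(int) * 2; // on-disk entry: {int id, int sourceId}
    // e.g. a 6-byte record would trade 6 bytes of data for an 8-byte copy
    // entry, a net loss; an 8-byte or larger record breaks even or wins.
    return recordSize >= CopyEntrySize && referenceRowCount == 0;
}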
m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], (Array)info.Getter(row)); + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) - writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], info.Getter(row)); + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], info.Getter(row)); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -134,7 +130,7 @@ public void UpdateStringOffsets(IDictionary rows) int index = fieldInfo.Key; var info = fieldInfo.Value; - var columnMeta = m_columnMeta[index]; + var columnMeta = ColumnMeta[index]; if (columnMeta.CompressionType != CompressionType.None) throw new Exception("CompressionType != CompressionType.None"); @@ -147,7 +143,7 @@ public void UpdateStringOffsets(IDictionary rows) var array = (string[])info.Getter(rows[record.Key]); for (int i = 0; i < array.Length; i++) { - fieldOffset = m_writer.StringTable[array[i]] + recordOffset - (columnMeta.RecordOffset / 8 * i); + fieldOffset = m_writer.StringTable[array[i]] + (recordOffset) - (sizeof(int) * i) - (columnMeta.RecordOffset / 8); record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset + (i * bitSize)); } } @@ -163,8 +159,9 @@ public void UpdateStringOffsets(IDictionary rows) } - private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> { + [typeof(ulong)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(long)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(float)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(int)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), @@ -182,7 +179,7 @@ public void UpdateStringOffsets(IDictionary rows) } }; - private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> { [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), [typeof(long[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, 
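[The revised fieldOffset expression in UpdateStringOffsets appears to encode WDC2-style relative string references: each string field stores the distance in bytes from the field itself to the target string. With recordOffset counting the bytes from the start of the current record to the end of the record block, a field at byte columnMeta.RecordOffset / 8 plus sizeof(int) * i for array element i sits that many bytes before the string table, and adding the string's table offset yields the stored value. A worked example with illustrative numbers only:]

int recordSize = 24;                  // bytes per record (illustrative)
int recordOffset = 2 * recordSize;    // first of two remaining records => 48
int stringTableOffset = 5;            // where the string was interned
int fieldByte = 16 / 8;               // columnMeta.RecordOffset of 16 bits => byte 2
int i = 1;                            // second element of a string[] field
int fieldOffset = stringTableOffset + recordOffset - (sizeof(int) * i) - fieldByte;
System.Console.WriteLine(fieldOffset); // 47 bytes from this slot to the string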
array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), @@ -197,7 +194,7 @@ public void UpdateStringOffsets(IDictionary rows) [typeof(string[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, (array as string[]).Select(x => writer.InternString(x)).ToArray()), }; - private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, object value) where TType : unmanaged + private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, object value) where TType : unmanaged { switch (columnMeta.CompressionType) { @@ -224,9 +221,9 @@ private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fi } case CompressionType.Pallet: { - Value32[] array = new[] { Value32.Create((TType)value) }; + Value32[] array = new[] { Value32.Create(value) }; - int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + int palletIndex = palletData.IndexOf(array); if (palletIndex == -1) { palletIndex = palletData.Count; @@ -239,7 +236,7 @@ private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fi } } - private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, Array value) where TType : unmanaged + private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, Array value) where TType : unmanaged { switch (columnMeta.CompressionType) { @@ -259,9 +256,9 @@ private static void WriteFieldValueArray(BitWriter r, FieldMetaData field // get data Value32[] array = new Value32[value.Length]; for (int i = 0; i < value.Length; i++) - array[i] = Value32.Create((TType)value.GetValue(i)); + array[i] = Value32.Create(value.GetValue(i)); - int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + int palletIndex = palletData.IndexOf(array); if (palletIndex == -1) { palletIndex = palletData.Count; @@ -284,9 +281,16 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) // always 2 empties StringTableSize++; + PackedDataOffset = reader.PackedDataOffset; + HandleCompression(storage); + WDC2RowSerializer serializer = new WDC2RowSerializer(this); serializer.Serialize(storage); - serializer.GetCopyRows(); + + // We write the copy rows if and only if it saves space and the table hasn't any reference rows. + if ((RecordSize) >= sizeof(int) * 2 && ReferenceData.Count == 0) + serializer.GetCopyRows(); + serializer.UpdateStringOffsets(storage); RecordsCount = serializer.Records.Count - CopyData.Count; @@ -295,8 +299,8 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) using (var writer = new BinaryWriter(stream)) { - int minIndex = storage.Keys.Min(); - int maxIndex = storage.Keys.Max(); + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 
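[storage.Keys.Min() and .Max() throw InvalidOperationException on an empty sequence, which would break writing an empty table; MinOrDefault/MaxOrDefault sidestep that. Their definitions are not shown in this patch, so this is an assumed sketch of extensions of that shape in DBCD.IO:]

using System.Collections.Generic;
using System.Linq;

static class MinMaxExtensions
{
    // Assumed behaviour: fall back to 0 for an empty key set so an empty
    // table writes minIndex = maxIndex = 0 instead of throwing.
    public static int MinOrDefault(this ICollection<int> source) =>
        source.Count != 0 ? source.Min() : 0;

    public static int MaxOrDefault(this ICollection<int> source) =>
        source.Count != 0 ? source.Max() : 0;
}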
0 : CopyData.Count * 8; writer.Write(reader.Signature); @@ -313,7 +317,7 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) writer.Write((ushort)IdFieldIndex); writer.Write(FieldsCount); // totalFieldCount - writer.Write(reader.PackedDataOffset); + writer.Write(PackedDataOffset); writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize writer.Write(commonDataSize); @@ -332,7 +336,7 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) writer.Write(StringTableSize); writer.Write(copyTableSize); writer.Write(0); // sparseTableOffset - writer.Write(RecordsCount * 4); // indexTableSize + writer.Write(Flags.HasFlagExt(DB2Flags.Index) ? RecordsCount * 4 : 0); // IndexDataSize writer.Write(referenceDataSize); // field meta @@ -397,7 +401,7 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) // copy table if (!Flags.HasFlagExt(DB2Flags.Sparse)) { - foreach (var copyRecord in CopyData) + foreach (var copyRecord in CopyData.OrderBy(r => r.Value)) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); diff --git a/DBCD.IO/Writers/WDC3Writer.cs b/DBCD.IO/Writers/WDC3Writer.cs index 520e35c..e611f33 100644 --- a/DBCD.IO/Writers/WDC3Writer.cs +++ b/DBCD.IO/Writers/WDC3Writer.cs @@ -5,7 +5,6 @@ using System.IO; using System.Linq; using System.Runtime.CompilerServices; -using System.Text; namespace DBCD.IO.Writers { @@ -15,20 +14,17 @@ class WDC3RowSerializer : IDBRowSerializer where T : class private readonly BaseWriter m_writer; private readonly FieldMetaData[] m_fieldMeta; - private readonly ColumnMetaData[] m_columnMeta; - private readonly List[] m_palletData; - private readonly Dictionary[] m_commonData; - - private static readonly Value32Comparer Value32Comparer = new Value32Comparer(); - + private readonly ColumnMetaData[] ColumnMeta; + private readonly OrderedHashSet[] PalletData; + private readonly Dictionary[] CommonData; public WDC3RowSerializer(BaseWriter writer) { m_writer = writer; m_fieldMeta = m_writer.Meta; - m_columnMeta = m_writer.ColumnMeta; - m_palletData = m_writer.PalletData; - m_commonData = m_writer.CommonData; + ColumnMeta = m_writer.ColumnMeta; + PalletData = m_writer.PalletData; + CommonData = m_writer.CommonData; Records = new Dictionary(); } @@ -63,17 +59,21 @@ public void Serialize(int id, T row) continue; } + // relationship field, used for faster lookup on IDs + if (info.IsRelation) + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); + if (info.IsArray) { - if (arrayWriters.TryGetValue(info.Field.FieldType, out var writer)) - writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], (Array)info.Getter(row)); + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { - if (simpleWriters.TryGetValue(info.Field.FieldType, out var writer)) - writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], m_columnMeta[fieldIndex], m_palletData[fieldIndex], m_commonData[fieldIndex], info.Getter(row)); + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], 
info.Getter(row)); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -90,7 +90,7 @@ public void Serialize(int id, T row) public void GetCopyRows() { - var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1).ToArray(); + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); foreach (var copygroup in copydata) { int key = copygroup.First().Key; @@ -121,7 +121,6 @@ public void UpdateStringOffsets(IDictionary rows) int recordOffset = (Records.Count - m_writer.CopyData.Count) * m_writer.RecordSize; int fieldOffset = 0; - foreach (var record in Records) { // skip copy records @@ -133,7 +132,7 @@ public void UpdateStringOffsets(IDictionary rows) int index = fieldInfo.Key; var info = fieldInfo.Value; - var columnMeta = m_columnMeta[index]; + var columnMeta = ColumnMeta[index]; if (columnMeta.CompressionType != CompressionType.None) throw new Exception("CompressionType != CompressionType.None"); @@ -146,7 +145,7 @@ public void UpdateStringOffsets(IDictionary rows) var array = (string[])info.Getter(rows[record.Key]); for (int i = 0; i < array.Length; i++) { - fieldOffset = m_writer.StringTable[array[i]] + recordOffset - (columnMeta.RecordOffset / 8 * i); + fieldOffset = m_writer.StringTable[array[i]] + (recordOffset) - (sizeof(int) * i) - (columnMeta.RecordOffset / 8); record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset + (i * bitSize)); } } @@ -162,8 +161,9 @@ public void UpdateStringOffsets(IDictionary rows) } - private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, object>> + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> { + [typeof(ulong)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(long)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(float)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), [typeof(int)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), @@ -181,7 +181,7 @@ public void UpdateStringOffsets(IDictionary rows) } }; - private static Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, List, Dictionary, Array>> + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> { [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), [typeof(long[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), @@ -196,7 +196,7 @@ public void UpdateStringOffsets(IDictionary rows) [typeof(string[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, 
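[GetCopyRows deduplicates by grouping on the serialized BitWriter, which can only find duplicates if BitWriter compares by written content rather than by reference; dropping the ToArray() also leaves the grouping lazily enumerated. The equality members below are a sketch of what that grouping assumes; BitWriter's real implementation in DBCD.IO.Common may differ, and the field names here are placeholders:]

using System;

class ContentComparedBuffer // stand-in for BitWriter's assumed equality contract
{
    private byte[] m_buffer = Array.Empty<byte>();
    private int m_length;

    public override bool Equals(object obj) =>
        obj is ContentComparedBuffer other &&
        m_length == other.m_length &&
        m_buffer.AsSpan(0, m_length).SequenceEqual(other.m_buffer.AsSpan(0, other.m_length));

    public override int GetHashCode()
    {
        // Cheap content hash; collisions fall through to Equals.
        int hash = m_length;
        for (int i = 0; i < m_length; i++)
            hash = hash * 31 + m_buffer[i];
        return hash;
    }
}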
fieldMeta, columnMeta, palletData, commonData, (array as string[]).Select(x => writer.InternString(x)).ToArray()), }; - private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, object value) where TType : unmanaged + private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, object value) where TType : unmanaged { switch (columnMeta.CompressionType) { @@ -223,9 +223,8 @@ private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fi } case CompressionType.Pallet: { - Value32[] array = new[] { Value32.Create((TType)value) }; - - int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + Value32[] array = new[] { Value32.Create(value) }; + int palletIndex = palletData.IndexOf(array); if (palletIndex == -1) { palletIndex = palletData.Count; @@ -238,7 +237,7 @@ private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fi } } - private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, List palletData, Dictionary commonData, Array value) where TType : unmanaged + private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, Array value) where TType : unmanaged { switch (columnMeta.CompressionType) { @@ -258,9 +257,9 @@ private static void WriteFieldValueArray(BitWriter r, FieldMetaData field // get data Value32[] array = new Value32[value.Length]; for (int i = 0; i < value.Length; i++) - array[i] = Value32.Create((TType)value.GetValue(i)); + array[i] = Value32.Create(value.GetValue(i)); - int palletIndex = palletData.FindIndex(x => Value32Comparer.Equals(array, x)); + int palletIndex = palletData.IndexOf(array); if (palletIndex == -1) { palletIndex = palletData.Count; @@ -284,9 +283,16 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) // always 2 empties StringTableSize++; + PackedDataOffset = reader.PackedDataOffset; + HandleCompression(storage); + WDC3RowSerializer serializer = new WDC3RowSerializer(this); serializer.Serialize(storage); - serializer.GetCopyRows(); + + // We write the copy rows if and only if it saves space and the table hasn't any reference rows. + if ((RecordSize) >= sizeof(int) * 2 && ReferenceData.Count == 0) + serializer.GetCopyRows(); + serializer.UpdateStringOffsets(storage); RecordsCount = serializer.Records.Count - CopyData.Count; @@ -295,14 +301,14 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) using (var writer = new BinaryWriter(stream)) { - int minIndex = storage.Keys.Min(); - int maxIndex = storage.Keys.Max(); + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); writer.Write(WDC3FmtSig); writer.Write(RecordsCount); writer.Write(FieldsCount); writer.Write(RecordSize); - writer.Write(StringTableSize); + writer.Write(storage.Count != 0 ? StringTableSize : 0); writer.Write(reader.TableHash); writer.Write(reader.LayoutHash); writer.Write(minIndex); @@ -311,29 +317,33 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) writer.Write((ushort)Flags); writer.Write((ushort)IdFieldIndex); - writer.Write(FieldsCount); // totalFieldCount - writer.Write(reader.PackedDataOffset); - writer.Write(ReferenceData.Count > 0 ? 
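[Throughout these writers the uncompressed field width is derived as 32 - fieldMeta.Bits, since the DB2 field metadata stores bit counts relative to 32, with a fall-back to the column's immediate bit width when the result is non-positive (64-bit or packed fields). As a small helper capturing that rule:]

// e.g. Bits = 16 => 16-bit field; Bits = 0 => 32-bit field; Bits = -32
// (a 64-bit field) falls through to the immediate width.
static int ResolveBitSize(int metaBits, int immediateBitWidth)
{
    int bitSize = 32 - metaBits;
    return bitSize > 0 ? bitSize : immediateBitWidth;
}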
1 : 0); // RelationshipColumnCount - writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize - writer.Write(commonDataSize); - writer.Write(palletDataSize); - writer.Write(1); // sections count + writer.Write(FieldsCount); // totalFieldCount + writer.Write(storage.Count != 0 ? PackedDataOffset : 0); + writer.Write(storage.Count != 0 ? (ReferenceData.Count > 0 ? 1 : 0) : 0); // RelationshipColumnCount + writer.Write(storage.Count != 0 ? ColumnMeta.Length * 24 : 0); // ColumnMetaDataSize + writer.Write(storage.Count != 0 ? commonDataSize : 0); + writer.Write(storage.Count != 0 ? palletDataSize : 0); + writer.Write(storage.Count != 0 ? 1 : 0); // sections count if (storage.Count == 0) + { + // only need to write field structure if empty + writer.WriteArray(Meta); return; + } // section header int fileOffset = HeaderSize + (Meta.Length * 4) + (ColumnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; - writer.Write(0UL); // TactKeyLookup - writer.Write(fileOffset); // FileOffset - writer.Write(RecordsCount); // NumRecords + writer.Write(0UL); // TactKeyLookup + writer.Write(fileOffset); // FileOffset + writer.Write(RecordsCount); // NumRecords writer.Write(StringTableSize); - writer.Write(0); // OffsetRecordsEndOffset - writer.Write(RecordsCount * 4); // IndexDataSize - writer.Write(referenceDataSize); // ParentLookupDataSize + writer.Write(0); // OffsetRecordsEndOffset + writer.Write(Flags.HasFlagExt(DB2Flags.Index) ? RecordsCount * 4 : 0); // IndexDataSize + writer.Write(referenceDataSize); // ParentLookupDataSize writer.Write(Flags.HasFlagExt(DB2Flags.Sparse) ? RecordsCount : 0); // OffsetMapIDCount - writer.Write(CopyData.Count); // CopyTableCount + writer.Write(CopyData.Count); // CopyTableCount // field meta writer.WriteArray(Meta); @@ -365,12 +375,12 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) } // record data - var m_sparseEntries = new Dictionary(storage.Count); + var SparseEntries = new Dictionary(storage.Count); foreach (var record in serializer.Records) { if (!CopyData.TryGetValue(record.Key, out int parent)) { - m_sparseEntries.Add(record.Key, new SparseEntry() + SparseEntries.Add(record.Key, new SparseEntry() { Offset = (uint)writer.BaseStream.Position, Size = (ushort)record.Value.TotalBytesWrittenOut @@ -401,8 +411,8 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) if (Flags.HasFlagExt(DB2Flags.Index)) writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); - // copy table - foreach (var copyRecord in CopyData) + // copy table (must be ordered by value) + foreach (var copyRecord in CopyData.OrderBy(r => r.Value)) { writer.Write(copyRecord.Key); writer.Write(copyRecord.Value); @@ -410,7 +420,7 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) // sparse data if (Flags.HasFlagExt(DB2Flags.Sparse)) - writer.WriteArray(m_sparseEntries.Values.ToArray()); + writer.WriteArray(SparseEntries.Values.ToArray()); // reference data if (ReferenceData.Count > 0) @@ -426,9 +436,9 @@ public WDC3Writer(WDC3Reader reader, IDictionary storage, Stream stream) } } - // sparse data idss + // sparse data ids if (Flags.HasFlagExt(DB2Flags.Sparse)) - writer.WriteArray(m_sparseEntries.Keys.ToArray()); + writer.WriteArray(SparseEntries.Keys.ToArray()); } } diff --git a/DBCD.IO/Writers/WDC4Writer.cs b/DBCD.IO/Writers/WDC4Writer.cs new file mode 100644 index 0000000..0afe6e6 --- /dev/null +++ b/DBCD.IO/Writers/WDC4Writer.cs @@ -0,0 +1,477 @@ +using DBCD.IO.Common; +using 
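[The fileOffset computed for the section header is where record data begins: fixed header, field metadata (4 bytes per field), column metadata (24 bytes per column), one section header, then pallet and common blocks. Illustrative arithmetic, assuming the WDC3 section header is 40 bytes (8 + 8 * 4, matching the nine writes above):

  10 fields, 10 columns, 64 bytes of pallet data, no common data:
  fileOffset = 72 (header) + 10 * 4 + 10 * 24 + 40 (section header) + 64 + 0 = 456]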
DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Runtime.CompilerServices; + +namespace DBCD.IO.Writers +{ + class WDC4RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + private readonly FieldMetaData[] m_fieldMeta; + private readonly ColumnMetaData[] ColumnMeta; + private readonly OrderedHashSet[] PalletData; + private readonly Dictionary[] CommonData; + + public WDC4RowSerializer(BaseWriter writer) + { + m_writer = writer; + m_fieldMeta = m_writer.Meta; + ColumnMeta = m_writer.ColumnMeta; + PalletData = m_writer.PalletData; + CommonData = m_writer.CommonData; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + int indexFieldOffSet = 0; + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + { + indexFieldOffSet++; + continue; + } + + int fieldIndex = i - indexFieldOffSet; + + // reference data field + if (fieldIndex >= m_writer.Meta.Length) + { + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); + continue; + } + + // relationship field, used for faster lookup on IDs + if (info.IsRelation) + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + public void UpdateStringOffsets(IDictionary rows) + { + if (m_writer.Flags.HasFlagExt(DB2Flags.Sparse) || m_writer.StringTableSize <= 1) + return; + + int indexFieldOffSet = 0; + var fieldInfos = new Dictionary>(); + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + indexFieldOffSet++; + else if (m_writer.FieldCache[i].Field.FieldType == typeof(string)) + fieldInfos[i - indexFieldOffSet] = m_writer.FieldCache[i]; + else if (m_writer.FieldCache[i].Field.FieldType == typeof(string[])) + fieldInfos[i - indexFieldOffSet] = m_writer.FieldCache[i]; + } + + if (fieldInfos.Count == 0) + return; + + int recordOffset = (Records.Count - m_writer.CopyData.Count) * m_writer.RecordSize; + int fieldOffset = 0; + foreach (var record in Records) + { 
+ // skip copy records + if (m_writer.CopyData.ContainsKey(record.Key)) + continue; + + foreach (var fieldInfo in fieldInfos) + { + int index = fieldInfo.Key; + var info = fieldInfo.Value; + + var columnMeta = ColumnMeta[index]; + if (columnMeta.CompressionType != CompressionType.None) + throw new Exception("CompressionType != CompressionType.None"); + + int bitSize = 32 - m_fieldMeta[index].Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + if (info.IsArray) + { + var array = (string[])info.Getter(rows[record.Key]); + for (int i = 0; i < array.Length; i++) + { + fieldOffset = m_writer.StringTable[array[i]] + (recordOffset) - (sizeof(int) * i) - (columnMeta.RecordOffset / 8); + record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset + (i * bitSize)); + } + } + else + { + fieldOffset = m_writer.StringTable[(string)info.Getter(rows[record.Key])] + recordOffset - (columnMeta.RecordOffset / 8); + record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset); + } + } + + recordOffset -= m_writer.RecordSize; + } + } + + + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> + { + [typeof(ulong)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(long)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(float)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(int)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(uint)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(short)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(ushort)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(sbyte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(byte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(string)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCString((string)value); + else + WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, writer.InternString((string)value)); + } + }; + + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> + { + [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + 
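[The simpleWriters/arrayWriters tables each serializer repeats are a dispatch-by-type pattern: one delegate per CLR field type, selected with a dictionary TryGetValue at serialization time instead of a reflection call per row. A stripped-down illustration of the pattern (names hypothetical):]

using System;
using System.Collections.Generic;

static class TypeDispatch
{
    private static readonly Dictionary<Type, Action<object>> writers =
        new Dictionary<Type, Action<object>>
        {
            [typeof(int)]    = v => Console.WriteLine("int: " + v),
            [typeof(float)]  = v => Console.WriteLine("float: " + v),
            [typeof(string)] = v => Console.WriteLine("string: " + v),
        };

    public static void Write(object value)
    {
        // Mirrors the writers above: unknown field types fail loudly.
        if (writers.TryGetValue(value.GetType(), out var writer))
            writer(value);
        else
            throw new Exception("Unhandled field type: " + value.GetType().Name);
    }
}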
[typeof(long[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(float[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(int[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(uint[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(ushort[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(short[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(byte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(sbyte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(string[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, object value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + r.Write((TType)value, bitSize); + break; + } + case CompressionType.Immediate: + case CompressionType.SignedImmediate: + { + r.Write((TType)value, columnMeta.Immediate.BitWidth); + break; + } + case CompressionType.Common: + { + if (!columnMeta.Common.DefaultValue.GetValue().Equals(value)) + commonData.Add(Id, Value32.Create((TType)value)); + break; + } + case CompressionType.Pallet: + { + Value32[] array = new[] { Value32.Create(value) }; + int palletIndex = palletData.IndexOf(array); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + + private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, Array value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + for (int i = 0; i < value.Length; i++) + r.Write((TType)value.GetValue(i), bitSize); + + break; + } + case CompressionType.PalletArray: + { + // get data + Value32[] array = new Value32[value.Length]; + for (int i = 0; i < value.Length; i++) + array[i] = Value32.Create(value.GetValue(i)); + + int palletIndex = 
palletData.IndexOf(array); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + } + + class WDC4Writer : BaseWriter where T : class + { + private const int HeaderSize = 72; + private const uint WDC4FmtSig = 0x34434457; // WDC4 + + public WDC4Writer(WDC4Reader reader, IDictionary storage, Stream stream) : base(reader) + { + // always 2 empties + StringTableSize++; + + PackedDataOffset = reader.PackedDataOffset; + HandleCompression(storage); + + WDC4RowSerializer serializer = new WDC4RowSerializer(this); + serializer.Serialize(storage); + + // We write the copy rows if and only if it saves space and the table hasn't any reference rows. + if ((RecordSize) >= sizeof(int) * 2 && ReferenceData.Count == 0) + serializer.GetCopyRows(); + + serializer.UpdateStringOffsets(storage); + + RecordsCount = serializer.Records.Count - CopyData.Count; + + var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); + + using (var writer = new BinaryWriter(stream)) + { + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); + + writer.Write(WDC4FmtSig); + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(storage.Count != 0 ? StringTableSize : 0); + writer.Write(reader.TableHash); + writer.Write(reader.LayoutHash); + writer.Write(minIndex); + writer.Write(maxIndex); + writer.Write(reader.Locale); + writer.Write((ushort)Flags); + writer.Write((ushort)IdFieldIndex); + + writer.Write(FieldsCount); // totalFieldCount + writer.Write(storage.Count != 0 ? PackedDataOffset : 0); + writer.Write(storage.Count != 0 ? (ReferenceData.Count > 0 ? 1 : 0) : 0); // RelationshipColumnCount + writer.Write(storage.Count != 0 ? ColumnMeta.Length * 24 : 0); // ColumnMetaDataSize + writer.Write(storage.Count != 0 ? commonDataSize : 0); + writer.Write(storage.Count != 0 ? palletDataSize : 0); + writer.Write(storage.Count != 0 ? 1 : 0); // sections count + + if (storage.Count == 0) + { + // only need to write field structure if empty + writer.WriteArray(Meta); + return; + } + + // section header + int fileOffset = HeaderSize + (Meta.Length * 4) + (ColumnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; + + writer.Write(0UL); // TactKeyLookup + writer.Write(fileOffset); // FileOffset + writer.Write(RecordsCount); // NumRecords + writer.Write(StringTableSize); + writer.Write(0); // OffsetRecordsEndOffset + writer.Write(Flags.HasFlagExt(DB2Flags.Index) ? RecordsCount * 4 : 0); // IndexDataSize + writer.Write(referenceDataSize); // ParentLookupDataSize + writer.Write(Flags.HasFlagExt(DB2Flags.Sparse) ? 
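[The format signatures used by these writers are little-endian FourCC tags: 0x34434457 holds the bytes 'W','D','C','4' in reverse, so writing the uint emits the ASCII string "WDC4" at the start of the file. A quick verification:]

using System;
using System.Text;

class FourCCDemo
{
    static void Main()
    {
        const uint WDC4FmtSig = 0x34434457;
        string tag = Encoding.ASCII.GetString(BitConverter.GetBytes(WDC4FmtSig));
        Console.WriteLine(tag); // "WDC4" on a little-endian host
    }
}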
RecordsCount : 0); // OffsetMapIDCount + writer.Write(CopyData.Count); // CopyTableCount + + // field meta + writer.WriteArray(Meta); + + // column meta data + writer.WriteArray(ColumnMeta); + + // pallet data + for (int i = 0; i < ColumnMeta.Length; i++) + { + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) + { + foreach (var palletData in PalletData[i]) + writer.WriteArray(palletData); + } + } + + // common data + for (int i = 0; i < ColumnMeta.Length; i++) + { + if (ColumnMeta[i].CompressionType == CompressionType.Common) + { + foreach (var commondata in CommonData[i]) + { + writer.Write(commondata.Key); + writer.Write(commondata.Value.GetValue()); + } + } + } + + // no need for encrypted_status since we're just writing one section with tact_key_hash == 0 + + // record data + var SparseEntries = new Dictionary(storage.Count); + foreach (var record in serializer.Records) + { + if (!CopyData.TryGetValue(record.Key, out int parent)) + { + SparseEntries.Add(record.Key, new SparseEntry() + { + Offset = (uint)writer.BaseStream.Position, + Size = (ushort)record.Value.TotalBytesWrittenOut + }); + + record.Value.CopyTo(writer.BaseStream); + } + } + + // string table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + writer.WriteCString(""); + foreach (var str in StringTable) + writer.WriteCString(str.Key); + } + + // set the OffsetRecordsEndOffset + if (Flags.HasFlagExt(DB2Flags.Sparse)) + { + long oldPos = writer.BaseStream.Position; + writer.BaseStream.Position = 92; + writer.Write((uint)oldPos); + writer.BaseStream.Position = oldPos; + } + + // index table + if (Flags.HasFlagExt(DB2Flags.Index)) + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); + + // copy table (must be ordered by value) + foreach (var copyRecord in CopyData.OrderBy(r => r.Value)) + { + writer.Write(copyRecord.Key); + writer.Write(copyRecord.Value); + } + + // sparse data + if (Flags.HasFlagExt(DB2Flags.Sparse)) + writer.WriteArray(SparseEntries.Values.ToArray()); + + // reference data + if (ReferenceData.Count > 0) + { + writer.Write(ReferenceData.Count); + writer.Write(ReferenceData.Min()); + writer.Write(ReferenceData.Max()); + + for (int i = 0; i < ReferenceData.Count; i++) + { + writer.Write(ReferenceData[i]); + writer.Write(i); + } + } + + // sparse data ids + if (Flags.HasFlagExt(DB2Flags.Sparse)) + writer.WriteArray(SparseEntries.Keys.ToArray()); + } + } + + private (int CommonDataSize, int PalletDataSize, int RefDataSize) GetDataSizes() + { + // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords] + int refSize = 0; + if (ReferenceData.Count > 0) + refSize = 12 + (ReferenceData.Count * 8); + + int commonSize = 0, palletSize = 0; + for (int i = 0; i < ColumnMeta.Length; i++) + { + switch (ColumnMeta[i].CompressionType) + { + // {uint id, uint copyid}[] + case CompressionType.Common: + ColumnMeta[i].AdditionalDataSize = (uint)(CommonData[i].Count * 8); + commonSize += (int)ColumnMeta[i].AdditionalDataSize; + break; + + // {uint values[Cardinality]}[] + case CompressionType.Pallet: + case CompressionType.PalletArray: + ColumnMeta[i].AdditionalDataSize = (uint)PalletData[i].Sum(x => x.Length * 4); + palletSize += (int)ColumnMeta[i].AdditionalDataSize; + break; + } + } + + return (commonSize, palletSize, refSize); + } + } +} diff --git a/DBCD.IO/Writers/WDC5Writer.cs b/DBCD.IO/Writers/WDC5Writer.cs new file mode 100644 index 0000000..fe88421 --- /dev/null +++ b/DBCD.IO/Writers/WDC5Writer.cs @@ -0,0 
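[GetDataSizes mirrors the on-disk layouts spelled out in its comments: reference data is a 12-byte header ({uint NumRecords, uint minId, uint maxId}) plus an 8-byte {uint id, uint index} pair per row; common data is an 8-byte {uint id, uint value} pair per override; pallet data is one 4-byte Value32 per element of each pallet entry. Worked arithmetic with illustrative counts:

  200 reference rows                  -> 12 + 200 * 8 = 1612 bytes
  50 common overrides                 -> 50 * 8       = 400 bytes
  30 pallet entries of cardinality 3  -> 30 * 3 * 4   = 360 bytes]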
+1,483 @@ +using DBCD.IO.Common; +using DBCD.IO.Readers; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text; + +namespace DBCD.IO.Writers +{ + class WDC5RowSerializer : IDBRowSerializer where T : class + { + public IDictionary Records { get; private set; } + + private readonly BaseWriter m_writer; + private readonly FieldMetaData[] m_fieldMeta; + private readonly ColumnMetaData[] ColumnMeta; + private readonly OrderedHashSet[] PalletData; + private readonly Dictionary[] CommonData; + + public WDC5RowSerializer(BaseWriter writer) + { + m_writer = writer; + m_fieldMeta = m_writer.Meta; + ColumnMeta = m_writer.ColumnMeta; + PalletData = m_writer.PalletData; + CommonData = m_writer.CommonData; + + Records = new Dictionary(); + } + + public void Serialize(IDictionary rows) + { + foreach (var row in rows) + Serialize(row.Key, row.Value); + } + + public void Serialize(int id, T row) + { + BitWriter bitWriter = new BitWriter(m_writer.RecordSize); + + int indexFieldOffSet = 0; + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + FieldCache info = m_writer.FieldCache[i]; + + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + { + indexFieldOffSet++; + continue; + } + + int fieldIndex = i - indexFieldOffSet; + + // reference data field + if (fieldIndex >= m_writer.Meta.Length) + { + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); + continue; + } + + // relationship field, used for faster lookup on IDs + if (info.IsRelation) + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); + + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } + else + { + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } + } + + // pad to record size + if (!m_writer.Flags.HasFlagExt(DB2Flags.Sparse)) + bitWriter.Resize(m_writer.RecordSize); + else + bitWriter.ResizeToMultiple(4); + + Records[id] = bitWriter; + } + + public void GetCopyRows() + { + var copydata = Records.GroupBy(x => x.Value).Where(x => x.Count() > 1); + foreach (var copygroup in copydata) + { + int key = copygroup.First().Key; + foreach (var copy in copygroup.Skip(1)) + m_writer.CopyData[copy.Key] = key; + } + } + + public void UpdateStringOffsets(IDictionary rows) + { + if (m_writer.Flags.HasFlagExt(DB2Flags.Sparse) || m_writer.StringTableSize <= 1) + return; + + int indexFieldOffSet = 0; + var fieldInfos = new Dictionary>(); + for (int i = 0; i < m_writer.FieldCache.Length; i++) + { + if (i == m_writer.IdFieldIndex && m_writer.Flags.HasFlagExt(DB2Flags.Index)) + indexFieldOffSet++; + else if (m_writer.FieldCache[i].Field.FieldType == typeof(string)) + fieldInfos[i - indexFieldOffSet] = m_writer.FieldCache[i]; + else if (m_writer.FieldCache[i].Field.FieldType == typeof(string[])) + fieldInfos[i - indexFieldOffSet] = m_writer.FieldCache[i]; + } + + if (fieldInfos.Count == 0) + return; + + int recordOffset = (Records.Count - m_writer.CopyData.Count) * m_writer.RecordSize; 
+ int fieldOffset = 0; + foreach (var record in Records) + { + // skip copy records + if (m_writer.CopyData.ContainsKey(record.Key)) + continue; + + foreach (var fieldInfo in fieldInfos) + { + int index = fieldInfo.Key; + var info = fieldInfo.Value; + + var columnMeta = ColumnMeta[index]; + if (columnMeta.CompressionType != CompressionType.None) + throw new Exception("CompressionType != CompressionType.None"); + + int bitSize = 32 - m_fieldMeta[index].Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + if (info.IsArray) + { + var array = (string[])info.Getter(rows[record.Key]); + for (int i = 0; i < array.Length; i++) + { + fieldOffset = m_writer.StringTable[array[i]] + (recordOffset) - (sizeof(int) * i) - (columnMeta.RecordOffset / 8); + record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset + (i * bitSize)); + } + } + else + { + fieldOffset = m_writer.StringTable[(string)info.Getter(rows[record.Key])] + recordOffset - (columnMeta.RecordOffset / 8); + record.Value.Write(fieldOffset, bitSize, columnMeta.RecordOffset); + } + } + + recordOffset -= m_writer.RecordSize; + } + } + + + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> simpleWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, object>> + { + [typeof(ulong)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(long)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(float)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(int)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(uint)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(short)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(ushort)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(sbyte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(byte)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, value), + [typeof(string)] = (id, data, writer, fieldMeta, columnMeta, palletData, commonData, value) => + { + if (writer.Flags.HasFlagExt(DB2Flags.Sparse)) + data.WriteCString((string)value); + else + WriteFieldValue(id, data, fieldMeta, columnMeta, palletData, commonData, writer.InternString((string)value)); + } + }; + + private static Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> arrayWriters = new Dictionary, FieldMetaData, ColumnMetaData, OrderedHashSet, Dictionary, Array>> + { + [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, 
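[The Common branch in WriteFieldValue only records rows whose value differs from the column-wide default, so absence from the map means "default". Reading such a column back is the inverse; a hedged sketch under that assumption, not the library's actual reader code:]

using System.Collections.Generic;

static class CommonColumn
{
    public static TType Read<TType>(
        int id,
        TType defaultValue,
        IReadOnlyDictionary<int, TType> overrides) where TType : struct
    {
        // Rows missing from the override map implicitly hold the default,
        // which is exactly why equal-to-default values are never written.
        return overrides.TryGetValue(id, out TType value) ? value : defaultValue;
    }
}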
fieldMeta, columnMeta, palletData, commonData, array), + [typeof(long[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(float[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(int[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(uint[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(ulong[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(ushort[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(short[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(byte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(sbyte[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, array), + [typeof(string[])] = (data, writer, fieldMeta, columnMeta, palletData, commonData, array) => WriteFieldValueArray(data, fieldMeta, columnMeta, palletData, commonData, (array as string[]).Select(x => writer.InternString(x)).ToArray()), + }; + + private static void WriteFieldValue(int Id, BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, object value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + r.Write((TType)value, bitSize); + break; + } + case CompressionType.Immediate: + case CompressionType.SignedImmediate: + { + r.Write((TType)value, columnMeta.Immediate.BitWidth); + break; + } + case CompressionType.Common: + { + if (!columnMeta.Common.DefaultValue.GetValue().Equals(value)) + commonData.Add(Id, Value32.Create((TType)value)); + break; + } + case CompressionType.Pallet: + { + Value32[] array = new[] { Value32.Create(value) }; + int palletIndex = palletData.IndexOf(array); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + + private static void WriteFieldValueArray(BitWriter r, FieldMetaData fieldMeta, ColumnMetaData columnMeta, OrderedHashSet palletData, Dictionary commonData, Array value) where TType : unmanaged + { + switch (columnMeta.CompressionType) + { + case CompressionType.None: + { + int bitSize = 32 - fieldMeta.Bits; + if (bitSize <= 0) + bitSize = columnMeta.Immediate.BitWidth; + + for (int i = 0; i < value.Length; i++) + r.Write((TType)value.GetValue(i), bitSize); + + break; + } + case CompressionType.PalletArray: + { + // get data + Value32[] array = new Value32[value.Length]; + for (int i = 0; i < value.Length; i++) + array[i] = 
Value32.Create(value.GetValue(i)); + + int palletIndex = palletData.IndexOf(array); + if (palletIndex == -1) + { + palletIndex = palletData.Count; + palletData.Add(array); + } + + r.Write(palletIndex, columnMeta.Pallet.BitWidth); + break; + } + } + } + } + + class WDC5Writer : BaseWriter where T : class + { + private const int HeaderSize = 200; + private const uint WDC5FmtSig = 0x35434457; // WDC5 + + public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) : base(reader) + { + // always 2 empties + StringTableSize++; + + PackedDataOffset = reader.PackedDataOffset; + HandleCompression(storage); + + WDC5RowSerializer serializer = new WDC5RowSerializer(this); + serializer.Serialize(storage); + + // We write the copy rows if and only if it saves space and the table hasn't any reference rows. + if ((RecordSize) >= sizeof(int) * 2 && ReferenceData.Count == 0) + serializer.GetCopyRows(); + + serializer.UpdateStringOffsets(storage); + + RecordsCount = serializer.Records.Count - CopyData.Count; + + var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); + + var staticVersionString = "WowStatic_Patch_10_2_7"; // @TODO Make string dynamic based on build + + using (var writer = new BinaryWriter(stream)) + { + int minIndex = storage.Keys.MinOrDefault(); + int maxIndex = storage.Keys.MaxOrDefault(); + + writer.Write(WDC5FmtSig); + writer.Write((uint)5); // numeric version + writer.Write(Encoding.ASCII.GetBytes(staticVersionString.PadRight(128, '\0'))); + + writer.Write(RecordsCount); + writer.Write(FieldsCount); + writer.Write(RecordSize); + writer.Write(storage.Count != 0 ? StringTableSize : 0); + writer.Write(reader.TableHash); + writer.Write(reader.LayoutHash); + writer.Write(minIndex); + writer.Write(maxIndex); + writer.Write(reader.Locale); + writer.Write((ushort)Flags); + writer.Write((ushort)IdFieldIndex); + + writer.Write(FieldsCount); // totalFieldCount + writer.Write(storage.Count != 0 ? PackedDataOffset : 0); + writer.Write(storage.Count != 0 ? (ReferenceData.Count > 0 ? 1 : 0) : 0); // RelationshipColumnCount + writer.Write(storage.Count != 0 ? ColumnMeta.Length * 24 : 0); // ColumnMetaDataSize + writer.Write(storage.Count != 0 ? commonDataSize : 0); + writer.Write(storage.Count != 0 ? palletDataSize : 0); + writer.Write(storage.Count != 0 ? 1 : 0); // sections count + + if (storage.Count == 0) + { + // only need to write field structure if empty + writer.WriteArray(Meta); + return; + } + + // section header + int fileOffset = HeaderSize + (Meta.Length * 4) + (ColumnMeta.Length * 24) + Unsafe.SizeOf() + palletDataSize + commonDataSize; + + writer.Write(0UL); // TactKeyLookup + writer.Write(fileOffset); // FileOffset + writer.Write(RecordsCount); // NumRecords + writer.Write(StringTableSize); + writer.Write(0); // OffsetRecordsEndOffset + writer.Write(Flags.HasFlagExt(DB2Flags.Index) ? RecordsCount * 4 : 0); // IndexDataSize + writer.Write(referenceDataSize); // ParentLookupDataSize + writer.Write(Flags.HasFlagExt(DB2Flags.Sparse) ?
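[WDC5 prefixes the classic header with a numeric version and a fixed 128-byte, NUL-padded build-version string. PadRight(128, '\0') plus ASCII encoding yields exactly 128 bytes for any shorter string; note that PadRight never truncates, so a version name over 128 characters would overflow the field:]

using System.Diagnostics;
using System.Text;

class VersionFieldDemo
{
    static void Main()
    {
        string staticVersionString = "WowStatic_Patch_10_2_7";
        byte[] field = Encoding.ASCII.GetBytes(staticVersionString.PadRight(128, '\0'));
        Debug.Assert(field.Length == 128); // 22 name bytes + 106 NUL padding
    }
}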
RecordsCount : 0); // OffsetMapIDCount + writer.Write(CopyData.Count); // CopyTableCount + + // field meta + writer.WriteArray(Meta); + + // column meta data + writer.WriteArray(ColumnMeta); + + // pallet data + for (int i = 0; i < ColumnMeta.Length; i++) + { + if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray) + { + foreach (var palletData in PalletData[i]) + writer.WriteArray(palletData); + } + } + + // common data + for (int i = 0; i < ColumnMeta.Length; i++) + { + if (ColumnMeta[i].CompressionType == CompressionType.Common) + { + foreach (var commondata in CommonData[i]) + { + writer.Write(commondata.Key); + writer.Write(commondata.Value.GetValue()); + } + } + } + + // no need for encrypted_status since we're just writing one section with tact_key_hash == 0 + + // record data + var SparseEntries = new Dictionary(storage.Count); + foreach (var record in serializer.Records) + { + if (!CopyData.TryGetValue(record.Key, out int parent)) + { + SparseEntries.Add(record.Key, new SparseEntry() + { + Offset = (uint)writer.BaseStream.Position, + Size = (ushort)record.Value.TotalBytesWrittenOut + }); + + record.Value.CopyTo(writer.BaseStream); + } + } + + // string table + if (!Flags.HasFlagExt(DB2Flags.Sparse)) + { + writer.WriteCString(""); + foreach (var str in StringTable) + writer.WriteCString(str.Key); + } + + // set the OffsetRecordsEndOffset + if (Flags.HasFlagExt(DB2Flags.Sparse)) + { + long oldPos = writer.BaseStream.Position; + writer.BaseStream.Position = 92; + writer.Write((uint)oldPos); + writer.BaseStream.Position = oldPos; + } + + // index table + if (Flags.HasFlagExt(DB2Flags.Index)) + writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray()); + + // copy table (must be ordered by value) + foreach (var copyRecord in CopyData.OrderBy(r => r.Value)) + { + writer.Write(copyRecord.Key); + writer.Write(copyRecord.Value); + } + + // sparse data + if (Flags.HasFlagExt(DB2Flags.Sparse)) + writer.WriteArray(SparseEntries.Values.ToArray()); + + // reference data + if (ReferenceData.Count > 0) + { + writer.Write(ReferenceData.Count); + writer.Write(ReferenceData.Min()); + writer.Write(ReferenceData.Max()); + + for (int i = 0; i < ReferenceData.Count; i++) + { + writer.Write(ReferenceData[i]); + writer.Write(i); + } + } + + // sparse data ids + if (Flags.HasFlagExt(DB2Flags.Sparse)) + writer.WriteArray(SparseEntries.Keys.ToArray()); + } + } + + private (int CommonDataSize, int PalletDataSize, int RefDataSize) GetDataSizes() + { + // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords] + int refSize = 0; + if (ReferenceData.Count > 0) + refSize = 12 + (ReferenceData.Count * 8); + + int commonSize = 0, palletSize = 0; + for (int i = 0; i < ColumnMeta.Length; i++) + { + switch (ColumnMeta[i].CompressionType) + { + // {uint id, uint copyid}[] + case CompressionType.Common: + ColumnMeta[i].AdditionalDataSize = (uint)(CommonData[i].Count * 8); + commonSize += (int)ColumnMeta[i].AdditionalDataSize; + break; + + // {uint values[Cardinality]}[] + case CompressionType.Pallet: + case CompressionType.PalletArray: + ColumnMeta[i].AdditionalDataSize = (uint)PalletData[i].Sum(x => x.Length * 4); + palletSize += (int)ColumnMeta[i].AdditionalDataSize; + break; + } + } + + return (commonSize, palletSize, refSize); + } + } +} diff --git a/DBCD.Tests/DBCD.Tests.csproj b/DBCD.Tests/DBCD.Tests.csproj index 7c00e3e..011f93f 100644 --- a/DBCD.Tests/DBCD.Tests.csproj +++ b/DBCD.Tests/DBCD.Tests.csproj @@ 
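[The OffsetRecordsEndOffset handling above is a backpatch: the value is only known after all sparse records have been written, so the writer records the current position, seeks back to the header slot, writes the final offset, and restores the position. In generic form:]

using System.IO;

static class StreamBackpatch
{
    // headerSlot is the absolute position of the placeholder written earlier.
    public static void Apply(BinaryWriter writer, long headerSlot, uint value)
    {
        long oldPos = writer.BaseStream.Position;
        writer.BaseStream.Position = headerSlot;
        writer.Write(value);
        writer.BaseStream.Position = oldPos;
    }
}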
-1,11 +1,9 @@ - + net8.0 - false - - 7.3 + 9.0 diff --git a/DBCD/DBCDStorage.cs b/DBCD/DBCDStorage.cs index 73db978..df4c370 100644 --- a/DBCD/DBCDStorage.cs +++ b/DBCD/DBCDStorage.cs @@ -1,5 +1,7 @@ using DBCD.Helpers; + using DBCD.IO; +using System; using System.Collections; using System.Collections.Generic; using System.Collections.ObjectModel; @@ -11,10 +13,10 @@ namespace DBCD { public class DBCDRow : DynamicObject { - public int ID; + private int ID; private readonly dynamic raw; - private readonly FieldAccessor fieldAccessor; + private FieldAccessor fieldAccessor; internal DBCDRow(int ID, dynamic raw, FieldAccessor fieldAccessor) { @@ -28,14 +30,21 @@ public override bool TryGetMember(GetMemberBinder binder, out object result) return fieldAccessor.TryGetMember(this.raw, binder.Name, out result); } + public override bool TrySetMember(SetMemberBinder binder, object value) + { + return fieldAccessor.TrySetMember(this.raw, binder.Name, value); + } + public object this[string fieldName] { get => fieldAccessor[this.raw, fieldName]; + set => fieldAccessor[this.raw, fieldName] = value; } public object this[string fieldName, int index] { get => ((Array)this[fieldName]).GetValue(index); + set => ((Array)this[fieldName]).SetValue(value, index); } public T Field(string fieldName) @@ -52,6 +61,10 @@ public override IEnumerable GetDynamicMemberNames() { return fieldAccessor.FieldNames; } + + public T AsType() => (T)raw; + + public Type GetUnderlyingType() => raw.GetType(); } public class DynamicKeyValuePair @@ -66,66 +79,123 @@ internal DynamicKeyValuePair(T key, dynamic value) } } + public class RowConstructor + { + private readonly IDBCDStorage storage; + public RowConstructor(IDBCDStorage storage) + { + this.storage = storage; + } + + public bool Create(int index, Action f) + { + var constructedRow = storage.ConstructRow(index); + if (storage.ContainsKey(index)) + return false; + else + { + f(constructedRow); + storage.Add(index, constructedRow); + } + + return true; + } + } + public interface IDBCDStorage : IEnumerable>, IDictionary { string[] AvailableColumns { get; } + DBCDRow ConstructRow(int index); + + void ApplyingHotfixes(HotfixReader hotfixReader); + void ApplyingHotfixes(HotfixReader hotfixReader, HotfixReader.RowProcessor processor); + Dictionary GetEncryptedSections(); - Dictionary GetEncryptedIDs(); - IDBCDStorage ApplyingHotfixes(HotfixReader hotfixReader); - IDBCDStorage ApplyingHotfixes(HotfixReader hotfixReader, HotfixReader.RowProcessor processor); + void Save(string filename); + + Dictionary ToDictionary(); } - public class DBCDStorage : ReadOnlyDictionary, IDBCDStorage where T : class, new() + public class DBCDStorage : Dictionary, IDBCDStorage where T : class, new() { private readonly FieldAccessor fieldAccessor; - private readonly ReadOnlyDictionary storage; + private readonly Storage storage; private readonly DBCDInfo info; - private readonly DBReader reader; + private readonly DBParser parser; string[] IDBCDStorage.AvailableColumns => this.info.availableColumns; public override string ToString() => $"{this.info.tableName}"; public DBCDStorage(Stream stream, DBCDInfo info) : this(new DBParser(stream), info) { } - public DBCDStorage(DBParser dbReader, DBCDInfo info) : base(new Dictionary()) + public DBCDStorage(DBParser dbParser, DBCDInfo info) : this(dbParser, dbParser.GetRecords(), info) { } + + public DBCDStorage(DBParser parser, Storage storage, DBCDInfo info) : base(new Dictionary()) { - this.availableColumns = info.availableColumns; - this.tableName = 
info.tableName; - this.fieldAccessor = new FieldAccessor(typeof(T)); + this.info = info; + this.fieldAccessor = new FieldAccessor(typeof(T), info.availableColumns); + this.parser = parser; + this.storage = storage; - // populate the collection so we don't iterate all values and create new rows each time - storage = new ReadOnlyDictionary(dbReader.ReadRecords()); foreach (var record in storage) - base.Dictionary.Add(record.Key, new DBCDRow(record.Key, record.Value, fieldAccessor)); + base.Add(record.Key, new DBCDRow(record.Key, record.Value, fieldAccessor)); - // clear temp data from memory - dbReader.ClearCache(); + storage.Clear(); } - public IDBCDStorage ApplyingHotfixes(HotfixReader hotfixReader) + IEnumerator> IEnumerable>.GetEnumerator() { - return this.ApplyingHotfixes(hotfixReader, null); + var enumerator = GetEnumerator(); + while (enumerator.MoveNext()) + yield return new DynamicKeyValuePair(enumerator.Current.Key, enumerator.Current.Value); } - public IDBCDStorage ApplyingHotfixes(HotfixReader hotfixReader, HotfixReader.RowProcessor processor) + public Dictionary GetEncryptedSections() => this.parser.GetEncryptedSections(); + + public void ApplyingHotfixes(HotfixReader hotfixReader) { - var mutableStorage = this.storage.ToDictionary(k => k.Key, v => v.Value); + this.ApplyingHotfixes(hotfixReader, null); + } - hotfixReader.ApplyHotfixes(mutableStorage, this.reader, processor); + public void ApplyingHotfixes(HotfixReader hotfixReader, HotfixReader.RowProcessor processor) + { + var mutableStorage = this.storage.ToDictionary(k => k.Key, v => v.Value); - return new DBCDStorage(this.reader, new ReadOnlyDictionary(mutableStorage), this.info); + hotfixReader.ApplyHotfixes(mutableStorage, this.parser, processor); + +#if NETSTANDARD2_0 + foreach(var record in mutableStorage) + base[record.Key] = new DBCDRow(record.Key, record.Value, fieldAccessor); +#else + foreach (var (id, row) in mutableStorage) + base[id] = new DBCDRow(id, row, fieldAccessor); +#endif + foreach (var key in mutableStorage.Keys.Except(base.Keys)) + base.Remove(key); } - IEnumerator> IEnumerable>.GetEnumerator() + public void Save(string filename) { - var enumerator = GetEnumerator(); - while (enumerator.MoveNext()) - yield return new DynamicKeyValuePair(enumerator.Current.Key, enumerator.Current.Value); +#if NETSTANDARD2_0 + var sortedDictionary = new SortedDictionary(this); + foreach (var record in sortedDictionary) + storage.Add(record.Key, record.Value.AsType()); +#else + foreach (var (id, record) in new SortedDictionary(this)) + storage.Add(id, record.AsType()); +#endif + + + storage?.Save(filename); } - public Dictionary GetEncryptedSections() => this.reader.GetEncryptedSections(); - public Dictionary GetEncryptedIDs() => this.reader.GetEncryptedIDs(); + public DBCDRow ConstructRow(int index) => new DBCDRow(index, new T(), fieldAccessor); + + public Dictionary ToDictionary() + { + return this; + } } -} +} \ No newline at end of file diff --git a/DBCD/Helpers/FieldAccessor.cs b/DBCD/Helpers/FieldAccessor.cs index 8a6735b..30e6a1f 100644 --- a/DBCD/Helpers/FieldAccessor.cs +++ b/DBCD/Helpers/FieldAccessor.cs @@ -7,39 +7,48 @@ namespace DBCD.Helpers { internal class FieldAccessor { - public IEnumerable FieldNames => _accessors.Keys; + public IEnumerable FieldNames => _getters.Keys; + + private readonly Dictionary> _getters; + private readonly Dictionary> _setters; - private readonly Dictionary> _accessors; private readonly CultureInfo _convertCulture; public FieldAccessor(Type type, string[] fields) { - _accessors = 
new Dictionary>(); + _getters = new Dictionary>(); + _setters = new Dictionary>(); _convertCulture = CultureInfo.InvariantCulture; var ownerParameter = Expression.Parameter(typeof(object)); + var valueParameter = Expression.Parameter(typeof(object)); foreach (var field in fields) { var fieldExpression = Expression.Field(Expression.Convert(ownerParameter, type), field); + var conversionExpression = Expression.Convert(fieldExpression, typeof(object)); - var accessorExpression = Expression.Lambda>(conversionExpression, ownerParameter); + var getterExpression = Expression.Lambda>(conversionExpression, ownerParameter); + _getters.Add(field, getterExpression.Compile()); + - _accessors.Add(field, accessorExpression.Compile()); + var assignExpression = Expression.Assign(fieldExpression, Expression.Convert(valueParameter, fieldExpression.Type)); + var setterExpression = Expression.Lambda>(assignExpression, ownerParameter, valueParameter); + _setters.Add(field, setterExpression.Compile()); } } - public object this[object obj, string key] { - get => _accessors[key](obj); + get => _getters[key](obj); + set => _setters[key](obj, value); } public bool TryGetMember(object obj, string field, out object value) { - if (_accessors.TryGetValue(field, out var accessor)) + if (_getters.TryGetValue(field, out var getter)) { - value = accessor(obj); + value = getter(obj); return true; } else @@ -49,9 +58,20 @@ public bool TryGetMember(object obj, string field, out object value) } } + public bool TrySetMember(object obj, string field, object value) + { + if (_setters.TryGetValue(field, out var setter)) + { + setter(obj, value); + return true; + } + + return false; + } + public T GetMemberAs(object obj, string field) { - var value = _accessors[field](obj); + var value = _getters[field](obj); if (value is T direct) return direct; @@ -66,7 +86,6 @@ public T GetMemberAs(object obj, string field) } } - private T ConvertArray(Array array) { var type = typeof(T); @@ -85,4 +104,4 @@ private T ConvertArray(Array array) return (T)(object)result; } } -} +} \ No newline at end of file From b665b88c9b7f7a2856d2e3e3993f5f545911f54b Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:24:32 +0200 Subject: [PATCH 12/40] Fix WDC5 header size Co-Authored-By: barncastle <1619104+barncastle@users.noreply.github.com> Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> Co-Authored-By: Luzifix <7042325+luzifix@users.noreply.github.com> --- DBCD.IO/Readers/WDC5Reader.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DBCD.IO/Readers/WDC5Reader.cs b/DBCD.IO/Readers/WDC5Reader.cs index dc411ec..70d7c07 100644 --- a/DBCD.IO/Readers/WDC5Reader.cs +++ b/DBCD.IO/Readers/WDC5Reader.cs @@ -248,7 +248,7 @@ public IDBRow Clone() class WDC5Reader : BaseEncryptionSupportingReader { - private const int HeaderSize = 72; + private const int HeaderSize = 200; private const uint WDC5FmtSig = 0x35434457; // WDC5 public WDC5Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) { } From e6a5f3b4b0ec6115628e49b12400aff43d6d3fbd Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:46:04 +0200 Subject: [PATCH 13/40] Reconcile reader differences Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> Co-Authored-By: Luzifix <7042325+luzifix@users.noreply.github.com> --- DBCD.IO/Readers/WDB2Reader.cs | 23 ++++++------ DBCD.IO/Readers/WDB3Reader.cs | 21 ++++++----- DBCD.IO/Readers/WDB4Reader.cs | 21 ++++++----- 
DBCD.IO/Readers/WDB5Reader.cs | 29 ++++++++------- DBCD.IO/Readers/WDB6Reader.cs | 37 ++++++++++---------- DBCD.IO/Readers/WDBCReader.cs | 19 +++++----- DBCD.IO/Readers/WDC1Reader.cs | 42 +++++++++++----------- DBCD.IO/Readers/WDC2Reader.cs | 29 ++++++++------- DBCD.IO/Readers/WDC3Reader.cs | 66 ++++++++++++++++------------------- DBCD.IO/Readers/WDC4Reader.cs | 25 +++++++------ DBCD.IO/Readers/WDC5Reader.cs | 4 +-- 11 files changed, 152 insertions(+), 164 deletions(-) diff --git a/DBCD.IO/Readers/WDB2Reader.cs b/DBCD.IO/Readers/WDB2Reader.cs index 0e20983..e2a97ad 100644 --- a/DBCD.IO/Readers/WDB2Reader.cs +++ b/DBCD.IO/Readers/WDB2Reader.cs @@ -1,26 +1,25 @@ -using DBCD.IO.Common; -using System; +using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Runtime.CompilerServices; using System.Text; +using DBCD.IO.Common; namespace DBCD.IO.Readers { class WDB2Row : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } public WDB2Row(BaseReader reader, BitReader data, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordIndex = recordIndex + 1; Id = m_recordIndex = recordIndex + 1; @@ -61,7 +60,7 @@ public void GetFields(FieldCache[] fields, T entry) FieldCache info = fields[i]; if (info.IndexMapField) { - Id = GetFieldValue(m_data); + Id = GetFieldValue(Data); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; } @@ -71,20 +70,20 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_reader.StringTable, info.Cardinality); + value = reader(Data, m_reader.StringTable, info.Cardinality); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else if (info.IsLocalisedString) { - m_data.Position += 32 * info.LocaleInfo.Locale; - value = simpleReaders[typeof(string)](m_data, m_reader.StringTable, m_reader); - m_data.Position += 32 * (info.LocaleInfo.LocaleCount - info.LocaleInfo.Locale); + Data.Position += 32 * info.LocaleInfo.Locale; + value = simpleReaders[typeof(string)](Data, m_reader.StringTable, m_reader); + Data.Position += 32 * (info.LocaleInfo.LocaleCount - info.LocaleInfo.Locale); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_reader.StringTable, m_reader); + value = reader(Data, m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -123,7 +122,7 @@ public WDB2Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB2Reader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB2 file is corrupted!"); diff --git a/DBCD.IO/Readers/WDB3Reader.cs b/DBCD.IO/Readers/WDB3Reader.cs index 24c47ac..e2d7de9 100644 --- a/DBCD.IO/Readers/WDB3Reader.cs +++ b/DBCD.IO/Readers/WDB3Reader.cs @@ -10,25 +10,24 @@ namespace DBCD.IO.Readers { class WDB3Row : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_dataOffset; private readonly int m_dataPosition; private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; 
set; } public WDB3Row(BaseReader reader, BitReader data, int id, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordIndex = recordIndex; Id = id; - m_dataOffset = m_data.Offset; - m_dataPosition = m_data.Position; + m_dataOffset = Data.Offset; + m_dataPosition = Data.Position; } private static Dictionary, BaseReader, object>> simpleReaders = new Dictionary, BaseReader, object>> @@ -63,8 +62,8 @@ public void GetFields(FieldCache[] fields, T entry) { int indexFieldOffSet = 0; - m_data.Position = m_dataPosition; - m_data.Offset = m_dataOffset; + Data.Position = m_dataPosition; + Data.Offset = m_dataOffset; for (int i = 0; i < fields.Length; i++) { @@ -74,7 +73,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(m_data); + Id = GetFieldValue(Data); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -93,14 +92,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_reader.StringTable, info.Cardinality); + value = reader(Data, m_reader.StringTable, info.Cardinality); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_reader.StringTable, m_reader); + value = reader(Data, m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -147,7 +146,7 @@ public WDB3Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB3Reader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB3 file is corrupted!"); diff --git a/DBCD.IO/Readers/WDB4Reader.cs b/DBCD.IO/Readers/WDB4Reader.cs index 0c65fd4..ab6be1c 100644 --- a/DBCD.IO/Readers/WDB4Reader.cs +++ b/DBCD.IO/Readers/WDB4Reader.cs @@ -10,25 +10,24 @@ namespace DBCD.IO.Readers { class WDB4Row : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_dataOffset; private readonly int m_dataPosition; private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } public WDB4Row(BaseReader reader, BitReader data, int id, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordIndex = recordIndex; Id = id; - m_dataOffset = m_data.Offset; - m_dataPosition = m_data.Position; + m_dataOffset = Data.Offset; + m_dataPosition = Data.Position; } private static Dictionary, BaseReader, object>> simpleReaders = new Dictionary, BaseReader, object>> @@ -63,8 +62,8 @@ public void GetFields(FieldCache[] fields, T entry) { int indexFieldOffSet = 0; - m_data.Position = m_dataPosition; - m_data.Offset = m_dataOffset; + Data.Position = m_dataPosition; + Data.Offset = m_dataOffset; for (int i = 0; i < fields.Length; i++) { @@ -74,7 +73,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(m_data); + Id = GetFieldValue(Data); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -93,14 +92,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_reader.StringTable, info.Cardinality); 
+ value = reader(Data, m_reader.StringTable, info.Cardinality); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_reader.StringTable, m_reader); + value = reader(Data, m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -138,7 +137,7 @@ public WDB4Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB4Reader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB4 file is corrupted!"); diff --git a/DBCD.IO/Readers/WDB5Reader.cs b/DBCD.IO/Readers/WDB5Reader.cs index 1fe052c..cc72d3e 100644 --- a/DBCD.IO/Readers/WDB5Reader.cs +++ b/DBCD.IO/Readers/WDB5Reader.cs @@ -9,27 +9,26 @@ namespace DBCD.IO.Readers { class WDB5Row : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_dataOffset; private readonly int m_dataPosition; private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } private readonly FieldMetaData[] m_fieldMeta; public WDB5Row(BaseReader reader, BitReader data, int id, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordIndex = recordIndex; Id = id; - m_dataOffset = m_data.Offset; - m_dataPosition = m_data.Position; + m_dataOffset = Data.Offset; + m_dataPosition = Data.Position; m_fieldMeta = reader.Meta; } @@ -65,8 +64,8 @@ public void GetFields(FieldCache[] fields, T entry) { int indexFieldOffSet = 0; - m_data.Position = m_dataPosition; - m_data.Offset = m_dataOffset; + Data.Position = m_dataPosition; + Data.Offset = m_dataOffset; for (int i = 0; i < fields.Length; i++) { @@ -76,7 +75,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(m_data, m_fieldMeta[i]); + Id = GetFieldValue(Data, m_fieldMeta[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -98,14 +97,14 @@ public void GetFields(FieldCache[] fields, T entry) SetCardinality(info, fieldIndex); if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_fieldMeta[fieldIndex], m_reader.StringTable, info.Cardinality); + value = reader(Data, m_fieldMeta[fieldIndex], m_reader.StringTable, info.Cardinality); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_fieldMeta[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Data, m_fieldMeta[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -158,14 +157,14 @@ public IDBRow Clone() class WDB5Reader : BaseReader { - private const int HeaderSize = 48; + private const int HeaderSize = 52; private const uint WDB5FmtSig = 0x35424457; // WDB5 public WDB5Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) { } public WDB5Reader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB5 file is corrupted!"); @@ -188,12 +187,12 @@ public WDB5Reader(Stream stream) Flags = (DB2Flags)reader.ReadUInt16(); 
IdFieldIndex = reader.ReadUInt16(); - if (RecordsCount == 0) - return; - // field meta data Meta = reader.ReadArray(FieldsCount); + if (RecordsCount == 0) + return; + if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data diff --git a/DBCD.IO/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs index ded7a87..3544bdc 100644 --- a/DBCD.IO/Readers/WDB6Reader.cs +++ b/DBCD.IO/Readers/WDB6Reader.cs @@ -10,14 +10,13 @@ namespace DBCD.IO.Readers { class WDB6Row : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_dataOffset; private readonly int m_dataPosition; private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } private readonly FieldMetaData[] m_fieldMeta; private readonly Dictionary[] CommonData; @@ -25,11 +24,11 @@ class WDB6Row : IDBRow public WDB6Row(BaseReader reader, BitReader data, int id, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordIndex = recordIndex; - m_dataOffset = m_data.Offset; - m_dataPosition = m_data.Position; + m_dataOffset = Data.Offset; + m_dataPosition = Data.Position; m_fieldMeta = reader.Meta; CommonData = reader.CommonData; @@ -69,8 +68,8 @@ public void GetFields(FieldCache[] fields, T entry) { int indexFieldOffSet = 0; - m_data.Position = m_dataPosition; - m_data.Offset = m_dataOffset; + Data.Position = m_dataPosition; + Data.Offset = m_dataOffset; for (int i = 0; i < fields.Length; i++) { @@ -80,7 +79,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], CommonData?[i]); + Id = GetFieldValue(0, Data, m_fieldMeta[i], CommonData?[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -102,14 +101,14 @@ public void GetFields(FieldCache[] fields, T entry) SetCardinality(info, fieldIndex); if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_fieldMeta[fieldIndex], CommonData?[fieldIndex], m_reader.StringTable, info.Cardinality); + value = reader(Id, Data, m_fieldMeta[fieldIndex], CommonData?[fieldIndex], m_reader.StringTable, info.Cardinality); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_fieldMeta[fieldIndex], CommonData?[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, Data, m_fieldMeta[fieldIndex], CommonData?[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -177,7 +176,7 @@ class WDB6Reader : BaseReader private const uint WDB6FmtSig = 0x36424457; // WDB6 // CommonData type enum to bit size - private readonly Dictionary CommonDataTypeBits = new Dictionary + private readonly Dictionary CommonTypeBits = new Dictionary { { 0, 0 }, // string { 1, 16 }, // short @@ -190,7 +189,7 @@ public WDB6Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDB6Reader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDB6 file is corrupted!"); @@ -215,14 +214,12 @@ public WDB6Reader(Stream stream) int totalFieldCount = reader.ReadInt32(); int commonDataSize = reader.ReadInt32(); + // field meta data + Meta = reader.ReadArray(FieldsCount); + if (RecordsCount == 0) 
return; - // field meta data - var meta = reader.ReadArray(FieldsCount); - Array.Resize(ref meta, totalFieldCount); - Meta = meta; - if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data @@ -285,6 +282,8 @@ public WDB6Reader(Stream stream) if (commonDataSize > 0) { + Array.Resize(ref Meta, totalFieldCount); + int fieldCount = reader.ReadInt32(); CommonData = new Dictionary[fieldCount]; @@ -297,7 +296,7 @@ public WDB6Reader(Stream stream) { int count = reader.ReadInt32(); byte type = reader.ReadByte(); - int size = CommonDataIsAligned ? 4 : (32 - CommonDataTypeBits[type]) >> 3; + int size = CommonDataIsAligned ? 4 : (32 - CommonTypeBits[type]) >> 3; // add the new meta entry if (i > FieldsCount) @@ -306,7 +305,7 @@ public WDB6Reader(Stream stream) Meta[i] = new FieldMetaData() { - Bits = CommonDataTypeBits[type], + Bits = CommonTypeBits[type], Offset = (short)(Meta[i - 1].Offset + ((32 - Meta[i - 1].Bits) >> 3)) }; } diff --git a/DBCD.IO/Readers/WDBCReader.cs b/DBCD.IO/Readers/WDBCReader.cs index 5c7be7d..83bd3c6 100644 --- a/DBCD.IO/Readers/WDBCReader.cs +++ b/DBCD.IO/Readers/WDBCReader.cs @@ -10,17 +10,16 @@ namespace DBCD.IO.Readers { class WDBCRow : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } public WDBCRow(BaseReader reader, BitReader data, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordIndex = recordIndex + 1; Id = m_recordIndex = recordIndex + 1; @@ -61,7 +60,7 @@ public void GetFields(FieldCache[] fields, T entry) FieldCache info = fields[i]; if (info.IndexMapField) { - Id = GetFieldValue(m_data); + Id = GetFieldValue(Data); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; } @@ -71,20 +70,20 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_reader.StringTable, info.Cardinality); + value = reader(Data, m_reader.StringTable, info.Cardinality); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else if (info.IsLocalisedString) { - m_data.Position += 32 * info.LocaleInfo.Locale; - value = simpleReaders[typeof(string)](m_data, m_reader.StringTable, m_reader); - m_data.Position += 32 * (info.LocaleInfo.LocaleCount - info.LocaleInfo.Locale); + Data.Position += 32 * info.LocaleInfo.Locale; + value = simpleReaders[typeof(string)](Data, m_reader.StringTable, m_reader); + Data.Position += 32 * (info.LocaleInfo.LocaleCount - info.LocaleInfo.Locale); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_reader.StringTable, m_reader); + value = reader(Data, m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -122,7 +121,7 @@ public WDBCReader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDBCReader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDBC file is corrupted!"); diff --git a/DBCD.IO/Readers/WDC1Reader.cs b/DBCD.IO/Readers/WDC1Reader.cs index fac1142..b710c35 100644 --- a/DBCD.IO/Readers/WDC1Reader.cs +++ b/DBCD.IO/Readers/WDC1Reader.cs @@ -10,14 +10,13 @@ namespace DBCD.IO.Readers { class WDC1Row : IDBRow { - private BitReader m_data; 
private BaseReader m_reader; private readonly int m_dataOffset; private readonly int m_dataPosition; private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } private readonly FieldMetaData[] m_fieldMeta; private readonly ColumnMetaData[] ColumnMeta; @@ -28,11 +27,11 @@ class WDC1Row : IDBRow public WDC1Row(BaseReader reader, BitReader data, int id, int refID, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordIndex = recordIndex; - m_dataOffset = m_data.Offset; - m_dataPosition = m_data.Position; + m_dataOffset = Data.Offset; + m_dataPosition = Data.Position; m_fieldMeta = reader.Meta; ColumnMeta = reader.ColumnMeta; @@ -75,8 +74,8 @@ public void GetFields(FieldCache[] fields, T entry) { int indexFieldOffSet = 0; - m_data.Position = m_dataPosition; - m_data.Offset = m_dataOffset; + Data.Position = m_dataPosition; + Data.Offset = m_dataOffset; for (int i = 0; i < fields.Length; i++) { @@ -86,7 +85,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); + Id = GetFieldValue(0, Data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -104,14 +103,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); + value = reader(Data, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, Data, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -136,6 +135,7 @@ private static T GetFieldValue(int Id, BitReader r, FieldMetaData fieldMeta, { if ((columnMeta.Immediate.Flags & 0x1) == 0x1) return r.ReadValue64Signed(columnMeta.Immediate.BitWidth).GetValue(); + return r.ReadValue64(columnMeta.Immediate.BitWidth).GetValue(); } case CompressionType.Common: @@ -212,7 +212,7 @@ public WDC1Reader(string dbcFile) : this(new FileStream(dbcFile, FileMode.Open)) public WDC1Reader(Stream stream) { - using (var reader = new BinaryReader(stream)) + using (var reader = new BinaryReader(stream, Encoding.UTF8)) { if (reader.BaseStream.Length < HeaderSize) throw new InvalidDataException("WDC1 file is corrupted!"); @@ -237,21 +237,21 @@ public WDC1Reader(Stream stream) IdFieldIndex = reader.ReadUInt16(); int totalFieldsCount = reader.ReadInt32(); - PackedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts - int lookupColumnCount = reader.ReadInt32(); // count of lookup columns - int sparseTableOffset = reader.ReadInt32(); // absolute value, {uint offset, ushort size}[MaxId - MinId + 1] - int indexDataSize = reader.ReadInt32(); // int indexData[IndexDataSize / 4] - int columnMetaDataSize = 
reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 + PackedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts + int lookupColumnCount = reader.ReadInt32(); // count of lookup columns + int sparseTableOffset = reader.ReadInt32(); // absolute value, {uint offset, ushort size}[MaxId - MinId + 1] + int indexDataSize = reader.ReadInt32(); // int indexData[IndexDataSize / 4] + int columnMetaDataSize = reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 int commonDataSize = reader.ReadInt32(); - int palletDataSize = reader.ReadInt32(); // in bytes, sizeof(DBC2PalletValue) == 4 - int referenceDataSize = reader.ReadInt32(); // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords], questionable usefulness... - - if (RecordsCount == 0) - return; + int palletDataSize = reader.ReadInt32(); // in bytes, sizeof(DBC2PalletValue) == 4 + int referenceDataSize = reader.ReadInt32(); // uint NumRecords, uint minId, uint maxId, {uint id, uint index}[NumRecords], questionable usefulness... // field meta data Meta = reader.ReadArray(FieldsCount); + if (RecordsCount == 0) + return; + if (!Flags.HasFlagExt(DB2Flags.Sparse)) { // records data diff --git a/DBCD.IO/Readers/WDC2Reader.cs b/DBCD.IO/Readers/WDC2Reader.cs index b69a138..6065b5e 100644 --- a/DBCD.IO/Readers/WDC2Reader.cs +++ b/DBCD.IO/Readers/WDC2Reader.cs @@ -10,7 +10,6 @@ namespace DBCD.IO.Readers { class WDC2Row : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_dataOffset; private readonly int m_dataPosition; @@ -18,7 +17,7 @@ class WDC2Row : IDBRow private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } private readonly FieldMetaData[] m_fieldMeta; private readonly ColumnMetaData[] ColumnMeta; @@ -29,12 +28,12 @@ class WDC2Row : IDBRow public WDC2Row(BaseReader reader, BitReader data, int recordsOffset, int id, int refID, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordsOffset = recordsOffset; m_recordIndex = recordIndex; - m_dataOffset = m_data.Offset; - m_dataPosition = m_data.Position; + m_dataOffset = Data.Offset; + m_dataPosition = Data.Position; m_fieldMeta = reader.Meta; ColumnMeta = reader.ColumnMeta; @@ -79,8 +78,8 @@ public void GetFields(FieldCache[] fields, T entry) { int indexFieldOffSet = 0; - m_data.Position = m_dataPosition; - m_data.Offset = m_dataOffset; + Data.Position = m_dataPosition; + Data.Offset = m_dataOffset; for (int i = 0; i < fields.Length; i++) { @@ -95,11 +94,11 @@ public void GetFields(FieldCache[] fields, T entry) { if (!m_reader.Flags.HasFlagExt(DB2Flags.Sparse)) { - m_data.Position = ColumnMeta[i].RecordOffset; - m_data.Offset = m_dataOffset; + Data.Position = ColumnMeta[i].RecordOffset; + Data.Offset = m_dataOffset; } - Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); + Id = GetFieldValue(0, Data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); } info.Setter(entry, Convert.ChangeType(Id, 
info.FieldType)); @@ -117,21 +116,21 @@ public void GetFields(FieldCache[] fields, T entry) if (!m_reader.Flags.HasFlagExt(DB2Flags.Sparse)) { - m_data.Position = ColumnMeta[fieldIndex].RecordOffset; - m_data.Offset = m_dataOffset; + Data.Position = ColumnMeta[fieldIndex].RecordOffset; + Data.Offset = m_dataOffset; } if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_recordsOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); + value = reader(Data, m_recordsOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_recordsOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, Data, m_recordsOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -428,4 +427,4 @@ public WDC2Reader(Stream stream) } } } -} \ No newline at end of file +} diff --git a/DBCD.IO/Readers/WDC3Reader.cs b/DBCD.IO/Readers/WDC3Reader.cs index 51db394..04463c4 100644 --- a/DBCD.IO/Readers/WDC3Reader.cs +++ b/DBCD.IO/Readers/WDC3Reader.cs @@ -10,7 +10,6 @@ namespace DBCD.IO.Readers { class WDC3Row : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_dataOffset; private readonly int m_dataPosition; @@ -18,7 +17,7 @@ class WDC3Row : IDBRow private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } private readonly FieldMetaData[] m_fieldMeta; private readonly ColumnMetaData[] ColumnMeta; @@ -29,12 +28,12 @@ class WDC3Row : IDBRow public WDC3Row(BaseReader reader, BitReader data, int id, int refID, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordOffset = (recordIndex * reader.RecordSize) - (reader.RecordsCount * reader.RecordSize); m_recordIndex = recordIndex; - m_dataOffset = m_data.Offset; - m_dataPosition = m_data.Position; + m_dataOffset = Data.Offset; + m_dataPosition = Data.Position; m_fieldMeta = reader.Meta; ColumnMeta = reader.ColumnMeta; @@ -89,8 +88,8 @@ public void GetFields(FieldCache[] fields, T entry) { int indexFieldOffSet = 0; - m_data.Position = m_dataPosition; - m_data.Offset = m_dataOffset; + Data.Position = m_dataPosition; + Data.Offset = m_dataOffset; for (int i = 0; i < fields.Length; i++) { @@ -100,7 +99,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); + Id = GetFieldValue(0, Data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -118,14 +117,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); + value = reader(Data, m_recordOffset, 
m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, Data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -265,30 +264,27 @@ public WDC3Reader(Stream stream) if (magic != WDC3FmtSig) throw new InvalidDataException("WDC3 file is corrupted!"); - RecordsCount = reader.ReadInt32(); - FieldsCount = reader.ReadInt32(); - RecordSize = reader.ReadInt32(); - StringTableSize = reader.ReadInt32(); - TableHash = reader.ReadUInt32(); - LayoutHash = reader.ReadUInt32(); - MinIndex = reader.ReadInt32(); - MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); - Flags = (DB2Flags)reader.ReadUInt16(); - IdFieldIndex = reader.ReadUInt16(); - int totalFieldsCount = reader.ReadInt32(); - int packedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts - int lookupColumnCount = reader.ReadInt32(); // count of lookup columns - int columnMetaDataSize = reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 - int commonDataSize = reader.ReadInt32(); - int palletDataSize = reader.ReadInt32(); // in bytes, sizeof(DBC2PalletValue) == 4 - int sectionsCount = reader.ReadInt32(); - - if (sectionsCount == 0 || RecordsCount == 0) - return; - - var sections = reader.ReadArray(sectionsCount).ToList(); - this.m_sections = sections.OfType().ToList(); + RecordsCount = reader.ReadInt32(); + FieldsCount = reader.ReadInt32(); + RecordSize = reader.ReadInt32(); + StringTableSize = reader.ReadInt32(); + TableHash = reader.ReadUInt32(); + LayoutHash = reader.ReadUInt32(); + MinIndex = reader.ReadInt32(); + MaxIndex = reader.ReadInt32(); + Locale = reader.ReadInt32(); + Flags = (DB2Flags)reader.ReadUInt16(); + IdFieldIndex = reader.ReadUInt16(); + int totalFieldsCount = reader.ReadInt32(); + PackedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts + int lookupColumnCount = reader.ReadInt32(); // count of lookup columns + int columnMetaDataSize = reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 + int commonDataSize = reader.ReadInt32(); + int palletDataSize = reader.ReadInt32(); // in bytes, sizeof(DBC2PalletValue) == 4 + int sectionsCount = reader.ReadInt32(); + + var sections = (sectionsCount == 0) ? 
new List() : reader.ReadArray(sectionsCount).ToList(); + m_sections = sections.OfType().ToList(); // field meta data Meta = reader.ReadArray(FieldsCount); @@ -467,4 +463,4 @@ public WDC3Reader(Stream stream) } } } -} \ No newline at end of file +} diff --git a/DBCD.IO/Readers/WDC4Reader.cs b/DBCD.IO/Readers/WDC4Reader.cs index 9791ce5..e440b6e 100644 --- a/DBCD.IO/Readers/WDC4Reader.cs +++ b/DBCD.IO/Readers/WDC4Reader.cs @@ -10,7 +10,6 @@ namespace DBCD.IO.Readers { class WDC4Row : IDBRow { - private BitReader m_data; private BaseReader m_reader; private readonly int m_dataOffset; private readonly int m_dataPosition; @@ -18,7 +17,7 @@ class WDC4Row : IDBRow private readonly int m_recordIndex; public int Id { get; set; } - public BitReader Data { get => m_data; set => m_data = value; } + public BitReader Data { get; set; } private readonly FieldMetaData[] m_fieldMeta; private readonly ColumnMetaData[] ColumnMeta; @@ -29,12 +28,12 @@ class WDC4Row : IDBRow public WDC4Row(BaseReader reader, BitReader data, int id, int refID, int recordIndex) { m_reader = reader; - m_data = data; + Data = data; m_recordOffset = (recordIndex * reader.RecordSize) - (reader.RecordsCount * reader.RecordSize); m_recordIndex = recordIndex; - m_dataOffset = m_data.Offset; - m_dataPosition = m_data.Position; + m_dataOffset = Data.Offset; + m_dataPosition = Data.Position; m_fieldMeta = reader.Meta; ColumnMeta = reader.ColumnMeta; @@ -89,8 +88,8 @@ public void GetFields(FieldCache[] fields, T entry) { int indexFieldOffSet = 0; - m_data.Position = m_dataPosition; - m_data.Offset = m_dataOffset; + Data.Position = m_dataPosition; + Data.Offset = m_dataOffset; for (int i = 0; i < fields.Length; i++) { @@ -100,7 +99,7 @@ public void GetFields(FieldCache[] fields, T entry) if (Id != -1) indexFieldOffSet++; else - Id = GetFieldValue(0, m_data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); + Id = GetFieldValue(0, Data, m_fieldMeta[i], ColumnMeta[i], PalletData[i], CommonData[i]); info.Setter(entry, Convert.ChangeType(Id, info.FieldType)); continue; @@ -118,14 +117,14 @@ public void GetFields(FieldCache[] fields, T entry) if (info.IsArray) { if (arrayReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); + value = reader(Data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable); else throw new Exception("Unhandled array type: " + typeof(T).Name); } else { if (simpleReaders.TryGetValue(info.FieldType, out var reader)) - value = reader(Id, m_data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); + value = reader(Id, Data, m_recordOffset, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], m_reader.StringTable, m_reader); else throw new Exception("Unhandled field type: " + typeof(T).Name); } @@ -273,11 +272,11 @@ public WDC4Reader(Stream stream) LayoutHash = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt16(); IdFieldIndex = reader.ReadUInt16(); int totalFieldsCount = reader.ReadInt32(); - int packedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts + PackedDataOffset = 
reader.ReadInt32(); // Offset within the field where packed data starts int lookupColumnCount = reader.ReadInt32(); // count of lookup columns int columnMetaDataSize = reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 int commonDataSize = reader.ReadInt32(); @@ -498,4 +497,4 @@ public WDC4Reader(Stream stream) } } } -} \ No newline at end of file +} diff --git a/DBCD.IO/Readers/WDC5Reader.cs b/DBCD.IO/Readers/WDC5Reader.cs index 70d7c07..3199e14 100644 --- a/DBCD.IO/Readers/WDC5Reader.cs +++ b/DBCD.IO/Readers/WDC5Reader.cs @@ -276,11 +276,11 @@ public WDC5Reader(Stream stream) LayoutHash = reader.ReadUInt32(); MinIndex = reader.ReadInt32(); MaxIndex = reader.ReadInt32(); - int locale = reader.ReadInt32(); + Locale = reader.ReadInt32(); Flags = (DB2Flags)reader.ReadUInt16(); IdFieldIndex = reader.ReadUInt16(); int totalFieldsCount = reader.ReadInt32(); - int packedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts + PackedDataOffset = reader.ReadInt32(); // Offset within the field where packed data starts int lookupColumnCount = reader.ReadInt32(); // count of lookup columns int columnMetaDataSize = reader.ReadInt32(); // 24 * NumFields bytes, describes column bit packing, {ushort recordOffset, ushort size, uint additionalDataSize, uint compressionType, uint packedDataOffset or commonvalue, uint cellSize, uint cardinality}[NumFields], sizeof(DBC2CommonValue) == 8 int commonDataSize = reader.ReadInt32(); From ab2f0d7438652f0d33f3f3fed839a3c3bd3982d8 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:55:21 +0200 Subject: [PATCH 14/40] Fix compile Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> Co-Authored-By: Luzifix <7042325+luzifix@users.noreply.github.com> --- DBCD.IO/Readers/WDB6Reader.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/DBCD.IO/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs index 3544bdc..a1b1e8f 100644 --- a/DBCD.IO/Readers/WDB6Reader.cs +++ b/DBCD.IO/Readers/WDB6Reader.cs @@ -282,7 +282,9 @@ public WDB6Reader(Stream stream) if (commonDataSize > 0) { - Array.Resize(ref Meta, totalFieldCount); + var meta = reader.ReadArray(FieldsCount); + Array.Resize(ref meta, totalFieldCount); + Meta = meta; int fieldCount = reader.ReadInt32(); CommonData = new Dictionary[fieldCount]; From d0e72e8e3c764beb6c6062f29552127b2928c588 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:56:07 +0200 Subject: [PATCH 15/40] Merge tests Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> Co-Authored-By: Luzifix <7042325+luzifix@users.noreply.github.com> --- DBCD.Tests/{UnitTest1.cs => ReadingTest.cs} | 7 +-- DBCD.Tests/Utilities/IO.cs | 54 +++++++++++++++++++++ DBCD.Tests/WritingTest.cs | 54 +++++++++++++++++++++ 3 files changed, 109 insertions(+), 6 deletions(-) rename DBCD.Tests/{UnitTest1.cs => ReadingTest.cs} (97%) create mode 100644 DBCD.Tests/Utilities/IO.cs create mode 100644 DBCD.Tests/WritingTest.cs diff --git a/DBCD.Tests/UnitTest1.cs b/DBCD.Tests/ReadingTest.cs similarity index 97% rename from DBCD.Tests/UnitTest1.cs rename to DBCD.Tests/ReadingTest.cs index 49d8b81..c6f8b3d 100644 --- a/DBCD.Tests/UnitTest1.cs +++ b/DBCD.Tests/ReadingTest.cs @@ -1,15 +1,10 @@ using DBCD.Providers; -using 
DBCD.IO.Attributes; using Microsoft.VisualStudio.TestTools.UnitTesting; -using System; -using System.IO; -using DBCD.IO; -using System.Linq; namespace DBCD.Tests { [TestClass] - public class UnitTest1 + public class ReadingTest { static GithubDBDProvider githubDBDProvider = new(true); static readonly WagoDBCProvider wagoDBCProvider = new(); diff --git a/DBCD.Tests/Utilities/IO.cs b/DBCD.Tests/Utilities/IO.cs new file mode 100644 index 0000000..cc76ec0 --- /dev/null +++ b/DBCD.Tests/Utilities/IO.cs @@ -0,0 +1,54 @@ +using System.Collections.Generic; +using System.Globalization; +using System.IO; +using System.Linq; + +namespace DBCD.Tests.Utilities +{ + internal class IO + { + public static bool TryGetExactPath(string path, out string exactPath) + { + bool result = false; + exactPath = null; + + // DirectoryInfo accepts either a file path or a directory path, and most of its properties work for either. + // However, its Exists property only works for a directory path. + DirectoryInfo directory = new DirectoryInfo(path); + if (File.Exists(path) || directory.Exists) + { + List<string> parts = new List<string>(); + + DirectoryInfo parentDirectory = directory.Parent; + while (parentDirectory != null) + { + FileSystemInfo entry = parentDirectory.EnumerateFileSystemInfos(directory.Name).First(); + parts.Add(entry.Name); + + directory = parentDirectory; + parentDirectory = directory.Parent; + } + + // Handle the root part (i.e., drive letter or UNC \\server\share). + string root = directory.FullName; + if (root.Contains(':')) + { + root = root.ToUpper(); + } + else + { + string[] rootParts = root.Split('\\'); + root = string.Join("\\", rootParts.Select(part => CultureInfo.CurrentCulture.TextInfo.ToTitleCase(part))); + } + + parts.Add(root); + parts.Reverse(); + exactPath = Path.Combine(parts.ToArray()); + result = true; + } + + return result; + } + + } +} diff --git a/DBCD.Tests/WritingTest.cs b/DBCD.Tests/WritingTest.cs new file mode 100644 index 0000000..4bdd02a --- /dev/null +++ b/DBCD.Tests/WritingTest.cs @@ -0,0 +1,54 @@ +using DBCD.Providers; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Newtonsoft.Json; +using System.IO; + +namespace DBCD.Tests +{ + [TestClass] + public class WritingTest + { + public static GithubDBDProvider DBDProvider { get; } = new GithubDBDProvider(true); + public static string InputPath { get; } = $"{Directory.GetCurrentDirectory()}\\dbc"; + public static DBCD InputDBCD { get; } = new DBCD(new FilesystemDBCProvider(InputPath), DBDProvider); + public static DBCD SavedDBCD { get; } = new DBCD(new FilesystemDBCProvider("tmp"), DBDProvider); + + public static string Build { get; } = "9.1.0.39653"; + + [TestMethod] + public void TestWritingAllDB2s() + { + string[] allDB2s = Directory.GetFiles(InputPath, "*.db2", SearchOption.TopDirectoryOnly); + + if (Directory.Exists("tmp")) + Directory.Delete("tmp", true); + Directory.CreateDirectory("tmp"); + + foreach (var db2File in allDB2s) + { + if (Utilities.IO.TryGetExactPath(db2File, out string exactPath)) + { + var tableName = Path.GetFileNameWithoutExtension(exactPath); + + var originalStorage = InputDBCD.Load(tableName, Build); + originalStorage.Save($"tmp/{tableName}.db2"); + + var savedStorage = SavedDBCD.Load(tableName, Build); + + // Lazy compare + var originalJson = JsonConvert.SerializeObject(originalStorage.Values, Formatting.Indented); + var newJson = JsonConvert.SerializeObject(savedStorage.Values, Formatting.Indented); + if (originalJson != newJson) + { + File.WriteAllText("original.json", originalJson); +
File.WriteAllText("new.json", newJson); + + throw new InvalidDataException($"The saved storage {tableName} should not differ from the original one!"); + } + } + } + + Directory.Delete("tmp", true); + } + } +} From 25224bb49a4a736aa05623d0ac5e8f6829fa8d22 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:56:25 +0200 Subject: [PATCH 16/40] Remove try/catch to mirror current Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> Co-Authored-By: Luzifix <7042325+luzifix@users.noreply.github.com> --- DBCD.IO/Readers/BaseReader.cs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/DBCD.IO/Readers/BaseReader.cs b/DBCD.IO/Readers/BaseReader.cs index f514fee..74246c3 100644 --- a/DBCD.IO/Readers/BaseReader.cs +++ b/DBCD.IO/Readers/BaseReader.cs @@ -43,15 +43,8 @@ abstract class BaseReader public void Enumerate(Action action) { - try - { - Parallel.ForEach(_Records.Values, action); - Parallel.ForEach(GetCopyRows(), action); - } - catch (AggregateException ex) - { - throw ex.InnerException; - } + Parallel.ForEach(_Records.Values, action); + Parallel.ForEach(GetCopyRows(), action); } public void Clear() From 7dd1077f7c62ff1cae6783bad9e7da2efc56aa13 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:58:10 +0200 Subject: [PATCH 17/40] Add Benchmark project Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> Co-Authored-By: Luzifix <7042325+luzifix@users.noreply.github.com> --- DBCD.Benchmark/Benchmarks/WritingBenchmark.cs | 40 ++++++++++++++ DBCD.Benchmark/DBCD.Benchmark.csproj | 18 +++++++ DBCD.Benchmark/Program.cs | 5 ++ DBCD.Benchmark/Utilities/IO.cs | 53 +++++++++++++++++++ DBCD.sln | 6 +++ 5 files changed, 122 insertions(+) create mode 100644 DBCD.Benchmark/Benchmarks/WritingBenchmark.cs create mode 100644 DBCD.Benchmark/DBCD.Benchmark.csproj create mode 100644 DBCD.Benchmark/Program.cs create mode 100644 DBCD.Benchmark/Utilities/IO.cs diff --git a/DBCD.Benchmark/Benchmarks/WritingBenchmark.cs b/DBCD.Benchmark/Benchmarks/WritingBenchmark.cs new file mode 100644 index 0000000..5dcedda --- /dev/null +++ b/DBCD.Benchmark/Benchmarks/WritingBenchmark.cs @@ -0,0 +1,40 @@ +using BenchmarkDotNet.Attributes; +using DBCD.Benchmark.Utilities; +using DBCD.Providers; + +namespace DBCD.Benchmark.Benchmarks +{ + [MemoryDiagnoser] + public class WritingBenchmark + { + public static GithubDBDProvider DBDProvider { get; } = new GithubDBDProvider(true); + public static string InputPath { get; } = $"{Directory.GetCurrentDirectory()}\\dbc"; + public static DBCD InputDBCD { get; } = new DBCD(new FilesystemDBCProvider(InputPath), DBDProvider); + public static DBCD SavedDBCD { get; } = new DBCD(new FilesystemDBCProvider("tmp"), DBDProvider); + + public static string Build { get; } = "9.1.0.39653"; + + [Benchmark] + public void TestWritingAllDB2s() + { + string[] allDB2s = Directory.GetFiles(InputPath, "*.db2", SearchOption.TopDirectoryOnly); + + if (Directory.Exists("tmp")) + Directory.Delete("tmp", true); + Directory.CreateDirectory("tmp"); + + foreach (var db2File in allDB2s) + { + if (Utilities.IO.TryGetExactPath(db2File, out string exactPath)) + { + var tableName = Path.GetFileNameWithoutExtension(exactPath); + + var originalStorage = InputDBCD.Load(tableName, Build); + originalStorage.Save($"tmp/{tableName}.db2"); + } + } + + Directory.Delete("tmp", true); + } + } +} diff --git a/DBCD.Benchmark/DBCD.Benchmark.csproj b/DBCD.Benchmark/DBCD.Benchmark.csproj new file mode 100644 index 
0000000..bd4e470 --- /dev/null +++ b/DBCD.Benchmark/DBCD.Benchmark.csproj @@ -0,0 +1,18 @@ + + + + Exe + net6.0 + enable + enable + + + + + + + + + + + diff --git a/DBCD.Benchmark/Program.cs b/DBCD.Benchmark/Program.cs new file mode 100644 index 0000000..521116f --- /dev/null +++ b/DBCD.Benchmark/Program.cs @@ -0,0 +1,5 @@ +// See https://aka.ms/new-console-template for more information +using BenchmarkDotNet.Running; +using DBCD.Benchmark.Benchmarks; + +BenchmarkRunner.Run<WritingBenchmark>(); \ No newline at end of file diff --git a/DBCD.Benchmark/Utilities/IO.cs b/DBCD.Benchmark/Utilities/IO.cs new file mode 100644 index 0000000..5336a09 --- /dev/null +++ b/DBCD.Benchmark/Utilities/IO.cs @@ -0,0 +1,53 @@ +using System.Collections.Generic; +using System.Globalization; +using System.IO; +using System.Linq; + +namespace DBCD.Benchmark.Utilities +{ + internal class IO + { + public static bool TryGetExactPath(string path, out string exactPath) + { + bool result = false; + exactPath = null; + + // DirectoryInfo accepts either a file path or a directory path, and most of its properties work for either. + // However, its Exists property only works for a directory path. + DirectoryInfo directory = new DirectoryInfo(path); + if (File.Exists(path) || directory.Exists) + { + List<string> parts = new List<string>(); + + DirectoryInfo parentDirectory = directory.Parent; + while (parentDirectory != null) + { + FileSystemInfo entry = parentDirectory.EnumerateFileSystemInfos(directory.Name).First(); + parts.Add(entry.Name); + + directory = parentDirectory; + parentDirectory = directory.Parent; + } + + // Handle the root part (i.e., drive letter or UNC \\server\share). + string root = directory.FullName; + if (root.Contains(':')) + { + root = root.ToUpper(); + } + else + { + string[] rootParts = root.Split('\\'); + root = string.Join("\\", rootParts.Select(part => CultureInfo.CurrentCulture.TextInfo.ToTitleCase(part))); + } + + parts.Add(root); + parts.Reverse(); + exactPath = Path.Combine(parts.ToArray()); + result = true; + } + + return result; + } + } +} diff --git a/DBCD.sln b/DBCD.sln index 220a51d..3bcc88e 100644 --- a/DBCD.sln +++ b/DBCD.sln @@ -9,6 +9,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DBCD.IO", "DBCD.IO\DBCD.IO.
EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DBCD.Tests", "DBCD.Tests\DBCD.Tests.csproj", "{96CFC512-3818-487F-8FB6-7632E340ABB9}" EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DBCD.Benchmark", "DBCD.Benchmark\DBCD.Benchmark.csproj", "{5EA3D33B-9901-48CB-B558-0D8A90F2CD7C}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -27,6 +29,10 @@ Global {96CFC512-3818-487F-8FB6-7632E340ABB9}.Debug|Any CPU.Build.0 = Debug|Any CPU {96CFC512-3818-487F-8FB6-7632E340ABB9}.Release|Any CPU.ActiveCfg = Release|Any CPU {96CFC512-3818-487F-8FB6-7632E340ABB9}.Release|Any CPU.Build.0 = Release|Any CPU + {5EA3D33B-9901-48CB-B558-0D8A90F2CD7C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {5EA3D33B-9901-48CB-B558-0D8A90F2CD7C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {5EA3D33B-9901-48CB-B558-0D8A90F2CD7C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {5EA3D33B-9901-48CB-B558-0D8A90F2CD7C}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE From f537bcd81c332c2ef185f360adfc8f476f99efb2 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:58:16 +0200 Subject: [PATCH 18/40] Update README Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> Co-Authored-By: Luzifix <7042325+luzifix@users.noreply.github.com> --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 28e9067..102ac9d 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,2 @@ # DBCD -C# library for reading DBC/DB2 database files from World of Warcraft with built-in support for [WoWDBDefs](https://github.com/wowdev/WoWDBDefs) definitions. +C# library for reading and writing DBC/DB2 database files from World of Warcraft with built-in support for [WoWDBDefs](https://github.com/wowdev/WoWDBDefs) definitions. 
From ecce99a28186a4c022b4198743a4d282af92c33a Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 06:59:40 +0200 Subject: [PATCH 19/40] Add ReadingBenchmark Co-Authored-By: BinarySpace <29510799+YetAnotherBinarySpace@users.noreply.github.com> Co-Authored-By: Luzifix <7042325+luzifix@users.noreply.github.com> --- DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs diff --git a/DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs b/DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs new file mode 100644 index 0000000..856bf39 --- /dev/null +++ b/DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs @@ -0,0 +1,31 @@ +using BenchmarkDotNet.Attributes; +using DBCD.Benchmark.Utilities; +using DBCD.Providers; + +namespace DBCD.Benchmark.Benchmarks +{ + [MemoryDiagnoser] + public class ReadingBenchmark + { + public static GithubDBDProvider DBDProvider { get; } = new GithubDBDProvider(true); + public static string InputPath { get; } = $"{Directory.GetCurrentDirectory()}\\dbc"; + public static DBCD InputDBCD { get; } = new DBCD(new FilesystemDBCProvider(InputPath), DBDProvider); + + public static string Build { get; } = "11.0.2.55959"; + + [Benchmark] + public void TestReadingAllDB2s() + { + string[] allDB2s = Directory.GetFiles(InputPath, "*.db2", SearchOption.TopDirectoryOnly); + + foreach (var db2File in allDB2s) + { + if (Utilities.IO.TryGetExactPath(db2File, out string exactPath)) + { + var tableName = Path.GetFileNameWithoutExtension(exactPath); + var originalStorage = InputDBCD.Load(tableName, Build); + } + } + } + } +} From 87332b1d7fb1dc32f86c5d19d161bada25287665 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 08:12:42 +0200 Subject: [PATCH 20/40] Update tests --- DBCD.Tests/Providers/WagoDBCProvider.cs | 61 ++++++++++++++-------- DBCD.Tests/ReadingTest.cs | 41 +++++++++++++++ DBCD.Tests/TestDBDProvider.cs | 16 ------ DBCD.Tests/WritingTest.cs | 67 ++++++++++++++++++------- 4 files changed, 131 insertions(+), 54 deletions(-) delete mode 100644 DBCD.Tests/TestDBDProvider.cs diff --git a/DBCD.Tests/Providers/WagoDBCProvider.cs b/DBCD.Tests/Providers/WagoDBCProvider.cs index 684cc0d..2d3c656 100644 --- a/DBCD.Tests/Providers/WagoDBCProvider.cs +++ b/DBCD.Tests/Providers/WagoDBCProvider.cs @@ -1,6 +1,9 @@ using System; +using System.Collections.Generic; using System.IO; +using System.Linq; using System.Net.Http; +using System.Text; namespace DBCD.Providers { @@ -10,34 +13,47 @@ namespace DBCD.Providers public class WagoDBCProvider : IDBCProvider { private readonly HttpClient client = new(); + private readonly Dictionary DB2FileDataIDs = new(); - public Stream StreamForTableName(string tableName, string build) + public WagoDBCProvider() { - uint fileDataID; + if (DB2FileDataIDs.Count == 0) + LoadDBDManifest(); + } - // For tests, we only support a few tables. Instead of loading a listfile/manifest we just hardcode the IDs. Add more if needed. 
- switch (tableName.ToLower()) - { - case "itemsparse": - fileDataID = 1572924; - break; - case "spellname": - fileDataID = 1990283; - break; - case "map": - fileDataID = 1349477; - break; - case "mapdifficulty": - fileDataID = 1367868; - break; - default: - throw new Exception("FileDataID not known for table " + tableName); - } + private struct DBDManifestEntry { + public string tableName; + public string tableHash; + public uint dbcFileDataID; + public uint db2FileDataID; + } + + private void LoadDBDManifest() + { + var manifest = client.GetStringAsync("https://raw.githubusercontent.com/wowdev/WoWDBDefs/master/manifest.json").Result; + var dbdManifest = Newtonsoft.Json.JsonConvert.DeserializeObject>(manifest); + + foreach(var entry in dbdManifest) + DB2FileDataIDs[entry.tableName] = entry.db2FileDataID; + } + + public string[] GetAllTableNames() + { + return DB2FileDataIDs.Keys.ToArray(); + } + + public Stream StreamForTableName(string tableName, string build) + { + if (!DB2FileDataIDs.TryGetValue(tableName, out uint fileDataID)) + throw new Exception("Unable to find table " + tableName + " in FDID lookup!"); if(!Directory.Exists("DBCCache")) Directory.CreateDirectory("DBCCache"); - var cacheFile = Path.Combine("DBCCache", tableName + "-" + build + ".db2"); + if (!Directory.Exists(Path.Combine("DBCCache", build))) + Directory.CreateDirectory(Path.Combine("DBCCache", build)); + + var cacheFile = Path.Combine("DBCCache", build, tableName + ".db2"); if (File.Exists(cacheFile)) { var lastWrite = File.GetLastWriteTime(cacheFile); @@ -46,6 +62,9 @@ public Stream StreamForTableName(string tableName, string build) } var bytes = client.GetByteArrayAsync("https://wago.tools/api/casc/" + fileDataID + "?version=" + build).Result; + if (bytes.Length == 0 || (bytes.Length < 40 && Encoding.ASCII.GetString(bytes).Contains("error"))) + throw new FileNotFoundException(); + File.WriteAllBytes(cacheFile, bytes); return new MemoryStream(bytes); } diff --git a/DBCD.Tests/ReadingTest.cs b/DBCD.Tests/ReadingTest.cs index c6f8b3d..df9ca2e 100644 --- a/DBCD.Tests/ReadingTest.cs +++ b/DBCD.Tests/ReadingTest.cs @@ -1,5 +1,8 @@ using DBCD.Providers; using Microsoft.VisualStudio.TestTools.UnitTesting; +using System; +using System.IO; +using System.Net.Http; namespace DBCD.Tests { @@ -105,6 +108,44 @@ public void TestGithubDBDProviderWithCache() githubDBDProvider.StreamForTableName("ItemSparse"); } + [TestMethod] + public void TestReadingAllDB2s() + { + return; // Only run this test manually + + var build = "9.1.0.39653"; // WDC3 + + var dbcd = new DBCD(wagoDBCProvider, githubDBDProvider); + var allDB2s = wagoDBCProvider.GetAllTableNames(); + + foreach (var tableName in allDB2s) + { + // I think this table is meant to crash the test, so we skip it + if (tableName == "UnitTestSparse") + continue; + + try + { + var storage = dbcd.Load(tableName, build); + } + catch(FileNotFoundException e) + { + Console.WriteLine($"Failed to load {tableName} for build {build}, does not exist in build."); + } + catch(AggregateException e) + { + if(e.InnerException is HttpRequestException) + { + Console.WriteLine($"Failed to load {tableName} for build {build}, does not exist."); + } + else + { + throw e; + } + } + } + } + //[TestMethod] //public void TestHotfixApplying() //{ diff --git a/DBCD.Tests/TestDBDProvider.cs b/DBCD.Tests/TestDBDProvider.cs deleted file mode 100644 index 4e27385..0000000 --- a/DBCD.Tests/TestDBDProvider.cs +++ /dev/null @@ -1,16 +0,0 @@ -using DBCD.Providers; -using System; -using System.Collections.Generic; 
-using System.IO; -using System.Text; - -namespace DBCD.Tests -{ - class TestDBDProvider : IDBDProvider - { - public Stream StreamForTableName(string tableName, string build = null) - { - return File.OpenRead(Path.Combine(@"C:\Users\TomSpearman\Downloads\WoWDBDefs\definitions", tableName + ".dbd")); - } - } -} diff --git a/DBCD.Tests/WritingTest.cs b/DBCD.Tests/WritingTest.cs index 4bdd02a..ab96123 100644 --- a/DBCD.Tests/WritingTest.cs +++ b/DBCD.Tests/WritingTest.cs @@ -1,7 +1,10 @@ using DBCD.Providers; using Microsoft.VisualStudio.TestTools.UnitTesting; using Newtonsoft.Json; +using System; +using System.Collections.Generic; using System.IO; +using System.Net.Http; namespace DBCD.Tests { @@ -9,8 +12,9 @@ namespace DBCD.Tests public class WritingTest { public static GithubDBDProvider DBDProvider { get; } = new GithubDBDProvider(true); - public static string InputPath { get; } = $"{Directory.GetCurrentDirectory()}\\dbc"; - public static DBCD InputDBCD { get; } = new DBCD(new FilesystemDBCProvider(InputPath), DBDProvider); + public static string InputPath { get; } = $"{Directory.GetCurrentDirectory()}\\DBCCache"; + public static WagoDBCProvider wagoDBCProvider = new(); + public static DBCD InputDBCD { get; } = new DBCD(wagoDBCProvider, DBDProvider); public static DBCD SavedDBCD { get; } = new DBCD(new FilesystemDBCProvider("tmp"), DBDProvider); public static string Build { get; } = "9.1.0.39653"; @@ -18,36 +22,65 @@ public class WritingTest [TestMethod] public void TestWritingAllDB2s() { - string[] allDB2s = Directory.GetFiles(InputPath, "*.db2", SearchOption.TopDirectoryOnly); + return; // Only run this test manually + + var allDB2s = wagoDBCProvider.GetAllTableNames(); if (Directory.Exists("tmp")) Directory.Delete("tmp", true); + Directory.CreateDirectory("tmp"); - foreach (var db2File in allDB2s) + foreach (var tableName in allDB2s) { - if (Utilities.IO.TryGetExactPath(db2File, out string exactPath)) - { - var tableName = Path.GetFileNameWithoutExtension(exactPath); + if (tableName == "UnitTestSparse") + continue; + + // TODO: possible DBD being wrong + if (tableName == "SummonProperties") + continue; + + var originalValues = new List(); + try + { var originalStorage = InputDBCD.Load(tableName, Build); + originalValues.AddRange(originalStorage.Values); originalStorage.Save($"tmp/{tableName}.db2"); + } + catch (FileNotFoundException e) + { + // This is not a reading test, I could not care less + continue; + } + catch (AggregateException e) + { + if (e.InnerException is HttpRequestException) + { + // This is not a reading test, I could not care less + continue; + } + else + { + throw e; + } + } - var savedStorage = SavedDBCD.Load(tableName, Build); + var savedStorage = SavedDBCD.Load(tableName, Build); - // Lazy compare - var originalJson = JsonConvert.SerializeObject(originalStorage.Values, Formatting.Indented); - var newJson = JsonConvert.SerializeObject(savedStorage.Values, Formatting.Indented); - if (originalJson != newJson) - { - File.WriteAllText("original.json", originalJson); - File.WriteAllText("new.json", newJson); + // Lazy compare + var originalJson = JsonConvert.SerializeObject(originalValues, Formatting.Indented); + var newJson = JsonConvert.SerializeObject(savedStorage.Values, Formatting.Indented); + if (originalJson != newJson) + { + File.WriteAllText("original.json", originalJson); + File.WriteAllText("new.json", newJson); - throw new InvalidDataException($"The saved storage {tableName} should not differ from the original one!"); - } + throw new 
InvalidDataException($"The saved storage {tableName} should not differ from the original one!"); } } + Directory.Delete("tmp", true); } } From 5003e224b4a52f98e93ef503ee08c34fdf5e796a Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Thu, 8 Aug 2024 20:49:44 +0200 Subject: [PATCH 21/40] Add util for checking if a build exists in a DBD for tests --- DBCD/Providers/FilesystemDBDProvider.cs | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/DBCD/Providers/FilesystemDBDProvider.cs b/DBCD/Providers/FilesystemDBDProvider.cs index 958b46c..0ce040c 100644 --- a/DBCD/Providers/FilesystemDBDProvider.cs +++ b/DBCD/Providers/FilesystemDBDProvider.cs @@ -1,4 +1,6 @@ using System.IO; +using System.Linq; +using DBDefsLib; namespace DBCD.Providers { @@ -11,6 +13,30 @@ public class FilesystemDBDProvider : IDBDProvider public FilesystemDBDProvider(string directory) => this.directory = directory; + /// + /// Function that checks if a certain build exists in a DBD file. Note that this causes a full read/parse of the file. + /// + public bool ContainsBuild(string tableName, string build) + { + if(!File.Exists(Path.Combine(directory, $"{tableName}.dbd"))) + return false; + + var reader = new DBDReader(); + var definition = reader.Read(StreamForTableName(tableName)); + var targetBuild = new Build(build); + + foreach (var versionDefinition in definition.versionDefinitions) + { + if (versionDefinition.builds.Contains(targetBuild)) + return true; + + if(versionDefinition.buildRanges.Any(range => range.Contains(targetBuild))) + return true; + } + + return false; + } + public Stream StreamForTableName(string tableName, string build = null) => File.OpenRead(Path.Combine(directory, $"{tableName}.dbd")); } } From e9ae98c80dbebdbe584be264616ed3c55a55a26e Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 01:35:23 +0200 Subject: [PATCH 22/40] Fix WDC4/WDC5 writing crashes --- DBCD.IO/Readers/WDC4Reader.cs | 11 ++++++----- DBCD.IO/Readers/WDC5Reader.cs | 11 ++++++----- DBCD.IO/Writers/WDC4Writer.cs | 17 ++++++++++++----- DBCD.IO/Writers/WDC5Writer.cs | 22 ++++++++++++++-------- 4 files changed, 38 insertions(+), 23 deletions(-) diff --git a/DBCD.IO/Readers/WDC4Reader.cs b/DBCD.IO/Readers/WDC4Reader.cs index e440b6e..489f81a 100644 --- a/DBCD.IO/Readers/WDC4Reader.cs +++ b/DBCD.IO/Readers/WDC4Reader.cs @@ -283,12 +283,8 @@ public WDC4Reader(Stream stream) int palletDataSize = reader.ReadInt32(); // in bytes, sizeof(DBC2PalletValue) == 4 int sectionsCount = reader.ReadInt32(); - if (sectionsCount == 0 || RecordsCount == 0) - return; - - var sections = reader.ReadArray(sectionsCount); + var sections = (sectionsCount == 0) ? 
new List() : reader.ReadArray(sectionsCount).ToList(); this.m_sections = sections.OfType().ToList(); - this.m_encryptedIDs = new Dictionary(); // field meta data Meta = reader.ReadArray(FieldsCount); @@ -296,6 +292,11 @@ public WDC4Reader(Stream stream) // column meta data ColumnMeta = reader.ReadArray(FieldsCount); + if (sectionsCount == 0 || RecordsCount == 0) + return; + + this.m_encryptedIDs = new Dictionary(); + // pallet data PalletData = new Value32[ColumnMeta.Length][]; for (int i = 0; i < ColumnMeta.Length; i++) diff --git a/DBCD.IO/Readers/WDC5Reader.cs b/DBCD.IO/Readers/WDC5Reader.cs index 3199e14..d194ecc 100644 --- a/DBCD.IO/Readers/WDC5Reader.cs +++ b/DBCD.IO/Readers/WDC5Reader.cs @@ -287,12 +287,8 @@ public WDC5Reader(Stream stream) int palletDataSize = reader.ReadInt32(); // in bytes, sizeof(DBC2PalletValue) == 4 int sectionsCount = reader.ReadInt32(); - if (sectionsCount == 0 || RecordsCount == 0) - return; - - var sections = reader.ReadArray(sectionsCount); + var sections = (sectionsCount == 0) ? new List() : reader.ReadArray(sectionsCount).ToList(); this.m_sections = sections.OfType().ToList(); - this.m_encryptedIDs = new Dictionary(); // field meta data Meta = reader.ReadArray(FieldsCount); @@ -300,6 +296,11 @@ public WDC5Reader(Stream stream) // column meta data ColumnMeta = reader.ReadArray(FieldsCount); + if(sectionsCount == 0 || RecordsCount == 0) + return; + + this.m_encryptedIDs = new Dictionary(); + // pallet data PalletData = new Value32[ColumnMeta.Length][]; for (int i = 0; i < ColumnMeta.Length; i++) diff --git a/DBCD.IO/Writers/WDC4Writer.cs b/DBCD.IO/Writers/WDC4Writer.cs index 0afe6e6..01c96f9 100644 --- a/DBCD.IO/Writers/WDC4Writer.cs +++ b/DBCD.IO/Writers/WDC4Writer.cs @@ -339,7 +339,7 @@ public WDC4Writer(WDC4Reader reader, IDictionary storage, Stream stream) writer.Write(fileOffset); // FileOffset writer.Write(RecordsCount); // NumRecords writer.Write(StringTableSize); - writer.Write(0); // OffsetRecordsEndOffset + writer.Write(0); // OffsetRecordsEndOffset, this is set after writing the records for sparse tables writer.Write(Flags.HasFlagExt(DB2Flags.Index) ? RecordsCount * 4 : 0); // IndexDataSize writer.Write(referenceDataSize); // ParentLookupDataSize writer.Write(Flags.HasFlagExt(DB2Flags.Sparse) ? 
RecordsCount : 0); // OffsetMapIDCount @@ -404,7 +404,7 @@ public WDC4Writer(WDC4Reader reader, IDictionary storage, Stream stream) if (Flags.HasFlagExt(DB2Flags.Sparse)) { long oldPos = writer.BaseStream.Position; - writer.BaseStream.Position = 92; + writer.BaseStream.Position = HeaderSize + 20; writer.Write((uint)oldPos); writer.BaseStream.Position = oldPos; } @@ -424,6 +424,10 @@ public WDC4Writer(WDC4Reader reader, IDictionary storage, Stream stream) if (Flags.HasFlagExt(DB2Flags.Sparse)) writer.WriteArray(SparseEntries.Values.ToArray()); + // sparse data ids (if flag 0x2 is set) + if (Flags.HasFlagExt(DB2Flags.Sparse) && Flags.HasFlag(DB2Flags.SecondaryKey)) + writer.WriteArray(SparseEntries.Keys.ToArray()); + // reference data if (ReferenceData.Count > 0) { @@ -434,12 +438,15 @@ public WDC4Writer(WDC4Reader reader, IDictionary storage, Stream stream) for (int i = 0; i < ReferenceData.Count; i++) { writer.Write(ReferenceData[i]); - writer.Write(i); + if (Flags.HasFlag(DB2Flags.SecondaryKey)) + writer.Write(SparseEntries.Keys.ElementAt(i)); + else + writer.Write(i); } } - // sparse data ids - if (Flags.HasFlagExt(DB2Flags.Sparse)) + // sparse data ids (if flag 0x2 is not set) + if (Flags.HasFlagExt(DB2Flags.Sparse) && !Flags.HasFlag(DB2Flags.SecondaryKey)) writer.WriteArray(SparseEntries.Keys.ToArray()); } } diff --git a/DBCD.IO/Writers/WDC5Writer.cs b/DBCD.IO/Writers/WDC5Writer.cs index fe88421..a18e249 100644 --- a/DBCD.IO/Writers/WDC5Writer.cs +++ b/DBCD.IO/Writers/WDC5Writer.cs @@ -276,7 +276,7 @@ private static void WriteFieldValueArray(BitWriter r, FieldMetaData field class WDC5Writer : BaseWriter where T : class { - private const int HeaderSize = 200; + private const int HeaderSize = 204; private const uint WDC5FmtSig = 0x35434457; // WDC5 public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) : base(reader) @@ -308,9 +308,8 @@ public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) int maxIndex = storage.Keys.MaxOrDefault(); writer.Write(WDC5FmtSig); - writer.Write((uint)5); // numaric version + writer.Write((uint)5); // numeric version writer.Write(Encoding.ASCII.GetBytes(staticVersionString.PadRight(128, '\0'))); - writer.Write(RecordsCount); writer.Write(FieldsCount); writer.Write(RecordSize); @@ -345,7 +344,7 @@ public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) writer.Write(fileOffset); // FileOffset writer.Write(RecordsCount); // NumRecords writer.Write(StringTableSize); - writer.Write(0); // OffsetRecordsEndOffset + writer.Write(0); // OffsetRecordsEndOffset, this is set after writing the records for sparse tables writer.Write(Flags.HasFlagExt(DB2Flags.Index) ? RecordsCount * 4 : 0); // IndexDataSize writer.Write(referenceDataSize); // ParentLookupDataSize writer.Write(Flags.HasFlagExt(DB2Flags.Sparse) ? 
RecordsCount : 0); // OffsetMapIDCount @@ -410,7 +409,7 @@ public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) if (Flags.HasFlagExt(DB2Flags.Sparse)) { long oldPos = writer.BaseStream.Position; - writer.BaseStream.Position = 92; + writer.BaseStream.Position = HeaderSize + 20; writer.Write((uint)oldPos); writer.BaseStream.Position = oldPos; } @@ -430,6 +429,10 @@ public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) if (Flags.HasFlagExt(DB2Flags.Sparse)) writer.WriteArray(SparseEntries.Values.ToArray()); + // sparse data ids (if flag 0x2 is set) + if (Flags.HasFlagExt(DB2Flags.Sparse) && Flags.HasFlag(DB2Flags.SecondaryKey)) + writer.WriteArray(SparseEntries.Keys.ToArray()); + // reference data if (ReferenceData.Count > 0) { @@ -440,12 +443,15 @@ public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) for (int i = 0; i < ReferenceData.Count; i++) { writer.Write(ReferenceData[i]); - writer.Write(i); + if (Flags.HasFlag(DB2Flags.SecondaryKey)) + writer.Write(SparseEntries.Keys.ElementAt(i)); + else + writer.Write(i); } } - // sparse data ids - if (Flags.HasFlagExt(DB2Flags.Sparse)) + // sparse data ids (if flag 0x2 is not set) + if (Flags.HasFlagExt(DB2Flags.Sparse) && !Flags.HasFlag(DB2Flags.SecondaryKey)) writer.WriteArray(SparseEntries.Keys.ToArray()); } } From b9eddb5f82ab6ef11b06c882b980ddb0040d49a6 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 03:58:33 +0200 Subject: [PATCH 23/40] Also look for DBCs in FileSystemProvider --- DBCD/Providers/FilesystemDBCProvider.cs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/DBCD/Providers/FilesystemDBCProvider.cs b/DBCD/Providers/FilesystemDBCProvider.cs index b450496..d6fef2c 100644 --- a/DBCD/Providers/FilesystemDBCProvider.cs +++ b/DBCD/Providers/FilesystemDBCProvider.cs @@ -3,7 +3,7 @@ namespace DBCD.Providers { /// - /// Loads DB2 files from a local directory. + /// Loads DBC/DB2 files from a local directory. 
/// public class FilesystemDBCProvider : IDBCProvider { @@ -11,6 +11,15 @@ public class FilesystemDBCProvider : IDBCProvider public FilesystemDBCProvider(string directory) => this.directory = directory; - public Stream StreamForTableName(string tableName, string build) => File.OpenRead(Path.Combine(directory, $"{tableName}.db2")); + public Stream StreamForTableName(string tableName, string build) + { + if(File.Exists(Path.Combine(directory, $"{tableName}.db2"))) + return File.OpenRead(Path.Combine(directory, $"{tableName}.db2")); + + if(File.Exists(Path.Combine(directory, $"{tableName}.dbc"))) + return File.OpenRead(Path.Combine(directory, $"{tableName}.dbc")); + + throw new FileNotFoundException("Unable to find DBC/DB2 file on disk for table " + tableName); + } } } From ca15a14fbf0d4a880e13b56dbb50fcbfc1a755ab Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 04:01:19 +0200 Subject: [PATCH 24/40] WDC5 schema reading/writing --- DBCD.IO/Readers/BaseReader.cs | 22 ++++++++++++++++++---- DBCD.IO/Readers/WDC5Reader.cs | 5 ++--- DBCD.IO/Writers/WDC5Writer.cs | 4 ++-- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/DBCD.IO/Readers/BaseReader.cs b/DBCD.IO/Readers/BaseReader.cs index 74246c3..e10ccae 100644 --- a/DBCD.IO/Readers/BaseReader.cs +++ b/DBCD.IO/Readers/BaseReader.cs @@ -12,16 +12,30 @@ abstract class BaseReader public int FieldsCount { get; protected set; } public int RecordSize { get; protected set; } public int StringTableSize { get; protected set; } - public uint TableHash { get; protected set; } - public uint LayoutHash { get; protected set; } + + // WDB2-WDB3 + public uint Build { get; protected set; } + + // WDB2+ public int MinIndex { get; protected set; } public int MaxIndex { get; protected set; } - public int IdFieldIndex { get; protected set; } + + // WDB3+ public DB2Flags Flags { get; protected set; } public int Locale { get; protected set; } - public uint Build { get; protected set; } + + // WDB5+ + public uint TableHash { get; protected set; } + public uint LayoutHash { get; protected set; } + public int IdFieldIndex { get; protected set; } + + // WDC1+ public int PackedDataOffset { get; protected set; } + // WDC5+ + public uint SchemaVersion { get; protected set; } + public string SchemaString { get; protected set; } + #region Data public FieldMetaData[] Meta { get; protected set; } diff --git a/DBCD.IO/Readers/WDC5Reader.cs b/DBCD.IO/Readers/WDC5Reader.cs index d194ecc..b8484a1 100644 --- a/DBCD.IO/Readers/WDC5Reader.cs +++ b/DBCD.IO/Readers/WDC5Reader.cs @@ -265,9 +265,8 @@ public WDC5Reader(Stream stream) if (magic != WDC5FmtSig) throw new InvalidDataException("WDC5 file is corrupted!"); - var versionButInteger = reader.ReadUInt32(); - var buildStringWithFarTooMuchPadding = reader.ReadBytes(128); - + SchemaVersion = reader.ReadUInt32(); + SchemaString = Encoding.UTF8.GetString(reader.ReadBytes(128)).TrimEnd('\0'); RecordsCount = reader.ReadInt32(); FieldsCount = reader.ReadInt32(); RecordSize = reader.ReadInt32(); diff --git a/DBCD.IO/Writers/WDC5Writer.cs b/DBCD.IO/Writers/WDC5Writer.cs index a18e249..952d45a 100644 --- a/DBCD.IO/Writers/WDC5Writer.cs +++ b/DBCD.IO/Writers/WDC5Writer.cs @@ -300,7 +300,7 @@ public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); - var staticVersionString = "WowStatic_Patch_10_2_7"; // @TODO Make string dynamic based on build + var staticVersionString = reader.SchemaString; // TODO: Allow overriding using (var 
writer = new BinaryWriter(stream)) { @@ -308,7 +308,7 @@ public WDC5Writer(WDC5Reader reader, IDictionary storage, Stream stream) int maxIndex = storage.Keys.MaxOrDefault(); writer.Write(WDC5FmtSig); - writer.Write((uint)5); // numeric version + writer.Write(reader.SchemaVersion); writer.Write(Encoding.ASCII.GetBytes(staticVersionString.PadRight(128, '\0'))); writer.Write(RecordsCount); writer.Write(FieldsCount); From 64479c5005c42df0a8063645d22ad02f5bd3548b Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 06:13:27 +0200 Subject: [PATCH 25/40] Fix some WDC1/WDC2 crashes --- DBCD.IO/Writers/WDC1Writer.cs | 11 +++++++---- DBCD.IO/Writers/WDC2Writer.cs | 11 +++++++---- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/DBCD.IO/Writers/WDC1Writer.cs b/DBCD.IO/Writers/WDC1Writer.cs index 0f85b49..b49546a 100644 --- a/DBCD.IO/Writers/WDC1Writer.cs +++ b/DBCD.IO/Writers/WDC1Writer.cs @@ -219,7 +219,9 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) StringTableSize++; PackedDataOffset = reader.PackedDataOffset; - HandleCompression(storage); + var (commonDataSize, palletDataSize, referenceDataSize) = (0, 0, 0); + if (ColumnMeta != null) + HandleCompression(storage); WDC1RowSerializer serializer = new WDC1RowSerializer(this); serializer.Serialize(storage); @@ -230,7 +232,8 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) RecordsCount = serializer.Records.Count - CopyData.Count; - var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); + if (ColumnMeta != null) + (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); using (var writer = new BinaryWriter(stream)) { @@ -254,10 +257,10 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) writer.Write(FieldsCount); // totalFieldCount writer.Write(PackedDataOffset); - writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount + writer.Write(ReferenceData != null && ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount writer.Write(0); // sparseTableOffset writer.Write(Flags.HasFlagExt(DB2Flags.Index) ? RecordsCount * 4 : 0); // IndexDataSize - writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize + writer.Write(ColumnMeta != null && ColumnMeta.Length > 0 ? 
ColumnMeta.Length * 24 : 0); // ColumnMetaDataSize writer.Write(commonDataSize); writer.Write(palletDataSize); writer.Write(referenceDataSize); diff --git a/DBCD.IO/Writers/WDC2Writer.cs b/DBCD.IO/Writers/WDC2Writer.cs index 87df27b..78fd2ae 100644 --- a/DBCD.IO/Writers/WDC2Writer.cs +++ b/DBCD.IO/Writers/WDC2Writer.cs @@ -282,7 +282,9 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) StringTableSize++; PackedDataOffset = reader.PackedDataOffset; - HandleCompression(storage); + var (commonDataSize, palletDataSize, referenceDataSize) = (0, 0, 0); + if (ColumnMeta != null) + HandleCompression(storage); WDC2RowSerializer serializer = new WDC2RowSerializer(this); serializer.Serialize(storage); @@ -295,7 +297,8 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) RecordsCount = serializer.Records.Count - CopyData.Count; - var (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); + if (ColumnMeta != null) + (commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes(); using (var writer = new BinaryWriter(stream)) { @@ -318,8 +321,8 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) writer.Write(FieldsCount); // totalFieldCount writer.Write(PackedDataOffset); - writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount - writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize + writer.Write(ReferenceData != null && ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount + writer.Write(ColumnMeta != null && ColumnMeta.Length > 0 ? ColumnMeta.Length * 24 : 0); // ColumnMetaDataSize writer.Write(commonDataSize); writer.Write(palletDataSize); writer.Write(1); // sections count From 93740e82b57a5365a53c896f6f14466e815e615e Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 06:22:07 +0200 Subject: [PATCH 26/40] Sign-extend WDC1 Immediate values --- DBCD.IO/Writers/BaseWriter.cs | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/DBCD.IO/Writers/BaseWriter.cs b/DBCD.IO/Writers/BaseWriter.cs index 1c1749c..2d0c4df 100644 --- a/DBCD.IO/Writers/BaseWriter.cs +++ b/DBCD.IO/Writers/BaseWriter.cs @@ -183,17 +183,34 @@ public void HandleCompression(IDictionary storage) newCompressedSize = 32; else { - var maxValue = storage.Values.Count switch + if ((meta.Immediate.Flags & 0x1) == 0x1) { - 0 => 0U, - _ => storage.Values.AsParallel().Max(row => + var largestMSB = storage.Values.Count switch { - var value32 = Value32.Create(info.Getter(row)); - return value32.GetValue(); - }), - }; - - newCompressedSize = maxValue.MostSignificantBit(); + 0 => 0, + _ => storage.Values.AsParallel().Max(row => + { + var value32 = Value32.Create(info.Getter(row)); + return value32.GetValue().MostSignificantBit(); + }), + }; + + newCompressedSize = largestMSB + 1; + } + else + { + var maxValue = storage.Values.Count switch + { + 0 => 0U, + _ => storage.Values.AsParallel().Max(row => + { + var value32 = Value32.Create(info.Getter(row)); + return value32.GetValue(); + }), + }; + + newCompressedSize = maxValue.MostSignificantBit(); + } } break; } From 324cf920014b4ce0cc4ef83dae0b542b2535ca14 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 07:06:38 +0200 Subject: [PATCH 27/40] Don't handle noninline relations as fields --- DBCD.IO/Writers/WDC1Writer.cs | 26 +++++++++++++++----------- DBCD.IO/Writers/WDC2Writer.cs | 28 ++++++++++++++++------------ 2 files changed, 31 insertions(+), 23 deletions(-) diff --git 
a/DBCD.IO/Writers/WDC1Writer.cs b/DBCD.IO/Writers/WDC1Writer.cs index b49546a..bec1bc6 100644 --- a/DBCD.IO/Writers/WDC1Writer.cs +++ b/DBCD.IO/Writers/WDC1Writer.cs @@ -56,22 +56,26 @@ public void Serialize(int id, T row) int fieldIndex = i - indexFieldOffSet; // relationship field, used for faster lookup on IDs - if (info.IsRelation) - m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); - - if (info.IsArray) + if (info.IsNonInlineRelation) { - if (arrayWriters.TryGetValue(info.FieldType, out var writer)) - writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); - else - throw new Exception("Unhandled array type: " + typeof(T).Name); + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); } else { - if (simpleWriters.TryGetValue(info.FieldType, out var writer)) - writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], info.Getter(row)); + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } else - throw new Exception("Unhandled field type: " + typeof(T).Name); + { + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } } } diff --git a/DBCD.IO/Writers/WDC2Writer.cs b/DBCD.IO/Writers/WDC2Writer.cs index 78fd2ae..1900a46 100644 --- a/DBCD.IO/Writers/WDC2Writer.cs +++ b/DBCD.IO/Writers/WDC2Writer.cs @@ -57,22 +57,26 @@ public void Serialize(int id, T row) int fieldIndex = i - indexFieldOffSet; // relationship field, used for faster lookup on IDs - if (info.IsRelation) - m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); - - if (info.IsArray) + if (info.IsNonInlineRelation) { - if (arrayWriters.TryGetValue(info.FieldType, out var writer)) - writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); - else - throw new Exception("Unhandled array type: " + typeof(T).Name); + m_writer.ReferenceData.Add((int)Convert.ChangeType(info.Getter(row), typeof(int))); } else { - if (simpleWriters.TryGetValue(info.FieldType, out var writer)) - writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], info.Getter(row)); + if (info.IsArray) + { + if (arrayWriters.TryGetValue(info.FieldType, out var writer)) + writer(bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], (Array)info.Getter(row)); + else + throw new Exception("Unhandled array type: " + typeof(T).Name); + } else - throw new Exception("Unhandled field type: " + typeof(T).Name); + { + if (simpleWriters.TryGetValue(info.FieldType, out var writer)) + writer(id, bitWriter, m_writer, m_fieldMeta[fieldIndex], ColumnMeta[fieldIndex], PalletData[fieldIndex], CommonData[fieldIndex], info.Getter(row)); + else + throw new Exception("Unhandled field type: " + typeof(T).Name); + } } } @@ 
-290,7 +294,7 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) serializer.Serialize(storage); // We write the copy rows if and only if it saves space and the table hasn't any reference rows. - if ((RecordSize) >= sizeof(int) * 2 && ReferenceData.Count == 0) + if ((RecordSize) >= sizeof(int) * 2 && (ReferenceData == null || ReferenceData.Count == 0)) serializer.GetCopyRows(); serializer.UpdateStringOffsets(storage); From 545e5e010d9ba0a1b6c9a3c34b57731ed8547cfd Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 07:07:00 +0200 Subject: [PATCH 28/40] WDC2 writing crash fix --- DBCD.IO/Readers/WDC2Reader.cs | 8 ++++---- DBCD.IO/Writers/BaseWriter.cs | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/DBCD.IO/Readers/WDC2Reader.cs b/DBCD.IO/Readers/WDC2Reader.cs index 6065b5e..defe868 100644 --- a/DBCD.IO/Readers/WDC2Reader.cs +++ b/DBCD.IO/Readers/WDC2Reader.cs @@ -288,15 +288,15 @@ public WDC2Reader(Stream stream) if (sectionsCount > 1) throw new Exception("WDC2 only supports 1 section"); - if (sectionsCount == 0 || RecordsCount == 0) - return; - - var sections = reader.ReadArray(sectionsCount).ToList(); + var sections = (sectionsCount == 0) ? new List() : reader.ReadArray(sectionsCount).ToList(); this.m_sections = sections.OfType().ToList(); // field meta data Meta = reader.ReadArray(FieldsCount); + if (RecordsCount == 0) + return; + // column meta data ColumnMeta = reader.ReadArray(FieldsCount); diff --git a/DBCD.IO/Writers/BaseWriter.cs b/DBCD.IO/Writers/BaseWriter.cs index 2d0c4df..a4a1330 100644 --- a/DBCD.IO/Writers/BaseWriter.cs +++ b/DBCD.IO/Writers/BaseWriter.cs @@ -264,6 +264,8 @@ public void HandleCompression(IDictionary storage) } PackedDataOffset = Math.Max(0, PackedDataOffset); + + // TODO: Review how Blizzard handles this. This behavior matches a lot of the original DB2s, but not all. Maybe some math needs doing to make sure we're on 4 byte boundaries? 
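// Worked instance of the conversion below, assuming RecordSize holds the record's bit width at this point: 17 bits -> (17 + 8 - 1) / 8 = 3 bytes, i.e. ceil(bits / 8).
// If the TODO's 4-byte-boundary hunch turns out to be right, a follow-up rounding could look like: RecordSize = ((RecordSize + 3) / 4) * 4; (illustrative only, not part of the patch)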
RecordSize = ((RecordSize + 8 - 1) / 8); } From 1079e526384b784b97c06c8ece7633ca2b1e5fad Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 08:57:30 +0200 Subject: [PATCH 29/40] Workaround offset issue in WDC1/WDC2 sparse tables --- DBCD.IO/Writers/WDC1Writer.cs | 6 ++++++ DBCD.IO/Writers/WDC2Writer.cs | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/DBCD.IO/Writers/WDC1Writer.cs b/DBCD.IO/Writers/WDC1Writer.cs index bec1bc6..695d8a8 100644 --- a/DBCD.IO/Writers/WDC1Writer.cs +++ b/DBCD.IO/Writers/WDC1Writer.cs @@ -299,6 +299,12 @@ public WDC1Writer(WDC1Reader reader, IDictionary storage, Stream stream) writer.BaseStream.Position = oldPos; WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1); + + // Ensure we are at the right offset + var currentOffset = writer.BaseStream.Position; + var supposedOffset = oldPos + ((maxIndex - minIndex + 1) * 6); + if (currentOffset != supposedOffset) + writer.BaseStream.Position = supposedOffset; } // index table diff --git a/DBCD.IO/Writers/WDC2Writer.cs b/DBCD.IO/Writers/WDC2Writer.cs index 1900a46..45810e0 100644 --- a/DBCD.IO/Writers/WDC2Writer.cs +++ b/DBCD.IO/Writers/WDC2Writer.cs @@ -399,6 +399,12 @@ public WDC2Writer(WDC2Reader reader, IDictionary storage, Stream stream) writer.BaseStream.Position = oldPos; WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1); + + // Ensure we are at the right offset + var currentOffset = writer.BaseStream.Position; + var supposedOffset = oldPos + ((maxIndex - minIndex + 1) * 6); + if (currentOffset != supposedOffset) + writer.BaseStream.Position = supposedOffset; } // index table From 247c1ccb3f9a6e766eb22f4705e94d963f82d3cb Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 08:57:40 +0200 Subject: [PATCH 30/40] Fix WDC1/WDC2 offset map writing --- DBCD.IO/Writers/BaseWriter.cs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/DBCD.IO/Writers/BaseWriter.cs b/DBCD.IO/Writers/BaseWriter.cs index a4a1330..e54232f 100644 --- a/DBCD.IO/Writers/BaseWriter.cs +++ b/DBCD.IO/Writers/BaseWriter.cs @@ -83,7 +83,10 @@ public void WriteOffsetRecords(BinaryWriter writer, IDBRowSerializer serializ { var sparseIdLookup = new Dictionary(sparseCount); - for (int i = 0; i < sparseCount; i++) + var startRecord = serializer.Records.First().Key; + var endRecord = serializer.Records.Last().Key; + + for (int i = startRecord; i <= endRecord; i++) { if (serializer.Records.TryGetValue(i, out var record)) { @@ -91,12 +94,12 @@ public void WriteOffsetRecords(BinaryWriter writer, IDBRowSerializer serializ { // copy records use their parent's offset writer.Write(sparseIdLookup[copyid]); - writer.Write(record.TotalBytesWrittenOut); + writer.Write((ushort)record.TotalBytesWrittenOut); } else { writer.Write(sparseIdLookup[i] = recordOffset); - writer.Write(record.TotalBytesWrittenOut); + writer.Write((ushort)record.TotalBytesWrittenOut); recordOffset += (uint)record.TotalBytesWrittenOut; } } From fca6efb1b67e845001b7f306bcedde73fdf383f6 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 11:48:05 +0200 Subject: [PATCH 31/40] Attempt at cleaning up DBCDStorage.cs diff --- DBCD/DBCDStorage.cs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/DBCD/DBCDStorage.cs b/DBCD/DBCDStorage.cs index df4c370..c8a9a3d 100644 --- a/DBCD/DBCDStorage.cs +++ b/DBCD/DBCDStorage.cs @@ -108,11 +108,12 @@ public interface IDBCDStorage : IEnumerable>, IDictiona DBCDRow ConstructRow(int 
index); + Dictionary GetEncryptedSections(); + Dictionary GetEncryptedIDs(); + void ApplyingHotfixes(HotfixReader hotfixReader); void ApplyingHotfixes(HotfixReader hotfixReader, HotfixReader.RowProcessor processor); - Dictionary GetEncryptedSections(); - void Save(string filename); Dictionary ToDictionary(); @@ -145,15 +146,6 @@ public DBCDStorage(DBParser parser, Storage storage, DBCDInfo info) : base(ne storage.Clear(); } - IEnumerator> IEnumerable>.GetEnumerator() - { - var enumerator = GetEnumerator(); - while (enumerator.MoveNext()) - yield return new DynamicKeyValuePair(enumerator.Current.Key, enumerator.Current.Value); - } - - public Dictionary GetEncryptedSections() => this.parser.GetEncryptedSections(); - public void ApplyingHotfixes(HotfixReader hotfixReader) { this.ApplyingHotfixes(hotfixReader, null); @@ -166,7 +158,7 @@ public void ApplyingHotfixes(HotfixReader hotfixReader, HotfixReader.RowProcesso hotfixReader.ApplyHotfixes(mutableStorage, this.parser, processor); #if NETSTANDARD2_0 - foreach(var record in mutableStorage) + foreach (var record in mutableStorage) base[record.Key] = new DBCDRow(record.Key, record.Value, fieldAccessor); #else foreach (var (id, row) in mutableStorage) @@ -176,6 +168,16 @@ public void ApplyingHotfixes(HotfixReader hotfixReader, HotfixReader.RowProcesso base.Remove(key); } + IEnumerator> IEnumerable>.GetEnumerator() + { + var enumerator = GetEnumerator(); + while (enumerator.MoveNext()) + yield return new DynamicKeyValuePair(enumerator.Current.Key, enumerator.Current.Value); + } + + public Dictionary GetEncryptedSections() => this.parser.GetEncryptedSections(); + public Dictionary GetEncryptedIDs() => this.parser.GetEncryptedIDs(); + public void Save(string filename) { #if NETSTANDARD2_0 @@ -186,8 +188,6 @@ public void Save(string filename) foreach (var (id, record) in new SortedDictionary(this)) storage.Add(id, record.AsType()); #endif - - storage?.Save(filename); } From 7d649c55965ad1b1fbdfde427d1caadaecb16f16 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Fri, 9 Aug 2024 11:56:32 +0200 Subject: [PATCH 32/40] Make DBCDRow.ID public again --- DBCD/DBCDStorage.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DBCD/DBCDStorage.cs b/DBCD/DBCDStorage.cs index c8a9a3d..cbf8f18 100644 --- a/DBCD/DBCDStorage.cs +++ b/DBCD/DBCDStorage.cs @@ -13,7 +13,7 @@ namespace DBCD { public class DBCDRow : DynamicObject { - private int ID; + public int ID; private readonly dynamic raw; private FieldAccessor fieldAccessor; From 44840761c1a994b52dca38479b6b2f446e67a5fe Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Sat, 10 Aug 2024 03:09:07 +0200 Subject: [PATCH 33/40] Fix WDB6 oopsie --- DBCD.IO/Readers/WDB6Reader.cs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/DBCD.IO/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs index a1b1e8f..ffdf7de 100644 --- a/DBCD.IO/Readers/WDB6Reader.cs +++ b/DBCD.IO/Readers/WDB6Reader.cs @@ -215,7 +215,9 @@ public WDB6Reader(Stream stream) int commonDataSize = reader.ReadInt32(); // field meta data - Meta = reader.ReadArray(FieldsCount); + var meta = reader.ReadArray(FieldsCount); + Array.Resize(ref meta, totalFieldCount); + Meta = meta; if (RecordsCount == 0) return; @@ -282,10 +284,6 @@ public WDB6Reader(Stream stream) if (commonDataSize > 0) { - var meta = reader.ReadArray(FieldsCount); - Array.Resize(ref meta, totalFieldCount); - Meta = meta; - int fieldCount = reader.ReadInt32(); CommonData = new Dictionary[fieldCount]; From 52f6fceef43f40047939b7e53a9e113e0726634f 
Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Sat, 10 Aug 2024 03:21:09 +0200 Subject: [PATCH 34/40] Sync benchmarks --- DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs | 42 ++++++---- DBCD.Benchmark/Benchmarks/StringTableBench.cs | 76 +++++++++++++++++++ DBCD.Benchmark/DBCD.Benchmark.csproj | 5 +- DBCD.Benchmark/Program.cs | 2 +- 4 files changed, 108 insertions(+), 17 deletions(-) create mode 100644 DBCD.Benchmark/Benchmarks/StringTableBench.cs diff --git a/DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs b/DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs index 856bf39..e3f511f 100644 --- a/DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs +++ b/DBCD.Benchmark/Benchmarks/ReadingBenchmark.cs @@ -1,5 +1,4 @@ using BenchmarkDotNet.Attributes; -using DBCD.Benchmark.Utilities; using DBCD.Providers; namespace DBCD.Benchmark.Benchmarks @@ -7,25 +6,40 @@ namespace DBCD.Benchmark.Benchmarks [MemoryDiagnoser] public class ReadingBenchmark { - public static GithubDBDProvider DBDProvider { get; } = new GithubDBDProvider(true); - public static string InputPath { get; } = $"{Directory.GetCurrentDirectory()}\\dbc"; - public static DBCD InputDBCD { get; } = new DBCD(new FilesystemDBCProvider(InputPath), DBDProvider); - - public static string Build { get; } = "11.0.2.55959"; + private static readonly FilesystemDBDProvider localDBDProvider = new FilesystemDBDProvider("D:\\Projects\\WoWDBDefs\\definitions"); + private static readonly FilesystemDBCProvider localDBCProvider = new FilesystemDBCProvider("D:\\Projects\\DBCDStaging\\DBCD.Tests\\bin\\Debug\\net8.0\\DBCCache\\11.0.2.56044"); + private readonly string[] allDB2s = Directory.GetFiles("D:\\Projects\\DBCDStaging\\DBCD.Tests\\bin\\Debug\\net8.0\\DBCCache\\11.0.2.56044", "*.db2", SearchOption.AllDirectories).Select(x => Path.GetFileNameWithoutExtension(x)).ToArray(); + private readonly string build = "11.0.2.56044"; [Benchmark] - public void TestReadingAllDB2s() + public int TestReadingAllDB2s() { - string[] allDB2s = Directory.GetFiles(InputPath, "*.db2", SearchOption.TopDirectoryOnly); + var inputDBCD = new DBCD(localDBCProvider, localDBDProvider); - foreach (var db2File in allDB2s) + //var build = "3.3.5.12340"; // WDBC + //var build = "6.0.1.18179"; // WDB2 + //var build = "7.0.1.20740"; // WDB3, only 1 DBD sadly + //var build = "7.0.1.20810"; // WDB4, only 2 DBDs sadly + //var build = "7.2.0.23436"; // WDB5, only Map.db2 + //var build = "7.3.5.25928"; // WDB6 + //var build = "7.3.5.25928"; // WDC1 + //var build = "8.0.1.26231"; // WDC2 + //var build = "9.1.0.39653"; // WDC3 + //var build = "10.1.0.48480"; // WDC4 + var build = "11.0.2.56044"; // WDC5 + + foreach (var tableName in allDB2s) { - if (Utilities.IO.TryGetExactPath(db2File, out string exactPath)) - { - var tableName = Path.GetFileNameWithoutExtension(exactPath); - var originalStorage = InputDBCD.Load(tableName, Build); - } + if (tableName == "UnitTestSparse") + continue; + + if (!localDBDProvider.ContainsBuild(tableName, build)) + continue; + + var storage = inputDBCD.Load(tableName, build); } + + return allDB2s.Count(); } } } diff --git a/DBCD.Benchmark/Benchmarks/StringTableBench.cs b/DBCD.Benchmark/Benchmarks/StringTableBench.cs new file mode 100644 index 0000000..ad2b760 --- /dev/null +++ b/DBCD.Benchmark/Benchmarks/StringTableBench.cs @@ -0,0 +1,76 @@ +using BenchmarkDotNet.Attributes; +using System.Text; + +namespace DBCD.Benchmark.Benchmarks +{ + [MemoryDiagnoser] + public class StringTableBench + { + private static byte[] InputBytes = File.ReadAllBytes("E:\\stringtable.bytes"); + 
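// (Assumed input: a raw DB2 string-table block dumped to disk. OldMethod below re-parses it
// with per-byte ReadCString calls; NewMethod uses the bulk split-based ReadStringTable reader.)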
private static int StringTableSize = (int)InputBytes.Length; + + [Benchmark] + public void OldMethod() + { + using (var stream = new MemoryStream(InputBytes)) + using (var reader = new BinaryReader(stream)) + { + var StringTable = new Dictionary(StringTableSize / 0x20); + for (int i = 0; i < StringTableSize;) + { + long oldPos = reader.BaseStream.Position; + StringTable[i] = reader.ReadCString(); + i += (int)(reader.BaseStream.Position - oldPos); + } + } + } + + [Benchmark] + public void NewMethod() + { + using (var stream = new MemoryStream(InputBytes)) + using (var reader = new BinaryReader(stream)) + { + var StringTable = reader.ReadStringTable(StringTableSize); + } + } + } + + public static class BinaryReaderExtensions + { + public static string ReadCString(this BinaryReader reader) + { + var bytes = new List(); + byte b; + while ((b = reader.ReadByte()) != 0) + bytes.Add(b); + + return Encoding.UTF8.GetString(bytes.ToArray()); + } + + public static Dictionary ReadStringTable(this BinaryReader reader, int stringTableSize, int baseOffset = 0, bool usePos = false) + { + var StringTable = new Dictionary(stringTableSize / 0x20); + + if (stringTableSize == 0) + return StringTable; + + var curOfs = 0; + var decoded = Encoding.UTF8.GetString(reader.ReadBytes(stringTableSize)); + foreach (var str in decoded.Split('\0')) + { + if (curOfs == stringTableSize) + break; + + if (usePos) + StringTable[(reader.BaseStream.Position - stringTableSize) + curOfs] = str; + else + StringTable[baseOffset + curOfs] = str; + + curOfs += Encoding.UTF8.GetByteCount(str) + 1; + } + + return StringTable; + } + } +} diff --git a/DBCD.Benchmark/DBCD.Benchmark.csproj b/DBCD.Benchmark/DBCD.Benchmark.csproj index bd4e470..b27fb78 100644 --- a/DBCD.Benchmark/DBCD.Benchmark.csproj +++ b/DBCD.Benchmark/DBCD.Benchmark.csproj @@ -2,13 +2,14 @@ Exe - net6.0 + net8.0 enable enable + true - + diff --git a/DBCD.Benchmark/Program.cs b/DBCD.Benchmark/Program.cs index 521116f..e472093 100644 --- a/DBCD.Benchmark/Program.cs +++ b/DBCD.Benchmark/Program.cs @@ -2,4 +2,4 @@ using BenchmarkDotNet.Running; using DBCD.Benchmark.Benchmarks; -BenchmarkRunner.Run(); \ No newline at end of file +BenchmarkRunner.Run(); \ No newline at end of file From e545a082d6d99fa405b450ca247a54237ece0984 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Sat, 10 Aug 2024 03:21:46 +0200 Subject: [PATCH 35/40] Add caching to filesystem providers --- DBCD/Providers/FilesystemDBCProvider.cs | 36 +++++++++++++++++++------ DBCD/Providers/FilesystemDBDProvider.cs | 23 ++++++++++++---- 2 files changed, 46 insertions(+), 13 deletions(-) diff --git a/DBCD/Providers/FilesystemDBCProvider.cs b/DBCD/Providers/FilesystemDBCProvider.cs index d6fef2c..ef37283 100644 --- a/DBCD/Providers/FilesystemDBCProvider.cs +++ b/DBCD/Providers/FilesystemDBCProvider.cs @@ -1,4 +1,5 @@ -using System.IO; +using System.Collections.Generic; +using System.IO; namespace DBCD.Providers { @@ -7,19 +8,38 @@ namespace DBCD.Providers /// public class FilesystemDBCProvider : IDBCProvider { - private readonly string directory; + private readonly string Directory; + private readonly bool UseCache; + public Dictionary<(string, string), byte[]> Cache = new Dictionary<(string, string), byte[]>(); - public FilesystemDBCProvider(string directory) => this.directory = directory; + public FilesystemDBCProvider(string directory, bool useCache = false) => (this.Directory, this.UseCache) = (directory, useCache); public Stream StreamForTableName(string tableName, string build) { - 
if(File.Exists(Path.Combine(directory, $"{tableName}.db2"))) - return File.OpenRead(Path.Combine(directory, $"{tableName}.db2")); + if (UseCache && Cache.TryGetValue((tableName, build), out var cachedData)) + { + return new MemoryStream(cachedData); + } + else + { + if (File.Exists(Path.Combine(Directory, $"{tableName}.db2"))) + { + var bytes = File.ReadAllBytes(Path.Combine(Directory, $"{tableName}.db2")); + if (UseCache) + Cache[(tableName, build)] = bytes; + return new MemoryStream(bytes); + } - if(File.Exists(Path.Combine(directory, $"{tableName}.dbc"))) - return File.OpenRead(Path.Combine(directory, $"{tableName}.dbc")); + if (File.Exists(Path.Combine(Directory, $"{tableName}.dbc"))) + { + var bytes = File.ReadAllBytes(Path.Combine(Directory, $"{tableName}.dbc")); + if (UseCache) + Cache[(tableName, build)] = bytes; + return new MemoryStream(bytes); + } - throw new FileNotFoundException("Unable to find DBC/DB2 file on disk for table " + tableName); + throw new FileNotFoundException("Unable to find DBC/DB2 file on disk for table " + tableName); + } } } } diff --git a/DBCD/Providers/FilesystemDBDProvider.cs b/DBCD/Providers/FilesystemDBDProvider.cs index 0ce040c..529a3d8 100644 --- a/DBCD/Providers/FilesystemDBDProvider.cs +++ b/DBCD/Providers/FilesystemDBDProvider.cs @@ -1,6 +1,7 @@ -using System.IO; +using DBDefsLib; +using System.Collections.Generic; +using System.IO; using System.Linq; -using DBDefsLib; namespace DBCD.Providers { @@ -13,12 +14,14 @@ public class FilesystemDBDProvider : IDBDProvider public FilesystemDBDProvider(string directory) => this.directory = directory; + public Dictionary<(string, string), byte[]> Cache = new Dictionary<(string, string), byte[]>(); + /// <summary> /// Function that checks if a certain build exists in a DBD file. Note that this causes a full read/parse of the file.
/// </summary> public bool ContainsBuild(string tableName, string build) { - if(!File.Exists(Path.Combine(directory, $"{tableName}.dbd"))) + if (!File.Exists(Path.Combine(directory, $"{tableName}.dbd"))) return false; var reader = new DBDReader(); @@ -30,13 +33,23 @@ public bool ContainsBuild(string tableName, string build) if (versionDefinition.builds.Contains(targetBuild)) return true; - if(versionDefinition.buildRanges.Any(range => range.Contains(targetBuild))) + if (versionDefinition.buildRanges.Any(range => range.Contains(targetBuild))) return true; } return false; } - public Stream StreamForTableName(string tableName, string build = null) => File.OpenRead(Path.Combine(directory, $"{tableName}.dbd")); + public Stream StreamForTableName(string tableName, string build = null) + { + if (Cache.TryGetValue((tableName, build), out var cachedData)) + return new MemoryStream(cachedData); + else + { + var data = File.ReadAllBytes(Path.Combine(directory, $"{tableName}.dbd")); + Cache[(tableName, build)] = data; + return new MemoryStream(data); + } + } } } From 3845e085906bf14b6ec95c4bac203312a26df495 Mon Sep 17 00:00:00 2001 From: Martin Benjamins Date: Sat, 10 Aug 2024 03:22:36 +0200 Subject: [PATCH 36/40] Use faster and shared string table reader Seems like treating the entire thing as a string and splitting it is far faster than reading byte by byte for some reason --- DBCD.IO/Extensions.cs | 39 ++++++++++++++++++++++++++++++--- DBCD.IO/Readers/WDB2Reader.cs | 8 +------ DBCD.IO/Readers/WDB3Reader.cs | 8 +------ DBCD.IO/Readers/WDB4Reader.cs | 8 +------ DBCD.IO/Readers/WDB5Reader.cs | 8 +------ DBCD.IO/Readers/WDB6Reader.cs | 8 +------ DBCD.IO/Readers/WDBCReader.cs | 8 +------ DBCD.IO/Readers/WDC1Reader.cs | 8 +------ DBCD.IO/Readers/WDC2Reader.cs | 8 +------ DBCD.IO/Readers/WDC3Reader.cs | 9 +++----- DBCD.IO/Readers/WDC4Reader.cs | 9 +++----- DBCD.IO/Readers/WDC5Reader.cs | 9 +++----- 12 files changed, 53 insertions(+), 77 deletions(-) diff --git a/DBCD.IO/Extensions.cs b/DBCD.IO/Extensions.cs index d7d3114..c1ff99b 100644 --- a/DBCD.IO/Extensions.cs +++ b/DBCD.IO/Extensions.cs @@ -6,7 +6,6 @@ using System.Linq.Expressions; using System.Reflection; using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; using System.Text; namespace DBCD.IO @@ -55,6 +54,38 @@ public static T Read<T>(this BinaryReader reader) where T : struct return Unsafe.ReadUnaligned<T>(ref result[0]); } + + /// <summary> + /// Reads a NUL-separated string table from the current stream + /// </summary> + /// <param name="stringTableSize">Size of the string table</param> + /// <param name="usePos">Use WDC2-style position-based table key numbering</param> + /// <param name="baseOffset">Base offset to use for the string table keys</param> + public static Dictionary<long, string> ReadStringTable(this BinaryReader reader, int stringTableSize, int baseOffset = 0, bool usePos = false) + { + var StringTable = new Dictionary<long, string>(stringTableSize / 0x20); + + if(stringTableSize == 0) + return StringTable; + + var curOfs = 0; + var decoded = Encoding.UTF8.GetString(reader.ReadBytes(stringTableSize)); + foreach (var str in decoded.Split('\0')) + { + if (curOfs == stringTableSize) + break; + + if(usePos) + StringTable[(reader.BaseStream.Position - stringTableSize) + curOfs] = str; + else + StringTable[baseOffset + curOfs] = str; + + curOfs += Encoding.UTF8.GetByteCount(str) + 1; + } + + return StringTable; + } + public static T[] ReadArray<T>(this BinaryReader reader) where T : struct { int numBytes = (int)reader.ReadInt64(); @@ -67,7 +98,7 @@ public static T[] ReadArray<T>(this BinaryReader reader) where T : struct public static T[] ReadArray<T>(this BinaryReader reader, int
size) where T : struct { - int numBytes = Marshal.SizeOf() * size; + int numBytes = Unsafe.SizeOf() * size; byte[] result = reader.ReadBytes(numBytes); return result.CopyTo(); @@ -126,6 +157,7 @@ static class CStringExtensions /// the current stream and advances the current position of the stream by string length + 1. /// /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static string ReadCString(this BinaryReader reader) { return reader.ReadCString(Encoding.UTF8); @@ -135,6 +167,7 @@ public static string ReadCString(this BinaryReader reader) /// the current stream and advances the current position of the stream by string length + 1. /// /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static string ReadCString(this BinaryReader reader, Encoding encoding) { var bytes = new System.Collections.Generic.List(0x20); @@ -168,7 +201,7 @@ public static byte[] ToByteArray(this string str) /// A that provides extension methods for numeric types /// public static class NumericExtensions - { + { [Pure] [MethodImpl(MethodImplOptions.AggressiveInlining)] public static int MostSignificantBit(this int n) diff --git a/DBCD.IO/Readers/WDB2Reader.cs b/DBCD.IO/Readers/WDB2Reader.cs index e2a97ad..2b7896d 100644 --- a/DBCD.IO/Readers/WDB2Reader.cs +++ b/DBCD.IO/Readers/WDB2Reader.cs @@ -173,13 +173,7 @@ public WDB2Reader(Stream stream) _Records.Add(i, rec); } - StringTable = new Dictionary(StringTableSize / 0x20); - for (int i = 0; i < StringTableSize;) - { - long oldPos = reader.BaseStream.Position; - StringTable[i] = reader.ReadCString(); - i += (int)(reader.BaseStream.Position - oldPos); - } + StringTable = reader.ReadStringTable(StringTableSize); } } } diff --git a/DBCD.IO/Readers/WDB3Reader.cs b/DBCD.IO/Readers/WDB3Reader.cs index e2d7de9..47be082 100644 --- a/DBCD.IO/Readers/WDB3Reader.cs +++ b/DBCD.IO/Readers/WDB3Reader.cs @@ -220,13 +220,7 @@ public WDB3Reader(Stream stream) } // string table - StringTable = new Dictionary(StringTableSize / 0x20); - for (int i = 0; i < StringTableSize;) - { - long oldPos = reader.BaseStream.Position; - StringTable[i] = reader.ReadCString(); - i += (int)(reader.BaseStream.Position - oldPos); - } + StringTable = reader.ReadStringTable(StringTableSize); // index table if ((reader.BaseStream.Position + copyTableSize) < reader.BaseStream.Length) diff --git a/DBCD.IO/Readers/WDB4Reader.cs b/DBCD.IO/Readers/WDB4Reader.cs index ab6be1c..8b5104a 100644 --- a/DBCD.IO/Readers/WDB4Reader.cs +++ b/DBCD.IO/Readers/WDB4Reader.cs @@ -171,13 +171,7 @@ public WDB4Reader(Stream stream) RecordsData = data; // string table - StringTable = new Dictionary(StringTableSize / 0x20); - for (int i = 0; i < StringTableSize;) - { - long oldPos = reader.BaseStream.Position; - StringTable[i] = reader.ReadCString(); - i += (int)(reader.BaseStream.Position - oldPos); - } + StringTable = reader.ReadStringTable(StringTableSize); } else { diff --git a/DBCD.IO/Readers/WDB5Reader.cs b/DBCD.IO/Readers/WDB5Reader.cs index cc72d3e..7cee954 100644 --- a/DBCD.IO/Readers/WDB5Reader.cs +++ b/DBCD.IO/Readers/WDB5Reader.cs @@ -201,13 +201,7 @@ public WDB5Reader(Stream stream) RecordsData = data; // string table - StringTable = new Dictionary(StringTableSize / 0x20); - for (int i = 0; i < StringTableSize;) - { - long oldPos = reader.BaseStream.Position; - StringTable[i] = reader.ReadCString(); - i += (int)(reader.BaseStream.Position - oldPos); - } + StringTable = reader.ReadStringTable(StringTableSize); } else { diff --git a/DBCD.IO/Readers/WDB6Reader.cs b/DBCD.IO/Readers/WDB6Reader.cs 
From 06f66ee5ae909c73be34b32d8256dcc493dba581 Mon Sep 17 00:00:00 2001
From: Martin Benjamins
Date: Sat, 10 Aug 2024 06:09:48 +0200
Subject: [PATCH 37/40] Instead of returning empty strings, zero out the index

---
 DBCD.IO/Readers/WDC3Reader.cs | 21 ++++++++++-----------
 DBCD.IO/Readers/WDC4Reader.cs | 21 ++++++++++-----------
 DBCD.IO/Readers/WDC5Reader.cs | 21 ++++++++++-----------
 3 files changed, 30 insertions(+), 33 deletions(-)

diff --git a/DBCD.IO/Readers/WDC3Reader.cs b/DBCD.IO/Readers/WDC3Reader.cs
index f3c262b..fea0a8d 100644
--- a/DBCD.IO/Readers/WDC3Reader.cs
+++ b/DBCD.IO/Readers/WDC3Reader.cs
@@ -63,10 +63,11 @@ private static string getStringTableRecord(Dictionary<long, string> stringTable,
         {
             var index = recordOffset + (data.Position >> 3) + GetFieldValue<int>(id, data, fieldMeta, columnMeta, palletData, commonData);
 
-            if (!stringTable.TryGetValue(index, out string result))
-                result = "";
+            // This is presumably needed because when a string is supposed to be empty ('0' in the record data) the index turns negative, which is invalid.
+            if (index < 0)
+                index = 0;
 
-            return result;
+            return stringTable[index];
         }
 
         private static Dictionary<Type, Func<...>> arrayReaders = new Dictionary<Type, Func<...>>
@@ -223,14 +224,12 @@ private static string[] GetFieldValueStringArray(BitReader r, FieldMetaData fiel
             for (int i = 0; i < array.Length; i++)
             {
                 var index = (r.Position >> 3) + recordOffset + r.ReadValue64(bitSize).GetValue<int>();
-                if (stringTable.TryGetValue(index, out string result))
-                {
-                    array[i] = result;
-                }
-                else
-                {
-                    array[i] = "";
-                }
+
+                // This is presumably needed because when a string is supposed to be empty ('0' in the record data) the index turns negative, which is invalid.
+                if (index < 0)
+                    index = 0;
+
+                array[i] = stringTable[index];
             }
 
             return array;

diff --git a/DBCD.IO/Readers/WDC4Reader.cs b/DBCD.IO/Readers/WDC4Reader.cs
index 067b6d9..37d7b67 100644
--- a/DBCD.IO/Readers/WDC4Reader.cs
+++ b/DBCD.IO/Readers/WDC4Reader.cs
@@ -63,10 +63,11 @@ private static string getStringTableRecord(Dictionary<long, string> stringTable,
         {
             var index = recordOffset + (data.Position >> 3) + GetFieldValue<int>(id, data, fieldMeta, columnMeta, palletData, commonData);
 
-            if (!stringTable.TryGetValue(index, out string result))
-                result = "";
+            // This is presumably needed because when a string is supposed to be empty ('0' in the record data) the index turns negative, which is invalid.
+            if (index < 0)
+                index = 0;
 
-            return result;
+            return stringTable[index];
         }
 
         private static Dictionary<Type, Func<...>> arrayReaders = new Dictionary<Type, Func<...>>
@@ -223,14 +224,12 @@ private static string[] GetFieldValueStringArray(BitReader r, FieldMetaData fiel
             for (int i = 0; i < array.Length; i++)
             {
                 var index = (r.Position >> 3) + recordOffset + r.ReadValue64(bitSize).GetValue<int>();
-                if (stringTable.TryGetValue(index, out string result))
-                {
-                    array[i] = result;
-                }
-                else
-                {
-                    array[i] = "";
-                }
+
+                // This is presumably needed because when a string is supposed to be empty ('0' in the record data) the index turns negative, which is invalid.
+                if (index < 0)
+                    index = 0;
+
+                array[i] = stringTable[index];
             }
 
             return array;

diff --git a/DBCD.IO/Readers/WDC5Reader.cs b/DBCD.IO/Readers/WDC5Reader.cs
index 59a2a75..7f48744 100644
--- a/DBCD.IO/Readers/WDC5Reader.cs
+++ b/DBCD.IO/Readers/WDC5Reader.cs
@@ -64,10 +64,11 @@ private static string getStringTableRecord(Dictionary<long, string> stringTable,
         {
             var index = recordOffset + (data.Position >> 3) + GetFieldValue<int>(id, data, fieldMeta, columnMeta, palletData, commonData);
 
-            if (!stringTable.TryGetValue(index, out string result))
-                result = "";
+            // This is presumably needed because when a string is supposed to be empty ('0' in the record data) the index turns negative, which is invalid.
+            if (index < 0)
+                index = 0;
 
-            return result;
+            return stringTable[index];
         }
 
         private static Dictionary<Type, Func<...>> arrayReaders = new Dictionary<Type, Func<...>>
@@ -224,14 +225,12 @@ private static string[] GetFieldValueStringArray(BitReader r, FieldMetaData fiel
             for (int i = 0; i < array.Length; i++)
             {
                 var index = (r.Position >> 3) + recordOffset + r.ReadValue64(bitSize).GetValue<int>();
-                if (stringTable.TryGetValue(index, out string result))
-                {
-                    array[i] = result;
-                }
-                else
-                {
-                    array[i] = "";
-                }
+
+                // This is presumably needed because when a string is supposed to be empty ('0' in the record data) the index turns negative, which is invalid.
+                if (index < 0)
+                    index = 0;
+
+                array[i] = stringTable[index];
             }
 
             return array;
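As a rough illustration of the "presumably" comment above: the string offset stored in a record is added to the record's own position relative to the string table, so a record that encodes an empty string as 0 can produce a key that lands before the table starts. The numbers below are made up; only the clamp mirrors the patch:

    using System;

    // Illustrative arithmetic only: how an "empty string" field yields a negative key.
    long recordOffset = -128;     // record data sits before the string table, so this can be negative
    long fieldBitPosition = 40;   // hypothetical bit cursor inside the record
    long storedOffset = 0;        // the record encodes "empty string" as 0

    long index = recordOffset + (fieldBitPosition >> 3) + storedOffset;
    Console.WriteLine(index);     // -123: not a valid string table key

    // The patch clamps such keys to 0, where the table holds the empty string.
    if (index < 0)
        index = 0;

    Console.WriteLine(index);     // 0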
From 57672ef9f21dbe7571a1646ae45f7c8624f749e9 Mon Sep 17 00:00:00 2001
From: Martin Benjamins
Date: Sun, 11 Aug 2024 00:10:52 +0200
Subject: [PATCH 38/40] Update project files

---
 DBCD.IO/DBCD.IO.csproj | 4 ++--
 DBCD/DBCD.csproj       | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/DBCD.IO/DBCD.IO.csproj b/DBCD.IO/DBCD.IO.csproj
index 7a4e88d..aab484d 100644
--- a/DBCD.IO/DBCD.IO.csproj
+++ b/DBCD.IO/DBCD.IO.csproj
@@ -1,8 +1,8 @@
 <Project Sdk="Microsoft.NET.Sdk">
 
   <PropertyGroup>
-    <TargetFrameworks>netstandard2.0;net6.0;net7.0;net8.0</TargetFrameworks>
-    <LangVersion>8.0</LangVersion>
+    <TargetFrameworks>netstandard2.0;net6.0;net8.0</TargetFrameworks>
+    <LangVersion>latest</LangVersion>

diff --git a/DBCD/DBCD.csproj b/DBCD/DBCD.csproj
index 7bf003d..26a62cb 100644
--- a/DBCD/DBCD.csproj
+++ b/DBCD/DBCD.csproj
@@ -1,8 +1,8 @@
 <Project Sdk="Microsoft.NET.Sdk">
 
   <PropertyGroup>
-    <TargetFrameworks>netstandard2.0;net6.0;net7.0;net8.0</TargetFrameworks>
-    <LangVersion>7.3</LangVersion>
+    <TargetFrameworks>netstandard2.0;net6.0;net8.0</TargetFrameworks>
+    <LangVersion>latest</LangVersion>
From 298464ea993654d516d9d69cd732cdc9ed3e85a8 Mon Sep 17 00:00:00 2001
From: Martin Benjamins
Date: Sun, 11 Aug 2024 01:01:53 +0200
Subject: [PATCH 39/40] Update local-only (disabled!) tests

---
 DBCD.Tests/Providers/WagoDBCProvider.cs |   4 +-
 DBCD.Tests/ReadingTest.cs               |  46 +++++++----
 DBCD.Tests/WritingTest.cs               | 101 +++++++++++++++++++++---
 3 files changed, 122 insertions(+), 29 deletions(-)

diff --git a/DBCD.Tests/Providers/WagoDBCProvider.cs b/DBCD.Tests/Providers/WagoDBCProvider.cs
index 2d3c656..cd7c59d 100644
--- a/DBCD.Tests/Providers/WagoDBCProvider.cs
+++ b/DBCD.Tests/Providers/WagoDBCProvider.cs
@@ -56,8 +56,8 @@ public Stream StreamForTableName(string tableName, string build)
             var cacheFile = Path.Combine("DBCCache", build, tableName + ".db2");
             if (File.Exists(cacheFile))
             {
-                var lastWrite = File.GetLastWriteTime(cacheFile);
-                if (DateTime.Now - lastWrite < new TimeSpan(1, 0, 0, 0))
+                //var lastWrite = File.GetLastWriteTime(cacheFile);
+                //if (DateTime.Now - lastWrite < new TimeSpan(1, 0, 0, 0))
                     return new MemoryStream(File.ReadAllBytes(cacheFile));
             }

diff --git a/DBCD.Tests/ReadingTest.cs b/DBCD.Tests/ReadingTest.cs
index df9ca2e..3cb6b97 100644
--- a/DBCD.Tests/ReadingTest.cs
+++ b/DBCD.Tests/ReadingTest.cs
@@ -2,7 +2,6 @@
 using Microsoft.VisualStudio.TestTools.UnitTesting;
 using System;
 using System.IO;
-using System.Net.Http;
 
 namespace DBCD.Tests
 {
@@ -112,38 +111,55 @@ public void TestGithubDBDProviderWithCache()
         public void TestReadingAllDB2s()
         {
             return; // Only run this test manually
-
-            var build = "9.1.0.39653"; // WDC3
-
-            var dbcd = new DBCD(wagoDBCProvider, githubDBDProvider);
+            var localDBDProvider = new FilesystemDBDProvider("D:\\Projects\\WoWDBDefs\\definitions");
+
+            //var build = "3.3.5.12340"; // WDBC
+            //var build = "6.0.1.18179"; // WDB2
+            //var build = "7.0.1.20740"; // WDB3, only 1 DBD sadly
+            //var build = "7.0.1.20810"; // WDB4, only 2 DBDs sadly
+            //var build = "7.2.0.23436"; // WDB5, only Map.db2
+            //var build = "7.3.5.25928"; // WDB6
+            //var build = "7.3.5.25928"; // WDC1
+            //var build = "8.0.1.26231"; // WDC2
+            //var build = "9.1.0.39653"; // WDC3
+            //var build = "10.1.0.48480"; // WDC4
+            var build = "11.0.2.56044"; // WDC5
+
+            var localDBCProvider = new FilesystemDBCProvider(Path.Combine("DBCCache", build));
+            var dbcd = new DBCD(localDBCProvider, localDBDProvider);
 
             var allDB2s = wagoDBCProvider.GetAllTableNames();
 
+            var attemptedTables = 0;
+            var successfulTables = 0;
+
             foreach (var tableName in allDB2s)
             {
                 // I think this table is meant to crash the test, so we skip it
                 if (tableName == "UnitTestSparse")
                     continue;
 
+                if (!localDBDProvider.ContainsBuild(tableName, build))
+                    continue;
+
+                attemptedTables++;
+
                 try
                 {
                     var storage = dbcd.Load(tableName, build);
+                    successfulTables++;
                 }
-                catch(FileNotFoundException e)
+                catch (FileNotFoundException e)
                 {
                     Console.WriteLine($"Failed to load {tableName} for build {build}, does not exist in build.");
+                    successfulTables++; // this counts
                 }
-                catch(AggregateException e)
+                catch (Exception e)
                 {
-                    if(e.InnerException is HttpRequestException)
-                    {
-                        Console.WriteLine($"Failed to load {tableName} for build {build}, does not exist.");
-                    }
-                    else
-                    {
-                        throw e;
-                    }
+                    Console.WriteLine("Failed to load " + tableName + " for build " + build + ": " + e.Message + "\n" + e.StackTrace);
                 }
             }
+
+            Assert.AreEqual(attemptedTables, successfulTables);
         }
 
         //[TestMethod]

diff --git a/DBCD.Tests/WritingTest.cs b/DBCD.Tests/WritingTest.cs
index ab96123..710cdb6 100644
--- a/DBCD.Tests/WritingTest.cs
+++ b/DBCD.Tests/WritingTest.cs
@@ -4,6 +4,7 @@
 using System;
 using System.Collections.Generic;
 using System.IO;
+using System.Linq;
 using System.Net.Http;
 
 namespace DBCD.Tests
     [TestClass]
     public class WritingTest
     {
-        public static GithubDBDProvider DBDProvider { get; } = new GithubDBDProvider(true);
         public static string InputPath { get; } = $"{Directory.GetCurrentDirectory()}\\DBCCache";
         public static WagoDBCProvider wagoDBCProvider = new();
-        public static DBCD InputDBCD { get; } = new DBCD(wagoDBCProvider, DBDProvider);
-        public static DBCD SavedDBCD { get; } = new DBCD(new FilesystemDBCProvider("tmp"), DBDProvider);
-
-        public static string Build { get; } = "9.1.0.39653";
 
         [TestMethod]
         public void TestWritingAllDB2s()
         {
             return; // Only run this test manually
+            var localDBDProvider = new FilesystemDBDProvider("D:\\Projects\\WoWDBDefs\\definitions");
+
+            //var build = "3.3.5.12340"; // WDBC
+            //var build = "6.0.1.18179"; // WDB2
+            //var build = "7.0.1.20740"; // WDB3, TODO: Find DBDs for a DB2
+            //var build = "7.0.1.20810"; // WDB4, TODO: Find DBDs for a DB2
+            //var build = "7.0.3.21479"; // WDB5, TODO: Find DBDs for a DB2
+            //var build = "7.2.0.23436"; // WDB6
+            //var build = "7.3.5.25928"; // WDC1
+            //var build = "8.0.1.26231"; // WDC2
+            var build = "9.2.7.45745"; // WDC3
+            //var build = "10.1.0.48480"; // WDC4
+            //var build = "11.0.2.56044"; // WDC5
+
             var allDB2s = wagoDBCProvider.GetAllTableNames();
 
             if (Directory.Exists("tmp"))
@@ -31,26 +42,48 @@ public void TestWritingAllDB2s()
             Directory.CreateDirectory("tmp");
 
+            var localDBCProvider = new FilesystemDBCProvider(Path.Combine("DBCCache", build));
+            var tmpDBCProvider = new FilesystemDBCProvider("tmp");
+
+            var InputDBCD = new DBCD(localDBCProvider, localDBDProvider);
+            var SavedDBCD = new DBCD(tmpDBCProvider, localDBDProvider);
+
+            var attemptedTables = 0;
+            var successfulTables = 0;
+            var identicalTables = 0;
+
             foreach (var tableName in allDB2s)
             {
-                if (tableName == "UnitTestSparse")
+                if (!localDBDProvider.ContainsBuild(tableName, build))
                     continue;
 
-                // TODO: possible DBD being wrong
-                if (tableName == "SummonProperties")
+                if (tableName == "UnitTestSparse")
                     continue;
 
                 var originalValues = new List<DBCDRow>();
+
+                attemptedTables++;
+
                 try
                 {
-                    var originalStorage = InputDBCD.Load(tableName, Build);
+                    var originalStorage = InputDBCD.Load(tableName, build);
+
+                    //if(tableName == "ModelFileData")
+                    //{
+                    //    var row = originalStorage.ConstructRow(4252801);
+                    //    row["FileDataID"] = 4252801;
+                    //    row["Flags"] = (byte)0;
+                    //    row["LodCount"] = (byte)3;
+                    //    row["ModelResourcesID"] = (uint)62664;
+                    //}
+
                     originalValues.AddRange(originalStorage.Values);
                     originalStorage.Save($"tmp/{tableName}.db2");
                 }
                 catch (FileNotFoundException e)
                 {
                     // This is not a reading test, I could not care less
+                    attemptedTables--;
                     continue;
                 }
                 catch (AggregateException e)
@@ -58,16 +91,25 @@ public void TestWritingAllDB2s()
                     if (e.InnerException is HttpRequestException)
                     {
                         // This is not a reading test, I could not care less
+                        attemptedTables--;
                         continue;
                     }
                     else
                     {
-                        throw e;
+                        Console.WriteLine("Failed to write " + tableName + " for build " + build + ": " + e.Message + "\n" + e.StackTrace);
+                        continue;
                     }
                 }
+                catch (Exception e)
+                {
+                    Console.WriteLine("Failed to write " + tableName + " for build " + build + ": " + e.Message + "\n" + e.StackTrace);
+                    continue;
+                }
 
-                var savedStorage = SavedDBCD.Load(tableName, Build);
-
+                //try
+                //{
+                var savedStorage = SavedDBCD.Load(tableName, build);
+                successfulTables++;
                 // Lazy compare
                 var originalJson = JsonConvert.SerializeObject(originalValues, Formatting.Indented);
                 var newJson = JsonConvert.SerializeObject(savedStorage.Values, Formatting.Indented);
@@ -78,10 +120,45 @@ public void TestWritingAllDB2s()
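The byte comparison this patch adds to WritingTest (continued just below) is a classic round-trip harness: save the loaded storage, then require the rewritten file to match the input byte for byte. A minimal sketch of that final check in isolation, under stated assumptions (the function name and file paths are placeholders; the test itself compares provider streams rather than files):

    using System;
    using System.IO;
    using System.Linq;

    // Round-trip check: the rewritten file must equal the original byte for byte.
    static bool IsByteIdentical(string originalPath, string rewrittenPath)
    {
        var original = File.ReadAllBytes(originalPath);
        var rewritten = File.ReadAllBytes(rewrittenPath);

        // Cheap early-out on length, mirroring the test's Length comparison.
        if (original.Length != rewritten.Length)
        {
            Console.WriteLine($"{original.Length} vs {rewritten.Length} bytes");
            return false;
        }

        return original.SequenceEqual(rewritten);
    }

    Console.WriteLine(IsByteIdentical("DBCCache/9.2.7.45745/Map.db2", "tmp/Map.db2"));

Checking lengths first is worthwhile here: most writer regressions change the file size, so the expensive element-wise comparison only runs for plausible matches.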
                     throw new InvalidDataException($"The saved storage {tableName} should not differ from the original one!");
                 }
+
+                using (var originalStream = localDBCProvider.StreamForTableName(tableName, build))
+                using (var originalMS = new MemoryStream())
+                using (var savedStream = tmpDBCProvider.StreamForTableName(tableName, build))
+                using (var savedMS = new MemoryStream())
+                {
+                    if (originalStream.Length != savedStream.Length)
+                    {
+                        Console.WriteLine(originalStream.Length + " vs " + savedStream.Length + " for " + tableName + " " + build);
+                        continue;
+                    }
+
+                    originalStream.CopyTo(originalMS);
+                    originalStream.Position = 0;
+
+                    savedStream.CopyTo(savedMS);
+                    savedStream.Position = 0;
+
+                    var originalBytes = originalMS.ToArray();
+                    var savedBytes = savedMS.ToArray();
+
+                    if (!originalBytes.SequenceEqual(savedBytes))
+                        Console.WriteLine("Different bytes for " + tableName + " " + build);
+                    else
+                        identicalTables++;
+                }
+                //}
+                //catch (Exception e)
+                //{
+                //    Console.WriteLine("Failed to load rewritten " + tableName + " for build " + build + ": " + e.Message + "\n" + e.StackTrace);
+                //}
             }
 
+            Console.WriteLine(successfulTables + "/" + attemptedTables + " written successfully");
+            Console.WriteLine(identicalTables + "/" + successfulTables + " identical (" + (successfulTables - identicalTables) + " different)");
+
+            Assert.AreEqual(attemptedTables, successfulTables);
 
-            Directory.Delete("tmp", true);
+            //Directory.Delete("tmp", true);
         }
     }
 }

From 01e52f78c8c922307e5c42f680e60fb4e83590ee Mon Sep 17 00:00:00 2001
From: Martin Benjamins
Date: Mon, 12 Aug 2024 06:02:10 +0200
Subject: [PATCH 40/40] Add Version to project files

---
 DBCD.IO/DBCD.IO.csproj | 1 +
 DBCD/DBCD.csproj       | 1 +
 2 files changed, 2 insertions(+)

diff --git a/DBCD.IO/DBCD.IO.csproj b/DBCD.IO/DBCD.IO.csproj
index aab484d..f14b92b 100644
--- a/DBCD.IO/DBCD.IO.csproj
+++ b/DBCD.IO/DBCD.IO.csproj
@@ -3,6 +3,7 @@
     <TargetFrameworks>netstandard2.0;net6.0;net8.0</TargetFrameworks>
     <LangVersion>latest</LangVersion>
+    <Version>2.0.0</Version>

diff --git a/DBCD/DBCD.csproj b/DBCD/DBCD.csproj
index 26a62cb..e66dd1b 100644
--- a/DBCD/DBCD.csproj
+++ b/DBCD/DBCD.csproj
@@ -3,6 +3,7 @@
     <TargetFrameworks>netstandard2.0;net6.0;net8.0</TargetFrameworks>
     <LangVersion>latest</LangVersion>
+    <Version>2.0.0</Version>