diff --git a/src/compressedanimation/compressedanimation.rs b/src/compressedanimation/compressedanimation.rs
new file mode 100644
index 0000000..4881593
--- /dev/null
+++ b/src/compressedanimation/compressedanimation.rs
@@ -0,0 +1,105 @@
+use std::fmt::Debug;
+use std::rc::Rc;
+use crate::{
+    compressedanimation::splinecompressedanimation::{SplineCompressedAnimation},
+    error::{Error, Result},
+    NodeWalker,
+};
+
+/// Represent values that may change over time.
+pub trait InterpolatableTimeToValueTrait<const COUNT: usize> {
+    /// Determine whether there are no values.
+    fn is_empty(&self) -> bool;
+
+    /// Determine whether the value does not change over time.
+    fn is_static(&self) -> bool;
+
+    /// Get the duration stored.
+    fn duration(&self) -> f32;
+
+    /// Get the significant time points of frames, in seconds.
+    fn frame_times(&self) -> Vec<f32>;
+
+    /// Get the interpolated value at the given time in seconds.
+    fn interpolate(&self, t: f32) -> [f32; COUNT];
+}
+
+impl<const COUNT: usize> Debug for dyn InterpolatableTimeToValueTrait<COUNT> {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "duration={}", self.duration())
+    }
+}
+
+/// Represent an animation consisting of TRS components.
+pub trait AnimationTrait {
+    /// Get the duration of the animation.
+    fn duration(&self) -> f32;
+
+    /// Get the number of tracks (bones) stored in this animation.
+    fn num_tracks(&self) -> usize;
+
+    /// Get the significant time points of frames, in seconds.
+    fn frame_times(&self) -> Vec<f32>;
+
+    /// Get the translation component of this animation for the specified track (bone).
+    fn translation(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<3>>;
+
+    /// Get the rotation component of this animation for the specified track (bone).
+    fn rotation(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<4>>;
+
+    /// Get the scale component of this animation for the specified track (bone).
+    fn scale(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<3>>;
+}
+
+impl Debug for dyn AnimationTrait {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "duration={}, num_tracks={}", self.duration(), self.num_tracks())
+    }
+}
+
+#[derive(Debug)]
+pub struct BaseCompressedAnimation {
+    pub duration: f32,
+    pub num_tracks: usize,
+}
+
+impl BaseCompressedAnimation {
+    pub fn new(node: &NodeWalker) -> Result<Self> {
+        if !node.is_or_inherited_from("hkaAnimation") {
+            return Err(Error::Invalid(
+                "Given node is not a valid animation.".into()
+            ));
+        }
+
+        let duration = node.field_f32("duration", None)?;
+        let num_transform_tracks = node.field_i32("numberOfTransformTracks", None)? as usize;
+
+        Ok(Self {
+            duration,
+            num_tracks: num_transform_tracks,
+        })
+    }
+}
+
+/// Create a new animation from the given hkaAnimation node.
+pub fn read_animation(animation_node: &NodeWalker) -> Result<Rc<dyn AnimationTrait>> {
+    let base = BaseCompressedAnimation::new(animation_node)?;
+
+    if animation_node.is_or_inherited_from("hkaSplineCompressedAnimation") {
+        Ok(Rc::new(SplineCompressedAnimation::new(animation_node, base)?))
+    } else {
+        Err(Error::Invalid("Unsupported animation type.".into()))
+    }
+}
+
+/// Create a new vector of animations from the given root node of a tagfile.
+pub fn new_from_root(root_node: &NodeWalker) -> Result<Vec<Rc<dyn AnimationTrait>>> {
+    root_node
+        .field_node_vec("namedVariants")?.first()
+        .ok_or(Error::Invalid("namedVariants node contains no children.".into()))?
+        .field_node("variant")?
+        .field_node_vec("animations")?
+        .iter()
+        .map(|animation_node| read_animation(animation_node))
+        .collect()
+}
\ No newline at end of file
diff --git a/src/compressedanimation/concatanimation.rs b/src/compressedanimation/concatanimation.rs
new file mode 100644
index 0000000..2f7b458
--- /dev/null
+++ b/src/compressedanimation/concatanimation.rs
@@ -0,0 +1,137 @@
+use std::rc::Rc;
+use crate::{
+    compressedanimation::AnimationTrait,
+    compressedanimation::compressedanimation::InterpolatableTimeToValueTrait,
+    error::{Error, Result},
+};
+
+#[derive(Debug)]
+pub struct ConcatAnimation {
+    parts: Vec<Rc<dyn AnimationTrait>>,
+    translations: Vec<Rc<ConcatInterpolatableTimeToValue<3>>>,
+    rotations: Vec<Rc<ConcatInterpolatableTimeToValue<4>>>,
+    scales: Vec<Rc<ConcatInterpolatableTimeToValue<3>>>,
+}
+
+impl ConcatAnimation {
+    pub fn new(parts: Vec<Rc<dyn AnimationTrait>>) -> Result<Self> {
+        let mut translations = Vec::<Rc<ConcatInterpolatableTimeToValue<3>>>::new();
+        let mut rotations = Vec::<Rc<ConcatInterpolatableTimeToValue<4>>>::new();
+        let mut scales = Vec::<Rc<ConcatInterpolatableTimeToValue<3>>>::new();
+
+        if !parts.is_empty() {
+            if parts.iter().skip(1).any(|x| x.duration() != parts[0].duration()) {
+                return Err(Error::Invalid("Durations of all parts must be equal.".into()))
+            }
+
+            let num_tracks = parts[0].num_tracks();
+            if parts.iter().skip(1).any(|x| x.num_tracks() != num_tracks) {
+                return Err(Error::Invalid("Number of tracks of all parts must be equal.".into()))
+            }
+
+            for track in 0..num_tracks {
+                translations.push(ConcatInterpolatableTimeToValue::new(parts.iter().map(|x| x.translation(track) as Rc<dyn InterpolatableTimeToValueTrait<3>>).collect())?.into());
+                rotations.push(ConcatInterpolatableTimeToValue::new(parts.iter().map(|x| x.rotation(track) as Rc<dyn InterpolatableTimeToValueTrait<4>>).collect())?.into());
+                scales.push(ConcatInterpolatableTimeToValue::new(parts.iter().map(|x| x.scale(track) as Rc<dyn InterpolatableTimeToValueTrait<3>>).collect())?.into());
+            }
+        }
+
+        Ok(Self {
+            parts,
+            translations,
+            rotations,
+            scales
+        })
+    }
+}
+
+impl AnimationTrait for ConcatAnimation {
+    fn duration(&self) -> f32 {
+        if self.parts.is_empty() { 0f32 } else { self.parts[0].duration() }
+    }
+
+    fn num_tracks(&self) -> usize {
+        if self.parts.is_empty() { 0 } else { self.parts[0].num_tracks() }
+    }
+
+    fn frame_times(&self) -> Vec<f32> {
+        let mut res = Vec::<f32>::new();
+        let mut t = 0f32;
+        for part in &self.parts {
+            res.extend(part.frame_times().iter().map(|x| x + t));
+            t += part.duration();
+        }
+        res
+    }
+
+    fn translation(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<3>> {
+        self.translations[track_index].clone()
+    }
+
+    fn rotation(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<4>> {
+        self.rotations[track_index].clone()
+    }
+
+    fn scale(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<3>> {
+        self.scales[track_index].clone()
+    }
+}
+
+#[derive(Debug)]
+struct ConcatInterpolatableTimeToValue<const COUNT: usize> {
+    parts: Vec<Rc<dyn InterpolatableTimeToValueTrait<COUNT>>>,
+}
+
+impl<const COUNT: usize> ConcatInterpolatableTimeToValue<COUNT> {
+    fn new(parts: Vec<Rc<dyn InterpolatableTimeToValueTrait<COUNT>>>) -> Result<Self> {
+        if parts.is_empty() {
+            return Err(Error::Invalid("There must be at least one part.".into()))
+        }
+
+        if parts.iter().any(|x| x.duration() != parts[0].duration()) {
+            return Err(Error::Invalid("All parts must have the same duration.".into()))
+        }
+
+        Ok(Self {
+            parts,
+        })
+    }
+}
+
+impl<const COUNT: usize> InterpolatableTimeToValueTrait<COUNT> for ConcatInterpolatableTimeToValue<COUNT> {
+    fn is_empty(&self) -> bool {
+        self.parts.iter().all(|x| x.is_empty())
+    }
+
+    fn is_static(&self) -> bool {
+        self.parts.iter().all(|x| x.is_static())
+    }
+
+    fn duration(&self) -> f32 {
+        self.parts.iter().map(|x| x.duration()).sum()
+    }
+
+    fn frame_times(&self) -> Vec<f32> {
+        let mut res = Vec::<f32>::new();
+
+        let mut t = 0f32;
+        for part in &self.parts {
+            res.extend(part.frame_times().iter().map(|x| x + t));
+            t += part.duration();
+        }
+
+        res
+    }
+
+    fn interpolate(&self, mut t: f32) -> [f32; COUNT] {
+        loop {
+            for part in &self.parts {
+                if t < part.duration() {
+                    return part.interpolate(t)
+                }
+
+                t -= part.duration()
+            }
+        }
+    }
+}
diff --git a/src/compressedanimation/mod.rs b/src/compressedanimation/mod.rs
new file mode 100644
index 0000000..a27785e
--- /dev/null
+++ b/src/compressedanimation/mod.rs
@@ -0,0 +1,7 @@
+//! Utilities for decompressing compressed TRS animation.
+
+mod compressedanimation;
+mod splinecompressedanimation;
+mod concatanimation;
+
+pub use compressedanimation::{AnimationTrait, InterpolatableTimeToValueTrait, read_animation, new_from_root};
diff --git a/src/compressedanimation/splinecompressedanimation.rs b/src/compressedanimation/splinecompressedanimation.rs
new file mode 100644
index 0000000..ef055eb
--- /dev/null
+++ b/src/compressedanimation/splinecompressedanimation.rs
@@ -0,0 +1,632 @@
+use std::cmp::min;
+use std::io::{Cursor, Read, Seek, SeekFrom};
+use std::rc::Rc;
+use crate::{
+    compressedanimation::compressedanimation::{
+        AnimationTrait,
+        BaseCompressedAnimation,
+    },
+    error::{Error, Result},
+    macros::{read_primitive},
+    NodeWalker,
+};
+use crate::compressedanimation::compressedanimation::InterpolatableTimeToValueTrait;
+use crate::compressedanimation::concatanimation::ConcatAnimation;
+
+#[derive(Debug)]
+pub struct SplineCompressedAnimation {
+    pub base: BaseCompressedAnimation,
+    pub block_duration: f32,
+    pub frame_duration: f32,
+    pub blocks: Vec<Rc<Block>>,
+    animation: ConcatAnimation,
+}
+
+impl SplineCompressedAnimation {
+    pub fn new(node: &NodeWalker, base: BaseCompressedAnimation) -> Result<Self> {
+        let max_frames_per_block = node.field_i32("maxFramesPerBlock", None)? as usize;
+        let block_duration = node.field_f32("blockDuration", None)?;
+        let frame_duration = node.field_f32("frameDuration", None)?;
+        let mut block_offsets = node.field_i32_vec("blockOffsets", None)?
+            .iter().map(|x| *x as usize).collect::<Vec<usize>>();
+        let data = node.field_u8_vec("data", None)?;
+        block_offsets.push(data.len());
+
+        let mut num_pending_frames = node.field_i32("numFrames", None)? as usize;
+        let mut pending_duration = node.field_f32("duration", None)?;
+
+        let blocks = block_offsets.iter()
+            .zip(block_offsets.iter().skip(1))
+            .map(|(from, to)| {
+                let num_frames = min(num_pending_frames, max_frames_per_block) as usize;
+                num_pending_frames -= num_frames;
+
+                let duration = if pending_duration > block_duration { block_duration } else { pending_duration };
+                pending_duration -= duration;
+
+                let block = Block::from_bytes(
+                    data.as_slice()[*from..*to].as_ref(),
+                    base.num_tracks,
+                    num_frames,
+                    frame_duration,
+                    duration);
+                block.and_then(|x| Ok(Rc::new(x)))
+            })
+            .collect::<Result<Vec<Rc<Block>>>>()?;
+
+        let animation = ConcatAnimation::new(blocks.iter().map(|x| x.to_owned() as Rc<dyn AnimationTrait>).collect())?;
+
+        Ok(Self {
+            base,
+            block_duration,
+            frame_duration,
+            blocks,
+            animation,
+        })
+    }
+}
+
+impl AnimationTrait for SplineCompressedAnimation {
+    fn duration(&self) -> f32 { return self.base.duration; }
+
+    fn num_tracks(&self) -> usize { return self.base.num_tracks; }
+
+    fn frame_times(&self) -> Vec<f32> {
+        self.animation.frame_times()
+    }
+
+    fn translation(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<3>> {
+        self.animation.translation(track_index)
+    }
+
+    fn rotation(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<4>> {
+        self.animation.rotation(track_index)
+    }
+
+    fn scale(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<3>> {
+        self.animation.scale(track_index)
+    }
+}
+
+struct BlockDataReader<'a> {
+    reader: Cursor<&'a [u8]>,
+}
+
+impl<'a> BlockDataReader<'a> {
+    fn new(data: &'a [u8]) -> Self {
+        Self { reader: Cursor::new(data) }
+    }
+
+    fn align(&mut self, unit: usize) -> Result<()> {
+        match self.reader.stream_position()? as usize % unit {
+            0 => {}
+            n => { self.reader.seek(SeekFrom::Current(unit as i64 - n as i64))?; }
+        }
+
+        Ok(())
+    }
+
+    read_primitive!(u8, read_u8);
+    read_primitive!(u16, read_u16);
+    read_primitive!(u32, read_u32);
+    read_primitive!(f32, read_f32);
+
+    fn read_scaled_compressed_scalar(&mut self, t: &CompressedScalarType) -> Result<f32> {
+        match t {
+            CompressedScalarType::K8 => Ok(self.read_u8()? as f32 / u8::MAX as f32),
+            CompressedScalarType::K16 => Ok(self.read_u16()? as f32 / u16::MAX as f32),
+        }
+    }
+
+    fn read_bytes(&mut self, len: usize) -> Result<Vec<u8>> {
+        let mut res = vec![0u8; len];
+        self.reader.read_exact(&mut res)?;
+        Ok(res)
+    }
+
+    fn read_k32_quat(&mut self) -> Result<[f32; 4]> {
+        let val = self.read_u32()?;
+
+        let phi_theta = (val & 0x3FFFF) as f32;
+
+        let r = 1f32 - (((val >> 18) & 0x3FF) as f32 / 0x3FF as f32).powi(2);
+
+        let mut phi = phi_theta.sqrt().floor();
+        let mut theta = 0f32;
+
+        if phi != 0f32 {
+            theta = std::f32::consts::PI / 4f32 * (phi_theta - phi * phi) / phi;
+            phi = std::f32::consts::PI / 1022f32 * phi;
+        }
+
+        let magnitude = (1f32 - r.powi(2)).sqrt();
+        let (s_phi, c_phi) = phi.sin_cos();
+        let (s_theta, c_theta) = theta.sin_cos();
+
+        Ok([
+            s_phi * c_theta * magnitude * (if 0 == (val & 0x10000000) { 1f32 } else { -1f32 }),
+            s_phi * s_theta * magnitude * (if 0 == (val & 0x20000000) { 1f32 } else { -1f32 }),
+            c_phi * magnitude * (if 0 == (val & 0x40000000) { 1f32 } else { -1f32 }),
+            r * (if 0 == (val & 0x80000000) { 1f32 } else { -1f32 }),
+        ])
+    }
+
+    fn read_k40_quat(&mut self) -> Result<[f32; 4]> {
+        const MASK: i64 = (1 << 12) - 1;
+        const DELTA: i64 = MASK >> 1;
+        const DELTAF: f32 = DELTA as f32;
+
+        let mut v = [0u8; 5];
+        self.reader.read_exact(&mut v)?;
+        let n = 0i64
+            | ((v[4] as i64) << 32)
+            | ((v[3] as i64) << 24)
+            | ((v[2] as i64) << 16)
+            | ((v[1] as i64) << 8)
+            | ((v[0] as i64) << 0);
+
+        let mut tmp: [f32; 4] = [
+            (((n >> 0) & MASK) - DELTA) as f32 * std::f32::consts::FRAC_1_SQRT_2 / DELTAF,
+            (((n >> 12) & MASK) - DELTA) as f32 * std::f32::consts::FRAC_1_SQRT_2 / DELTAF,
+            (((n >> 24) & MASK) - DELTA) as f32 * std::f32::consts::FRAC_1_SQRT_2 / DELTAF,
+            0f32,
+        ];
+        let shift = ((n >> 36) & 0x3) as usize;
+        let invert = 0 != ((n >> 38) & 0x1);
+
+        tmp[3] = (1f32 - tmp[0] * tmp[0] - tmp[1] * tmp[1] - tmp[2] * tmp[2]).sqrt();
+        if invert {
+            tmp[3] = -tmp[3]
+        }
+
+        for i in 0..(3 - shift) {
+            (tmp[3 - i], tmp[2 - i]) = (tmp[2 - i], tmp[3 - i])
+        }
+
+        Ok(tmp)
+    }
+
+    fn read_k48_quat(&mut self) -> Result<[f32; 4]> {
+        const MASK: i32 = (1 << 15) - 1;
+        const DELTA: i32 = MASK >> 1;
+        const DELTAF: f32 = DELTA as f32;
+
+        let x = self.read_u16()?;
+        let y = self.read_u16()?;
+        let z = self.read_u16()?;
+        let shift = (((y >> 14) & 2) | (x >> 15)) as usize;
+        let invert = 0 != (z & 0x8000);
+
+        let mut tmp: [f32; 4] = [
+            (((x as i32) & MASK) - DELTA) as f32 * std::f32::consts::FRAC_1_SQRT_2 / DELTAF,
+            (((y as i32) & MASK) - DELTA) as f32 * std::f32::consts::FRAC_1_SQRT_2 / DELTAF,
+            (((z as i32) & MASK) - DELTA) as f32 * std::f32::consts::FRAC_1_SQRT_2 / DELTAF,
+            0f32,
+        ];
+
+        tmp[3] = (1f32 - tmp[0] * tmp[0] - tmp[1] * tmp[1] - tmp[2] * tmp[2]).sqrt();
+        if invert {
+            tmp[3] = -tmp[3]
+        }
+
+        for i in 0..(3 - shift) {
+            (tmp[3 - i], tmp[2 - i]) = (tmp[2 - i], tmp[3 - i])
+        }
+
+        Ok(tmp)
+    }
+}
+
+#[derive(Debug)]
+pub struct Block {
+    pub num_frames: usize,
+    pub frame_duration: f32,
+    pub duration: f32,
+    pub tracks: Vec<Track>,
+}
+
+impl Block {
+    pub fn from_bytes(data: &[u8], num_tracks: usize, num_frames: usize, frame_duration: f32, duration: f32) -> Result<Self> {
+        let mut reader = BlockDataReader::new(data);
+        let masks = (0..num_tracks)
+            .map(|_| match TransformMask::new(&mut reader) {
+                Ok(v) => Ok(v),
+                Err(e) => Err(Error::Invalid(e.to_string())),
+            })
+            .collect::<Result<Vec<TransformMask>>>()?;
+        let tracks = masks.iter()
+            .map(|mask| Track::new(&mut reader, mask, num_frames, frame_duration, duration))
+            .collect::<Result<Vec<Track>>>()?;
+
+        Ok(Self {
+            num_frames,
+            frame_duration,
+            duration,
+            tracks,
+        })
+    }
+}
+
+impl AnimationTrait for Block {
+    fn duration(&self) -> f32 {
+        self.duration
+    }
+
+    fn num_tracks(&self) -> usize {
+        self.tracks.len()
+    }
+
+    fn frame_times(&self) -> Vec<f32> {
+        (0..(self.num_frames - 1)).map(|x| x as f32 * self.frame_duration).collect()
+    }
+
+    fn translation(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<3>> {
+        self.tracks[track_index].translate.clone()
+    }
+
+    fn rotation(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<4>> {
+        self.tracks[track_index].rotate.clone()
+    }
+
+    fn scale(&self, track_index: usize) -> Rc<dyn InterpolatableTimeToValueTrait<3>> {
+        self.tracks[track_index].scale.clone()
+    }
+}
+
+#[derive(Debug)]
+pub struct Track {
+    pub frames: usize,
+    pub translate: Rc<TimedCompressedFloatArray<3>>,
+    pub rotate: Rc<TimedCompressedFloatArray<4>>,
+    pub scale: Rc<TimedCompressedFloatArray<3>>,
+}
+
+impl Track {
+    fn new(mut reader: &mut BlockDataReader, mask: &TransformMask, num_frames: usize, frame_duration: f32, duration: f32) -> Result<Self> {
+        let translate = CompressedFloatArray::<3>::new(&mut reader, &mask.translate, &mask.translate_primitive_type()?)?;
+        reader.align(4)?;
+        let rotate = CompressedFloatArray::<4>::new(&mut reader, &mask.rotate, &mask.rotate_primitive_type()?)?;
+        reader.align(4)?;
+        let scale = CompressedFloatArray::<3>::new(&mut reader, &mask.scale, &mask.scale_primitive_type()?)?;
+        reader.align(4)?;
+        Ok(Self {
+            frames: num_frames,
+            translate: TimedCompressedFloatArray::new(translate, num_frames, frame_duration, duration).into(),
+            rotate: TimedCompressedFloatArray::new(rotate, num_frames, frame_duration, duration).into(),
+            scale: TimedCompressedFloatArray::new(scale, num_frames, frame_duration, duration).into(),
+        })
+    }
+}
+
+#[derive(Debug)]
+pub enum CompressedFloatArray<const COUNT: usize> {
+    Spline(Nurbs<COUNT>),
+    Static([f32; COUNT]),
+    Empty,
+}
+
+impl CompressedFloatArray<3> {
+    fn new(reader: &mut BlockDataReader, mask: &VectorMask, primitive_type: &CompressedScalarType) -> Result<Self> {
+        if mask.has_spline() {
+            let num_items = reader.read_u16()? as usize;
+            let degree = reader.read_u8()? as usize;
+            let knots = reader.read_bytes(num_items + degree + 2)?;
+            reader.align(4)?;
+
+            let mut ranges = [[0f32; 2]; 3];
+            for i in 0..3 {
+                for j in 0..(mask.mask(i)? as usize) {
+                    ranges[i][j] = reader.read_f32()?
+                }
+            }
+
+            let mut control_points = vec![[0f32; 3]; num_items + 1];
+            for control_point in &mut control_points {
+                for j in 0..3 {
+                    control_point[j] = match mask.mask(j)? {
+                        ValueMask::Spline =>
+                            ranges[j][0] + (ranges[j][1] - ranges[j][0]) * reader.read_scaled_compressed_scalar(primitive_type)?,
+                        _ => ranges[j][0],
+                    };
+                }
+            }
+
+            Ok(Self::Spline(Nurbs::<3>::new(control_points, knots, degree)))
+        } else if mask.has_static() {
+            Ok(Self::Static([
+                match mask.mask(0)? {
+                    ValueMask::Static => reader.read_f32()?,
+                    _ => 0f32
+                },
+                match mask.mask(1)? {
+                    ValueMask::Static => reader.read_f32()?,
+                    _ => 0f32
+                },
+                match mask.mask(2)? {
+                    ValueMask::Static => reader.read_f32()?,
+                    _ => 0f32
+                },
+            ]))
+        } else {
+            Ok(Self::Empty)
+        }
+    }
+}
+
+impl CompressedFloatArray<4> {
+    fn new(reader: &mut BlockDataReader, mask: &QuatMask, primitive_type: &CompressedQuaternionType) -> Result<Self> {
+        if mask.has_spline() {
+            let num_items = reader.read_u16()? as usize;
+            let degree = reader.read_u8()? as usize;
+            let knots = reader.read_bytes(num_items + degree + 2)?;
+            reader.align(match primitive_type {
+                CompressedQuaternionType::K32 => 4,
+                CompressedQuaternionType::K48 => 2,
+                _ => 1, // alignment for the remaining quaternion types is unverified
+            })?;
+
+            let control_points = match primitive_type {
+                CompressedQuaternionType::K32 => (0..num_items + 1)
+                    .map(|_| reader.read_k32_quat())
+                    .collect::<Result<Vec<[f32; 4]>>>()?,
+                CompressedQuaternionType::K40 => (0..num_items + 1)
+                    .map(|_| reader.read_k40_quat())
+                    .collect::<Result<Vec<[f32; 4]>>>()?,
+                CompressedQuaternionType::K48 => (0..num_items + 1)
+                    .map(|_| reader.read_k48_quat())
+                    .collect::<Result<Vec<[f32; 4]>>>()?,
+                _ => return Err(Error::Invalid("Unsupported compressed primitive type.".into())),
+            };
+
+            Ok(Self::Spline(Nurbs::<4>::new(control_points, knots, degree)))
+        } else if mask.has_static() {
+            Ok(Self::Static(reader.read_k40_quat()?))
+        } else {
+            Ok(Self::Empty)
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct TimedCompressedFloatArray<const COUNT: usize> {
+    array: CompressedFloatArray<COUNT>,
+    num_frames: usize,
+    frame_duration: f32,
+    duration: f32,
+}
+
+impl<const COUNT: usize> TimedCompressedFloatArray<COUNT> {
+    fn new(array: CompressedFloatArray<COUNT>, num_frames: usize, frame_duration: f32, duration: f32) -> Self {
+        Self {
+            array,
+            num_frames,
+            frame_duration,
+            duration,
+        }
+    }
+}
+
+impl<const COUNT: usize> InterpolatableTimeToValueTrait<COUNT> for TimedCompressedFloatArray<COUNT> {
+    fn is_empty(&self) -> bool {
+        match self.array {
+            CompressedFloatArray::Empty => true,
+            _ => false,
+        }
+    }
+
+    fn is_static(&self) -> bool {
+        match self.array {
+            CompressedFloatArray::Empty => true,
+            CompressedFloatArray::Static(_) => true,
+            _ => false,
+        }
+    }
+
+    fn duration(&self) -> f32 {
+        self.duration
+    }
+
+    fn frame_times(&self) -> Vec<f32> {
+        match &self.array {
+            CompressedFloatArray::Spline(_) => (0..(self.num_frames - 1)).map(|x| x as f32 * self.frame_duration).collect(),
+            CompressedFloatArray::Static(_) => vec!(0f32),
+            CompressedFloatArray::Empty => vec!(0f32),
+        }
+    }
+
+    fn interpolate(&self, t: f32) -> [f32; COUNT] {
+        match &self.array {
+            CompressedFloatArray::Spline(nurbs) => nurbs.interpolate(t / self.frame_duration),
+            CompressedFloatArray::Static(v) => *v,
+            CompressedFloatArray::Empty => [0f32; COUNT],
+        }
+    }
+}
+
+enum ValueMask {
+    Spline = 2,
+    Static = 1,
+    Empty = 0,
+}
+
+#[derive(Debug)]
+struct VectorMask {
+    bits: u8,
+}
+
+impl VectorMask {
+    fn new(bits: u8) -> Self { Self { bits } }
+
+    fn has_static(&self) -> bool { 0 != (self.bits & 0x0F) }
+    fn has_spline(&self) -> bool { 0 != (self.bits & 0xF0) }
+
+    fn mask(&self, component_index: usize) -> Result<ValueMask> {
+        if 3 <= component_index {
+            panic!("Component index out of range.")
+        }
+        match (self.bits >> component_index) & 0x11 {
+            0x00 => Ok(ValueMask::Empty),
+            0x01 => Ok(ValueMask::Static),
+            0x10 => Ok(ValueMask::Spline),
+            _ => Err(Error::Invalid("Invalid mask".into())),
+        }
+    }
+}
+
+#[derive(Debug)]
+struct QuatMask {
+    bits: u8,
+}
+
+impl QuatMask {
+    fn new(bits: u8) -> Self { Self { bits } }
+
+    fn has_static(&self) -> bool { 0 != (self.bits & 0x0F) }
+    fn has_spline(&self) -> bool { 0 != (self.bits & 0xF0) }
+}
+
+#[derive(Debug)]
+struct TransformMask {
+    compression: u8,
+    translate: VectorMask,
+    rotate: QuatMask,
+    scale: VectorMask,
+}
+
+impl TransformMask {
+    fn new(reader: &mut BlockDataReader) -> Result<Self> {
+        let mut buf = [0u8; 4];
+        reader.reader.read_exact(&mut buf)?;
+        Ok(Self {
+            compression: buf[0],
+            translate: VectorMask::new(buf[1]),
+            rotate: QuatMask::new(buf[2]),
+            scale: VectorMask::new(buf[3]),
+        })
+    }
+
+    fn translate_primitive_type(&self) -> Result<CompressedScalarType> {
+        CompressedScalarType::from(self.compression & 0x3)
+    }
+
+    fn rotate_primitive_type(&self) -> Result<CompressedQuaternionType> {
+        CompressedQuaternionType::from((self.compression >> 2) & 0xF)
+    }
+
+    fn scale_primitive_type(&self) -> Result<CompressedScalarType> {
+        CompressedScalarType::from(self.compression >> 6)
+    }
+}
+
+enum CompressedScalarType {
+    K8,
+    K16,
+}
+
+impl CompressedScalarType {
+    fn from(value: u8) -> Result<Self> {
+        match value {
+            0 => Ok(Self::K8),
+            1 => Ok(Self::K16),
+            _ => Err(Error::Invalid(format!("{0} is not a valid CompressedScalarType.", value).into()))
+        }
+    }
+}
+
+enum CompressedQuaternionType {
+    K32,
+    K40,
+    K48,
+    K24,
+    K16,
+    K128,
+}
+
+impl CompressedQuaternionType {
+    fn from(value: u8) -> Result<Self> {
+        match value {
+            0 => Ok(Self::K32),
+            1 => Ok(Self::K40),
+            2 => Ok(Self::K48),
+            3 => Ok(Self::K24),
+            4 => Ok(Self::K16),
+            5 => Ok(Self::K128),
+            _ => Err(Error::Invalid(format!("{0} is not a valid CompressedQuaternionType.", value).into()))
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct Nurbs<const N: usize> {
+    control_points: Vec<[f32; N]>,
+    knots: Vec<u8>,
+    degree: usize,
+}
+
+impl<const N: usize> Nurbs<N> {
+    pub fn new(control_points: Vec<[f32; N]>, knots: Vec<u8>, degree: usize) -> Self {
+        Self {
+            control_points,
+            knots,
+            degree,
+        }
+    }
+
+    pub fn interpolate(&self, t: f32) -> [f32; N] {
+        let span = self.find_span(t);
+        let basis = self.bspline_basis(span, t);
+
+        let mut value = [0f32; N];
+        for i in 0..(self.degree + 1) {
+            for j in 0..N {
+                value[j] += self.control_points[span - i][j] * basis[i]
+            }
+        }
+
+        value
+    }
+
+    /*
+     * bsplineBasis and findSpan are based on the implementations of
+     * https://github.com/PredatorCZ/HavokLib
+     */
+    fn bspline_basis(&self, span: usize, t: f32) -> [f32; N] {
+        let mut res = [0f32; N];
+        res[0] = 1f32;
+
+        for i in 0..self.degree {
+            for j in (0..(i + 1)).rev() {
+                let mut tmp = res[j];
+                tmp *= t - self.knots[span - j] as f32;
+                tmp /= (self.knots[span + i + 1 - j] - self.knots[span - j]) as f32;
+                res[j + 1] += res[j] - tmp;
+                res[j] = tmp;
+            }
+        }
+
+        res
+    }
+
+    fn find_span(&self, t: f32) -> usize {
+        if t >= self.knots[self.control_points.len()].into() {
+            self.control_points.len() - 1
+        } else {
+            let mut low = self.degree;
+            let mut high = self.control_points.len();
+            let mut mid = (low + high) / 2;
+
+            while t < self.knots[mid].into() || t >= self.knots[mid + 1].into() {
+                if t < self.knots[mid].into() {
+                    high = mid;
+                } else {
+                    low = mid;
+                }
+
+                mid = (low + high) / 2;
+            }
+
+            mid
+        }
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 8e17d88..d112ed5 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,10 +4,12 @@
 #![warn(missing_debug_implementations, missing_docs)]
 
 mod error;
+mod macros;
 mod node;
 mod value;
 mod walker;
 
+pub mod compressedanimation;
 pub mod tagfile;
 
 pub use {error::Error, value::Value, walker::NodeWalker};
diff --git a/src/macros.rs b/src/macros.rs
new file mode 100644
index 0000000..1f09414
--- /dev/null
+++ b/src/macros.rs
@@ -0,0 +1,12 @@
+
+macro_rules! read_primitive {
+    ($type:ty, $fn_name:ident) => {
+        pub fn $fn_name(&mut self) -> Result<$type> {
+            let mut buffer = [0u8; std::mem::size_of::<$type>()];
+            self.reader.read_exact(&mut buffer)?;
+            Ok(<$type>::from_le_bytes(buffer))
+        }
+    };
+}
+
+pub(crate) use read_primitive;
diff --git a/src/tagfile/common.rs b/src/tagfile/common.rs
index 26be7d4..26f38a0 100644
--- a/src/tagfile/common.rs
+++ b/src/tagfile/common.rs
@@ -1,19 +1,10 @@
 use std::io::Read;
 
 use crate::error::Result;
+use crate::macros::read_primitive;
 
 use super::tagfile::Tagfile;
 
-macro_rules! read_primitive {
-    ($type:ty, $fn_name:ident) => {
-        pub fn $fn_name(&mut self) -> Result<$type> {
-            let mut buffer = [0u8; std::mem::size_of::<$type>()];
-            self.reader.read_exact(&mut buffer)?;
-            Ok(<$type>::from_le_bytes(buffer))
-        }
-    };
-}
-
 impl Tagfile {
     read_primitive!(u64, read_u64);
     read_primitive!(u8, read_u8);
diff --git a/src/tagfile/node.rs b/src/tagfile/node.rs
index bbd88e9..7967087 100644
--- a/src/tagfile/node.rs
+++ b/src/tagfile/node.rs
@@ -74,6 +74,8 @@ impl Tagfile {
 
             FieldKind::Integer => Ok(Value::I32(self.read_i32()?)),
 
+            FieldKind::Float => Ok(Value::F32(self.read_f32()?)),
+
             FieldKind::String => Ok(Value::String(self.read_string()?)),
 
             FieldKind::Struct(name) => {
@@ -160,6 +162,10 @@ impl Tagfile {
                 .collect::<Result<Vec<_>>>()
             }
 
+            FieldKind::Byte => (0..count)
+                .map(|_| Ok(Value::U8(self.read_u8()?)))
+                .collect::<Result<Vec<_>>>(),
+
             FieldKind::String => (0..count)
                 .map(|_| Ok(Value::String(self.read_string()?)))
                 .collect::<Result<Vec<_>>>(),
diff --git a/src/walker.rs b/src/walker.rs
index f1b8389..6c3396c 100644
--- a/src/walker.rs
+++ b/src/walker.rs
@@ -1,16 +1,128 @@
 use std::{fmt, rc::Rc};
 
 use crate::{
-    node::{Field, Node},
+    error::{Error, Result},
+    node::{Definition, Node},
     value::Value,
 };
 
 /// View into a collection of nodes.
+#[derive(Clone)]
 pub struct NodeWalker {
     pub(super) nodes: Rc<Vec<Node>>,
     pub(super) index: usize,
 }
 
+macro_rules! nodewalker_field_typed_ref {
+    ($type:ty, $fn_name:ident, $enum_pattern:ident, $docstr:expr) => {
+        #[doc="Read the value of the specified field as "]
+        #[doc=$docstr]
+        #[doc="."]
+        pub fn $fn_name<'a>(&'a self, field_name: &str, default_value: Option<&'a $type>) -> Result<&'a $type> {
+            match self.field(field_name) {
+                Some(value) => match value {
+                    Value::$enum_pattern(x) => Ok(x),
+                    _ => Err(Error::Invalid(
+                        format!("Field {0} has an invalid type.", field_name).into()
+                    )),
+                },
+                None => match default_value {
+                    Some(value) => Ok(value),
+                    None => Err(Error::Invalid(
+                        format!("Field {0} is missing.", field_name).into()
+                    ))
+                },
+            }
+        }
+    }
+}
+
+macro_rules! nodewalker_field_typed_copy {
+    ($type:ty, $fn_name:ident, $enum_pattern:ident, $docstr:expr) => {
+        #[doc="Read the value of the specified field as "]
+        #[doc=$docstr]
+        #[doc="."]
+        pub fn $fn_name(&self, field_name: &str, default_value: Option<$type>) -> Result<$type> {
+            match self.field(field_name) {
+                Some(value) => match value {
+                    Value::$enum_pattern(x) => Ok(*x),
+                    _ => Err(Error::Invalid(
+                        format!("Field {0} has an invalid type.", field_name).into()
+                    )),
+                },
+                None => match default_value {
+                    Some(value) => Ok(value),
+                    None => Err(Error::Invalid(
+                        format!("Field {0} is missing.", field_name).into()
+                    ))
+                },
+            }
+        }
+    }
+}
+
+macro_rules! nodewalker_field_typed_vec_ref {
+    ($type:ty, $fn_name:ident, $enum_pattern:ident, $docstr:expr) => {
+        #[doc="Read the value of the specified field as "]
+        #[doc=$docstr]
+        #[doc="."]
+        pub fn $fn_name<'a>(&'a self, field_name: &str, default_value: Option<Vec<&'a $type>>) -> Result<Vec<&'a $type>> {
+            match self.field(field_name) {
+                Some(value) => match value {
+                    Value::Vector(x) => x.iter()
+                        .map(|x| match x {
+                            Value::$enum_pattern(y) => Ok(y),
+                            _ => Err(Error::Invalid(
+                                format!("Field {0} has an invalid type in array.", field_name).into()
+                            )),
+                        })
+                        .collect(),
+                    _ => Err(Error::Invalid(
+                        format!("Field {0} has an invalid type.", field_name).into()
+                    )),
+                },
+                None => match default_value {
+                    Some(value) => Ok(value),
+                    None => Err(Error::Invalid(
+                        format!("Field {0} is missing.", field_name).into()
+                    ))
+                },
+            }
+        }
+    }
+}
+
+macro_rules! nodewalker_field_typed_vec_copy {
+    ($type:ty, $fn_name:ident, $enum_pattern:ident, $docstr:expr) => {
+        #[doc="Read the value of the specified field as "]
+        #[doc=$docstr]
+        #[doc="."]
+        pub fn $fn_name(&self, field_name: &str, default_value: Option<Vec<$type>>) -> Result<Vec<$type>> {
+            match self.field(field_name) {
+                Some(value) => match value {
+                    Value::Vector(x) => x.iter()
+                        .map(|x| match x {
+                            Value::$enum_pattern(y) => Ok(*y),
+                            _ => Err(Error::Invalid(
+                                format!("Field {0} has an invalid type in array.", field_name).into()
+                            )),
+                        })
+                        .collect(),
+                    _ => Err(Error::Invalid(
+                        format!("Field {0} has an invalid type.", field_name).into()
+                    )),
+                },
+                None => match default_value {
+                    Some(value) => Ok(value),
+                    None => Err(Error::Invalid(
+                        format!("Field {0} is missing.", field_name).into()
+                    ))
+                },
+            }
+        }
+    };
+}
+
 impl NodeWalker {
     /// Get a walker instance for the requested node index.
     pub fn node(&self, index: usize) -> NodeWalker {
@@ -35,27 +147,111 @@ impl NodeWalker {
         self.current().definition.version
     }
 
-    fn iter_fields(&self) -> impl Iterator<Item = (&Field, Option<usize>)> {
-        let current = self.current();
-        let mask_indexes = current.field_mask.iter().scan(0usize, |index, mask| {
-            Some(match mask {
-                true => {
-                    let id = *index;
-                    *index += 1;
-                    Some(id)
+    /// Check if the current node is an instance of the specified type (definition).
+    pub fn is_or_inherited_from(&self, definition_name: &str) -> bool {
+        let mut d = &self.current().definition;
+        loop {
+            if d.name == definition_name {
+                return true;
+            }
+
+            match &d.parent {
+                Some(x) => d = x,
+                None => return false
+            }
+        }
+    }
+
+    fn field_impl(
+        &self,
+        field_name: &str,
+        definition: &Rc<Definition>,
+        field_index: &mut usize,
+        value_index: &mut usize,
+    ) -> Option<&Value> {
+        if definition.parent.is_some() {
+            let r = self.field_impl(
+                field_name,
+                definition.parent.as_ref().unwrap(),
+                field_index,
+                value_index);
+            if r.is_some() {
+                return r;
+            }
+        }
+
+        let node = self.current();
+        for i in 0..definition.fields.len() {
+            if definition.fields[i].name == field_name {
+                return match node.field_mask[*field_index] {
+                    true => Some(&node.values[*value_index]),
+                    _ => None,
+                };
+            } else {
+                if node.field_mask[*field_index] {
+                    *value_index += 1;
                 }
-                false => None,
-            })
-        });
+            }
+
+            *field_index += 1;
+        }
 
-        std::iter::zip(current.definition.fields(), mask_indexes)
+        None
     }
 
     /// Get the value of the specified field.
-    pub fn field(&self, name: &str) -> Option<&Value> {
-        self.iter_fields()
-            .find(|(field, _index)| field.name == name)
-            .and_then(|(_field, index)| index.map(|index| &self.current().values[index]))
+    pub fn field(&self, field_name: &str) -> Option<&Value> {
+        let mut field_index: usize = 0;
+        let mut value_index: usize = 0;
+        self.field_impl(field_name, &self.current().definition, &mut field_index, &mut value_index)
+    }
+
+    nodewalker_field_typed_copy!(u8, field_u8, U8, "a u8");
+    nodewalker_field_typed_vec_copy!(u8, field_u8_vec, U8, "a Vec<u8>");
+    nodewalker_field_typed_copy!(i32, field_i32, I32, "an i32");
+    nodewalker_field_typed_vec_copy!(i32, field_i32_vec, I32, "a Vec<i32>");
+    nodewalker_field_typed_copy!(f32, field_f32, F32, "an f32");
+    nodewalker_field_typed_vec_copy!(f32, field_f32_vec, F32, "a Vec<f32>");
+    nodewalker_field_typed_ref!(String, field_string, String, "a &String");
+    nodewalker_field_typed_vec_ref!(String, field_string_vec, String, "a Vec<&String>");
+    nodewalker_field_typed_ref!(Vec<Value>, field_vec, Vector, "a &Vec<Value>");
+    nodewalker_field_typed_vec_ref!(Vec<Value>, field_vec_vec, Vector, "a Vec<&Vec<Value>>");
+
+    /// Read the value of the specified field as a NodeWalker.
+    pub fn field_node(&self, field_name: &str) -> Result<NodeWalker> {
+        match self.field(field_name) {
+            Some(value) => match value {
+                Value::Node(x) => Ok(self.node(*x)),
+                _ => Err(Error::Invalid(
+                    format!("Field {0} has an invalid type.", field_name).into()
+                )),
+            },
+            None => Err(Error::Invalid(
+                format!("Field {0} is missing.", field_name).into()
+            )),
+        }
+    }
+
+    /// Read the value of the specified field as a Vec<NodeWalker>.
+    pub fn field_node_vec(&self, field_name: &str) -> Result<Vec<NodeWalker>> {
+        match self.field(field_name) {
+            Some(value) => match value {
+                Value::Vector(x) => x.iter()
+                    .map(|x| match x {
+                        Value::Node(y) => Ok(self.node(*y)),
+                        _ => Err(Error::Invalid(
+                            format!("Field {0} has an invalid type in array.", field_name).into()
+                        )),
+                    })
+                    .collect(),
+                _ => Err(Error::Invalid(
+                    format!("Field {0} has an invalid type.", field_name).into()
+                )),
+            },
+            None => Err(Error::Invalid(
+                format!("Field {0} is missing.", field_name).into()
+            )),
+        }
     }
 }
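For review purposes, a minimal sketch of how the new public surface might be exercised end to end. The `dump_animations` helper and its placement inside the crate are hypothetical, and obtaining the root `NodeWalker` is assumed to go through the crate's existing tagfile parsing (untouched by this diff); everything below it uses only the API introduced here (`new_from_root`, `AnimationTrait`, `InterpolatableTimeToValueTrait`).

```rust
// Hypothetical crate-internal helper: walk every animation under a parsed
// tagfile root and sample each bone's TRS curves at the animation's own
// frame times.
use crate::{compressedanimation, error::Result, NodeWalker};

pub fn dump_animations(root: &NodeWalker) -> Result<()> {
    // `root` is assumed to come from the crate's existing tagfile parser.
    let animations = compressedanimation::new_from_root(root)?;

    for (anim_index, anim) in animations.iter().enumerate() {
        println!(
            "animation {}: duration={}s, tracks={}",
            anim_index,
            anim.duration(),
            anim.num_tracks()
        );

        for track in 0..anim.num_tracks() {
            let translation = anim.translation(track);
            let rotation = anim.rotation(track);
            let scale = anim.scale(track);

            // frame_times() yields absolute times in seconds; interpolate()
            // returns [f32; 3] for translation/scale and [f32; 4] for rotation.
            for t in anim.frame_times() {
                let [tx, ty, tz] = translation.interpolate(t);
                let [rx, ry, rz, rw] = rotation.interpolate(t);
                let [sx, sy, sz] = scale.interpolate(t);
                println!(
                    "  track {track} @ {t:.3}s: T=({tx}, {ty}, {tz}) R=({rx}, {ry}, {rz}, {rw}) S=({sx}, {sy}, {sz})"
                );
            }
        }
    }

    Ok(())
}
```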