From 2ad1c7ddc5deb33fb64ab25dba4a3f86e9940230 Mon Sep 17 00:00:00 2001
From: Andy Watkins
Date: Mon, 22 Jul 2024 14:24:36 +0100
Subject: [PATCH] 1.0.0

---
 crates/odonata-base/src/eg/endgame.rs   |  6 +-
 crates/odonata-base/src/infra/metric.rs | 75 ++++++++++++++++++++-----
 docs/README.md                          |  2 +-
 3 files changed, 66 insertions(+), 17 deletions(-)

diff --git a/crates/odonata-base/src/eg/endgame.rs b/crates/odonata-base/src/eg/endgame.rs
index 0163f15d..0b9cab05 100644
--- a/crates/odonata-base/src/eg/endgame.rs
+++ b/crates/odonata-base/src/eg/endgame.rs
@@ -37,7 +37,7 @@ impl Default for EndGameScoring {
         Self {
             enabled: true,
             win_bonus: 0.cp(),
-            certain_win_bonus: 1000.cp(),
+            certain_win_bonus: 10000.cp(),
             likely_draw_scale: 1.0,
             scale_by_hmvc: true,
         }
@@ -270,9 +270,9 @@ impl EndGame {
         let us = winner == b.turn();
         if let Some((metric1, metric2)) = self.metrics(winner, b) {
             pov = if us {
-                pov + 3 * Score::from_cp(-metric1) + 3 * Score::from_cp(-metric2)
+                3 * Score::from_cp(-metric1) + 3 * Score::from_cp(-metric2)
             } else {
-                pov + 3 * Score::from_cp(metric1) + 3 * Score::from_cp(metric2)
+                3 * Score::from_cp(metric1) + 3 * Score::from_cp(metric2)
             };
 
             // win specific scoring, so we award win_bonus as other features will be ignored
diff --git a/crates/odonata-base/src/infra/metric.rs b/crates/odonata-base/src/infra/metric.rs
index 96731795..79b1adf7 100644
--- a/crates/odonata-base/src/infra/metric.rs
+++ b/crates/odonata-base/src/infra/metric.rs
@@ -94,6 +94,55 @@ impl CellThroughput {
     }
 }
 
+#[derive(Debug, Default, serde_with::SerializeDisplay)]
+pub struct AtomicMean {
+    count: AtomicCell<u64>,
+    hits: AtomicCell<i64>,
+}
+
+impl Clone for AtomicMean {
+    fn clone(&self) -> Self {
+        Self {
+            count: AtomicCell::new(self.count.load()),
+            hits: AtomicCell::new(self.hits.load()),
+        }
+    }
+}
+
+impl Metric for AtomicMean {}
+
+
+impl Display for AtomicMean {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:0.2}", self.mean())
+    }
+}
+
+impl AtomicMean {
+    pub const fn new() -> Self {
+        Self {
+            count: AtomicCell::new(0),
+            hits: AtomicCell::new(0),
+        }
+    }
+
+    pub fn count(&self) -> u64 {
+        self.count.load()
+    }
+
+    pub fn add(&self, i: i64) {
+        self.hits.fetch_add(i);
+        self.count.fetch_add(1);
+    }
+
+    pub fn total(&self) -> i64 {
+        self.hits.load()
+    }
+    pub fn mean(&self) -> f64 {
+        self.hits.load() as f64 / self.count.load() as f64
+    }
+}
+
 #[derive(Debug, serde_with::SerializeDisplay)]
 pub struct AtomicThroughput {
     start: Mutex>,
@@ -383,20 +432,20 @@ impl Clone for CountMetric {
     }
 }
 
-impl From<(String, i64)> for CountMetric {
-    fn from(value: (String, i64)) -> Self {
-        CountMetric {
-            name: value.0,
-            count: CachePadded::new(value.1.into()),
-        }
-    }
-}
+// impl From<(String, i64)> for CountMetric {
+//     fn from(value: (String, i64)) -> Self {
+//         CountMetric {
+//             name: value.0,
+//             count: CachePadded::new(value.1.into()),
+//         }
+//     }
+// }
 
-impl From<CountMetric> for (String, i64) {
-    fn from(val: CountMetric) -> Self {
-        (val.name, val.count.load())
-    }
-}
+// impl From<CountMetric> for (String, i64) {
+//     fn from(val: CountMetric) -> Self {
+//         (val.name, val.count.load())
+//     }
+// }
 
 impl CountMetric {
     fn new(name: &str) -> Self {
diff --git a/docs/README.md b/docs/README.md
index 2f24a0c7..543fcf49 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -68,7 +68,7 @@ odonata.exe uci "perft 6; board; go depth 6"
 
 ## Chess position evaluation, training and search tuning
 
 Odonata has both a hand-crafted evaluation (HCE) and a [NNUE](https://en.wikipedia.org/w/index.php?title=NNUE) style neural-network.
 The HCE was tuned using the [L-BFGS](https://en.wikipedia.org/wiki/Limited-memory_BFGS) algorithm and a tuner based upon [ArgMin](https://argmin-rs.org/). The training data was generated from self-play games and from Odonata vs internet opponent games on [Lichess](https://lichess.org/@/odonata-bot).
-The NNUE was trained using 400 million positions from self-play games, and evaluations using Odonata's own HCE. The trainer is a Rust based, self-written CPU trainer using mini-batch AdamW, stepped learning rate, Normal/He-style weight initilisation, Rayon for multi-threading, and Rust's auto-vectorization for SIMD. It's not as fast as a GPU trainer, but manages 3 MM positions/second so a training run is a few hours.
+The NNUE was trained on 700 million positions from self-play games, with target evaluations from Odonata's own HCE and NNUE. The trainer is a self-written, Rust-based CPU trainer using mini-batch AdamW, a stepped learning rate, Normal/He-style weight initialisation, Rayon for multi-threading, and Rust's auto-vectorization for SIMD. It's not as fast as a GPU trainer, but it manages 3 million positions/second, so a training run takes a few hours.