diff --git a/CHANGELOG.md b/CHANGELOG.md index e03c92a..3d507a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,15 @@ # ExplainableAI.jl +## Version `v0.6.2` +This is the first release of ExplainableAI.jl as part of the +[Julia-XAI](https://github.com/Julia-XAI) organization ([#149][pr-149]) +and the last release that includes LRP before it is moved to its own separate package. + +- ![Feature][badge-feature] Add Concept Relevance Propagation analyzer `CRP` ([#146][pr-146], [#148][pr-148]) +- ![Feature][badge-feature] Add option to process heatmaps batch-wise + using keyword argument `process_batch=true` ([#146][pr-146], [#148][pr-148]) +- ![Bugfix][badge-bugfix] Remove `FlatRule` on dense layers + from `EpsilonPlusFlat` and `EpsilonAlpha2Beta1Flat` composite presets ([#147][pr-147]) + ## Version `v0.6.1` This release brings GPU support to all analyzers. - ![Feature][badge-feature] Support LRP on GPUs ([#142][pr-142], [#140][pr-140]) @@ -165,6 +176,10 @@ Performance improvements: ![Maintenance][badge-maintenance] ![Documentation][badge-docs] --> +[pr-149]: https://github.com/Julia-XAI/ExplainableAI.jl/pull/149 +[pr-148]: https://github.com/Julia-XAI/ExplainableAI.jl/pull/148 +[pr-147]: https://github.com/Julia-XAI/ExplainableAI.jl/pull/147 +[pr-146]: https://github.com/Julia-XAI/ExplainableAI.jl/pull/146 [pr-145]: https://github.com/Julia-XAI/ExplainableAI.jl/pull/145 [pr-144]: https://github.com/Julia-XAI/ExplainableAI.jl/pull/144 [pr-142]: https://github.com/Julia-XAI/ExplainableAI.jl/pull/142 diff --git a/Project.toml b/Project.toml index 6a1cfe9..97e2895 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ExplainableAI" uuid = "4f1bc3e1-d60d-4ed0-9367-9bdff9846d3b" authors = ["Adrian Hill"] -version = "0.6.1" +version = "0.6.2" [deps] ColorSchemes = "35d6a980-a343-548e-a6ea-1d62b119f2f4" diff --git a/README.md b/README.md index 4d3d8f2..c2d5109 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ ___ Explainable AI in Julia. 
This package implements interpretability methods for black box models, -with a focus on local explanations and attribution maps. +with a focus on local explanations and attribution maps in input space. It is similar to [Captum][captum-repo] and [Zennit][zennit-repo] for PyTorch and [iNNvestigate][innvestigate-repo] for Keras models.