Commit 4c395dc: added hep-ex entries (#168)

DalilaSalamani authored Jul 13, 2023
1 parent 4de744e commit 4c395dc

Showing 5 changed files with 106 additions and 7 deletions.
78 changes: 78 additions & 0 deletions HEPML.bib
@@ -1,5 +1,27 @@
# HEPML Papers
% Jul. 11, 2023
@article{Coccaro:2023nol,
author = "Coccaro, Andrea and Di Bello, Francesco Armando and Giagu, Stefano and Rambelli, Lucrezia and Stocchetti, Nicola",
title = "{Fast Neural Network Inference on FPGAs for Triggering on Long-Lived Particles at Colliders}",
eprint = "2307.05152",
archivePrefix = "arXiv",
primaryClass = "hep-ex",
month = "7",
year = "2023"
}

% Jul. 11, 2023
@article{Rehm:2023ovj,
author = "Rehm, Florian and Vallecorsa, Sofia and Borras, Kerstin and Grossi, Michele and Kruecker, Dirk and Varo, Valle",
title = "{Precise Image Generation on Current Noisy Quantum Computing Devices}",
eprint = "2307.05253",
archivePrefix = "arXiv",
primaryClass = "quant-ph",
month = "7",
year = "2023"
}

% Jul. 11, 2023
@article{Algren:2023spv,
author = "Algren, Malte and Raine, John Andrew and Golling, Tobias",
@@ -11,6 +33,17 @@ @article{Algren:2023spv
year = "2023"
}

% Jul. 10, 2023
@article{Acosta:2023zik,
author = "Acosta, Fernando Torales and Mikuni, Vinicius and Nachman, Benjamin and Arratia, Miguel and Barish, Kenneth and Karki, Bishnu and Milton, Ryan and Karande, Piyush and Angerami, Aaron",
title = "{Comparison of Point Cloud and Image-based Models for Calorimeter Fast Simulation}",
eprint = "2307.04780",
archivePrefix = "arXiv",
primaryClass = "cs.LG",
month = "7",
year = "2023"
}

% Jul. 5, 2023
@article{Raine:2023fko,
author = "Raine, John Andrew and Leigh, Matthew and Zoch, Knut and Golling, Tobias",
@@ -22,6 +55,51 @@ @article{Raine:2023fko
year = "2023"
}

% Jun. 23, 2023
@article{Dubinski:2023fsy,
author = "Dubi\'nski, Jan and Deja, Kamil and Wenzel, Sandro and Rokita, Przemys\l{}aw and Trzci\'nski, Tomasz",
title = "{Machine Learning methods for simulating particle response in the Zero Degree Calorimeter at the ALICE experiment, CERN}",
eprint = "2306.13606",
archivePrefix = "arXiv",
primaryClass = "cs.CV",
month = "6",
year = "2023"
}

% Jun. 22, 2023
@article{Anzalone:2023ugq,
author = "Anzalone, Luca and Chhibra, Simranjit Singh and Maier, Benedikt and Chernyavskaya, Nadezda and Pierini, Maurizio",
title = "{Triggering Dark Showers with Conditional Dual Auto-Encoders}",
eprint = "2306.12955",
archivePrefix = "arXiv",
primaryClass = "hep-ex",
month = "6",
year = "2023"
}

% Jun. 21, 2023
@article{Heinrich:2023bmt,
author = "Heinrich, Lukas and Mishra-Sharma, Siddharth and Pollard, Chris and Windischhofer, Philipp",
title = "{Hierarchical Neural Simulation-Based Inference Over Event Ensembles}",
eprint = "2306.12584",
archivePrefix = "arXiv",
primaryClass = "stat.ML",
reportNumber = "MIT-CTP/5576",
month = "6",
year = "2023"
}

% Jun. 20, 2023
@article{DeZoort:2023dvb,
author = "DeZoort, Gage and Hanin, Boris",
title = "{Principles for Initialization and Architecture Selection in Graph Neural Networks with ReLU Activations}",
eprint = "2306.11668",
archivePrefix = "arXiv",
primaryClass = "stat.ML",
month = "6",
year = "2023"
}

% Jun. 19, 2023
@article{Karmakar:2023mhy,
author = "Karmakar, Annesha and Pal, Anikesh and Kumar, G. Anil and Bhavika and Anand, V. and Tyagi, Mohit",
14 changes: 7 additions & 7 deletions HEPML.tex
@@ -104,15 +104,15 @@
\item \textbf{Learning strategies}
\\\textit{There is no unique way to train a classifier, and designing an effective learning strategy is often one of the biggest challenges for achieving optimality.}
\begin{itemize}
\item \textbf{Hyperparameters}~\cite{Tani:2020dyi,Dudko:2021cie,Bevan:2017stx}
\item \textbf{Hyperparameters}~\cite{Tani:2020dyi,Dudko:2021cie,Bevan:2017stx,DeZoort:2023dvb}
\\\textit{In addition to learnable weights $w$, classifiers have a number of non-differentiable parameters like the number of layers in a neural network. These parameters are called hyperparameters.} (A toy scan over such parameters is sketched after this hunk.)
\item \textbf{Weak/Semi supervision}~\cite{Dery:2017fap,Metodiev:2017vrx,Komiske:2018oaa,Collins:2018epr,Collins:2019jip,Borisyak:2019vbz,Cohen:2017exh,Komiske:2018vkc,Metodiev:2018ftz,collaboration2020dijet,Amram:2020ykb,Brewer:2020och,Dahbi:2020zjw,Lee:2019ssx,Lieberman:2021krq,Komiske:2022vxg,Li:2022omf,Finke:2022lsu,LeBlanc:2022bwd,Dolan:2022ikg,Bardhan:2023mia}
\\\textit{For supervised learning, the labels $y_i$ are known. If the labels are noisy or only known with some uncertainty, the learning is called weak supervision. Semi-supervised learning is the related case where labels are known for only a fraction of the training examples.}
\item \textbf{Unsupervised}~\cite{Mackey:2015hwa,Komiske:2019fks,1797846,Dillon:2019cqt,Cai:2020vzx,Howard:2021pos,Dillon:2021gag}
\\\textit{When no labels are provided, the learning is called unsupervised.}
\item \textbf{Reinforcement Learning}~\cite{Carrazza:2019efs,Brehmer:2020brs,John:2020sak,Harvey:2021oue,Cranmer:2021gdt,Windisch:2021mem,Dersy:2022bym,Nishimura:2023wdu}
\\\textit{Instead of learning to distinguish different types of examples, the goal of reinforcement learning is to learn a strategy (policy). The prototypical example of reinforcement learning is learning a strategy to play video games, using some kind of score as feedback during the learning.}
\item \textbf{Quantum Machine Learning}~\cite{Mott:2017xdb,Zlokapa:2019lvv,Blance:2020nhl,Terashi:2020wfi,Chen:2020zkj,Wu:2020cye,Guan:2020bdl,Chen:2021ouz,Blance:2021gcs,Heredge:2021vww,Wu:2021xsj,Belis:2021zqi,Araz:2021ifk,Bravo-Prieto:2021ehz,Kim:2021wrr,Ngairangbam:2021yma,Gianelle:2022unu,Abel:2022lqr,Araz:2022haf,Delgado:2022aty,Alvi:2022fkk,Peixoto:2022zzk,Araz:2022zxk,Rousselot:2023pcj}
\item \textbf{Quantum Machine Learning}~\cite{Mott:2017xdb,Zlokapa:2019lvv,Blance:2020nhl,Terashi:2020wfi,Chen:2020zkj,Wu:2020cye,Guan:2020bdl,Chen:2021ouz,Blance:2021gcs,Heredge:2021vww,Wu:2021xsj,Belis:2021zqi,Araz:2021ifk,Bravo-Prieto:2021ehz,Kim:2021wrr,Ngairangbam:2021yma,Gianelle:2022unu,Abel:2022lqr,Araz:2022haf,Delgado:2022aty,Alvi:2022fkk,Peixoto:2022zzk,Araz:2022zxk,Rousselot:2023pcj,Rehm:2023ovj}
\\\textit{Quantum computers are based on unitary operations applied to quantum states. These states live in a vast Hilbert space which may have a usefully large information capacity for machine learning.}
\item \textbf{Feature ranking}~\cite{Faucett:2020vbu,Grojean:2020ech,Das:2022cjl}
\\\textit{It is often useful to take a set of input features and rank them based on their usefulness.}
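
The "Hyperparameters" item above notes that choices like the number of layers are not learnable by gradient descent, so they are set by an outer search over a validation score. Below is a minimal sketch with a toy dataset and scikit-learn; it is an illustration only, not the procedure of any cited paper.

```python
# Hypothetical toy example: scan non-differentiable hyperparameters
# (hidden-layer sizes, L2 strength) by cross-validated grid search.
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=500, n_features=10, random_state=0)

search = GridSearchCV(
    MLPClassifier(max_iter=500, random_state=0),
    param_grid={
        "hidden_layer_sizes": [(16,), (32,), (32, 32)],  # architecture choices
        "alpha": [1e-4, 1e-2],                           # L2 regularization strength
    },
    cv=3,  # 3-fold cross-validation score drives the selection
)
search.fit(X, y)
print(search.best_params_, search.best_score_)
```

Grid search is only the simplest strategy; the cited works study, e.g., evolutionary algorithms and, for graph neural networks, principled initialization and architecture-selection rules.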
@@ -128,7 +128,7 @@
\begin{itemize}
\item \textbf{Software}~\cite{Strong:2020mge,Gligorov:2012qt,Weitekamp:DLPS2017,Nguyen:2018ugw,Bourgeois:2018nvk,1792136,Balazs:2021uhg,Rehm:2021zow,Mahesh:2021iph,Amrouche:2021tio,Pol:2021iqw,Goncharov:2021wvd,Saito:2021vpp,Jiang:2022zho,Guo:2023nfu,Tyson:2023zkx}
\\\textit{Strategies for efficient inference for a given hardware architecture.}
\item \textbf{Hardware/firmware}~\cite{Duarte:2018ite,DiGuglielmo:2020eqx,Summers:2020xiy,1808088,Iiyama:2020wap,Mohan:2020vvi,Carrazza:2020qwu,Rankin:2020usv,Heintz:2020soy,Rossi:2020sbh,Aarrestad:2021zos,Hawks:2021ruw,Teixeira:2021yhl,Hong:2021snb,DiGuglielmo:2021ide,Migliorini:2021fuj,Govorkova:2021utb,Elabd:2021lgo,Jwa:2019zlh,Butter:2022lkf,Sun:2022bxx,Khoda:2022dwz,Carlson:2022vac,Abidi:2022ogh,MeyerzuTheenhausen:2022ffb,Herbst:2023lug}
\item \textbf{Hardware/firmware}~\cite{Duarte:2018ite,DiGuglielmo:2020eqx,Summers:2020xiy,1808088,Iiyama:2020wap,Mohan:2020vvi,Carrazza:2020qwu,Rankin:2020usv,Heintz:2020soy,Rossi:2020sbh,Aarrestad:2021zos,Hawks:2021ruw,Teixeira:2021yhl,Hong:2021snb,DiGuglielmo:2021ide,Migliorini:2021fuj,Govorkova:2021utb,Elabd:2021lgo,Jwa:2019zlh,Butter:2022lkf,Sun:2022bxx,Khoda:2022dwz,Carlson:2022vac,Abidi:2022ogh,MeyerzuTheenhausen:2022ffb,Herbst:2023lug,Coccaro:2023nol}
\\\textit{Various accelerators have been studied for fast inference, which is very important for latency-limited applications like the trigger at collider experiments.} (A toy weight-quantization sketch follows this hunk.)
\item \textbf{Deployment}~\cite{Kuznetsov:2020mcj,SunnebornGudnadottir:2021nhk}
\\\textit{This category is for the deployment of machine learning interfaces, such as in the cloud.}
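
One concrete ingredient behind the "Hardware/firmware" item: weights are usually reduced to low precision before being mapped onto FPGA logic. Here is a minimal sketch of symmetric int8 post-training quantization of a single dense layer in NumPy; the shapes and data are invented, activations are kept in float for simplicity, and real flows (e.g. hls4ml) involve much more.

```python
# Hypothetical toy example: symmetric int8 post-training quantization
# of one dense layer, of the kind used before FPGA deployment.
import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(8, 16)).astype(np.float32)  # trained float32 weights
x = rng.normal(size=16).astype(np.float32)       # one input vector (left in float here)

scale = np.abs(W).max() / 127.0                  # map [-max|W|, +max|W|] onto [-127, 127]
W_q = np.clip(np.round(W / scale), -127, 127).astype(np.int8)

y_float = W @ x                                  # reference float32 result
y_quant = (W_q.astype(np.int32) @ x) * scale     # integer weights, dequantized output

print(np.max(np.abs(y_float - y_quant)))         # quantization error stays small
```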
@@ -165,19 +165,19 @@
\item \textbf{Generative models / density estimation}
\\\textit{The goal of generative modeling is to learn (explicitly or implicitly) a probability density $p(x)$ for the features $x\in\mathbb{R}^n$. This task is usually unsupervised (no labels).}
\begin{itemize}
\item \textbf{GANs}~\cite{deOliveira:2017pjk,Paganini:2017hrr,Paganini:2017dwg,Alonso-Monsalve:2018aqs,Butter:2019eyo,Martinez:2019jlu,Bellagente:2019uyp,Vallecorsa:2019ked,SHiP:2019gcl,Carrazza:2019cnt,Butter:2019cae,Lin:2019htn,DiSipio:2019imz,Hashemi:2019fkn,Chekalina:2018hxi,ATL-SOFT-PUB-2018-001,Zhou:2018ill,Carminati:2018khv,Vallecorsa:2018zco,Datta:2018mwd,Musella:2018rdi,Erdmann:2018kuh,Deja:2019vcv,Derkach:2019qfk,Erbin:2018csv,Erdmann:2018jxd,Urban:2018tqv,Oliveira:DLPS2017,deOliveira:2017rwa,Farrell:2019fsm,Hooberman:DLPS2017,Belayneh:2019vyx,Wang:2020tap,buhmann2020getting,Alanazi:2020jod,2009.03796,2008.06545,Kansal:2020svm,Maevskiy:2020ank,Lai:2020byl,Choi:2021sku,Rehm:2021zow,Rehm:2021zoz,Carrazza:2021hny,Rehm:2021qwm,Lebese:2021foi,Winterhalder:2021ave,Kansal:2021cqp,NEURIPS2020_a878dbeb,Khattak:2021ndw,Mu:2021nno,Li:2021cbp,Bravo-Prieto:2021ehz,Anderlini:2021qpm,Chisholm:2021pdn,Desai:2021wbb,Buhmann:2021caf,Bieringer:2022cbs,Ghosh:2022zdz,Anderlini:2022ckd,Ratnikov:2022hge,Rogachev:2022hjg,ATLAS:2022jhk,Anderlini:2022hgm,Buhmann:2023pmh,Yue:2023uva,Hashemi:2023ruu,Diefenbacher:2023prl,Chan:2023ume}
\item \textbf{GANs}~\cite{deOliveira:2017pjk,Paganini:2017hrr,Paganini:2017dwg,Alonso-Monsalve:2018aqs,Butter:2019eyo,Martinez:2019jlu,Bellagente:2019uyp,Vallecorsa:2019ked,SHiP:2019gcl,Carrazza:2019cnt,Butter:2019cae,Lin:2019htn,DiSipio:2019imz,Hashemi:2019fkn,Chekalina:2018hxi,ATL-SOFT-PUB-2018-001,Zhou:2018ill,Carminati:2018khv,Vallecorsa:2018zco,Datta:2018mwd,Musella:2018rdi,Erdmann:2018kuh,Deja:2019vcv,Derkach:2019qfk,Erbin:2018csv,Erdmann:2018jxd,Urban:2018tqv,Oliveira:DLPS2017,deOliveira:2017rwa,Farrell:2019fsm,Hooberman:DLPS2017,Belayneh:2019vyx,Wang:2020tap,buhmann2020getting,Alanazi:2020jod,2009.03796,2008.06545,Kansal:2020svm,Maevskiy:2020ank,Lai:2020byl,Choi:2021sku,Rehm:2021zow,Rehm:2021zoz,Carrazza:2021hny,Rehm:2021qwm,Lebese:2021foi,Winterhalder:2021ave,Kansal:2021cqp,NEURIPS2020_a878dbeb,Khattak:2021ndw,Mu:2021nno,Li:2021cbp,Bravo-Prieto:2021ehz,Anderlini:2021qpm,Chisholm:2021pdn,Desai:2021wbb,Buhmann:2021caf,Bieringer:2022cbs,Ghosh:2022zdz,Anderlini:2022ckd,Ratnikov:2022hge,Rogachev:2022hjg,ATLAS:2022jhk,Anderlini:2022hgm,Buhmann:2023pmh,Yue:2023uva,Hashemi:2023ruu,Diefenbacher:2023prl,Chan:2023ume,Dubinski:2023fsy}
\\\textit{Generative Adversarial Networks~\cite{Goodfellow:2014upx} learn $p(x)$ implicitly through the minimax optimization of two networks: a generator $G(z)$ that maps noise to structure, and a classifier (called the discriminator) that learns to distinguish examples generated from $G(z)$ from those generated by the target process. When the discriminator is maximally `confused', the generator is effectively mimicking $p(x)$.} (A toy training loop is sketched at the end of this hunk.)
\item \textbf{Autoencoders}~\cite{Monk:2018zsb,ATL-SOFT-PUB-2018-001,Cheng:2020dal,1816035,Howard:2021pos,Buhmann:2021lxj,Bortolato:2021zic,deja2020endtoend,Hariri:2021clz,Fanelli:2019qaq,Collins:2021pld,Orzari:2021suh,Jawahar:2021vyu,Tsan:2021brw,Buhmann:2021caf,Touranakou:2022qrp,Ilten:2022jfm,Collins:2022qpr,AbhishekAbhishek:2022wby,Cresswell:2022tof,Roche:2023int}
\item \textbf{Autoencoders}~\cite{Monk:2018zsb,ATL-SOFT-PUB-2018-001,Cheng:2020dal,1816035,Howard:2021pos,Buhmann:2021lxj,Bortolato:2021zic,deja2020endtoend,Hariri:2021clz,Fanelli:2019qaq,Collins:2021pld,Orzari:2021suh,Jawahar:2021vyu,Tsan:2021brw,Buhmann:2021caf,Touranakou:2022qrp,Ilten:2022jfm,Collins:2022qpr,AbhishekAbhishek:2022wby,Cresswell:2022tof,Roche:2023int,Anzalone:2023ugq}
\\\textit{An autoencoder consists of two functions: one that maps $x$ into a latent space $z$ (encoder) and a second one that maps the latent space back into the original space (decoder). The encoder and decoder are simultaneously trained so that their composition is nearly the identity. When the latent space has a well-defined probability density (as in variational autoencoders), one can sample from the autoencoder by applying the decoder to a randomly chosen element of the latent space.}
\item \textbf{Normalizing flows}~\cite{Albergo:2019eim,1800956,Kanwar:2003.06413,Brehmer:2020vwc,Bothmann:2020ywa,Gao:2020zvv,Gao:2020vdv,Nachman:2020lpy,Choi:2020bnf,Lu:2020npg,Bieringer:2020tnw,Hollingsworth:2021sii,Winterhalder:2021ave,Krause:2021ilc,Hackett:2021idh,Menary:2021tjg,Hallin:2021wme,NEURIPS2020_a878dbeb,Vandegar:2020yvw,Jawahar:2021vyu,Bister:2021arb,Krause:2021wez,Butter:2021csz,Winterhalder:2021ngy,Butter:2022lkf,Verheyen:2022tov,Leigh:2022lpn,Chen:2022ytr,Albandea:2022fky,Krause:2022jna,Cresswell:2022tof,Dolan:2022ikg,Backes:2022vmn,Heimel:2022wyj,Albandea:2023wgd,Rousselot:2023pcj,Diefenbacher:2023vsw,Nicoli:2023qsl,R:2023dcr,Nachman:2023clf,Raine:2023fko}
\\\textit{Normalizing flows~\cite{pmlr-v37-rezende15} learn $p(x)$ explicitly by starting with a simple probability density and then applying a series of bijective transformations with tractable Jacobians.}
\item \textbf{Diffusion Models}~\cite{Mikuni:2022xry,Leigh:2023toe,Mikuni:2023dvk,Shmakov:2023kjj,Buhmann:2023bwk,Butter:2023fov}
\item \textbf{Diffusion Models}~\cite{Mikuni:2022xry,Leigh:2023toe,Mikuni:2023dvk,Shmakov:2023kjj,Buhmann:2023bwk,Butter:2023fov,Acosta:2023zik}
\\\textit{These approaches learn the gradient of the density instead of the density directly.}
\item \textbf{Transformer Models}~\cite{Finke:2023veq,Butter:2023fov,Raine:2023fko}
\\\textit{These approaches learn the density or perform generative modeling using transformer-based networks.}
\item \textbf{Physics-inspired}~\cite{Andreassen:2018apy,Andreassen:2019txo,1808876,Lai:2020byl,Barenboim:2021vzh}
\\\textit{A variety of methods have been proposed to use machine learning tools (e.g. neural networks) combined with physical components.}
\item \textbf{Mixture Models}~\cite{Chen:2020uds,Burton:2021tsd,Graziani:2021vai,Liu:2022dem}
\item \textbf{Mixture Models}~\cite{Chen:2020uds,Burton:2021tsd,Graziani:2021vai,Liu:2022dem,Heinrich:2023bmt}
\\\textit{A mixture model is a superposition of simple probability densities. For example, a Gaussian mixture model is a sum of normal probability densities. Mixture density networks are mixture models where the coefficients in front of the constituent densities as well as the density parameters (e.g. mean and variances of Gaussians) are parameterized by neural networks.}
\item \textbf{Phase space generation}~\cite{Bendavid:2017zhk,Bothmann:2020ywa,Gao:2020zvv,Gao:2020vdv,Klimek:2018mza,Carrazza:2020rdn,Nachman:2020fff,Chen:2020nfb,Verheyen:2020bjw,Backes:2020vka,Danziger:2021eeg,Yoon:2020zmb,Maitre:2022xle,Jinno:2022sbr,Heimel:2022wyj,Renteria-Estrada:2023buo,Singh:2023yvj}
\\\textit{Monte Carlo event generators integrate over a phase space that needs to be generated efficiently and this can be aided by machine learning methods.}
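
To illustrate the "GANs" item in the hunk above, here is a minimal two-network minimax loop in PyTorch on a one-dimensional toy "process"; the architecture, data, and step count are invented for the sketch and bear no relation to the cited detector-scale models.

```python
# Hypothetical toy GAN: the generator maps noise to structure, the discriminator
# learns to separate generated from "real" samples (here a shifted Gaussian).
import torch
import torch.nn as nn

torch.manual_seed(0)

def sample_real(n):
    return torch.randn(n, 1) * 0.5 + 2.0  # stand-in for the target process p(x)

G = nn.Sequential(nn.Linear(1, 16), nn.ReLU(), nn.Linear(16, 1))  # generator G(z)
D = nn.Sequential(nn.Linear(1, 16), nn.ReLU(), nn.Linear(16, 1))  # discriminator (logits)
opt_g = torch.optim.Adam(G.parameters(), lr=1e-3)
opt_d = torch.optim.Adam(D.parameters(), lr=1e-3)
bce = nn.BCEWithLogitsLoss()

for step in range(2000):
    # Discriminator update: label real as 1, generated as 0.
    z, real = torch.randn(64, 1), sample_real(64)
    fake = G(z).detach()  # detach so this step does not update G
    loss_d = bce(D(real), torch.ones(64, 1)) + bce(D(fake), torch.zeros(64, 1))
    opt_d.zero_grad(); loss_d.backward(); opt_d.step()

    # Generator update: try to make D call generated samples real.
    loss_g = bce(D(G(torch.randn(64, 1))), torch.ones(64, 1))
    opt_g.zero_grad(); loss_g.backward(); opt_g.step()

print(G(torch.randn(10000, 1)).mean().item())  # drifts toward the real mean of 2.0
```

When the discriminator output is near 0.5 everywhere (maximally "confused"), the generator is effectively sampling from an approximation of $p(x)$.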
7 changes: 7 additions & 0 deletions README.md
Expand Up @@ -562,6 +562,7 @@ This review was built with the help of the HEP-ML community, the [INSPIRE REST A
* [Evolutionary algorithms for hyperparameter optimization in machine learning for application in high energy physics](https://arxiv.org/abs/2011.04434) [[DOI](https://doi.org/10.1140/epjc/s10052-021-08950-y)]
* [Application of Deep Learning Technique to an Analysis of Hard Scattering Processes at Colliders](https://arxiv.org/abs/2109.08520)
* [Support vector machines and generalisation in HEP](https://arxiv.org/abs/1702.04686) [[DOI](https://doi.org/10.1088/1742-6596/898/7/072021)]
* [Principles for Initialization and Architecture Selection in Graph Neural Networks with ReLU Activations](https://arxiv.org/abs/2306.11668)

#### Weak/Semi supervision

@@ -634,6 +635,7 @@ This review was built with the help of the HEP-ML community, the [INSPIRE REST A
* [Fitting a Collider in a Quantum Computer: Tackling the Challenges of Quantum Machine Learning for Big Datasets](https://arxiv.org/abs/2211.03233)
* [Quantum-probabilistic Hamiltonian learning for generative modelling \& anomaly detection](https://arxiv.org/abs/2211.03803)
* [Generative Invertible Quantum Neural Networks](https://arxiv.org/abs/2302.12906)
* [Precise Image Generation on Current Noisy Quantum Computing Devices](https://arxiv.org/abs/2307.05253)

#### Feature ranking

@@ -713,6 +715,7 @@ This review was built with the help of the HEP-ML community, the [INSPIRE REST A
* [Charged Particle Tracking with Machine Learning on FPGAs](https://arxiv.org/abs/2212.02348)
* [Neural-network-based level-1 trigger upgrade for the SuperCDMS experiment at SNOLAB](https://arxiv.org/abs/2212.07864) [[DOI](https://doi.org/10.1088/1748-0221/18/06/P06012)]
* [Implementation of a framework for deploying AI inference engines in FPGAs](https://arxiv.org/abs/2305.19455)
* [Fast Neural Network Inference on FPGAs for Triggering on Long-Lived Particles at Colliders](https://arxiv.org/abs/2307.05152)

#### Deployment

@@ -1013,6 +1016,7 @@ This review was built with the help of the HEP-ML community, the [INSPIRE REST A
* [Ultra-High-Resolution Detector Simulation with Intra-Event Aware GAN and Self-Supervised Relational Reasoning](https://arxiv.org/abs/2303.08046)
* [New Angles on Fast Calorimeter Shower Simulation](https://arxiv.org/abs/2303.18150)
* [Fitting a Deep Generative Hadronization Model](https://arxiv.org/abs/2305.17169)
* [Machine Learning methods for simulating particle response in the Zero Degree Calorimeter at the ALICE experiment, CERN](https://arxiv.org/abs/2306.13606)

### Autoencoders

@@ -1037,6 +1041,7 @@ This review was built with the help of the HEP-ML community, the [INSPIRE REST A
* [CaloDVAE : Discrete Variational Autoencoders for Fast Calorimeter Shower Simulation](https://arxiv.org/abs/2210.07430)
* [CaloMan: Fast generation of calorimeter showers with density estimation on learned manifolds](https://arxiv.org/abs/2211.15380)
* [Nanosecond anomaly detection with decision trees for high energy physics and real-time application to exotic Higgs decays](https://arxiv.org/abs/2304.03836)
* [Triggering Dark Showers with Conditional Dual Auto-Encoders](https://arxiv.org/abs/2306.12955)
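
The entries above implement variants of the encoder/decoder pair described in HEPML.tex. A minimal (non-variational) autoencoder sketch in PyTorch, with invented toy dimensions, trained so that decoder∘encoder is close to the identity:

```python
# Hypothetical toy autoencoder: 8-D data that actually lives in a 2-D
# subspace, compressed through a 2-D latent space and reconstructed.
import torch
import torch.nn as nn

torch.manual_seed(0)
enc = nn.Linear(8, 2)   # encoder: x -> latent z
dec = nn.Linear(2, 8)   # decoder: z -> reconstruction of x
opt = torch.optim.Adam(list(enc.parameters()) + list(dec.parameters()), lr=1e-2)

x = torch.randn(512, 2) @ torch.randn(2, 8)  # toy data on a 2-D subspace of R^8

for step in range(500):
    recon = dec(enc(x))                  # composition should approximate the identity
    loss = ((recon - x) ** 2).mean()     # reconstruction (MSE) loss
    opt.zero_grad(); loss.backward(); opt.step()

print(loss.item())  # becomes small: a 2-D latent suffices for this data
```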

### Normalizing flows

@@ -1090,6 +1095,7 @@ This review was built with the help of the HEP-ML community, the [INSPIRE REST A
* [End-To-End Latent Variational Diffusion Models for Inverse Problems in High Energy Physics](https://arxiv.org/abs/2305.10399)
* [CaloClouds: Fast Geometry-Independent Highly-Granular Calorimeter Simulation](https://arxiv.org/abs/2305.04847)
* [Jet Diffusion versus JetGPT -- Modern Networks for the LHC](https://arxiv.org/abs/2305.10475)
* [Comparison of Point Cloud and Image-based Models for Calorimeter Fast Simulation](https://arxiv.org/abs/2307.04780)
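
The "Diffusion Models" description in HEPML.tex says these approaches learn the gradient of the density rather than the density itself. A minimal denoising score-matching sketch on one-dimensional toy data follows; every choice here (network, noise scale, data) is a hypothetical illustration, not the method of the papers listed.

```python
# Hypothetical toy score model: learn s(x) ~ d/dx log p(x) for data from N(2, 1)
# via denoising score matching (regress s(x + sigma*eps) onto -eps/sigma).
import torch
import torch.nn as nn

torch.manual_seed(0)
score = nn.Sequential(nn.Linear(1, 32), nn.Tanh(), nn.Linear(32, 1))
opt = torch.optim.Adam(score.parameters(), lr=1e-3)
sigma = 0.1  # fixed noise scale (real diffusion models sweep many scales)

for step in range(2000):
    x = torch.randn(128, 1) + 2.0        # toy data from N(2, 1)
    eps = torch.randn_like(x)
    x_noisy = x + sigma * eps
    loss = ((score(x_noisy) + eps / sigma) ** 2).mean()
    opt.zero_grad(); loss.backward(); opt.step()

with torch.no_grad():
    print(score(torch.tensor([[2.0], [3.0]])))  # ~0 at the mean, ~-1 one sigma away
```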

### Transformer Models

@@ -1111,6 +1117,7 @@ This review was built with the help of the HEP-ML community, the [INSPIRE REST A
* [Mixture Density Network Estimation of Continuous Variable Maximum Likelihood Using Discrete Training Samples](https://arxiv.org/abs/2103.13416)
* [A Neural-Network-defined Gaussian Mixture Model for particle identification applied to the LHCb fixed-target programme](https://arxiv.org/abs/2110.10259)
* [Geometry-aware Autoregressive Models for Calorimeter Shower Simulations](https://arxiv.org/abs/2212.08233)
* [Hierarchical Neural Simulation-Based Inference Over Event Ensembles](https://arxiv.org/abs/2306.12584)
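
The "Mixture Models" description in HEPML.tex defines the density as a superposition of simple components, e.g. Gaussians. A self-contained toy Gaussian mixture in NumPy, with invented weights and parameters, showing explicit density evaluation and ancestral sampling (a mixture density network would simply have a neural network predict `weights`, `means`, and `sigmas`):

```python
# Hypothetical toy Gaussian mixture: p(x) = sum_k w_k * N(x; mu_k, sigma_k).
import numpy as np

rng = np.random.default_rng(0)
weights = np.array([0.3, 0.7])   # mixture coefficients, sum to 1
means   = np.array([-1.0, 2.0])
sigmas  = np.array([0.5, 1.0])

def pdf(x):
    """Explicit density: weighted sum of Gaussian components."""
    comps = np.exp(-0.5 * ((x[:, None] - means) / sigmas) ** 2) / (sigmas * np.sqrt(2 * np.pi))
    return comps @ weights

def sample(n):
    """Ancestral sampling: pick a component, then draw from it."""
    k = rng.choice(len(weights), size=n, p=weights)
    return rng.normal(means[k], sigmas[k])

print(pdf(np.array([-1.0, 0.0, 2.0])))          # density at a few points
print(sample(100_000).mean(), weights @ means)  # empirical vs. analytic mean
```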

### Phase space generation

(Diffs for the remaining 2 changed files are not shown.)