From 982bec04463f933789ae5e655205bcd5ac93aa5d Mon Sep 17 00:00:00 2001 From: lc3267 Date: Mon, 14 Oct 2024 13:50:28 +0000 Subject: [PATCH 1/3] folder structure Signed-off-by: lc3267 --- README.md | 30 +- examples/build_qubo.py | 27 +- examples/sample_qubo_neal.py | 23 +- examples/sample_qubo_qbsolv.py | 33 +- notebooks/simple-run-example.ipynb | 90 ++--- {src/hepqpr/qallse => qallse}/__init__.py | 0 {src => qallse/cli}/__init__.py | 0 .../qallse/seeding => qallse/cli}/__main__.py | 4 +- qallse/cli/entrypoints.py | 320 ++++++++++++++++++ {src/hepqpr/qallse => qallse}/cli/func.py | 68 ++-- {src/hepqpr/qallse => qallse}/cli/utils.py | 24 +- .../qallse => qallse}/data_structures.py | 33 +- {src/hepqpr/qallse => qallse}/data_wrapper.py | 0 .../qallse => qallse}/dsmaker/__init__.py | 0 .../dsmaker/data/event000001000-cells.csv | 0 .../dsmaker/data/event000001000-hits.csv | 0 .../dsmaker/data/event000001000-particles.csv | 0 .../dsmaker/data/event000001000-truth.csv | 0 .../qallse => qallse}/dsmaker/dsmaker.py | 223 +++++++----- {src/hepqpr/qallse => qallse}/dumper.py | 57 ++-- .../qallse/cli => qallse/other}/__init__.py | 0 .../other/dw_timing_recorder.py | 0 qallse/other/filter_input_doublets.py | 47 +++ .../qallse => qallse}/other/parse_qbsolv.py | 0 .../other/stdout_redirect.py | 0 {src/hepqpr/qallse => qallse}/plotting.py | 0 {src/hepqpr/qallse => qallse}/qallse.py | 0 {src/hepqpr/qallse => qallse}/qallse_base.py | 0 {src/hepqpr/qallse => qallse}/qallse_d0.py | 0 {src/hepqpr/qallse => qallse}/qallse_mp.py | 0 .../qallse => qallse}/seeding/__init__.py | 0 qallse/seeding/__main__.py | 8 + .../qallse => qallse}/seeding/config.py | 0 .../seeding/doublet_making.py | 0 {src/hepqpr/qallse => qallse}/seeding/main.py | 54 +-- .../qallse => qallse}/seeding/storage.py | 0 .../qallse => qallse}/seeding/topology.py | 0 .../hepqpr/qallse => qallse}/seeding/utils.py | 0 .../qallse => qallse}/track_recreater.py | 55 ++- {src/hepqpr/qallse => qallse}/type_alias.py | 0 {src/hepqpr/qallse => qallse}/utils.py | 0 scripts/1-recreate_datasets.py | 53 +-- scripts/2-build_qubos.py | 39 ++- scripts/3-solve_qubos.py | 95 ++++-- src/hepqpr/__init__.py | 1 - src/hepqpr/qallse/cli/__main__.py | 7 - src/hepqpr/qallse/cli/entrypoints.py | 240 ------------- src/hepqpr/qallse/other/__init__.py | 0 .../qallse/other/filter_input_doublets.py | 35 -- src/setup.py | 53 --- 50 files changed, 933 insertions(+), 686 deletions(-) rename {src/hepqpr/qallse => qallse}/__init__.py (100%) rename {src => qallse/cli}/__init__.py (100%) rename {src/hepqpr/qallse/seeding => qallse/cli}/__main__.py (56%) create mode 100644 qallse/cli/entrypoints.py rename {src/hepqpr/qallse => qallse}/cli/func.py (65%) rename {src/hepqpr/qallse => qallse}/cli/utils.py (53%) rename {src/hepqpr/qallse => qallse}/data_structures.py (91%) rename {src/hepqpr/qallse => qallse}/data_wrapper.py (100%) rename {src/hepqpr/qallse => qallse}/dsmaker/__init__.py (100%) rename {src/hepqpr/qallse => qallse}/dsmaker/data/event000001000-cells.csv (100%) rename {src/hepqpr/qallse => qallse}/dsmaker/data/event000001000-hits.csv (100%) rename {src/hepqpr/qallse => qallse}/dsmaker/data/event000001000-particles.csv (100%) rename {src/hepqpr/qallse => qallse}/dsmaker/data/event000001000-truth.csv (100%) rename {src/hepqpr/qallse => qallse}/dsmaker/dsmaker.py (50%) rename {src/hepqpr/qallse => qallse}/dumper.py (80%) rename {src/hepqpr/qallse/cli => qallse/other}/__init__.py (100%) rename {src/hepqpr/qallse => qallse}/other/dw_timing_recorder.py (100%) create mode 100644 
qallse/other/filter_input_doublets.py rename {src/hepqpr/qallse => qallse}/other/parse_qbsolv.py (100%) rename {src/hepqpr/qallse => qallse}/other/stdout_redirect.py (100%) rename {src/hepqpr/qallse => qallse}/plotting.py (100%) rename {src/hepqpr/qallse => qallse}/qallse.py (100%) rename {src/hepqpr/qallse => qallse}/qallse_base.py (100%) rename {src/hepqpr/qallse => qallse}/qallse_d0.py (100%) rename {src/hepqpr/qallse => qallse}/qallse_mp.py (100%) rename {src/hepqpr/qallse => qallse}/seeding/__init__.py (100%) create mode 100644 qallse/seeding/__main__.py rename {src/hepqpr/qallse => qallse}/seeding/config.py (100%) rename {src/hepqpr/qallse => qallse}/seeding/doublet_making.py (100%) rename {src/hepqpr/qallse => qallse}/seeding/main.py (51%) rename {src/hepqpr/qallse => qallse}/seeding/storage.py (100%) rename {src/hepqpr/qallse => qallse}/seeding/topology.py (100%) rename {src/hepqpr/qallse => qallse}/seeding/utils.py (100%) rename {src/hepqpr/qallse => qallse}/track_recreater.py (80%) rename {src/hepqpr/qallse => qallse}/type_alias.py (100%) rename {src/hepqpr/qallse => qallse}/utils.py (100%) delete mode 100644 src/hepqpr/__init__.py delete mode 100644 src/hepqpr/qallse/cli/__main__.py delete mode 100644 src/hepqpr/qallse/cli/entrypoints.py delete mode 100644 src/hepqpr/qallse/other/__init__.py delete mode 100644 src/hepqpr/qallse/other/filter_input_doublets.py delete mode 100644 src/setup.py diff --git a/README.md b/README.md index ec0107e..ff37e3b 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ $$ $$$$$$$$$$$$$$$$$$$$$$$ $$ $$$ $$$$$ $$$ \__> \/ \/ \/ ``` -# HEPQPR.Qallse +# qallse The [HEPQPR](https://sites.google.com/lbl.gov/hep-qpr).Qallse project encodes the HEP (ATLAS) pattern recognition problem into a QUBO and solves it using a D-Wave or other classical QUBO libraries (qbsolv, neal). @@ -67,7 +67,7 @@ I am available for any question (email or Github issue is fine) and would be gla ### Current models -Different versions of the model building (i.e. QUBO generation) exist. They are organised into a class hierarchy starting at the abstract class `hepqpr.qallse.QallseBase`: +Different versions of the model building (i.e. QUBO generation) exist. They are organised into a class hierarchy starting at the abstract class `qallse.QallseBase`: * `.qallse.Qallse`: basic implementation, using constant bias weights. * `.qallse_mp.QallseMp`: adds a filtering step during triplets generation, which greatly limits the size of the QUBO; @@ -151,18 +151,18 @@ Dataset written in /tmp/mini/ds05/event000001000* (seed=376778465, num. tracks=4 # build the model > qallse -i /tmp/mini/ds05/event000001000-hits.csv -o /tmp/mini build INPUT -- precision (%): 0.8610, recall (%): 99.5885, missing: 1 -2019-01-29T09:54:05.691 [hepqpr.qallse.qallse_d0 INFO ] created 15341 doublets. -2019-01-29T09:54:06.995 [hepqpr.qallse.qallse_d0 INFO ] created 3160 triplets. -2019-01-29T09:54:07.022 [hepqpr.qallse.qallse_d0 INFO ] created 686 quadruplets. -2019-01-29T09:54:07.022 [hepqpr.qallse.qallse_d0 INFO ] Model built in 3.12s. doublets: 15341/0, triplets: 3160/0, quadruplets: 686 -2019-01-29T09:54:07.030 [hepqpr.qallse.qallse_d0 INFO ] MaxPath done in 0.02s. doublets: 544, triplets: 628, quadruplets: 638 (dropped 48) -2019-01-29T09:54:07.073 [hepqpr.qallse.qallse_d0 INFO ] Qubo generated in 0.07s. Size: 2877. Vars: 628, excl. couplers: 1611, incl. couplers: 638 +2019-01-29T09:54:05.691 [qallse.qallse_d0 INFO ] created 15341 doublets. +2019-01-29T09:54:06.995 [qallse.qallse_d0 INFO ] created 3160 triplets. 
+2019-01-29T09:54:07.022 [qallse.qallse_d0 INFO ] created 686 quadruplets. +2019-01-29T09:54:07.022 [qallse.qallse_d0 INFO ] Model built in 3.12s. doublets: 15341/0, triplets: 3160/0, quadruplets: 686 +2019-01-29T09:54:07.030 [qallse.qallse_d0 INFO ] MaxPath done in 0.02s. doublets: 544, triplets: 628, quadruplets: 638 (dropped 48) +2019-01-29T09:54:07.073 [qallse.qallse_d0 INFO ] Qubo generated in 0.07s. Size: 2877. Vars: 628, excl. couplers: 1611, incl. couplers: 638 Wrote qubo to /tmp/mini/qubo.pickle # solve using neal > qallse -i /tmp/mini/ds05/event000001000-hits.csv -o /tmp/mini neal -2019-01-29T09:56:51.207 [hepqpr.qallse.cli.func INFO ] QUBO of size 2877 sampled in 0.14s (NEAL, seed=1615186406). -2019-01-29T09:56:51.619 [hepqpr.qallse.track_recreater INFO ] Found 0 conflicting doublets +2019-01-29T09:56:51.207 [qallse.cli.func INFO ] QUBO of size 2877 sampled in 0.14s (NEAL, seed=1615186406). +2019-01-29T09:56:51.619 [qallse.track_recreater INFO ] Found 0 conflicting doublets SAMPLE -- energy: -165.7110, ideal: -163.1879 (diff: -2.523028) best sample occurrence: 1/10 SCORE -- precision (%): 99.1769547325103, recall (%): 99.1769547325103, missing: 2 @@ -203,7 +203,7 @@ qallse -i /tmp/mini/ds05/event000001000-hits.csv -o /tmp/mini qbsolv \ The `examples` directory contains some examples on how to do everything from scripts instead of using the commandline. -Other very useful functions are available in `hepqpr.qallse.cli.func` and pretty self-explanatory. +Other very useful functions are available in `qallse.cli.func` and pretty self-explanatory. ### Running from an IPython notebook @@ -229,7 +229,7 @@ See the notebook example for more information. ### The plotting module -You can use `hepqpr.qallse.plotting` for plotting doublets and tracks easily. +You can use `qallse.plotting` for plotting doublets and tracks easily. __Jupyter__: if you are running in a notebook, you need to tell the module so by calling `set_notebook_mode()`. @@ -238,9 +238,9 @@ The methods take a `DataWrapper` and a list of xplets (an xplet is here a list o Typical usage: ```python -from hepqpr.qallse import DataWrapper -from hepqpr.qallse.cli.func import process_response -from hepqpr.qallse.plotting import * +from qallse import DataWrapper +from qallse.cli.func import process_response +from qallse.plotting import * set_notebook_mode() # if running inside a notebook diff --git a/examples/build_qubo.py b/examples/build_qubo.py index 7681388..fe94537 100644 --- a/examples/build_qubo.py +++ b/examples/build_qubo.py @@ -14,24 +14,28 @@ import sys import logging -from hepqpr.qallse import * -from hepqpr.qallse import dumper +from qallse import * +from qallse import dumper # ==== BUILD CONFIG loglevel = logging.DEBUG -input_path = '/tmp/ez-0.1_hpt-1.0/event000001000-hits.csv' # TODO change it ! -output_path = '/tmp' # TODO change it +input_path = "/tmp/ez-0.1_hpt-1.0/event000001000-hits.csv" # TODO change it ! 
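+# note: this script also expects the matching doublets file next to the hits file
+# (input_path with "-hits.csv" replaced by "-doublets.csv", see the read below),
+# as written by create_dataset when doublet generation is enabled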
+output_path = "/tmp" # TODO change it model_class = QallseD0 # model class to use extra_config = dict() # model config dump_config = dict( - output_path='/tmp', - prefix='', - xplets_kwargs=dict(format='json', indent=3), # use json (vs "pickle") and indent the output - qubo_kwargs=dict(w_marker=None, c_marker=None) # save the real coefficients VS generic placeholders + output_path="/tmp", + prefix="", + xplets_kwargs=dict( + format="json", indent=3 + ), # use json (vs "pickle") and indent the output + qubo_kwargs=dict( + w_marker=None, c_marker=None + ), # save the real coefficients VS generic placeholders ) # ==== configure logging @@ -39,15 +43,16 @@ logging.basicConfig( stream=sys.stderr, format="%(asctime)s.%(msecs)03d [%(name)-15s %(levelname)-5s] %(message)s", - datefmt='%Y-%m-%dT%H:%M:%S') + datefmt="%Y-%m-%dT%H:%M:%S", +) -logging.getLogger('hepqpr').setLevel(loglevel) +logging.getLogger("hepqpr").setLevel(loglevel) # ==== build model # load data dw = DataWrapper.from_path(input_path) -doublets = pd.read_csv(input_path.replace('-hits.csv', '-doublets.csv')) +doublets = pd.read_csv(input_path.replace("-hits.csv", "-doublets.csv")) # build model model = model_class(dw, **extra_config) diff --git a/examples/sample_qubo_neal.py b/examples/sample_qubo_neal.py index d5c8a22..b27190f 100644 --- a/examples/sample_qubo_neal.py +++ b/examples/sample_qubo_neal.py @@ -13,15 +13,15 @@ import pickle from os.path import join as path_join -from hepqpr.qallse import * +from qallse import * from neal import SimulatedAnnealingSampler # ==== RUN CONFIG loglevel = logging.DEBUG -input_path = '/tmp/ez-0.1_hpt-1.0/event000001000-hits.csv' # TODO change it ! -qubo_path = '/tmp' # TODO change it +input_path = "/tmp/ez-0.1_hpt-1.0/event000001000-hits.csv" # TODO change it ! +qubo_path = "/tmp" # TODO change it sampler = SimulatedAnnealingSampler() @@ -30,15 +30,16 @@ logging.basicConfig( stream=sys.stderr, format="%(asctime)s.%(msecs)03d [%(name)-15s %(levelname)-5s] %(message)s", - datefmt='%Y-%m-%dT%H:%M:%S') + datefmt="%Y-%m-%dT%H:%M:%S", +) -logging.getLogger('hepqpr').setLevel(loglevel) +logging.getLogger("hepqpr").setLevel(loglevel) # ==== build model # load data dw = DataWrapper.from_path(input_path) -with open(path_join(qubo_path, 'qubo.pickle'), 'rb') as f: +with open(path_join(qubo_path, "qubo.pickle"), "rb") as f: Q = pickle.load(f) # sample qubo @@ -57,8 +58,10 @@ trackml_score = dw.compute_trackml_score(final_tracks) # print stats -print(f'SAMPLE -- energy: {en:.4f}, ideal: {en0:.4f} (diff: {en-en0:.6f})') -print(f' best sample occurrence: {occs[0]}/{occs.sum()}') +print(f"SAMPLE -- energy: {en:.4f}, ideal: {en0:.4f} (diff: {en-en0:.6f})") +print(f" best sample occurrence: {occs[0]}/{occs.sum()}") -print(f'SCORE -- precision (%): {p * 100}, recall (%): {r * 100}, missing: {len(ms)}') -print(f' tracks found: {len(final_tracks)}, trackml score (%): {trackml_score * 100}') +print(f"SCORE -- precision (%): {p * 100}, recall (%): {r * 100}, missing: {len(ms)}") +print( + f" tracks found: {len(final_tracks)}, trackml score (%): {trackml_score * 100}" +) diff --git a/examples/sample_qubo_qbsolv.py b/examples/sample_qubo_qbsolv.py index 4202138..7760a53 100644 --- a/examples/sample_qubo_qbsolv.py +++ b/examples/sample_qubo_qbsolv.py @@ -8,16 +8,18 @@ """ -raise NotImplementedError("Dwave's QBsolv is deprecated as of 2022. " \ - "The hybrid Dwave solver requires refactoring.") +raise NotImplementedError( + "Dwave's QBsolv is deprecated as of 2022. " + "The hybrid Dwave solver requires refactoring." 
+) import sys import logging import pickle from os.path import join as path_join -from hepqpr.qallse import * -from hepqpr.qallse.other.stdout_redirect import capture_stdout +from qallse import * +from qallse.other.stdout_redirect import capture_stdout from dwave_qbsolv import QBSolv @@ -26,9 +28,9 @@ loglevel = logging.DEBUG -input_path = '/tmp/ez-0.1_hpt-1.0/event000001000-hits.csv' # TODO change it ! -qubo_path = '/tmp' # TODO change it -logfile = '/tmp/qbsolv.log' # try to run parse_qbsolv -i /tmp/qbsolv.log afterwards :) +input_path = "/tmp/ez-0.1_hpt-1.0/event000001000-hits.csv" # TODO change it ! +qubo_path = "/tmp" # TODO change it +logfile = "/tmp/qbsolv.log" # try to run parse_qbsolv -i /tmp/qbsolv.log afterwards :) sampler = QBSolv() sampler_args = dict( @@ -42,15 +44,16 @@ logging.basicConfig( stream=sys.stderr, format="%(asctime)s.%(msecs)03d [%(name)-15s %(levelname)-5s] %(message)s", - datefmt='%Y-%m-%dT%H:%M:%S') + datefmt="%Y-%m-%dT%H:%M:%S", +) -logging.getLogger('hepqpr').setLevel(loglevel) +logging.getLogger("hepqpr").setLevel(loglevel) # ==== build model # load data dw = DataWrapper.from_path(input_path) -with open(path_join(qubo_path, 'qubo.pickle'), 'rb') as f: +with open(path_join(qubo_path, "qubo.pickle"), "rb") as f: Q = pickle.load(f) # sample qubo @@ -70,8 +73,10 @@ trackml_score = dw.compute_trackml_score(final_tracks) # print stats -print(f'SAMPLE -- energy: {en:.4f}, ideal: {en0:.4f} (diff: {en-en0:.6f})') -print(f' best sample occurrence: {occs[0]}/{occs.sum()}') +print(f"SAMPLE -- energy: {en:.4f}, ideal: {en0:.4f} (diff: {en-en0:.6f})") +print(f" best sample occurrence: {occs[0]}/{occs.sum()}") -print(f'SCORE -- precision (%): {p * 100}, recall (%): {r * 100}, missing: {len(ms)}') -print(f' tracks found: {len(final_tracks)}, trackml score (%): {trackml_score * 100}') +print(f"SCORE -- precision (%): {p * 100}, recall (%): {r * 100}, missing: {len(ms)}") +print( + f" tracks found: {len(final_tracks)}, trackml score (%): {trackml_score * 100}" +) diff --git a/notebooks/simple-run-example.ipynb b/notebooks/simple-run-example.ipynb index ad4c06a..2d4729c 100644 --- a/notebooks/simple-run-example.ipynb +++ b/notebooks/simple-run-example.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This notebook runs a `Qallse` model in simulation mode and shows how to simply visualize the results using utilities defined in module `hepqpr.qallse.plotting`." + "This notebook runs a `Qallse` model in simulation mode and shows how to simply visualize the results using utilities defined in module `qallse.plotting`." ] }, { @@ -34,9 +34,9 @@ "import pandas as pd\n", "import numpy as np\n", "\n", - "from hepqpr.qallse.plotting import *\n", - "from hepqpr.qallse import *\n", - "from hepqpr.qallse.dsmaker import create_dataset\n", + "from qallse.plotting import *\n", + "from qallse import *\n", + "from qallse.dsmaker import create_dataset\n", "\n", "# initialise the plotting module in \"notebook\" mode\n", "set_notebook_mode()\n", @@ -102,17 +102,17 @@ "name": "stderr", "output_type": "stream", "text": [ - "17:46:22.850 DEBUG hepqpr.qallse.dsmaker.dsmaker: Loaded 120939 hits from /Users/lin/git/quantum-annealing-project/project-2/src/hepqpr/qallse/dsmaker/data/event000001000.\n", - "17:46:22.873 DEBUG hepqpr.qallse.dsmaker.dsmaker: Filtered hits from barrel. Remaining hits: 65518.\n", - "17:46:22.914 DEBUG hepqpr.qallse.dsmaker.dsmaker: Dropped double hits. 
Remaining hits: 55446.\n", - "17:46:22.935 DEBUG hepqpr.qallse.dsmaker.dsmaker: High Pt hits: 32302/524\n", - "17:46:22.967 DEBUG hepqpr.qallse.dsmaker.dsmaker: num_hits=524\n", - "17:46:22.967 DEBUG hepqpr.qallse.dsmaker.dsmaker: num_tracks=81\n", - "17:46:22.968 DEBUG hepqpr.qallse.dsmaker.dsmaker: num_important_tracks=4\n", - "17:46:22.968 DEBUG hepqpr.qallse.dsmaker.dsmaker: num_noise=130\n", - "17:46:22.969 DEBUG hepqpr.qallse.dsmaker.dsmaker: random_seed=1547862382\n", - "17:46:22.969 DEBUG hepqpr.qallse.dsmaker.dsmaker: time=2019-01-18T17:46:22.967102\n", - "17:46:23.219 INFO hepqpr.qallse.dsmaker.dsmaker: Doublets (len=1352) generated in f/var/folders/qp/grrqv98s1c37f8bxtkblfpcr0000gn/T/tmpra3o223v/ez-0.01_hpt-1.0/event000001000.\n" + "17:46:22.850 DEBUG qallse.dsmaker.dsmaker: Loaded 120939 hits from /Users/lin/git/quantum-annealing-project/project-2/src/hepqpr/qallse/dsmaker/data/event000001000.\n", + "17:46:22.873 DEBUG qallse.dsmaker.dsmaker: Filtered hits from barrel. Remaining hits: 65518.\n", + "17:46:22.914 DEBUG qallse.dsmaker.dsmaker: Dropped double hits. Remaining hits: 55446.\n", + "17:46:22.935 DEBUG qallse.dsmaker.dsmaker: High Pt hits: 32302/524\n", + "17:46:22.967 DEBUG qallse.dsmaker.dsmaker: num_hits=524\n", + "17:46:22.967 DEBUG qallse.dsmaker.dsmaker: num_tracks=81\n", + "17:46:22.968 DEBUG qallse.dsmaker.dsmaker: num_important_tracks=4\n", + "17:46:22.968 DEBUG qallse.dsmaker.dsmaker: num_noise=130\n", + "17:46:22.969 DEBUG qallse.dsmaker.dsmaker: random_seed=1547862382\n", + "17:46:22.969 DEBUG qallse.dsmaker.dsmaker: time=2019-01-18T17:46:22.967102\n", + "17:46:23.219 INFO qallse.dsmaker.dsmaker: Doublets (len=1352) generated in f/var/folders/qp/grrqv98s1c37f8bxtkblfpcr0000gn/T/tmpra3o223v/ez-0.01_hpt-1.0/event000001000.\n" ] } ], @@ -217,34 +217,34 @@ "name": "stderr", "output_type": "stream", "text": [ - "17:46:33.161 DEBUG hepqpr.qallse.qallse_d0: using config:\n", - "17:46:33.162 DEBUG hepqpr.qallse.qallse_d0: beamspot_center: (0, 0, 0)\n", - "17:46:33.163 DEBUG hepqpr.qallse.qallse_d0: beamspot_width: 27.5\n", - "17:46:33.163 DEBUG hepqpr.qallse.qallse_d0: cheat: False\n", - "17:46:33.164 DEBUG hepqpr.qallse.qallse_d0: d0_denom: 3.0\n", - "17:46:33.165 DEBUG hepqpr.qallse.qallse_d0: d0_factor: 0.5\n", - "17:46:33.165 DEBUG hepqpr.qallse.qallse_d0: max_layer_span: 2\n", - "17:46:33.166 DEBUG hepqpr.qallse.qallse_d0: min_qplet_path: 2\n", - "17:46:33.166 DEBUG hepqpr.qallse.qallse_d0: num_multiplier: -1\n", - "17:46:33.167 DEBUG hepqpr.qallse.qallse_d0: qplet_max_dcurv: 0.0001\n", - "17:46:33.167 DEBUG hepqpr.qallse.qallse_d0: qplet_max_strength: -0.2\n", - "17:46:33.168 DEBUG hepqpr.qallse.qallse_d0: qubo_bias_weight: 0\n", - "17:46:33.168 DEBUG hepqpr.qallse.qallse_d0: qubo_conflict_strength: 1\n", - "17:46:33.169 DEBUG hepqpr.qallse.qallse_d0: rz_power: 1\n", - "17:46:33.169 DEBUG hepqpr.qallse.qallse_d0: strength_bounds: None\n", - "17:46:33.169 DEBUG hepqpr.qallse.qallse_d0: tplet_max_curv: 0.0008\n", - "17:46:33.170 DEBUG hepqpr.qallse.qallse_d0: tplet_max_drz: 0.1\n", - "17:46:33.170 DEBUG hepqpr.qallse.qallse_d0: volayer_power: 2\n", - "17:46:33.171 DEBUG hepqpr.qallse.qallse_d0: xy_power: 1\n", - "17:46:33.171 DEBUG hepqpr.qallse.qallse_d0: xy_relative_strength: 0.5\n", - "17:46:33.172 DEBUG hepqpr.qallse.qallse_d0: z0_denom: 1.0\n", - "17:46:33.172 DEBUG hepqpr.qallse.qallse_d0: z0_factor: 0.1\n", - "17:46:33.238 INFO hepqpr.qallse.qallse_d0: created 810 doublets.\n", - "17:46:33.263 INFO hepqpr.qallse.qallse_d0: created 267 triplets.\n", - 
"17:46:33.268 INFO hepqpr.qallse.qallse_d0: created 201 quadruplets.\n", - "17:46:33.268 INFO hepqpr.qallse.qallse_d0: Model built in 0.08s. doublets: 0, triplets: 0, quadruplets: 201\n", - "17:46:33.272 INFO hepqpr.qallse.qallse_d0: MaxPath done in 0.01s. doublets: 159, triplets: 187, quadruplets: 192 (dropped 9)\n", - "17:46:33.292 INFO hepqpr.qallse.qallse_d0: Qubo generated in 0.03s. Size: 852. Vars: 187, excl. couplers: 473, incl. couplers: 192\n" + "17:46:33.161 DEBUG qallse.qallse_d0: using config:\n", + "17:46:33.162 DEBUG qallse.qallse_d0: beamspot_center: (0, 0, 0)\n", + "17:46:33.163 DEBUG qallse.qallse_d0: beamspot_width: 27.5\n", + "17:46:33.163 DEBUG qallse.qallse_d0: cheat: False\n", + "17:46:33.164 DEBUG qallse.qallse_d0: d0_denom: 3.0\n", + "17:46:33.165 DEBUG qallse.qallse_d0: d0_factor: 0.5\n", + "17:46:33.165 DEBUG qallse.qallse_d0: max_layer_span: 2\n", + "17:46:33.166 DEBUG qallse.qallse_d0: min_qplet_path: 2\n", + "17:46:33.166 DEBUG qallse.qallse_d0: num_multiplier: -1\n", + "17:46:33.167 DEBUG qallse.qallse_d0: qplet_max_dcurv: 0.0001\n", + "17:46:33.167 DEBUG qallse.qallse_d0: qplet_max_strength: -0.2\n", + "17:46:33.168 DEBUG qallse.qallse_d0: qubo_bias_weight: 0\n", + "17:46:33.168 DEBUG qallse.qallse_d0: qubo_conflict_strength: 1\n", + "17:46:33.169 DEBUG qallse.qallse_d0: rz_power: 1\n", + "17:46:33.169 DEBUG qallse.qallse_d0: strength_bounds: None\n", + "17:46:33.169 DEBUG qallse.qallse_d0: tplet_max_curv: 0.0008\n", + "17:46:33.170 DEBUG qallse.qallse_d0: tplet_max_drz: 0.1\n", + "17:46:33.170 DEBUG qallse.qallse_d0: volayer_power: 2\n", + "17:46:33.171 DEBUG qallse.qallse_d0: xy_power: 1\n", + "17:46:33.171 DEBUG qallse.qallse_d0: xy_relative_strength: 0.5\n", + "17:46:33.172 DEBUG qallse.qallse_d0: z0_denom: 1.0\n", + "17:46:33.172 DEBUG qallse.qallse_d0: z0_factor: 0.1\n", + "17:46:33.238 INFO qallse.qallse_d0: created 810 doublets.\n", + "17:46:33.263 INFO qallse.qallse_d0: created 267 triplets.\n", + "17:46:33.268 INFO qallse.qallse_d0: created 201 quadruplets.\n", + "17:46:33.268 INFO qallse.qallse_d0: Model built in 0.08s. doublets: 0, triplets: 0, quadruplets: 201\n", + "17:46:33.272 INFO qallse.qallse_d0: MaxPath done in 0.01s. doublets: 159, triplets: 187, quadruplets: 192 (dropped 9)\n", + "17:46:33.292 INFO qallse.qallse_d0: Qubo generated in 0.03s. Size: 852. Vars: 187, excl. couplers: 473, incl. 
couplers: 192\n"
     ]
    },
    {
@@ -284,7 +284,7 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "17:46:38.267 INFO hepqpr.qallse.qallse_d0: QUBO of size 852 sampled in 0.76s (seed 2025012979).\n"
+     "17:46:38.267 INFO qallse.qallse_d0: QUBO of size 852 sampled in 0.76s (seed 2025012979).\n"
     ]
    },
    {
@@ -318,7 +318,7 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "17:46:40.892 INFO hepqpr.qallse.track_recreater: Found 0 conflicting doublets\n"
+     "17:46:40.892 INFO qallse.track_recreater: Found 0 conflicting doublets\n"
    ]
   }
  ],
diff --git a/src/hepqpr/qallse/__init__.py b/qallse/__init__.py
similarity index 100%
rename from src/hepqpr/qallse/__init__.py
rename to qallse/__init__.py
diff --git a/src/__init__.py b/qallse/cli/__init__.py
similarity index 100%
rename from src/__init__.py
rename to qallse/cli/__init__.py
diff --git a/src/hepqpr/qallse/seeding/__main__.py b/qallse/cli/__main__.py
similarity index 56%
rename from src/hepqpr/qallse/seeding/__main__.py
rename to qallse/cli/__main__.py
index 9bc0064..6dd4ea0 100644
--- a/src/hepqpr/qallse/seeding/__main__.py
+++ b/qallse/cli/__main__.py
@@ -1,6 +1,8 @@
 def main():
-    from hepqpr.qallse.seeding.main import cli
+    from qallse.cli.entrypoints import cli
+
     cli()
 
+
 if __name__ == "__main__":
     main()
diff --git a/qallse/cli/entrypoints.py b/qallse/cli/entrypoints.py
new file mode 100644
index 0000000..8b57cb4
--- /dev/null
+++ b/qallse/cli/entrypoints.py
@@ -0,0 +1,320 @@
+import sys
+
+import click
+import pickle
+from os import path as op
+
+from qallse.cli.func import *
+from qallse.cli.utils import *
+
+
+class GlobalOptions:
+    def __init__(self, hits_path, opath=None, prefix=""):
+        self.hits_path = hits_path
+        self.output_path = opath
+        self.prefix = prefix
+        self._dw = None  # lazy creation (not created with --help)
+
+    @property
+    def path(self):
+        if self.hits_path is None:
+            # simulate the required=True click option, because if used directly,
+            # one cannot display a subcommand's help without passing the hits path
+            click.echo("Error: Missing option '-i' / '--hits-path'.", err=True)
+            sys.exit(1)
+        return self.hits_path.replace("-hits.csv", "")
+
+    @property
+    def dw(self):
+        if self._dw is None:
+            self._dw = DataWrapper.from_path(self.path)
+        return self._dw
+
+    def get_output_path(self, filename):
+        return op.join(self.output_path, self.prefix + filename)
+
+
+# ------
+
+
+@click.group(context_settings=dict(help_option_names=["-h", "--help"]))
+@click.option("--debug/--no-debug", is_flag=True, default=False)
+@click.option("-i", "--hits-path", help="[required] Path to the hits file.")
+@click.option(
+    "-o",
+    "--output-path",
+    default=None,
+    metavar="directory",
+    help="Where to save the output files.",
+)
+@click.option(
+    "-p",
+    "--prefix",
+    default="",
+    metavar="text",
+    help="Prefix prepended to all output files.",
+)
+@click.pass_context
+def cli(ctx, debug, hits_path, output_path, prefix):
+    """
+    Solve the pattern recognition problem using QA.
+
+    The hits path (-i) points to a hits file generated using the
+    `create_dataset` method. The directory should also contain a truth file
+    and the initial doublets file (created either during `create_dataset` or
+    using the `run_seeding` script).
+
+    Output files will be saved to the given output path, if any, using default
+    names. If set, the prefix will be prepended to all output files.
+    """
+    # configure logging
+    init_logging(logging.DEBUG if debug else logging.INFO)
+
+    # load input data
+    ctx.obj = GlobalOptions(hits_path, output_path, prefix)
+
+
+@cli.command("build")
+@click.option(
+    "--add-missing",
+    is_flag=True,
+    default=False,
+    help="If set, ensure 100% input recall.",
+)
+@click.option(
+    "-c", "--cls", default="qallse_d0", metavar="module_name", help="Model to use."
+)
+@click.option(
+    "-e",
+    "--extra",
+    type=str,
+    multiple=True,
+    metavar="key=value",
+    help="Override default model configuration.",
+)
+@click.pass_obj
+def cli_build(ctx, add_missing, cls, extra):
+    """
+    Generate the QUBO.
+
+    The QUBO and the xplets used by it are saved as pickle files in the current directory
+    (use the --output-path and --prefix options to change it).
+
+    --add-missing will add any true missing doublet to the input, ensuring an input recall of 100%.
+    --cls lets you choose which model to use: qallse_d0 (default), qallse, qallse_mp, etc.
+    --extra takes key=value pairs corresponding to configuration options of the model
+    (e.g. -e qubo_conflict_strength=0.5).
+    """
+    from qallse import dumper
+
+    extra_config = extra_to_dict(extra)
+    ModelClass = qallse_class_from_string("." + cls)
+    model = ModelClass(ctx.dw, **extra_config)
+
+    build_model(ctx.path, model, add_missing)
+    dumper.dump_model(
+        model,
+        ctx.output_path,
+        ctx.prefix,
+        qubo_kwargs=dict(w_marker=None, c_marker=None),
+    )
+    print("Wrote qubo to", ctx.get_output_path("qubo.pickle"))
+
+
+@cli.command("qbsolv")
+@click.option(
+    "-q", "--qubo", default=None, metavar="filepath", help="Path to the pickled QUBO."
+)
+@click.option(
+    "-dw",
+    "--dwave-conf",
+    default=None,
+    type=str,
+    metavar="filepath",
+    help="Path to a dwave.conf. If set, use a D-Wave as the sub-QUBO solver.",
+)
+@click.option(
+    "-v",
+    "--verbosity",
+    type=click.IntRange(-1, 6),
+    default=-1,
+    metavar="int",
+    help="qbsolv verbosity.",
+)
+@click.option(
+    "-l",
+    "--logfile",
+    type=str,
+    default=None,
+    metavar="filepath",
+    help="Where to redirect the qbsolv output. Only makes sense for verbosity > 0.",
+)
+@click.option(
+    "-e",
+    "--extra",
+    type=str,
+    multiple=True,
+    metavar="key=value",
+    help="Additional options to qbsolv. "
+    "Allowed keys: seed, num_repeats (plus num_reads when using a D-Wave).",
+)
+@click.pass_obj
+def cli_qbsolv(ctx, qubo, dwave_conf, verbosity, logfile, extra):
+    """
+    Sample a QUBO using qbsolv (!slower!) and a D-Wave (optional).
+
+    By default, this will run qbsolv (https://github.com/dwavesystems/qbsolv)
+    in simulation. To use a D-Wave, set the --dwave-conf option to
+    a valid dwave configuration file (see https://cloud.dwavesys.com/leap/).
+
+    --qubo is the path to the pickled QUBO (defaults to qubo.pickle in the output path).
+    --verbosity and --extra are passed to qbsolv. --logfile redirects all qbsolv output
+    to a file (see also the parse_qbsolv script).
+    """
+    try:
+        if qubo is None:
+            qubo = ctx.get_output_path("qubo.pickle")
+        with open(qubo, "rb") as f:
+            Q = pickle.load(f)
+    except Exception:
+        print(f"Failed to load QUBO. Are you sure {qubo} is a pickled QUBO file?")
+        sys.exit(-1)
+
+    qbsolv_kwargs = extra_to_dict(extra, typ=int)
+    qbsolv_kwargs["logfile"] = logfile
+    qbsolv_kwargs["verbosity"] = verbosity
+
+    if dwave_conf is not None:
+        response = solve_dwave(Q, dwave_conf, **qbsolv_kwargs)
+    else:
+        response = solve_qbsolv(Q, **qbsolv_kwargs)
+
+    print_stats(ctx.dw, response, Q)
+    if ctx.output_path is not None:
+        oname = ctx.get_output_path("qbsolv_response.pickle")
+        with open(oname, "wb") as f:
+            pickle.dump(response, f)
+        print(f"Wrote response to {oname}")
+
+
+@cli.command("neal", help="Sample a QUBO using neal.")
+@click.option(
+    "-q",
+    "--qubo",
+    default=None,
+    metavar="filepath",
+    help="Path to the pickled QUBO. Defaults to qubo.pickle in the output path.",
+)
+@click.option(
+    "-s", "--seed", default=None, type=int, metavar="int", help="Seed to use."
+)
+@click.pass_obj
+def cli_neal(ctx, qubo, seed):
+    """
+    Solve a QUBO using neal (!fast!).
+
+    neal (https://github.com/dwavesystems/dwave-neal) is a simulated annealing sampler.
+    It is faster than qbsolv by two orders of magnitude with similar (if not better) results.
+    """
+    try:
+        if qubo is None:
+            qubo = ctx.get_output_path("qubo.pickle")
+        with open(qubo, "rb") as f:
+            Q = pickle.load(f)
+    except Exception:
+        print(f"Failed to load QUBO. Are you sure {qubo} is a pickled QUBO file?")
+        sys.exit(-1)
+
+    response = solve_neal(Q, seed=seed)
+    print_stats(ctx.dw, response, Q)
+    if ctx.output_path is not None:
+        oname = ctx.get_output_path("neal_response.pickle")
+        with open(oname, "wb") as f:
+            pickle.dump(response, f)
+        print(f"Wrote response to {oname}")
+
+
+@cli.command(
+    "quickstart",
+    context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
+)
+@click.pass_context
+def cli_quickstart(ctx):
+    """
+    Run the whole algorithm (build+neal).
+
+    This accepts the same options as the build command. If no output path is set,
+    a temporary directory is created for the duration of the run and deleted on exit.
+
+    Minimal example using a very small dataset:
+
+    \b
+    create_dataset -n 0.01 -p mini
+    qallse -i mini/event000001000-hits.csv quickstart
+
+    """
+
+    def _chain():
+        ctx.forward(cli_build)
+        ctx.invoke(cli_neal)
+
+    if ctx.obj.output_path is None:
+        import tempfile
+
+        with tempfile.TemporaryDirectory() as tmpdir:
+            ctx.obj.output_path = tmpdir
+            _chain()
+    else:
+        _chain()
+
+
+@cli.command("plot")
+@click.option(
+    "-r",
+    "--response",
+    metavar="filepath",
+    required=True,
+    help="Path to the response file.",
+)
+@click.option(
+    "-d",
+    "--dims",
+    default="xy",
+    type=click.Choice(["xy", "zr", "zxy"]),
+    help="Dimensions of the plot.",
+)
+@click.option(
+    "-m",
+    "--mode",
+    default="d",
+    type=click.Choice(["d", "t", "dt"]),
+    help="Plot the doublets only (d), the triplets only (t), or both (dt).",
+)
+@click.pass_obj
+def cli_plot(ctx, response, dims, mode):
+    """
+    Plot the final doublets and final tracks.
+
+    This uses plotly (https://plot.ly) and the qallse.plotting module to
+    show the final tracks and doublets.
+    The plots are saved as html files either in the output path or in the
+    current directory.
+
+    WARNING: don't try to plot results from large datasets, especially 3D plots!
+    """
+    from qallse.plotting import iplot_results, iplot_results_tracks
+
+    dims = list(dims)
+
+    with open(response, "rb") as f:
+        r = pickle.load(f)
+    final_doublets, final_tracks = process_response(r)
+    _, missings, _ = diff_rows(final_doublets, ctx.dw.get_real_doublets())
+
+    if ctx.output_path is None:
+        ctx.output_path = "."
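+    # the "." fallback above keeps get_output_path() usable when no output
+    # directory was given: the html plots then land in the current directory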
+ dout = ctx.get_output_path("plot-doublets.html") + tout = ctx.get_output_path("plot-triplets.html") + + if "d" in mode: + iplot_results(ctx.dw, final_doublets, missings, dims=dims, filename=dout) + if "t" in mode: + iplot_results_tracks(ctx.dw, final_tracks, dims=dims, filename=tout) diff --git a/src/hepqpr/qallse/cli/func.py b/qallse/cli/func.py similarity index 65% rename from src/hepqpr/qallse/cli/func.py rename to qallse/cli/func.py index 25ee19f..7491ae2 100644 --- a/src/hepqpr/qallse/cli/func.py +++ b/qallse/cli/func.py @@ -1,19 +1,21 @@ import logging import sys -from hepqpr.qallse import * +from qallse import * logger = logging.getLogger(__name__) # ======= utils + def init_logging(level=logging.INFO, stream=sys.stderr): logging.basicConfig( stream=stream, - format='%(asctime)s.%(msecs)03d [%(name)-15s %(levelname)-5s] %(message)s', - datefmt='%Y-%m-%dT%H:%M:%S') - logging.getLogger('hepqpr').setLevel(level) + format="%(asctime)s.%(msecs)03d [%(name)-15s %(levelname)-5s] %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", + ) + logging.getLogger("hepqpr").setLevel(level) from contextlib import contextmanager @@ -35,16 +37,19 @@ def time_this(): # ======= model building + def build_model(path, model, add_missing): - doublets = pd.read_csv(path + '-doublets.csv') + doublets = pd.read_csv(path + "-doublets.csv") # prepare doublets if add_missing: - print('Cheat on, adding missing doublets.') + print("Cheat on, adding missing doublets.") doublets = model.dataw.add_missing_doublets(doublets) else: p, r, ms = model.dataw.compute_score(doublets) - print(f'INPUT -- precision (%): {p * 100:.4f}, recall (%): {r * 100:.4f}, missing: {len(ms)}') + print( + f"INPUT -- precision (%): {p * 100:.4f}, recall (%): {r * 100:.4f}, missing: {len(ms)}" + ) # build the qubo model.build_model(doublets=doublets) @@ -52,61 +57,78 @@ def build_model(path, model, add_missing): # ======= sampling + def solve_neal(Q, seed=None, **kwargs): from neal import SimulatedAnnealingSampler + # generate seed for logging purpose if seed is None: import random + seed = random.randint(0, 1 << 31) # run neal start_time = time.process_time() response = SimulatedAnnealingSampler().sample_qubo(Q, seed=seed, **kwargs) exec_time = time.process_time() - start_time - logger.info(f'QUBO of size {len(Q)} sampled in {exec_time:.2f}s (NEAL, seed={seed}).') + logger.info( + f"QUBO of size {len(Q)} sampled in {exec_time:.2f}s (NEAL, seed={seed})." + ) return response def solve_qbsolv(Q, logfile=None, seed=None, **kwargs): - from hepqpr.qallse.other.stdout_redirect import capture_stdout + from qallse.other.stdout_redirect import capture_stdout - raise NotImplementedError("Dwave's QBsolv is deprecated as of 2022. " \ - "The hybrid Dwave solver requires refactoring.") + raise NotImplementedError( + "Dwave's QBsolv is deprecated as of 2022. " + "The hybrid Dwave solver requires refactoring." + ) from dwave_qbsolv import QBSolv + # generate seed for logging purpose if seed is None: import random + seed = random.randint(0, 1 << 31) # run qbsolv - logger.debug('Running qbsolv with extra arguments: %s', kwargs) + logger.debug("Running qbsolv with extra arguments: %s", kwargs) start_time = time.process_time() if logfile is not None: logger.debug( - f'Writting qbsolv output to {logfile}. If you see an output in stdout, run "export PYTHONUNBUFFERED=1".') + f'Writting qbsolv output to {logfile}. If you see an output in stdout, run "export PYTHONUNBUFFERED=1".' 
+ ) with capture_stdout(logfile): response = QBSolv().sample_qubo(Q, seed=seed, **kwargs) else: response = QBSolv().sample_qubo(Q, seed=seed, **kwargs) exec_time = time.process_time() - start_time - logger.info(f'QUBO of size {len(Q)} sampled in {exec_time:.2f}s (QBSOLV, seed={seed}).') + logger.info( + f"QUBO of size {len(Q)} sampled in {exec_time:.2f}s (QBSOLV, seed={seed})." + ) return response def solve_dwave(Q, conf_file, **kwargs): import urllib3 + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) from dwave.system import EmbeddingComposite from dwave.system import DWaveSampler + sampler = DWaveSampler(config_file=conf_file, permissive_ssl=True) solver = EmbeddingComposite(sampler) - logger.info(f'Using {sampler.solver} as the sub-QUBO solver.') - if 'num_reads' not in kwargs: kwargs['num_reads'] = 10 - if 'num_repeats' not in kwargs: kwargs['num_repeats'] = 10 + logger.info(f"Using {sampler.solver} as the sub-QUBO solver.") + if "num_reads" not in kwargs: + kwargs["num_reads"] = 10 + if "num_repeats" not in kwargs: + kwargs["num_repeats"] = 10 return solve_qbsolv(Q, solver=solver, **kwargs) # ======= results + def process_response(response): sample = next(response.samples()) final_triplets = [Triplet.name_to_hit_ids(k) for k, v in sample.items() if v == 1] @@ -121,13 +143,17 @@ def print_stats(dw, response, Q=None): en0 = 0 if Q is None else dw.compute_energy(Q) en = response.record.energy[0] - print(f'SAMPLE -- energy: {en:.4f}, ideal: {en0:.4f} (diff: {en-en0:.6f})') + print(f"SAMPLE -- energy: {en:.4f}, ideal: {en0:.4f} (diff: {en-en0:.6f})") occs = response.record.num_occurrences - print(f' best sample occurrence: {occs[0]}/{occs.sum()}') + print(f" best sample occurrence: {occs[0]}/{occs.sum()}") p, r, ms = dw.compute_score(final_doublets) - print(f'SCORE -- precision (%): {p * 100}, recall (%): {r * 100}, missing: {len(ms)}') + print( + f"SCORE -- precision (%): {p * 100}, recall (%): {r * 100}, missing: {len(ms)}" + ) trackml_score = dw.compute_trackml_score(final_tracks) - print(f' tracks found: {len(final_tracks)}, trackml score (%): {trackml_score * 100}') + print( + f" tracks found: {len(final_tracks)}, trackml score (%): {trackml_score * 100}" + ) return final_doublets, final_tracks diff --git a/src/hepqpr/qallse/cli/utils.py b/qallse/cli/utils.py similarity index 53% rename from src/hepqpr/qallse/cli/utils.py rename to qallse/cli/utils.py index 927356b..9b9dd8e 100644 --- a/src/hepqpr/qallse/cli/utils.py +++ b/qallse/cli/utils.py @@ -1,26 +1,30 @@ # ======= instantiation + def _to_camelcase(text): """ Converts underscore_delimited_text to CamelCase. Example: "tool_name" becomes "ToolName" """ - return ''.join(word.title() for word in text.split('_')) + return "".join(word.title() for word in text.split("_")) def qallse_class_from_string(cls): try: import importlib - if cls.startswith('.'): - module = 'hepqpr.qallse' + cls + + if cls.startswith("."): + module = "qallse" + cls cls = cls[1:] else: - module = '.'.join(cls.split('.')[:-1]) - cls = cls.split('.')[-1] + module = ".".join(cls.split(".")[:-1]) + cls = cls.split(".")[-1] cls = _to_camelcase(cls) return getattr(importlib.import_module(module), cls) except Exception as err: - raise RuntimeError(f'Error instantiating "{module}.{cls}". Are you sure it exists ?') from err + raise RuntimeError( + f'Error instantiating "{module}.{cls}". Are you sure it exists ?' 
+ ) from err def extra_to_dict(extra, typ=str): @@ -28,8 +32,10 @@ def extra_to_dict(extra, typ=str): dict_extra = dict() for s in extra: try: - k, v = s.split('=') + k, v = s.split("=") dict_extra[k.strip()] = typ(v.strip()) except: - print(f'error: {s} could not be processed. Extra args should be in the form k=') - return dict_extra \ No newline at end of file + print( + f"error: {s} could not be processed. Extra args should be in the form k=" + ) + return dict_extra diff --git a/src/hepqpr/qallse/data_structures.py b/qallse/data_structures.py similarity index 91% rename from src/hepqpr/qallse/data_structures.py rename to qallse/data_structures.py index 9824db4..5dba407 100644 --- a/src/hepqpr/qallse/data_structures.py +++ b/qallse/data_structures.py @@ -1,5 +1,5 @@ """ -This module contains the definition of all the data structures used by our model, :py:class:`hepqpr.qallse.qallse.Qallse` +This module contains the definition of all the data structures used by our model, :py:class:`qallse.qallse.Qallse` as well as some useful type alias used throughout the project. """ @@ -19,7 +19,18 @@ class Volayer: """ #: Define the mapping of `volume_id` and `layer_id` into one number (the index in the list) - ordering = [(8, 2), (8, 4), (8, 6), (8, 8), (13, 2), (13, 4), (13, 6), (13, 8), (17, 2), (17, 4)] + ordering = [ + (8, 2), + (8, 4), + (8, 6), + (8, 8), + (13, 2), + (13, 4), + (13, 6), + (13, 8), + (17, 2), + (17, 4), + ] @classmethod def get_index(cls, volayer: Tuple[int, int]) -> int: @@ -39,7 +50,7 @@ class Xplet: It contains lists of inner and outer xplets (with one more hit) and sets of "kept" inner and outer xplets, i.e. xplets actually used when generating the qubo. Those lists and sets are populated during model building - (see :py:meth:`hepqpr.qallse.Qallse.build_model`). + (see :py:meth:`qallse.Qallse.build_model`). """ def __init__(self, hits, inout_cls=None): @@ -63,16 +74,16 @@ def hit_ids(self) -> TXplet: @classmethod def name_to_hit_ids(cls, str): """Convert a string representation of an xplet into a list of hit ids (see :py:meth:~`__str__`).""" - return [int(h) for h in str.split('_')] + return [int(h) for h in str.split("_")] @classmethod def hit_ids_to_name(cls, hits): """Inverse of :py:meth:~`name_to_hit_ids`.""" - return '_'.join(map(str, hits)) + return "_".join(map(str, hits)) def __str__(self): """Return a string made of hit ids joined by an underscore. This can be used in the QUBO as an identifier.""" - return '_'.join(map(str, self.hits)) + return "_".join(map(str, self.hits)) def __repr__(self): return self.__str__() @@ -80,8 +91,10 @@ def __repr__(self): def to_dict(self): d = dict(name=str(self), hits=self.hit_ids()) for k, v in self.__dict__.items(): - if k == 'hits' or k.startswith('inner') or k.startswith('outer'): continue - if isinstance(v, Xplet): v = str(v) + if k == "hits" or k.startswith("inner") or k.startswith("outer"): + continue + if isinstance(v, Xplet): + v = str(v) d[k] = v return d @@ -160,7 +173,7 @@ def __init__(self, d1: Doublet, d2: Doublet): #: Sign of the `drz` difference self.drz_sign = 1 if abs(d1.rz_angle + self.drz - d2.rz_angle) < 1e-3 else -1 #: QUBO weight, assigned later - self.weight = .0 + self.weight = 0.0 def doublets(self) -> List[Doublet]: """Return the ordered list of doublets composing this triplet.""" @@ -188,7 +201,7 @@ def __init__(self, t1: Triplet, t2: Triplet): self.volayer_span = self.hits[-1].volayer - self.hits[0].volayer #: QUBO coupling strength between the two triplets. 
Should be negative to encourage #: the two triplets to be kept together. - self.strength = .0 + self.strength = 0.0 def doublets(self) -> List[Doublet]: """Return the ordered list of doublets composing this triplet.""" diff --git a/src/hepqpr/qallse/data_wrapper.py b/qallse/data_wrapper.py similarity index 100% rename from src/hepqpr/qallse/data_wrapper.py rename to qallse/data_wrapper.py diff --git a/src/hepqpr/qallse/dsmaker/__init__.py b/qallse/dsmaker/__init__.py similarity index 100% rename from src/hepqpr/qallse/dsmaker/__init__.py rename to qallse/dsmaker/__init__.py diff --git a/src/hepqpr/qallse/dsmaker/data/event000001000-cells.csv b/qallse/dsmaker/data/event000001000-cells.csv similarity index 100% rename from src/hepqpr/qallse/dsmaker/data/event000001000-cells.csv rename to qallse/dsmaker/data/event000001000-cells.csv diff --git a/src/hepqpr/qallse/dsmaker/data/event000001000-hits.csv b/qallse/dsmaker/data/event000001000-hits.csv similarity index 100% rename from src/hepqpr/qallse/dsmaker/data/event000001000-hits.csv rename to qallse/dsmaker/data/event000001000-hits.csv diff --git a/src/hepqpr/qallse/dsmaker/data/event000001000-particles.csv b/qallse/dsmaker/data/event000001000-particles.csv similarity index 100% rename from src/hepqpr/qallse/dsmaker/data/event000001000-particles.csv rename to qallse/dsmaker/data/event000001000-particles.csv diff --git a/src/hepqpr/qallse/dsmaker/data/event000001000-truth.csv b/qallse/dsmaker/data/event000001000-truth.csv similarity index 100% rename from src/hepqpr/qallse/dsmaker/data/event000001000-truth.csv rename to qallse/dsmaker/data/event000001000-truth.csv diff --git a/src/hepqpr/qallse/dsmaker/dsmaker.py b/qallse/dsmaker/dsmaker.py similarity index 50% rename from src/hepqpr/qallse/dsmaker/dsmaker.py rename to qallse/dsmaker/dsmaker.py index 50c20eb..ece10a3 100644 --- a/src/hepqpr/qallse/dsmaker/dsmaker.py +++ b/qallse/dsmaker/dsmaker.py @@ -28,7 +28,7 @@ .. 
code:: - from hepqpr.qallse.dsmaker import create_dataset + from qallse.dsmaker import create_dataset metadata, path = create_dataset( density=0.1, output_path='/tmp', @@ -58,67 +58,73 @@ def _get_default_input_path(): from os import path - return path.join(path.dirname(path.realpath(__file__)), 'data', 'event000001000') + + return path.join(path.dirname(path.realpath(__file__)), "data", "event000001000") def create_dataset( - input_path=_get_default_input_path(), - output_path='.', - density=.1, - min_hits_per_track=5, - high_pt_cut=1., - double_hits_ok=False, - gen_doublets=False, - prefix=None, random_seed=None, phi_bounds=None) -> Tuple[Dict, str]: - input_path = input_path.replace('-hits.csv', '') # just in case + input_path=_get_default_input_path(), + output_path=".", + density=0.1, + min_hits_per_track=5, + high_pt_cut=1.0, + double_hits_ok=False, + gen_doublets=False, + prefix=None, + random_seed=None, + phi_bounds=None, +) -> Tuple[Dict, str]: + input_path = input_path.replace("-hits.csv", "") # just in case # capture all parameters, so we can dump them to a file later input_params = locals() # initialise random if random_seed is None: - random_seed = random.randint(0, 1<<30) + random_seed = random.randint(0, 1 << 30) random.seed(random_seed) - event_id = re.search('(event[0-9]+)', input_path)[0] + event_id = re.search("(event[0-9]+)", input_path)[0] # compute the prefix if prefix is None: - prefix = f'ez-{density}' + prefix = f"ez-{density}" if high_pt_cut > 0: - prefix += f'_hpt-{high_pt_cut}' + prefix += f"_hpt-{high_pt_cut}" else: - prefix += '_baby' + prefix += "_baby" if double_hits_ok: - prefix += '_dbl' + prefix += "_dbl" # ---------- prepare data # load the data - hits = pd.read_csv(input_path + '-hits.csv') - particles = pd.read_csv(input_path + '-particles.csv') - truth = pd.read_csv(input_path + '-truth.csv') + hits = pd.read_csv(input_path + "-hits.csv") + particles = pd.read_csv(input_path + "-particles.csv") + truth = pd.read_csv(input_path + "-truth.csv") # add indexes - particles.set_index('particle_id', drop=False, inplace=True) - truth.set_index('hit_id', drop=False, inplace=True) - hits.set_index('hit_id', drop=False, inplace=True) + particles.set_index("particle_id", drop=False, inplace=True) + truth.set_index("hit_id", drop=False, inplace=True) + hits.set_index("hit_id", drop=False, inplace=True) # create a merged dataset with hits and truth - df = hits.join(truth, rsuffix='_', how='inner') + df = hits.join(truth, rsuffix="_", how="inner") - logger.debug(f'Loaded {len(df)} hits from {input_path}.') + logger.debug(f"Loaded {len(df)} hits from {input_path}.") # ---------- filter hits # keep only hits in the barrel region df = df[hits.volume_id.isin(BARREL_VOLUME_IDS)] - logger.debug(f'Filtered hits from barrel. Remaining hits: {len(df)}.') + logger.debug(f"Filtered hits from barrel. Remaining hits: {len(df)}.") if phi_bounds is not None: - df['phi'] = np.arctan2(df.y, df.x) + df["phi"] = np.arctan2(df.y, df.x) df = df[(df.phi >= phi_bounds[0]) & (df.phi <= phi_bounds[1])] - logger.debug(f'Filtered using phi bounds {phi_bounds}. Remaining hits: {len(df)}.') + logger.debug( + f"Filtered using phi bounds {phi_bounds}. Remaining hits: {len(df)}." 
+ ) # store the noise for later, then remove them from the main dataframe # do this before filtering double hits, as noise will be thrown away as duplicates @@ -126,8 +132,10 @@ def create_dataset( df = df[df.particle_id != 0] if not double_hits_ok: - df.drop_duplicates(['particle_id', 'volume_id', 'layer_id'], keep='first', inplace=True) - logger.debug(f'Dropped double hits. Remaining hits: {len(df) + len(noise_df)}.') + df.drop_duplicates( + ["particle_id", "volume_id", "layer_id"], keep="first", inplace=True + ) + logger.debug(f"Dropped double hits. Remaining hits: {len(df) + len(noise_df)}.") # ---------- sample tracks @@ -152,13 +160,15 @@ def create_dataset( if high_pt_cut > 0: # set low pt weights to 0 - hpt_mask = np.sqrt(truth.tpx ** 2 + truth.tpy ** 2) >= high_pt_cut - new_truth.loc[~hpt_mask, 'weight'] = 0 - logger.debug(f'High Pt hits: {sum(hpt_mask)}/{len(new_truth)}') + hpt_mask = np.sqrt(truth.tpx**2 + truth.tpy**2) >= high_pt_cut + new_truth.loc[~hpt_mask, "weight"] = 0 + logger.debug(f"High Pt hits: {sum(hpt_mask)}/{len(new_truth)}") if min_hits_per_track > 0: - short_tracks = new_truth.groupby('particle_id').filter(lambda g: len(g) < min_hits_per_track) - new_truth.loc[short_tracks.index, 'weight'] = 0 + short_tracks = new_truth.groupby("particle_id").filter( + lambda g: len(g) < min_hits_per_track + ) + new_truth.loc[short_tracks.index, "weight"] = 0 new_truth.weight = new_truth.weight / new_truth.weight.sum() @@ -169,9 +179,9 @@ def create_dataset( os.makedirs(output_path, exist_ok=True) output_path = os.path.join(output_path, event_id) - new_hits.to_csv(output_path + '-hits.csv', index=False) - new_truth.to_csv(output_path + '-truth.csv', index=False) - new_particles.to_csv(output_path + '-particles.csv', index=False) + new_hits.to_csv(output_path + "-hits.csv", index=False) + new_truth.to_csv(output_path + "-truth.csv", index=False) + new_particles.to_csv(output_path + "-particles.csv", index=False) # ---------- write metadata @@ -184,52 +194,98 @@ def create_dataset( time=datetime.now().isoformat(), ) for k, v in metadata.items(): - logger.debug(f' {k}={v}') + logger.debug(f" {k}={v}") - metadata['params'] = input_params + metadata["params"] = input_params - with open(output_path + '-meta.json', 'w') as f: + with open(output_path + "-meta.json", "w") as f: json.dump(metadata, f, indent=4) # ------------ gen doublets if gen_doublets: - from hepqpr.qallse.seeding import generate_doublets + from qallse.seeding import generate_doublets + doublets_df = generate_doublets(hits=new_hits) - with open(output_path + '-doublets.csv', 'w') as f: + with open(output_path + "-doublets.csv", "w") as f: doublets_df.to_csv(f, index=False) - logger.info(f'Doublets (len={len(doublets_df)}) generated in f{output_path}.') + logger.info( + f"Doublets (len={len(doublets_df)}) generated in f{output_path}." 
+ ) return metadata, output_path -@click.command(context_settings=dict(help_option_names=['-h', '--help'])) -@click.option('-n', '--density', type=click.FloatRange(0, 1), default=.1, - help='The sampling to apply, in percent.') -@click.option('--hpt', type=float, default=1., - help='Only select tracks with a transverse momentum ' - 'higher or equal than FLOAT (in GeV, inclusive)') -@click.option('--double-hits/--no-double-hits', is_flag=True, default=False, - help='Keep only one instance of double hits.') -@click.option('-m', '--min-hits', type=int, default=5, - help='The minimum number of hits per tracks (inclusive)') -@click.option('-p', '--prefix', type=str, default=None, - help='Name of the dataset output directory') -@click.option('-s', '--seed', type=int, default=None, - help='Seed to use when initializing the random module') -@click.option('--no-doublets', is_flag=True, default=False, - help='Don\'t generate initial doublets') -@click.option('-v', '--verbose', is_flag=True, default=False, - help='Be verbose.') -@click.option('-o', '--output-path', default='.', - help='Where to create the dataset directoy') -@click.option('-i', 'input_path', default=_get_default_input_path(), - help='Path to the original event hits file') -def cli(density, hpt, double_hits, min_hits, prefix, seed, - no_doublets, verbose, output_path, input_path): - ''' - Create datasets from TrackML events suitable for HEPQPR.Qallse. +@click.command(context_settings=dict(help_option_names=["-h", "--help"])) +@click.option( + "-n", + "--density", + type=click.FloatRange(0, 1), + default=0.1, + help="The sampling to apply, in percent.", +) +@click.option( + "--hpt", + type=float, + default=1.0, + help="Only select tracks with a transverse momentum " + "higher or equal than FLOAT (in GeV, inclusive)", +) +@click.option( + "--double-hits/--no-double-hits", + is_flag=True, + default=False, + help="Keep only one instance of double hits.", +) +@click.option( + "-m", + "--min-hits", + type=int, + default=5, + help="The minimum number of hits per tracks (inclusive)", +) +@click.option( + "-p", + "--prefix", + type=str, + default=None, + help="Name of the dataset output directory", +) +@click.option( + "-s", + "--seed", + type=int, + default=None, + help="Seed to use when initializing the random module", +) +@click.option( + "--no-doublets", is_flag=True, default=False, help="Don't generate initial doublets" +) +@click.option("-v", "--verbose", is_flag=True, default=False, help="Be verbose.") +@click.option( + "-o", "--output-path", default=".", help="Where to create the dataset directoy" +) +@click.option( + "-i", + "input_path", + default=_get_default_input_path(), + help="Path to the original event hits file", +) +def cli( + density, + hpt, + double_hits, + min_hits, + prefix, + seed, + no_doublets, + verbose, + output_path, + input_path, +): + """ + Create datasets from TrackML events suitable for qallse. Main simplifications: no hits from the end-caps, no double hits (use the flag to force the inclusion of double hits). @@ -240,23 +296,32 @@ def cli(density, hpt, double_hits, min_hits, prefix, seed, If is set, particles and noise are random-sampled using the given percentage. This shouldn't alter the dataset characteristics, except for the noise-to-hit ratio (a bit lower). 
- ''' + """ if verbose: import sys + logging.basicConfig( stream=sys.stderr, format="%(asctime)s [dsmaker] %(message)s", - datefmt='%Y-%m-%dT%H:%M:%S', - level=logging.DEBUG) + datefmt="%Y-%m-%dT%H:%M:%S", + level=logging.DEBUG, + ) meta, path = create_dataset( - input_path, output_path, - density, min_hits, - hpt, double_hits, - not no_doublets, prefix, seed) + input_path, + output_path, + density, + min_hits, + hpt, + double_hits, + not no_doublets, + prefix, + seed, + ) + + seed, density = meta["random_seed"], meta["num_tracks"] + print(f"Dataset written in {path}* (seed={seed}, num. tracks={density})") - seed, density = meta['random_seed'], meta['num_tracks'] - print(f'Dataset written in {path}* (seed={seed}, num. tracks={density})') if __name__ == "__main__": cli() diff --git a/src/hepqpr/qallse/dumper.py b/qallse/dumper.py similarity index 80% rename from src/hepqpr/qallse/dumper.py rename to qallse/dumper.py index e9ab882..4aac4b8 100644 --- a/src/hepqpr/qallse/dumper.py +++ b/qallse/dumper.py @@ -5,7 +5,7 @@ .. code:: - from hepqpr.qallse import QallseD0, DataWrapper, dumper + from qallse import QallseD0, DataWrapper, dumper # build model model = QallseD0(DataWrapper.from_path('/data/path/eventx')) @@ -20,6 +20,7 @@ dumper.dump_xplets(xplets, format='json') # use json, so you can view the actual format """ + import json import pickle from contextlib import contextmanager @@ -32,6 +33,7 @@ # ---- custom Json encoder to handle special types + class _XpletsJsonEncoder(JSONEncoder): def default(self, obj): if isinstance(obj, Xplet): @@ -43,12 +45,12 @@ def default(self, obj): # ---- default argument values -_default_opath = '.' -_default_prefix = '' +_default_opath = "." +_default_prefix = "" @contextmanager -def use_markers(model, w_marker=None, c_marker='c'): +def use_markers(model, w_marker=None, c_marker="c"): """ Temporarily modifies the _compute_* methods of the model to insert placeholder values instead of coefficients in the QUBO. Note that the original methods are still @@ -58,7 +60,7 @@ def use_markers(model, w_marker=None, c_marker='c'): with use_markers(model) as altered_model: Q = altered_model.to_qubo() - :param model: an implementation of :py:class:`hepqpr.qallse.QallseBase` + :param model: an implementation of :py:class:`qallse.QallseBase` :param w_marker: the placeholder used for linear weights. Set it to None to use the original weight. :param c_marker: the placeholder used for conflict strengths. Set it to None to use the original weight. Default to 'c'. :return: an altered model @@ -85,8 +87,10 @@ def new_cc(*args, **kwargs): yield model - if old_cw is not None: model._compute_weight = old_cw - if old_cc is not None: model._compute_conflict_strength = old_cc + if old_cw is not None: + model._compute_weight = old_cw + if old_cc is not None: + model._compute_conflict_strength = old_cc def xplets_to_serializable_dict(model): @@ -97,11 +101,11 @@ def xplets_to_serializable_dict(model): into string. .. warning:: - This only works after model building (i.e. call to :py:meth:`hepqpr.qallse.QallseBase.build_model`). + This only works after model building (i.e. call to :py:meth:`qallse.QallseBase.build_model`). Also, some implementations might modify the xplets during qubo building, so it is better to call `model.to_qubo` beforehand. - :param model: an implementation of :py:class:`hepqpr.qallse.QallseBase` + :param model: an implementation of :py:class:`qallse.QallseBase` :return: a dict without cyclic references. 
""" xplets = [] @@ -115,7 +119,7 @@ def dump_qubo(model, output_path=_default_opath, prefix=_default_prefix, **marke Pickle a QUBO using specific markers. See also :py:meth:`use_markers`. The default filename is `qubo.pickle`. - :param model: an implementation of :py:class:`~hepqpr.qallse.QallseBase` + :param model: an implementation of :py:class:`~qallse.QallseBase` :param output_path: the output directory :param prefix: a prefix to use in the filename :param markers: see :py:meth:`use_markers` @@ -123,12 +127,18 @@ def dump_qubo(model, output_path=_default_opath, prefix=_default_prefix, **marke """ with use_markers(model, **markers) as altered_model: Q = altered_model.to_qubo() - with open(path_join(output_path, prefix + 'qubo.pickle'), 'wb') as f: + with open(path_join(output_path, prefix + "qubo.pickle"), "wb") as f: pickle.dump(Q, f) return Q -def dump_xplets(obj, output_path=_default_opath, prefix=_default_prefix, - format='pickle', **lib_kwargs): + +def dump_xplets( + obj, + output_path=_default_opath, + prefix=_default_prefix, + format="pickle", + **lib_kwargs, +): """ Save the output of :py:meth:`xplets_to_serializable_dict` to disk. @@ -141,19 +151,24 @@ def dump_xplets(obj, output_path=_default_opath, prefix=_default_prefix, if isinstance(obj, QallseBase): obj = xplets_to_serializable_dict(obj) - fname = path_join(output_path, f'{prefix}xplets.{format}') - if format == 'pickle': - with open(fname, 'wb') as f: + fname = path_join(output_path, f"{prefix}xplets.{format}") + if format == "pickle": + with open(fname, "wb") as f: pickle.dump(obj, f, **lib_kwargs) - elif format == 'json': - with open(fname, 'w') as f: + elif format == "json": + with open(fname, "w") as f: json.dump(obj, f, cls=_XpletsJsonEncoder, **lib_kwargs) else: - raise Exception(f'Unknown format: {format}') + raise Exception(f"Unknown format: {format}") -def dump_model(model, output_path=_default_opath, prefix=_default_prefix, - xplets_kwargs=None, qubo_kwargs=None): +def dump_model( + model, + output_path=_default_opath, + prefix=_default_prefix, + xplets_kwargs=None, + qubo_kwargs=None, +): """ Calls :py:meth:`dump_qubo` and :py:meth:`dump_xplets`. """ diff --git a/src/hepqpr/qallse/cli/__init__.py b/qallse/other/__init__.py similarity index 100% rename from src/hepqpr/qallse/cli/__init__.py rename to qallse/other/__init__.py diff --git a/src/hepqpr/qallse/other/dw_timing_recorder.py b/qallse/other/dw_timing_recorder.py similarity index 100% rename from src/hepqpr/qallse/other/dw_timing_recorder.py rename to qallse/other/dw_timing_recorder.py diff --git a/qallse/other/filter_input_doublets.py b/qallse/other/filter_input_doublets.py new file mode 100644 index 0000000..9880c9f --- /dev/null +++ b/qallse/other/filter_input_doublets.py @@ -0,0 +1,47 @@ +import click +from qallse import Volayer +import pandas as pd + + +def filter_doublets(hits, doublets, max_holes): + doublets = doublets.copy() + + # compute doublet spans + hits["volayer"] = hits[["volume_id", "layer_id"]].apply( + lambda serie: Volayer.get_index(serie.tolist()), axis=1 + ) + doublets["span"] = ( + hits.volayer.get(doublets.end).values - hits.volayer.get(doublets.start).values + ) + + # filter + return doublets[doublets.span <= max_holes + 1] + + +@click.command() +@click.option( + "-h", + "--max-holes", + type=int, + default=1, + help="Maximum number of holes (i.e. 
missing layers) allowed in doublets.", +) +@click.option("-i", "hits_path", required=True, help="path to the hits files") +def cli(max_holes, hits_path): + # load data + doublets_path = hits_path.replace("-hits.csv", "-doublets.csv") + hits = pd.read_csv(hits_path, index_col=0) + doublets = pd.read_csv(doublets_path) + # filter + filtered_doublets = filter_doublets(hits, doublets, max_holes) + n_discarded = len(doublets) - len(filtered_doublets) + print( + f"Discarded {n_discarded} doublets out of {len(doublets)} ({(n_discarded/len(doublets))*100:.3f}%)." + ) + # save + doublets.to_csv(doublets_path + ".orig", index=False) + filtered_doublets.to_csv(doublets_path, columns=["start", "end"], index=False) + + +if __name__ == "__main__": + cli() diff --git a/src/hepqpr/qallse/other/parse_qbsolv.py b/qallse/other/parse_qbsolv.py similarity index 100% rename from src/hepqpr/qallse/other/parse_qbsolv.py rename to qallse/other/parse_qbsolv.py diff --git a/src/hepqpr/qallse/other/stdout_redirect.py b/qallse/other/stdout_redirect.py similarity index 100% rename from src/hepqpr/qallse/other/stdout_redirect.py rename to qallse/other/stdout_redirect.py diff --git a/src/hepqpr/qallse/plotting.py b/qallse/plotting.py similarity index 100% rename from src/hepqpr/qallse/plotting.py rename to qallse/plotting.py diff --git a/src/hepqpr/qallse/qallse.py b/qallse/qallse.py similarity index 100% rename from src/hepqpr/qallse/qallse.py rename to qallse/qallse.py diff --git a/src/hepqpr/qallse/qallse_base.py b/qallse/qallse_base.py similarity index 100% rename from src/hepqpr/qallse/qallse_base.py rename to qallse/qallse_base.py diff --git a/src/hepqpr/qallse/qallse_d0.py b/qallse/qallse_d0.py similarity index 100% rename from src/hepqpr/qallse/qallse_d0.py rename to qallse/qallse_d0.py diff --git a/src/hepqpr/qallse/qallse_mp.py b/qallse/qallse_mp.py similarity index 100% rename from src/hepqpr/qallse/qallse_mp.py rename to qallse/qallse_mp.py diff --git a/src/hepqpr/qallse/seeding/__init__.py b/qallse/seeding/__init__.py similarity index 100% rename from src/hepqpr/qallse/seeding/__init__.py rename to qallse/seeding/__init__.py diff --git a/qallse/seeding/__main__.py b/qallse/seeding/__main__.py new file mode 100644 index 0000000..a10c028 --- /dev/null +++ b/qallse/seeding/__main__.py @@ -0,0 +1,8 @@ +def main(): + from qallse.seeding.main import cli + + cli() + + +if __name__ == "__main__": + main() diff --git a/src/hepqpr/qallse/seeding/config.py b/qallse/seeding/config.py similarity index 100% rename from src/hepqpr/qallse/seeding/config.py rename to qallse/seeding/config.py diff --git a/src/hepqpr/qallse/seeding/doublet_making.py b/qallse/seeding/doublet_making.py similarity index 100% rename from src/hepqpr/qallse/seeding/doublet_making.py rename to qallse/seeding/doublet_making.py diff --git a/src/hepqpr/qallse/seeding/main.py b/qallse/seeding/main.py similarity index 51% rename from src/hepqpr/qallse/seeding/main.py rename to qallse/seeding/main.py index 02f9721..92e4e9c 100644 --- a/src/hepqpr/qallse/seeding/main.py +++ b/qallse/seeding/main.py @@ -12,7 +12,7 @@ def generate_doublets(*args, **kwargs) -> pd.DataFrame: seeding_results = run_seeding(*args, **kwargs) doublets = structures_to_doublets(*seeding_results) - doublets_df = pd.DataFrame(doublets, columns=['start', 'end']).drop_duplicates() + doublets_df = pd.DataFrame(doublets, columns=["start", "end"]).drop_duplicates() return doublets_df @@ -21,7 +21,7 @@ def run_seeding(hits_path=None, hits=None, config_cls=HptSeedingConfig): n_layers = 
len(det.layers) hits = pd.read_csv(hits_path, index_col=False) if hits is None else hits.copy() - hits = hits.iloc[np.where(np.in1d(hits['volume_id'], [8, 13, 17]))] + hits = hits.iloc[np.where(np.in1d(hits["volume_id"], [8, 13, 17]))] config = config_cls(n_layers) # setting up structures @@ -33,44 +33,54 @@ def run_seeding(hits_path=None, hits=None, config_cls=HptSeedingConfig): return hits, spStorage, doubletsStorage -def structures_to_doublets(hits: pd.DataFrame = None, sps: SpacepointStorage = None, ds: DoubletStorage = None): +def structures_to_doublets( + hits: pd.DataFrame = None, sps: SpacepointStorage = None, ds: DoubletStorage = None +): doublets = [] for i, sp in enumerate(ds.spmIdx): - inner_indexes = ds.inner[ds.innerStart[i]:ds.innerStart[i + 1 if i + 1 < len(ds.spmIdx) else -1]] + inner_indexes = ds.inner[ + ds.innerStart[i] : ds.innerStart[i + 1 if i + 1 < len(ds.spmIdx) else -1] + ] doublets += [(sps.idsp[i], sps.idsp[sp]) for i in inner_indexes] - outer_indexes = ds.outer[ds.outerStart[i]:ds.outerStart[i + 1 if i + 1 < len(ds.spmIdx) else -1]] + outer_indexes = ds.outer[ + ds.outerStart[i] : ds.outerStart[i + 1 if i + 1 < len(ds.spmIdx) else -1] + ] doublets += [(sps.idsp[sp], sps.idsp[i]) for i in outer_indexes] return np.unique(np.array(doublets), axis=0) -@click.command(context_settings=dict(help_option_names=['-h', '--help'])) -@click.option('-o', '--out', default=None) -@click.option('--score/--no-score', is_flag=True, default=True) -@click.argument('hits_path', default='/tmp/barrel_100/event000001000') +@click.command(context_settings=dict(help_option_names=["-h", "--help"])) +@click.option("-o", "--out", default=None) +@click.option("--score/--no-score", is_flag=True, default=True) +@click.argument("hits_path", default="/tmp/barrel_100/event000001000") def cli(out=None, score=True, hits_path=None): - ''' + """ Generate initial doublets. 
- ''' - path = hits_path.replace('-hits.csv', '') - event_id = re.search('(event[0-9]+)', hits_path)[0] - if out is None: out = os.path.dirname(hits_path) + """ + path = hits_path.replace("-hits.csv", "") + event_id = re.search("(event[0-9]+)", hits_path)[0] + if out is None: + out = os.path.dirname(hits_path) - print(f'Loading file {hits_path}') - hits = pd.read_csv(path + '-hits.csv').set_index('hit_id', drop=False) + print(f"Loading file {hits_path}") + hits = pd.read_csv(path + "-hits.csv").set_index("hit_id", drop=False) doublets_df = generate_doublets(hits=hits) - print(f'found {doublets_df.shape[0]} doublets.') + print(f"found {doublets_df.shape[0]} doublets.") if score: - from hepqpr.qallse.data_wrapper import DataWrapper + from qallse.data_wrapper import DataWrapper + dw = DataWrapper.from_path(path) p, r, ms = dw.compute_score(doublets_df.values) - print(f'DBLETS SCORE -- precision {p * 100}%, recall: {r * 100}% (missing doublets: {len(ms)})') + print( + f"DBLETS SCORE -- precision {p * 100}%, recall: {r * 100}% (missing doublets: {len(ms)})" + ) os.makedirs(out, exist_ok=True) - with open(os.path.join(out, f'{event_id}-doublets.csv'), 'w') as f: + with open(os.path.join(out, f"{event_id}-doublets.csv"), "w") as f: doublets_df.to_csv(f, index=False) - print(f'doublets written to {f.name}') + print(f"doublets written to {f.name}") - print('done') + print("done") diff --git a/src/hepqpr/qallse/seeding/storage.py b/qallse/seeding/storage.py similarity index 100% rename from src/hepqpr/qallse/seeding/storage.py rename to qallse/seeding/storage.py diff --git a/src/hepqpr/qallse/seeding/topology.py b/qallse/seeding/topology.py similarity index 100% rename from src/hepqpr/qallse/seeding/topology.py rename to qallse/seeding/topology.py diff --git a/src/hepqpr/qallse/seeding/utils.py b/qallse/seeding/utils.py similarity index 100% rename from src/hepqpr/qallse/seeding/utils.py rename to qallse/seeding/utils.py diff --git a/src/hepqpr/qallse/track_recreater.py b/qallse/track_recreater.py similarity index 80% rename from src/hepqpr/qallse/track_recreater.py rename to qallse/track_recreater.py index e7c5149..d127332 100644 --- a/src/hepqpr/qallse/track_recreater.py +++ b/qallse/track_recreater.py @@ -2,7 +2,7 @@ This module defines classes to reassemble tracks from subtracks. :py:class:~`TrackRecreater` is very generic and can handle any kind of subtracks. -:py:class:~`TrackRecreaterD` is especially made for handling :py:class:`hepqpr.qallse.qallse.Qallse` outputs, that is +:py:class:~`TrackRecreaterD` is especially made for handling :py:class:`qallse.qallse.Qallse` outputs, that is a list of doublets that can potentially contain duplicates and/or conflicting doublets. """ @@ -56,7 +56,7 @@ def recreate(self, subtracks: Union[List, np.ndarray, pd.DataFrame]) -> List: merges = 1 # we are using '+' to concatenate lists, and if the doublets are numpy array, '+' actually # adds cell by cell... so ensure we deal with real lists ! 
- tracks = subtracks.tolist() if hasattr(subtracks, 'tolist') else subtracks + tracks = subtracks.tolist() if hasattr(subtracks, "tolist") else subtracks # iterations stops when no new merge can be performed iterations = 0 while merges > 0: @@ -71,7 +71,7 @@ def _recreate(self, subtracks) -> Tuple[List, bool]: merges = 0 for subtrack in subtracks: if subtrack[-1] in self._ends or subtrack[0] in self._starts: - logger.warning(f'conflicting subtrack added {subtrack}') + logger.warning(f"conflicting subtrack added {subtrack}") if subtrack[-1] in self._starts: new_xplet = subtrack[:-1] + self._starts[subtrack[-1]] self._remove(self._starts[subtrack[-1]]) @@ -87,8 +87,10 @@ def _recreate(self, subtracks) -> Tuple[List, bool]: return self.final_tracks, merges def _remove(self, subtrack): - if subtrack[0] in self._starts: del self._starts[subtrack[0]] - if subtrack[-1] in self._ends: del self._ends[subtrack[-1]] + if subtrack[0] in self._starts: + del self._starts[subtrack[0]] + if subtrack[-1] in self._ends: + del self._ends[subtrack[-1]] def _add(self, subtrack): self._starts[subtrack[0]] = subtrack @@ -115,7 +117,9 @@ def __init__(self): #: List of conflicts found during the last call to :py:meth:~`recreate` self.conflicts = [] - def process_results(self, doublets, resolve_conflicts=True, min_hits_per_track=5) -> Tuple[List, List]: + def process_results( + self, doublets, resolve_conflicts=True, min_hits_per_track=5 + ) -> Tuple[List, List]: """ Recreate tracks and handle duplicates from a set of doublets. :param doublets: a set of doublets, with possible duplicates and conflicts @@ -133,33 +137,42 @@ def process_results(self, doublets, resolve_conflicts=True, min_hits_per_track=5 return final_tracks, final_doublets - def recreate(self, doublets: Union[List, np.ndarray, pd.DataFrame], resolve_conflicts=True): + def recreate( + self, doublets: Union[List, np.ndarray, pd.DataFrame], resolve_conflicts=True + ): dblets, conflicts = self.find_conflicts(doublets) self.conflicts = conflicts.values.tolist() - logger.info(f'Found {len(self.conflicts)} conflicting doublets') + logger.info(f"Found {len(self.conflicts)} conflicting doublets") super().recreate(dblets.values) if resolve_conflicts and len(self.conflicts) > 0: n_resolved = self._resolve_conflicts(self.conflicts) - logger.info(f'Added {n_resolved} conflicting doublets') + logger.info(f"Added {n_resolved} conflicting doublets") return self.final_tracks @classmethod - def find_conflicts(cls, doublets: Union[pd.DataFrame, List, np.array]) -> [pd.DataFrame, pd.DataFrame]: + def find_conflicts( + cls, doublets: Union[pd.DataFrame, List, np.array] + ) -> [pd.DataFrame, pd.DataFrame]: """ Remove duplicates and extract conflicts from a list of doublets. :param doublets: the doublets :return: a dataframe of doublets devoid of duplicates or conflicts and a dataframe with all the conflicts """ - df = doublets if isinstance(doublets, pd.DataFrame) else \ - pd.DataFrame(doublets, columns=['start', 'end']) + df = ( + doublets + if isinstance(doublets, pd.DataFrame) + else pd.DataFrame(doublets, columns=["start", "end"]) + ) # remove exact duplicates df.drop_duplicates(inplace=True) # find conflicts, i.e. 
doublets either starting or ending at the same hit - conflicts = df[df.duplicated('start', keep=False) | df.duplicated('end', keep=False)] + conflicts = df[ + df.duplicated("start", keep=False) | df.duplicated("end", keep=False) + ] return df.drop(conflicts.index), conflicts def _resolve_conflicts(self, conflicts) -> int: @@ -172,9 +185,13 @@ def _resolve_conflicts(self, conflicts) -> int: # compute the score based on the resulting track length if added. # This has to be recomputed each time, since adding a doublet to the solution # may change the landscape. - score = 0 # TODO: use another score that looks at the shape of the tracks - if c[0] in self._ends: score += len(self._ends[c[0]]) - if c[1] in self._starts: score += len(self._starts[c[1]]) + score = ( + 0 # TODO: use another score that looks at the shape of the tracks + ) + if c[0] in self._ends: + score += len(self._ends[c[0]]) + if c[1] in self._starts: + score += len(self._starts[c[1]]) sum_score += score if score > best_score: best_score, best_candidate = score, c @@ -186,7 +203,11 @@ def _resolve_conflicts(self, conflicts) -> int: resolved.append(best_candidate) self._recreate([best_candidate]) # remove conflicts that can no longer be added - conflicts = [c for c in conflicts if c[0] not in self._starts and c[1] not in self._ends] + conflicts = [ + c + for c in conflicts + if c[0] not in self._starts and c[1] not in self._ends + ] if len(resolved): logger.debug(f'Conflicts added: {", ".join(map(str, resolved))}.') diff --git a/src/hepqpr/qallse/type_alias.py b/qallse/type_alias.py similarity index 100% rename from src/hepqpr/qallse/type_alias.py rename to qallse/type_alias.py diff --git a/src/hepqpr/qallse/utils.py b/qallse/utils.py similarity index 100% rename from src/hepqpr/qallse/utils.py rename to qallse/utils.py diff --git a/scripts/1-recreate_datasets.py b/scripts/1-recreate_datasets.py index 4384d68..7c060a3 100644 --- a/scripts/1-recreate_datasets.py +++ b/scripts/1-recreate_datasets.py @@ -7,17 +7,18 @@ - update the BUILD CONFIG options below (input paths and output paths) """ + import os.path as op -from hepqpr.qallse.cli.func import * -from hepqpr.qallse.dsmaker import create_dataset +from qallse.cli.func import * +from qallse.dsmaker import create_dataset # ==== BUILD CONFIG loglevel = logging.DEBUG -trackml_train_path = '~/git/quantum-annealing-project/trackml-data/train_100_events/' +trackml_train_path = "~/git/quantum-annealing-project/trackml-data/train_100_events/" -output_path = '/tmp/hpt-collapse' # f'~/current/hpt-collapse +output_path = "/tmp/hpt-collapse" # f'~/current/hpt-collapse # ==== seeds used @@ -53,40 +54,44 @@ # ==== generation -headers = 'event,percent,num_hits,num_noise,num_tracks,num_important_tracks,random_seed,cpu_time,wall_time'.split(',') +headers = "event,percent,num_hits,num_noise,num_tracks,num_important_tracks,random_seed,cpu_time,wall_time".split( + "," +) -if __name__ == '__main__': +if __name__ == "__main__": mat = [] - for row in ds_info.strip().split('\n'): - e, d, s = row.split(',') + for row in ds_info.strip().split("\n"): + e, d, s = row.split(",") event, ds, seed = int(e), float(d), int(s) - prefix = f'ds{ds*100:.0f}' + prefix = f"ds{ds*100:.0f}" - print(f'\n>>>> {prefix} <<<<\n') + print(f"\n>>>> {prefix} <<<<\n") with time_this() as time_info: metas, path = create_dataset( density=ds, - input_path=op.join(trackml_train_path, f'event00000{event}-hits.csv'), + input_path=op.join(trackml_train_path, f"event00000{event}-hits.csv"), output_path=output_path, prefix=prefix, 
min_hits_per_track=5, high_pt_cut=1.0, random_seed=int(seed), double_hits_ok=False, - gen_doublets=True + gen_doublets=True, ) - mat.append([ - event, - int(ds * 100), - metas['num_hits'], - metas['num_noise'], - metas['num_tracks'], - metas['num_important_tracks'], - seed, - time_info[0], - time_info[1], - ]) + mat.append( + [ + event, + int(ds * 100), + metas["num_hits"], + metas["num_noise"], + metas["num_tracks"], + metas["num_important_tracks"], + seed, + time_info[0], + time_info[1], + ] + ) stats = pd.DataFrame(mat, columns=headers) - stats.to_csv('recreate_datasets.csv', index=False) + stats.to_csv("recreate_datasets.csv", index=False) diff --git a/scripts/2-build_qubos.py b/scripts/2-build_qubos.py index 9661012..d6598bb 100644 --- a/scripts/2-build_qubos.py +++ b/scripts/2-build_qubos.py @@ -10,26 +10,28 @@ """ -from hepqpr.qallse.cli.func import * +from qallse.cli.func import * # ==== BUILD CONFIG TODO change it loglevel = logging.DEBUG -events = [1000] # events to run -dss = [10] # densities, here just ds10 +events = [1000] # events to run +dss = [10] # densities, here just ds10 -data_path = '/tmp/hpt-collapse/ds{ds}/event00000{event}-hits.csv' +data_path = "/tmp/hpt-collapse/ds{ds}/event00000{event}-hits.csv" -output_path = '/tmp/' -output_prefix = 'evt{event}-ds{ds}-' +output_path = "/tmp/" +output_prefix = "evt{event}-ds{ds}-" model_class = QallseD0 # model class to use extra_config = dict() # model config dump_config = dict( xplets_kwargs=dict(), # use json or "pickle" - qubo_kwargs=dict(w_marker=None, c_marker=None) # save the real coefficients VS generic placeholders + qubo_kwargs=dict( + w_marker=None, c_marker=None + ), # save the real coefficients VS generic placeholders ) # ==== configure logging @@ -38,17 +40,17 @@ # ==== build model -if __name__ == '__main__': +if __name__ == "__main__": mat = [] for event in events: for ds in dss: - print(f'\n==========>>> processing event {event} ds{ds}\n', flush=True) + print(f"\n==========>>> processing event {event} ds{ds}\n", flush=True) # load data path = data_path.format(event=event, ds=ds) dw = DataWrapper.from_path(path) - doublets = pd.read_csv(path.replace('-hits.csv', '-doublets.csv')) + doublets = pd.read_csv(path.replace("-hits.csv", "-doublets.csv")) # build model with time_this() as time_info: @@ -60,19 +62,22 @@ model, output_path=output_path, prefix=output_prefix.format(event=event, ds=ds), - **dump_config + **dump_config, ) # gather stats mat.append( [ - event, ds, + event, + ds, len(model.qubo_doublets), len(model.qubo_triplets), len(model.quadruplets), - len(Q) - ] + time_info) + len(Q), + ] + + time_info + ) - headers = 'event,percent,n_doublets,n_triplets,n_qplets,q,cpu_time,wall_time' - stats = pd.DataFrame(mat, columns=headers.split(',')) - stats.to_csv('build_qubo.csv', index=False) + headers = "event,percent,n_doublets,n_triplets,n_qplets,q,cpu_time,wall_time" + stats = pd.DataFrame(mat, columns=headers.split(",")) + stats.to_csv("build_qubo.csv", index=False) diff --git a/scripts/3-solve_qubos.py b/scripts/3-solve_qubos.py index b21d937..5327e49 100644 --- a/scripts/3-solve_qubos.py +++ b/scripts/3-solve_qubos.py @@ -10,10 +10,11 @@ - update the values in BUILD CONFIG """ + import pickle import random -from hepqpr.qallse.cli.func import * +from qallse.cli.func import * import os.path as op # ==== RUN CONFIG TODO change it @@ -24,15 +25,19 @@ dss = [10] repeat = 1 -data_path = '/tmp/hpt-collapse/ds{ds}/event00000{event}-hits.csv' # path to the datasets +data_path = ( + 
"/tmp/hpt-collapse/ds{ds}/event00000{event}-hits.csv" # path to the datasets +) -qubo_path = '/tmp' # path where the qubos are pickled -qubo_prefix = 'evt{event}-ds{ds}-' # prefix for the qubo files -output_path = '/tmp/' # where to serialize the responses +qubo_path = "/tmp" # path where the qubos are pickled +qubo_prefix = "evt{event}-ds{ds}-" # prefix for the qubo files +output_path = "/tmp/" # where to serialize the responses output_prefix = qubo_prefix # prefix for serialized responses -solver = 'neal' # solver to use -solver_config = dict() # parameters for the solver. Note that "seed" is generated later. +solver = "neal" # solver to use +solver_config = ( + dict() +) # parameters for the solver. Note that "seed" is generated later. # ==== configure logging @@ -41,32 +46,35 @@ # ==== build model + def run_one(event, ds): # load data path = data_path.format(event=event, ds=ds) dw = DataWrapper.from_path(path) - qubo_filepath = op.join(qubo_path, qubo_prefix.format(event=event, ds=ds) + 'qubo.pickle') + qubo_filepath = op.join( + qubo_path, qubo_prefix.format(event=event, ds=ds) + "qubo.pickle" + ) - with open(qubo_filepath, 'rb') as f: + with open(qubo_filepath, "rb") as f: Q = pickle.load(f) en0 = dw.compute_energy(Q) for i in range(repeat): # set seed seed = random.randint(0, 1 << 30) - solver_config['seed'] = seed + solver_config["seed"] = seed # build model with time_this() as time_info: with time_this() as qtime_info: - if solver == 'neal': + if solver == "neal": response = solve_neal(Q, **solver_config) - elif solver == 'qbsolv': + elif solver == "qbsolv": response = solve_qbsolv(Q, **solver_config) - elif solver == 'dwave': + elif solver == "dwave": response = solve_dwave(Q, **solver_config) else: - raise Exception('Invalid solver name.') + raise Exception("Invalid solver name.") final_doublets, final_tracks = process_response(response) @@ -77,41 +85,60 @@ def run_one(event, ds): # output composition _, _, d_real = diff_rows(dw.get_real_doublets(), final_doublets) - _, d_fakes, d_real_all = diff_rows(dw.get_real_doublets(with_unfocused=True), final_doublets) + _, d_fakes, d_real_all = diff_rows( + dw.get_real_doublets(with_unfocused=True), final_doublets + ) # save response output_filename = op.join( output_path, - output_prefix.format(event=event, ds=ds) + f'{solver}-{i}-response.pickle') + output_prefix.format(event=event, ds=ds) + f"{solver}-{i}-response.pickle", + ) - with open(output_filename, 'wb') as f: + with open(output_filename, "wb") as f: pickle.dump(response, f) # gather stats mat.append( [ - event, ds, seed, i, + event, + ds, + seed, + i, len(final_tracks), - len(dw.get_real_doublets()), len(final_doublets), - len(d_real), len(d_real_all), len(d_fakes), - p, r, trackml, len(ms), - en, en0, en - en0, - ] + qtime_info + time_info) - - -if __name__ == '__main__': + len(dw.get_real_doublets()), + len(final_doublets), + len(d_real), + len(d_real_all), + len(d_fakes), + p, + r, + trackml, + len(ms), + en, + en0, + en - en0, + ] + + qtime_info + + time_info + ) + + +if __name__ == "__main__": mat = [] for event in events: for ds in dss: - print(f'\n==========>>> processing event {event} ds{ds}\n', flush=True) + print(f"\n==========>>> processing event {event} ds{ds}\n", flush=True) run_one(event, ds) - headers = 'event,percent,seed,repeat,tracks_gen,' \ - 'n_true,n_gen,n_real,n_real_all,n_fakes,' \ - 'precision,recall,trackml,missings,' \ - 'en,en0,endiff,' \ - 'qtime_cpu,qtime_wall,cpu_time,wall_time' + headers = ( + "event,percent,seed,repeat,tracks_gen," + 
"n_true,n_gen,n_real,n_real_all,n_fakes," + "precision,recall,trackml,missings," + "en,en0,endiff," + "qtime_cpu,qtime_wall,cpu_time,wall_time" + ) - stats = pd.DataFrame(mat, columns=headers.split(',')) - stats.to_csv(f'solve_qubos_{solver}.csv', index=False) + stats = pd.DataFrame(mat, columns=headers.split(",")) + stats.to_csv(f"solve_qubos_{solver}.csv", index=False) diff --git a/src/hepqpr/__init__.py b/src/hepqpr/__init__.py deleted file mode 100644 index 0260537..0000000 --- a/src/hepqpr/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) \ No newline at end of file diff --git a/src/hepqpr/qallse/cli/__main__.py b/src/hepqpr/qallse/cli/__main__.py deleted file mode 100644 index dcd045b..0000000 --- a/src/hepqpr/qallse/cli/__main__.py +++ /dev/null @@ -1,7 +0,0 @@ -def main(): - from hepqpr.qallse.cli.entrypoints import cli - cli() - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/src/hepqpr/qallse/cli/entrypoints.py b/src/hepqpr/qallse/cli/entrypoints.py deleted file mode 100644 index e34bea7..0000000 --- a/src/hepqpr/qallse/cli/entrypoints.py +++ /dev/null @@ -1,240 +0,0 @@ -import sys - -import click -import pickle -from os import path as op - -from hepqpr.qallse.cli.func import * -from hepqpr.qallse.cli.utils import * - - -class GlobalOptions: - def __init__(self, hits_path, opath=None, prefix=''): - self.hits_path = hits_path - self.output_path = opath - self.prefix = prefix - self._dw = None # lazy creation (not created with --help) - - @property - def path(self): - if self.hits_path is None: - # simulate the required=True click option, because if used directly, - # one cannot display a subcommand help without passing the hit path ... - click.echo("Error: Missing option '-i' / '--hits-path'.", err=True) - sys.exit(1) - return self.hits_path.replace('-hits.csv', '') - - @property - def dw(self): - if self._dw is None: self._dw = DataWrapper.from_path(self.path) - return self._dw - - def get_output_path(self, filename): - return op.join(self.output_path, self.prefix + filename) - - -# ------ - -@click.group(context_settings=dict(help_option_names=['-h', '--help'])) -@click.option('--debug/--no-debug', is_flag=True, default=False) -@click.option('-i', '--hits-path', - help='[required] Path to the hits file.') -@click.option('-o', '--output-path', default=None, metavar='directory', - help='Where to save the output files.') -@click.option('-p', '--prefix', default='', metavar='text', - help='Prefix prepended to all output files.') -@click.pass_context -def cli(ctx, debug, hits_path, output_path, prefix): - ''' - Solve the pattern recognition problem using QA. - - The is the path to a hit file generated using the - `create_dataset` method. The directory should also contain a truth file - and the initial doublets file (created either during `create_dataset` or - using the `run_seeding` script). - - Output files will be saved to the given , if any, using default names. - If set, the will be prepended to all output files. 
- ''' - # configure logging - init_logging(logging.DEBUG if debug else logging.INFO) - - # load input data - ctx.obj = GlobalOptions(hits_path, output_path, prefix) - - -@cli.command('build') -@click.option('--add-missing', is_flag=True, default=False, - help='If set, ensure 100% input recall.') -@click.option('-c', '--cls', default='qallse_d0', metavar='module_name', - help='Model to use.') -@click.option('-e', '--extra', type=str, multiple=True, metavar='key=value', - help='Override default model configuration.') -@click.pass_obj -def cli_build(ctx, add_missing, cls, extra): - ''' - Generate the QUBO. - - The QUBO and the xplets used by it are saved as pickle files in the current directory - (use --output-path and --prefix options to change it). - - will add any true missing doublet to the input, ensuring an input recall of 100%. - lets you choose which model to use: qallse_d0 (default), qallse, qallse_mp, etc. - are key=values corresponding to configuration options of the model, (e.g. -e qubo_conflict_strength=0.5). - ''' - from hepqpr.qallse import dumper - extra_config = extra_to_dict(extra) - ModelClass = qallse_class_from_string('.' + cls) - model = ModelClass(ctx.dw, **extra_config) - - build_model(ctx.path, model, add_missing) - dumper.dump_model(model, ctx.output_path, ctx.prefix, qubo_kwargs=dict(w_marker=None, c_marker=None)) - print('Wrote qubo to', ctx.get_output_path("qubo.pickle")) - -@cli.command('qbsolv') -@click.option('-q', '--qubo', default=None, metavar='filepath', - help='Path a the pickled QUBO.') -@click.option('-dw', '--dwave-conf', default=None, type=str, metavar='filepath', - help='Path to a dwave.conf. If set, use a D-Wave as the sub-QUBO solver.') -@click.option('-v', '--verbosity', type=click.IntRange(-1, 6), default=-1, metavar='int', - help='qbsolv verbosity.') -@click.option('-l', '--logfile', type=str, default=None, metavar='filepath', - help='Where to redirect the qbsolv output. Does only make sense for verbosity > 0.') -@click.option('-e', '--extra', type=str, multiple=True, metavar='key=', - help='Additional options to qbsolv. ' - 'Allowed keys: seed, num_repeats, (+If D-Wave: num_reads).') -@click.pass_obj -def cli_qbsolv(ctx, qubo, dwave_conf, verbosity, logfile, extra): - ''' - Sample a QUBO using qbsolv (!slower!) and a D-Wave (optional). - - By default, this will run qbsolv (https://github.com/dwavesystems/qbsolv) - in simulation. To use a D-Wave, set the option to - a valid dwave configuration file (see https://cloud.dwavesys.com/leap/). - - is the path to the pickled qubo (default to /qubo.pickle). - and are passed to qbsolv. will redirect all qbsolv output to - a file (see also the parse_qbsolv script). - ''' - try: - if qubo is None: qubo = ctx.get_output_path('qubo.pickle') - with open(qubo, 'rb') as f: - Q = pickle.load(f) - except: - print(f'Failed to load QUBO. 
Are you sure {qubo} is a pickled qubo file ?') - sys.exit(-1) - - qbsolv_kwargs = extra_to_dict(extra, typ=int) - qbsolv_kwargs['logfile'] = logfile - qbsolv_kwargs['verbosity'] = verbosity - - if dwave_conf is not None: - response = solve_dwave(Q, dwave_conf, **qbsolv_kwargs) - else: - response = solve_qbsolv(Q, **qbsolv_kwargs) - - print_stats(ctx.dw, response, Q) - if ctx.output_path is not None: - oname = ctx.get_output_path('qbsolv_response.pickle') - with open(oname, 'wb') as f: pickle.dump(response, f) - print(f'Wrote response to {oname}') - - -@cli.command('neal', - help='Sample a QUBO using neal.') -@click.option('-q', '--qubo', default=None, metavar='filepath', - help='Path to the pickled QUBO. Default to /qubo.pickle') -@click.option('-s', '--seed', default=None, type=int, metavar='int', - help='Seed to use.') -@click.pass_obj -def cli_neal(ctx, qubo, seed): - ''' - Solve a QUBO using neal (!fast!) - - neal (https://github.com/dwavesystems/dwave-neal) is a simulated annealing sampler. - It is faster than qbsolv by two order of magnitude with similar (if not better) results. - ''' - try: - if qubo is None: qubo = ctx.get_output_path('qubo.pickle') - with open(qubo, 'rb') as f: - Q = pickle.load(f) - except: - print(f'Failed to load QUBO. Are you sure {qubo} is a pickled qubo file ?') - sys.exit(-1) - - response = solve_neal(Q, seed=seed) - print_stats(ctx.dw, response, Q) - if ctx.output_path is not None: - oname = ctx.get_output_path('neal_response.pickle') - with open(oname, 'wb') as f: pickle.dump(response, f) - print(f'Wrote response to {oname}') - - -@cli.command('quickstart', - context_settings=dict(ignore_unknown_options=True, allow_extra_args=True)) -@click.pass_context -def cli_quickstart(ctx): - ''' - Run the whole algorithm (build+neal). - - This accepts the same options as the build command. If no is set, - a temporary directory is created for the time of the run and deleted on exit. - - Minimal example using a very small dataset: - - \b - create_dataset -n 0.01 -p mini - qallse -i mini/event000001000-hits.csv quickstart - - ''' - - def _chain(): - ctx.forward(cli_build) - ctx.invoke(cli_neal) - - if ctx.obj.output_path is None: - import tempfile - with tempfile.TemporaryDirectory() as tmpdir: - ctx.obj.output_path = tmpdir - _chain() - else: - _chain() - - -@cli.command('plot') -@click.option('-r', '--response', metavar='filepath', required=True, - help='Path to the response file.') -@click.option('-d', '--dims', default='xy', type=click.Choice(['xy', 'zr', 'zxy']), - help='Dimensions of the plot.') -@click.option('-m', '--mode', default='d', type=click.Choice(['d', 't', 'dt']), - help='Plot the doublets only (d), the triplets only (t), or both (dt).') -@click.pass_obj -def cli_plot(ctx, response, dims, mode): - ''' - Plot the final doublets and final tracks. - - This uses (https://plot.ly) and the hepqpr.qallse.plotting module to - show the final tracks and doublets. - The plots are saved as html files either in or in the current directory. - - WARNING: don't try to plot results from large datasets, especially 3D plots !! - ''' - from hepqpr.qallse.plotting import iplot_results, iplot_results_tracks - - dims = list(dims) - - with open(response, 'rb') as f: - r = pickle.load(f) - final_doublets, final_tracks = process_response(r) - _, missings, _ = diff_rows(final_doublets, ctx.dw.get_real_doublets()) - - - if ctx.output_path is None: - ctx.output_path = '.' 
- dout = ctx.get_output_path('plot-doublets.html') - tout = ctx.get_output_path('plot-triplets.html') - - if 'd' in mode: - iplot_results(ctx.dw, final_doublets, missings, dims=dims, filename=dout) - if 't' in mode: - iplot_results_tracks(ctx.dw, final_tracks, dims=dims, filename=tout) diff --git a/src/hepqpr/qallse/other/__init__.py b/src/hepqpr/qallse/other/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/hepqpr/qallse/other/filter_input_doublets.py b/src/hepqpr/qallse/other/filter_input_doublets.py deleted file mode 100644 index 8b6f3e9..0000000 --- a/src/hepqpr/qallse/other/filter_input_doublets.py +++ /dev/null @@ -1,35 +0,0 @@ -import click -from hepqpr.qallse import Volayer -import pandas as pd - - -def filter_doublets(hits, doublets, max_holes): - doublets = doublets.copy() - - # compute doublet spans - hits['volayer'] = hits[['volume_id', 'layer_id']].apply(lambda serie: Volayer.get_index(serie.tolist()), axis=1) - doublets['span'] = hits.volayer.get(doublets.end).values - hits.volayer.get(doublets.start).values - - # filter - return doublets[doublets.span <= max_holes + 1] - - -@click.command() -@click.option('-h', '--max-holes', type=int, default=1, - help='Maximum number of holes (i.e. missing layers) allowed in doublets.') -@click.option('-i', 'hits_path', required=True, help='path to the hits files') -def cli(max_holes, hits_path): - # load data - doublets_path = hits_path.replace('-hits.csv', '-doublets.csv') - hits = pd.read_csv(hits_path, index_col=0) - doublets = pd.read_csv(doublets_path) - # filter - filtered_doublets = filter_doublets(hits, doublets, max_holes) - n_discarded = len(doublets) - len(filtered_doublets) - print(f'Discarded {n_discarded} doublets out of {len(doublets)} ({(n_discarded/len(doublets))*100:.3f}%).') - # save - doublets.to_csv(doublets_path + '.orig', index=False) - filtered_doublets.to_csv(doublets_path, columns=['start', 'end'], index=False) - -if __name__ == '__main__': - cli() diff --git a/src/setup.py b/src/setup.py deleted file mode 100644 index 12f954f..0000000 --- a/src/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -import setuptools -import io -from os import path - -here = path.abspath(path.dirname(__file__)) - -# Get the long description from the README file -with io.open(path.join(here, '..', 'README.md'), mode='rt', encoding='utf-8') as f: - long_description = f.read() - -setuptools.setup( - name='hepqpr-qallse', - version='0.2.0', - author='Lucy Linder', - author_email='lucy.derlin@gmail.com', - description='High Energy Physics, Quantum Pattern Recognition using QUBO/D-Wave', - license='Apache License 2.0', - long_description=long_description, - long_description_content_type='text/markdown', - url='https://github.com/derlin/hepqpr-qallse', - - packages=setuptools.find_packages(), - package_data={'': ['*.csv', '**/*.csv']}, # include all *.csv under src - # include_package_data=True - entry_points={ - 'console_scripts': [ - 'qallse = hepqpr.qallse.cli.__main__:main', - 'create_dataset = hepqpr.qallse.dsmaker.dsmaker:cli', - 'run_seeding = hepqpr.qallse.seeding.__main__:main', - 'parse_qbsolv = hepqpr.qallse.other.parse_qbsolv:cli', - 'filter_doublets = hepqpr.qallse.other.filter_input_doublets:cli' - ] - }, - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Science/Research', - 'Topic :: Scientific/Engineering :: Information Analysis', - 'Topic :: Scientific/Engineering :: Physics', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Operating System 
:: OS Independent' - ], - install_requires=[ - 'numpy==2.1.2', - 'pandas==2.2.3', - 'trackml @ git+https://github.com/LAL/trackml-library.git#egg=trackml-v2', - 'dwave-neal==0.6.0', - 'click==8.1.7', - 'jsonschema==4.23.0', - 'plotly==5.24.1', - ], - python_requires='==3.13', -) From 4c4b7b0e924ac1c69aec4f4afc8626194bac834b Mon Sep 17 00:00:00 2001 From: lc3267 Date: Mon, 14 Oct 2024 13:50:34 +0000 Subject: [PATCH 2/3] poetry package Signed-off-by: lc3267 --- poetry.lock | 242 +++++++++++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 17 ++++ 2 files changed, 259 insertions(+) create mode 100644 poetry.lock create mode 100644 pyproject.toml diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..379c39a --- /dev/null +++ b/poetry.lock @@ -0,0 +1,242 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. + +[[package]] +name = "numpy" +version = "2.1.2" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.10" +files = [ + {file = "numpy-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:30d53720b726ec36a7f88dc873f0eec8447fbc93d93a8f079dfac2629598d6ee"}, + {file = "numpy-2.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d3ca0a72dd8846eb6f7dfe8f19088060fcb76931ed592d29128e0219652884"}, + {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:fc44e3c68ff00fd991b59092a54350e6e4911152682b4782f68070985aa9e648"}, + {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:7c1c60328bd964b53f8b835df69ae8198659e2b9302ff9ebb7de4e5a5994db3d"}, + {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6cdb606a7478f9ad91c6283e238544451e3a95f30fb5467fbf715964341a8a86"}, + {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d666cb72687559689e9906197e3bec7b736764df6a2e58ee265e360663e9baf7"}, + {file = "numpy-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6eef7a2dbd0abfb0d9eaf78b73017dbfd0b54051102ff4e6a7b2980d5ac1a03"}, + {file = "numpy-2.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:12edb90831ff481f7ef5f6bc6431a9d74dc0e5ff401559a71e5e4611d4f2d466"}, + {file = "numpy-2.1.2-cp310-cp310-win32.whl", hash = "sha256:a65acfdb9c6ebb8368490dbafe83c03c7e277b37e6857f0caeadbbc56e12f4fb"}, + {file = "numpy-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:860ec6e63e2c5c2ee5e9121808145c7bf86c96cca9ad396c0bd3e0f2798ccbe2"}, + {file = "numpy-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b42a1a511c81cc78cbc4539675713bbcf9d9c3913386243ceff0e9429ca892fe"}, + {file = "numpy-2.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:faa88bc527d0f097abdc2c663cddf37c05a1c2f113716601555249805cf573f1"}, + {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c82af4b2ddd2ee72d1fc0c6695048d457e00b3582ccde72d8a1c991b808bb20f"}, + {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:13602b3174432a35b16c4cfb5de9a12d229727c3dd47a6ce35111f2ebdf66ff4"}, + {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebec5fd716c5a5b3d8dfcc439be82a8407b7b24b230d0ad28a81b61c2f4659a"}, + {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2b49c3c0804e8ecb05d59af8386ec2f74877f7ca8fd9c1e00be2672e4d399b1"}, + {file = "numpy-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:2cbba4b30bf31ddbe97f1c7205ef976909a93a66bb1583e983adbd155ba72ac2"}, + {file = "numpy-2.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e00ea6fc82e8a804433d3e9cedaa1051a1422cb6e443011590c14d2dea59146"}, + {file = "numpy-2.1.2-cp311-cp311-win32.whl", hash = "sha256:5006b13a06e0b38d561fab5ccc37581f23c9511879be7693bd33c7cd15ca227c"}, + {file = "numpy-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:f1eb068ead09f4994dec71c24b2844f1e4e4e013b9629f812f292f04bd1510d9"}, + {file = "numpy-2.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7bf0a4f9f15b32b5ba53147369e94296f5fffb783db5aacc1be15b4bf72f43b"}, + {file = "numpy-2.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b1d0fcae4f0949f215d4632be684a539859b295e2d0cb14f78ec231915d644db"}, + {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f751ed0a2f250541e19dfca9f1eafa31a392c71c832b6bb9e113b10d050cb0f1"}, + {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:bd33f82e95ba7ad632bc57837ee99dba3d7e006536200c4e9124089e1bf42426"}, + {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8cde4f11f0a975d1fd59373b32e2f5a562ade7cde4f85b7137f3de8fbb29a0"}, + {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d95f286b8244b3649b477ac066c6906fbb2905f8ac19b170e2175d3d799f4df"}, + {file = "numpy-2.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ab4754d432e3ac42d33a269c8567413bdb541689b02d93788af4131018cbf366"}, + {file = "numpy-2.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e585c8ae871fd38ac50598f4763d73ec5497b0de9a0ab4ef5b69f01c6a046142"}, + {file = "numpy-2.1.2-cp312-cp312-win32.whl", hash = "sha256:9c6c754df29ce6a89ed23afb25550d1c2d5fdb9901d9c67a16e0b16eaf7e2550"}, + {file = "numpy-2.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:456e3b11cb79ac9946c822a56346ec80275eaf2950314b249b512896c0d2505e"}, + {file = "numpy-2.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a84498e0d0a1174f2b3ed769b67b656aa5460c92c9554039e11f20a05650f00d"}, + {file = "numpy-2.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4d6ec0d4222e8ffdab1744da2560f07856421b367928026fb540e1945f2eeeaf"}, + {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:259ec80d54999cc34cd1eb8ded513cb053c3bf4829152a2e00de2371bd406f5e"}, + {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:675c741d4739af2dc20cd6c6a5c4b7355c728167845e3c6b0e824e4e5d36a6c3"}, + {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b2d4e667895cc55e3ff2b56077e4c8a5604361fc21a042845ea3ad67465aa8"}, + {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43cca367bf94a14aca50b89e9bc2061683116cfe864e56740e083392f533ce7a"}, + {file = "numpy-2.1.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:76322dcdb16fccf2ac56f99048af32259dcc488d9b7e25b51e5eca5147a3fb98"}, + {file = "numpy-2.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:32e16a03138cabe0cb28e1007ee82264296ac0983714094380b408097a418cfe"}, + {file = "numpy-2.1.2-cp313-cp313-win32.whl", hash = "sha256:242b39d00e4944431a3cd2db2f5377e15b5785920421993770cddb89992c3f3a"}, + {file = "numpy-2.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:f2ded8d9b6f68cc26f8425eda5d3877b47343e68ca23d0d0846f4d312ecaa445"}, + {file = "numpy-2.1.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ffef621c14ebb0188a8633348504a35c13680d6da93ab5cb86f4e54b7e922b5"}, 
+ {file = "numpy-2.1.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ad369ed238b1959dfbade9018a740fb9392c5ac4f9b5173f420bd4f37ba1f7a0"}, + {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d82075752f40c0ddf57e6e02673a17f6cb0f8eb3f587f63ca1eaab5594da5b17"}, + {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:1600068c262af1ca9580a527d43dc9d959b0b1d8e56f8a05d830eea39b7c8af6"}, + {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a26ae94658d3ba3781d5e103ac07a876b3e9b29db53f68ed7df432fd033358a8"}, + {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13311c2db4c5f7609b462bc0f43d3c465424d25c626d95040f073e30f7570e35"}, + {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:2abbf905a0b568706391ec6fa15161fad0fb5d8b68d73c461b3c1bab6064dd62"}, + {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ef444c57d664d35cac4e18c298c47d7b504c66b17c2ea91312e979fcfbdfb08a"}, + {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:bdd407c40483463898b84490770199d5714dcc9dd9b792f6c6caccc523c00952"}, + {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:da65fb46d4cbb75cb417cddf6ba5e7582eb7bb0b47db4b99c9fe5787ce5d91f5"}, + {file = "numpy-2.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c193d0b0238638e6fc5f10f1b074a6993cb13b0b431f64079a509d63d3aa8b7"}, + {file = "numpy-2.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a7d80b2e904faa63068ead63107189164ca443b42dd1930299e0d1cb041cec2e"}, + {file = "numpy-2.1.2.tar.gz", hash = "sha256:13532a088217fa624c99b843eeb54640de23b3414b14aa66d023805eb731066c"}, +] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "pandas" +version = "2.2.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + 
{file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.22.4", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] 
+parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + +[[package]] +name = "plotly" +version = "5.24.1" +description = "An open-source, interactive data visualization library for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "plotly-5.24.1-py3-none-any.whl", hash = "sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089"}, + {file = "plotly-5.24.1.tar.gz", hash = "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae"}, +] + +[package.dependencies] +packaging = "*" +tenacity = ">=6.2.0" + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2024.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "tenacity" +version = "9.0.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, + {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tzdata" +version = "2024.2" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, +] + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "91d28ab732ec8a07782756ffdde5c56ab2cf4133458515267e759cf2c9e943a9" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..b3722ee --- 
/dev/null +++ b/pyproject.toml @@ -0,0 +1,17 @@ +[tool.poetry] +name = "qallse" +version = "0.1.0" +description = "" +authors = ["Maja Franz ", "Melvin Strobl "] +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.11" +numpy = "^2.1.2" +plotly = "^5.24.1" +pandas = "^2.2.3" + + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" From 1de7e38c5f8ae7f52daaa8241f9a2735f320b090 Mon Sep 17 00:00:00 2001 From: lc3267 Date: Mon, 14 Oct 2024 14:27:46 +0000 Subject: [PATCH 3/3] added dwave annealing package Signed-off-by: lc3267 --- poetry.lock | 121 +++++++++++++++++++++++++++++++++++++++++++++++-- pyproject.toml | 1 + 2 files changed, 119 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 379c39a..e57b696 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,121 @@ # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +[[package]] +name = "dimod" +version = "0.12.17" +description = "A shared API for binary quadratic model samplers." +optional = false +python-versions = ">=3.8" +files = [ + {file = "dimod-0.12.17-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:20a59eb33450d57208a1d915be3c773db9379d9bb6f977857165e3be7f6a1b74"}, + {file = "dimod-0.12.17-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec08ce3dc36ad5be9131001843d5fc44a92616520b023767602cfe605c8cee0d"}, + {file = "dimod-0.12.17-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f4a5e15cfac158901ecef45cd5e59d62d448112444b51b34bbd6881aca43c2d"}, + {file = "dimod-0.12.17-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38df2759283c4436b049aa675848a8b42b89d09edc9da557f957f3ff2d7c4fd2"}, + {file = "dimod-0.12.17-cp310-cp310-win_amd64.whl", hash = "sha256:868b1ceeabd312ed3a745c201d6fd1968af6eab70ce9b3a5192053b924f6349e"}, + {file = "dimod-0.12.17-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed67b18140bdce08fbbbb161ffb1908631c2313964ba3a79e5d6b5a89cf040dd"}, + {file = "dimod-0.12.17-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ebd0777cb872d5c8d3261a0a2a8957ad41c50db5244900382e8faf68a5a03d8"}, + {file = "dimod-0.12.17-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:052d65a504156cde868b0da139cbc549e1dea9630d02572f7f677d8525145801"}, + {file = "dimod-0.12.17-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6f2f534fcb36903326f6f7fbb6e60fea3a1461b686cf938a4ed3096d595ffa1"}, + {file = "dimod-0.12.17-cp311-cp311-win_amd64.whl", hash = "sha256:186b1906b7a00a48c294e61037cfa190495a0fb1c0b307f415d468ec74ee8e37"}, + {file = "dimod-0.12.17-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fee71b16388a1ee0eadd314e6b24abe773d33bd8fd03ca50fc8503fcf1546b8a"}, + {file = "dimod-0.12.17-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c61d45e434743b680d5455f4e5ec76379bc6e993caec5a72da00280165c74220"}, + {file = "dimod-0.12.17-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e79a7b604a68036f5126294c57717f65af9402ee371eb2eeac4632f3bedc4e9"}, + {file = "dimod-0.12.17-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ad20808472b6e01af4ab4534b2aa144ab921e56da7fa30c12c1ffb966db41f7"}, + {file = "dimod-0.12.17-cp312-cp312-win_amd64.whl", hash = "sha256:5d8ad0845132f2ad08db448e18cffcdde2b8de131f7bf5d1659568145fdea749"}, + {file = "dimod-0.12.17-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6d3a004ea15c511628429bf9204f1ce280aa7d64b7b1940071a74540602def3a"}, + {file 
= "dimod-0.12.17-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:84de576dcfa23b6062f7436e4988617ac6b755c4383229922afce1ba67f02b0b"}, + {file = "dimod-0.12.17-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37285569902b83814e477a25aa6ee0db0ea72a954889f617c49ec908d31e84e6"}, + {file = "dimod-0.12.17-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3307b52112464c976bd0e5c83ed789c308129a1bc19089903e664da82bc78bbf"}, + {file = "dimod-0.12.17-cp313-cp313-win_amd64.whl", hash = "sha256:a487cdc9470d8f2dcc5ad20a10ea6fb3b720ecfe8ca0509fc3980d76429498a8"}, + {file = "dimod-0.12.17-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cd0265ac780ca3ab5857579834ae4b037fe4e615b010314b2e1a38fd1e8c3c36"}, + {file = "dimod-0.12.17-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4dac0a814ccdbc4edd444b24935244b1ef9bd49f3a81bc142e082acff6a95c7c"}, + {file = "dimod-0.12.17-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e2a4aff63a30fa2ea289a5208623c9b0edb2cca105b95da97edce70b30dcbbb"}, + {file = "dimod-0.12.17-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef3dfb2ccc227681da3c43f13ef140aa51a55ec97a20c15a3f9142f4a826035"}, + {file = "dimod-0.12.17-cp38-cp38-win_amd64.whl", hash = "sha256:f47d095187ad54aa78409b19a0b711ce746125a72bdc0917f901a58d39fe0112"}, + {file = "dimod-0.12.17-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ca4f59edac66cce42c6014a41e698074bae9ed313dfd4e4ae9641de976349c9f"}, + {file = "dimod-0.12.17-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:14f6b12d8adec123f049439e7070286b26f42efbe0a2a59cd3c55757dca7f0aa"}, + {file = "dimod-0.12.17-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90a7d8703dd9419c02ebf487957efaa43a2062b88a2f63af0183b492d0dbacdd"}, + {file = "dimod-0.12.17-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:feba2e35185c1f2700411948f5197887fa5ba85990a15cb7068cf3de31ece995"}, + {file = "dimod-0.12.17-cp39-cp39-win_amd64.whl", hash = "sha256:effe038b1755846a594ceb1d359853642688ec9e164708b4f059e0d2a3ba12b5"}, + {file = "dimod-0.12.17.tar.gz", hash = "sha256:88f192164e86c07d01a0e4a6aee536da80434aa373374e8b4b3d393fdb27b224"}, +] + +[package.dependencies] +numpy = ">=1.17.3" + +[[package]] +name = "dwave-neal" +version = "0.6.0" +description = "General Ising graph simulated annealing solver" +optional = false +python-versions = ">=3.7" +files = [ + {file = "dwave-neal-0.6.0.tar.gz", hash = "sha256:8ce51fee3339195df1ab69920fdb5afc496b5fd945e487fad3547c983d90c564"}, + {file = "dwave_neal-0.6.0-py3-none-any.whl", hash = "sha256:8b7d89f0c52de6ac80e0f580ec272f6409b1cf9edb12250d22429425a13bd935"}, +] + +[package.dependencies] +dwave-samplers = ">=1.0.0,<2.0.0" + +[[package]] +name = "dwave-samplers" +version = "1.4.0" +description = "Ocean-compatible collection of solvers/samplers" +optional = false +python-versions = ">=3.9" +files = [ + {file = "dwave_samplers-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:268675c867efef88e97ca591e1bfcf52dc1ca6508cd210fa6f0fde29da0cec1e"}, + {file = "dwave_samplers-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5a995f7ec2287525e4b7b5d4c37ce7ee3d97b31965a271b7637c5a9da037612e"}, + {file = "dwave_samplers-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a35841abbfe2ceaa56036a71061df966568b3f03b4408e6990563491ba4da52"}, + {file = "dwave_samplers-1.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:647af252e69d3dfe2df3793f1fd21e397fe16a7bf04d999428e34a89ba0143e9"}, + {file = "dwave_samplers-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:4e8fe5e70319ed230bdd94b6d05984379adbc5cefb25610e99baf663d33b5169"}, + {file = "dwave_samplers-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:52633c950d4c73d399cf73db58382f413b3affe8f674731a9070a4da3bac75fa"}, + {file = "dwave_samplers-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8fd4e28d2d4ada343b9d9288ceef845463aade759fb614e91facb98c557323ee"}, + {file = "dwave_samplers-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fe5dee5bd3c9ddb6295335c0eb1b7b0056cdd0d64afcc6685506958f24d577a"}, + {file = "dwave_samplers-1.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9459c7f0629f51a965c814a9f42946ffcdb44477b8698503fc6fd0493c4ddfb8"}, + {file = "dwave_samplers-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:1ad0d07f5d48c31de239ed00688d91d2ced9627c9132f7eb108dd4c79ea4f3c2"}, + {file = "dwave_samplers-1.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:782b992e2e760730406a49afeac738e1fa8915cd65a25a811b1431124219f655"}, + {file = "dwave_samplers-1.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03d845f45435ec906cbcf4cc2c643ff060e0483c03b82f17679306c1721a286c"}, + {file = "dwave_samplers-1.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3246bc4383201ee05c0e554a2737e321354569130dd6a3d90cd091ddfd87411"}, + {file = "dwave_samplers-1.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db90f6e71e24b35da1e91d32d565bab7c5f5b138d7cb68208ef46b2137983263"}, + {file = "dwave_samplers-1.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:cc6b1a6831458385e3977afa0ae6f57fe6f52a76613dd138c398688f2f6e900b"}, + {file = "dwave_samplers-1.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:041434f9892c84d505a107f0ed78a5b0b43f6ec5be2bf0c8a9667838a39df1e8"}, + {file = "dwave_samplers-1.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9468a0095e5fcbc6fed0f526f06ccebe045c3570e80f7f414555cc489d328caf"}, + {file = "dwave_samplers-1.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be461795b7f85447d6a3c15b348d53836a2de081c94385c5077eb783c1e5cd15"}, + {file = "dwave_samplers-1.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88471baba97cc7f236c4b03c2210829fe41e1702acfc690a267bf5781c84c3b6"}, + {file = "dwave_samplers-1.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:88c97c606c4f5d45dc8183700939f2b6aa6ef95b0c2d93ad462c09487b03e3bd"}, + {file = "dwave_samplers-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46a779b7b8b92412f0acbce2d1ab49db54069e5d0a26327fe4c54f84a1730f20"}, + {file = "dwave_samplers-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f57cd1611f683dd98f552782b02685cc168dc9f66b71f9f0efafa653b125ca34"}, + {file = "dwave_samplers-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee6acda48d0cfb5ae514627007d7e0cdf9c883e3e1a8ac53320e040fee547b1a"}, + {file = "dwave_samplers-1.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9028d1aca1d887afaecceca560cbfd7bab3e14f65ea2758920bd62656ef8b5c"}, + {file = "dwave_samplers-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:80db29be04bae8860a04555298a2b7e808160a5bf1ebad4dfabd8243b2ffc743"}, + {file = "dwave_samplers-1.4.0.tar.gz", hash = "sha256:1e59384eb9dd8ee60c4167923c43ac0d36b2a8c950b572c7685cad590bd47156"}, +] + 
+[package.dependencies] +dimod = ">=0.12.13,<0.13.0" +networkx = ">=3.0" +numpy = ">=1.19.0,<3.0.0" + +[[package]] +name = "networkx" +version = "3.4.1" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.10" +files = [ + {file = "networkx-3.4.1-py3-none-any.whl", hash = "sha256:e30a87b48c9a6a7cc220e732bffefaee585bdb166d13377734446ce1a0620eed"}, + {file = "networkx-3.4.1.tar.gz", hash = "sha256:f9df45e85b78f5bd010993e897b4f1fdb242c11e015b101bd951e5c0e29982d8"}, +] + +[package.extras] +default = ["matplotlib (>=3.7)", "numpy (>=1.24)", "pandas (>=2.0)", "scipy (>=1.10,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.5)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["intersphinx-registry", "myst-nb (>=1.1)", "numpydoc (>=1.8.0)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.15)", "sphinx (>=7.3)", "sphinx-gallery (>=0.16)", "texext (>=0.6.7)"] +example = ["cairocffi (>=1.7)", "contextily (>=1.6)", "igraph (>=0.11)", "momepy (>=0.7.2)", "osmnx (>=1.9)", "scikit-learn (>=1.5)", "seaborn (>=0.13)"] +extra = ["lxml (>=4.6)", "pydot (>=3.0.1)", "pygraphviz (>=1.14)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + [[package]] name = "numpy" version = "2.1.2" @@ -126,7 +242,6 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.22.4", markers = "python_version < \"3.11\""}, {version = ">=1.23.2", markers = "python_version == \"3.11\""}, {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, ] @@ -238,5 +353,5 @@ files = [ [metadata] lock-version = "2.0" -python-versions = "^3.10" -content-hash = "91d28ab732ec8a07782756ffdde5c56ab2cf4133458515267e759cf2c9e943a9" +python-versions = "^3.11" +content-hash = "93eea99693bc2f9172dff84f64d384ef5f6e327485e1a6411d399a796caaa48c" diff --git a/pyproject.toml b/pyproject.toml index b3722ee..c0ac2aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,6 +10,7 @@ python = "^3.11" numpy = "^2.1.2" plotly = "^5.24.1" pandas = "^2.2.3" +dwave-neal = "^0.6.0" [build-system]
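
---
Editor's note (not part of the patch): the final hunk above pins `dwave-neal`, the simulated-annealing QUBO sampler that the project uses as a classical stand-in for D-Wave hardware. As a minimal sketch of how that dependency is exercised once installed — the toy QUBO dict `Q` below is a placeholder, not taken from the patch:

```python
# Hypothetical usage sketch: sample a toy QUBO with the dwave-neal
# simulated annealing sampler added by this patch.
import neal

# Toy QUBO: minimise x0 + x1 - 2*x0*x1 (ground states 00 and 11, energy 0).
Q = {(0, 0): 1.0, (1, 1): 1.0, (0, 1): -2.0}

sampler = neal.SimulatedAnnealingSampler()
sampleset = sampler.sample_qubo(Q, num_reads=100)

# Best sample found and its energy.
print(sampleset.first.sample, sampleset.first.energy)
```

In the project itself, `Q` would be the (much larger) QUBO produced by the model-building step, as in `examples/sample_qubo_neal.py`.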