From 969234568b8811e502296b7b1517160365cb8f9d Mon Sep 17 00:00:00 2001
From: Jakob van Santen
Date: Wed, 25 Sep 2024 13:32:41 +0200
Subject: [PATCH 1/6] sort imports

---
 conda-recipes/nusigma/nusigma/__init__.py     |  6 ++--
 conda-recipes/nusigma/setup.py                |  3 +-
 notebooks/data preparation/PSF fitting.ipynb  | 21 +++++++------
 .../Fictive optical detector.ipynb            |  9 +++---
 notebooks/paper plots/radio_demo_plots.ipynb  |  5 +--
 .../001-Framework_Introduction.ipynb          |  6 ++--
 ...002-Transient_Population_Sensitivity.ipynb |  8 +++--
 notebooks/tutorials/003-Effective_Areas.ipynb |  4 ++-
 .../004-Radio_Event_Distributions.ipynb       |  8 ++---
 resources/docs/plots/angular_resolution.py    |  2 +-
 .../docs/plots/cascade_production_density.py  |  5 +--
 resources/docs/plots/cascade_volume.py        |  3 +-
 resources/docs/plots/geometries.py            |  9 +++---
 .../docs/plots/muon_production_efficiency.py  |  5 +--
 resources/docs/plots/ps_sensitivity_demo.py   | 13 ++++----
 resources/docs/plots/selection_efficiency.py  |  2 +-
 .../scripts/2021_midscale/calc_radio_sens.py  | 30 ++++++++++--------
 resources/scripts/2021_midscale/calc_sens.py  | 23 ++++++++------
 .../scripts/2021_midscale/count_events.ipynb  |  3 +-
 .../scripts/2021_midscale/extract_PSF.py      | 22 ++++++-------
 .../extract_energy_resolution.py              |  9 +++---
 resources/scripts/2021_midscale/plot_PSF.py   |  7 +++--
 .../scripts/2021_midscale/plot_radio_sens.py  |  8 +++--
 resources/scripts/2021_midscale/plot_sens.py  |  8 +++--
 resources/scripts/data prep/fit_psf.py        | 10 +++---
 resources/scripts/data prep/gcd_to_txt.py     |  4 +--
 .../data prep/muon_energy_resolution.py       |  5 +--
 .../data prep/muon_selection_efficiency.py    |  7 ++---
 setup.py                                      |  5 +--
 tests/test_nufate.py                          |  3 +-
 tests/test_nuflux.py                          |  1 +
 tests/test_sensitivity.py                     |  5 +--
 toise/angular_resolution.py                   | 10 +++---
 toise/cache.py                                | 19 ++++++------
 toise/classification_efficiency.py            |  6 ++--
 toise/diffuse.py                              | 23 +++++++-------
 toise/effective_areas.py                      | 31 ++++++++++---------
 toise/energy_resolution.py                    |  5 +--
 .../externals/AtmosphericSelfVeto/__init__.py |  2 +-
 .../externals/AtmosphericSelfVeto/selfveto.py |  2 +-
 toise/externals/nuFATE/__init__.py            |  4 ++-
 toise/externals/nuFATE/crosssections.py       | 13 +++++---
 toise/externals/ternary.py                    | 24 +++++++-------
 toise/factory.py                              | 22 +++++++------
 toise/fictive.py                              |  5 +--
 toise/figures/cli.py                          | 22 +++++++------
 toise/figures/detector/__init__.py            | 21 ++++++++-----
 toise/figures/detector/cascades.py            | 19 +++++++-----
 toise/figures/detector/tracks.py              |  5 +--
 .../diffuse/E2_Gen2_Long_whitepaper.py        |  8 ++---
 .../diffuse/UHE_nevents_Gen2_flux_models.py   | 18 +++++------
 toise/figures/diffuse/flavor.py               | 24 +++++++-------
 toise/figures/diffuse/galactic.py             | 19 ++++++------
 toise/figures/diffuse/single_power_law.py     | 30 +++++++++---------
 toise/figures/diffuse/spectrum.py             | 22 ++++++-------
 toise/figures/pointsource/flare.py            |  5 +--
 toise/figures_of_merit.py                     | 12 +++++--
 toise/grb.py                                  |  8 +++--
 toise/multillh.py                             |  3 +-
 toise/plotting.py                             |  4 +--
 toise/pointsource.py                          | 14 +++++----
 toise/radio_aeff_generation.py                | 22 +++++++------
 toise/radio_muon_background.py                |  6 ++--
 toise/radio_response.py                       |  7 ++---
 toise/salyut.py                               |  3 +-
 toise/surface_veto.py                         | 16 +++++-----
 toise/surfaces.py                             |  5 +--
 toise/transient.py                            |  6 ++--
 toise/util.py                                 |  7 +++--
 69 files changed, 407 insertions(+), 324 deletions(-)

diff --git a/conda-recipes/nusigma/nusigma/__init__.py b/conda-recipes/nusigma/nusigma/__init__.py
index c9a5a62..ac67f7f 100644
--- a/conda-recipes/nusigma/nusigma/__init__.py
+++ b/conda-recipes/nusigma/nusigma/__init__.py
@@ -1,7 +1,7 @@
-from ._nusigma import *
-
 # set path to table directory
-from os.path import dirname, realpath, expandvars
+from os.path import dirname, expandvars, realpath
+
+from ._nusigma import *
 
 basedir = dirname(realpath(expandvars(__file__))) + "/"
 nusetup(basedir)
diff --git a/conda-recipes/nusigma/setup.py b/conda-recipes/nusigma/setup.py
index 5621a90..9eec7aa 100644
--- a/conda-recipes/nusigma/setup.py
+++ b/conda-recipes/nusigma/setup.py
@@ -1,5 +1,4 @@
-from numpy.distutils.core import setup
-from numpy.distutils.core import Extension
+from numpy.distutils.core import Extension, setup
 
 setup(
     name="nusigma",
diff --git a/notebooks/data preparation/PSF fitting.ipynb b/notebooks/data preparation/PSF fitting.ipynb
index 98e6e43..7dd6f09 100644
--- a/notebooks/data preparation/PSF fitting.ipynb
+++ b/notebooks/data preparation/PSF fitting.ipynb
@@ -16,20 +16,21 @@
    "outputs": [],
    "source": [
     "%matplotlib inline\n",
+    "import os\n",
+    "import warnings\n",
+    "\n",
+    "import autograd\n",
+    "import autograd.numpy as n\n",
     "import matplotlib.pyplot as plt\n",
-    "import tables\n",
-    "import pandas as pd\n",
     "import numpy as np\n",
-    "from toolz import memoize\n",
-    "from toise import plotting, surfaces, util, effective_areas, angular_resolution\n",
-    "\n",
+    "import pandas as pd\n",
     "import photospline\n",
-    "from scipy import optimize\n",
+    "import tables\n",
     "from autograd.misc.flatten import flatten_func\n",
-    "import autograd\n",
-    "import autograd.numpy as n\n",
-    "import os\n",
-    "import warnings\n",
+    "from scipy import optimize\n",
+    "from toolz import memoize\n",
+    "\n",
+    "from toise import angular_resolution, effective_areas, plotting, surfaces, util\n",
     "\n",
     "warnings.filterwarnings(\"ignore\")  # turn off warnings"
   ]
diff --git a/notebooks/paper plots/Fictive optical detector.ipynb b/notebooks/paper plots/Fictive optical detector.ipynb
index f12e23a..aa2c95f 100644
--- a/notebooks/paper plots/Fictive optical detector.ipynb
+++ b/notebooks/paper plots/Fictive optical detector.ipynb
@@ -6,12 +6,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from toise import effective_areas, angular_resolution\n",
-    "import matplotlib.pyplot as plt\n",
     "import matplotlib.gridspec as gridspec\n",
-    "from matplotlib.ticker import NullFormatter\n",
+    "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
-    "from matplotlib import style"
+    "from matplotlib import style\n",
+    "from matplotlib.ticker import NullFormatter\n",
+    "\n",
+    "from toise import angular_resolution, effective_areas"
   ]
  },
 {
diff --git a/notebooks/paper plots/radio_demo_plots.ipynb b/notebooks/paper plots/radio_demo_plots.ipynb
index 4cf642c..af93aa1 100644
--- a/notebooks/paper plots/radio_demo_plots.ipynb
+++ b/notebooks/paper plots/radio_demo_plots.ipynb
@@ -15,9 +15,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from toise import radio_response\n",
-    "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "\n",
+    "from toise import radio_response\n",
     "\n",
     "plt.rcParams[\"figure.dpi\"] = 300\n",
     "plt.rcParams[\"figure.figsize\"] = (6, 5)"
   ]
diff --git a/notebooks/tutorials/001-Framework_Introduction.ipynb b/notebooks/tutorials/001-Framework_Introduction.ipynb
index 5ed46fe..6f307c0 100644
--- a/notebooks/tutorials/001-Framework_Introduction.ipynb
+++ b/notebooks/tutorials/001-Framework_Introduction.ipynb
@@ -7,9 +7,10 @@
    "outputs": [],
    "source": [
     "%matplotlib inline\n",
-    "import matplotlib.pyplot as plt\n",
     "import warnings\n",
     "\n",
+    "import matplotlib.pyplot as plt\n",
"\n", "warnings.filterwarnings(\"ignore\") # turn off warnings" ] }, @@ -44,6 +45,7 @@ ], "source": [ "import numpy\n", + "\n", "from toise import factory\n", "\n", "factory.set_kwargs(psi_bins={k: [0, numpy.pi] for k in (\"tracks\", \"cascades\", \"radio\")})" @@ -325,8 +327,8 @@ } ], "source": [ - "from scipy.optimize import bisect\n", "from scipy import stats\n", + "from scipy.optimize import bisect\n", "\n", "# test statistic between astro = f and astro = 0\n", "ts = lambda f: -2 * (llh.llh(**llh.fit(astro=f)) - llh.llh(**llh.fit(astro=0)))\n", diff --git a/notebooks/tutorials/002-Transient_Population_Sensitivity.ipynb b/notebooks/tutorials/002-Transient_Population_Sensitivity.ipynb index 8e55925..e4bae9c 100644 --- a/notebooks/tutorials/002-Transient_Population_Sensitivity.ipynb +++ b/notebooks/tutorials/002-Transient_Population_Sensitivity.ipynb @@ -35,10 +35,12 @@ "from matplotlib import style\n", "\n", "style.use(\"../../figures/toise.mplstyle\")\n", - "import numpy as np\n", - "from functools import partial\n", - "from toise import factory, diffuse, surface_veto, pointsource, grb, multillh, plotting\n", "import warnings\n", + "from functools import partial\n", + "\n", + "import numpy as np\n", + "\n", + "from toise import diffuse, factory, grb, multillh, plotting, pointsource, surface_veto\n", "\n", "warnings.filterwarnings(\"ignore\") # turn off warnings" ] diff --git a/notebooks/tutorials/003-Effective_Areas.ipynb b/notebooks/tutorials/003-Effective_Areas.ipynb index a67d965..b7116c0 100644 --- a/notebooks/tutorials/003-Effective_Areas.ipynb +++ b/notebooks/tutorials/003-Effective_Areas.ipynb @@ -22,10 +22,12 @@ ], "source": [ "%matplotlib inline\n", + "import warnings\n", + "\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", + "\n", "from toise import factory, plotting\n", - "import warnings\n", "\n", "warnings.filterwarnings(\"ignore\") # turn off warnings" ] diff --git a/notebooks/tutorials/004-Radio_Event_Distributions.ipynb b/notebooks/tutorials/004-Radio_Event_Distributions.ipynb index 9749344..dc63435 100644 --- a/notebooks/tutorials/004-Radio_Event_Distributions.ipynb +++ b/notebooks/tutorials/004-Radio_Event_Distributions.ipynb @@ -36,9 +36,9 @@ "outputs": [], "source": [ "import numpy as np\n", + "\n", "import toise\n", - "from toise import radio_aeff_generation\n", - "from toise import factory\n", + "from toise import factory, radio_aeff_generation\n", "from toise.util import constants" ] }, @@ -574,9 +574,9 @@ } ], "source": [ - "from toise import diffuse\n", "from matplotlib.colors import LogNorm # logarithmic coloring\n", "\n", + "from toise import diffuse\n", "\n", "astro = diffuse.DiffuseAstro(radio_aeff, nyears).expectations()\n", "\n", @@ -750,7 +750,7 @@ } ], "source": [ - "from toise.diffuse import AhlersGZKFlux, VanVlietGZKFlux, ReasonableGZKFlux\n", + "from toise.diffuse import AhlersGZKFlux, ReasonableGZKFlux, VanVlietGZKFlux\n", "\n", "logEmin = 7\n", "logEmax = 10\n", diff --git a/resources/docs/plots/angular_resolution.py b/resources/docs/plots/angular_resolution.py index aa1c8c9..8fb6e54 100644 --- a/resources/docs/plots/angular_resolution.py +++ b/resources/docs/plots/angular_resolution.py @@ -1,8 +1,8 @@ import matplotlib.pyplot as plt import numpy as np +from icecube.toise import angular_resolution, plotting from matplotlib.gridspec import GridSpec from mpl_toolkits.axes_grid.anchored_artists import AnchoredText -from icecube.toise import plotting, angular_resolution configs = [ ("IceCube", None), diff --git 
diff --git a/resources/docs/plots/cascade_production_density.py b/resources/docs/plots/cascade_production_density.py
index 7b75232..70e57f1 100644
--- a/resources/docs/plots/cascade_production_density.py
+++ b/resources/docs/plots/cascade_production_density.py
@@ -1,9 +1,10 @@
+import itertools
+
 import matplotlib.pyplot as plt
 import numpy as np
-import itertools
+from icecube.toise import effective_areas, plotting, util
 from matplotlib.gridspec import GridSpec
 from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
-from icecube.toise import plotting, effective_areas, util
 
 # get muon production efficiency
 edges, muprod = effective_areas.get_cascade_production_density()
diff --git a/resources/docs/plots/cascade_volume.py b/resources/docs/plots/cascade_volume.py
index 2fc68f5..4e33b73 100644
--- a/resources/docs/plots/cascade_volume.py
+++ b/resources/docs/plots/cascade_volume.py
@@ -2,7 +2,8 @@
 import numpy as np
 from matplotlib.gridspec import GridSpec
 from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
-from toise import plotting, effective_areas
+
+from toise import effective_areas, plotting
 
 configs = [
     ("IceCube", 125.0),
diff --git a/resources/docs/plots/geometries.py b/resources/docs/plots/geometries.py
index b271758..ce03a19 100644
--- a/resources/docs/plots/geometries.py
+++ b/resources/docs/plots/geometries.py
@@ -1,14 +1,15 @@
+import itertools
+
 import matplotlib.pyplot as plt
 import numpy as np
-import itertools
+from icecube import dataclasses, dataio, icetray
+from icecube.toise import effective_areas, plotting, surfaces
 from matplotlib.gridspec import GridSpec
 from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
-from icecube.toise import plotting, surfaces, effective_areas
-from icecube import icetray, dataclasses, dataio
 
 
 def string_heads(gcdfile):
-    from icecube import icetray, dataio, dataclasses
+    from icecube import dataclasses, dataio, icetray
 
     f = dataio.I3File(gcdfile)
     omgeo = f.pop_frame(icetray.I3Frame.Geometry)["I3Geometry"].omgeo
diff --git a/resources/docs/plots/muon_production_efficiency.py b/resources/docs/plots/muon_production_efficiency.py
index 4a78a7a..a971c3f 100644
--- a/resources/docs/plots/muon_production_efficiency.py
+++ b/resources/docs/plots/muon_production_efficiency.py
@@ -1,9 +1,10 @@
+import itertools
+
 import matplotlib.pyplot as plt
 import numpy as np
-import itertools
+from icecube.toise import effective_areas, plotting, util
 from matplotlib.gridspec import GridSpec
 from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
-from icecube.toise import plotting, effective_areas, util
 
 # get muon production efficiency
 edges, muprod = effective_areas.get_muon_production_efficiency()
diff --git a/resources/docs/plots/ps_sensitivity_demo.py b/resources/docs/plots/ps_sensitivity_demo.py
index 8d513f8..11116c0 100644
--- a/resources/docs/plots/ps_sensitivity_demo.py
+++ b/resources/docs/plots/ps_sensitivity_demo.py
@@ -1,15 +1,16 @@
 import matplotlib.pyplot as plt
 import numpy as np
-from matplotlib.gridspec import GridSpec
-from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
 from icecube.toise import (
-    plotting,
-    effective_areas,
+    angular_resolution,
     diffuse,
-    pointsource,
+    effective_areas,
+    energy_resolution,
     multillh,
+    plotting,
+    pointsource,
 )
-from icecube.toise import angular_resolution, energy_resolution
+from matplotlib.gridspec import GridSpec
+from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
 
 
 def create_aeff(
diff --git a/resources/docs/plots/selection_efficiency.py b/resources/docs/plots/selection_efficiency.py
index a6599d6..b33a0e9 100644
--- a/resources/docs/plots/selection_efficiency.py
+++ b/resources/docs/plots/selection_efficiency.py
@@ -1,8 +1,8 @@
 import matplotlib.pyplot as plt
 import numpy as np
+from icecube.toise import effective_areas, plotting
 from matplotlib.gridspec import GridSpec
 from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
-from icecube.toise import plotting, effective_areas
 
 configs = [
     ("IceCube", 125.0),
diff --git a/resources/scripts/2021_midscale/calc_radio_sens.py b/resources/scripts/2021_midscale/calc_radio_sens.py
index 1866a13..43d9e4b 100644
--- a/resources/scripts/2021_midscale/calc_radio_sens.py
+++ b/resources/scripts/2021_midscale/calc_radio_sens.py
@@ -1,22 +1,26 @@
+import json
+from collections import OrderedDict, defaultdict
+
+import numpy as np
+from tqdm import tqdm
+
+# generate aeff for radio
 from toise import (
-    effective_areas,
-    diffuse,
-    pointsource,
     angular_resolution,
+    diffuse,
+    effective_areas,
+    factory,
+    figures,
+    figures_of_merit,
     grb,
-    surface_veto,
     multillh,
     plotting,
+    pointsource,
+    radio_aeff_generation,
+    surface_veto,
+    util,
 )
-from toise import factory, figures_of_merit, util, figures
-from toise.util import data_dir, center
-from tqdm import tqdm
-import numpy as np
-from collections import OrderedDict, defaultdict
-import json
-
-# generate aeff for radio
-from toise import radio_aeff_generation
+from toise.util import center, data_dir
 
 # this file is basically a copy/strip down of the figures.pointsource.flare.sensitivity workbook
diff --git a/resources/scripts/2021_midscale/calc_sens.py b/resources/scripts/2021_midscale/calc_sens.py
index cc7123a..a54dab0 100644
--- a/resources/scripts/2021_midscale/calc_sens.py
+++ b/resources/scripts/2021_midscale/calc_sens.py
@@ -1,18 +1,23 @@
+import json
+from collections import OrderedDict, defaultdict
+
+from tqdm import tqdm
+
 from toise import (
-    effective_areas,
-    diffuse,
-    pointsource,
     angular_resolution,
+    diffuse,
+    effective_areas,
+    factory,
+    figures,
+    figures_of_merit,
     grb,
-    surface_veto,
     multillh,
     plotting,
+    pointsource,
+    surface_veto,
+    util,
 )
-from toise import factory, figures_of_merit, util, figures
-from toise.util import data_dir, center
-from tqdm import tqdm
-from collections import OrderedDict, defaultdict
-import json
+from toise.util import center, data_dir
 
 # this file is basically a copy/strip down of the figures.pointsource.flare.sensitivity workbook
diff --git a/resources/scripts/2021_midscale/count_events.ipynb b/resources/scripts/2021_midscale/count_events.ipynb
index 16b56a6..f7c5c41 100644
--- a/resources/scripts/2021_midscale/count_events.ipynb
+++ b/resources/scripts/2021_midscale/count_events.ipynb
@@ -7,7 +7,8 @@
    "outputs": [],
    "source": [
     "import numpy\n",
-    "from toise import factory, diffuse, plotting, effective_areas\n",
+    "\n",
+    "from toise import diffuse, effective_areas, factory, plotting\n",
     "\n",
     "%matplotlib inline\n",
     "import matplotlib.pyplot as plt"
   ]
diff --git a/resources/scripts/2021_midscale/extract_PSF.py b/resources/scripts/2021_midscale/extract_PSF.py
index aee885c..949bea3 100644
--- a/resources/scripts/2021_midscale/extract_PSF.py
+++ b/resources/scripts/2021_midscale/extract_PSF.py
@@ -1,19 +1,19 @@
-import matplotlib.pyplot as plt
-import tables
-import pandas as pd
-import numpy as np
-from toolz import memoize
-from toise import plotting, surfaces
+import argparse
+import os
 
-import photospline
-from scipy import optimize
-from autograd.misc.flatten import flatten_func
 import autograd
 import autograd.numpy as n
-import os
 import h5py
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import photospline
+import tables
+from autograd.misc.flatten import flatten_func
+from scipy import optimize
+from toolz import memoize
 
-import argparse
+from toise import plotting, surfaces
 
 parser = argparse.ArgumentParser()
 parser.add_argument(
diff --git a/resources/scripts/2021_midscale/extract_energy_resolution.py b/resources/scripts/2021_midscale/extract_energy_resolution.py
index 51f3d05..1eaa412 100644
--- a/resources/scripts/2021_midscale/extract_energy_resolution.py
+++ b/resources/scripts/2021_midscale/extract_energy_resolution.py
@@ -1,11 +1,12 @@
+import argparse
+
 import matplotlib.pyplot as plt
-import tables
-import pandas as pd
 import numpy as np
+import pandas as pd
+import tables
 from toolz import memoize
-from toise import plotting, surfaces
 
-import argparse
+from toise import plotting, surfaces
 
 parser = argparse.ArgumentParser()
 parser.add_argument(
diff --git a/resources/scripts/2021_midscale/plot_PSF.py b/resources/scripts/2021_midscale/plot_PSF.py
index 2f205f3..631d9fc 100644
--- a/resources/scripts/2021_midscale/plot_PSF.py
+++ b/resources/scripts/2021_midscale/plot_PSF.py
@@ -1,9 +1,10 @@
+import argparse
+
 import matplotlib.pyplot as plt
 import numpy as np
-from toise import plotting, angular_resolution, effective_areas, surfaces, util
-from scipy.optimize import fsolve, bisect
+from scipy.optimize import bisect, fsolve
 
-import argparse
+from toise import angular_resolution, effective_areas, plotting, surfaces, util
 
 parser = argparse.ArgumentParser()
 parser.add_argument(
diff --git a/resources/scripts/2021_midscale/plot_radio_sens.py b/resources/scripts/2021_midscale/plot_radio_sens.py
index 108c8a3..48a4d9f 100644
--- a/resources/scripts/2021_midscale/plot_radio_sens.py
+++ b/resources/scripts/2021_midscale/plot_radio_sens.py
@@ -1,8 +1,10 @@
+import gzip
+import json
+
 import matplotlib.pyplot as plt
 import numpy as np
-from toise import util, grb
-import json
-import gzip
+
+from toise import grb, util
 
 """
 This code plots the sensitivity or discovery potential (or whatever) for a detector
diff --git a/resources/scripts/2021_midscale/plot_sens.py b/resources/scripts/2021_midscale/plot_sens.py
index 7f97c85..3595bd1 100644
--- a/resources/scripts/2021_midscale/plot_sens.py
+++ b/resources/scripts/2021_midscale/plot_sens.py
@@ -1,8 +1,10 @@
+import gzip
+import json
+
 import matplotlib.pyplot as plt
 import numpy as np
-from toise import util, grb
-import json
-import gzip
+
+from toise import grb, util
 
 """
 This code plots the sensitivity or discovery potential (or whatever) for a detector
diff --git a/resources/scripts/data prep/fit_psf.py b/resources/scripts/data prep/fit_psf.py
index 11c53ea..6a56a53 100755
--- a/resources/scripts/data prep/fit_psf.py
+++ b/resources/scripts/data prep/fit_psf.py
@@ -1,11 +1,13 @@
 #!/bin/sh /cvmfs/icecube.opensciencegrid.org/py2-v1/icetray-start
 # METAPROJECT /data/user/jvansanten/tarballs/offline-software.openblas.2015-03-06
+import os
+from collections import defaultdict
+
+import numpy
 from icecube.photospline import spglam as glam
-from icecube.photospline.utils import pad_knots
 from icecube.photospline import splinefitstable
-from collections import defaultdict
-import numpy, os
+from icecube.photospline.utils import pad_knots
 
 
 def create_psf(
@@ -88,8 +90,8 @@ def fit_psf(h, smooth=1e-6):
 
 
 def plot_psf(h, spline, cos_theta=0):
-    import matplotlib.pyplot as plt
     import dashi
+    import matplotlib.pyplot as plt
 
     dashi.visual()
diff --git a/resources/scripts/data prep/gcd_to_txt.py b/resources/scripts/data prep/gcd_to_txt.py
index a070c14..2216c6e 100755
--- a/resources/scripts/data prep/gcd_to_txt.py
+++ b/resources/scripts/data prep/gcd_to_txt.py
@@ -8,9 +8,7 @@
 args = parser.parse_args()
 
 import numpy
-from icecube import icetray, dataclasses, dataio
-
-from icecube import icetray, dataio, dataclasses
+from icecube import dataclasses, dataio, icetray
 
 f = dataio.I3File(args.gcd_file)
 fr = f.pop_frame(icetray.I3Frame.Geometry)
diff --git a/resources/scripts/data prep/muon_energy_resolution.py b/resources/scripts/data prep/muon_energy_resolution.py
index cfc2ce3..896229f 100755
--- a/resources/scripts/data prep/muon_energy_resolution.py
+++ b/resources/scripts/data prep/muon_energy_resolution.py
@@ -15,9 +15,10 @@ def plot_energy_resolution(h2, axis_range=(2.5, 7)):
     dashi.visual()
 
     import matplotlib.pyplot as plt
-    from matplotlib import gridspec, cm
+    from matplotlib import cm, gridspec
     from matplotlib.colors import LogNorm
     from matplotlib.ticker import NullFormatter
+
     from toise import plotting
 
     with plotting.pretty(tex=False):
@@ -110,8 +111,8 @@ def save_energy_resolution_profile(h2, fname, smoothing=1e-2):
 
 
 if __name__ == "__main__":
-    import tables
     import dashi
+    import tables
 
     with tables.open_file(
         "/Users/brianclark/Documents/work/Gen2/gen2_optical/gen2-analysis/toise/data/energy_reconstruction/aachen_muon_energy_resolution.hdf5"
diff --git a/resources/scripts/data prep/muon_selection_efficiency.py b/resources/scripts/data prep/muon_selection_efficiency.py
index 4b77cd9..2b82448 100755
--- a/resources/scripts/data prep/muon_selection_efficiency.py
+++ b/resources/scripts/data prep/muon_selection_efficiency.py
@@ -117,9 +117,10 @@ def fit_muon_selection_efficiency(efficiency, error, binedges, smoothing=1):
 
     opts = parser.parse_args()
 
-    from icecube.photospline import splinefitstable
     import os
+
     import tables
+    from icecube.photospline import splinefitstable
 
     with tables.open_file(opts.infile[0]) as hdf:
         if opts.cuts is not None:
@@ -144,10 +145,8 @@ def fit_muon_selection_efficiency(efficiency, error, binedges, smoothing=1):
 
     if opts.plot:
         import matplotlib.pyplot as plt
-        from icecube.toise import plotting
-        from icecube.photospline import spglam as glam
 
-        from icecube.photospline import spglam as glam
+        from icecube.photospline import spglam as glam
+        from icecube.toise import plotting
 
         with plotting.pretty(tex=False):
             ax = plt.gca()
diff --git a/setup.py b/setup.py
index f8cd2cf..2e1abee 100755
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env python
 
 from distutils.core import setup
+from os import environ, mkdir, path, unlink
+from subprocess import PIPE, check_call
+
 from setuptools import find_packages
-from subprocess import check_call, PIPE
-from os import path, unlink, environ, mkdir
 
 cwd = path.join(path.dirname(__file__), "toise", "data")
 if not path.isdir(cwd):
diff --git a/tests/test_nufate.py b/tests/test_nufate.py
index 03e3862..6a79b35 100644
--- a/tests/test_nufate.py
+++ b/tests/test_nufate.py
@@ -1,6 +1,7 @@
+import numpy as np
+
 from toise.externals import nuFATE
 from toise.util import center
-import numpy as np
 
 
 def test_cache():
diff --git a/tests/test_nuflux.py b/tests/test_nuflux.py
index 2545957..5131223 100644
--- a/tests/test_nuflux.py
+++ b/tests/test_nuflux.py
@@ -2,6 +2,7 @@
 
 import numpy as np
 import pytest
+
 from toise.diffuse import AtmosphericNu
 from toise.effective_areas import effective_area
 from toise.externals.AtmosphericSelfVeto import AnalyticPassingFraction
diff --git a/tests/test_sensitivity.py b/tests/test_sensitivity.py
index 25956f2..5232464 100644
--- a/tests/test_sensitivity.py
+++ b/tests/test_sensitivity.py
@@ -2,11 +2,12 @@
 
 import numpy as np
 import pytest
+from scipy import stats
+from scipy.optimize import bisect
+
 from toise import diffuse, factory, figures_of_merit, multillh, surface_veto
 from toise.effective_areas import effective_area
 from toise.figures import figure
-from scipy import stats
-from scipy.optimize import bisect
 
 
 @pytest.fixture(scope="session")
diff --git a/toise/angular_resolution.py b/toise/angular_resolution.py
index 1c03fa1..c4d7626 100644
--- a/toise/angular_resolution.py
+++ b/toise/angular_resolution.py
@@ -1,9 +1,10 @@
-from scipy import interpolate, stats
-import pickle
 import os
+import pickle
+
 import numpy
+from scipy import interpolate, stats
 
-from .util import data_dir, center
+from .util import center, data_dir
 
 
 def get_angular_resolution(
@@ -161,8 +162,9 @@ def __init__(
         self, fname="Sunflower_240_kingpsf4", psf_class=(0, 4), scale=1.0, **kwargs
     ):
         super(KingPointSpreadFunction, self).__init__(**kwargs)
-        import pandas as pd
         import operator
+
+        import pandas as pd
         from scipy import interpolate
 
         if not fname.startswith("/"):
diff --git a/toise/cache.py b/toise/cache.py
index b65df08..a2269f8 100644
--- a/toise/cache.py
+++ b/toise/cache.py
@@ -1,16 +1,17 @@
 # Custom cache instance class must implement AbstractCacheInstance interface:
-from easy_cache import caches
-from easy_cache import ecached, ecached_property, create_cache_key
-from easy_cache.abc import AbstractCacheInstance
-from easy_cache.core import DEFAULT_TIMEOUT, NOT_FOUND
-from .util import data_dir
-from os.path import join
-from os import listdir, unlink
-import pickle as pickle
-from photospline import SplineTable
 import gzip
 import json
+import pickle as pickle
 import time
+from os import listdir, unlink
+from os.path import join
+
+from easy_cache import caches, create_cache_key, ecached, ecached_property
+from easy_cache.abc import AbstractCacheInstance
+from easy_cache.core import DEFAULT_TIMEOUT, NOT_FOUND
+from photospline import SplineTable
+
+from .util import data_dir
 
 
 class PickleCache(AbstractCacheInstance):
diff --git a/toise/classification_efficiency.py b/toise/classification_efficiency.py
index 0b4b0bb..b6a3d5c 100644
--- a/toise/classification_efficiency.py
+++ b/toise/classification_efficiency.py
@@ -3,12 +3,14 @@
 contained-vertex events into single cascades, starting tracks, and double
 cascades.
 """
-import numpy as np
-from .util import data_dir
 import json
 import os
 import warnings
 
+import numpy as np
+
+from .util import data_dir
+
 
 def get_classification_efficiency(geometry="IceCube", spacing=125):
     if geometry == "Fictive":
diff --git a/toise/diffuse.py b/toise/diffuse.py
index 54677f5..c2aab72 100644
--- a/toise/diffuse.py
+++ b/toise/diffuse.py
@@ -1,21 +1,22 @@
-import warnings
-import numpy as np
 import itertools
-from scipy.integrate import quad
-from io import StringIO
-from copy import copy
-from . import multillh
-import healpy
+import logging
 import os
-import numexpr
 import pickle as pickle
-import logging
-from functools import partial
+import warnings
+from copy import copy
 from enum import Enum
+from functools import partial
+from io import StringIO
+
+import healpy
 import nuflux
+import numexpr
+import numpy as np
+from scipy.integrate import quad
 
-from .util import *
+from . import multillh
 from .pointsource import is_zenith_weight
+from .util import *
 
 try:
     from functools import lru_cache
diff --git a/toise/effective_areas.py b/toise/effective_areas.py
index b10d3b4..171afbe 100644
--- a/toise/effective_areas.py
+++ b/toise/effective_areas.py
@@ -1,19 +1,19 @@
+import copy
+import itertools
+import os
+import warnings
+
 import dashi
+import healpy
+import numpy
+import numpy as np
 import tables
 from scipy import interpolate
-import numpy as np
-
-import os
-import numpy
-import itertools
-import healpy
-import warnings
-import copy
-from .surfaces import get_fiducial_surface
-from .energy_resolution import get_energy_resolution
 from .angular_resolution import get_angular_resolution
 from .classification_efficiency import get_classification_efficiency
+from .energy_resolution import get_energy_resolution
+from .surfaces import get_fiducial_surface
 from .util import *
@@ -574,8 +574,8 @@ def create_bundle_aeff(
     # 2) Selection efficiency
     # 3) Veto coverage
 
-    import tables
     import dashi
+    import tables
     from scipy.special import erf
 
     nside = None
@@ -677,8 +677,8 @@ def create_throughgoing_aeff(
     # 4) Point spread function
     # 5) Energy resolution
 
-    import tables
     import dashi
+    import tables
     from scipy.special import erf
 
     nside = None
@@ -763,8 +763,8 @@ def create_cascade_aeff(
     # 4) Point spread function
     # 5) Energy resolution
 
-    import tables
     import dashi
+    import tables
     from scipy.special import erf
 
     nside = None
@@ -837,8 +837,8 @@ def create_starting_aeff(
     # 4) Point spread function
     # 5) Energy resolution
 
-    import tables
     import dashi
+    import tables
     from scipy.special import erf
 
     nside = None
@@ -1034,9 +1034,10 @@ def _load_radio_veff(filename):
     """
     :returns: a tuple (edges, veff). veff has units of m^3
     """
-    import pandas as pd
     import json
+
+    import pandas as pd
 
     if not filename.startswith("/"):
         filename = os.path.join(data_dir, "aeff", filename)
     with open(filename) as f:
diff --git a/toise/energy_resolution.py b/toise/energy_resolution.py
index 7d773ae..fbc312f 100644
--- a/toise/energy_resolution.py
+++ b/toise/energy_resolution.py
@@ -1,7 +1,8 @@
-from scipy import interpolate
-import pickle
 import os
+import pickle
+
 import numpy
+from scipy import interpolate
 from scipy.special import erf
 
 from .util import data_dir
diff --git a/toise/externals/AtmosphericSelfVeto/__init__.py b/toise/externals/AtmosphericSelfVeto/__init__.py
index 3c85b7e..69ef4d7 100644
--- a/toise/externals/AtmosphericSelfVeto/__init__.py
+++ b/toise/externals/AtmosphericSelfVeto/__init__.py
@@ -1,7 +1,7 @@
 import numpy
 
-from . import selfveto
 from ...util import PDGCode, data_dir
+from . import selfveto
 
 
 class AnalyticPassingFraction(object):
diff --git a/toise/externals/AtmosphericSelfVeto/selfveto.py b/toise/externals/AtmosphericSelfVeto/selfveto.py
index 0f709e4..01b7439 100755
--- a/toise/externals/AtmosphericSelfVeto/selfveto.py
+++ b/toise/externals/AtmosphericSelfVeto/selfveto.py
@@ -655,8 +655,8 @@ def plot_response_function(enu, depth, cos_theta, kind):
 
 
 if __name__ == "__main__":
-    from optparse import OptionParser, OptionGroup
     import sys
+    from optparse import OptionGroup, OptionParser
 
     parser = OptionParser(usage="%prog [OPTIONS]", description=__doc__)
     parser.add_option(
diff --git a/toise/externals/nuFATE/__init__.py b/toise/externals/nuFATE/__init__.py
index b1a8f22..52a9aa9 100644
--- a/toise/externals/nuFATE/__init__.py
+++ b/toise/externals/nuFATE/__init__.py
@@ -1,6 +1,8 @@
+from itertools import product
+
 import numpy as np
 from toolz import memoize
-from itertools import product
+
 from .crosssections import DISCrossSection, GlashowResonanceCrossSection
 from .earth import get_t_earth
diff --git a/toise/externals/nuFATE/crosssections.py b/toise/externals/nuFATE/crosssections.py
index 8986f0e..3e9128e 100644
--- a/toise/externals/nuFATE/crosssections.py
+++ b/toise/externals/nuFATE/crosssections.py
@@ -1,12 +1,15 @@
-import numpy as np
-from functools import partial
-from . import taudecay
-import toise.util
-import os
 import itertools
+import os
+from functools import partial
+
+import numpy as np
 import photospline
+
+import toise.util
 from toise.cache import ecached, lru_cache
 
+from . import taudecay
+
 TOTAL_XSEC_BIAS = 80
 DPDX_BIAS = 10
 ENU_GRID = np.logspace(1, 15, 14 * 2 + 1)
diff --git a/toise/externals/ternary.py b/toise/externals/ternary.py
index 810c893..fb6665c 100644
--- a/toise/externals/ternary.py
+++ b/toise/externals/ternary.py
@@ -4,26 +4,26 @@
 """
 
 import matplotlib
+import matplotlib.axis as maxis
+import matplotlib.path as mpath
+import matplotlib.pyplot as plt
+import matplotlib.spines as mspines
+import numpy as np
+from matplotlib import transforms as mtransforms
 from matplotlib.axes import Axes
 from matplotlib.patches import Circle, Polygon
 from matplotlib.path import Path
-from matplotlib.ticker import NullLocator, Formatter, FixedLocator, MultipleLocator
+from matplotlib.projections import register_projection
+from matplotlib.ticker import FixedLocator, Formatter, MultipleLocator, NullLocator
 from matplotlib.transforms import (
     Affine2D,
-    IdentityTransform,
+    BboxTransformFrom,
     BboxTransformTo,
-    Transform,
+    IdentityTransform,
     ScaledTranslation,
+    Transform,
+    TransformedBbox,
 )
-from matplotlib.transforms import TransformedBbox, BboxTransformFrom
-from matplotlib import transforms as mtransforms
-from matplotlib.projections import register_projection
-import matplotlib.spines as mspines
-import matplotlib.axis as maxis
-import matplotlib.path as mpath
-import matplotlib.pyplot as plt
-
-import numpy as np
 
 
 class TernaryAxes(Axes):
diff --git a/toise/factory.py b/toise/factory.py
index 8bc1778..5617a4c 100644
--- a/toise/factory.py
+++ b/toise/factory.py
@@ -1,22 +1,24 @@
-import numpy
-from scipy import interpolate
-from functools import partial
 import numbers
 import os
 import pickle as pickle
+from functools import partial
+
+import numpy
+from scipy import interpolate
+
 from . import (
-    effective_areas,
-    diffuse,
-    pointsource,
     angular_resolution,
+    classification_efficiency,
+    diffuse,
+    effective_areas,
     grb,
-    surface_veto,
     multillh,
     plotting,
+    pointsource,
+    surface_veto,
+    surfaces,
 )
-from . import classification_efficiency
-from .util import data_dir, center, defer
-from . import surfaces
+from .util import center, data_dir, defer
 
 
 def make_key(opts, kwargs):
diff --git a/toise/fictive.py b/toise/fictive.py
index 2196f86..d4e46e9 100644
--- a/toise/fictive.py
+++ b/toise/fictive.py
@@ -2,9 +2,10 @@
 Idealized detectors used to estimate sensitivities in the WIS whitepaper
 """
 
-from toolz import memoize
 from numpy import vectorize
-from . import surfaces, energy_resolution, effective_areas
+from toolz import memoize
+
+from . import effective_areas, energy_resolution, surfaces
 
 
 def make_cylinder(volume=1.0, aspect=1.0):
diff --git a/toise/figures/cli.py b/toise/figures/cli.py
index 146c89e..2abd6a2 100644
--- a/toise/figures/cli.py
+++ b/toise/figures/cli.py
@@ -1,15 +1,15 @@
 import argparse
-import json
 import gzip
-import sys
-import os
-from setuptools import find_packages
-from pkgutil import iter_modules
 import importlib
 import inspect
-
+import json
+import os
+import sys
+from pkgutil import iter_modules
 from typing import Any, Callable, Literal, Sequence, Union, get_args, get_origin
 
+from setuptools import find_packages
+
 try:
     from typing import List
 except ImportError:
@@ -37,8 +37,8 @@ def find_modules(path):
 
 try:
     import docutils.frontend
-    import docutils.utils
     import docutils.parsers.rst
+    import docutils.utils
     from docutils import nodes
 
     class ParamHarvester(nodes.SparseNodeVisitor):
@@ -153,9 +153,10 @@ def _maybe_call_sequence(f: Union[Callable, Sequence[Callable]]):
 
 
 def make_figure_data():
-    import traceback
-    import sys
     import pdb
+    import sys
+    import traceback
+
     from toise import figures
 
     # find all submodules of toise.figures and import them
@@ -293,8 +294,9 @@ def _add_options_for_args(parser, spec: inspect.Signature, param_help):
 
 
 def make_figure():
-    import traceback
     import pdb
+    import traceback
+
     from toise import figures
 
     # find all submodules of toise.figures and import them
diff --git a/toise/figures/detector/__init__.py b/toise/figures/detector/__init__.py
index 145f00b..40cf1fd 100644
--- a/toise/figures/detector/__init__.py
+++ b/toise/figures/detector/__init__.py
@@ -1,12 +1,13 @@
-from toise.figures import figure, figure_data
-from toise.figures.diffuse.flavor import psi_binning
-
-from toise import surfaces
 import gzip
-import numpy as np
 import itertools
+
+import numpy as np
 import pandas
 
+from toise import surfaces
+from toise.figures import figure, figure_data
+from toise.figures.diffuse.flavor import psi_binning
+
 
 def get_string_heads(geometry, spacing, **kwargs):
     with gzip.GzipFile(surfaces.get_geometry_file(geometry, spacing)) as f:
@@ -115,10 +116,11 @@ def get_surface_geometry():
 
 @figure
 def surface_geometry(reverse_order=False, which_radio="2022_tdr"):
     import itertools
+
+    import matplotlib.patheffects as path_effects
     import matplotlib.pyplot as plt
     import numpy as np
     from mpl_toolkits.axes_grid1 import inset_locator
-    import matplotlib.patheffects as path_effects
     from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
 
     fig, axes = plt.subplots(1, 4, figsize=(8, 2.5))
@@ -398,10 +400,11 @@ def radio_array_2022_tdr():
 
 @figure
 def radio_surface_geometry():
     import itertools
+
+    import matplotlib.patheffects as path_effects
     import matplotlib.pyplot as plt
     import numpy as np
     from mpl_toolkits.axes_grid1 import inset_locator
-    import matplotlib.patheffects as path_effects
     from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
 
     fig, axes = plt.subplots(1, 1, figsize=(2 * 2.5, 2 * 2.5))
@@ -585,8 +588,10 @@ def effective_areas(exposures):
 
 @figure
 def effective_areas(datasets):
-    import matplotlib.pyplot as plt
     import os
+
+    import matplotlib.pyplot as plt
+
     from toise import plotting
 
     nplots = len(datasets[0]["data"])
diff --git a/toise/figures/detector/cascades.py b/toise/figures/detector/cascades.py
index 6bd97cc..41d1354 100644
--- a/toise/figures/detector/cascades.py
+++ b/toise/figures/detector/cascades.py
@@ -1,9 +1,9 @@
-from toise.figures import figure, figure_data
-from toise.figures.diffuse.flavor import psi_binning
-
 import dashi
 import numpy as np
 
+from toise.figures import figure, figure_data
+from toise.figures.diffuse.flavor import psi_binning
+
 
 def sparse_hese_veto_plot(ax, f):
     npe = 10 ** f["arr_0"]
@@ -54,10 +54,12 @@ def fit(y, yerr, range=None):
 
 @figure
 def volume():
+    import os
+
     import matplotlib.pyplot as plt
     import numpy as np
-    import os
-    from toise import factory, effective_areas, surfaces, plotting
+
+    from toise import effective_areas, factory, plotting, surfaces
 
     edep = np.logspace(4, 8, 101)
     fig = plt.figure(figsize=(3.375, 3.375))
@@ -111,8 +113,10 @@ def effective_area(exposures):
 
 @figure
 def effective_area(datasets):
-    import matplotlib.pyplot as plt
     import os
+
+    import matplotlib.pyplot as plt
+
     from toise import plotting
 
     fig = plt.figure(figsize=(3.375, 3.375))
@@ -138,9 +142,10 @@ def effective_area(datasets):
 
 @figure
 def sparse_veto():
+    import os
+
     import matplotlib.pyplot as plt
     import numpy as np
-    import os
 
     fig = plt.figure(figsize=(3.375, 3.375))
     ax = plt.gca()
diff --git a/toise/figures/detector/tracks.py b/toise/figures/detector/tracks.py
index ef7e6b0..d7cfe52 100644
--- a/toise/figures/detector/tracks.py
+++ b/toise/figures/detector/tracks.py
@@ -8,14 +8,15 @@ def performance(tabulated_psf=False):
     """
     import matplotlib.pyplot as plt
     import numpy as np
+    from scipy.optimize import bisect, fsolve
+
     from toise import (
-        plotting,
         angular_resolution,
         effective_areas,
+        plotting,
         surfaces,
         util,
     )
-    from scipy.optimize import fsolve, bisect
 
     @np.vectorize
     def median_opening_angle(psf, energy, cos_theta):
diff --git a/toise/figures/diffuse/E2_Gen2_Long_whitepaper.py b/toise/figures/diffuse/E2_Gen2_Long_whitepaper.py
index f808d8b..4d9a140 100644
--- a/toise/figures/diffuse/E2_Gen2_Long_whitepaper.py
+++ b/toise/figures/diffuse/E2_Gen2_Long_whitepaper.py
@@ -1,4 +1,4 @@
-from toise.figures import figure_data, figure
+from toise.figures import figure, figure_data
 
 
 @figure
@@ -11,11 +11,11 @@ def diffuse_sensitivity_uhe():
     matplotlib.rc("axes", labelsize=20)
     legendfontsize = 13
 
-    import matplotlib.pyplot as plt
-    import numpy as np
-
     # from NuRadioMC.utilities import fluxes
     import os
+
+    import matplotlib.pyplot as plt
+    import numpy as np
     from scipy.interpolate import interp1d
 
     class units:
diff --git a/toise/figures/diffuse/UHE_nevents_Gen2_flux_models.py b/toise/figures/diffuse/UHE_nevents_Gen2_flux_models.py
index 17f0785..b3cd863 100644
--- a/toise/figures/diffuse/UHE_nevents_Gen2_flux_models.py
+++ b/toise/figures/diffuse/UHE_nevents_Gen2_flux_models.py
@@ -3,20 +3,18 @@
 #
 # Flux model plot for Gen2
 
-import numpy as np
-import matplotlib.pyplot as plt
+from enum import Enum
 from itertools import product
 
-from toise.diffuse import DiffuseModel
-from toise import factory
-from toise.util import center
-from toise import diffuse
+import matplotlib.pyplot as plt
+import numpy as np
 
-# get enums for models ...
-from toise.diffuse import DIFFUSE_MODELS
-from toise.figures import figure_data, figure
+from toise import diffuse, factory
 
-from enum import Enum
+# get enums for models ...
+from toise.diffuse import DIFFUSE_MODELS, DiffuseModel
+from toise.figures import figure, figure_data
+from toise.util import center
 
 IC_DIFFUSE = Enum("IC_DIFFUSE", ["HESE", "NUMU"])
diff --git a/toise/figures/diffuse/flavor.py b/toise/figures/diffuse/flavor.py
index a6cd185..5dc428d 100644
--- a/toise/figures/diffuse/flavor.py
+++ b/toise/figures/diffuse/flavor.py
@@ -2,21 +2,19 @@
 Flavor ratio fits
 """
 
+import copy
 from functools import partial
 
-import pandas as pd
 import numpy as np
+import pandas as pd
+import toolz
+from matplotlib.lines import Line2D
 from scipy.optimize import bisect
 from tqdm import tqdm
-import toolz
-import copy
-
-from toise.figures import figure_data, figure
 
+from toise import diffuse, factory, multillh, plotting, surface_veto
 from toise.cache import ecached, lru_cache
-from toise import diffuse, multillh, plotting, surface_veto, factory, plotting
-
-from matplotlib.lines import Line2D
+from toise.figures import figure, figure_data
 
 
 def make_components(aeffs, astro_class=diffuse.DiffuseAstro):
@@ -430,9 +428,10 @@ def triangle(datasets):
     """
     Plot exclusion contours in flavor composition
     """
-    from toise.externals import ternary
     import matplotlib.pyplot as plt
 
+    from toise.externals import ternary
+
     ax = ternary.flavor_triangle(grid=True)
     labels = []
     handles = []
@@ -482,10 +481,11 @@ def muon_damping(datasets, preliminary=False):
     """
     Plot exclusion contours in flavor composition
     """
-    from toise.externals import ternary
-    import matplotlib.pyplot as plt
     import matplotlib.colors as mcolors
-    from scipy import optimize, interpolate, stats
+    import matplotlib.pyplot as plt
+    from scipy import interpolate, optimize, stats
+
+    from toise.externals import ternary
 
     fig = plt.figure(figsize=(5, 4))
     w, h = fig.bbox_inches.width, fig.bbox_inches.height
diff --git a/toise/figures/diffuse/galactic.py b/toise/figures/diffuse/galactic.py
index 778f7e5..6cb4b29 100644
--- a/toise/figures/diffuse/galactic.py
+++ b/toise/figures/diffuse/galactic.py
@@ -1,22 +1,22 @@
-from toise.figures import figure_data, figure
-
-from toise import diffuse, pointsource, surface_veto, multillh, factory
-from toise.cache import ecached, lru_cache
-
 # from scipy import stats, optimize
 from copy import copy
 
-import numpy as np
-
 # from tqdm import tqdm
 from functools import partial
 from typing import List, Literal, Optional
 
+import numpy as np
+
+from toise import diffuse, factory, multillh, pointsource, surface_veto
+from toise.cache import ecached, lru_cache
+from toise.figures import figure, figure_data
+
 from .dnnc import (
+    AngularSmearing,
+    RingAveraging,
     create_dnn_aeff,
     get_dnn_smoothing,
     get_monopod_smoothing,
-    AngularSmearing,
-    RingAveraging,
 )
 
 factory.add_configuration(
@@ -221,9 +221,10 @@ def fermi_pi0(
 
 @figure
 def rates(datasets):
+    import healpy
     import matplotlib.pyplot as plt
     from matplotlib.colors import LogNorm
-    import healpy
+
     from toise import plotting
 
     dataset = datasets[0]
diff --git a/toise/figures/diffuse/single_power_law.py b/toise/figures/diffuse/single_power_law.py
index 194f39d..1582f29 100644
--- a/toise/figures/diffuse/single_power_law.py
+++ b/toise/figures/diffuse/single_power_law.py
@@ -1,27 +1,27 @@
-from toise.figures import figure_data, figure, table
+from copy import copy
+from functools import partial
+from io import StringIO
+from itertools import product
+
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib.lines import Line2D
+from pandas import DataFrame
+from scipy import optimize, stats
+from tqdm import tqdm
 
 from toise import (
     diffuse,
+    factory,
     multillh,
     plotting,
-    surface_veto,
     pointsource,
-    factory,
+    surface_veto,
 )
 from toise.cache import ecached, lru_cache
+from toise.figures import figure, figure_data, table
 
-from scipy import stats, optimize
-from copy import copy
-import numpy as np
-from tqdm import tqdm
-from functools import partial
-from io import StringIO
-from itertools import product
-from pandas import DataFrame
-
-from .flavor import extract_ts, detector_label
-from matplotlib.lines import Line2D
-import matplotlib.pyplot as plt
+from .flavor import detector_label, extract_ts
 
 
 def make_components(aeffs, emin=1e2, emax=1e11):
diff --git a/toise/figures/diffuse/spectrum.py b/toise/figures/diffuse/spectrum.py
index 2d97142..6d32d52 100644
--- a/toise/figures/diffuse/spectrum.py
+++ b/toise/figures/diffuse/spectrum.py
@@ -1,21 +1,21 @@
-from toise.figures import figure_data, figure
+from copy import copy
+from functools import partial
+from io import StringIO
+
+import numpy as np
+from scipy import optimize, stats
+from tqdm import tqdm
 
 from toise import (
     diffuse,
+    factory,
     multillh,
     plotting,
-    surface_veto,
     pointsource,
-    factory,
+    surface_veto,
 )
 from toise.cache import ecached, lru_cache
-
-from scipy import stats, optimize
-from copy import copy
-import numpy as np
-from tqdm import tqdm
-from functools import partial
-from io import StringIO
+from toise.figures import figure, figure_data
 
 
 def make_components(aeffs, emin=1e2, emax=1e11):
@@ -730,8 +730,8 @@ def unfolded_flux_plus_sensitivity_mm(
     ax=None,
 ):
     import matplotlib.pyplot as plt
-    from matplotlib.patches import Patch
     from matplotlib.container import ErrorbarContainer
+    from matplotlib.patches import Patch
 
     _default_plot_elements = [
         "cr",
diff --git a/toise/figures/pointsource/flare.py b/toise/figures/pointsource/flare.py
index c6e04db..83bf5e8 100644
--- a/toise/figures/pointsource/flare.py
+++ b/toise/figures/pointsource/flare.py
@@ -4,18 +4,19 @@
 from functools import partial
 
 import numpy as np
+from tqdm import tqdm
+
 from toise import (
     diffuse,
     factory,
     figures_of_merit,
     multillh,
     pointsource,
+    radio_aeff_generation,
     surface_veto,
     util,
 )
 from toise.figures import figure, figure_data
-from tqdm import tqdm
-from toise import radio_aeff_generation
 
 try:
     from typing import List
diff --git a/toise/figures_of_merit.py b/toise/figures_of_merit.py
index d6524d6..5c8abe7 100644
--- a/toise/figures_of_merit.py
+++ b/toise/figures_of_merit.py
@@ -1,9 +1,17 @@
 from enum import Enum
 from functools import partial
+
 import numpy
-from . import factory, diffuse, surface_veto, pointsource, multillh
+
+from . import (
+    diffuse,
+    factory,
+    multillh,
+    pointsource,
+    radio_aeff_generation,
+    surface_veto,
+)
 from .util import constants
-from . import radio_aeff_generation
 
 # Enum has no facility for setting docstrings inline. Do it by hand.
 TOT = Enum("TOT", ["ul", "dp", "fc"])
diff --git a/toise/grb.py b/toise/grb.py
index bd50a0f..4ec588b 100644
--- a/toise/grb.py
+++ b/toise/grb.py
@@ -1,7 +1,9 @@
-import numpy as np
-from scipy import stats, interpolate
-from io import StringIO
 import warnings
+from io import StringIO
+
+import numpy as np
+from scipy import interpolate, stats
+
 from . import pointsource
diff --git a/toise/multillh.py b/toise/multillh.py
index 30691a8..1940c54 100644
--- a/toise/multillh.py
+++ b/toise/multillh.py
@@ -1,6 +1,7 @@
 # Adapted from [multillh](http://code.icecube.wisc.edu/svn/sandbox/nwhitehorn/multillh)
 
 import logging
+
 import numpy
 import scipy.optimize
 
@@ -280,8 +281,8 @@ def fit(self, minimizer_params=dict(), **fixedparams):
         If a parameter is fixed to an iterable value, it will be treated as a
         discrete parameter and minimized on a grid.
         """
-        from collections.abc import Iterable
         import itertools
+        from collections.abc import Iterable
 
         freeparams = list(self.components.keys())
         discrete_params = []
diff --git a/toise/plotting.py b/toise/plotting.py
index 9ec44d7..7ade9ca 100644
--- a/toise/plotting.py
+++ b/toise/plotting.py
@@ -1,6 +1,6 @@
+import numpy
 from matplotlib import cm
 from matplotlib.colors import ListedColormap
-import numpy
 
 
 def stepped_path(edges, bins, cumulative=False):
@@ -46,8 +46,8 @@ def format_energy(fmt, energy):
 
 
 def plot_profile2d(profile, x, y, levels=[68, 90, 99], colors="k", **kwargs):
-    from scipy.stats import chi2
     import matplotlib.pyplot as plt
+    from scipy.stats import chi2
 
     xv = numpy.unique(profile[x])
     yv = numpy.unique(profile[y])
diff --git a/toise/pointsource.py b/toise/pointsource.py
index c17d076..8036e86 100644
--- a/toise/pointsource.py
+++ b/toise/pointsource.py
@@ -1,13 +1,15 @@
-from scipy.optimize import bisect
-from functools import partial
-import numpy
-from scipy import optimize, stats, interpolate
-from io import StringIO
 import itertools
+import logging
 from copy import copy
+from functools import partial
+from io import StringIO
+
+import numpy
+from scipy import interpolate, optimize, stats
+from scipy.optimize import bisect
+
 from .multillh import LLHEval, asimov_llh, get_expectations
 from .util import *
-import logging
 
 
 def is_zenith_weight(zenith_weight, aeff):
diff --git a/toise/radio_aeff_generation.py b/toise/radio_aeff_generation.py
index 54d0e54..dcdebe8 100644
--- a/toise/radio_aeff_generation.py
+++ b/toise/radio_aeff_generation.py
@@ -1,23 +1,25 @@
+import json
+import logging
 import os
 from copy import copy
-import json
-import logging
 
 import numpy as np
 import pandas as pd
 from scipy import interpolate
+
 from toise.externals import nuFATE
 
-from .radio_response import radio_analysis_efficiency
-from .radio_response import RadioPointSpreadFunction
-from .radio_response import RadioEnergyResolution
-from .effective_areas import calculate_cascade_production_density
-from .effective_areas import effective_area
-from .radio_muon_background import get_muon_distribution
-from .radio_muon_background import get_tabulated_muon_distribution
-
+from .effective_areas import calculate_cascade_production_density, effective_area
 from .pointsource import is_zenith_weight
+from .radio_muon_background import (
+    get_muon_distribution,
+    get_tabulated_muon_distribution,
+)
+from .radio_response import (
+    RadioEnergyResolution,
+    RadioPointSpreadFunction,
+    radio_analysis_efficiency,
+)
 from .util import *
 
 logger = logging.getLogger("toise aeff calculation for radio")
diff --git a/toise/radio_muon_background.py b/toise/radio_muon_background.py
index 45c34ba..ec26bed 100644
--- a/toise/radio_muon_background.py
+++ b/toise/radio_muon_background.py
@@ -1,8 +1,8 @@
-import numpy as np
-import matplotlib.pyplot as plt
-from matplotlib.colors import LogNorm
 import logging
 
+import matplotlib.pyplot as plt
+import numpy as np
+from matplotlib.colors import LogNorm
 
background") diff --git a/toise/radio_response.py b/toise/radio_response.py index 1996ccd..5b41eec 100644 --- a/toise/radio_response.py +++ b/toise/radio_response.py @@ -1,14 +1,13 @@ +import logging import os + import numpy as np -from scipy.stats import multivariate_normal from scipy import interpolate from scipy.special import erf -from scipy.stats import cauchy +from scipy.stats import cauchy, multivariate_normal from .energy_resolution import EnergySmearingMatrix -import logging - logger = logging.getLogger("radio resolution parametrisation") diff --git a/toise/salyut.py b/toise/salyut.py index 94c48be..1639d29 100644 --- a/toise/salyut.py +++ b/toise/salyut.py @@ -3,9 +3,10 @@ """ import numpy +import scipy.interpolate import scipy.optimize import scipy.stats -import scipy.interpolate + from .util import * diff --git a/toise/surface_veto.py b/toise/surface_veto.py index 029a3b3..4871ea5 100644 --- a/toise/surface_veto.py +++ b/toise/surface_veto.py @@ -1,16 +1,18 @@ -from scipy.special import erf, erfc -from scipy.optimize import fsolve -from scipy import interpolate -import healpy -import pickle import os -import numpy +import pickle from copy import copy +import healpy +import numpy +from scipy import interpolate +from scipy.optimize import fsolve +from scipy.special import erf, erfc + from toise import effective_areas + from . import surfaces -from .util import * from .pointsource import is_zenith_weight +from .util import * # hobo-costing! diff --git a/toise/surfaces.py b/toise/surfaces.py index b4311e4..072f4c9 100644 --- a/toise/surfaces.py +++ b/toise/surfaces.py @@ -1,6 +1,7 @@ -import numpy import os +import numpy + from .util import data_dir @@ -492,7 +493,7 @@ def from_i3file(cls, fname, padding=0): :param fname: path to an I3 file containing at least a G frame :param padding: distance, in meters, to expand the surface in all directions """ - from icecube import icetray, dataio, dataclasses + from icecube import dataclasses, dataio, icetray f = dataio.I3File(fname) fr = f.pop_frame(icetray.I3Frame.Geometry) diff --git a/toise/transient.py b/toise/transient.py index 1122f44..d278620 100644 --- a/toise/transient.py +++ b/toise/transient.py @@ -1,10 +1,12 @@ -from os import path from enum import Enum -from .util import data_dir, constants, PDGCode +from os import path + import numpy as np from scipy import interpolate from scipy.integrate import quad + from .pointsource import PointSource +from .util import PDGCode, constants, data_dir # Enum has no facility for setting docstrings inline. Do it by hand. 
 TRANSIENT_MODELS = Enum(
diff --git a/toise/util.py b/toise/util.py
index 2c80fad..4df6977 100644
--- a/toise/util.py
+++ b/toise/util.py
@@ -1,9 +1,10 @@
 import os
+from functools import partial
+from os import environ, mkdir, path, unlink
+from subprocess import PIPE, check_call
+
 import numpy as np
-from subprocess import check_call, PIPE
-from os import path, unlink, environ, mkdir
 from lazy_object_proxy import Proxy
-from functools import partial
 
 
 def defer(func, *args, **kwargs):

From 9025cd3efd70e72148402ad3dea4f9c9078d3414 Mon Sep 17 00:00:00 2001
From: Jakob van Santen
Date: Wed, 25 Sep 2024 13:47:48 +0200
Subject: [PATCH 2/6] remove unused imports

---
 resources/docs/plots/cascade_production_density.py |  1 -
 resources/docs/plots/cascade_volume.py             |  2 --
 resources/docs/plots/geometries.py                 |  6 ++----
 resources/docs/plots/muon_production_efficiency.py |  1 -
 resources/docs/plots/ps_sensitivity_demo.py        |  1 -
 resources/scripts/2021_midscale/calc_radio_sens.py | 12 +-----------
 resources/scripts/2021_midscale/calc_sens.py       | 11 +----------
 resources/scripts/2021_midscale/count_events.ipynb |  5 ++---
 resources/scripts/2021_midscale/extract_PSF.py     |  1 -
 .../2021_midscale/extract_energy_resolution.py     |  2 --
 resources/scripts/2021_midscale/plot_PSF.py        |  2 +-
 resources/scripts/data prep/fit_psf.py             |  1 -
 resources/scripts/data prep/gcd_to_txt.py          |  2 +-
 tests/test_sensitivity.py                          |  1 -
 toise/angular_resolution.py                        |  1 -
 toise/cache.py                                     | 10 +++++++---
 toise/diffuse.py                                   |  3 ---
 toise/effective_areas.py                           | 12 ------------
 toise/energy_resolution.py                         |  1 -
 toise/externals/AtmosphericSelfVeto/__init__.py    |  2 +-
 toise/externals/nuFATE/crosssections.py            |  3 ---
 toise/externals/ternary.py                         |  8 ++------
 toise/factory.py                                   |  6 +-----
 toise/figures/cli.py                               |  2 +-
 toise/figures/detector/__init__.py                 |  3 ---
 toise/figures/detector/cascades.py                 |  4 +---
 toise/figures/detector/tracks.py                   |  2 +-
 toise/figures/diffuse/E2_Gen2_Long_whitepaper.py   |  2 +-
 .../figures/diffuse/UHE_nevents_Gen2_flux_models.py|  2 +-
 toise/figures/diffuse/flavor.py                    |  2 --
 toise/figures/diffuse/galactic.py                  |  7 +++----
 toise/figures/diffuse/single_power_law.py          |  8 ++------
 toise/figures/diffuse/spectrum.py                  |  6 +-----
 toise/figures/pointsource/flare.py                 |  2 --
 toise/figures_of_merit.py                          |  3 ---
 toise/plotting.py                                  |  3 ---
 toise/pointsource.py                               |  3 +--
 toise/radio_aeff_generation.py                     |  4 ----
 toise/radio_muon_background.py                     |  2 --
 toise/radio_response.py                            |  3 ---
 toise/surfaces.py                                  |  2 +-
 toise/util.py                                      |  4 ++--
 42 files changed, 34 insertions(+), 124 deletions(-)

diff --git a/resources/docs/plots/cascade_production_density.py b/resources/docs/plots/cascade_production_density.py
index 70e57f1..bedc739 100644
--- a/resources/docs/plots/cascade_production_density.py
+++ b/resources/docs/plots/cascade_production_density.py
@@ -1,7 +1,6 @@
 import itertools
 
 import matplotlib.pyplot as plt
-import numpy as np
 from icecube.toise import effective_areas, plotting, util
 from matplotlib.gridspec import GridSpec
 from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
diff --git a/resources/docs/plots/cascade_volume.py b/resources/docs/plots/cascade_volume.py
index 4e33b73..d5708ad 100644
--- a/resources/docs/plots/cascade_volume.py
+++ b/resources/docs/plots/cascade_volume.py
@@ -1,7 +1,5 @@
 import matplotlib.pyplot as plt
 import numpy as np
-from matplotlib.gridspec import GridSpec
-from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
 
 from toise import effective_areas, plotting
b/resources/docs/plots/geometries.py index ce03a19..f3cc0d2 100644 --- a/resources/docs/plots/geometries.py +++ b/resources/docs/plots/geometries.py @@ -1,15 +1,13 @@ -import itertools import matplotlib.pyplot as plt import numpy as np -from icecube import dataclasses, dataio, icetray -from icecube.toise import effective_areas, plotting, surfaces +from icecube.toise import plotting, surfaces from matplotlib.gridspec import GridSpec from mpl_toolkits.axes_grid.anchored_artists import AnchoredText def string_heads(gcdfile): - from icecube import dataclasses, dataio, icetray + from icecube import dataclasses, dataio, icetray # noqa: F401 f = dataio.I3File(gcdfile) omgeo = f.pop_frame(icetray.I3Frame.Geometry)["I3Geometry"].omgeo diff --git a/resources/docs/plots/muon_production_efficiency.py b/resources/docs/plots/muon_production_efficiency.py index a971c3f..9b3faf2 100644 --- a/resources/docs/plots/muon_production_efficiency.py +++ b/resources/docs/plots/muon_production_efficiency.py @@ -1,7 +1,6 @@ import itertools import matplotlib.pyplot as plt -import numpy as np from icecube.toise import effective_areas, plotting, util from matplotlib.gridspec import GridSpec from mpl_toolkits.axes_grid.anchored_artists import AnchoredText diff --git a/resources/docs/plots/ps_sensitivity_demo.py b/resources/docs/plots/ps_sensitivity_demo.py index 11116c0..316f7d6 100644 --- a/resources/docs/plots/ps_sensitivity_demo.py +++ b/resources/docs/plots/ps_sensitivity_demo.py @@ -10,7 +10,6 @@ pointsource, ) from matplotlib.gridspec import GridSpec -from mpl_toolkits.axes_grid.anchored_artists import AnchoredText def create_aeff( diff --git a/resources/scripts/2021_midscale/calc_radio_sens.py b/resources/scripts/2021_midscale/calc_radio_sens.py index 43d9e4b..0b2bb36 100644 --- a/resources/scripts/2021_midscale/calc_radio_sens.py +++ b/resources/scripts/2021_midscale/calc_radio_sens.py @@ -1,26 +1,16 @@ import json -from collections import OrderedDict, defaultdict +from collections import OrderedDict import numpy as np from tqdm import tqdm # generate aeff for radio from toise import ( - angular_resolution, - diffuse, - effective_areas, factory, figures, figures_of_merit, - grb, - multillh, - plotting, - pointsource, radio_aeff_generation, - surface_veto, - util, ) -from toise.util import center, data_dir # this file is basically a copy/strip down of the figures.pointsource.flare.sensitivity workbook diff --git a/resources/scripts/2021_midscale/calc_sens.py b/resources/scripts/2021_midscale/calc_sens.py index a54dab0..3964898 100644 --- a/resources/scripts/2021_midscale/calc_sens.py +++ b/resources/scripts/2021_midscale/calc_sens.py @@ -1,23 +1,14 @@ import json -from collections import OrderedDict, defaultdict +from collections import OrderedDict from tqdm import tqdm from toise import ( - angular_resolution, - diffuse, effective_areas, factory, figures, figures_of_merit, - grb, - multillh, - plotting, - pointsource, - surface_veto, - util, ) -from toise.util import center, data_dir # this file is basically a copy/strip down of the figures.pointsource.flare.sensitivity workbook diff --git a/resources/scripts/2021_midscale/count_events.ipynb b/resources/scripts/2021_midscale/count_events.ipynb index f7c5c41..733ded8 100644 --- a/resources/scripts/2021_midscale/count_events.ipynb +++ b/resources/scripts/2021_midscale/count_events.ipynb @@ -8,10 +8,9 @@ "source": [ "import numpy\n", "\n", - "from toise import diffuse, effective_areas, factory, plotting\n", + "from toise import diffuse, effective_areas, 
factory\n", "\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt" + "%matplotlib inline" ] }, { diff --git a/resources/scripts/2021_midscale/extract_PSF.py b/resources/scripts/2021_midscale/extract_PSF.py index 949bea3..826fa2e 100644 --- a/resources/scripts/2021_midscale/extract_PSF.py +++ b/resources/scripts/2021_midscale/extract_PSF.py @@ -3,7 +3,6 @@ import autograd import autograd.numpy as n -import h5py import matplotlib.pyplot as plt import numpy as np import pandas as pd diff --git a/resources/scripts/2021_midscale/extract_energy_resolution.py b/resources/scripts/2021_midscale/extract_energy_resolution.py index 1eaa412..f70d973 100644 --- a/resources/scripts/2021_midscale/extract_energy_resolution.py +++ b/resources/scripts/2021_midscale/extract_energy_resolution.py @@ -6,8 +6,6 @@ import tables from toolz import memoize -from toise import plotting, surfaces - parser = argparse.ArgumentParser() parser.add_argument( "-n", diff --git a/resources/scripts/2021_midscale/plot_PSF.py b/resources/scripts/2021_midscale/plot_PSF.py index 631d9fc..2383752 100644 --- a/resources/scripts/2021_midscale/plot_PSF.py +++ b/resources/scripts/2021_midscale/plot_PSF.py @@ -2,7 +2,7 @@ import matplotlib.pyplot as plt import numpy as np -from scipy.optimize import bisect, fsolve +from scipy.optimize import bisect from toise import angular_resolution, effective_areas, plotting, surfaces, util diff --git a/resources/scripts/data prep/fit_psf.py b/resources/scripts/data prep/fit_psf.py index 6a56a53..8fd89dd 100755 --- a/resources/scripts/data prep/fit_psf.py +++ b/resources/scripts/data prep/fit_psf.py @@ -16,7 +16,6 @@ def create_psf( reco="SplineMPEMuEXDifferential", cut=None, ): - import dashi from cubicle.hdfweights import HDFScanner hdf = HDFScanner(fname, primary="/" + muon, type="unweighted") diff --git a/resources/scripts/data prep/gcd_to_txt.py b/resources/scripts/data prep/gcd_to_txt.py index 2216c6e..74463d1 100755 --- a/resources/scripts/data prep/gcd_to_txt.py +++ b/resources/scripts/data prep/gcd_to_txt.py @@ -8,7 +8,7 @@ args = parser.parse_args() import numpy -from icecube import dataclasses, dataio, icetray +from icecube import dataio, icetray f = dataio.I3File(args.gcd_file) fr = f.pop_frame(icetray.I3Frame.Geometry) diff --git a/tests/test_sensitivity.py b/tests/test_sensitivity.py index 5232464..b068b68 100644 --- a/tests/test_sensitivity.py +++ b/tests/test_sensitivity.py @@ -7,7 +7,6 @@ from toise import diffuse, factory, figures_of_merit, multillh, surface_veto from toise.effective_areas import effective_area -from toise.figures import figure @pytest.fixture(scope="session") diff --git a/toise/angular_resolution.py b/toise/angular_resolution.py index c4d7626..b8f4a75 100644 --- a/toise/angular_resolution.py +++ b/toise/angular_resolution.py @@ -1,5 +1,4 @@ import os -import pickle import numpy from scipy import interpolate, stats diff --git a/toise/cache.py b/toise/cache.py index a2269f8..86bf9ea 100644 --- a/toise/cache.py +++ b/toise/cache.py @@ -3,16 +3,20 @@ import json import pickle as pickle import time -from os import listdir, unlink +from os import unlink from os.path import join -from easy_cache import caches, create_cache_key, ecached, ecached_property +from easy_cache import ecached from easy_cache.abc import AbstractCacheInstance from easy_cache.core import DEFAULT_TIMEOUT, NOT_FOUND from photospline import SplineTable from .util import data_dir +__all__ = [ + "ecached", + "lru_cache", +] class PickleCache(AbstractCacheInstance): def __init__(self, 
base_dir=join(data_dir, "cache"), *args, **kwargs): @@ -118,4 +122,4 @@ def delete(self, key): try: from functools import lru_cache except ImportError: - from backports.functools_lru_cache import lru_cache + pass diff --git a/toise/diffuse.py b/toise/diffuse.py index c2aab72..fdde49f 100644 --- a/toise/diffuse.py +++ b/toise/diffuse.py @@ -1,11 +1,9 @@ import itertools -import logging import os import pickle as pickle import warnings from copy import copy from enum import Enum -from functools import partial from io import StringIO import healpy @@ -14,7 +12,6 @@ import numpy as np from scipy.integrate import quad -from . import multillh from .pointsource import is_zenith_weight from .util import * diff --git a/toise/effective_areas.py b/toise/effective_areas.py index 171afbe..ca05d20 100644 --- a/toise/effective_areas.py +++ b/toise/effective_areas.py @@ -574,9 +574,6 @@ def create_bundle_aeff( # 2) Selection efficiency # 3) Veto coverage - import dashi - import tables - from scipy.special import erf nside = None if isinstance(cos_theta, int): @@ -677,9 +674,6 @@ def create_throughgoing_aeff( # 4) Point spread function # 5) Energy resolution - import dashi - import tables - from scipy.special import erf nside = None if isinstance(cos_theta, int): @@ -763,9 +757,6 @@ def create_cascade_aeff( # 4) Point spread function # 5) Energy resolution - import dashi - import tables - from scipy.special import erf nside = None if isinstance(cos_theta, int): @@ -837,9 +828,6 @@ def create_starting_aeff( # 4) Point spread function # 5) Energy resolution - import dashi - import tables - from scipy.special import erf nside = None if isinstance(cos_theta, int): diff --git a/toise/energy_resolution.py b/toise/energy_resolution.py index fbc312f..d8d5d72 100644 --- a/toise/energy_resolution.py +++ b/toise/energy_resolution.py @@ -1,5 +1,4 @@ import os -import pickle import numpy from scipy import interpolate diff --git a/toise/externals/AtmosphericSelfVeto/__init__.py b/toise/externals/AtmosphericSelfVeto/__init__.py index 69ef4d7..c1ac92a 100644 --- a/toise/externals/AtmosphericSelfVeto/__init__.py +++ b/toise/externals/AtmosphericSelfVeto/__init__.py @@ -1,6 +1,6 @@ import numpy -from ...util import PDGCode, data_dir +from ...util import PDGCode from . import selfveto diff --git a/toise/externals/nuFATE/crosssections.py b/toise/externals/nuFATE/crosssections.py index 3e9128e..b250554 100644 --- a/toise/externals/nuFATE/crosssections.py +++ b/toise/externals/nuFATE/crosssections.py @@ -1,11 +1,8 @@ -import itertools -import os from functools import partial import numpy as np import photospline -import toise.util from toise.cache import ecached, lru_cache from . import taudecay diff --git a/toise/externals/ternary.py b/toise/externals/ternary.py index fb6665c..cd6efb4 100644 --- a/toise/externals/ternary.py +++ b/toise/externals/ternary.py @@ -3,25 +3,21 @@ ternary_project.py by Kevin L. 
Davies """ -import matplotlib import matplotlib.axis as maxis import matplotlib.path as mpath import matplotlib.pyplot as plt import matplotlib.spines as mspines import numpy as np -from matplotlib import transforms as mtransforms from matplotlib.axes import Axes -from matplotlib.patches import Circle, Polygon +from matplotlib.patches import Polygon from matplotlib.path import Path from matplotlib.projections import register_projection -from matplotlib.ticker import FixedLocator, Formatter, MultipleLocator, NullLocator +from matplotlib.ticker import MultipleLocator from matplotlib.transforms import ( Affine2D, BboxTransformFrom, BboxTransformTo, - IdentityTransform, ScaledTranslation, - Transform, TransformedBbox, ) diff --git a/toise/factory.py b/toise/factory.py index 5617a4c..06f3f45 100644 --- a/toise/factory.py +++ b/toise/factory.py @@ -9,16 +9,12 @@ from . import ( angular_resolution, classification_efficiency, - diffuse, effective_areas, - grb, multillh, - plotting, - pointsource, surface_veto, surfaces, ) -from .util import center, data_dir, defer +from .util import data_dir, defer def make_key(opts, kwargs): diff --git a/toise/figures/cli.py b/toise/figures/cli.py index 2abd6a2..41e9742 100644 --- a/toise/figures/cli.py +++ b/toise/figures/cli.py @@ -6,7 +6,7 @@ import os import sys from pkgutil import iter_modules -from typing import Any, Callable, Literal, Sequence, Union, get_args, get_origin +from typing import Callable, Literal, Sequence, Union, get_args, get_origin from setuptools import find_packages diff --git a/toise/figures/detector/__init__.py b/toise/figures/detector/__init__.py index 40cf1fd..21cd875 100644 --- a/toise/figures/detector/__init__.py +++ b/toise/figures/detector/__init__.py @@ -399,12 +399,10 @@ def radio_array_2022_tdr(): @figure def radio_surface_geometry(): - import itertools import matplotlib.patheffects as path_effects import matplotlib.pyplot as plt import numpy as np - from mpl_toolkits.axes_grid1 import inset_locator from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar fig, axes = plt.subplots(1, 1, figsize=(2 * 2.5, 2 * 2.5)) @@ -588,7 +586,6 @@ def effective_areas(exposures): @figure def effective_areas(datasets): - import os import matplotlib.pyplot as plt diff --git a/toise/figures/detector/cascades.py b/toise/figures/detector/cascades.py index 41d1354..f0ee924 100644 --- a/toise/figures/detector/cascades.py +++ b/toise/figures/detector/cascades.py @@ -54,12 +54,11 @@ def fit(y, yerr, range=None): @figure def volume(): - import os import matplotlib.pyplot as plt import numpy as np - from toise import effective_areas, factory, plotting, surfaces + from toise import effective_areas, factory edep = np.logspace(4, 8, 101) fig = plt.figure(figsize=(3.375, 3.375)) @@ -113,7 +112,6 @@ def effective_area(exposures): @figure def effective_area(datasets): - import os import matplotlib.pyplot as plt diff --git a/toise/figures/detector/tracks.py b/toise/figures/detector/tracks.py index d7cfe52..b50fe93 100644 --- a/toise/figures/detector/tracks.py +++ b/toise/figures/detector/tracks.py @@ -8,7 +8,7 @@ def performance(tabulated_psf=False): """ import matplotlib.pyplot as plt import numpy as np - from scipy.optimize import bisect, fsolve + from scipy.optimize import bisect from toise import ( angular_resolution, diff --git a/toise/figures/diffuse/E2_Gen2_Long_whitepaper.py b/toise/figures/diffuse/E2_Gen2_Long_whitepaper.py index 4d9a140..c73b3aa 100644 --- a/toise/figures/diffuse/E2_Gen2_Long_whitepaper.py +++ 
b/toise/figures/diffuse/E2_Gen2_Long_whitepaper.py @@ -1,4 +1,4 @@ -from toise.figures import figure, figure_data +from toise.figures import figure @figure diff --git a/toise/figures/diffuse/UHE_nevents_Gen2_flux_models.py b/toise/figures/diffuse/UHE_nevents_Gen2_flux_models.py index b3cd863..9e1431f 100644 --- a/toise/figures/diffuse/UHE_nevents_Gen2_flux_models.py +++ b/toise/figures/diffuse/UHE_nevents_Gen2_flux_models.py @@ -13,7 +13,7 @@ # get enums for models ... from toise.diffuse import DIFFUSE_MODELS, DiffuseModel -from toise.figures import figure, figure_data +from toise.figures import figure from toise.util import center IC_DIFFUSE = Enum("IC_DIFFUSE", ["HESE", "NUMU"]) diff --git a/toise/figures/diffuse/flavor.py b/toise/figures/diffuse/flavor.py index 5dc428d..2da4f88 100644 --- a/toise/figures/diffuse/flavor.py +++ b/toise/figures/diffuse/flavor.py @@ -9,7 +9,6 @@ import pandas as pd import toolz from matplotlib.lines import Line2D -from scipy.optimize import bisect from tqdm import tqdm from toise import diffuse, factory, multillh, plotting, surface_veto @@ -428,7 +427,6 @@ def triangle(datasets): """ Plot exclusion contours in flavor composition """ - import matplotlib.pyplot as plt from toise.externals import ternary diff --git a/toise/figures/diffuse/galactic.py b/toise/figures/diffuse/galactic.py index 6cb4b29..2b9bd20 100644 --- a/toise/figures/diffuse/galactic.py +++ b/toise/figures/diffuse/galactic.py @@ -3,12 +3,12 @@ # from tqdm import tqdm from functools import partial -from typing import List, Literal, Optional +from typing import List, Literal import numpy as np -from toise import diffuse, factory, multillh, pointsource, surface_veto -from toise.cache import ecached, lru_cache +from toise import diffuse, factory, multillh, surface_veto +from toise.cache import lru_cache from toise.figures import figure, figure_data from .dnnc import ( @@ -223,7 +223,6 @@ def fermi_pi0( def rates(datasets): import healpy import matplotlib.pyplot as plt - from matplotlib.colors import LogNorm from toise import plotting diff --git a/toise/figures/diffuse/single_power_law.py b/toise/figures/diffuse/single_power_law.py index 1582f29..c2a8e61 100644 --- a/toise/figures/diffuse/single_power_law.py +++ b/toise/figures/diffuse/single_power_law.py @@ -1,24 +1,20 @@ from copy import copy -from functools import partial -from io import StringIO from itertools import product import matplotlib.pyplot as plt import numpy as np from matplotlib.lines import Line2D from pandas import DataFrame -from scipy import optimize, stats +from scipy import stats from tqdm import tqdm from toise import ( diffuse, factory, multillh, - plotting, - pointsource, surface_veto, ) -from toise.cache import ecached, lru_cache +from toise.cache import lru_cache from toise.figures import figure, figure_data, table from .flavor import detector_label, extract_ts diff --git a/toise/figures/diffuse/spectrum.py b/toise/figures/diffuse/spectrum.py index 6d32d52..bc77b34 100644 --- a/toise/figures/diffuse/spectrum.py +++ b/toise/figures/diffuse/spectrum.py @@ -3,15 +3,13 @@ from io import StringIO import numpy as np -from scipy import optimize, stats +from scipy import optimize from tqdm import tqdm from toise import ( diffuse, factory, multillh, - plotting, - pointsource, surface_veto, ) from toise.cache import ecached, lru_cache @@ -628,7 +626,6 @@ def apply_style(kwargs): @figure def unfolded_flux_multimessenger(datasets, label="Gen2-InIce+Radio"): import matplotlib.pyplot as plt - from matplotlib.container import 
ErrorbarContainer fig = plt.figure(figsize=(7, 3.5), dpi=300) ax = plt.gca() @@ -730,7 +727,6 @@ def unfolded_flux_plus_sensitivity_mm( ax=None, ): import matplotlib.pyplot as plt - from matplotlib.container import ErrorbarContainer from matplotlib.patches import Patch _default_plot_elements = [ diff --git a/toise/figures/pointsource/flare.py b/toise/figures/pointsource/flare.py index 83bf5e8..9442cff 100644 --- a/toise/figures/pointsource/flare.py +++ b/toise/figures/pointsource/flare.py @@ -1,6 +1,4 @@ -import re from collections import OrderedDict -from copy import copy from functools import partial import numpy as np diff --git a/toise/figures_of_merit.py b/toise/figures_of_merit.py index 5c8abe7..ebdca6b 100644 --- a/toise/figures_of_merit.py +++ b/toise/figures_of_merit.py @@ -1,8 +1,6 @@ from enum import Enum from functools import partial -import numpy - from . import ( diffuse, factory, @@ -11,7 +9,6 @@ radio_aeff_generation, surface_veto, ) -from .util import constants # Enum has no facility for setting docstrings inline. Do it by hand. TOT = Enum("TOT", ["ul", "dp", "fc"]) diff --git a/toise/plotting.py b/toise/plotting.py index 7ade9ca..c3d4851 100644 --- a/toise/plotting.py +++ b/toise/plotting.py @@ -1,6 +1,4 @@ import numpy -from matplotlib import cm -from matplotlib.colors import ListedColormap def stepped_path(edges, bins, cumulative=False): @@ -66,7 +64,6 @@ def plot_profile2d(profile, x, y, levels=[68, 90, 99], colors="k", **kwargs): def pretty_style(tex=True): - import matplotlib style = { "figure.figsize": (3.375, 3.375), diff --git a/toise/pointsource.py b/toise/pointsource.py index 8036e86..c31f0d1 100644 --- a/toise/pointsource.py +++ b/toise/pointsource.py @@ -1,4 +1,3 @@ -import itertools import logging from copy import copy from functools import partial @@ -8,7 +7,7 @@ from scipy import interpolate, optimize, stats from scipy.optimize import bisect -from .multillh import LLHEval, asimov_llh, get_expectations +from .multillh import asimov_llh, get_expectations from .util import * diff --git a/toise/radio_aeff_generation.py b/toise/radio_aeff_generation.py index dcdebe8..0573730 100644 --- a/toise/radio_aeff_generation.py +++ b/toise/radio_aeff_generation.py @@ -11,10 +11,6 @@ from .effective_areas import calculate_cascade_production_density, effective_area from .pointsource import is_zenith_weight -from .radio_muon_background import ( - get_muon_distribution, - get_tabulated_muon_distribution, -) from .radio_response import ( RadioEnergyResolution, RadioPointSpreadFunction, diff --git a/toise/radio_muon_background.py b/toise/radio_muon_background.py index ec26bed..ac8d523 100644 --- a/toise/radio_muon_background.py +++ b/toise/radio_muon_background.py @@ -1,8 +1,6 @@ import logging -import matplotlib.pyplot as plt import numpy as np -from matplotlib.colors import LogNorm logger = logging.getLogger("toise radio muon background") diff --git a/toise/radio_response.py b/toise/radio_response.py index 5b41eec..039fcba 100644 --- a/toise/radio_response.py +++ b/toise/radio_response.py @@ -1,9 +1,6 @@ import logging -import os import numpy as np -from scipy import interpolate -from scipy.special import erf from scipy.stats import cauchy, multivariate_normal from .energy_resolution import EnergySmearingMatrix diff --git a/toise/surfaces.py b/toise/surfaces.py index 072f4c9..536a5f6 100644 --- a/toise/surfaces.py +++ b/toise/surfaces.py @@ -493,7 +493,7 @@ def from_i3file(cls, fname, padding=0): :param fname: path to an I3 file containing at least a G frame :param padding: 
distance, in meters, to expand the surface in all directions
         """
-        from icecube import dataclasses, dataio, icetray
+        from icecube import dataclasses, dataio, icetray  # noqa: F401
 
         f = dataio.I3File(fname)
         fr = f.pop_frame(icetray.I3Frame.Geometry)
diff --git a/toise/util.py b/toise/util.py
index 4df6977..7af738e 100644
--- a/toise/util.py
+++ b/toise/util.py
@@ -1,7 +1,7 @@
 import os
 from functools import partial
-from os import environ, mkdir, path, unlink
-from subprocess import PIPE, check_call
+from os import path, unlink
+from subprocess import check_call
 
 import numpy as np
 from lazy_object_proxy import Proxy

From d3134e132e0cbeb66ce9dd5ace12db883bbff922 Mon Sep 17 00:00:00 2001
From: Jakob van Santen
Date: Wed, 25 Sep 2024 13:53:04 +0200
Subject: [PATCH 3/6] clean up * imports

---
 resources/docs/plots/ps_sensitivity_demo.py |  6 +-
 toise/cache.py                              |  2 +-
 toise/diffuse.py                            | 13 +--
 toise/effective_areas.py                    |  2 +-
 toise/fictive.py                            |  2 +-
 .../diffuse/plot_flux_results_for_jvs.py    |  7 --
 toise/figures/diffuse/single_power_law.py   |  2 +-
 toise/pointsource.py                        | 82 +++++++++----------
 toise/radio_aeff_generation.py              |  4 +-
 toise/surface_veto.py                       |  2 +-
 10 files changed, 53 insertions(+), 69 deletions(-)

diff --git a/resources/docs/plots/ps_sensitivity_demo.py b/resources/docs/plots/ps_sensitivity_demo.py
index 316f7d6..9639714 100644
--- a/resources/docs/plots/ps_sensitivity_demo.py
+++ b/resources/docs/plots/ps_sensitivity_demo.py
@@ -1,6 +1,8 @@
 import matplotlib.pyplot as plt
 import numpy as np
-from icecube.toise import (
+from matplotlib.gridspec import GridSpec
+
+from toise import (
     angular_resolution,
     diffuse,
     effective_areas,
@@ -8,8 +10,8 @@
     multillh,
     plotting,
     pointsource,
+    surface_veto,
 )
-from matplotlib.gridspec import GridSpec
 
 
 def create_aeff(
diff --git a/toise/cache.py b/toise/cache.py
index 86bf9ea..5cdd8b2 100644
--- a/toise/cache.py
+++ b/toise/cache.py
@@ -6,7 +6,7 @@
 from os import unlink
 from os.path import join
 
-from easy_cache import ecached
+from easy_cache import caches, ecached
 from easy_cache.abc import AbstractCacheInstance
 from easy_cache.core import DEFAULT_TIMEOUT, NOT_FOUND
 from photospline import SplineTable
diff --git a/toise/diffuse.py b/toise/diffuse.py
index fdde49f..72aea45 100644
--- a/toise/diffuse.py
+++ b/toise/diffuse.py
@@ -13,7 +13,7 @@
 from scipy.integrate import quad
 
 from .pointsource import is_zenith_weight
-from .util import *
+from .util import PDGCode, center, constants, data_dir
 
 try:
     from functools import lru_cache
@@ -1005,17 +1005,6 @@ def astro_gzk_flux(enu, norm=4.11e-6, spec=-2.46, cutoff=3e6):
     return astro_flux(enu, norm, spec, cutoff) + ahlers_flux(enu)
 
 
-def total_flux(enu):
-    """returns the all-flavor differential flux summed over atmos, astro, and ahlers gzk"""
-    ahlers_flux = AhlersGZKFlux()
-    return (
-        atmos_flux(enu, "honda2006")
-        + atmos_flux(enu, "sarcevic_std")
-        + astro_powerlaw_cutoff(enu)
-        + ahlers_flux(enu)
-    )
-
-
 class ArbitraryFlux(DiffuseAstro):
     def __init__(self, *args, **kwargs):
         """Base class for defining arbitrary diffuse fluxes which can be spectral-weighted"""
diff --git a/toise/effective_areas.py b/toise/effective_areas.py
index ca05d20..20b4218 100644
--- a/toise/effective_areas.py
+++ b/toise/effective_areas.py
@@ -14,7 +14,7 @@
 from .classification_efficiency import get_classification_efficiency
 from .energy_resolution import get_energy_resolution
 from .surfaces import get_fiducial_surface
-from .util import *
+from .util import center, data_dir, defer
 
 
 def load_jvs_mese():
diff --git a/toise/fictive.py
b/toise/fictive.py index d4e46e9..60889a5 100644 --- a/toise/fictive.py +++ b/toise/fictive.py @@ -5,7 +5,7 @@ from numpy import vectorize from toolz import memoize -from . import effective_areas, energy_resolution, surfaces +from . import diffuse, effective_areas, energy_resolution, pointsource, surfaces def make_cylinder(volume=1.0, aspect=1.0): diff --git a/toise/figures/diffuse/plot_flux_results_for_jvs.py b/toise/figures/diffuse/plot_flux_results_for_jvs.py index d77b4e5..e4b3a9f 100644 --- a/toise/figures/diffuse/plot_flux_results_for_jvs.py +++ b/toise/figures/diffuse/plot_flux_results_for_jvs.py @@ -2,13 +2,6 @@ import pylab as p -def xerr(bins, log=False): - binc = get_binc(bins, log) - l = binc - bins[:-1] - h = bins[1:] - binc - return (l, h) - - def create_figure(format=None): figsize = (5.5, 3.5) if format == "wide" else (4.5, 3.5) fig = p.figure(figsize=figsize) diff --git a/toise/figures/diffuse/single_power_law.py b/toise/figures/diffuse/single_power_law.py index c2a8e61..d62603a 100644 --- a/toise/figures/diffuse/single_power_law.py +++ b/toise/figures/diffuse/single_power_law.py @@ -17,7 +17,7 @@ from toise.cache import lru_cache from toise.figures import figure, figure_data, table -from .flavor import detector_label, extract_ts +from .flavor import detector_label, extract_ts, subdivide def make_components(aeffs, emin=1e2, emax=1e11): diff --git a/toise/pointsource.py b/toise/pointsource.py index c31f0d1..f931a73 100644 --- a/toise/pointsource.py +++ b/toise/pointsource.py @@ -3,7 +3,7 @@ from functools import partial from io import StringIO -import numpy +import numpy as np from scipy import interpolate, optimize, stats from scipy.optimize import bisect @@ -13,9 +13,9 @@ def is_zenith_weight(zenith_weight, aeff): zenith_dim = aeff.dimensions.index("true_zenith_band") - # print issubclass(numpy.asarray(zenith_weight).dtype.type, numpy.floating), len(zenith_weight), aeff.values.shape[zenith_dim] + # print issubclass(np.asarray(zenith_weight).dtype.type, np.floating), len(zenith_weight), aeff.values.shape[zenith_dim] return ( - issubclass(numpy.asarray(zenith_weight).dtype.type, numpy.floating) + issubclass(np.asarray(zenith_weight).dtype.type, np.floating) and len(zenith_weight) == aeff.values.shape[zenith_dim] ) @@ -53,7 +53,7 @@ def __init__(self, effective_area, fluence, zenith_selection, with_energy=True): # 1/yr rate = fluence[tuple(expand)] * (effective_area * 1e4) - assert numpy.isfinite(rate).all() + assert np.isfinite(rate).all() self._use_energies = with_energy @@ -92,7 +92,7 @@ def expectations(self, **kwargs): self._last_expectations = total return self._last_expectations - def get_chunk(self, emin=-numpy.inf, emax=numpy.inf): + def get_chunk(self, emin=-np.inf, emax=np.inf): ebins = self._edges[0] start, stop = ebins.searchsorted((emin, emax)) start = max((0, start - 1)) @@ -106,7 +106,7 @@ def get_chunk(self, emin=-numpy.inf, emax=numpy.inf): return chunk def differential_chunks( - self, decades=1, emin=-numpy.inf, emax=numpy.inf, exclusive=False + self, decades=1, emin=-np.inf, emax=np.inf, exclusive=False ): """ Yield copies of self with the neutrino spectrum restricted to *decade* @@ -114,7 +114,7 @@ def differential_chunks( """ # now, sum over decades in neutrino energy ebins = self._edges[0] - loge = numpy.log10(ebins) + loge = np.log10(ebins) bin_range = int(round(decades / (loge[1] - loge[0]))) # when emin is "equal" to an edge in ebins @@ -157,7 +157,7 @@ def __init__( livetime, zenith_bin, emin=0, - emax=numpy.inf, + emax=np.inf, with_energy=True, ): 
# reference flux is E^2 Phi = 1e-12 TeV cm^-2 s^-1 @@ -317,7 +317,7 @@ def draw_source_strengths(n_sources): @staticmethod def draw_sindec(n_sources): - return numpy.random.uniform(-1, 1, n_sources) + return np.random.uniform(-1, 1, n_sources) def __init__(self, effective_area, livetime, fluxes, sindecs, with_energy=True): """ @@ -333,8 +333,8 @@ def __init__(self, effective_area, livetime, fluxes, sindecs, with_energy=True): # scatter sources through the zenith bands isotropically zenith_bins = effective_area.bin_edges[1] - self.sources_per_band = numpy.histogram(-sindecs, bins=zenith_bins)[0] - self.flux_per_band = numpy.histogram( + self.sources_per_band = np.histogram(-sindecs, bins=zenith_bins)[0] + self.flux_per_band = np.histogram( -sindecs, bins=zenith_bins, weights=fluxes )[0] @@ -354,7 +354,7 @@ def intflux(e, gamma): * 24 * 3600 ) - fluence = numpy.outer(fluence, self.flux_per_band) + fluence = np.outer(fluence, self.flux_per_band) super(StackedPopulation, self).__init__( effective_area, fluence, slice(None), with_energy @@ -369,29 +369,29 @@ def source_to_local_zenith(declination, latitude, ct_bins): :param latitude: observer latitude in degrees :param ct_bins: edges of bins in cos(zenith), in ascending order """ - dec = numpy.radians(declination) - lat = numpy.radians(latitude) + dec = np.radians(declination) + lat = np.radians(latitude) def offset(hour_angle, ct=0): "difference between source elevation and bin edge at given hour angle" return ( - numpy.cos(hour_angle) * numpy.cos(dec) * numpy.cos(lat) - + numpy.sin(dec) * numpy.sin(lat) + np.cos(hour_angle) * np.cos(dec) * np.cos(lat) + + np.sin(dec) * np.sin(lat) ) - ct # find minimum and maximum elevation - lo = numpy.searchsorted(ct_bins[1:], offset(numpy.pi)) - hi = numpy.searchsorted(ct_bins[:-1], offset(0)) - hour_angle = numpy.empty(len(ct_bins)) + lo = np.searchsorted(ct_bins[1:], offset(np.pi)) + hi = np.searchsorted(ct_bins[:-1], offset(0)) + hour_angle = np.empty(len(ct_bins)) # source never crosses the band - hour_angle[: lo + 1] = numpy.pi + hour_angle[: lo + 1] = np.pi hour_angle[hi:] = 0 # source enters or exits hour_angle[lo + 1 : hi] = list( - map(partial(bisect, offset, 0, numpy.pi), ct_bins[lo + 1 : hi]) + map(partial(bisect, offset, 0, np.pi), ct_bins[lo + 1 : hi]) ) - return abs(numpy.diff(hour_angle)) / numpy.pi + return abs(np.diff(hour_angle)) / np.pi def nevents(llh, **hypo): @@ -404,7 +404,7 @@ def nevents(llh, **hypo): hypo[k] = llh.components[k].seed else: hypo[k] = 1 - return sum(map(numpy.sum, llh.expectations(**hypo).values())) + return sum(map(np.sum, llh.expectations(**hypo).values())) def discovery_potential( @@ -459,14 +459,14 @@ def f(flux_norm): total = nevents(allh, ps=1, **fixed) nb = nevents(allh, ps=0, **fixed) ns = total - nb - baseline = min((1000, numpy.sqrt(critical_ts) / (ns / numpy.sqrt(nb)))) / 10 + baseline = min((1000, np.sqrt(critical_ts) / (ns / np.sqrt(nb)))) / 10 baseline = max( - ((numpy.sqrt(critical_ts) / (ns / numpy.sqrt(nb))) / 10, 0.3 / ns) + ((np.sqrt(critical_ts) / (ns / np.sqrt(nb))) / 10, 0.3 / ns) ) # logging.getLogger().warn('total: %.2g ns: %.2g nb: %.2g baseline norm: %.2g ts: %.2g' % (total, ns, nb, baseline, ts(baseline))) # baseline = 1000 - if not numpy.isfinite(baseline): - return numpy.inf, numpy.inf, numpy.inf + if not np.isfinite(baseline): + return np.inf, np.inf, np.inf else: # actual = optimize.bisect(f, 0, baseline, xtol=baseline*1e-2) actual = optimize.fsolve(f, baseline, xtol=tolerance, factor=1, epsfcn=1) @@ -489,7 +489,7 @@ def 
events_above(observables, edges, ecutoff): # print(np.shape(observables[k])) if len(edge_k) > 2: edge_k = [edge_k[1], edge_k[2]] - cut = numpy.where(edge_k[1][1:] > ecutoff)[0][0] + cut = np.where(edge_k[1][1:] > ecutoff)[0][0] n += observables[k].sum(axis=0)[cut:].sum() return n @@ -526,8 +526,8 @@ def fc_upper_limit(point_source, diffuse_components, ecutoff=0, cl=0.9, **fixed) # Average 90% upper limit for known background # taken from Table XII of Feldman & Cousins (1998) fc_upper_limit.table = interpolate.interp1d( - numpy.loadtxt(StringIO("0 0.5 1 1.5 2 2.5 3 3.5 4 5 6 7 8 9 10 11 12 13 14 15")), - numpy.loadtxt( + np.loadtxt(StringIO("0 0.5 1 1.5 2 2.5 3 3.5 4 5 6 7 8 9 10 11 12 13 14 15")), + np.loadtxt( StringIO( "2.44 2.86 3.28 3.62 3.94 4.20 4.42 4.63 4.83 5.18 5.53 5.90 6.18 6.49 6.76 7.02 7.28 7.51 7.75 7.99" ) @@ -577,9 +577,9 @@ def f(flux_norm): total = nevents(allh, ps=1, **fixed) nb = nevents(allh, ps=0, **fixed) ns = total - nb - baseline = min((1000, numpy.sqrt(critical_ts) / (ns / numpy.sqrt(nb)))) / 10 + baseline = min((1000, np.sqrt(critical_ts) / (ns / np.sqrt(nb)))) / 10 baseline = max( - ((numpy.sqrt(critical_ts) / (ns / numpy.sqrt(nb))) / 10, 0.3 / ns) + ((np.sqrt(critical_ts) / (ns / np.sqrt(nb))) / 10, 0.3 / ns) ) logging.getLogger().debug( "total: %.2g ns: %.2g nb: %.2g baseline norm: %.2g" @@ -587,8 +587,8 @@ def f(flux_norm): ) # logging.getLogger().warn('total: %.2g ns: %.2g nb: %.2g baseline norm: %.2g ts: %.2g' % (total, ns, nb, baseline, ts(baseline))) # baseline = 1000 - if not numpy.isfinite(baseline): - return numpy.inf, numpy.inf, numpy.inf + if not np.isfinite(baseline): + return np.inf, np.inf, np.inf else: # actual = optimize.bisect(f, 0, baseline, xtol=baseline*1e-2) actual = optimize.fsolve(f, baseline, xtol=tolerance, factor=1, epsfcn=1) @@ -608,8 +608,8 @@ def differential_discovery_potential( baseline=None, tolerance=1e-2, decades=0.5, - emin=-numpy.inf, - emax=numpy.inf, + emin=-np.inf, + emax=np.inf, **fixed ): """ @@ -644,8 +644,8 @@ def differential_upper_limit( baseline=None, tolerance=1e-2, decades=0.5, - emin=-numpy.inf, - emax=numpy.inf, + emin=-np.inf, + emax=np.inf, **fixed ): """ @@ -679,8 +679,8 @@ def differential_fc_upper_limit( ecutoff=0, cl=0.9, decades=0.5, - emin=-numpy.inf, - emax=numpy.inf, + emin=-np.inf, + emax=np.inf, **fixed ): energies = [] @@ -692,4 +692,4 @@ def differential_fc_upper_limit( sensitivities.append( fc_upper_limit(pschunk, diffuse_components, ecutoff, cl, **fixed) ) - return numpy.asarray(energies), numpy.asarray(sensitivities) + return np.asarray(energies), np.asarray(sensitivities) diff --git a/toise/radio_aeff_generation.py b/toise/radio_aeff_generation.py index 0573730..b419481 100644 --- a/toise/radio_aeff_generation.py +++ b/toise/radio_aeff_generation.py @@ -16,7 +16,7 @@ RadioPointSpreadFunction, radio_analysis_efficiency, ) -from .util import * +from .util import center, constants, data_dir logger = logging.getLogger("toise aeff calculation for radio") @@ -707,7 +707,7 @@ def combine_aeffs( aeff = copy.deepcopy(aeff1) def overlap(E, logE, ovl): - interpolator = interpolate.interp1d(loge, ovl, fill_value="extrapolate") + interpolator = interpolate.interp1d(logE, ovl, fill_value="extrapolate") interpolation_result = np.maximum(interpolator(np.log10(E)), 0) return (interpolation_result + 1.0) ** -1 diff --git a/toise/surface_veto.py b/toise/surface_veto.py index 4871ea5..3a70aa2 100644 --- a/toise/surface_veto.py +++ b/toise/surface_veto.py @@ -12,7 +12,7 @@ from . 
import surfaces from .pointsource import is_zenith_weight -from .util import * +from .util import center, constants, data_dir # hobo-costing! From dce489c1542efbc4c329f3be5ee4e3728e43690e Mon Sep 17 00:00:00 2001 From: Jakob van Santen Date: Wed, 25 Sep 2024 14:00:28 +0200 Subject: [PATCH 4/6] numpy -> np see https://github.com/icecube/toise/pull/46 --- .gitignore | 1 + toise/angular_resolution.py | 58 ++--- toise/effective_areas.py | 239 +++++++++--------- toise/energy_resolution.py | 32 +-- .../externals/AtmosphericSelfVeto/__init__.py | 52 ++-- .../externals/AtmosphericSelfVeto/selfveto.py | 118 ++++----- toise/factory.py | 38 +-- toise/fictive.py | 29 ++- toise/figures/diffuse/dnnc.py | 3 +- toise/figures/diffuse/spectrum.py | 8 +- toise/multillh.py | 44 ++-- toise/plotting.py | 28 +- toise/surface_veto.py | 78 +++--- toise/surfaces.py | 176 ++++++------- 14 files changed, 453 insertions(+), 451 deletions(-) diff --git a/.gitignore b/.gitignore index 563212d..a09ab5d 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ notebooks/**/*.pdf notebooks/**/*.fits notebooks/**/.ipynb_checkpoints .vscode +env* diff --git a/toise/angular_resolution.py b/toise/angular_resolution.py index b8f4a75..a85c3d6 100644 --- a/toise/angular_resolution.py +++ b/toise/angular_resolution.py @@ -1,6 +1,6 @@ import os -import numpy +import numpy as np from scipy import interpolate, stats from .util import center, data_dir @@ -13,7 +13,7 @@ def get_angular_resolution( return FictiveCascadePointSpreadFunction() elif channel == "radio": return FictiveCascadePointSpreadFunction( - lower_limit=numpy.radians(2), crossover_energy=0 + lower_limit=np.radians(2), crossover_energy=0 ) if geometry == "Fictive": return FictiveKingPointSpreadFunction() @@ -32,10 +32,10 @@ class AngularResolution(object): def __init__( self, fname=os.path.join(data_dir, "veto", "aachen_angular_resolution.npz") ): - f = numpy.load(fname) + f = np.load(fname) xd = f["log10_energy"] yd = f["cos_theta"] - x, y = numpy.meshgrid(xd, yd) + x, y = np.meshgrid(xd, yd) zd = f["median_opening_angle"] # extrapolate with a constant zd[-8:, :] = zd[-9, :] @@ -45,12 +45,12 @@ def __init__( ) def median_opening_angle(self, energy, cos_theta): - loge, ct = numpy.broadcast_arrays(numpy.log10(energy), cos_theta) + loge, ct = np.broadcast_arrays(np.log10(energy), cos_theta) mu_reco = self._spline.ev(loge.flatten(), ct.flatten()).reshape(loge.shape) # dirty hack: add the muon/neutrino opening angle in quadtrature - return numpy.radians(numpy.sqrt(mu_reco**2 + 0.7**2 / (10 ** (loge - 3)))) + return np.radians(np.sqrt(mu_reco**2 + 0.7**2 / (10 ** (loge - 3)))) class PointSpreadFunction(object): @@ -72,16 +72,16 @@ def __init__(self, fname="aachen_psf.fits", scale=1.0): self._scale = scale def __call__(self, psi, energy, cos_theta): - psi, loge, ct = numpy.broadcast_arrays( - numpy.degrees(psi) / self._scale, numpy.log10(energy), cos_theta + psi, loge, ct = np.broadcast_arrays( + np.degrees(psi) / self._scale, np.log10(energy), cos_theta ) - loge = numpy.clip(loge, *self._loge_extents) + loge = np.clip(loge, *self._loge_extents) if self._mirror: - ct = -numpy.abs(ct) - ct = numpy.clip(ct, *self._ct_extents) + ct = -np.abs(ct) + ct = np.clip(ct, *self._ct_extents) evaluates = self._spline.evaluate_simple([loge, ct, psi]) - return numpy.where(numpy.isfinite(evaluates), evaluates, 1.0) + return np.where(np.isfinite(evaluates), evaluates, 1.0) class _king_gen(stats.rv_continuous): @@ -122,10 +122,10 @@ class _fm_gen(stats.rv_continuous): """ def _argcheck(self, 
kappa): - return numpy.all(kappa >= 0) + return np.all(kappa >= 0) def _pdf(self, x, kappa): - return numpy.exp(kappa * x) * kappa / (2 * numpy.sinh(kappa)) + return np.exp(kappa * x) * kappa / (2 * np.sinh(kappa)) fisher = _fm_gen(name="fisher", a=-1, b=1) @@ -136,17 +136,17 @@ def __init__(self, scale=1.0): self._scale = scale def get_quantile(self, p, energy, cos_theta): - p, loge, ct = numpy.broadcast_arrays(p, numpy.log10(energy), cos_theta) + p, loge, ct = np.broadcast_arrays(p, np.log10(energy), cos_theta) if hasattr(self._scale, "__call__"): scale = self._scale(10**loge) else: scale = self._scale sigma, gamma = self.get_params(loge, ct) - return numpy.radians(king.ppf(p, sigma, gamma)) / scale + return np.radians(king.ppf(p, sigma, gamma)) / scale def __call__(self, psi, energy, cos_theta): - psi, loge, ct = numpy.broadcast_arrays( - numpy.degrees(psi), numpy.log10(energy), cos_theta + psi, loge, ct = np.broadcast_arrays( + np.degrees(psi), np.log10(energy), cos_theta ) if hasattr(self._scale, "__call__"): scale = self._scale(10**loge) @@ -234,14 +234,14 @@ def get_params(self, log_energy, cos_theta): """ Interpolate for sigma and gamma """ - with numpy.errstate(invalid="ignore"): - angular_resolution_scale = numpy.where( + with np.errstate(invalid="ignore"): + angular_resolution_scale = np.where( log_energy < 6, 0.05 * (6 - log_energy) ** 2.5, 0 ) # dip at the horizon, improvement with energy up to 1e6 sigma = 10 ** ( - numpy.where( - numpy.abs(cos_theta) < 0.15, + np.where( + np.abs(cos_theta) < 0.15, (cos_theta * 3) ** 2 - 1.2, (cos_theta / 1.05) ** 2 - 1.0, ) @@ -253,17 +253,17 @@ def get_params(self, log_energy, cos_theta): class FictiveCascadePointSpreadFunction(object): - def __init__(self, lower_limit=numpy.radians(5), crossover_energy=1e6): + def __init__(self, lower_limit=np.radians(5), crossover_energy=1e6): self._b = lower_limit - self._a = self._b * numpy.sqrt(crossover_energy) + self._a = self._b * np.sqrt(crossover_energy) def get_params(self, loge, cos_theta): - return self._a / numpy.sqrt(10**loge) + self._b + return self._a / np.sqrt(10**loge) + self._b def __call__(self, psi, energy, cos_theta): - psi, energy, cos_theta = numpy.broadcast_arrays(psi, energy, cos_theta) - sigma = self._a / numpy.sqrt(energy) + self._b + psi, energy, cos_theta = np.broadcast_arrays(psi, energy, cos_theta) + sigma = self._a / np.sqrt(energy) + self._b - evaluates = 1 - numpy.exp(-(psi**2) / (2 * sigma**2)) - return numpy.where(numpy.isfinite(evaluates), evaluates, 1.0) + evaluates = 1 - np.exp(-(psi**2) / (2 * sigma**2)) + return np.where(np.isfinite(evaluates), evaluates, 1.0) diff --git a/toise/effective_areas.py b/toise/effective_areas.py index 20b4218..8be087e 100644 --- a/toise/effective_areas.py +++ b/toise/effective_areas.py @@ -5,7 +5,6 @@ import dashi import healpy -import numpy import numpy as np import tables from scipy import interpolate @@ -36,17 +35,17 @@ def load_jvs_mese(): for j, channel in enumerate(("cascade", "track")): for interaction in "cc", "nc", "gr": try: - data = numpy.loadtxt(base.format(**locals())) + data = np.loadtxt(base.format(**locals())) except: pass if shape is None: edges = [] for k in range(4): - lo = numpy.unique(data[:, k * 2]) - hi = numpy.unique(data[:, k * 2 + 1]) - edges.append(numpy.concatenate((lo, [hi[-1]]))) + lo = np.unique(data[:, k * 2]) + hi = np.unique(data[:, k * 2 + 1]) + edges.append(np.concatenate((lo, [hi[-1]]))) shape = [len(e) - 1 for e in reversed(edges)] - aeff = numpy.zeros([6] + list(reversed(shape)) + [2]) + aeff = 
np.zeros([6] + list(reversed(shape)) + [2]) aeff[i, ..., j] += data[:, -2].reshape(shape).T return edges, aeff @@ -59,7 +58,7 @@ def __init__( if not filename.startswith("/"): filename = os.path.join(data_dir, "selection_efficiency", filename) if filename.endswith(".npz"): - f = numpy.load(filename) + f = np.load(filename) loge = f["log_energy"] eff = f["efficiency"] @@ -75,13 +74,13 @@ def __init__( detected.project([0]), generated.project([0]) ) - edges = numpy.concatenate((sp.x - sp.xerr, [sp.x[-1] + sp.xerr[-1]])) - loge = 0.5 * (numpy.log10(edges[1:]) + numpy.log10(edges[:-1])) + edges = np.concatenate((sp.x - sp.xerr, [sp.x[-1] + sp.xerr[-1]])) + loge = 0.5 * (np.log10(edges[1:]) + np.log10(edges[:-1])) - loge = numpy.concatenate((loge, loge + loge[-1] + numpy.diff(loge)[0])) - v = numpy.concatenate((sp.y, sp.y[-5:].mean() * numpy.ones(sp.y.size))) - w = 1 / numpy.concatenate((sp.yerr, 1e-2 * numpy.ones(sp.yerr.size))) - w[~numpy.isfinite(w)] = 1 + loge = np.concatenate((loge, loge + loge[-1] + np.diff(loge)[0])) + v = np.concatenate((sp.y, sp.y[-5:].mean() * np.ones(sp.y.size))) + w = 1 / np.concatenate((sp.yerr, 1e-2 * np.ones(sp.yerr.size))) + w[~np.isfinite(w)] = 1 self.interp = interpolate.UnivariateSpline(loge, v, w=w) if energy_threshold is None: @@ -90,9 +89,9 @@ def __init__( self.energy_threshold = energy_threshold def __call__(self, muon_energy, cos_theta): - return numpy.where( + return np.where( muon_energy >= self.energy_threshold, - numpy.clip(self.interp(numpy.log10(muon_energy)), 0, 1), + np.clip(self.interp(np.log10(muon_energy)), 0, 1), 0.0, ) @@ -117,16 +116,16 @@ def _eval(self, loge, cos_theta): return self._spline.eval([loge, cos_theta]) def __call__(self, muon_energy, cos_theta): - loge, cos_theta = numpy.broadcast_arrays(numpy.log10(muon_energy), cos_theta) + loge, cos_theta = np.broadcast_arrays(np.log10(muon_energy), cos_theta) if hasattr(self._scale, "__call__"): scale = self._scale(10**loge) else: scale = self._scale - return numpy.where( + return np.where( muon_energy >= self.energy_threshold, - numpy.clip( + np.clip( scale - * self._spline.evaluate_simple([numpy.log10(muon_energy), cos_theta]), + * self._spline.evaluate_simple([np.log10(muon_energy), cos_theta]), 0, 1, ), @@ -180,7 +179,7 @@ def __call__(self, deposited_energy, cos_theta): return ( 0.75 * self._efficiency - / (1 + numpy.exp(-2.5 * numpy.log(deposited_energy / self._threshold))) + / (1 + np.exp(-2.5 * np.log(deposited_energy / self._threshold))) ) @@ -225,12 +224,12 @@ class StepFunction(VetoThreshold): """ def __init__(self, threshold=0, maximum_inclination=60): - self.max_inclination = numpy.cos(numpy.radians(maximum_inclination)) + self.max_inclination = np.cos(np.radians(maximum_inclination)) self.threshold = threshold def accept(self, e_mu, cos_theta=1.0): - return numpy.where( + return np.where( cos_theta > 0.05, (e_mu > self.threshold) & (cos_theta >= self.max_inclination), True, @@ -240,7 +239,7 @@ def veto(self, e_mu, cos_theta=1.0): """ Return True if an atmospheric event would be rejected by the veto """ - return numpy.where( + return np.where( cos_theta > 0.05, (e_mu > self.threshold) & (cos_theta >= self.max_inclination), False, @@ -280,28 +279,28 @@ def _interpolate_production_efficiency( with tables.open_file(os.path.join(data_dir, "cross_sections", fname)) as hdf: for family, anti in itertools.product(flavors, ("", "_bar")): h = dashi.histload(hdf, "/nu" + family + anti) - edges = [numpy.log10(h.binedges[0]), h.binedges[1]] + list( - map(numpy.log10, h.binedges[2:]) + 
edges = [np.log10(h.binedges[0]), h.binedges[1]] + list( + map(np.log10, h.binedges[2:]) ) centers = list(map(center, edges)) newcenters = [ centers[0], - numpy.clip(cos_zenith, centers[1].min(), centers[1].max()), + np.clip(cos_zenith, centers[1].min(), centers[1].max()), ] + centers[2:] - with numpy.errstate(divide="ignore"): - y = numpy.where( - ~(h.bincontent <= 0), numpy.log10(h.bincontent), -numpy.inf + with np.errstate(divide="ignore"): + y = np.where( + ~(h.bincontent <= 0), np.log10(h.bincontent), -np.inf ) - assert not numpy.isnan(y).any() + assert not np.isnan(y).any() interpolant = interpolate.RegularGridInterpolator( - centers, y, bounds_error=True, fill_value=-numpy.inf + centers, y, bounds_error=True, fill_value=-np.inf ) - xi = numpy.vstack( - [x.flatten() for x in numpy.meshgrid(*newcenters, indexing="ij")] + xi = np.vstack( + [x.flatten() for x in np.meshgrid(*newcenters, indexing="ij")] ).T - assert numpy.isfinite(xi).all() + assert np.isfinite(xi).all() # NB: we use nearest-neighbor interpolation here because # n-dimensional linear interpolation has the unfortunate side-effect @@ -311,15 +310,15 @@ def _interpolate_production_efficiency( # underestimates the muon flux from steeply falling neutrino spectra. v = interpolant(xi, "nearest").reshape([x.size for x in newcenters]) - v[~numpy.isfinite(v)] = -numpy.inf + v[~np.isfinite(v)] = -np.inf - assert not numpy.isnan(v).any() + assert not np.isnan(v).any() efficiencies.append(10**v) return (h.binedges[0], None,) + tuple( h.binedges[2:] - ), numpy.array(efficiencies) + ), np.array(efficiencies) def _ring_range(nside): @@ -330,8 +329,8 @@ def _ring_range(nside): # get cos(colatitude) at the center of each ring, and invert to get # cos(zenith). This assumes that the underlying map is in equatorial # coordinates. - centers = -healpy.ringinfo(nside, numpy.arange(1, 4 * nside))[2] - return numpy.concatenate(([-1], 0.5 * (centers[1:] + centers[:-1]), [1])) + centers = -healpy.ringinfo(nside, np.arange(1, 4 * nside))[2] + return np.concatenate(([-1], 0.5 * (centers[1:] + centers[:-1]), [1])) def get_muon_production_efficiency(ct_edges=None): @@ -347,7 +346,7 @@ def get_muon_production_efficiency(ct_edges=None): with the same axes. """ if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) @@ -369,7 +368,7 @@ def get_starting_event_efficiency(ct_edges=None): with the same axes. """ if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) @@ -393,7 +392,7 @@ def get_cascade_production_density(ct_edges=None): with the same axes. """ if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) @@ -426,7 +425,7 @@ def get_doublebang_production_density(ct_edges=None): with the same axes. 
""" if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) @@ -481,7 +480,7 @@ def restrict_energy_range(self, emin, emax): # find bins with lower edge >= emin and upper edge <= emax mask = (self.bin_edges[0][1:] <= emax) & (self.bin_edges[0][:-1] >= emin) - idx = numpy.arange(mask.size)[mask][[0, -1]] + idx = np.arange(mask.size)[mask][[0, -1]] reduced = copy.copy(self) reduced.bin_edges = list(reduced.bin_edges) @@ -496,7 +495,7 @@ def truncate_energy_range(self, emin, emax): # find bins with lower edge >= emin and upper edge <= emax mask = (self.bin_edges[0][1:] <= emax) & (self.bin_edges[0][:-1] >= emin) - idx = numpy.arange(mask.size)[mask][[0, -1]] + idx = np.arange(mask.size)[mask][[0, -1]] reduced = copy.copy(self) @@ -526,18 +525,18 @@ def nring(self): @property def ring_repeat_pattern(self): assert self.is_healpix - return healpy.ringinfo(self.nside, numpy.arange(self.nring) + 1)[1] + return healpy.ringinfo(self.nside, np.arange(self.nring) + 1)[1] def eval_psf(point_spread_function, mu_energy, ct, psi_bins): - ct, mu_energy, psi_bins = numpy.meshgrid(ct, mu_energy, psi_bins, indexing="ij") + ct, mu_energy, psi_bins = np.meshgrid(ct, mu_energy, psi_bins, indexing="ij") return point_spread_function(psi_bins, mu_energy, ct) def create_bundle_aeff( energy_resolution=defer(get_energy_resolution, "IceCube"), - veto_efficiency: VetoThreshold = StepFunction(numpy.inf), - veto_coverage=lambda ct: numpy.zeros(len(ct) - 1), + veto_efficiency: VetoThreshold = StepFunction(np.inf), + veto_coverage=lambda ct: np.zeros(len(ct) - 1), selection_efficiency=defer(MuonSelectionEfficiency), surface=defer(get_fiducial_surface, "IceCube"), cos_theta=None, @@ -584,21 +583,21 @@ def create_bundle_aeff( # Step 2: Geometric muon effective area (no selection effects yet) # NB: assumes cylindrical symmetry. 
- aeff = numpy.vectorize(surface.average_area)(cos_theta[:-1], cos_theta[1:])[None, :] + aeff = np.vectorize(surface.average_area)(cos_theta[:-1], cos_theta[1:])[None, :] # Step 3: apply selection efficiency - # selection_efficiency = selection_efficiency(*numpy.meshgrid(center(e_mu), center(cos_theta), indexing='ij')).T + # selection_efficiency = selection_efficiency(*np.meshgrid(center(e_mu), center(cos_theta), indexing='ij')).T selection_efficiency = selection_efficiency( - *numpy.meshgrid(e_mu[1:], center(cos_theta), indexing="ij") + *np.meshgrid(e_mu[1:], center(cos_theta), indexing="ij") ) aeff = aeff * selection_efficiency # Step 4: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_mu, e_mu) - aeff = numpy.apply_along_axis( - numpy.inner, + aeff = np.apply_along_axis( + np.inner, 2, - aeff[..., None] * numpy.eye(response.shape[0])[:, None, :], + aeff[..., None] * np.eye(response.shape[0])[:, None, :], response, ) @@ -609,7 +608,7 @@ def create_bundle_aeff( # Step 5.2: apply suppression from surface veto veto_suppression = 1 - veto_efficiency.accept( - *numpy.meshgrid(center(e_mu), center(cos_theta), indexing="ij") + *np.meshgrid(center(e_mu), center(cos_theta), indexing="ij") ) # combine into an energy- and zenith-dependent acceptance for muon bundles @@ -630,11 +629,11 @@ def create_bundle_aeff( def create_throughgoing_aeff( energy_resolution=defer(get_energy_resolution, "IceCube"), - veto_coverage=lambda ct: numpy.zeros(len(ct) - 1), + veto_coverage=lambda ct: np.zeros(len(ct) - 1), selection_efficiency=defer(MuonSelectionEfficiency), surface=defer(get_fiducial_surface, "IceCube"), psf=defer(get_angular_resolution, "IceCube"), - psi_bins=numpy.sqrt(numpy.linspace(0, numpy.radians(2) ** 2, 100)), + psi_bins=np.sqrt(np.linspace(0, np.radians(2) ** 2, 100)), cos_theta=None, ): """ @@ -686,37 +685,37 @@ def create_throughgoing_aeff( # Step 2: Geometric muon effective area (no selection effects yet) # NB: assumes cylindrical symmetry. 
aeff = efficiency * ( - numpy.vectorize(surface.average_area)(cos_theta[:-1], cos_theta[1:])[ + np.vectorize(surface.average_area)(cos_theta[:-1], cos_theta[1:])[ None, None, :, None ] ) # Step 3: apply selection efficiency - # selection_efficiency = selection_efficiency(*numpy.meshgrid(center(e_mu), center(cos_theta), indexing='ij')).T + # selection_efficiency = selection_efficiency(*np.meshgrid(center(e_mu), center(cos_theta), indexing='ij')).T selection_efficiency = selection_efficiency( - *numpy.meshgrid(e_mu[1:], center(cos_theta), indexing="ij") + *np.meshgrid(e_mu[1:], center(cos_theta), indexing="ij") ).T # Explicit energy threshold disabled for now; let muon background take over # at whatever energy it drowns out the signal - # selection_efficiency *= energy_threshold.accept(*numpy.meshgrid(e_mu[1:], center(cos_theta), indexing='ij')).T + # selection_efficiency *= energy_threshold.accept(*np.meshgrid(e_mu[1:], center(cos_theta), indexing='ij')).T aeff *= selection_efficiency[None, None, :, :] # Step 4: apply smearing for angular resolution # Add an overflow bin if none present - if numpy.isfinite(psi_bins[-1]): - psi_bins = numpy.concatenate((psi_bins, [numpy.inf])) + if np.isfinite(psi_bins[-1]): + psi_bins = np.concatenate((psi_bins, [np.inf])) cdf = eval_psf(psf, center(e_mu), center(cos_theta), psi_bins[:-1]) - total_aeff = numpy.zeros((6,) + aeff.shape[1:] + (psi_bins.size - 1,)) + total_aeff = np.zeros((6,) + aeff.shape[1:] + (psi_bins.size - 1,)) # expand differential contributions along the opening-angle axis - total_aeff[2:4, ..., :-1] = aeff[..., None] * numpy.diff(cdf, axis=2)[None, ...] + total_aeff[2:4, ..., :-1] = aeff[..., None] * np.diff(cdf, axis=2)[None, ...] # put the remainder in the overflow bin total_aeff[2:4, ..., -1] = aeff * (1 - cdf[..., -1])[None, None, ...] # Step 5: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_mu, e_mu) - total_aeff = numpy.apply_along_axis(numpy.inner, 3, total_aeff, response) + total_aeff = np.apply_along_axis(np.inner, 3, total_aeff, response) # Step 6: split the effective area in into a portion shadowed by the # surface veto (if it exists) and one that is not @@ -736,12 +735,12 @@ def create_throughgoing_aeff( def create_cascade_aeff( channel="cascade", energy_resolution=defer(get_energy_resolution, channel="cascade"), - energy_threshold=StepFunction(numpy.inf), - veto_coverage=lambda ct: numpy.zeros(len(ct) - 1), + energy_threshold=StepFunction(np.inf), + veto_coverage=lambda ct: np.zeros(len(ct) - 1), selection_efficiency=defer(HECascadeSelectionEfficiency), surface=defer(get_fiducial_surface, "IceCube"), psf=defer(get_angular_resolution, "IceCube", channel="cascade"), - psi_bins=numpy.sqrt(numpy.linspace(0, numpy.radians(20) ** 2, 10)), + psi_bins=np.sqrt(np.linspace(0, np.radians(20) ** 2, 10)), cos_theta=None, ): """ @@ -776,25 +775,25 @@ def create_cascade_aeff( # Step 3: apply selection efficiency selection_efficiency = selection_efficiency( - *numpy.meshgrid(e_shower[1:], center(cos_theta), indexing="ij") + *np.meshgrid(e_shower[1:], center(cos_theta), indexing="ij") ).T aeff *= selection_efficiency[None, None, ...] 
# Step 4: apply smearing for angular resolution # Add an overflow bin if none present - if numpy.isfinite(psi_bins[-1]): - psi_bins = numpy.concatenate((psi_bins, [numpy.inf])) + if np.isfinite(psi_bins[-1]): + psi_bins = np.concatenate((psi_bins, [np.inf])) cdf = eval_psf(psf, center(e_shower), center(cos_theta), psi_bins[:-1]) - total_aeff = numpy.empty(aeff.shape + (psi_bins.size - 1,)) + total_aeff = np.empty(aeff.shape + (psi_bins.size - 1,)) # expand differential contributions along the opening-angle axis - total_aeff[..., :-1] = aeff[..., None] * numpy.diff(cdf, axis=2)[None, ...] + total_aeff[..., :-1] = aeff[..., None] * np.diff(cdf, axis=2)[None, ...] # put the remainder in the overflow bin total_aeff[..., -1] = aeff * (1 - cdf[..., -1])[None, None, ...] # Step 5: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_shower, e_shower) - total_aeff = numpy.apply_along_axis(numpy.inner, 3, total_aeff, response) + total_aeff = np.apply_along_axis(np.inner, 3, total_aeff, response) edges = (e_nu, cos_theta, e_shower, psi_bins) @@ -805,15 +804,15 @@ def create_cascade_aeff( def create_starting_aeff( energy_resolution=defer(get_energy_resolution, channel="cascade"), - energy_threshold=StepFunction(numpy.inf), - veto_coverage=lambda ct: numpy.zeros(len(ct) - 1), + energy_threshold=StepFunction(np.inf), + veto_coverage=lambda ct: np.zeros(len(ct) - 1), selection_efficiency=defer(HECascadeSelectionEfficiency), classification_efficiency=defer(get_classification_efficiency, "IceCube"), surface=defer(get_fiducial_surface, "IceCube"), psf=defer(get_angular_resolution, "IceCube", channel="cascade"), - psi_bins=numpy.sqrt(numpy.linspace(0, numpy.radians(20) ** 2, 10)), - neutrino_energy=numpy.logspace(4, 12, 81), - cos_theta=numpy.linspace(-1, 1, 21), + psi_bins=np.sqrt(np.linspace(0, np.radians(20) ** 2, 10)), + neutrino_energy=np.logspace(4, 12, 81), + cos_theta=np.linspace(-1, 1, 21), ): """ Create an effective area for neutrinos interacting inside the volume @@ -845,7 +844,7 @@ def create_starting_aeff( # Step 3: apply overall selection efficiency selection_efficiency = selection_efficiency( - *numpy.meshgrid(e_shower[1:], center(cos_theta), indexing="ij") + *np.meshgrid(e_shower[1:], center(cos_theta), indexing="ij") ).T aeff *= selection_efficiency[None, None, ...] # Step 3.5: calculate channel selection efficiency, padding the shape for @@ -853,7 +852,7 @@ def create_starting_aeff( weights = {} e_shower_center = np.exp(center(np.log(e_shower))) for event_class in classification_efficiency.classes: - weights[event_class] = numpy.array( + weights[event_class] = np.array( [ classification_efficiency(nutype, event_class, e_shower_center)[ None, None, :, None @@ -864,19 +863,19 @@ def create_starting_aeff( # Step 4: apply smearing for angular resolution # Add an overflow bin if none present - if numpy.isfinite(psi_bins[-1]): - psi_bins = numpy.concatenate((psi_bins, [numpy.inf])) + if np.isfinite(psi_bins[-1]): + psi_bins = np.concatenate((psi_bins, [np.inf])) cdf = eval_psf(psf, center(e_shower), center(cos_theta), psi_bins[:-1]) - total_aeff = numpy.empty(aeff.shape + (psi_bins.size - 1,)) + total_aeff = np.empty(aeff.shape + (psi_bins.size - 1,)) # expand differential contributions along the opening-angle axis - total_aeff[..., :-1] = aeff[..., None] * numpy.diff(cdf, axis=2)[None, ...] + total_aeff[..., :-1] = aeff[..., None] * np.diff(cdf, axis=2)[None, ...] 
# put the remainder in the overflow bin total_aeff[..., -1] = aeff * (1 - cdf[..., -1])[None, None, ...] # Step 5: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_shower, e_shower) - total_aeff = numpy.apply_along_axis(numpy.inner, 3, total_aeff, response) + total_aeff = np.apply_along_axis(np.inner, 3, total_aeff, response) edges = (e_nu, cos_theta, e_shower, psi_bins) @@ -904,12 +903,12 @@ def _interpolate_ara_aeff(ct_edges=None, depth=200, nstations=37): from scipy import interpolate if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) # interpolate to a grid compatible with the IceCube/Gen2 effective areas - loge_edges = numpy.linspace(2, 12, 101) + loge_edges = np.linspace(2, 12, 101) fpath = os.path.join(data_dir, "aeff", "cosZenDepAeff_z{}.half.txt".format(depth)) @@ -936,23 +935,23 @@ def _interpolate_ara_aeff(ct_edges=None, depth=200, nstations=37): paeff.reverse() aeff.append(paeff) - aeff = numpy.asarray(aeff) * nstations + aeff = np.asarray(aeff) * nstations # convert energy from exponent to GeV - # energy = 10**edge(numpy.asarray(energy))*1e-9 + # energy = 10**edge(np.asarray(energy))*1e-9 - centers = (numpy.asarray(energy) - 9, numpy.asarray(cos_theta)) + centers = (np.asarray(energy) - 9, np.asarray(cos_theta)) - edges = numpy.array([energy, cos_theta]) + edges = np.array([energy, cos_theta]) # centers = map(center, edges) newcenters = [ center(loge_edges), - numpy.clip(center(ct_edges), centers[1].min(), centers[1].max()), + np.clip(center(ct_edges), centers[1].min(), centers[1].max()), ] - xi = numpy.vstack( - [x.flatten() for x in numpy.meshgrid(*newcenters, indexing="ij")] + xi = np.vstack( + [x.flatten() for x in np.meshgrid(*newcenters, indexing="ij")] ).T - assert numpy.isfinite(xi).all() + assert np.isfinite(xi).all() interpolant = interpolate.RegularGridInterpolator( centers, aeff, bounds_error=False, fill_value=0 @@ -966,7 +965,7 @@ def _interpolate_ara_aeff(ct_edges=None, depth=200, nstations=37): v = interpolant(xi, method="nearest").reshape([x.size for x in newcenters]) # assume flavor-independence for ARA by extending same aeff across all flavors - return (10**loge_edges, ct_edges), numpy.repeat(v[None, ...], 6, axis=0) + return (10**loge_edges, ct_edges), np.repeat(v[None, ...], 6, axis=0) def create_ara_aeff( @@ -999,15 +998,15 @@ def create_ara_aeff( # Step 2: for now, assume no energy resolution # Note that it doesn't matter which energy distribution we use, just as # long as it's identical for all neutrino energy bins - # e_reco = numpy.copy(e_nu) - # aeff = numpy.repeat(aeff[...,None], aeff.shape[1], axis=-1) + # e_reco = np.copy(e_nu) + # aeff = np.repeat(aeff[...,None], aeff.shape[1], axis=-1) # aeff /= aeff.shape[1] - e_reco = numpy.array([e_nu[0], e_nu[-1]]) + e_reco = np.array([e_nu[0], e_nu[-1]]) aeff = aeff[..., None] # Step 3: dummy angular resolution smearing - psi_bins = numpy.asarray([0, numpy.inf]) - total_aeff = numpy.zeros(aeff.shape + (psi_bins.size - 1,)) + psi_bins = np.asarray([0, np.inf]) + total_aeff = np.zeros(aeff.shape + (psi_bins.size - 1,)) # put everything in first psi_bin for no angular resolution total_aeff[..., 0] = aeff[...] 
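`_interpolate_ara_aeff` above regrids a tabulated effective area onto the common analysis binning: build a `RegularGridInterpolator` over the table's native (log10(E), cos(zenith)) centers, clip the new zenith centers to the tabulated range so out-of-range bands reuse the nearest edge value, and sample with nearest-neighbour lookup. A self-contained sketch of the pattern, with a made-up 3x2 table:

import numpy as np
from scipy import interpolate

loge_centers = np.array([7.0, 8.0, 9.0])
ct_centers = np.array([-0.75, -0.25])
aeff_table = np.array([[1.0, 10.0, 100.0], [2.0, 20.0, 200.0]]).T  # (loge, ct)

interp = interpolate.RegularGridInterpolator(
    (loge_centers, ct_centers), aeff_table, bounds_error=False, fill_value=0
)
new_loge = np.array([7.5, 8.5])
# zenith bands outside the table snap to its edge instead of falling to zero
new_ct = np.clip(np.array([-0.9, 0.1]), ct_centers.min(), ct_centers.max())
xi = np.vstack(
    [x.flatten() for x in np.meshgrid(new_loge, new_ct, indexing="ij")]
).T
aeff_new = interp(xi, method="nearest").reshape(new_loge.size, new_ct.size)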
@@ -1093,7 +1092,7 @@ def create_radio_aeff( nstations=305, energy_resolution=get_energy_resolution(channel="radio"), psf=get_angular_resolution(channel="radio"), - psi_bins=numpy.sqrt(numpy.linspace(0, numpy.radians(20) ** 2, 10)), + psi_bins=np.sqrt(np.linspace(0, np.radians(20) ** 2, 10)), veff_filename=dict( e="nu_e_Gen2_100m_1.5sigma.json", mu="nu_mu_Gen2_100m_1.5sigma.json" ), @@ -1127,19 +1126,19 @@ def create_radio_aeff( # Step 4: apply smearing for angular resolution # Add an overflow bin if none present - if numpy.isfinite(psi_bins[-1]): - psi_bins = numpy.concatenate((psi_bins, [numpy.inf])) + if np.isfinite(psi_bins[-1]): + psi_bins = np.concatenate((psi_bins, [np.inf])) cdf = eval_psf(psf, center(e_shower), center(cos_theta), psi_bins[:-1]) - total_aeff = numpy.empty(aeff.shape + (psi_bins.size - 1,)) + total_aeff = np.empty(aeff.shape + (psi_bins.size - 1,)) # expand differential contributions along the opening-angle axis - total_aeff[..., :-1] = aeff[..., None] * numpy.diff(cdf, axis=2)[None, ...] + total_aeff[..., :-1] = aeff[..., None] * np.diff(cdf, axis=2)[None, ...] # put the remainder in the overflow bin total_aeff[..., -1] = aeff * (1 - cdf[..., -1])[None, None, ...] # Step 5: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_shower, e_shower) - total_aeff = numpy.apply_along_axis(numpy.inner, 3, total_aeff, response) + total_aeff = np.apply_along_axis(np.inner, 3, total_aeff, response) edges = (e_nu, cos_theta, e_shower, psi_bins) @@ -1164,13 +1163,13 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None): from scipy import interpolate if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) # interpolate to a grid compatible with the IceCube/Gen2 effective areas - loge_edges = numpy.linspace(2, 12, 101) + loge_edges = np.linspace(2, 12, 101) flavors = ["NuE", "NuMu", "NuTau"] filenames = [ @@ -1180,7 +1179,7 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None): aeffs = [] for filename in filenames: - data = numpy.load(filename) + data = np.load(filename) cos_theta_bins = data["cos_theta_bins"] energy_bins = data["energy_bins"] energy_bins_at_det = data["energy_bins_at_det"] @@ -1192,18 +1191,18 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None): new_centers = [ center(loge_edges), - numpy.clip(center(ct_edges), cos_theta_center.min(), cos_theta_center.max()), + np.clip(center(ct_edges), cos_theta_center.min(), cos_theta_center.max()), center(loge_edges), ] - xi = numpy.vstack( - [x.flatten() for x in numpy.meshgrid(*new_centers, indexing="ij")] + xi = np.vstack( + [x.flatten() for x in np.meshgrid(*new_centers, indexing="ij")] ) vs = [] for aeff in aeffs: interpolant = interpolate.RegularGridInterpolator( - (numpy.log10(e_center), cos_theta_center, numpy.log10(e_center_at_det)), + (np.log10(e_center), cos_theta_center, np.log10(e_center_at_det)), np.moveaxis(aeff, -1, 0), bounds_error=False, fill_value=0, @@ -1214,7 +1213,7 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None): # Assume Nu/NuBar symmetry for effective areas return ( (10**loge_edges, ct_edges, 10**loge_edges), - numpy.concatenate([numpy.repeat(v[None, ...], 2, axis=0) for v in vs]), + np.concatenate([np.repeat(v[None, ...], 2, axis=0) for v in vs]), ) @@ -1244,8 +1243,8 @@ def create_gen2_ehe_aeff( aeff = aeff.sum(axis=-1, keepdims=True) # Step 3: dummy angular resolution smearing - psi_bins = numpy.array([0, numpy.inf]) - total_aeff = numpy.zeros(aeff.shape + 
(psi_bins.size - 1,)) + psi_bins = np.array([0, np.inf]) + total_aeff = np.zeros(aeff.shape + (psi_bins.size - 1,)) # put everything in first psi_bin for no angular resolution total_aeff[..., 0] = aeff[...] diff --git a/toise/energy_resolution.py b/toise/energy_resolution.py index d8d5d72..edddc91 100644 --- a/toise/energy_resolution.py +++ b/toise/energy_resolution.py @@ -1,6 +1,6 @@ import os -import numpy +import numpy as np from scipy import interpolate from scipy.special import erf @@ -12,7 +12,7 @@ def get_energy_resolution(geometry="Sunflower", spacing=200, channel="muon"): return FictiveCascadeEnergyResolution() elif channel == "radio": return FictiveCascadeEnergyResolution( - lower_limit=numpy.log10(1 + 0.2), crossover_energy=1e8 + lower_limit=np.log10(1 + 0.2), crossover_energy=1e8 ) if geometry == "Fictive": return FictiveMuonEnergyResolution() @@ -32,7 +32,7 @@ def __init__( self, bias=None, sigma=None, - loge_range=(-numpy.inf, numpy.inf), + loge_range=(-np.inf, np.inf), overdispersion=1.0, ): """ @@ -60,19 +60,19 @@ def get_response_matrix(self, true_energy, reco_energy): :param true_energy: edges of true muon energy bins :param reco_energy: edges of reconstructed muon energy bins """ - loge_true = numpy.log10(true_energy) - loge_center = numpy.clip( + loge_true = np.log10(true_energy) + loge_center = np.clip( 0.5 * (loge_true[:-1] + loge_true[1:]), *self._loge_range ) - loge_width = numpy.diff(loge_true) - loge_lo = numpy.log10(reco_energy[:-1]) - loge_hi = numpy.log10(reco_energy[1:]) + loge_width = np.diff(loge_true) + loge_lo = np.log10(reco_energy[:-1]) + loge_hi = np.log10(reco_energy[1:]) # evaluate at the right edge for maximum smearing on a falling spectrum loge_center = loge_true[1:] - mu, hi = numpy.meshgrid( + mu, hi = np.meshgrid( self.bias(loge_center) + loge_width, loge_hi, indexing="ij" ) - sigma, lo = numpy.meshgrid(self.sigma(loge_center), loge_lo, indexing="ij") + sigma, lo = np.meshgrid(self.sigma(loge_center), loge_lo, indexing="ij") return ((erf((hi - mu) / sigma) - erf((lo - mu) / sigma)) / 2.0).T @@ -89,7 +89,7 @@ def __init__(self, fname="aachen_muon_energy_profile.npz", overdispersion=1.0): """ if not fname.startswith("/"): fname = os.path.join(data_dir, "energy_reconstruction", fname) - f = numpy.load(fname) + f = np.load(fname) loge_range = (f["loge"].min(), f["loge"].max()) bias = interpolate.UnivariateSpline(f["loge"], f["mean"], s=f["smoothing"]) sigma = interpolate.UnivariateSpline( @@ -102,20 +102,20 @@ def __init__(self, fname="aachen_muon_energy_profile.npz", overdispersion=1.0): class FictiveMuonEnergyResolution(EnergySmearingMatrix): def bias(self, loge): - return numpy.log10(10 ** (loge / 1.13) + 500) + return np.log10(10 ** (loge / 1.13) + 500) def sigma(self, loge): - return 0.22 + 0.23 * (1 - numpy.exp(-(10**loge) / 5e6)) + return 0.22 + 0.23 * (1 - np.exp(-(10**loge) / 5e6)) class FictiveCascadeEnergyResolution(EnergySmearingMatrix): - def __init__(self, lower_limit=numpy.log10(1.1), crossover_energy=1e6): + def __init__(self, lower_limit=np.log10(1.1), crossover_energy=1e6): super(FictiveCascadeEnergyResolution, self).__init__() self._b = lower_limit - self._a = self._b * numpy.sqrt(crossover_energy) + self._a = self._b * np.sqrt(crossover_energy) def bias(self, loge): return loge def sigma(self, loge): - return self._b + self._a / numpy.sqrt(10**loge) + return self._b + self._a / np.sqrt(10**loge) diff --git a/toise/externals/AtmosphericSelfVeto/__init__.py b/toise/externals/AtmosphericSelfVeto/__init__.py index c1ac92a..c27f4e9 
100644 --- a/toise/externals/AtmosphericSelfVeto/__init__.py +++ b/toise/externals/AtmosphericSelfVeto/__init__.py @@ -1,4 +1,4 @@ -import numpy +import numpy as np from ...util import PDGCode from . import selfveto @@ -34,10 +34,10 @@ def __init__(self, kind="conventional", veto_threshold=1e3, floor=1e-4): nue = numu self._eval = dict() - self._eval[PDGCode.NuMu] = numpy.vectorize( + self._eval[PDGCode.NuMu] = np.vectorize( lambda enu, ct, depth: numu.evaluate_simple([enu, ct, depth]) ) - self._eval[PDGCode.NuE] = numpy.vectorize( + self._eval[PDGCode.NuE] = np.vectorize( lambda enu, ct, depth: nue.evaluate_simple([enu, ct, depth]) ) @@ -46,20 +46,20 @@ def pad_knots(knots, order=2): """ Pad knots out for full support at the boundaries """ - pre = knots[0] - (knots[1] - knots[0]) * numpy.arange(order, 0, -1) - post = knots[-1] + (knots[-1] - knots[-2]) * numpy.arange(1, order + 1) - return numpy.concatenate((pre, knots, post)) + pre = knots[0] - (knots[1] - knots[0]) * np.arange(order, 0, -1) + post = knots[-1] + (knots[-1] - knots[-2]) * np.arange(1, order + 1) + return np.concatenate((pre, knots, post)) def edges(centers): - dx = numpy.diff(centers)[0] / 2.0 - return numpy.concatenate((centers - dx, [centers[-1] + dx])) + dx = np.diff(centers)[0] / 2.0 + return np.concatenate((centers - dx, [centers[-1] + dx])) - log_enu, ct = numpy.linspace(1, 9, 51), numpy.linspace(self.ct_min, 1, 21) - depth = numpy.linspace(1e3, 3e3, 11) + log_enu, ct = np.linspace(1, 9, 51), np.linspace(self.ct_min, 1, 21) + depth = np.linspace(1e3, 3e3, 11) depth_g = depth[None, None, :] - log_enu_g, ct_g = list(map(numpy.transpose, numpy.meshgrid(log_enu, ct))) + log_enu_g, ct_g = list(map(np.transpose, np.meshgrid(log_enu, ct))) - pr = numpy.zeros(ct_g.shape + (depth.size,)) + pr = np.zeros(ct_g.shape + (depth.size,)) for i, d in enumerate(depth): slant = selfveto.overburden(ct_g, d) emu = selfveto.minimum_muon_energy(slant, veto_threshold) @@ -79,7 +79,7 @@ def _create_spline(self, kind, veto_threshold): pr, centers, knots, order, penalty, penorder = self._eval_grid( kind, veto_threshold ) - z, w = photospline.ndsparse.from_data(pr, numpy.ones(pr.shape)) + z, w = photospline.ndsparse.from_data(pr, np.ones(pr.shape)) spline = photospline.glam_fit(z, w, centers, knots, order, penalty, penorder) return spline @@ -107,32 +107,32 @@ def __call__(self, particleType, enu, ct, depth, spline=True): directly. Otherwise, use the much faster B-spline representation. 
""" - if numpy.isscalar(ct) and not ct > self.ct_min: - return numpy.array(1.0) + if np.isscalar(ct) and not ct > self.ct_min: + return np.array(1.0) emu = selfveto.minimum_muon_energy( selfveto.overburden(ct, depth), self.veto_threshold ) # Verify that we're using a sane encoding scheme assert abs(PDGCode.NuMuBar) == PDGCode.NuMu - particleType = abs(numpy.asarray(particleType)) + particleType = abs(np.asarray(particleType)) if spline: - pr = numpy.where( + pr = np.where( particleType == PDGCode.NuMu, - self._eval[PDGCode.NuMu](numpy.log10(enu), ct, depth), - numpy.where( + self._eval[PDGCode.NuMu](np.log10(enu), ct, depth), + np.where( particleType == PDGCode.NuE, - self._eval[PDGCode.NuE](numpy.log10(enu), ct, depth), + self._eval[PDGCode.NuE](np.log10(enu), ct, depth), 1, ), ) else: - enu, ct, depth = numpy.broadcast_arrays(enu, ct, depth) + enu, ct, depth = np.broadcast_arrays(enu, ct, depth) if self.kind == "conventional": - pr = numpy.where( + pr = np.where( particleType == PDGCode.NuMu, selfveto.uncorrelated_passing_rate(enu, emu, ct, kind="numu"), - numpy.where( + np.where( particleType == PDGCode.NuE, selfveto.uncorrelated_passing_rate(enu, emu, ct, kind="nue"), 1, @@ -148,10 +148,10 @@ def __call__(self, particleType, enu, ct, depth, spline=True): # decays of pions and kaons, but is at least a conservative estimate # for 3-body decays of D mesons. direct = selfveto.correlated_passing_rate(enu, emu, ct) - pr *= numpy.where(particleType == PDGCode.NuMu, direct, 1) + pr *= np.where(particleType == PDGCode.NuMu, direct, 1) - return numpy.where( + return np.where( ct > self.ct_min, - numpy.where(pr <= 1, numpy.where(pr >= self.floor, pr, self.floor), 1), + np.where(pr <= 1, np.where(pr >= self.floor, pr, self.floor), 1), 1, ) diff --git a/toise/externals/AtmosphericSelfVeto/selfveto.py b/toise/externals/AtmosphericSelfVeto/selfveto.py index 01b7439..9410107 100755 --- a/toise/externals/AtmosphericSelfVeto/selfveto.py +++ b/toise/externals/AtmosphericSelfVeto/selfveto.py @@ -32,7 +32,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
-import numpy +import numpy as np def overburden(cos_theta, depth=1950, elevation=2400): @@ -49,7 +49,7 @@ def overburden(cos_theta, depth=1950, elevation=2400): r = 6371315 + elevation # this is secrety a translation in polar coordinates return ( - numpy.sqrt(2 * r * depth + (cos_theta * (r - depth)) ** 2 - depth**2) + np.sqrt(2 * r * depth + (cos_theta * (r - depth)) ** 2 - depth**2) - (r - depth) * cos_theta ) @@ -66,7 +66,7 @@ def polynomial(x, coefficients): return sum(c * x**i for i, c in enumerate(coefficients)) coeffs = [[2.793, -0.476, 0.187], [2.069, -0.201, 0.023], [-2.689, 3.882]] - a, b, c = (polynomial(numpy.log10(emin), c) for c in coeffs) + a, b, c = (polynomial(np.log10(emin), c) for c in coeffs) return 10 ** polynomial(distance, (a, b / 1e4, c / 1e10)) @@ -78,7 +78,7 @@ def effective_costheta(costheta): """ x = costheta p = [0.102573, -0.068287, 0.958633, 0.0407253, 0.817285] - return numpy.sqrt( + return np.sqrt( (x**2 + p[0] ** 2 + p[1] * x ** p[2] + p[3] * x ** p[4]) / (1 + p[0] ** 2 + p[1] + p[3]) ) @@ -93,10 +93,10 @@ def __init__(self, **kwargs): self.new_kwargs = kwargs def __enter__(self): - self.old_kwargs = numpy.seterr(**self.new_kwargs) + self.old_kwargs = np.seterr(**self.new_kwargs) def __exit__(self, *args): - numpy.seterr(**self.old_kwargs) + np.seterr(**self.old_kwargs) elbert_params = { @@ -155,13 +155,13 @@ def elbert_yield( decay_prob = 1.0 / (En * effective_costheta(cos_theta)) with fpe_context(all="ignore"): - icdf = numpy.where( + icdf = np.where( x >= 1, 0.0, a * primary_mass * decay_prob * x ** (-p1) * (1 - x**p3) ** p2, ) if differential: - icdf *= (1.0 / En) * numpy.where( + icdf *= (1.0 / En) * np.where( x >= 1, 0.0, (p1 / x + p2 * p3 * x ** (p3 - 1) / (1 - x**p3)) ) @@ -209,21 +209,21 @@ def gaisser_flux(energy, ptype): rigidity = [4e6, 30e6, 2e9] return sum( - n[idx] * energy ** (-g[idx]) * numpy.exp(-energy / (r * z)) + n[idx] * energy ** (-g[idx]) * np.exp(-energy / (r * z)) for n, g, r in zip(norm, gamma, rigidity) ) def logspace(start, stop, num): """ - A version of numpy.logspace that takes array arguments + A version of np.logspace that takes array arguments """ # Add a trailing dimension to the inputs - start = numpy.asarray(start)[..., None] - stop = numpy.asarray(stop)[..., None] + start = np.asarray(start)[..., None] + stop = np.asarray(stop)[..., None] num = int(num) step = (stop - start) / float(num - 1) - y = (numpy.core.numeric.arange(0, num) * step + start).squeeze() + y = (np.core.numeric.arange(0, num) * step + start).squeeze() return 10**y @@ -243,14 +243,14 @@ def response_function(enu, emu, cos_theta, kind="numu"): :returns: a tuple (response, muonyield, energy_per_nucleon) """ # make everything an array - enu, emu, cos_theta = list(map(numpy.asarray, (enu, emu, cos_theta))) - shape = numpy.broadcast(enu, emu, cos_theta).shape + enu, emu, cos_theta = list(map(np.asarray, (enu, emu, cos_theta))) + shape = np.broadcast(enu, emu, cos_theta).shape # contributions to the differential neutrino flux from chunks of the # primary spectrum for each element - contrib = numpy.zeros(shape + (5, 100)) + contrib = np.zeros(shape + (5, 100)) # mean integral muon yield from same chunks - muyield = numpy.zeros(shape + (5, 100)) - energy_per_nucleon = logspace(numpy.log10(enu), numpy.log10(enu) + 3, 101) + muyield = np.zeros(shape + (5, 100)) + energy_per_nucleon = logspace(np.log10(enu), np.log10(enu) + 3, 101) ptypes = [ getattr(ParticleType, pt) for pt in ("PPlus", "He4Nucleus", "N14Nucleus", "Al27Nucleus", "Fe56Nucleus") @@ -260,7 +260,7 
@@ def response_function(enu, emu, cos_theta, kind="numu"): # primary energies that contribute to the neutrino flux at given energy penergy = a * energy_per_nucleon # width of energy bins - de = numpy.diff(penergy) + de = np.diff(penergy) # center of energy bins pe = penergy[..., :-1] + de / 2.0 # hobo-integrate the flux @@ -277,17 +277,17 @@ def response_function(enu, emu, cos_theta, kind="numu"): return ( contrib, muyield, - energy_per_nucleon[..., :-1] + numpy.diff(energy_per_nucleon) / 2.0, + energy_per_nucleon[..., :-1] + np.diff(energy_per_nucleon) / 2.0, ) def get_bin_edges(energy_grid): - half_width = 10 ** (numpy.diff(numpy.log10(energy_grid))[0] / 2) - return numpy.concatenate((energy_grid / half_width, [energy_grid[-1] * half_width])) + half_width = 10 ** (np.diff(np.log10(energy_grid))[0] / 2) + return np.concatenate((energy_grid / half_width, [energy_grid[-1] * half_width])) def get_bin_width(energy_grid): - return numpy.diff(get_bin_edges(energy_grid)) + return np.diff(get_bin_edges(energy_grid)) def extract_yields(yield_record, types): @@ -295,16 +295,16 @@ def extract_yields(yield_record, types): def interpolate_integral_yield(log_N, log_e, log_emin): - interp = numpy.interp(log_emin, log_e, log_N) - if numpy.isfinite(interp): - return numpy.exp(interp) + interp = np.interp(log_emin, log_e, log_N) + if np.isfinite(interp): + return np.exp(interp) else: return 0.0 def integral_yield(e_min, e_grid, diff_yield): edges = get_bin_edges(e_grid) - de = numpy.diff(edges) + de = np.diff(edges) intyield = de * diff_yield axis = 1 index = [slice(None)] * diff_yield.ndim @@ -313,12 +313,12 @@ def integral_yield(e_min, e_grid, diff_yield): # cumulative sum from the right N = intyield[index].cumsum(axis=axis)[index] - return numpy.apply_along_axis( + return np.apply_along_axis( interpolate_integral_yield, 1, - numpy.log(N), - numpy.log(edges[1:]), - numpy.log(e_min), + np.log(N), + np.log(edges[1:]), + np.log(e_min), ) @@ -333,11 +333,11 @@ def mceq_response_function(fname, emu, nu_types, prompt_muons=False): :returns: a tuple (response, muonyield, energy_per_nucleon) """ - bundle = numpy.load(fname) + bundle = np.load(fname) e_grid = bundle["e_grid"] - contrib = numpy.empty((e_grid.size, 5, e_grid.size)) - muyield = numpy.empty((e_grid.size, 5, e_grid.size)) + contrib = np.empty((e_grid.size, 5, e_grid.size)) + muyield = np.empty((e_grid.size, 5, e_grid.size)) ptypes = [ getattr(ParticleType, pt) for pt in ("PPlus", "He4Nucleus", "N14Nucleus", "Al27Nucleus", "Fe56Nucleus") @@ -380,7 +380,7 @@ def uncorrelated_passing_rate(enu, emu, cos_theta, kind="numu"): contrib /= contrib.sum(axis=(-1, -2), keepdims=True) # weight contributions by probability of having zero muons. if that # probability is always 1, then this returns 1 by construction - return (numpy.exp(-muyield) * contrib).sum(axis=(-1, -2)) + return (np.exp(-muyield) * contrib).sum(axis=(-1, -2)) def mceq_uncorrelated_passing_rate(fname, emu, nu_types, prompt_muons=False): @@ -391,7 +391,7 @@ def mceq_uncorrelated_passing_rate(fname, emu, nu_types, prompt_muons=False): contrib /= contrib.sum(axis=(-1, -2), keepdims=True) # weight contributions by probability of having zero muons. 
if that # probability is always 1, then this returns 1 by construction - return (numpy.exp(-muyield) * contrib).sum(axis=(-1, -2)) + return (np.exp(-muyield) * contrib).sum(axis=(-1, -2)) def analytic_numu_flux(enu, cos_theta, emu=None): @@ -427,16 +427,16 @@ def analytic_numu_flux(enu, cos_theta, emu=None): F = (GAMMA + 2.0) / (GAMMA + 1.0) B_PI = ( - F * (ALAM_PI - ALAM_N) / (ALAM_PI * numpy.log(ALAM_PI / ALAM_N) * (1.0 - R_PI)) + F * (ALAM_PI - ALAM_N) / (ALAM_PI * np.log(ALAM_PI / ALAM_N) * (1.0 - R_PI)) ) - B_K = F * (ALAM_K - ALAM_N) / (ALAM_K * numpy.log(ALAM_K / ALAM_N) * (1.0 - R_K)) + B_K = F * (ALAM_K - ALAM_N) / (ALAM_K * np.log(ALAM_K / ALAM_N) * (1.0 - R_K)) if emu is not None: z = 1 + emu / enu zpimin = 1.0 / (1.0 - R_PI) zkmin = 1.0 / (1.0 - R_K) - zzpi = numpy.where(z >= zpimin, z, zpimin) - zzk = numpy.where(z >= zkmin, z, zkmin) + zzpi = np.where(z >= zpimin, z, zpimin) + zzk = np.where(z >= zkmin, z, zkmin) A_PI = Z_NPI / ((1.0 - R_PI) * (GAMMA + 1) * zzpi ** (GAMMA + 1.0)) A_K = Z_NK / ((1.0 - R_K) * (GAMMA + 1) * zzk ** (GAMMA + 1.0)) @@ -484,10 +484,10 @@ def plot_passing_rate(depth): import pylab ax = pylab.gca() - enu = numpy.logspace(3, 7, 101) + enu = np.logspace(3, 7, 101) for zenith, color in zip((0, 70, 80, 85), colors): - cos_theta = numpy.cos(numpy.radians(zenith)) + cos_theta = np.cos(np.radians(zenith)) emu = minimum_muon_energy(overburden(cos_theta, depth)) correlated = correlated_passing_rate(enu, emu, cos_theta) uncorr_numu = uncorrelated_passing_rate(enu, emu, cos_theta, kind="numu") @@ -515,7 +515,7 @@ def plot_passing_rate(depth): def format_energy(fmt, energy): - places = int(numpy.log10(energy) / 3) * 3 + places = int(np.log10(energy) / 3) * 3 if places == 1: unit = "GeV" elif places == 3: @@ -547,7 +547,7 @@ def plot_response_function(enu, depth, cos_theta, kind): pylab.plot(energy_per_nucleon, response[i, :], color=color, lw=1, label=label) pylab.plot( energy_per_nucleon, - numpy.exp(-muyield[i, :]) * response[i, :], + np.exp(-muyield[i, :]) * response[i, :], color=color, ls="--", lw=1, @@ -556,7 +556,7 @@ def plot_response_function(enu, depth, cos_theta, kind): ax.plot(energy_per_nucleon, response.sum(axis=0), color="k", lw=2, label="Total") ax.plot( energy_per_nucleon, - (numpy.exp(-muyield) * response).sum(axis=-2), + (np.exp(-muyield) * response).sum(axis=-2), color="k", ls="--", lw=2, @@ -569,7 +569,7 @@ def plot_response_function(enu, depth, cos_theta, kind): ax.legend( loc="lower left", prop=dict(size="small"), title="Flux contributions", ncol=2 ) - passrate = (numpy.exp(-muyield) * response).sum(axis=(-2, -1)) / response.sum( + passrate = (np.exp(-muyield) * response).sum(axis=(-2, -1)) / response.sum( axis=(-2, -1) ) if kind == "charm": @@ -584,14 +584,14 @@ def plot_response_function(enu, depth, cos_theta, kind): % ( format_energy("%d", enu), kindlabel, - numpy.round(numpy.degrees(numpy.arccos(cos_theta))), + np.round(np.degrees(np.arccos(cos_theta))), depth, passrate * 100, ), size="medium", ) - logmax = numpy.ceil(numpy.log10(response.max())) + logmax = np.ceil(np.log10(response.max())) ax.set_ylim(10 ** (logmax - 8), 10 ** (logmax)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_ticks(ax.yaxis.get_ticklocs()[2:-1]) @@ -600,7 +600,7 @@ def plot_response_function(enu, depth, cos_theta, kind): for i, color, label in zip(range(5), colors, elements): pylab.plot(energy_per_nucleon, muyield[i, :], color=color, lw=1, label=label) pylab.loglog() - logmax = numpy.ceil(numpy.log10(muyield.max())) + logmax = 
np.ceil(np.log10(muyield.max())) ax.set_ylim(10 ** (logmax - 4), 10 ** (logmax)) ax.xaxis.set_major_formatter(NullFormatter()) ax.set_ylabel(r"$\left 1\, \mathrm{TeV} \right>$") @@ -610,7 +610,7 @@ def plot_response_function(enu, depth, cos_theta, kind): for i, color, label in zip(range(5), colors, elements): pylab.plot( energy_per_nucleon, - numpy.exp(-muyield[i, :]), + np.exp(-muyield[i, :]), color=color, lw=1, label=label, @@ -637,7 +637,7 @@ def plot_response_function(enu, depth, cos_theta, kind): of the integrals of the solid and dashed black curves (%d%%). """ % ( format_energy("%d", enu), - numpy.round(numpy.degrees(numpy.arccos(cos_theta))), + np.round(np.degrees(np.arccos(cos_theta))), depth, passrate * 100, ) @@ -707,21 +707,21 @@ def plot_response_function(enu, depth, cos_theta, kind): plot_response_function( opts.energy, opts.depth, - numpy.cos(numpy.radians(opts.zenith)), + np.cos(np.radians(opts.zenith)), [opts.flavor, "charm"][opts.charm], ) sys.exit(0) # Calculate passing rate on a grid of energies and zenith angles - enu = numpy.logspace(3, 7, 101) - cos_theta = numpy.arange(0, 1, 0.05) + 0.05 - enu, cos_theta = numpy.meshgrid(enu, cos_theta) + enu = np.logspace(3, 7, 101) + cos_theta = np.arange(0, 1, 0.05) + 0.05 + enu, cos_theta = np.meshgrid(enu, cos_theta) emu = minimum_muon_energy(overburden(cos_theta, opts.depth)) if opts.flavor == "numu": passrate = correlated_passing_rate(enu, emu, cos_theta) else: - passrate = numpy.ones(enu.shape) + passrate = np.ones(enu.shape) if opts.charm: kind = "charm" @@ -729,17 +729,17 @@ def plot_response_function(enu, depth, cos_theta, kind): kind = opts.flavor passrate *= uncorrelated_passing_rate(enu, emu, cos_theta, kind=kind) - data = numpy.vstack( + data = np.vstack( list( map( - numpy.ndarray.flatten, + np.ndarray.flatten, (cos_theta, overburden(cos_theta, opts.depth), emu, enu, passrate), ) ) ).T fields = ["cos_theta", "overburden", "emu_min", "energy", "passrate"] - numpy.savetxt( + np.savetxt( sys.stdout, data, fmt="%12.3e", diff --git a/toise/factory.py b/toise/factory.py index 06f3f45..ce8504d 100644 --- a/toise/factory.py +++ b/toise/factory.py @@ -3,7 +3,7 @@ import pickle as pickle from functools import partial -import numpy +import numpy as np from scipy import interpolate from . 
import ( @@ -21,7 +21,7 @@ def make_key(opts, kwargs): key = dict(opts.__dict__) key.update(kwargs) for k, v in kwargs.items(): - if isinstance(v, numpy.ndarray): + if isinstance(v, np.ndarray): key[k] = (v[0], v[-1], len(v)) else: key[k] = v @@ -64,7 +64,7 @@ def create_aeff(opts, **kwargs): def selection_efficiency(emu, cos_theta): return seleff(emu, cos_theta=0) - # selection_efficiency = lambda emu, cos_theta: numpy.ones(emu.shape) + # selection_efficiency = lambda emu, cos_theta: np.ones(emu.shape) # selection_efficiency = effective_areas.get_muon_selection_efficiency("IceCube", None) else: selection_efficiency = seleff @@ -78,7 +78,7 @@ def selection_efficiency(emu, cos_theta): continue try: len(v) - kwargs[k] = numpy.asarray(v) + kwargs[k] = np.asarray(v) except TypeError: kwargs[k] = v @@ -144,9 +144,9 @@ def create_cascade_aeff(opts, **kwargs): for k in "psi_bins", "cos_theta": if k in kwargs: - kwargs[k] = numpy.asarray(kwargs[k]) + kwargs[k] = np.asarray(kwargs[k]) elif hasattr(opts, k): - kwargs[k] = numpy.asarray(getattr(opts, k)) + kwargs[k] = np.asarray(getattr(opts, k)) aeff = effective_areas.create_cascade_aeff( energy_resolution=effective_areas.get_energy_resolution( @@ -175,9 +175,9 @@ def create_starting_aeff(opts, **kwargs): for k in "psi_bins", "cos_theta": if k in kwargs: - kwargs[k] = numpy.asarray(kwargs[k]) + kwargs[k] = np.asarray(kwargs[k]) elif hasattr(opts, k): - kwargs[k] = numpy.asarray(getattr(opts, k)) + kwargs[k] = np.asarray(getattr(opts, k)) return effective_areas.create_starting_aeff( energy_resolution=effective_areas.get_energy_resolution( @@ -236,9 +236,9 @@ def _create(self, opts, **kwargs): psi_bins = kwargs.pop("psi_bins") for k in "cos_theta", "neutrino_energy": if k in kwargs: - kwargs[k] = numpy.asarray(kwargs[k]) + kwargs[k] = np.asarray(kwargs[k]) elif hasattr(opts, k): - kwargs[k] = numpy.asarray(getattr(opts, k)) + kwargs[k] = np.asarray(getattr(opts, k)) kwargs["psi_bins"] = psi_bins["radio"] if hasattr(opts, "config_file"): from . 
import radio_aeff_generation @@ -374,18 +374,18 @@ def b(x): return -0.82 + 14.54 / x def med(emu, b): - return 0.11 + b / numpy.sqrt(emu) + return 0.11 + b / np.sqrt(emu) scale = med(energy, b(scale)) / med(energy, b(1)) if ssmpe: # see: https://github.com/fbradascio/IceCube/blob/b8556b7b3d3c53a1cfab4bf53737bebff1264707/SensitivityStudy_SSMPE_vs_MPE.ipynb # https://doi.org/10.1051/epjconf/201920705002 # NB: this improvement was evaluated at 2x PDOM sensitivty - scale *= numpy.minimum( + scale *= np.minimum( 1, - numpy.polyval( + np.polyval( [0.01266943, -0.1901559, 0.80568256, 0.04948373], - numpy.log10(energy / 2), + np.log10(energy / 2), ), ) if mdom: @@ -413,7 +413,7 @@ def make_options(**kwargs): # icecube has no veto...yet if kwargs.get("geometry") == "IceCube": defaults["veto_area"] = 0.0 - defaults["veto_threshold"] = numpy.inf + defaults["veto_threshold"] = np.inf defaults.update(kwargs) return argparse.Namespace(**defaults) @@ -556,12 +556,12 @@ def scale_gen2_sensors( default_psi_bins = { - "tracks": numpy.linspace(0, numpy.radians(1.5) ** 2, 150) ** (1.0 / 2), - "cascades": numpy.linspace(0, numpy.radians(60) ** 2, 50) ** (1.0 / 2), - "radio": numpy.linspace(0, numpy.radians(15) ** 2, 50) ** (1.0 / 2), + "tracks": np.linspace(0, np.radians(1.5) ** 2, 150) ** (1.0 / 2), + "cascades": np.linspace(0, np.radians(60) ** 2, 50) ** (1.0 / 2), + "radio": np.linspace(0, np.radians(15) ** 2, 50) ** (1.0 / 2), } -# default_cos_theta_bins = numpy.linspace(-1, 1, 21) +# default_cos_theta_bins = np.linspace(-1, 1, 21) default_cos_theta_bins = [ -1.0, -0.95, diff --git a/toise/fictive.py b/toise/fictive.py index 60889a5..9979317 100644 --- a/toise/fictive.py +++ b/toise/fictive.py @@ -2,6 +2,7 @@ Idealized detectors used to estimate sensitivities in the WIS whitepaper """ +import numpy as np from numpy import vectorize from toolz import memoize @@ -9,8 +10,8 @@ def make_cylinder(volume=1.0, aspect=1.0): - r = numpy.cbrt(2 * volume * aspect / numpy.pi**2) - h = numpy.pi * r / 2 / aspect + r = np.cbrt(2 * volume * aspect / np.pi**2) + h = np.pi * r / 2 / aspect return surfaces.Cylinder(h * 1e3, r * 1e3) @@ -23,12 +24,12 @@ def sigma(self, loge): class GaussianPointSpreadFunction(object): - def __init__(self, median_opening_angle=numpy.radians(0.5)): - self._sigma = median_opening_angle / numpy.sqrt(2 * numpy.log(2)) + def __init__(self, median_opening_angle=np.radians(0.5)): + self._sigma = median_opening_angle / np.sqrt(2 * np.log(2)) def __call__(self, psi, energy, cos_theta): - psi, loge, ct = numpy.broadcast_arrays(psi / self._sigma, energy, cos_theta) - return 1.0 - numpy.exp(-(psi**2) / 2.0) + psi, loge, ct = np.broadcast_arrays(psi / self._sigma, energy, cos_theta) + return 1.0 - np.exp(-(psi**2) / 2.0) @memoize @@ -36,7 +37,7 @@ def base_aeff(): """ Create a horizontal neutrino effective area for a 1 km^2 detector """ - cos_theta = linspace(-1, 1, 40)[19:21] + cos_theta = np.linspace(-1, 1, 40)[19:21] ( e_nu, cos_theta, @@ -45,9 +46,9 @@ def base_aeff(): # Step 5: apply smearing for energy resolution response = FictiveEnergyResolution().get_response_matrix(e_mu, e_mu) - efficiency = numpy.apply_along_axis(numpy.inner, 3, efficiency, response) + efficiency = np.apply_along_axis(np.inner, 3, efficiency, response) - total_aeff = numpy.zeros((6,) + efficiency.shape[1:]) + total_aeff = np.zeros((6,) + efficiency.shape[1:]) total_aeff[2:4, ...] 
= efficiency * 1e6 # side-on geometry area: 1 km2 edges = (e_nu, cos_theta, e_mu) @@ -70,17 +71,17 @@ def get_aeff(angular_resolution=0.5, energy_threshold=1e3): idx = e_mu.searchsorted(energy_threshold) - psf = GaussianPointSpreadFunction(numpy.radians(angular_resolution)) - psi_bins = numpy.concatenate( + psf = GaussianPointSpreadFunction(np.radians(angular_resolution)) + psi_bins = np.concatenate( ( - sqrt(linspace(0, numpy.radians(angular_resolution * 4) ** 2, 101)), - [numpy.inf], + np.sqrt(np.linspace(0, np.radians(angular_resolution * 4) ** 2, 101)), + [np.inf], ) ) # Step 4: apply smearing for angular resolution cdf = psf(psi_bins[:-1], 0, 0) - smear = numpy.concatenate((diff(cdf), [1.0 - cdf[-1]])) + smear = np.concatenate((np.diff(cdf), [1.0 - cdf[-1]])) expand = [None] * 4 + [slice(None)] diff --git a/toise/figures/diffuse/dnnc.py b/toise/figures/diffuse/dnnc.py index ae2e82b..b4c3111 100644 --- a/toise/figures/diffuse/dnnc.py +++ b/toise/figures/diffuse/dnnc.py @@ -3,7 +3,6 @@ from pathlib import Path import healpy -import numpy import numpy as np import pandas as pd from scipy import interpolate, optimize @@ -39,7 +38,7 @@ def create_dnn_aeff(nside: int = 16, scale=1): values = np.array(list(values.values())).T[:, ::-1] # snap zenith bands to rings of a healpix map - pixel_centers = -healpy.ringinfo(nside, numpy.arange(1, 4 * nside))[2] + pixel_centers = -healpy.ringinfo(nside, np.arange(1, 4 * nside))[2] # broadcast effective area into each ring values = values[:, np.digitize(pixel_centers, ct_edges) - 1] # reset zenith bands to the bounds of the healpix rings diff --git a/toise/figures/diffuse/spectrum.py b/toise/figures/diffuse/spectrum.py index bc77b34..5ccb970 100644 --- a/toise/figures/diffuse/spectrum.py +++ b/toise/figures/diffuse/spectrum.py @@ -111,12 +111,14 @@ def ts_diff(value): energy_range = list(llh.components[key]._components.values())[0][0].energy_range if plotit and energy_range[0] > 1e6: - x = linspace(0, g0 * 2, 101) + import matplotlib.pyplot as plt + + x = np.linspace(0, g0 * 2, 101) energy_range = list(llh.components[key]._components.values())[0][0].energy_range - line = plot( + line = plt.plot( x, [ts_diff(x_) for x_ in x], label="%.1g-%.1g" % tuple(energy_range) )[0] - axvline(nom[key], color=line.get_color()) + plt.axvline(nom[key], color=line.get_color()) return lo, hi diff --git a/toise/multillh.py b/toise/multillh.py index 1940c54..54c72bb 100644 --- a/toise/multillh.py +++ b/toise/multillh.py @@ -2,7 +2,7 @@ import logging -import numpy +import numpy as np import scipy.optimize @@ -99,10 +99,10 @@ def differential_chunks(self, *args, **kwargs): "label": label, "livetime": livetime, "emin": max( - component.energy_range[0], kwargs.get("emin", -numpy.inf) + component.energy_range[0], kwargs.get("emin", -np.inf) ), "emax": min( - component.energy_range[1], kwargs.get("emax", numpy.inf) + component.energy_range[1], kwargs.get("emax", np.inf) ), "chunks": component.differential_chunks( *args, exclusive=True, **kwargs @@ -143,7 +143,7 @@ def differential_chunks(self, *args, **kwargs): all_done = True else: combo = Combination(components) - combo.energy_range = eranges[numpy.argmax(ecenters)] + combo.energy_range = eranges[np.argmax(ecenters)] combo.energy_center = max(ecenters) yield max(ecenters), combo @@ -222,18 +222,18 @@ def llh(self, **kwargs): llh += self.components[param].prior(kwargs[param], **kwargs) for prop in lamb: - with numpy.errstate(divide="ignore"): - log_lambda = numpy.log(lamb[prop]) - log_data = numpy.log(self.data[prop]) - 
log_lambda[numpy.isinf(log_lambda)] = 0 - log_data[numpy.isinf(log_data)] = 0 + with np.errstate(divide="ignore"): + log_lambda = np.log(lamb[prop]) + log_data = np.log(self.data[prop]) + log_lambda[np.isinf(log_lambda)] = 0 + log_data[np.isinf(log_data)] = 0 if self.unbinned: - norm = numpy.sum(lamb[prop]) + norm = np.sum(lamb[prop]) for event in self.data[prop]: - llh += numpy.sum(event * lamb[prop]) / norm + llh += np.sum(event * lamb[prop]) / norm llh -= norm else: - llh += numpy.sum(self.data[prop] * (log_lambda - log_data)) - numpy.sum( + llh += np.sum(self.data[prop] * (log_lambda - log_data)) - np.sum( lamb[prop] - self.data[prop] ) @@ -247,8 +247,8 @@ def llh_contributions(self, **kwargs): lamb = self.expectations(**kwargs) llh = dict() for prop in lamb: - log_lambda = numpy.log(lamb[prop]) - log_lambda[numpy.isinf(log_lambda)] = 0 + log_lambda = np.log(lamb[prop]) + log_lambda[np.isinf(log_lambda)] = 0 llh[prop] = self.data[prop] * log_lambda - lamb[prop] return llh @@ -264,10 +264,10 @@ def sat_llh(self, **kwargs): llh = 0 dof = 0 for prop in self.data: - log_lambda = numpy.log(self.data[prop]) - log_lambda[numpy.isinf(log_lambda)] = 0 - llh += numpy.sum(self.data[prop] * log_lambda - self.data[prop]) - dof += numpy.sum(self.data[prop] != 0) + log_lambda = np.log(self.data[prop]) + log_lambda[np.isinf(log_lambda)] = 0 + llh += np.sum(self.data[prop] * log_lambda - self.data[prop]) + dof += np.sum(self.data[prop] != 0) return (llh, dof + 1) def fit(self, minimizer_params=dict(), **fixedparams): @@ -323,7 +323,7 @@ def minllh(x): fixedparams.update(dict(zip(freeparams, bestfit["x"]))) return fixedparams else: - bestllh = numpy.inf + bestllh = np.inf bestparams = dict(fixedparams) for points in itertools.product( *tuple(fixedparams[k] for k in discrete_params) @@ -369,7 +369,7 @@ def profile1d(self, param, values, minimizer_params=dict(), **fixedparams): for i in range(len(dtypes)): if isinstance(llhpoints[-1][i], str): dtypes[i] = "|S32" - return numpy.array(llhpoints, dtype=list(zip(dkeys, dtypes))) + return np.array(llhpoints, dtype=list(zip(dkeys, dtypes))) def profile2d(self, param1, values1, param2, values2, **fixedparams): """ @@ -387,7 +387,7 @@ def profile2d(self, param1, values1, param2, values2, **fixedparams): fit = self.fit(**params) mlh = self.llh(**fit) llhpoints.append(list(fit.values()) + [mlh]) - return numpy.asarray(llhpoints).view( + return np.asarray(llhpoints).view( dtype=list( zip(list(fit.keys()) + ["LLH"], [float] * (len(list(fit.keys())) + 1)) ) @@ -407,7 +407,7 @@ def _pseudo_llh(components, poisson, **nominal): if poisson: pseudodata = dict() for tag in expectations: - pseudodata[tag] = numpy.random.poisson(expectations[tag]) + pseudodata[tag] = np.random.poisson(expectations[tag]) else: pseudodata = expectations allh.data = pseudodata diff --git a/toise/plotting.py b/toise/plotting.py index c3d4851..6a560c1 100644 --- a/toise/plotting.py +++ b/toise/plotting.py @@ -1,4 +1,4 @@ -import numpy +import numpy as np def stepped_path(edges, bins, cumulative=False): @@ -11,8 +11,8 @@ def stepped_path(edges, bins, cumulative=False): if len(edges) != len(bins) + 1: raise ValueError("edges must be 1 element longer than bins") - x = numpy.zeros((2 * len(edges))) - y = numpy.zeros((2 * len(edges))) + x = np.zeros((2 * len(edges))) + y = np.zeros((2 * len(edges))) if cumulative is not False: if cumulative == "<": @@ -27,7 +27,7 @@ def stepped_path(edges, bins, cumulative=False): def format_energy(fmt, energy): - places = int(numpy.log10(energy) / 3) * 3 + places = 
int(np.log10(energy) / 3) * 3 if places == 0: unit = "GeV" elif places == 3: @@ -47,11 +47,11 @@ def plot_profile2d(profile, x, y, levels=[68, 90, 99], colors="k", **kwargs): import matplotlib.pyplot as plt from scipy.stats import chi2 - xv = numpy.unique(profile[x]) - yv = numpy.unique(profile[y]) + xv = np.unique(profile[x]) + yv = np.unique(profile[y]) shape = (xv.size, yv.size) - ts = 2 * (numpy.nanmax(profile["LLH"]) - profile["LLH"]).reshape(shape) + ts = 2 * (np.nanmax(profile["LLH"]) - profile["LLH"]).reshape(shape) pvalue = chi2.cdf(ts.T, 2) * 100 ax = plt.gca() @@ -128,21 +128,21 @@ def label_curve(ax, line, x=None, y=None, orientation="parallel", offset=0, **kw xd = line.get_xdata() yd = line.get_ydata() # sort if necessary - if (numpy.diff(xd) < 0).any(): + if (np.diff(xd) < 0).any(): order = xd.argsort() xd = xd[order] yd = yd[order] # de-step if necessary - ux = numpy.unique(xd) + ux = np.unique(xd) if 2 * ux.size == xd.size and (ux == xd[::2]).all(): xd = (xd[::2] + xd[1::2]) / 2 yd = yd[1::2] # interpolate for x if y is supplied if x is None: - x = numpy.interp([y], yd, xd)[0] + x = np.interp([y], yd, xd)[0] # get points on either side of the anchor point - i = numpy.searchsorted(xd, x) + i = np.searchsorted(xd, x) if i > xd.size - 2: i = xd.size - 2 xb, yb = xd[i : i + 2], yd[i : i + 2] @@ -160,7 +160,7 @@ def label_curve(ax, line, x=None, y=None, orientation="parallel", offset=0, **kw p1 = ax.transData.transform_point([xb[0], yb[0]]) p2 = ax.transData.transform_point([xb[1], yb[1]]) if orientation == "parallel": - kw["rotation"] = numpy.degrees(numpy.arctan2(p2[1] - p1[1], p2[0] - p1[0])) + kw["rotation"] = np.degrees(np.arctan2(p2[1] - p1[1], p2[0] - p1[0])) kw.update(**kwargs) @@ -168,8 +168,8 @@ def label_curve(ax, line, x=None, y=None, orientation="parallel", offset=0, **kw # calculate normal in *screen coordinates* if offset != 0: xy = ax.transData.transform_point(text.get_position()) - norm = numpy.array([p2[1] - p1[1], p2[0] - p1[0]]) - norm = norm / (numpy.hypot(norm[0], norm[1]) / offset) + norm = np.array([p2[1] - p1[1], p2[0] - p1[0]]) + norm = norm / (np.hypot(norm[0], norm[1]) / offset) xy = ax.transData.inverted().transform_point((xy[0] - norm[0], xy[1] + norm[1])) text.set_position(xy) return text diff --git a/toise/surface_veto.py b/toise/surface_veto.py index 3a70aa2..837e937 100644 --- a/toise/surface_veto.py +++ b/toise/surface_veto.py @@ -3,7 +3,7 @@ from copy import copy import healpy -import numpy +import numpy as np from scipy import interpolate from scipy.optimize import fsolve from scipy.special import erf, erfc @@ -23,8 +23,8 @@ def surface_area(theta_max, volume): """ d = 1950 - volume._z_range[0] # depth of the bottom of the detector return ( - numpy.pi - * (d * numpy.tan(theta_max) + numpy.sqrt(volume.get_cap_area() / numpy.pi)) ** 2 + np.pi + * (d * np.tan(theta_max) + np.sqrt(volume.get_cap_area() / np.pi)) ** 2 ) @@ -54,7 +54,7 @@ def veto_cost_for_angle(theta_max, emu_min, surface): above emu_min out to theta_max """ fill = fill_factor_for_threshold(emu_min) - area = surface_area(numpy.radians(theta_max), surface) + area = surface_area(np.radians(theta_max), surface) return array_cost(area / 1e6, fill) / 1e6 @@ -79,7 +79,7 @@ def area_diff(margin): def get_geometric_coverage_for_area( - geometry, spacing, area, ct_bins=numpy.linspace(0, 1, 11), nsamples=int(1e4) + geometry, spacing, area, ct_bins=np.linspace(0, 1, 11), nsamples=int(1e4) ): """ Calculate the geometric coverage of a surface veto by Monte Carlo @@ -95,7 +95,7 @@ def 
get_geometric_coverage_for_area( margin = margin_for_area(ref_surface, area) veto_surface = ref_surface.expand(margin) - coverage = numpy.zeros(ct_bins.size - 1) + coverage = np.zeros(ct_bins.size - 1) if not area > 0: return coverage @@ -128,7 +128,7 @@ def __init__(self, geometry="Sunflower", spacing=240, area=10.0): else: self.cache = dict() - def __call__(self, ct_bins=numpy.linspace(-1, 1, 11)): + def __call__(self, ct_bins=np.linspace(-1, 1, 11)): key = ( self.geometry, self.spacing, @@ -162,17 +162,17 @@ def __init__(self, fname=os.path.join(data_dir, "veto", "vetoeffs.pickle")): z = 1 - (1 - v_mu) * (1 - v_e) self.spline = interpolate.RectBivariateSpline(x, y, z) # don't trust table beyond its statistical limits - self.pmax = numpy.nanmax(v_mu[v_mu < 1]) + self.pmax = np.nanmax(v_mu[v_mu < 1]) self.log_emin = x[0] self.log_emax = x[-1] self.ct_min = y[0] self.ct_max = y[-2] def __call__(self, energy, cos_theta): - logE, cos_theta = numpy.broadcast_arrays( - numpy.log10(energy / 1e3), numpy.clip(cos_theta, -1, self.ct_max) + logE, cos_theta = np.broadcast_arrays( + np.log10(energy / 1e3), np.clip(cos_theta, -1, self.ct_max) ) - p = numpy.clip(self.spline(logE, cos_theta, grid=False), 0, self.pmax) + p = np.clip(self.spline(logE, cos_theta, grid=False), 0, self.pmax) p[logE > self.log_emax] = self.pmax p[logE < self.log_emin] = 0 p[cos_theta < self.ct_min] = 0 @@ -232,7 +232,7 @@ def overburden(cos_theta, depth=1950, elevation=2400): r = 6371315 + elevation # this is secrety a translation in polar coordinates return ( - numpy.sqrt(2 * r * depth + (cos_theta * (r - depth)) ** 2 - depth**2) + np.sqrt(2 * r * depth + (cos_theta * (r - depth)) ** 2 - depth**2) - (r - depth) * cos_theta ) @@ -249,7 +249,7 @@ def polynomial(x, coefficients): return sum(c * x**i for i, c in enumerate(coefficients)) coeffs = [[2.793, -0.476, 0.187], [2.069, -0.201, 0.023], [-2.689, 3.882]] - a, b, c = (polynomial(numpy.log10(emin), c) for c in coeffs) + a, b, c = (polynomial(np.log10(emin), c) for c in coeffs) return 10 ** polynomial(distance, (a, b / 1e4, c / 1e10)) @@ -294,7 +294,7 @@ def gaisser_flux(energy, ptype): rigidity = [4e6, 30e6, 2e9] return sum( - n[idx] * energy ** (-g[idx]) * numpy.exp(-energy / (r * z)) + n[idx] * energy ** (-g[idx]) * np.exp(-energy / (r * z)) for n, g, r in zip(norm, gamma, rigidity) ) @@ -326,7 +326,7 @@ def bundle_energy_at_depth(eprim, a=1, cos_theta=1.0, depth=1950.0): et * a / cos_theta * alpha / (alpha - 1.0) * (1 * emin / eprim) ** (-alpha + 1) ) # assume that constant energy loss term is negligible - return surface_energy, numpy.exp(-bmu * X) + return surface_energy, np.exp(-bmu * X) def bundle_energy_distribution(emu_edges, eprim, a=1, cos_theta=1.0, depth=1950.0): @@ -339,20 +339,20 @@ def bundle_energy_distribution(emu_edges, eprim, a=1, cos_theta=1.0, depth=1950. 
:param depth: vertical depth of detector """ surface_energy, mean_loss = bundle_energy_at_depth(eprim, a, cos_theta, depth) - mu = numpy.log10(mean_loss * surface_energy) + mu = np.log10(mean_loss * surface_energy) # don't allow bundles to have more energy at the surface than the parent shower - hi = numpy.log10(numpy.minimum(emu_edges[1:], surface_energy)) - lo = numpy.log10(emu_edges[:-1]) + hi = np.log10(np.minimum(emu_edges[1:], surface_energy)) + lo = np.log10(emu_edges[:-1]) # width of log-normal distribution fit to (surface bundle energy / primary # energy) See: # https://wiki.icecube.wisc.edu/index.php/The_optimization_of_the_empirical_model_(IC22) - sigma = numpy.minimum( - 1.25 - 0.16 * numpy.log10(eprim) + 0.00563 * numpy.log10(eprim) ** 2, 1.0 + sigma = np.minimum( + 1.25 - 0.16 * np.log10(eprim) + 0.00563 * np.log10(eprim) ** 2, 1.0 ) - dist = numpy.maximum((erf((hi - mu) / sigma) - erf((lo - mu) / sigma)) / 2.0, 0.0) + dist = np.maximum((erf((hi - mu) / sigma) - erf((lo - mu) / sigma)) / 2.0, 0.0) # compensate for truncating the gaussian at the surface energy - upper_tail = erfc((numpy.log10(surface_energy) - mu) / sigma) / 2.0 + upper_tail = erfc((np.log10(surface_energy) - mu) / sigma) / 2.0 return dist / (1.0 - upper_tail) @@ -367,19 +367,19 @@ def bundle_flux_at_depth(emu, cos_theta): H/He/CNO/MgAlSi/Fe """ # make everything an array - emu, cos_theta = list(map(numpy.asarray, (emu, cos_theta))) - emu_center = 10 ** (center(numpy.log10(emu))) - shape = numpy.broadcast(emu_center, cos_theta).shape + emu, cos_theta = list(map(np.asarray, (emu, cos_theta))) + emu_center = 10 ** (center(np.log10(emu))) + shape = np.broadcast(emu_center, cos_theta).shape # primary spectrum for each element - contrib = numpy.zeros(shape + (5, 1000)) + contrib = np.zeros(shape + (5, 1000)) - penergy = numpy.logspace(2, 12, 1000) + penergy = np.logspace(2, 12, 1000) if cos_theta <= 0: return contrib, penergy - logstep = numpy.unique(numpy.diff(numpy.log10(penergy)))[0] - de = 10 ** (numpy.log10(penergy) + logstep / 2.0) - 10 ** ( - numpy.log10(penergy) - logstep / 2.0 + logstep = np.unique(np.diff(np.log10(penergy)))[0] + de = 10 ** (np.log10(penergy) + logstep / 2.0) - 10 ** ( + np.log10(penergy) - logstep / 2.0 ) ptypes = [ @@ -396,7 +396,7 @@ def bundle_flux_at_depth(emu, cos_theta): ) contrib[..., i, :] = weights * c # convert to differential flux - contrib /= numpy.diff(emu)[:, None, None] + contrib /= np.diff(emu)[:, None, None] return contrib, penergy @@ -411,7 +411,7 @@ def trigger_efficiency(eprim, threshold=10**5.5, sharpness=7): a step function """ return 1 / ( - 1 + numpy.exp(-(numpy.log10(eprim) - numpy.log10(threshold)) * sharpness) + 1 + np.exp(-(np.log10(eprim) - np.log10(threshold)) * sharpness) ) @@ -427,9 +427,9 @@ def __init__(self, effective_area, livetime=1.0): emu, cos_theta = effective_area.bin_edges[:2] # FIXME: account for healpix binning - self._solid_angle = 2 * numpy.pi * numpy.diff(self._aeff.bin_edges[1]) + self._solid_angle = 2 * np.pi * np.diff(self._aeff.bin_edges[1]) - flux = numpy.stack( + flux = np.stack( [ bundle_flux_at_depth(emu, ct)[0][:, :4, :].sum(axis=(1, 2)) for ct in center(cos_theta) @@ -438,11 +438,11 @@ def __init__(self, effective_area, livetime=1.0): # from icecube import MuonGun # model = MuonGun.load_model('GaisserH4a_atmod12_SIBYLL') - # flux, edist = numpy.vectorize(model.flux), numpy.vectorize(model.energy) - # emuc, ct = numpy.meshgrid(center(cos_theta), center(emu)) + # flux, edist = np.vectorize(model.flux), np.vectorize(model.energy) + # 
emuc, ct = np.meshgrid(center(cos_theta), center(emu)) # flux = flux(MuonGun.depth(0), ct, 1)*edist(MuonGun.depth(0), ct, 1, 0, emuc) - flux *= numpy.diff(emu)[:, None] + flux *= np.diff(emu)[:, None] if effective_area.is_healpix: flux *= healpy.nside2pixarea(effective_area.nside) else: @@ -485,7 +485,7 @@ def point_source_background( ), "Don't know how to make PS backgrounds from HEALpix maps yet" background = copy(self) - bin_areas = (numpy.pi * numpy.diff(psi_bins**2))[None, ...] + bin_areas = (np.pi * np.diff(psi_bins**2))[None, ...] # observation time shorter for triggered transient searches if livetime is not None: bin_areas *= livetime / self._livetime / constants.annum @@ -505,7 +505,7 @@ def point_source_background( # dimensions of the keys in expectations are now energy, radial bin if is_zenith_weight(zenith_index, self._aeff): background.expectations = ( - numpy.nansum( + np.nansum( (self.expectations * zenith_index[:, None]) / omega, axis=0 )[..., None] * bin_areas diff --git a/toise/surfaces.py b/toise/surfaces.py index 536a5f6..34c46cc 100644 --- a/toise/surfaces.py +++ b/toise/surfaces.py @@ -1,6 +1,6 @@ import os -import numpy +import numpy as np from .util import data_dir @@ -149,18 +149,18 @@ def cross(o, a, b): hull = lower[:-1] + upper[:-1] # convert into numpy array - return numpy.array(hull) + return np.array(hull) def hull_to_normals(points): # append first point at the end to close the hull - points = numpy.append(points, [points[0]], axis=0) + points = np.append(points, [points[0]], axis=0) vecs = points[1:] - points[:-1] - magn = numpy.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2) + magn = np.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2) - normals = numpy.array( - [vecs[:, 1] / magn, -vecs[:, 0] / magn, numpy.zeros(magn.shape)] + normals = np.array( + [vecs[:, 1] / magn, -vecs[:, 0] / magn, np.zeros(magn.shape)] ).T return normals @@ -168,11 +168,11 @@ def hull_to_normals(points): def hull_to_lengths(points): # append first point at the end to close the hull - points = numpy.append(points, [points[0]], axis=0) + points = np.append(points, [points[0]], axis=0) vecs = points[1:] - points[:-1] - return numpy.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2) + return np.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2) def signed_area(points): @@ -181,10 +181,10 @@ def signed_area(points): """ # append first point at the end to close the hull - points = numpy.append(points, [points[0]], axis=0) + points = np.append(points, [points[0]], axis=0) return ( - numpy.sum( + np.sum( points[:, 0][:-1] * points[:, 1][1:] - points[:, 0][1:] * points[:, 1][:-1] ) / 2.0 @@ -215,26 +215,26 @@ def azimuth_averaged_area(self, cos_theta): :param cos_theta: cosine of the zenith angle """ - return self.get_cap_area() * abs(cos_theta) + self.get_side_area() * numpy.sqrt( + return self.get_cap_area() * abs(cos_theta) + self.get_side_area() * np.sqrt( 1 - cos_theta**2 ) def get_maximum_area(self): - ct_max = numpy.cos(numpy.arctan(self.get_side_area() / self.get_cap_area())) + ct_max = np.cos(np.arctan(self.get_side_area() / self.get_cap_area())) return self.azimuth_averaged_area(ct_max) def sample_direction(self, cos_min=-1, cos_max=1, size=1): max_area = self.get_maximum_area() blocksize = min((size * 4, 65535)) accepted = 0 - directions = numpy.empty((size, 3)) + directions = np.empty((size, 3)) while accepted < size: - ct = numpy.random.uniform(cos_min, cos_max, blocksize) - st = numpy.sqrt((1 - ct) * (1 + ct)) - azi = numpy.random.uniform(0, 2 * numpy.pi, blocksize) - candidates = numpy.vstack((numpy.cos(azi) * st, 
numpy.sin(azi) * st, ct)).T + ct = np.random.uniform(cos_min, cos_max, blocksize) + st = np.sqrt((1 - ct) * (1 + ct)) + azi = np.random.uniform(0, 2 * np.pi, blocksize) + candidates = np.vstack((np.cos(azi) * st, np.sin(azi) * st, ct)).T candidates = candidates[ - numpy.random.uniform(0, max_area, blocksize) + np.random.uniform(0, max_area, blocksize) <= self.projected_area(candidates), :, ] @@ -251,10 +251,10 @@ def _integrate_area(a, b, cap, sides): cap * (b**2 - a**2) + sides * ( - numpy.arccos(a) - - numpy.arccos(b) - - numpy.sqrt(1 - a**2) * a - + numpy.sqrt(1 - b**2) * b + np.arccos(a) + - np.arccos(b) + - np.sqrt(1 - a**2) * a + + np.sqrt(1 - b**2) * b ) ) / 2.0 @@ -281,11 +281,11 @@ def etendue(self, cosMin=-1.0, cosMax=1.0): 0, cosMax, cap, sides ) else: - area = numpy.nan + area = np.nan raise ValueError( "Can't deal with zenith range [%.1e, %.1e]" % (cosMin, cosMax) ) - return 2 * numpy.pi * area + return 2 * np.pi * area def average_area(self, cosMin=-1, cosMax=1): """ @@ -296,7 +296,7 @@ def average_area(self, cosMin=-1, cosMax=1): :param cosMax: cosine of the minimum zenith angle :returns: the average projected area in the zenith angle range """ - return self.etendue(cosMin, cosMax) / (2 * numpy.pi * (cosMax - cosMin)) + return self.etendue(cosMin, cosMax) / (2 * np.pi * (cosMax - cosMin)) def volume(self): return self.get_cap_area() * self.length @@ -313,7 +313,7 @@ def __init__(self, xy_points, z_range): # hull points, in counterclockwise order self._x = hull # next neighbor in the hull - self._nx = numpy.roll(hull, -1, axis=0) + self._nx = np.roll(hull, -1, axis=0) # vector connecting each pair of points in the hull self._dx = self._nx - self._x @@ -324,10 +324,10 @@ def __init__(self, xy_points, z_range): self._side_lengths = hull_to_lengths(hull) side_areas = self._side_lengths * self.length cap_area = [signed_area(hull)] * 2 - cap_normals = numpy.array([[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]) + cap_normals = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]) - self._areas = numpy.concatenate((side_areas, cap_area)) - self._normals = numpy.concatenate((side_normals, cap_normals)) + self._areas = np.concatenate((side_areas, cap_area)) + self._normals = np.concatenate((side_normals, cap_normals)) assert self._areas.size == self._normals.shape[0] def get_z_range(self): @@ -340,11 +340,11 @@ def get_side_area(self): # the projected area of a plane, averaged over a 2\pi rotation that # passes through the normal, is # A*\int_0^\pi \Theta(\sin\alpha)\sin\alpha d\alpha / 2\pi = A/\pi - return self._side_lengths.sum() * self.length / numpy.pi + return self._side_lengths.sum() * self.length / np.pi def projected_area(self, direction): - inner = numpy.dot(direction, self._normals.T) - areas = numpy.where(inner < 0, -inner * self._areas, 0) + inner = np.dot(direction, self._normals.T) + areas = np.where(inner < 0, -inner * self._areas, 0) return areas.sum(axis=areas.ndim - 1) def _sample_on_caps(self, directions, bottom, offset, scale): @@ -355,10 +355,10 @@ def _sample_on_caps(self, directions, bottom, offset, scale): size = len(directions) accepted = 0 blocksize = min((size * 4, 65535)) - positions = numpy.empty((len(directions), 3)) + positions = np.empty((len(directions), 3)) while accepted < size: - cpos = numpy.random.uniform(size=(blocksize, 2)) * scale + offset - mask = numpy.array(list(map(self._point_in_hull, cpos))) + cpos = np.random.uniform(size=(blocksize, 2)) * scale + offset + mask = np.array(list(map(self._point_in_hull, cpos))) cpos = cpos[mask] if len(cpos) + accepted > 
             if len(cpos) + accepted > size:
                 cpos = cpos[: size - accepted]
@@ -371,8 +371,8 @@ def _sample_on_caps(self, directions, bottom, offset, scale):
         return positions
 
     def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1):
-        directions = numpy.empty((size, 3))
-        positions = numpy.empty((size, 3))
+        directions = np.empty((size, 3))
+        positions = np.empty((size, 3))
 
         accepted = 0
         blocksize = min((size * 4, 65535))
@@ -383,12 +383,12 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1):
             block = min((blocksize, size - accepted))
             cdir = self.sample_direction(cos_min, cos_max, block)
             directions[accepted : accepted + block] = cdir
-            inner = numpy.dot(cdir, self._normals.T)
-            areas = numpy.where(inner < 0, -inner * self._areas, 0)
+            inner = np.dot(cdir, self._normals.T)
+            areas = np.where(inner < 0, -inner * self._areas, 0)
             prob = areas.cumsum(axis=1)
             prob /= prob[:, -1:]
-            p = numpy.random.uniform(size=block)
-            target = numpy.array([prob[i, :].searchsorted(p[i]) for i in range(block)])
+            p = np.random.uniform(size=block)
+            target = np.array([prob[i, :].searchsorted(p[i]) for i in range(block)])
 
             # first, handle sides
             sides = target < len(self._areas) - 2
@@ -396,14 +396,14 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1):
             nsides = sides.sum()
             xy = (
                 self._x[side_target]
-                + numpy.random.uniform(size=nsides)[:, None] * self._dx[side_target]
+                + np.random.uniform(size=nsides)[:, None] * self._dx[side_target]
             )
-            xyz = numpy.concatenate(
+            xyz = np.concatenate(
                 (
                     xy,
                     (
                         self._z_range[0]
-                        + numpy.random.uniform(size=nsides) * self.length
+                        + np.random.uniform(size=nsides) * self.length
                     )[:, None],
                 ),
                 axis=1,
@@ -434,7 +434,7 @@ def expand(self, padding):
         # normalized vector connecting each vertex to the next one
         d = self._dx / self._side_lengths[:, None]
         # and the one connecting the previous vertex
-        prev_d = numpy.roll(d, 1, axis=0)
+        prev_d = np.roll(d, 1, axis=0)
         # sine of the inner angle of each vertex
         det = prev_d[:, 0] * d[:, 1] - prev_d[:, 1] * d[:, 0]
         assert (det != 0.0).all(), "Edges can't be [anti]parallel"
@@ -459,7 +459,7 @@ def from_I3Geometry(cls, i3geo, padding=0):
             if omgeo.omtype != omgeo.IceTop:
                 strings[omkey.string].append(list(omgeo.position))
         mean_xy = [
-            numpy.mean(positions, axis=0)[0:2] for positions in list(strings.values())
+            np.mean(positions, axis=0)[0:2] for positions in list(strings.values())
         ]
         zmax = max(max(p[2] for p in positions) for positions in list(strings.values()))
         zmin = min(min(p[2] for p in positions) for positions in list(strings.values()))
@@ -472,11 +472,11 @@ def from_I3Geometry(cls, i3geo, padding=0):
 
     @classmethod
     def from_file(cls, fname, padding=0):
-        dats = numpy.loadtxt(fname)
+        dats = np.loadtxt(fname)
         zmax = dats[:, -1].max()
         zmin = dats[:, -1].min()
         mean_xy = [
-            dats[dats[:, 0] == i].mean(axis=0)[2:4] for i in numpy.unique(dats[:, 0])
+            dats[dats[:, 0] == i].mean(axis=0)[2:4] for i in np.unique(dats[:, 0])
         ]
 
         self = cls(mean_xy, [zmin, zmax])
@@ -501,11 +501,11 @@ def from_i3file(cls, fname, padding=0):
         return cls.from_I3Geometry(fr["I3Geometry"], padding)
 
     def _direction_to_vec(self, cos_zenith, azimuth):
-        ct, azi = numpy.broadcast_arrays(cos_zenith, azimuth)
-        st = numpy.sqrt(1.0 - ct**2)
-        cp = numpy.cos(azi)
-        sp = numpy.sin(azi)
-        return -numpy.array([st * cp, st * sp, ct])
+        ct, azi = np.broadcast_arrays(cos_zenith, azimuth)
+        st = np.sqrt(1.0 - ct**2)
+        cp = np.cos(azi)
+        sp = np.sin(azi)
+        return -np.array([st * cp, st * sp, ct])
 
     def area(self, cos_zenith, azimuth):
         """
@@ -513,7 +513,7 @@ def area(self, cos_zenith, azimuth):
         """
         vec = self._direction_to_vec(cos_zenith, azimuth)
         # inner product with component normals
-        inner = numpy.dot(self._normals, vec)
+        inner = np.dot(self._normals, vec)
         # only surfaces that face the requested direction count towards the area
         mask = inner < 0
         return -(inner * self._areas[:, None] * mask).sum(axis=0)
@@ -549,8 +549,8 @@ def _distance_to_hull(self, point, vec):
         # proportional distance along edge to intersection point
         # NB: if diry/dirx == dy/dx, the ray is parallel to the line segment
         nonparallel = diry * dx != dirx * dy
-        alpha = numpy.where(
-            nonparallel, (dirx * y - diry * x) / (diry * dx - dirx * dy), numpy.nan
+        alpha = np.where(
+            nonparallel, (dirx * y - diry * x) / (diry * dx - dirx * dy), np.nan
         )
         # check whether the intersection is actually in the segment
         mask = (alpha >= 0) & (alpha < 1)
@@ -562,16 +562,16 @@ def _distance_to_hull(self, point, vec):
         beta = ((y + alpha * dy) / diry)[mask]
 
         if beta.size == 0:
-            return (numpy.nan,) * 2
+            return (np.nan,) * 2
         else:
-            return (numpy.nanmin(beta), numpy.nanmax(beta))
+            return (np.nanmin(beta), np.nanmax(beta))
 
     def _distance_to_cap(self, point, dir, cap_z):
         d = (point[2] - cap_z) / dir[2]
         if self._point_in_hull(point + d * dir):
             return d
         else:
-            return numpy.nan
+            return np.nan
 
     def _distance_to_caps(self, point, dir):
         return sorted(
@@ -582,7 +582,7 @@ def point_in_footprint(self, point):
         return self._point_in_hull(point)
 
     def intersections(self, x, y, z, cos_zenith, azimuth):
-        point = numpy.array((x, y, z))
+        point = np.array((x, y, z))
         vec = self._direction_to_vec(cos_zenith, azimuth)
 
         # perfectly vertical track: only check intersections with caps
@@ -593,11 +593,11 @@ def intersections(self, x, y, z, cos_zenith, azimuth):
             return self._distance_to_hull(point, vec)
         # general case: both rho and z components nonzero
         else:
-            sin_zenith = numpy.sqrt(1.0 - cos_zenith**2)
-            sides = numpy.array(self._distance_to_hull(point, vec)) / sin_zenith
+            sin_zenith = np.sqrt(1.0 - cos_zenith**2)
+            sides = np.array(self._distance_to_hull(point, vec)) / sin_zenith
             caps = self._distance_to_caps(point, vec)
 
-            return numpy.nanmax((sides[0], caps[0])), numpy.nanmin((sides[1], caps[1]))
+            return np.nanmax((sides[0], caps[0])), np.nanmin((sides[1], caps[1]))
 
 
 class Cylinder(UprightSurface):
@@ -609,28 +609,28 @@ def expand(self, margin):
         return Cylinder(self.length + 2 * margin, self.radius + margin)
 
     def point_in_footprint(self, point):
-        return numpy.hypot(point[0], point[1]) < self.radius
+        return np.hypot(point[0], point[1]) < self.radius
 
     def get_z_range(self):
         return (-self.length / 2.0, self.length / 2)
 
     def get_cap_area(self):
-        return numpy.pi * self.radius**2
+        return np.pi * self.radius**2
 
     def get_side_area(self):
         return 2 * self.radius * self.length
 
-    def area(self, cos_zenith, azimuth=numpy.nan):
+    def area(self, cos_zenith, azimuth=np.nan):
         return self.azimuth_averaged_area(cos_zenith)
 
     def projected_area(self, direction):
         ct = direction[..., -1]
-        st = numpy.sqrt((1 + ct) * (1 - ct))
+        st = np.sqrt((1 + ct) * (1 - ct))
         return self.get_cap_area() * abs(ct) + self.get_side_area() * st
 
     def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1):
-        directions = numpy.empty((size, 3))
-        positions = numpy.empty((size, 3))
+        directions = np.empty((size, 3))
+        positions = np.empty((size, 3))
 
         accepted = 0
         blocksize = min((size * 4, 65535))
@@ -639,32 +639,32 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1):
             cdir = self.sample_direction(cos_min, cos_max, block)
             directions[accepted : accepted + block] = cdir
             ct = -cdir[:, -1]
-            st = numpy.sqrt((1 + ct) * (1 - ct))
+            st = np.sqrt((1 + ct) * (1 - ct))
 
-            areas = numpy.array(
+            areas = np.array(
                 [
                     self.get_side_area() * st,
-                    self.get_cap_area() * numpy.where(ct > 0, ct, 0),
-                    self.get_cap_area() * numpy.where(ct < 0, abs(ct), 0),
+                    self.get_cap_area() * np.where(ct > 0, ct, 0),
+                    self.get_cap_area() * np.where(ct < 0, abs(ct), 0),
                 ]
             ).T
             prob = areas.cumsum(axis=1)
             prob /= prob[:, -1:]
-            p = numpy.random.uniform(size=block)
-            target = numpy.array([prob[i, :].searchsorted(p[i]) for i in range(block)])
+            p = np.random.uniform(size=block)
+            target = np.array([prob[i, :].searchsorted(p[i]) for i in range(block)])
 
             # first, handle sides
             sides = target == 0
             nsides = sides.sum()
-            beta = numpy.arcsin(
-                numpy.random.uniform(-1, 1, size=nsides)
-            ) + numpy.arctan2(-cdir[sides, 1], -cdir[sides, 0])
-            positions[accepted : accepted + block][sides] = numpy.stack(
+            beta = np.arcsin(
+                np.random.uniform(-1, 1, size=nsides)
+            ) + np.arctan2(-cdir[sides, 1], -cdir[sides, 0])
+            positions[accepted : accepted + block][sides] = np.stack(
                 (
-                    self.radius * numpy.cos(beta) * st[sides],
-                    self.radius * numpy.sin(beta) * st[sides],
-                    numpy.random.uniform(
+                    self.radius * np.cos(beta) * st[sides],
+                    self.radius * np.sin(beta) * st[sides],
+                    np.random.uniform(
                         -self.length / 2, self.length / 2, size=nsides
                     ),
                 )
@@ -674,13 +674,13 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1):
             caps = ~sides
             ncaps = caps.sum()
             cap_target = target[caps]
-            beta = numpy.random.uniform(0, 2 * numpy.pi, size=ncaps)
-            r = numpy.sqrt(numpy.random.uniform(size=ncaps)) * self.radius
-            positions[accepted : accepted + block][caps] = numpy.stack(
+            beta = np.random.uniform(0, 2 * np.pi, size=ncaps)
+            r = np.sqrt(np.random.uniform(size=ncaps)) * self.radius
+            positions[accepted : accepted + block][caps] = np.stack(
                 (
-                    r * numpy.cos(beta),
-                    r * numpy.sin(beta),
-                    numpy.where(cap_target == 1, self.length / 2, -self.length / 2),
+                    r * np.cos(beta),
+                    r * np.sin(beta),
+                    np.where(cap_target == 1, self.length / 2, -self.length / 2),
                 )
             ).T
             accepted += block
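The hunks above are a pure numpy -> np rename, but the code they touch follows one pattern worth a gloss: every surface samples arrival directions by acceptance-rejection, drawing candidates uniformly in (cos zenith, azimuth) and keeping each with probability proportional to its projected area. The following is a minimal standalone sketch of that pattern, not the toise API itself; only the method names get_maximum_area and projected_area are taken from the diff, everything else is illustrative:

    import numpy as np

    def sample_directions(surface, size, cos_min=-1.0, cos_max=1.0, seed=None):
        """Draw unit vectors with probability proportional to surface.projected_area."""
        rng = np.random.default_rng(seed)
        out = np.empty((0, 3))
        # upper bound on the projected area, needed for rejection sampling
        max_area = surface.get_maximum_area()
        while len(out) < size:
            n = 4 * size
            ct = rng.uniform(cos_min, cos_max, n)  # cos(zenith), uniform in solid angle
            st = np.sqrt((1 - ct) * (1 + ct))      # sin(zenith)
            azi = rng.uniform(0, 2 * np.pi, n)
            cand = np.stack((np.cos(azi) * st, np.sin(azi) * st, ct), axis=1)
            # accept each candidate with probability projected_area / max_area
            keep = rng.uniform(0, max_area, n) <= surface.projected_area(cand)
            out = np.concatenate((out, cand[keep]))
        return out[:size]

sample_impact_ray builds on the same draw: once a direction is accepted, a face of the surface is chosen with probability proportional to its projected area, and an entry point is sampled uniformly on that face.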
From 9dc43f4be68cb935d7e7278096cf61a5b7dab0c4 Mon Sep 17 00:00:00 2001
From: Jakob van Santen
Date: Wed, 25 Sep 2024 14:19:57 +0200
Subject: [PATCH 5/6] blacken

---
 resources/docs/plots/geometries.py            |  1 -
 toise/cache.py                                |  1 +
 toise/effective_areas.py                      | 16 +++-----------
 toise/energy_resolution.py                    |  4 +---
 .../externals/AtmosphericSelfVeto/selfveto.py |  4 +---
 toise/multillh.py                             |  8 ++-----
 toise/pointsource.py                          | 15 ++++---------
 toise/surface_veto.py                         | 15 +++++--------
 toise/surfaces.py                             | 21 +++++++------------
 9 files changed, 25 insertions(+), 60 deletions(-)

diff --git a/resources/docs/plots/geometries.py b/resources/docs/plots/geometries.py
index f3cc0d2..998d6cd 100644
--- a/resources/docs/plots/geometries.py
+++ b/resources/docs/plots/geometries.py
@@ -1,4 +1,3 @@
-
 import matplotlib.pyplot as plt
 import numpy as np
 from icecube.toise import plotting, surfaces
diff --git a/toise/cache.py b/toise/cache.py
index 5cdd8b2..1b1df74 100644
--- a/toise/cache.py
+++ b/toise/cache.py
@@ -18,6 +18,7 @@
     "lru_cache",
 ]
 
+
 class PickleCache(AbstractCacheInstance):
     def __init__(self, base_dir=join(data_dir, "cache"), *args, **kwargs):
         super(PickleCache, self).__init__(*args, **kwargs)
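Every hunk in this patch is mechanical black output: parenthesized expressions that fit within the default 88-character limit are collapsed back onto one line, and blank lines around top-level definitions are normalized to two, as in the cache.py hunk above. The same output can be reproduced programmatically rather than through the CLI; a small sketch using black's format_str entry point (the sample source string here is invented):

    import black

    src = (
        "y = np.where(\n"
        "    ~(h.bincontent <= 0), np.log10(h.bincontent), -np.inf\n"
        ")\n"
    )
    # black.Mode() defaults to line_length=88, matching the hunks in this patch
    print(black.format_str(src, mode=black.Mode()))
    # -> y = np.where(~(h.bincontent <= 0), np.log10(h.bincontent), -np.inf)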
diff --git a/toise/effective_areas.py b/toise/effective_areas.py
index 8be087e..48071f0 100644
--- a/toise/effective_areas.py
+++ b/toise/effective_areas.py
@@ -288,9 +288,7 @@ def _interpolate_production_efficiency(
             np.clip(cos_zenith, centers[1].min(), centers[1].max()),
         ] + centers[2:]
         with np.errstate(divide="ignore"):
-            y = np.where(
-                ~(h.bincontent <= 0), np.log10(h.bincontent), -np.inf
-            )
+            y = np.where(~(h.bincontent <= 0), np.log10(h.bincontent), -np.inf)
 
         assert not np.isnan(y).any()
         interpolant = interpolate.RegularGridInterpolator(
@@ -573,7 +571,6 @@ def create_bundle_aeff(
     # 2) Selection efficiency
     # 3) Veto coverage
 
-
     nside = None
     if isinstance(cos_theta, int):
         nside = cos_theta
@@ -673,7 +670,6 @@ def create_throughgoing_aeff(
     # 4) Point spread function
     # 5) Energy resolution
 
-
     nside = None
     if isinstance(cos_theta, int):
         nside = cos_theta
@@ -756,7 +752,6 @@ def create_cascade_aeff(
     # 4) Point spread function
     # 5) Energy resolution
 
-
     nside = None
     if isinstance(cos_theta, int):
         nside = cos_theta
@@ -827,7 +822,6 @@ def create_starting_aeff(
     # 4) Point spread function
     # 5) Energy resolution
 
-
     nside = None
     if isinstance(cos_theta, int):
         nside = cos_theta
@@ -948,9 +942,7 @@ def _interpolate_ara_aeff(ct_edges=None, depth=200, nstations=37):
         center(loge_edges),
         np.clip(center(ct_edges), centers[1].min(), centers[1].max()),
     ]
-    xi = np.vstack(
-        [x.flatten() for x in np.meshgrid(*newcenters, indexing="ij")]
-    ).T
+    xi = np.vstack([x.flatten() for x in np.meshgrid(*newcenters, indexing="ij")]).T
     assert np.isfinite(xi).all()
 
     interpolant = interpolate.RegularGridInterpolator(
@@ -1195,9 +1187,7 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None):
         center(loge_edges),
     ]
 
-    xi = np.vstack(
-        [x.flatten() for x in np.meshgrid(*new_centers, indexing="ij")]
-    )
+    xi = np.vstack([x.flatten() for x in np.meshgrid(*new_centers, indexing="ij")])
 
     vs = []
     for aeff in aeffs:
diff --git a/toise/energy_resolution.py b/toise/energy_resolution.py
index edddc91..5208615 100644
--- a/toise/energy_resolution.py
+++ b/toise/energy_resolution.py
@@ -61,9 +61,7 @@ def get_response_matrix(self, true_energy, reco_energy):
         :param reco_energy: edges of reconstructed muon energy bins
         """
         loge_true = np.log10(true_energy)
-        loge_center = np.clip(
-            0.5 * (loge_true[:-1] + loge_true[1:]), *self._loge_range
-        )
+        loge_center = np.clip(0.5 * (loge_true[:-1] + loge_true[1:]), *self._loge_range)
         loge_width = np.diff(loge_true)
         loge_lo = np.log10(reco_energy[:-1])
         loge_hi = np.log10(reco_energy[1:])
diff --git a/toise/externals/AtmosphericSelfVeto/selfveto.py b/toise/externals/AtmosphericSelfVeto/selfveto.py
index 9410107..7e4dc07 100755
--- a/toise/externals/AtmosphericSelfVeto/selfveto.py
+++ b/toise/externals/AtmosphericSelfVeto/selfveto.py
@@ -426,9 +426,7 @@ def analytic_numu_flux(enu, cos_theta, emu=None):
     R_PI, R_K, ALAM_N, ALAM_PI, ALAM_K = 0.5731, 0.0458, 120.0, 160.0, 180.0
 
     F = (GAMMA + 2.0) / (GAMMA + 1.0)
-    B_PI = (
-        F * (ALAM_PI - ALAM_N) / (ALAM_PI * np.log(ALAM_PI / ALAM_N) * (1.0 - R_PI))
-    )
+    B_PI = F * (ALAM_PI - ALAM_N) / (ALAM_PI * np.log(ALAM_PI / ALAM_N) * (1.0 - R_PI))
     B_K = F * (ALAM_K - ALAM_N) / (ALAM_K * np.log(ALAM_K / ALAM_N) * (1.0 - R_K))
 
     if emu is not None:
diff --git a/toise/multillh.py b/toise/multillh.py
index 54c72bb..2a8e5fd 100644
--- a/toise/multillh.py
+++ b/toise/multillh.py
@@ -98,12 +98,8 @@ def differential_chunks(self, *args, **kwargs):
                 {
                     "label": label,
                     "livetime": livetime,
-                    "emin": max(
-                        component.energy_range[0], kwargs.get("emin", -np.inf)
-                    ),
-                    "emax": min(
-                        component.energy_range[1], kwargs.get("emax", np.inf)
-                    ),
+                    "emin": max(component.energy_range[0], kwargs.get("emin", -np.inf)),
+                    "emax": min(component.energy_range[1], kwargs.get("emax", np.inf)),
                     "chunks": component.differential_chunks(
                         *args, exclusive=True, **kwargs
                     ),
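The first effective_areas.py hunk above reformats a pattern worth noting: production efficiencies are interpolated in log10, with empty bins mapped to -inf so that they come back as exactly zero after exponentiation. A condensed standalone sketch of the same round trip, with invented toy data standing in for the real histogram:

    import numpy as np
    from scipy import interpolate

    # toy "histogram" of efficiency on a (log10(E), cos(zenith)) grid
    loge = np.linspace(3, 8, 11)
    cosz = np.linspace(-1, 1, 21)
    bincontent = np.random.RandomState(0).uniform(0, 1e-3, (loge.size, cosz.size))
    bincontent[0, :] = 0  # empty bins must survive the round trip as exact zeros

    with np.errstate(divide="ignore"):
        y = np.where(~(bincontent <= 0), np.log10(bincontent), -np.inf)

    interpolant = interpolate.RegularGridInterpolator(
        (loge, cosz), y, bounds_error=False, fill_value=-np.inf
    )
    # 10**(-inf) == 0.0, so out-of-range and empty regions evaluate to zero
    query = np.stack([np.full(5, 5.5), np.linspace(-0.9, 0.9, 5)], axis=1)
    vals = 10 ** interpolant(query)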
diff --git a/toise/pointsource.py b/toise/pointsource.py
index f931a73..4e24686 100644
--- a/toise/pointsource.py
+++ b/toise/pointsource.py
@@ -334,9 +334,7 @@ def __init__(self, effective_area, livetime, fluxes, sindecs, with_energy=True):
        # scatter sources through the zenith bands isotropically
         zenith_bins = effective_area.bin_edges[1]
         self.sources_per_band = np.histogram(-sindecs, bins=zenith_bins)[0]
-        self.flux_per_band = np.histogram(
-            -sindecs, bins=zenith_bins, weights=fluxes
-        )[0]
+        self.flux_per_band = np.histogram(-sindecs, bins=zenith_bins, weights=fluxes)[0]
 
         # reference flux is E^2 Phi = 1e-12 TeV^2 cm^-2 s^-1
         # remember: fluxes are defined as neutrino + antineutrino, so the flux
@@ -375,8 +373,7 @@ def source_to_local_zenith(declination, latitude, ct_bins):
     def offset(hour_angle, ct=0):
         "difference between source elevation and bin edge at given hour angle"
         return (
-            np.cos(hour_angle) * np.cos(dec) * np.cos(lat)
-            + np.sin(dec) * np.sin(lat)
+            np.cos(hour_angle) * np.cos(dec) * np.cos(lat) + np.sin(dec) * np.sin(lat)
         ) - ct
 
     # find minimum and maximum elevation
@@ -460,9 +457,7 @@ def f(flux_norm):
         nb = nevents(allh, ps=0, **fixed)
         ns = total - nb
         baseline = min((1000, np.sqrt(critical_ts) / (ns / np.sqrt(nb)))) / 10
-        baseline = max(
-            ((np.sqrt(critical_ts) / (ns / np.sqrt(nb))) / 10, 0.3 / ns)
-        )
+        baseline = max(((np.sqrt(critical_ts) / (ns / np.sqrt(nb))) / 10, 0.3 / ns))
         # logging.getLogger().warn('total: %.2g ns: %.2g nb: %.2g baseline norm: %.2g ts: %.2g' % (total, ns, nb, baseline, ts(baseline)))
         # baseline = 1000
         if not np.isfinite(baseline):
@@ -578,9 +573,7 @@ def f(flux_norm):
         nb = nevents(allh, ps=0, **fixed)
         ns = total - nb
         baseline = min((1000, np.sqrt(critical_ts) / (ns / np.sqrt(nb)))) / 10
-        baseline = max(
-            ((np.sqrt(critical_ts) / (ns / np.sqrt(nb))) / 10, 0.3 / ns)
-        )
+        baseline = max(((np.sqrt(critical_ts) / (ns / np.sqrt(nb))) / 10, 0.3 / ns))
         logging.getLogger().debug(
             "total: %.2g ns: %.2g nb: %.2g baseline norm: %.2g"
             % (total, ns, nb, baseline)
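The baseline seed in the two pointsource.py hunks above appears to follow Gaussian counting statistics: with ns signal events on nb background events at unit flux norm, the test statistic grows roughly as TS ~ (norm * ns)**2 / nb, so the norm expected to reach critical_ts is sqrt(critical_ts) / (ns / sqrt(nb)); the code then starts a factor 10 below that, floored at 0.3 / ns. A toy check of this scaling, with invented numbers:

    import numpy as np

    def ts_gaussian(norm, ns, nb):
        # leading-order test statistic for norm*ns signal events on nb background
        return (norm * ns) ** 2 / nb

    critical_ts = 2.7  # roughly the 90% CL threshold for a one-sided, one-parameter test
    ns, nb = 4.2, 130.0  # invented event counts
    norm = np.sqrt(critical_ts) / (ns / np.sqrt(nb))
    assert np.isclose(ts_gaussian(norm, ns, nb), critical_ts)
    baseline = max(norm / 10, 0.3 / ns)  # starting point as used in the hunks above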
diff --git a/toise/surface_veto.py b/toise/surface_veto.py
index 837e937..d87b6f1 100644
--- a/toise/surface_veto.py
+++ b/toise/surface_veto.py
@@ -22,10 +22,7 @@ def surface_area(theta_max, volume):
     Surface coverage area required so that a track that
     """
     d = 1950 - volume._z_range[0]  # depth of the bottom of the detector
-    return (
-        np.pi
-        * (d * np.tan(theta_max) + np.sqrt(volume.get_cap_area() / np.pi)) ** 2
-    )
+    return np.pi * (d * np.tan(theta_max) + np.sqrt(volume.get_cap_area() / np.pi)) ** 2
 
 
 def array_cost(area, fill_factor):
@@ -410,9 +407,7 @@ def trigger_efficiency(eprim, threshold=10**5.5, sharpness=7):
     :param sharpness: speed of transition. 0 makes a flat line at 0.5, infinity
                       a step function
     """
-    return 1 / (
-        1 + np.exp(-(np.log10(eprim) - np.log10(threshold)) * sharpness)
-    )
+    return 1 / (1 + np.exp(-(np.log10(eprim) - np.log10(threshold)) * sharpness))
 
 
 def untagged_fraction(eprim, **kwargs):
@@ -505,9 +500,9 @@ def point_source_background(
         # dimensions of the keys in expectations are now energy, radial bin
         if is_zenith_weight(zenith_index, self._aeff):
             background.expectations = (
-                np.nansum(
-                    (self.expectations * zenith_index[:, None]) / omega, axis=0
-                )[..., None]
+                np.nansum((self.expectations * zenith_index[:, None]) / omega, axis=0)[
+                    ..., None
+                ]
                 * bin_areas
             )
         else:
diff --git a/toise/surfaces.py b/toise/surfaces.py
index 34c46cc..9980e0d 100644
--- a/toise/surfaces.py
+++ b/toise/surfaces.py
@@ -159,9 +159,7 @@ def hull_to_normals(points):
     vecs = points[1:] - points[:-1]
     magn = np.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2)
 
-    normals = np.array(
-        [vecs[:, 1] / magn, -vecs[:, 0] / magn, np.zeros(magn.shape)]
-    ).T
+    normals = np.array([vecs[:, 1] / magn, -vecs[:, 0] / magn, np.zeros(magn.shape)]).T
 
     return normals
 
@@ -401,10 +399,9 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1):
             xyz = np.concatenate(
                 (
                     xy,
-                    (
-                        self._z_range[0]
-                        + np.random.uniform(size=nsides) * self.length
-                    )[:, None],
+                    (self._z_range[0] + np.random.uniform(size=nsides) * self.length)[
+                        :, None
+                    ],
                 ),
                 axis=1,
             )
@@ -657,16 +654,14 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1):
             # first, handle sides
             sides = target == 0
             nsides = sides.sum()
-            beta = np.arcsin(
-                np.random.uniform(-1, 1, size=nsides)
-            ) + np.arctan2(-cdir[sides, 1], -cdir[sides, 0])
+            beta = np.arcsin(np.random.uniform(-1, 1, size=nsides)) + np.arctan2(
+                -cdir[sides, 1], -cdir[sides, 0]
+            )
             positions[accepted : accepted + block][sides] = np.stack(
                 (
                     self.radius * np.cos(beta) * st[sides],
                     self.radius * np.sin(beta) * st[sides],
-                    np.random.uniform(
-                        -self.length / 2, self.length / 2, size=nsides
-                    ),
+                    np.random.uniform(-self.length / 2, self.length / 2, size=nsides),
                 )
             ).T

From b2d7d296dfb781814f39084c430f0e7c175cf5ea Mon Sep 17 00:00:00 2001
From: Jakob van Santen
Date: Wed, 25 Sep 2024 14:24:22 +0200
Subject: [PATCH 6/6] ci: macos to 14 (M1)

---
 .github/workflows/test.yml | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index c476df4..c395fc7 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -7,25 +7,12 @@ jobs:
     name: test
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-11]
+        os: [ubuntu-20.04, macos-14]
     runs-on: ${{ matrix.os }}
     defaults:
       run:
         shell: bash -l {0}
     steps:
-      - name: Detect platform
-        id: detect
-        run: |
-          if [[ "${RUNNER_ARCH}" == "X64" ]]; then
-            if [[ "${RUNNER_OS}" == "Linux" ]]; then
-              echo '::set-output name=platform::linux-64' && exit 0
-            elif [[ "${RUNNER_OS}" == "macOS" ]]; then
-              echo '::set-output name=platform::osx-64' && exit 0
-            fi
-          fi
-          echo "Unsupported platform ${RUNNER_OS} ${RUNNER_ARCH}"
-          exit 1
-
       - uses: actions/checkout@v2
       - name: Set up conda environment