Merge pull request #3298 from chrishalcrow/more-sphinx-docstring
More docstring updates for multiple modules
alejoe91 authored Sep 4, 2024
2 parents 519c7c7 + 3a7855b commit 740e779
Showing 41 changed files with 281 additions and 152 deletions.
14 changes: 12 additions & 2 deletions src/spikeinterface/comparison/collision.py
@@ -13,9 +13,19 @@ class CollisionGTComparison(GroundTruthComparison):
This class needs maintenance and a bit of refactoring.
collision_lag : float
Parameters
----------
gt_sorting : SortingExtractor
The first sorting for the comparison
collision_lag : float, default: 2.0
Collision lag in ms.
tested_sorting : SortingExtractor
The second sorting for the comparison
nbins : int, default: 11
Number of collision bins
**kwargs : dict
Keyword arguments for `GroundTruthComparison`
"""

15 changes: 15 additions & 0 deletions src/spikeinterface/comparison/correlogram.py
@@ -15,6 +15,21 @@ class CorrelogramGTComparison(GroundTruthComparison):
This class needs maintenance and a bit of refactoring.
Parameters
----------
gt_sorting : SortingExtractor
The first sorting for the comparison
tested_sorting : SortingExtractor
The second sorting for the comparison
bin_ms : float, default: 1.0
Size of bin for correlograms
window_ms : float, default: 100.0
The window around the spike to compute the correlation in ms.
well_detected_score : float, default: 0.8
Agreement score above which units are well detected
**kwargs : dict
Keyword arguments for `GroundTruthComparison`
"""

def __init__(self, gt_sorting, tested_sorting, window_ms=100.0, bin_ms=1.0, well_detected_score=0.8, **kwargs):
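For context (illustrative, not part of this diff), a hedged sketch that exercises the signature shown just above on a toy sorting compared against itself.

from spikeinterface.core import generate_sorting
from spikeinterface.comparison import CorrelogramGTComparison

gt_sorting = generate_sorting(num_units=5, durations=[60.0], seed=0)
comp = CorrelogramGTComparison(
    gt_sorting=gt_sorting,
    tested_sorting=gt_sorting,
    window_ms=100.0,          # correlogram window in ms
    bin_ms=1.0,               # correlogram bin size in ms
    well_detected_score=0.8,  # agreement threshold for "well detected" units
)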
6 changes: 5 additions & 1 deletion src/spikeinterface/comparison/groundtruthstudy.py
@@ -42,6 +42,11 @@ class GroundTruthStudy:
This GroundTruthStudy has been refactored in version 0.100 to be more flexible than previous versions.
Note that the underlying folder structure is not backward compatible!
Parameters
----------
study_folder : str | Path
Path to folder containing `GroundTruthStudy`
"""

def __init__(self, study_folder):
@@ -370,7 +375,6 @@ def get_metrics(self, key):
return metrics

def get_units_snr(self, key):
""" """
return self.get_metrics(key)["snr"]

def get_performance_by_unit(self, case_keys=None):
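For context (illustrative, not part of this diff), a hedged sketch of the accessors touched by this hunk; the folder path and case key are made up and assume a study that was created beforehand (e.g. with GroundTruthStudy.create).

from spikeinterface.comparison import GroundTruthStudy

study = GroundTruthStudy("my_study_folder")   # hypothetical existing study folder
metrics = study.get_metrics(key="case0")      # full metrics table for one case (key is made up)
snrs = study.get_units_snr(key="case0")       # just the "snr" column, as in the method above
perf = study.get_performance_by_unit()        # per-unit performance across cases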
2 changes: 2 additions & 0 deletions src/spikeinterface/comparison/hybrid.py
@@ -48,6 +48,8 @@ class HybridUnitsRecording(InjectTemplatesRecording):
injected_sorting_folder : str | Path | None
If given, the injected sorting is saved to this folder.
It must be specified if injected_sorting is None or not serialisable to file.
seed : int, default: None
Random seed for amplitude_factor
Returns
-------
14 changes: 12 additions & 2 deletions src/spikeinterface/comparison/multicomparisons.py
@@ -43,7 +43,9 @@ class MultiSortingComparison(BaseMultiComparison, MixinSpikeTrainComparison):
- "intersection" : spike trains are the intersection between the spike trains of the
best matching two sorters
verbose : bool, default: False
if True, output is verbose
If True, output is verbose
do_matching : bool, default: True
If True, the comparison is done when the `MultiSortingComparison` is initialized
Returns
-------
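For context (illustrative, not part of this diff), a hedged sketch of a multi-sorter comparison on toy sortings; compare_multiple_sorters is the usual entry point that builds a MultiSortingComparison.

from spikeinterface.core import generate_sorting
from spikeinterface.comparison import compare_multiple_sorters

sortings = [generate_sorting(num_units=5, durations=[10.0], seed=s) for s in (0, 1, 2)]
mcmp = compare_multiple_sorters(
    sorting_list=sortings,
    name_list=["sorterA", "sorterB", "sorterC"],
    verbose=False,
    do_matching=True,  # run the comparison at construction, as documented above
)
# Units found by at least 2 of the 3 sorters
agreement_sorting = mcmp.get_agreement_sorting(minimum_agreement_count=2)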
@@ -318,7 +320,15 @@ class MultiTemplateComparison(BaseMultiComparison, MixinTemplateComparison):
chance_score : float, default: 0.3
Minimum agreement score for a possible match
verbose : bool, default: False
if True, output is verbose
If True, output is verbose
do_matching : bool, default: True
If True, the comparison is done when the `MultiSortingComparison` is initialized
support : "dense" | "union" | "intersection", default: "union"
The support to compute the similarity matrix.
num_shifts : int, default: 0
Number of shifts to use to shift templates to maximize similarity.
similarity_method : "cosine" | "l1" | "l2", default: "cosine"
Method for the similarity matrix.
Returns
-------
54 changes: 52 additions & 2 deletions src/spikeinterface/comparison/paircomparisons.py
@@ -265,7 +265,6 @@ def __init__(
gt_name=None,
tested_name=None,
delta_time=0.4,
sampling_frequency=None,
match_score=0.5,
well_detected_score=0.8,
redundant_score=0.2,
@@ -427,6 +426,11 @@ def get_performance(self, method="by_unit", output="pandas"):
def print_performance(self, method="pooled_with_average"):
"""
Print performance with the selected method
Parameters
----------
method : "by_unit" | "pooled_with_average", default: "pooled_with_average"
The method to compute performance
"""

template_txt_performance = _template_txt_performance
@@ -451,6 +455,19 @@ def print_summary(well_detected_score=None, redundant_score=None, overmerg
* how many gt units (one or several)
This summary mixes several performance metrics.
Parameters
----------
well_detected_score : float, default: None
The agreement score above which tested units
are counted as "well detected".
redundant_score : float, default: None
The agreement score below which tested units
are counted as "false positive" (and not "redundant").
overmerged_score : float, default: None
Tested units with 2 or more agreement scores above "overmerged_score"
are counted as "overmerged".
"""
txt = _template_summary_part1

@@ -502,6 +519,12 @@ def count_well_detected_units(self, well_detected_score):
"""
Count how many well detected units.
kwargs are the same as get_well_detected_units.
Parameters
----------
well_detected_score : float, default: None
The agreement score above which tested units
are counted as "well detected".
"""
return len(self.get_well_detected_units(well_detected_score=well_detected_score))

@@ -542,6 +565,12 @@ def get_false_positive_units(self, redundant_score=None):
def count_false_positive_units(self, redundant_score=None):
"""
See get_false_positive_units().
Parameters
----------
redundant_score : float | None, default: None
The agreement score below which tested units
are counted as "false positive" (and not "redundant").
"""
return len(self.get_false_positive_units(redundant_score))

@@ -556,7 +585,7 @@ def get_redundant_units(self, redundant_score=None):
Parameters
----------
redundant_score=None : float, default: None
redundant_score : float, default: None
The agreement score above which tested units
are counted as "redundant" (and not "false positive" ).
"""
@@ -579,6 +608,12 @@ def count_redundant_units(self, redundant_score=None):
def count_redundant_units(self, redundant_score=None):
"""
See get_redundant_units().
Parameters
----------
redundant_score : float, default: None
The agreement score below which tested units
are counted as "false positive" (and not "redundant").
"""
return len(self.get_redundant_units(redundant_score=redundant_score))

@@ -611,6 +646,12 @@ def get_overmerged_units(self, overmerged_score=None):
def count_overmerged_units(self, overmerged_score=None):
"""
See get_overmerged_units().
Parameters
----------
overmerged_score : float, default: None
Tested units with 2 or more agreement scores above "overmerged_score"
are counted as "overmerged".
"""
return len(self.get_overmerged_units(overmerged_score=overmerged_score))
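For context (illustrative, not part of this diff), a hedged sketch of the counting helpers documented in this file, with illustrative thresholds on a toy comparison.

from spikeinterface.core import generate_sorting
from spikeinterface.comparison import compare_sorter_to_ground_truth

gt = generate_sorting(num_units=5, durations=[10.0], seed=0)
comp = compare_sorter_to_ground_truth(gt, gt)
n_well_detected = comp.count_well_detected_units(well_detected_score=0.8)
n_false_positive = comp.count_false_positive_units(redundant_score=0.2)
n_redundant = comp.count_redundant_units(redundant_score=0.2)
n_overmerged = comp.count_overmerged_units(overmerged_score=0.2)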

@@ -706,6 +747,10 @@ class TemplateComparison(BasePairComparison, MixinTemplateComparison):
List of units from sorting_analyzer_1 to compare.
unit_ids2 : list, default: None
List of units from sorting_analyzer_2 to compare.
name1 : str, default: "sess1"
Name of first session.
name2 : str, default: "sess2"
Name of second session.
similarity_method : "cosine" | "l1" | "l2", default: "cosine"
Method for the similarity matrix.
support : "dense" | "union" | "intersection", default: "union"
Expand All @@ -714,6 +759,11 @@ class TemplateComparison(BasePairComparison, MixinTemplateComparison):
Number of shifts to use to shift templates to maximize similarity.
verbose : bool, default: False
If True, output is verbose.
chance_score : float, default: 0.3
Minimum agreement score for a possible match
match_score : float, default: 0.7
Minimum agreement score to match units
Returns
-------
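For context (illustrative, not part of this diff), a hedged sketch of a template comparison; the analyzer setup follows the usual SortingAnalyzer flow, and the self-comparison is only to keep the snippet self-contained.

from spikeinterface.core import generate_ground_truth_recording, create_sorting_analyzer
from spikeinterface.comparison import compare_templates

recording, sorting = generate_ground_truth_recording(durations=[10.0], num_units=5, seed=0)
analyzer = create_sorting_analyzer(sorting, recording)
analyzer.compute(["random_spikes", "templates"])
tc = compare_templates(
    analyzer, analyzer,                 # sorting_analyzer_1 / sorting_analyzer_2
    name1="sess1", name2="sess2",
    similarity_method="cosine", support="union", num_shifts=0,
)
agreement = tc.agreement_scores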
7 changes: 4 additions & 3 deletions src/spikeinterface/core/analyzer_extension_core.py
@@ -675,13 +675,14 @@ class ComputeNoiseLevels(AnalyzerExtension):
Parameters
----------
sorting_analyzer: SortingAnalyzer
sorting_analyzer : SortingAnalyzer
A SortingAnalyzer object
**params: dict with additional parameters for the `spikeinterface.get_noise_levels()` function
**kwargs : dict
Additional parameters for the `spikeinterface.get_noise_levels()` function
Returns
-------
noise_levels: np.array
noise_levels : np.array
The noise level vector
"""

36 changes: 25 additions & 11 deletions src/spikeinterface/core/generate.py
@@ -42,13 +42,14 @@ def generate_recording(
----------
num_channels : int, default: 2
The number of channels in the recording.
sampling_frequency : float, default: 30000.0
The sampling frequency of the recording in Hz
sampling_frequency : float, default: 30000. (in Hz)
The sampling frequency of the recording, default: 30000.
durations : list[float], default: [5.0, 2.5]
The duration in seconds of each segment in the recording, default: [5.0, 2.5].
Note that the number of segments is determined by the length of this list.
set_probe : bool | None, default: True
ndim : int | None, default: 2
The duration in seconds of each segment in the recording.
The number of segments is determined by the length of this list.
set_probe : bool, default: True
If true, attaches probe to the returned `Recording`
ndim : int, default: 2
The number of dimensions of the probe, default: 2. Set to 3 to make 3 dimensional probe.
seed : int | None, default: None
A seed for the np.random.default_rng function
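For context (illustrative, not part of this diff), a sketch using the defaults documented above (two segments, a 2D probe, a fixed seed).

from spikeinterface.core import generate_recording

recording = generate_recording(
    num_channels=2,
    sampling_frequency=30000.0,
    durations=[5.0, 2.5],  # one duration per segment
    set_probe=True,
    ndim=2,
    seed=42,
)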
@@ -623,6 +624,13 @@ def generate_snippets(
The number of units.
empty_units : list | None, default: None
A list of units that will have no spikes.
durations : List[float], default: [10.325, 3.5]
The duration in seconds of each segment in the recording.
The number of segments is determined by the length of this list.
set_probe : bool, default: True
If true, attaches probe to the returned snippets object
**job_kwargs : dict, default: None
Job keyword arguments for `snippets_from_sorting`
Returns
-------
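For context (illustrative, not part of this diff), a hedged sketch passing only the parameters documented in this hunk; the (snippets, sorting) return pair is assumed from the surrounding code.

from spikeinterface.core import generate_snippets

snippets, sorting = generate_snippets(
    num_units=5,
    durations=[10.325, 3.5],  # one duration per segment
    set_probe=True,
)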
@@ -801,9 +809,9 @@ def synthesize_random_firings(
Sampling rate in Hz.
duration : float, default: 60
Duration of the segment in seconds.
refractory_period_ms : float, default: 4.0
refractory_period_ms : float
Refractory period in ms.
firing_rates : float or list[float], default: 3.0
firing_rates : float or list[float]
The firing rate of each unit (in Hz).
If float, all units will have the same firing rate.
add_shift_shuffle : bool, default: False
@@ -907,8 +915,8 @@ def inject_some_duplicate_units(sorting, num=4, max_shift=5, ratio=None, seed=No
range of the shift in sample.
ratio : float | None, default: None
Proportion of original spike in the injected units.
seed : int, default: None
Seed for the generator.
seed : int | None, default: None
Random seed for creating unit peak shifts.
Returns
-------
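For context (illustrative, not part of this diff), a hedged sketch that adds shifted duplicates of a few units to a toy sorting.

from spikeinterface.core import generate_sorting
from spikeinterface.core.generate import inject_some_duplicate_units

sorting = generate_sorting(num_units=10, durations=[10.0], seed=0)
sorting_with_dups = inject_some_duplicate_units(sorting, num=4, max_shift=5, ratio=None, seed=0)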
@@ -1564,6 +1572,10 @@ def generate_templates(
Ellipsoid injects some anisotropy dependent on unit shape, sphere is equivalent
to Euclidean distance.
mode : "sphere" | "ellipsoid", default: "ellipsoid"
Mode for how to calculate distances
Returns
-------
templates: np.array
@@ -1708,6 +1720,8 @@ class InjectTemplatesRecording(BaseRecording):
upsample_vector : np.array | None, default: None.
When templates are 4d, jitter can be simulated.
Optionally, upsample_vector is the jitter index, with one value per spike in the range 0-templates.shape[3].
check_borders : bool, default: False
Checks if the border of the templates are zero.
Returns
-------
@@ -2074,7 +2088,7 @@ def generate_ground_truth_recording(
* (num_units, num_samples, num_channels, upsample_factor): case with oversample template to introduce jitter.
ms_before : float, default: 1.5
Cut out in ms before spike peak.
ms_after : float, default: 3
ms_after : float, default: 3.0
Cut out in ms after spike peak.
upsample_factor : None | int, default: None
An upsampling factor used only when templates are not provided.
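For context (illustrative, not part of this diff), a sketch using the cut-out parameters documented above.

from spikeinterface.core import generate_ground_truth_recording

recording, sorting = generate_ground_truth_recording(
    durations=[10.0],
    num_units=10,
    ms_before=1.5,  # cut out in ms before the spike peak
    ms_after=3.0,   # cut out in ms after the spike peak
    seed=42,
)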
3 changes: 3 additions & 0 deletions src/spikeinterface/curation/auto_merge.py
@@ -98,6 +98,7 @@ def get_potential_auto_merge(
* | "feature_neighbors": focused on finding unit pairs whose spikes are close in the feature space using kNN.
| It uses the following steps: "num_spikes", "snr", "remove_contaminated", "unit_locations",
| "knn", "quality_score"
If `preset` is None, you can specify the steps manually with the `steps` parameter.
resolve_graph : bool, default: False
If True, the function resolves the potential unit pairs to be merged into multiple-unit merges.
@@ -145,6 +146,8 @@ def get_potential_auto_merge(
Potential steps : "num_spikes", "snr", "remove_contaminated", "unit_locations", "correlogram",
"template_similarity", "presence_distance", "cross_contamination", "knn", "quality_score"
Please check steps explanations above!
presence_distance_kwargs : None|dict, default: None
A dictionary of kwargs to be passed to compute_presence_distance().
Returns
-------
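For context (illustrative, not part of this diff), a hedged sketch using the "feature_neighbors" preset named above; exactly which extensions a preset requires is an assumption here, so a broad set is computed first.

from spikeinterface.core import generate_ground_truth_recording, create_sorting_analyzer
from spikeinterface.curation import get_potential_auto_merge

recording, sorting = generate_ground_truth_recording(durations=[10.0], num_units=10, seed=0)
analyzer = create_sorting_analyzer(sorting, recording)
analyzer.compute(
    ["random_spikes", "waveforms", "templates", "noise_levels",
     "unit_locations", "spike_locations", "spike_amplitudes"]
)
merge_pairs = get_potential_auto_merge(analyzer, preset="feature_neighbors", resolve_graph=True)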
16 changes: 8 additions & 8 deletions src/spikeinterface/curation/curation_format.py
@@ -293,25 +293,25 @@ def apply_curation(
The Sorting object to apply merges.
curation_dict : dict
The curation dict.
censor_ms: float | None, default: None
censor_ms : float | None, default: None
When applying the merges, any consecutive spikes within the `censor_ms` are removed. This can be thought of
as the desired refractory period. If `censor_ms=None`, no spikes are discarded.
new_id_strategy : "append" | "take_first", default: "append"
The strategy that should be used, if `new_unit_ids` is None, to create new unit_ids.
* "append" : new_units_ids will be added at the end of max(sorting.unit_ids)
* "take_first" : new_unit_ids will be the first unit_id of every list of merges
merging_mode : "soft" | "hard", default: "soft"
merging_mode : "soft" | "hard", default: "soft"
How merges are performed for a SortingAnalyzer. If `merge_mode` is "soft", merges will be approximated, with no reloading of
the waveforms. If `merge_mode` is "hard", recomputations are accurately
performed, reloading waveforms if needed
sparsity_overlap : float, default 0.75
The percentage of overlap that units should share in order to accept merges. If this criteria is not
achieved, soft merging will not be possible and an error will be raised. This is for use with a SortingAnalyzer input.
verbose:
**job_kwargs
The percentage of overlap that units should share in order to accept merges. If this criteria is not
achieved, soft merging will not be possible and an error will be raised. This is for use with a SortingAnalyzer input.
verbose : bool, default: False
If True, output is verbose
**job_kwargs : dict
Job keyword arguments for `merge_units`
Returns
-------
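For context (illustrative, not part of this diff), a hedged sketch; "curation.json" is a made-up path to a file holding a curation dictionary in the SpikeInterface curation format, and only the parameters documented above are passed.

import json
from spikeinterface.core import generate_sorting
from spikeinterface.curation import apply_curation

sorting = generate_sorting(num_units=10, durations=[10.0], seed=0)
with open("curation.json") as f:    # hypothetical file with a valid curation dict
    curation_dict = json.load(f)

curated = apply_curation(
    sorting,                        # a Sorting (a SortingAnalyzer also works, per the docstring)
    curation_dict=curation_dict,
    censor_ms=None,
    new_id_strategy="append",
)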
3 changes: 2 additions & 1 deletion src/spikeinterface/curation/curationsorting.py
@@ -18,14 +18,15 @@ class CurationSorting:
Parameters
----------
sorting: BaseSorting
sorting : BaseSorting
The sorting object
properties_policy : "keep" | "remove", default: "keep"
Policy used to propagate properties after split and merge operation. If "keep" the properties will be
passed to the new units (if the original units have the same value). If "remove" the new units will have
an empty value for all the properties
make_graph : bool
True to keep a Networkx graph instance with the curation history
Returns
-------
sorting : Sorting
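For context (illustrative, not part of this diff), a hedged sketch of manual curation on a toy sorting; the merge/remove_units calls and the final attribute name are assumptions, not shown in this hunk.

from spikeinterface.core import generate_sorting
from spikeinterface.curation import CurationSorting

sorting = generate_sorting(num_units=6, durations=[10.0], seed=0)
cs = CurationSorting(sorting, properties_policy="keep", make_graph=True)
cs.merge([sorting.unit_ids[0], sorting.unit_ids[1]])   # merge two units (assumed API)
cs.remove_units([sorting.unit_ids[2]])                 # drop a unit (assumed API)
curated_sorting = cs.sorting                           # curated BaseSorting (attribute name assumed)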