From f912b3a95b13e64818c7df0a9c81eabd441e212a Mon Sep 17 00:00:00 2001 From: daquintero Date: Thu, 28 Nov 2024 13:43:19 +0100 Subject: [PATCH] Simualtion plots complete --- .../08a_pcb_interposer_characterisation.py | 2 +- piel/analysis/signals/time/core/compose.py | 14 +- piel/analysis/signals/time/core/dimension.py | 12 +- piel/analysis/signals/time/core/metrics.py | 18 +- piel/analysis/signals/time/core/off_state.py | 40 ++-- piel/analysis/signals/time/core/offset.py | 14 +- piel/analysis/signals/time/core/remove.py | 14 +- piel/analysis/signals/time/core/split.py | 22 +- piel/analysis/signals/time/core/threshold.py | 32 +-- piel/analysis/signals/time/core/transform.py | 14 +- piel/analysis/signals/time/core/transition.py | 16 +- .../time/integration/extract_pulse_metrics.py | 8 +- piel/conversion.py | 216 ------------------ piel/conversion/__init__.py | 9 + piel/conversion/core.py | 216 ++++++++++++++++++ .../file.py} | 9 +- piel/experimental/devices/DPO73304/extract.py | 18 +- .../transient/electro_optic/pulsed_laser.py | 10 +- piel/tools/virtuoso/simulation/__init__.py | 1 + .../virtuoso/simulation/data/__init__.py | 4 +- piel/tools/virtuoso/simulation/data/dc.py | 90 ++++++++ .../virtuoso/simulation/data/expressions.py | 0 piel/tools/virtuoso/simulation/data/time.py | 89 ++++++++ .../virtuoso/simulation/data/transient.py | 0 piel/tools/virtuoso/simulation/data/utils.py | 29 +++ piel/types/__init__.py | 2 + .../measurements/data/oscilloscope.py | 6 +- .../measurements/data/propagation.py | 10 +- piel/types/signal/time_data.py | 8 +- .../measurement_data_collection.py | 6 +- piel/visual/plot/signals/dc/__init__.py | 3 +- .../dc/{signal_dc_collection.py => basic.py} | 13 +- piel/visual/plot/signals/dc/overlay.py | 113 +++++++++ piel/visual/plot/signals/time/basic.py | 6 +- piel/visual/plot/signals/time/overlay.py | 6 +- piel/visual/plot/signals/time/separate.py | 11 +- piel/visual/signals.py | 4 +- .../signals/time/core/test_dimension.py | 76 +++--- 
.../analysis/signals/time/core/test_offset.py | 8 +- .../time/core/test_split_threshold_compose.py | 20 +- .../signals/time/core/test_transitions.py | 4 +- .../signals/time/core/test_transiton.py | 10 +- .../integration/test_extract_pulse_metrics.py | 16 +- tests/experimental/devices/test_DPO73304.py | 12 +- .../measurements/data/test_propagation.py | 6 +- .../test_electro_optic_pulsed_laser.py | 6 +- 46 files changed, 779 insertions(+), 464 deletions(-) create mode 100644 piel/conversion/__init__.py create mode 100644 piel/conversion/core.py rename piel/{file_conversion.py => conversion/file.py} (82%) create mode 100644 piel/tools/virtuoso/simulation/__init__.py delete mode 100644 piel/tools/virtuoso/simulation/data/expressions.py create mode 100644 piel/tools/virtuoso/simulation/data/time.py delete mode 100644 piel/tools/virtuoso/simulation/data/transient.py create mode 100644 piel/tools/virtuoso/simulation/data/utils.py rename piel/visual/plot/signals/dc/{signal_dc_collection.py => basic.py} (93%) create mode 100644 piel/visual/plot/signals/dc/overlay.py diff --git a/docs/examples/08a_pcb_interposer_characterisation/08a_pcb_interposer_characterisation.py b/docs/examples/08a_pcb_interposer_characterisation/08a_pcb_interposer_characterisation.py index dd69aa74..353c9f7c 100644 --- a/docs/examples/08a_pcb_interposer_characterisation/08a_pcb_interposer_characterisation.py +++ b/docs/examples/08a_pcb_interposer_characterisation/08a_pcb_interposer_characterisation.py @@ -731,7 +731,7 @@ def calibration_propagation_delay_experiment( # Note that this has some limitations of revalidation and reinstantion of python classes. -# ### Extract Software-Defined Statistics from a `DataTimeSignalData` +# ### Extract Software-Defined Statistics from a `TimeSignalData` # `piel.analysis` also provides some functionality to analyse the corresponding time-data accordingly. 
# diff --git a/piel/analysis/signals/time/core/compose.py b/piel/analysis/signals/time/core/compose.py index 1fd8d704..03524b99 100644 --- a/piel/analysis/signals/time/core/compose.py +++ b/piel/analysis/signals/time/core/compose.py @@ -2,26 +2,26 @@ import numpy as np from typing import List, Optional -from piel.types import DataTimeSignalData +from piel.types import TimeSignalData from .off_state import ( extract_off_state_generator_from_full_state_data, ) # Adjust the import path as needed def compose_pulses_into_signal( - pulses: List[DataTimeSignalData], + pulses: List[TimeSignalData], baseline: float = 0.0, noise_std: Optional[float] = None, data_time_signal_kwargs: Optional[dict] = None, start_time_s: Optional[float] = None, end_time_s: Optional[float] = None, -) -> DataTimeSignalData: +) -> TimeSignalData: """ Composes a full signal from a list of pulses by inserting them into a continuous time array and filling gaps with generated noise. Parameters: - pulses (List[DataTimeSignalData]): List of pulse signals to be inserted. + pulses (List[TimeSignalData]): List of pulse signals to be inserted. baseline (float, optional): Baseline value of the signal. Defaults to 0.0. noise_std (float, optional): Standard deviation of the noise to be generated in gaps. If not provided, it is estimated from the pulses. @@ -30,7 +30,7 @@ def compose_pulses_into_signal( end_time_s (float, optional): End time of the composed signal. If not provided, uses the last pulse's end time. Returns: - DataTimeSignalData: The composed full signal with pulses and noise. + TimeSignalData: The composed full signal with pulses and noise. 
""" if data_time_signal_kwargs is None: data_time_signal_kwargs = {} @@ -164,8 +164,8 @@ def compose_pulses_into_signal( # Insert the pulse data full_data[pulse_start_idx:pulse_end_idx] = pulse_data - # Create the composed DataTimeSignalData instance - composed_signal = DataTimeSignalData( + # Create the composed TimeSignalData instance + composed_signal = TimeSignalData( time_s=full_time_s.tolist(), data=full_data.tolist(), data_name="ComposedSignal", diff --git a/piel/analysis/signals/time/core/dimension.py b/piel/analysis/signals/time/core/dimension.py index 63ca6468..d7ead645 100644 --- a/piel/analysis/signals/time/core/dimension.py +++ b/piel/analysis/signals/time/core/dimension.py @@ -1,18 +1,18 @@ import numpy as np -from piel.types import DataTimeSignalData, Unit +from piel.types import TimeSignalData, Unit import logging logger = logging.getLogger(__name__) def resize_data_time_signal_units( - waveform: DataTimeSignalData, + waveform: TimeSignalData, time_unit: Unit, data_unit: Unit, corrected_name_suffix: str = "_corrected", -) -> DataTimeSignalData: +) -> TimeSignalData: """ - Applies unit corrections to the time and data arrays of a DataTimeSignalData object. + Applies unit corrections to the time and data arrays of a TimeSignalData object. Parameters: - waveform: The original waveform data. @@ -21,7 +21,7 @@ def resize_data_time_signal_units( - corrected_name_suffix: Suffix to append to the data name after correction. Returns: - - A new DataTimeSignalData object with corrected time and data. + - A new TimeSignalData object with corrected time and data. 
""" # Convert time and data to NumPy arrays for efficient computation time_array = np.array(waveform.time_s, dtype=float) @@ -57,7 +57,7 @@ def resize_data_time_signal_units( corrected_data_name = f"{waveform.data_name}{corrected_name_suffix}" # Create and return the corrected waveform - return DataTimeSignalData( + return TimeSignalData( time_s=corrected_time.tolist(), # Convert back to list if necessary data=corrected_data.tolist(), data_name=corrected_data_name, diff --git a/piel/analysis/signals/time/core/metrics.py b/piel/analysis/signals/time/core/metrics.py index 181b4624..ebb0afba 100644 --- a/piel/analysis/signals/time/core/metrics.py +++ b/piel/analysis/signals/time/core/metrics.py @@ -1,6 +1,6 @@ import numpy as np from piel.types import ( - MultiDataTimeSignal, + MultiTimeSignalData, ScalarMetric, EdgeTransitionAnalysisTypes, ScalarMetricCollection, @@ -10,14 +10,14 @@ def extract_mean_metrics_list( - multi_data_time_signal: MultiDataTimeSignal, **kwargs + multi_data_time_signal: MultiTimeSignalData, **kwargs ) -> ScalarMetricCollection: """ Extracts scalar metrics from a collection of rising edge signals. Standard deviation is not calculated as this just computes individual metrics list. Args: - multi_data_time_signal (List[DataTimeSignalData]): A list of rising edge signals. + multi_data_time_signal (List[TimeSignalData]): A list of rising edge signals. Returns: ScalarMetricCollection: A collection of ScalarMetric instances containing the extracted metrics. @@ -58,7 +58,7 @@ def extract_mean_metrics_list( def extract_peak_to_peak_metrics_list( - multi_data_time_signal: MultiDataTimeSignal, + multi_data_time_signal: MultiTimeSignalData, metric_kwargs_list: list[dict] = None, **kwargs, ) -> ScalarMetricCollection: @@ -67,7 +67,7 @@ def extract_peak_to_peak_metrics_list( difference between the maximum and minimum values of the signal. Args: - multi_data_time_signal (MultiDataTimeSignal): A collection of time signals to analyze. 
+ multi_data_time_signal (MultiTimeSignalData): A collection of time signals to analyze. Returns: ScalarMetricCollection: A collection of ScalarMetric instances containing the peak-to-peak values @@ -116,7 +116,7 @@ def extract_peak_to_peak_metrics_list( def extract_multi_time_signal_statistical_metrics( - multi_data_time_signal: MultiDataTimeSignal, + multi_data_time_signal: MultiTimeSignalData, analysis_type: EdgeTransitionAnalysisTypes = "peak_to_peak", **kwargs, ) -> ScalarMetric: @@ -124,7 +124,7 @@ def extract_multi_time_signal_statistical_metrics( Extracts scalar metrics from a collection of rising edge signals. Args: - multi_data_time_signal (List[DataTimeSignalData]): A list of rising edge signals. + multi_data_time_signal (List[TimeSignalData]): A list of rising edge signals. analysis_type (piel.types.EdgeTransitionAnalysisTypes): The type of analysis to perform. Returns: @@ -146,7 +146,7 @@ def extract_multi_time_signal_statistical_metrics( def extract_statistical_metrics_collection( - multi_data_time_signal: MultiDataTimeSignal, + multi_data_time_signal: MultiTimeSignalData, analysis_types: list[EdgeTransitionAnalysisTypes], **kwargs, ) -> ScalarMetricCollection: @@ -154,7 +154,7 @@ def extract_statistical_metrics_collection( Extracts a collection of scalar metrics from a collection of rising edge signals based on multiple analysis types. Args: - multi_data_time_signal (MultiDataTimeSignal): A collection of rising edge signals. + multi_data_time_signal (MultiTimeSignalData): A collection of rising edge signals. analysis_types (list[EdgeTransitionAnalysisTypes], optional): The types of analyses to perform. Defaults to ["peak_to_peak"]. 
Returns: diff --git a/piel/analysis/signals/time/core/off_state.py b/piel/analysis/signals/time/core/off_state.py index 09553d5a..d059993d 100644 --- a/piel/analysis/signals/time/core/off_state.py +++ b/piel/analysis/signals/time/core/off_state.py @@ -1,7 +1,7 @@ import numpy as np from typing import Callable, Optional, Dict from piel.types import ( - DataTimeSignalData, + TimeSignalData, ) # Ensure this import matches your project structure @@ -12,7 +12,7 @@ def create_off_state_generator( baseline: float = 0.0, data_name: str = "off_state", data_time_signal_kwargs: Optional[Dict] = None, -) -> Callable[[float, Optional[int]], DataTimeSignalData]: +) -> Callable[[float, Optional[int]], TimeSignalData]: """ Creates a generator function for the equivalent off state signal with noise. @@ -21,18 +21,18 @@ def create_off_state_generator( sampling_rate (float): Sampling rate in Hz. baseline (float): Baseline signal level for the off state. data_name (str): Name of the data signal. - data_time_signal_kwargs (dict, optional): Additional keyword arguments for DataTimeSignalData. + data_time_signal_kwargs (dict, optional): Additional keyword arguments for TimeSignalData. Returns: - Callable[[float, Optional[int]], DataTimeSignalData]: - A function that takes duration_s (in seconds) and returns DataTimeSignalData. + Callable[[float, Optional[int]], TimeSignalData]: + A function that takes duration_s (in seconds) and returns TimeSignalData. """ if data_time_signal_kwargs is None: data_time_signal_kwargs = {} def generate_off_state( duration_s: float, num_samples: Optional[int] = None - ) -> DataTimeSignalData: + ) -> TimeSignalData: """ Generates the off state signal data with noise for a given duration_s. @@ -41,7 +41,7 @@ def generate_off_state( num_samples (float): Number of samples to generate. Returns: - DataTimeSignalData: The generated signal data. + TimeSignalData: The generated signal data. 
""" if num_samples is None: num_samples = int(duration_s * sampling_rate) @@ -49,7 +49,7 @@ def generate_off_state( noise = np.random.normal(loc=0.0, scale=noise_std, size=num_samples) data = baseline + noise - return DataTimeSignalData( + return TimeSignalData( time_s=time_s.tolist(), data=data.tolist(), data_name=data_name, @@ -61,15 +61,15 @@ def generate_off_state( # New function to extract parameters and create generator def extract_off_state_generator_from_off_state_section( - off_state_data: DataTimeSignalData, + off_state_data: TimeSignalData, data_name: Optional[str] = None, data_time_signal_kwargs: Optional[Dict] = None, -) -> Callable[[float], DataTimeSignalData]: +) -> Callable[[float], TimeSignalData]: """ Extracts parameters from an existing off state DataTimeSignalData and creates a generator function. Parameters: - off_state_data (DataTimeSignalData): The existing off state signal data. + off_state_data (TimeSignalData): The existing off state signal data. data_name (str, optional): Name for the new data signal. Defaults to the original data_name. data_time_signal_kwargs (dict, optional): Additional keyword arguments for DataTimeSignalData. @@ -112,19 +112,19 @@ def extract_off_state_generator_from_off_state_section( def extract_off_state_generator_from_full_state_data( - full_time_signal_data: DataTimeSignalData, + full_time_signal_data: TimeSignalData, baseline: Optional[float] = None, threshold: Optional[float] = None, min_duration_s: Optional[float] = None, sampling_rate: Optional[float] = None, data_name: Optional[str] = None, data_time_signal_kwargs: Optional[Dict] = None, -) -> Callable[[float, Optional[int]], DataTimeSignalData]: +) -> Callable[[float, Optional[int]], TimeSignalData]: """ Extracts parameters from an existing off state DataTimeSignalData and creates a generator function. Parameters: - full_time_signal_data (DataTimeSignalData): The input signal data containing multiple states. 
+ full_time_signal_data (TimeSignalData): The input signal data containing multiple states. baseline (float, optional): The baseline value representing the off state. If not provided, it is computed as the mean of the data. threshold (float, optional): The maximum deviation from the baseline to consider as off state. @@ -184,18 +184,18 @@ def extract_off_state_generator_from_full_state_data( def extract_off_state_section( - full_time_signal_data: DataTimeSignalData, + full_time_signal_data: TimeSignalData, baseline: Optional[float] = None, threshold: Optional[float] = None, min_duration_s: Optional[float] = None, sampling_rate: Optional[float] = None, data_time_signal_kwargs: Optional[Dict] = None, -) -> DataTimeSignalData: +) -> TimeSignalData: """ Extracts the off state segments from a DataTimeSignalData instance containing multiple on and off states. Parameters: - full_time_signal_data (DataTimeSignalData): The input signal data containing multiple states. + full_time_signal_data (TimeSignalData): The input signal data containing multiple states. baseline (float, optional): The baseline value representing the off state. If not provided, it is computed as the mean of the data. threshold (float, optional): The maximum deviation from the baseline to consider as off state. @@ -206,7 +206,7 @@ def extract_off_state_section( data_time_signal_kwargs (dict, optional): Additional keyword arguments for DataTimeSignalData. Returns: - DataTimeSignalData: A new DataTimeSignalData instance containing only the off state segments. + TimeSignalData: A new DataTimeSignalData instance containing only the off state segments. 
""" if data_time_signal_kwargs is None: data_time_signal_kwargs = {} @@ -276,8 +276,8 @@ def extract_off_state_section( extracted_time = np.array(extracted_time)[sorted_indices].tolist() extracted_data = np.array(extracted_data)[sorted_indices].tolist() - # Create a new DataTimeSignalData instance - extracted_off_state = DataTimeSignalData( + # Create a new TimeSignalData instance + extracted_off_state = TimeSignalData( time_s=extracted_time, data=extracted_data, data_name=full_time_signal_data.data_name + "_off_state", diff --git a/piel/analysis/signals/time/core/offset.py b/piel/analysis/signals/time/core/offset.py index 387da6aa..1517ab93 100644 --- a/piel/analysis/signals/time/core/offset.py +++ b/piel/analysis/signals/time/core/offset.py @@ -1,12 +1,12 @@ import numpy as np -from piel.types import DataTimeSignalData +from piel.types import TimeSignalData def offset_to_first_rising_edge( - waveform: DataTimeSignalData, + waveform: TimeSignalData, lower_threshold_ratio: float = 0.1, upper_threshold_ratio: float = 0.9, -) -> DataTimeSignalData: +) -> TimeSignalData: """ Offsets the waveform's time axis so that the first rising edge occurs at time zero. @@ -14,12 +14,12 @@ def offset_to_first_rising_edge( threshold to above the upper threshold. Parameters: - waveform (DataTimeSignalData): The input waveform data. + waveform (TimeSignalData): The input waveform data. lower_threshold_ratio (float): Lower threshold as a ratio of the amplitude range. upper_threshold_ratio (float): Upper threshold as a ratio of the amplitude range. Returns: - DataTimeSignalData: A new waveform with the time offset applied. + TimeSignalData: A new waveform with the time offset applied. Raises: ValueError: If no rising edge is found in the waveform. 
@@ -68,8 +68,8 @@ def offset_to_first_rising_edge( # Apply the offset offset_time_array = time - offset_time - # Create a new DataTimeSignalData instance with the offset time - offset_signal = DataTimeSignalData( + # Create a new TimeSignalData instance with the offset time + offset_signal = TimeSignalData( time_s=offset_time_array.tolist(), data=data.tolist(), data_name=waveform.data_name, diff --git a/piel/analysis/signals/time/core/remove.py b/piel/analysis/signals/time/core/remove.py index 19469664..e8603d34 100644 --- a/piel/analysis/signals/time/core/remove.py +++ b/piel/analysis/signals/time/core/remove.py @@ -1,12 +1,12 @@ -from piel.types import DataTimeSignalData +from piel.types import TimeSignalData import numpy as np def remove_before_first_rising_edge( - waveform: DataTimeSignalData, + waveform: TimeSignalData, lower_threshold_ratio: float = 0.1, upper_threshold_ratio: float = 0.9, -) -> DataTimeSignalData: +) -> TimeSignalData: """ Removes all data points before the first rising edge in the waveform. @@ -14,12 +14,12 @@ def remove_before_first_rising_edge( threshold to above the upper threshold. Parameters: - waveform (DataTimeSignalData): The input waveform data. + waveform (TimeSignalData): The input waveform data. lower_threshold_ratio (float): Lower threshold as a ratio of the amplitude range. upper_threshold_ratio (float): Upper threshold as a ratio of the amplitude range. Returns: - DataTimeSignalData: A new waveform with data points before the first rising edge removed. + TimeSignalData: A new waveform with data points before the first rising edge removed. Raises: ValueError: If no rising edge is found in the waveform. 
@@ -72,8 +72,8 @@ def remove_before_first_rising_edge( # Optionally, reset the time so that the rising edge starts at zero sliced_time = sliced_time - sliced_time[0] - # Create a new DataTimeSignalData instance with the sliced data - trimmed_signal = DataTimeSignalData( + # Create a new TimeSignalData instance with the sliced data + trimmed_signal = TimeSignalData( time_s=sliced_time.tolist(), data=sliced_data.tolist(), data_name=waveform.data_name, diff --git a/piel/analysis/signals/time/core/split.py b/piel/analysis/signals/time/core/split.py index 18a18585..51f3f222 100644 --- a/piel/analysis/signals/time/core/split.py +++ b/piel/analysis/signals/time/core/split.py @@ -2,8 +2,8 @@ import numpy as np from typing import List, Optional, Dict from piel.types import ( - DataTimeSignalData, - MultiDataTimeSignal, + TimeSignalData, + MultiTimeSignalData, ) # Adjust the import path as needed from .threshold import ( extract_pulses_from_signal, @@ -13,7 +13,7 @@ def separate_per_pulse_threshold( - signal_data: DataTimeSignalData, + signal_data: TimeSignalData, first_signal_threshold: float, second_signal_threshold: float, trigger_delay_s: float, @@ -24,12 +24,12 @@ def separate_per_pulse_threshold( second_post_pulse_time_s: float = 1e-9, noise_std_multiplier: float = 3.0, data_time_signal_kwargs: Optional[Dict] = None, -) -> List[MultiDataTimeSignal]: +) -> List[MultiTimeSignalData]: """ Separates pulses in a signal into two categories based on two threshold values. Parameters: - signal_data (DataTimeSignalData): The input signal data containing multiple pulses. + signal_data (TimeSignalData): The input signal data containing multiple pulses. first_signal_threshold (float): The higher threshold to categorize pulses. second_signal_threshold (float): The lower threshold to categorize pulses. trigger_delay_s (float): Minimum time (in seconds) between pulses to prevent overlap. 
@@ -46,7 +46,7 @@ def separate_per_pulse_threshold( data_time_signal_kwargs (dict, optional): Additional keyword arguments for DataTimeSignalData. Returns: - List[MultiDataTimeSignal]: A list containing a single `MultiDataTimeSignal` instance: + List[MultiTimeSignalData]: A list containing a single `MultiTimeSignalData` instance: - `high_threshold_pulses`: List of `DataTimeSignalData` for pulses above `first_signal_threshold`. - `low_threshold_pulses`: List of `DataTimeSignalData` for pulses above `second_signal_threshold` but below `first_signal_threshold`. """ @@ -87,7 +87,7 @@ def separate_per_pulse_threshold( ] # Function to find the peak time of a pulse - def get_pulse_peak_time(pulse: DataTimeSignalData) -> float: + def get_pulse_peak_time(pulse: TimeSignalData) -> float: if not pulse.data or not pulse.time_s: return float("inf") # Assign a large value if pulse data is empty max_idx = np.argmax(pulse.data) @@ -130,7 +130,7 @@ def get_pulse_peak_time(pulse: DataTimeSignalData) -> float: def split_compose_per_pulse_threshold( - signal_data: DataTimeSignalData, + signal_data: TimeSignalData, first_signal_threshold: float, second_signal_threshold: float, trigger_delay_s: float, @@ -142,12 +142,12 @@ def split_compose_per_pulse_threshold( start_time_s: Optional[float] = None, end_time_s: Optional[float] = None, data_time_signal_kwargs: Optional[Dict] = None, -) -> MultiDataTimeSignal: +) -> MultiTimeSignalData: """ Separates pulses in a signal into two categories based on two threshold values. Parameters: - signal_data (DataTimeSignalData): The input signal data containing multiple pulses. + signal_data (TimeSignalData): The input signal data containing multiple pulses. first_signal_threshold (float): The higher threshold to categorize pulses. second_signal_threshold (float): The lower threshold to categorize pulses. trigger_delay_s (float): Minimum time (in seconds) between pulses to prevent overlap. 
@@ -166,7 +166,7 @@ def split_compose_per_pulse_threshold( end_time_s (float, optional): End time of the composed signal. If not provided, uses the last pulse's end time. Returns: - List[DataTimeSignalData]: The composed full signals as [low_threshold_pulse_signal, high_threshold_pulse_signal] + List[TimeSignalData]: The composed full signals as [low_threshold_pulse_signal, high_threshold_pulse_signal] """ high_threshold_pulse_list, low_threshold_pulse_list = separate_per_pulse_threshold( diff --git a/piel/analysis/signals/time/core/threshold.py b/piel/analysis/signals/time/core/threshold.py index 501e43f9..c0947b84 100644 --- a/piel/analysis/signals/time/core/threshold.py +++ b/piel/analysis/signals/time/core/threshold.py @@ -1,6 +1,6 @@ import numpy as np from scipy.signal import find_peaks -from piel.types import DataTimeSignalData, MultiDataTimeSignal +from piel.types import TimeSignalData, MultiTimeSignalData from typing import Optional, List import logging @@ -8,16 +8,16 @@ def extract_signal_above_threshold( - signal_data: DataTimeSignalData, + signal_data: TimeSignalData, threshold: float, min_pulse_width_s: float = 0.0, noise_floor: float = 0.0, -) -> MultiDataTimeSignal: +) -> MultiTimeSignalData: """ Extracts all pulses from the input signal that exceed the specified threshold. Args: - signal_data (DataTimeSignalData): The original signal data containing time and data arrays. + signal_data (TimeSignalData): The original signal data containing time and data arrays. threshold (float): The data value threshold to identify pulses. min_pulse_width_s (float, optional): The minimum duration (in seconds) for a pulse to be considered valid. Pulses shorter than this duration will be ignored. Defaults to 0.0. @@ -25,7 +25,7 @@ def extract_signal_above_threshold( Defaults to 0.0. Returns: - MultiDataTimeSignal: A list of DataTimeSignalData instances, each representing a detected pulse. 
+ MultiTimeSignalData: A list of DataTimeSignalData instances, each representing a detected pulse. """ # Convert lists to NumPy arrays for efficient processing time = np.array(signal_data.time_s) @@ -53,7 +53,7 @@ def extract_signal_above_threshold( logger.debug(f"Detected {len(pulse_start_indices)} potential pulses.") # Initialize list to hold extracted pulses - extracted_pulses: MultiDataTimeSignal = [] + extracted_pulses: MultiTimeSignalData = [] # Iterate over each detected pulse for idx, (start_idx, end_idx) in enumerate( @@ -74,8 +74,8 @@ def extract_signal_above_threshold( # Optionally, assign noise_floor to non-pulse regions if maintaining original array length # Here, we create pulses with their own time and data arrays - # Create a DataTimeSignalData instance for the pulse - pulse_signal = DataTimeSignalData( + # Create a TimeSignalData instance for the pulse + pulse_signal = TimeSignalData( time_s=pulse_time.tolist(), data=pulse_data.tolist(), data_name=f"{signal_data.data_name}_pulse_{idx}", @@ -93,20 +93,20 @@ def extract_signal_above_threshold( def extract_pulses_from_signal( - full_data: DataTimeSignalData, + full_data: TimeSignalData, pre_pulse_time_s: float = 0.01, post_pulse_time_s: float = 0.01, noise_std_multiplier: float = 3.0, min_pulse_height: Optional[float] = None, min_pulse_distance_s: Optional[float] = None, data_time_signal_kwargs: Optional[dict] = None, -) -> List[DataTimeSignalData]: +) -> List[TimeSignalData]: """ Detects and extracts pulses from a DataTimeSignalData instance, including segments before and after each pulse up to the noise floor. Parameters: - full_data (DataTimeSignalData): The input signal data containing multiple pulses. + full_data (TimeSignalData): The input signal data containing multiple pulses. pre_pulse_time_s (float): Time (in seconds) to include before each detected pulse. post_pulse_time_s (float): Time (in seconds) to include after each detected pulse. 
noise_std_multiplier (float): Multiplier for noise standard deviation to set detection threshold. @@ -117,7 +117,7 @@ def extract_pulses_from_signal( data_time_signal_kwargs (dict, optional): Additional keyword arguments for DataTimeSignalData. Returns: - List[DataTimeSignalData]: A list of DataTimeSignalData instances, each representing an extracted pulse. + List[TimeSignalData]: A list of DataTimeSignalData instances, each representing an extracted pulse. """ if data_time_signal_kwargs is None: data_time_signal_kwargs = {} @@ -188,9 +188,9 @@ def extract_pulses_from_signal( segment_time = time_s[pre_start_idx:post_end_idx] segment_data = data[pre_start_idx:post_end_idx] - # Create a new DataTimeSignalData instance for the pulse + # Create a new TimeSignalData instance for the pulse pulse_data_name = f"{full_data.data_name}_pulse_{peak_idx}" - extracted_pulse = DataTimeSignalData( + extracted_pulse = TimeSignalData( time_s=segment_time.tolist(), data=segment_data.tolist(), data_name=pulse_data_name, @@ -202,12 +202,12 @@ def extract_pulses_from_signal( return extracted_pulses -def is_pulse_above_threshold(pulse: DataTimeSignalData, threshold: float) -> bool: +def is_pulse_above_threshold(pulse: TimeSignalData, threshold: float) -> bool: """ Determines if the pulse's amplitude exceeds the specified threshold. Parameters: - pulse (DataTimeSignalData): The pulse data to evaluate. + pulse (TimeSignalData): The pulse data to evaluate. threshold (float): The amplitude threshold. 
Returns: diff --git a/piel/analysis/signals/time/core/transform.py b/piel/analysis/signals/time/core/transform.py index 5b487746..7bdce28e 100644 --- a/piel/analysis/signals/time/core/transform.py +++ b/piel/analysis/signals/time/core/transform.py @@ -1,16 +1,16 @@ import numpy as np -from piel.types import MultiDataTimeSignal, DataTimeSignalData +from piel.types import MultiTimeSignalData, TimeSignalData -def offset_time_signals(multi_signal: MultiDataTimeSignal) -> MultiDataTimeSignal: +def offset_time_signals(multi_signal: MultiTimeSignalData) -> MultiTimeSignalData: """ - Offsets the time_s array of each DataTimeSignalData in the MultiDataTimeSignal to start at 0. + Offsets the time_s array of each TimeSignalData in the MultiTimeSignalData to start at 0. Args: - multi_signal (MultiDataTimeSignal): List of rising edge signals. + multi_signal (MultiTimeSignalData): List of rising edge signals. Returns: - MultiDataTimeSignal: New list with offset time_s arrays. + MultiTimeSignalData: New list with offset time_s arrays. 
""" offset_signals = [] for signal in multi_signal: @@ -27,8 +27,8 @@ def offset_time_signals(multi_signal: MultiDataTimeSignal) -> MultiDataTimeSigna # Apply the offset offset_time = time - offset - # Create a new DataTimeSignalData instance with the offset time - offset_signal = DataTimeSignalData( + # Create a new TimeSignalData instance with the offset time + offset_signal = TimeSignalData( time_s=offset_time.tolist(), data=data.tolist(), data_name=signal.data_name ) offset_signals.append(offset_signal) diff --git a/piel/analysis/signals/time/core/transition.py b/piel/analysis/signals/time/core/transition.py index 19bece7a..a89a4f17 100644 --- a/piel/analysis/signals/time/core/transition.py +++ b/piel/analysis/signals/time/core/transition.py @@ -1,22 +1,22 @@ import numpy as np -from piel.types import DataTimeSignalData, MultiDataTimeSignal +from piel.types import TimeSignalData, MultiTimeSignalData def extract_rising_edges( - signal: DataTimeSignalData, + signal: TimeSignalData, lower_threshold_ratio: float = 0.1, upper_threshold_ratio: float = 0.9, -) -> MultiDataTimeSignal: +) -> MultiTimeSignalData: """ Extracts rising edges from a signal defined as transitions from lower_threshold to upper_threshold. Args: - signal (DataTimeSignalData): The input signal data. + signal (TimeSignalData): The input signal data. lower_threshold_ratio (float): Lower threshold as a fraction of signal amplitude (default 0.1). upper_threshold_ratio (float): Upper threshold as a fraction of signal amplitude (default 0.9). Returns: - MultiDataTimeSignal: A list of DataTimeSignalData instances, each representing a rising edge. + MultiTimeSignalData: A list of DataTimeSignalData instances, each representing a rising edge. 
""" # Convert lists to numpy arrays for efficient processing time = np.array(signal.time_s) @@ -35,7 +35,7 @@ def extract_rising_edges( upper_threshold = data_min + upper_threshold_ratio * amplitude # Initialize list to hold rising edges - rising_edges: MultiDataTimeSignal = [] + rising_edges: MultiTimeSignalData = [] # State variables in_rising = False @@ -55,8 +55,8 @@ def extract_rising_edges( edge_time = time[start_idx : end_idx + 1] edge_data = data[start_idx : end_idx + 1] - # Create a new DataTimeSignalData instance for the rising edge - edge_signal = DataTimeSignalData( + # Create a new TimeSignalData instance for the rising edge + edge_signal = TimeSignalData( time_s=edge_time.tolist(), data=edge_data.tolist(), data_name=f"{signal.data_name}_rising_edge_{len(rising_edges) + 1}", diff --git a/piel/analysis/signals/time/integration/extract_pulse_metrics.py b/piel/analysis/signals/time/integration/extract_pulse_metrics.py index 78d9a4d8..032df494 100644 --- a/piel/analysis/signals/time/integration/extract_pulse_metrics.py +++ b/piel/analysis/signals/time/integration/extract_pulse_metrics.py @@ -1,12 +1,12 @@ from typing import List, Optional -from piel.types import DataTimeSignalData, ScalarMetricCollection +from piel.types import TimeSignalData, ScalarMetricCollection from piel.analysis.signals.time.core.split import extract_pulses_from_signal from piel.analysis.signals.time.core.metrics import extract_peak_to_peak_metrics_list def extract_peak_to_peak_metrics_after_split_pulses( - full_signal: DataTimeSignalData, + full_signal: TimeSignalData, pre_pulse_time_s: float = 1e-9, post_pulse_time_s: float = 1e-9, noise_std_multiplier: float = 3.0, @@ -19,7 +19,7 @@ def extract_peak_to_peak_metrics_after_split_pulses( Extracts pulses from the full signal and computes peak-to-peak metrics. Parameters: - - full_signal (DataTimeSignalData): The complete time signal data to be analyzed. + - full_signal (TimeSignalData): The complete time signal data to be analyzed. 
- pre_pulse_time_s (float): Time in seconds before the pulse to include. - post_pulse_time_s (float): Time in seconds after the pulse to include. - noise_std_multiplier (float): Multiplier for noise standard deviation to detect pulses. @@ -34,7 +34,7 @@ def extract_peak_to_peak_metrics_after_split_pulses( try: # Extract pulses from the full signal - pulses: List[DataTimeSignalData] = extract_pulses_from_signal( + pulses: List[TimeSignalData] = extract_pulses_from_signal( full_data=full_signal, pre_pulse_time_s=pre_pulse_time_s, post_pulse_time_s=post_pulse_time_s, diff --git a/piel/conversion.py b/piel/conversion.py index ccbee12e..e69de29b 100644 --- a/piel/conversion.py +++ b/piel/conversion.py @@ -1,216 +0,0 @@ -""" -This module provides a set of utilities for converting between common files measurement to facilitate the representation of information across different toolsets. -""" - -from functools import partial -import jax.numpy as jnp -import numpy as np -import pandas as pd -from piel.types.core import ArrayTypes, PackageArrayType, TupleIntType -from piel.types.digital import AbstractBitsType, BitsType, LogicSignalsList - - -def convert_array_type(array: ArrayTypes, output_type: PackageArrayType): - """ - Converts an array to the specified output type. - - Args: - array (ArrayTypes): The input array which can be of type numpy.ndarray or jax.ndarray. - output_type (PackageArrayType): The desired output type, which can be "qutip", "jax", "numpy", "list", "tuple", or a tuple of integers (TupleIntType). - - Returns: - The converted array in the specified output type. - - Raises: - ValueError: If the specified output type is not recognized or not supported. 
- - Examples: - >>> convert_array_type(np.array([1, 2, 3]), "jax") - DeviceArray([1, 2, 3], dtype=int32) - - >>> convert_array_type(jnp.array([1, 2, 3]), "numpy") - array([1, 2, 3]) - - >>> convert_array_type(np.array([1, 2, 3]), TupleIntType) - (1, 2, 3) - """ - if output_type == "qutip": - import qutip - - if not isinstance(array, qutip.Qobj): - array = qutip.Qobj(array) - elif output_type == "jax": - if not isinstance(array, jnp.ndarray): - array = jnp.array(array) - elif output_type == "numpy": - if not isinstance(array, np.ndarray): - array = np.array(array) - elif output_type == "list": - if not isinstance(array, list): - array = array.tolist() - elif output_type == "tuple": - if not isinstance(array, tuple): - array = tuple(array.tolist()) - elif output_type == TupleIntType: - if isinstance(array, jnp.ndarray): - array = tuple(array.tolist()) - if isinstance(array, tuple): - # Check if the tuple is a tuple of integers - if all(isinstance(i, int) for i in array): - pass - elif all(isinstance(i, list) for i in array): - array = tuple(i[0] for i in array) - else: - raise ValueError("The tuple must be a tuple of integers.") - elif output_type == "str": - if not isinstance(array, str): - array = "".join(str(value) for value in array) - else: - raise ValueError( - "The output type must be either 'qutip', 'jax', 'numpy', 'list', 'tuple', TupleIntType, or 'str'." - ) - return array - - -# Partially applied function for converting an array to a string. -convert_tuple_to_string = partial(convert_array_type, output_type="str") - - -def convert_2d_array_to_string(list_2D: list[list]) -> str: - """ - Converts a 2D array of binary files into a single string of binary values. - - Args: - list_2D (list[list]): A 2D array of binary files where each sublist contains a single binary value. - - Returns: - str: A string of binary files. 
- - Examples: - >>> convert_2d_array_to_string([[0], [0], [0], [1]]) - '0001' - """ - binary_string = "".join(str(sublist[0]) for sublist in list_2D) - return binary_string - - -def absolute_to_threshold( - array: ArrayTypes, - threshold: float = 1e-6, - dtype_output: int | float | bool = int, - output_array_type: PackageArrayType = "jax", -) -> PackageArrayType: - """ - Converts an array of optical transmission values to single-bit digital signals based on a threshold. - - Args: - array (ArrayTypes): The input array of any dimension representing optical transmission values. - threshold (float, optional): The threshold value to determine the digital signal. Defaults to 1e-6. - dtype_output (int | float | bool, optional): The desired files type for the output values. Defaults to int. - output_array_type (PackageArrayType, optional): The desired output array type. Defaults to "jax". - - Returns: - The array with values converted to digital signals (0 or 1) based on the threshold and specified output type. - - Raises: - ValueError: If the input array is not a numpy or jax array. - - Examples: - >>> absolute_to_threshold(jnp.array([1e-7, 0.1, 1.0]), threshold=1e-5, output_array_type="numpy") - array([0, 1, 1]) - """ - if isinstance(array, (jnp.ndarray, np.ndarray)): - array = jnp.array(array) if isinstance(array, np.ndarray) else array - - array = jnp.abs(array) > threshold - array = array.astype(dtype_output) - array = convert_array_type(array, output_array_type) - else: - raise ValueError("The array must be either a jax or numpy array.") - - return array - - -# Alias for the absolute_to_threshold function. -a2d = absolute_to_threshold - - -def convert_to_bits(bits: AbstractBitsType) -> BitsType: - """ - Converts an AbstractBitsType to a BitsType (binary string format). - - Args: - bits (AbstractBitsType): The digital bits to convert. Can be a string, bytes, or integer. - - Returns: - BitsType: The converted bits in binary string format (without '0b' prefix). 
- - Raises: - TypeError: If the input type is not supported. - """ - if isinstance(bits, str): - return bits # Already in binary string format - elif isinstance(bits, bytes): - # Convert each byte to its binary representation - return "".join(format(byte, "08b") for byte in bits) - - elif isinstance(bits, int): - # Convert integer to binary (remove the '0b' prefix) - return bin(bits)[2:] - - else: - raise TypeError( - "Unsupported type for bits conversion. Supported measurement are str, bytes, or int." - ) - - -def convert_dataframe_to_bits( - dataframe: pd.DataFrame, ports_list: LogicSignalsList -) -> pd.DataFrame: - """ - Converts specified integer columns in the dataframe to their binary string representations. - - Args: - dataframe (pd.DataFrame): The simulation files as a Pandas dataframe. - ports_list (LogicSignalsList): List of column names (connection) to convert to binary string format. - - Returns: - pd.DataFrame: The dataframe with specified columns converted to binary string format. - """ - - def int_to_binary_string(value: int, bits: int) -> str: - """ - Converts an integer to a binary string representation, padded with leading zeros to fit the specified number of bits. - - Args: - value (int): The integer to convert. - bits (int): The number of bits for the binary representation. - - Returns: - str: The binary string representation of the integer. 
- """ - return format(value, f"0{bits}b") - - # Determine the number of bits required to represent the maximum integer value in the DataFrame - max_bits = ( - dataframe[ports_list] - .apply(lambda col: int(col.dropna().astype(int).max()).bit_length()) - .max() - ) - - # Apply conversion only to the specified columns in ports_list - binary_converted_data = ( - dataframe.copy() - ) # Create a copy of the dataframe to avoid modifying the original files - - for port in ports_list: - if port in binary_converted_data.columns: - binary_converted_data[port] = binary_converted_data[port].apply( - lambda x: int_to_binary_string(int(x), max_bits) - if isinstance(x, (int, float)) - else x - ) - else: - raise ValueError(f"Port '{port}' not found in DataFrame columns") - - return binary_converted_data diff --git a/piel/conversion/__init__.py b/piel/conversion/__init__.py new file mode 100644 index 00000000..8fa449c9 --- /dev/null +++ b/piel/conversion/__init__.py @@ -0,0 +1,9 @@ +from .core import ( + absolute_to_threshold, + convert_2d_array_to_string, + convert_array_type, + convert_tuple_to_string, + convert_to_bits, + convert_dataframe_to_bits, +) +from .file import read_csv_to_pandas, read_vcd_to_json diff --git a/piel/conversion/core.py b/piel/conversion/core.py new file mode 100644 index 00000000..ccbee12e --- /dev/null +++ b/piel/conversion/core.py @@ -0,0 +1,216 @@ +""" +This module provides a set of utilities for converting between common files measurement to facilitate the representation of information across different toolsets. +""" + +from functools import partial +import jax.numpy as jnp +import numpy as np +import pandas as pd +from piel.types.core import ArrayTypes, PackageArrayType, TupleIntType +from piel.types.digital import AbstractBitsType, BitsType, LogicSignalsList + + +def convert_array_type(array: ArrayTypes, output_type: PackageArrayType): + """ + Converts an array to the specified output type. 
+ + Args: + array (ArrayTypes): The input array which can be of type numpy.ndarray or jax.ndarray. + output_type (PackageArrayType): The desired output type, which can be "qutip", "jax", "numpy", "list", "tuple", or a tuple of integers (TupleIntType). + + Returns: + The converted array in the specified output type. + + Raises: + ValueError: If the specified output type is not recognized or not supported. + + Examples: + >>> convert_array_type(np.array([1, 2, 3]), "jax") + DeviceArray([1, 2, 3], dtype=int32) + + >>> convert_array_type(jnp.array([1, 2, 3]), "numpy") + array([1, 2, 3]) + + >>> convert_array_type(np.array([1, 2, 3]), TupleIntType) + (1, 2, 3) + """ + if output_type == "qutip": + import qutip + + if not isinstance(array, qutip.Qobj): + array = qutip.Qobj(array) + elif output_type == "jax": + if not isinstance(array, jnp.ndarray): + array = jnp.array(array) + elif output_type == "numpy": + if not isinstance(array, np.ndarray): + array = np.array(array) + elif output_type == "list": + if not isinstance(array, list): + array = array.tolist() + elif output_type == "tuple": + if not isinstance(array, tuple): + array = tuple(array.tolist()) + elif output_type == TupleIntType: + if isinstance(array, jnp.ndarray): + array = tuple(array.tolist()) + if isinstance(array, tuple): + # Check if the tuple is a tuple of integers + if all(isinstance(i, int) for i in array): + pass + elif all(isinstance(i, list) for i in array): + array = tuple(i[0] for i in array) + else: + raise ValueError("The tuple must be a tuple of integers.") + elif output_type == "str": + if not isinstance(array, str): + array = "".join(str(value) for value in array) + else: + raise ValueError( + "The output type must be either 'qutip', 'jax', 'numpy', 'list', 'tuple', TupleIntType, or 'str'." + ) + return array + + +# Partially applied function for converting an array to a string. 
+convert_tuple_to_string = partial(convert_array_type, output_type="str") + + +def convert_2d_array_to_string(list_2D: list[list]) -> str: + """ + Converts a 2D array of binary files into a single string of binary values. + + Args: + list_2D (list[list]): A 2D array of binary files where each sublist contains a single binary value. + + Returns: + str: A string of binary files. + + Examples: + >>> convert_2d_array_to_string([[0], [0], [0], [1]]) + '0001' + """ + binary_string = "".join(str(sublist[0]) for sublist in list_2D) + return binary_string + + +def absolute_to_threshold( + array: ArrayTypes, + threshold: float = 1e-6, + dtype_output: int | float | bool = int, + output_array_type: PackageArrayType = "jax", +) -> PackageArrayType: + """ + Converts an array of optical transmission values to single-bit digital signals based on a threshold. + + Args: + array (ArrayTypes): The input array of any dimension representing optical transmission values. + threshold (float, optional): The threshold value to determine the digital signal. Defaults to 1e-6. + dtype_output (int | float | bool, optional): The desired files type for the output values. Defaults to int. + output_array_type (PackageArrayType, optional): The desired output array type. Defaults to "jax". + + Returns: + The array with values converted to digital signals (0 or 1) based on the threshold and specified output type. + + Raises: + ValueError: If the input array is not a numpy or jax array. 
+ + Examples: + >>> absolute_to_threshold(jnp.array([1e-7, 0.1, 1.0]), threshold=1e-5, output_array_type="numpy") + array([0, 1, 1]) + """ + if isinstance(array, (jnp.ndarray, np.ndarray)): + array = jnp.array(array) if isinstance(array, np.ndarray) else array + + array = jnp.abs(array) > threshold + array = array.astype(dtype_output) + array = convert_array_type(array, output_array_type) + else: + raise ValueError("The array must be either a jax or numpy array.") + + return array + + +# Alias for the absolute_to_threshold function. +a2d = absolute_to_threshold + + +def convert_to_bits(bits: AbstractBitsType) -> BitsType: + """ + Converts an AbstractBitsType to a BitsType (binary string format). + + Args: + bits (AbstractBitsType): The digital bits to convert. Can be a string, bytes, or integer. + + Returns: + BitsType: The converted bits in binary string format (without '0b' prefix). + + Raises: + TypeError: If the input type is not supported. + """ + if isinstance(bits, str): + return bits # Already in binary string format + elif isinstance(bits, bytes): + # Convert each byte to its binary representation + return "".join(format(byte, "08b") for byte in bits) + + elif isinstance(bits, int): + # Convert integer to binary (remove the '0b' prefix) + return bin(bits)[2:] + + else: + raise TypeError( + "Unsupported type for bits conversion. Supported measurement are str, bytes, or int." + ) + + +def convert_dataframe_to_bits( + dataframe: pd.DataFrame, ports_list: LogicSignalsList +) -> pd.DataFrame: + """ + Converts specified integer columns in the dataframe to their binary string representations. + + Args: + dataframe (pd.DataFrame): The simulation files as a Pandas dataframe. + ports_list (LogicSignalsList): List of column names (connection) to convert to binary string format. + + Returns: + pd.DataFrame: The dataframe with specified columns converted to binary string format. 
+ """ + + def int_to_binary_string(value: int, bits: int) -> str: + """ + Converts an integer to a binary string representation, padded with leading zeros to fit the specified number of bits. + + Args: + value (int): The integer to convert. + bits (int): The number of bits for the binary representation. + + Returns: + str: The binary string representation of the integer. + """ + return format(value, f"0{bits}b") + + # Determine the number of bits required to represent the maximum integer value in the DataFrame + max_bits = ( + dataframe[ports_list] + .apply(lambda col: int(col.dropna().astype(int).max()).bit_length()) + .max() + ) + + # Apply conversion only to the specified columns in ports_list + binary_converted_data = ( + dataframe.copy() + ) # Create a copy of the dataframe to avoid modifying the original files + + for port in ports_list: + if port in binary_converted_data.columns: + binary_converted_data[port] = binary_converted_data[port].apply( + lambda x: int_to_binary_string(int(x), max_bits) + if isinstance(x, (int, float)) + else x + ) + else: + raise ValueError(f"Port '{port}' not found in DataFrame columns") + + return binary_converted_data diff --git a/piel/file_conversion.py b/piel/conversion/file.py similarity index 82% rename from piel/file_conversion.py rename to piel/conversion/file.py index f8a5d4e7..e982a5b0 100644 --- a/piel/file_conversion.py +++ b/piel/conversion/file.py @@ -1,11 +1,6 @@ import pandas as pd -from .file_system import return_path -from .types import PathTypes - -__all__ = [ - "read_csv_to_pandas", - "read_vcd_to_json", -] +from piel.file_system import return_path +from piel.types import PathTypes def read_csv_to_pandas(file_path: PathTypes): diff --git a/piel/experimental/devices/DPO73304/extract.py b/piel/experimental/devices/DPO73304/extract.py index 7f15b22f..bbb8272a 100644 --- a/piel/experimental/devices/DPO73304/extract.py +++ b/piel/experimental/devices/DPO73304/extract.py @@ -12,8 +12,8 @@ OscilloscopeMeasurementData, 
) from piel.types import ( - DataTimeSignalData, - MultiDataTimeSignal, + TimeSignalData, + MultiTimeSignalData, PathTypes, ScalarMetric, ScalarMetricCollection, @@ -109,7 +109,7 @@ def extract_waveform_to_dataframe(file: PathTypes) -> pd.DataFrame: def extract_to_data_time_signal( file: PathTypes, -) -> DataTimeSignalData: +) -> TimeSignalData: """ Extracts the waveform files from a csv file and returns it as a DataTimeSignal that can be used to analyse the signal with other methods. @@ -120,12 +120,12 @@ def extract_to_data_time_signal( Returns ------- - DataTimeSignalData + TimeSignalData The waveform files as a DataTimeSignal. """ logger.debug(f"Extracting waveform from file: {file}") dataframe = extract_waveform_to_dataframe(file) - data_time_signal = DataTimeSignalData( + data_time_signal = TimeSignalData( time_s=dataframe.time_s.values, data=dataframe.voltage_V.values, data_name="voltage_V", @@ -320,9 +320,9 @@ def extract_to_signal_measurement(file: PathTypes, **kwargs) -> ScalarMetricColl def combine_channel_data( channel_file: list[PathTypes], -) -> MultiDataTimeSignal: +) -> MultiTimeSignalData: """ - Extracts the waveform files from a list of csv files and returns it as a MultiDataTimeSignal that can be used to analyse the signals together. + Extracts the waveform files from a list of csv files and returns it as a MultiTimeSignalData that can be used to analyse the signals together. Parameters ---------- @@ -331,8 +331,8 @@ def combine_channel_data( Returns ------- - MultiDataTimeSignal - The waveform files as a MultiDataTimeSignal. + MultiTimeSignalData + The waveform files as a MultiTimeSignalData. 
""" multi_channel_data_time_signals = list() diff --git a/piel/models/transient/electro_optic/pulsed_laser.py b/piel/models/transient/electro_optic/pulsed_laser.py index ffd83cc9..121d82af 100644 --- a/piel/models/transient/electro_optic/pulsed_laser.py +++ b/piel/models/transient/electro_optic/pulsed_laser.py @@ -1,5 +1,5 @@ import numpy as np -from piel.types import PulsedLaser, DataTimeSignalData, ns, W +from piel.types import PulsedLaser, TimeSignalData, ns, W def generate_laser_time_data_pulses( @@ -7,7 +7,7 @@ def generate_laser_time_data_pulses( time_frame_s: float, point_amount: int, data_name: str = "optical_pulse_power", -) -> DataTimeSignalData: +) -> TimeSignalData: """ Converts PulsedLaser metrics into a time-domain signal representation. @@ -18,7 +18,7 @@ def generate_laser_time_data_pulses( - data_name (str): Name/description of the data signal. Returns: - - DataTimeSignalData: The time-domain signal data. + - TimeSignalData: The time-domain signal data. """ if not pulsed_laser.metrics: raise ValueError( @@ -75,8 +75,8 @@ def generate_laser_time_data_pulses( if start < end: data_array[start:end] = pulse_amplitude_W - # Create DataTimeSignalData object - signal_data = DataTimeSignalData( + # Create TimeSignalData object + signal_data = TimeSignalData( time_s=time_array_ns.tolist(), # Time in nanoseconds data=data_array.tolist(), data_name=data_name, diff --git a/piel/tools/virtuoso/simulation/__init__.py b/piel/tools/virtuoso/simulation/__init__.py new file mode 100644 index 00000000..21b41025 --- /dev/null +++ b/piel/tools/virtuoso/simulation/__init__.py @@ -0,0 +1 @@ +from . import data diff --git a/piel/tools/virtuoso/simulation/data/__init__.py b/piel/tools/virtuoso/simulation/data/__init__.py index b5c9f8f2..2610fbe9 100644 --- a/piel/tools/virtuoso/simulation/data/__init__.py +++ b/piel/tools/virtuoso/simulation/data/__init__.py @@ -1,3 +1,3 @@ from . import dc -from . import transient -from . import expressions +from . import time +from . 
import utils diff --git a/piel/tools/virtuoso/simulation/data/dc.py b/piel/tools/virtuoso/simulation/data/dc.py index 8b137891..f9255147 100644 --- a/piel/tools/virtuoso/simulation/data/dc.py +++ b/piel/tools/virtuoso/simulation/data/dc.py @@ -1 +1,91 @@ +from piel.types import SignalDCCollection, SignalTraceDC, SignalDC +from piel.conversion import read_csv_to_pandas +from .utils import sanitize_column_name + +def dataframe_to_signal_dc_collection(df) -> SignalDCCollection: + """ + Converts a DataFrame containing time and data columns into a `SignalDCCollection`. + + This function processes a DataFrame where each signal is represented by a pair of columns: + one for input traces (time, ending with " X") and one for output traces (data, ending with " Y"). + It constructs `SignalDC` objects for each valid pair, grouping them into a `SignalDCCollection`. + + Args: + df (pd.DataFrame): A DataFrame with columns representing input traces ('X') and output traces ('Y') pairs. + + Returns: + SignalDCCollection: A collection of DC signals representing inputs and outputs. 
+ + Example: + Input DataFrame: + /out (resistance=1000) X | /out (resistance=1000) Y | /out (resistance=2000) X | /out (resistance=2000) Y + -------------------------|-------------------------|-------------------------|------------------------- + 0.0 | 10.0 | 0.0 | 20.0 + 1.0 | 15.0 | 1.0 | 25.0 + + Output: + SignalDCCollection( + inputs=[SignalDC(trace_list=[SignalTraceDC(name="out_resistance_1000_X", values=[0.0, 1.0]), + SignalTraceDC(name="out_resistance_2000_X", values=[0.0, 1.0])])], + outputs=[SignalDC(trace_list=[SignalTraceDC(name="out_resistance_1000_Y", values=[10.0, 15.0]), + SignalTraceDC(name="out_resistance_2000_Y", values=[20.0, 25.0])])] + ) + """ + inputs = [] + outputs = [] + + # Loop through columns to identify "X" (input traces) and "Y" (output traces) pairs + for col in df.columns: + if col.endswith(" X"): + base_name = col[:-2] # Remove ' X' + y_col = f"{base_name} Y" + if y_col in df.columns: + # Sanitize names + input_name = sanitize_column_name(col) + output_name = sanitize_column_name(y_col) + + # Create SignalTraceDC objects for input and output + input_trace = SignalTraceDC(name=input_name, values=df[col].values) + output_trace = SignalTraceDC(name=output_name, values=df[y_col].values) + + # Create SignalDC objects for inputs and outputs + inputs.append(SignalDC(trace_list=[input_trace])) + outputs.append(SignalDC(trace_list=[output_trace])) + + # Create and return the SignalDCCollection + return SignalDCCollection(inputs=inputs, outputs=outputs, power=[]) + + +def extract_signals_from_csv(file_path: str) -> SignalDCCollection: + """ + Reads a CSV file and extracts time-series signals as a list of `DataTimeSignalData` objects. + + This function reads the contents of a CSV file into a pandas DataFrame, then converts + the DataFrame into a list of `DataTimeSignalData` objects using the `dataframe_to_multi_time_signal_data` function. + + Args: + file_path (str): The path to the CSV file. 
+
+    Returns:
+        SignalDCCollection: A collection of DC signals (inputs and outputs) extracted from the CSV file.
+
+    Example:
+        If the CSV contains:
+            Signal1 X,Signal1 Y,Signal2 X,Signal2 Y
+            0.0,10.0,0.0,20.0
+            1.0,15.0,1.0,25.0
+
+        The output will be a `SignalDCCollection` whose:
+            inputs hold one trace per "X" column (e.g. "Signal1_X"),
+            outputs hold one trace per "Y" column (e.g. "Signal1_Y"),
+            with trace names sanitized via `sanitize_column_name`.
+
+    """
+    # Read the CSV file into a DataFrame
+    df = read_csv_to_pandas(file_path)
+
+    # Convert the DataFrame into a SignalDCCollection of input/output trace pairs
+    signals = dataframe_to_signal_dc_collection(df)
+
+    return signals
diff --git a/piel/tools/virtuoso/simulation/data/expressions.py b/piel/tools/virtuoso/simulation/data/expressions.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/piel/tools/virtuoso/simulation/data/time.py b/piel/tools/virtuoso/simulation/data/time.py
new file mode 100644
index 00000000..3baa8307
--- /dev/null
+++ b/piel/tools/virtuoso/simulation/data/time.py
@@ -0,0 +1,89 @@
+from piel.types import MultiDataTimeSignal, DataTimeSignalData
+from piel.conversion import read_csv_to_pandas
+from .utils import sanitize_column_name
+
+
+def dataframe_to_multi_time_signal_data(df) -> MultiDataTimeSignal:
+    """
+    Converts a DataFrame containing time and data columns into a list of `DataTimeSignalData` objects.
+
+    This function processes a DataFrame where each signal is represented by a pair of columns:
+    one for time (ending with " X") and one for the corresponding data values (ending with " Y").
+    It constructs `DataTimeSignalData` objects for each valid pair and returns them as a list.
+
+    Args:
+        df (pd.DataFrame): A DataFrame with columns representing time ('X') and data ('Y') pairs.
+
+    Returns:
+        MultiDataTimeSignal: A list of `DataTimeSignalData` objects, where each object represents a signal.
+ + Example: + Input DataFrame: + Signal1 X | Signal1 Y | Signal2 X | Signal2 Y + --------- | --------- | --------- | --------- + 0.0 | 10.0 | 0.0 | 20.0 + 1.0 | 15.0 | 1.0 | 25.0 + + Output: + [ + DataTimeSignalData(time_s=[0.0, 1.0], data=[10.0, 15.0], data_name="Signal1"), + DataTimeSignalData(time_s=[0.0, 1.0], data=[20.0, 25.0], data_name="Signal2") + ] + """ + import re + + signals = [] + + # Loop through columns to identify "X" (time) and "Y" (data) pairs + for col in df.columns: + if col.endswith(" X"): + # Determine the base name of the signal + base_name = col[:-2] # Remove ' X' + y_col = f"{base_name} Y" + if y_col in df.columns: + # Generate a valid data_name by sanitizing the base name + data_name = sanitize_column_name(y_col) + + # Create a DataTimeSignalData object for the identified signal + signal = DataTimeSignalData( + time_s=df[col].values, + data=df[y_col].values, + data_name=data_name, + ) + signals.append(signal) + + return signals + + +def extract_signals_from_csv(file_path: str) -> MultiDataTimeSignal: + """ + Reads a CSV file and extracts time-series signals as a list of `DataTimeSignalData` objects. + + This function reads the contents of a CSV file into a pandas DataFrame, then converts + the DataFrame into a list of `DataTimeSignalData` objects using the `dataframe_to_multi_time_signal_data` function. + + Args: + file_path (str): The path to the CSV file. + + Returns: + MultiDataTimeSignal: A list of `DataTimeSignalData` objects, where each object represents a time-series signal. 
+
+    Example:
+        If the CSV contains:
+            Signal1 X,Signal1 Y,Signal2 X,Signal2 Y
+            0.0,10.0,0.0,20.0
+            1.0,15.0,1.0,25.0
+
+        The output will be:
+        [
+            DataTimeSignalData(time_s=[0.0, 1.0], data=[10.0, 15.0], data_name="Signal1"),
+            DataTimeSignalData(time_s=[0.0, 1.0], data=[20.0, 25.0], data_name="Signal2")
+        ]
+    """
+    # Read the CSV file into a DataFrame
+    df = read_csv_to_pandas(file_path)
+
+    # Convert the DataFrame into a list of DataTimeSignalData objects
+    signals = dataframe_to_multi_time_signal_data(df)
+
+    return signals
diff --git a/piel/tools/virtuoso/simulation/data/transient.py b/piel/tools/virtuoso/simulation/data/transient.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/piel/tools/virtuoso/simulation/data/utils.py b/piel/tools/virtuoso/simulation/data/utils.py
new file mode 100644
index 00000000..4ff634c0
--- /dev/null
+++ b/piel/tools/virtuoso/simulation/data/utils.py
@@ -0,0 +1,29 @@
+def sanitize_column_name(column) -> str:
+    """
+    Converts a single column name into a sanitized format that only includes
+    letters, numbers, and underscores.
+
+    Args:
+        column (str): The column name to be sanitized.
+
+    Returns:
+        str: The sanitized column name.
+
+    Example:
+        Input:
+            ['(VT("/net05") - VT("/net8")) X', '(VT("/net05") - VT("/net8")) Y']
+        Output:
+            ['VTnet05__VTnet8_X', 'VTnet05__VTnet8_Y']  (note: '-' is stripped before the '_minus_' replacement runs)
+    """
+    import re
+
+    # Strip characters other than letters, numbers and spaces, then turn spaces into underscores
+    sanitized_column = re.sub(r"[^a-zA-Z0-9 ]", "", column).replace(" ", "_")
+    sanitized_column = re.sub(
+        r"[^\w\s]", "", sanitized_column
+    )  # Remove special characters
+    sanitized_column = re.sub(
+        r"\s+", "_", sanitized_column
+    )  # Replace spaces with underscores
+    sanitized_column = sanitized_column.replace("/", "_").replace("-", "_minus_")
+    return sanitized_column
diff --git a/piel/types/__init__.py b/piel/types/__init__.py
index 20c8eb72..6805c9eb 100644
--- a/piel/types/__init__.py
+++ b/piel/types/__init__.py
@@ -175,8 +175,10 @@
 )
 from piel.types.signal.time_data import (
+    TimeSignalData,
     DataTimeSignalData,
     EdgeTransitionAnalysisTypes,
+    MultiTimeSignalData,
     MultiDataTimeSignal,
     MultiDataTimeSignalCollectionTypes,
     MultiDataTimeSignalAnalysisTypes,
diff --git a/piel/types/experimental/measurements/data/oscilloscope.py b/piel/types/experimental/measurements/data/oscilloscope.py
index d4bb4054..96ec16b1 100644
--- a/piel/types/experimental/measurements/data/oscilloscope.py
+++ b/piel/types/experimental/measurements/data/oscilloscope.py
@@ -1,5 +1,5 @@
 from .core import MeasurementData, MeasurementDataCollection
-from piel.types.signal.time_data import MultiDataTimeSignal
+from piel.types.signal.time_data import MultiTimeSignalData
 from piel.types.metrics import ScalarMetricCollection
 
 
@@ -11,12 +11,12 @@ class OscilloscopeMeasurementData(MeasurementData):
 
     Attributes:
         measurements (Optional[SignalMetricsMeasurementCollection]): The collection of signal measurements.
-        waveform_list (MultiDataTimeSignal): The collection of waveforms.
+        waveform_list (MultiTimeSignalData): The collection of waveforms.
""" type: str = "OscilloscopeMeasurementData" measurements: ScalarMetricCollection | None = None - waveform_list: MultiDataTimeSignal = [] + waveform_list: MultiTimeSignalData = [] class OscilloscopeMeasurementDataCollection(MeasurementDataCollection): diff --git a/piel/types/experimental/measurements/data/propagation.py b/piel/types/experimental/measurements/data/propagation.py index b97ee148..12a92166 100644 --- a/piel/types/experimental/measurements/data/propagation.py +++ b/piel/types/experimental/measurements/data/propagation.py @@ -1,6 +1,6 @@ from .core import MeasurementData, MeasurementDataCollection from piel.types.signal.time_data import ( - DataTimeSignalData, + TimeSignalData, ) from piel.types.metrics import ScalarMetricCollection @@ -13,14 +13,14 @@ class PropagationDelayMeasurementData(MeasurementData): Attributes: measurements (Optional[SignalMetricsMeasurementCollection]): The collection of signal measurements. - dut_waveform (Optional[DataTimeSignalData]): The device waveform. - reference_waveform (Optional[DataTimeSignalData]): The reference waveform. + dut_waveform (Optional[TimeSignalData]): The device waveform. + reference_waveform (Optional[TimeSignalData]): The reference waveform. 
""" type: str = "PropagationDelayMeasurementData" measurements: ScalarMetricCollection | None = None - dut_waveform: DataTimeSignalData | None = None - reference_waveform: DataTimeSignalData | None = None + dut_waveform: TimeSignalData | None = None + reference_waveform: TimeSignalData | None = None class PropagationDelayMeasurementDataCollection(MeasurementDataCollection): diff --git a/piel/types/signal/time_data.py b/piel/types/signal/time_data.py index 57fc5d55..1582ad31 100644 --- a/piel/types/signal/time_data.py +++ b/piel/types/signal/time_data.py @@ -3,7 +3,7 @@ from piel.types.units import Unit, s, V -class DataTimeSignalData(PielBaseModel): +class TimeSignalData(PielBaseModel): """ Standard definition for a relationship between a relevant files signal and a time reference array. Sources could be both measurement and simulation. @@ -16,13 +16,15 @@ class DataTimeSignalData(PielBaseModel): data_unit: Unit = V -MultiDataTimeSignal = list[DataTimeSignalData] +DataTimeSignalData = TimeSignalData # Legacy, to be removed +MultiTimeSignalData = list[TimeSignalData] +MultiDataTimeSignal = MultiTimeSignalData # Legacy, to be removed + """ Collection of DataTimeSignals that can be used to analyse a set of signals together in a particular files flow. 
""" MultiDataTimeSignalCollectionTypes = ["equivalent", "different"] - EdgeTransitionAnalysisTypes = Literal["mean", "peak_to_peak", "rise_time"] MultiDataTimeSignalAnalysisTypes = Literal["delay"] diff --git a/piel/visual/experimental/oscilloscope/measurement_data_collection.py b/piel/visual/experimental/oscilloscope/measurement_data_collection.py index 4230f145..6a3a3fbd 100644 --- a/piel/visual/experimental/oscilloscope/measurement_data_collection.py +++ b/piel/visual/experimental/oscilloscope/measurement_data_collection.py @@ -263,10 +263,10 @@ def plot_oscilloscope_signals_time( # # Example Usage # if __name__ == "__main__": # from piel.types.units import s, V -# from piel.types.signal.time_data import DataTimeSignalData +# from piel.types.signal.time_data import TimeSignalData # # # Create sample data -# waveform1 = DataTimeSignalData( +# waveform1 = TimeSignalData( # time_s=[0, 1, 2, 3, 4, 5], # data=[0, 1, 0, 1, 0, 1], # data_name="Channel 1", @@ -274,7 +274,7 @@ def plot_oscilloscope_signals_time( # data_unit=V # ) # -# waveform2 = DataTimeSignalData( +# waveform2 = TimeSignalData( # time_s=[0, 1, 2, 3, 4, 5], # data=[0, 0.5, 0, 0.5, 0, 0.5], # data_name="Channel 2", diff --git a/piel/visual/plot/signals/dc/__init__.py b/piel/visual/plot/signals/dc/__init__.py index 972daf50..90de5fdc 100644 --- a/piel/visual/plot/signals/dc/__init__.py +++ b/piel/visual/plot/signals/dc/__init__.py @@ -1 +1,2 @@ -from .signal_dc_collection import plot_signal_dc_collection +from .basic import plot_signal_dc_collection +from .overlay import plot_signal_dc_collection_equivalent diff --git a/piel/visual/plot/signals/dc/signal_dc_collection.py b/piel/visual/plot/signals/dc/basic.py similarity index 93% rename from piel/visual/plot/signals/dc/signal_dc_collection.py rename to piel/visual/plot/signals/dc/basic.py index 1e4a2d77..5cab815a 100644 --- a/piel/visual/plot/signals/dc/signal_dc_collection.py +++ b/piel/visual/plot/signals/dc/basic.py @@ -13,10 +13,11 @@ def 
plot_signal_dc_collection( signal_dc_collection: SignalDCCollection, fig: Any = None, axs: Any = None, - subplots_kwargs: dict = None, xlabel: str | Unit = None, ylabel: str | Unit = None, title: str | Unit = None, + subplots_kwargs: dict = None, + plot_kwargs: dict = None, **kwargs, ): """ @@ -77,6 +78,12 @@ def plot_signal_dc_collection( ylabel = "Output Signal" y_correction = 1 + if subplots_kwargs is None: + subplots_kwargs = {} + + if plot_kwargs is None: + plot_kwargs = {"marker": "o", "linestyle": "-"} + # Apply corrections if necessary input_values = np.array(input_values) / x_correction output_values = np.array(output_values) / y_correction @@ -88,9 +95,7 @@ def plot_signal_dc_collection( ax = axs[0] # Plot the data - ax.plot( - input_values, output_values, label="Input vs Output", marker="o", linestyle="-" - ) + ax.plot(input_values, output_values, **plot_kwargs) # Set labels and title ax.set_xlabel(xlabel) diff --git a/piel/visual/plot/signals/dc/overlay.py b/piel/visual/plot/signals/dc/overlay.py new file mode 100644 index 00000000..6a1146de --- /dev/null +++ b/piel/visual/plot/signals/dc/overlay.py @@ -0,0 +1,113 @@ +from typing import Any +from piel.types import Unit +import numpy as np +from piel.types import SignalDCCollection +from piel.visual.plot.position import create_axes_per_figure +from piel.visual.plot.core import save +import logging + +logger = logging.getLogger(__name__) + + +def plot_signal_dc_collection_equivalent( + signal_dc_collection: SignalDCCollection, + fig: Any = None, + axs: Any = None, + xlabel: str | Unit = None, + ylabel: str | Unit = None, + title: str | Unit = None, + labels: list[str] = None, + subplots_kwargs: dict = None, + plot_kwargs: dict = None, + **kwargs, +): + """ + Plots inputs vs outputs from a SignalDCCollection on a figure. + + Args: + signal_dc_collection (SignalDCCollection): The collection of DC signals to plot. + fig (matplotlib.figure.Figure, optional): Existing figure to plot on. 
If None, a new figure is created. + axs (list[matplotlib.axes.Axes, optional]): Existing list of axes to plot on. If None, new axes are created. Plots on [0] by default. + subplots_kwargs (dict, optional): Keyword arguments to pass to create_axes_per_figure. + xlabel (str | Unit, optional): Label for the x-axis. If a Unit is provided, applies unit correction. + ylabel (str | Unit, optional): Label for the y-axis. If a Unit is provided, applies unit correction. + title (str | Unit, optional): Title for the plot. + **kwargs: Additional keyword arguments to pass to the save function. + + Returns: + tuple: A tuple containing the figure and axes objects. + """ + + # Handle label units and corrections + if xlabel is None: + xlabel = "Input Signal" + x_correction = 1 + elif isinstance(xlabel, Unit): + x_correction = xlabel.base + logger.warning( + f"Data correction of 1/{x_correction} from unit {xlabel} applied on x-axis." + ) + xlabel = xlabel.label + else: + pass + x_correction = 1 + + if ylabel is None: + ylabel = "Output Signal" + y_correction = 1 + elif isinstance(ylabel, Unit): + y_correction = ylabel.base + logger.warning( + f"Data correction of 1/{y_correction} from unit {ylabel} applied on y-axis." 
+ ) + ylabel = ylabel.label + else: + pass + y_correction = 1 + + if subplots_kwargs is None: + subplots_kwargs = {} + + if plot_kwargs is None: + plot_kwargs = {"marker": "o", "linestyle": "-"} + + # Create a figure and axes if not provided + if fig is None or axs is None: + fig, axs = create_axes_per_figure(rows=1, columns=1, **subplots_kwargs) + + ax = axs[0] + + i = 0 + # Iterate through inputs and outputs to plot them + for input_signal, output_signal in zip( + signal_dc_collection.inputs, signal_dc_collection.outputs + ): + for input_trace, output_trace in zip( + input_signal.trace_list, output_signal.trace_list + ): + # Apply unit corrections + x_values = np.array(input_trace.values) / x_correction + y_values = np.array(output_trace.values) / y_correction + + if labels is None: + label_i = f"{input_trace.name} -> {output_trace.name}" + else: + label_i = labels[i] + + # Plot data + ax.plot(x_values, y_values, label=label_i, **plot_kwargs) + i += 1 + + # Set labels and title + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + + if title is not None: + fig.suptitle(title) + + ax.legend() + + # Save the figure using the save function and additional kwargs + save(fig, **kwargs) + + return fig, ax diff --git a/piel/visual/plot/signals/time/basic.py b/piel/visual/plot/signals/time/basic.py index 0a7c28bb..89fc6c3c 100644 --- a/piel/visual/plot/signals/time/basic.py +++ b/piel/visual/plot/signals/time/basic.py @@ -1,5 +1,5 @@ from typing import Any -from piel.types import DataTimeSignalData, Unit +from piel.types import TimeSignalData, Unit import numpy as np import matplotlib.pyplot as plt from piel.visual.plot.position import create_axes_per_figure @@ -10,7 +10,7 @@ def plot_time_signal_data( - signal: DataTimeSignalData, + signal: TimeSignalData, fig: Any = None, axs: Any = None, subplots_kwargs: dict = None, @@ -23,7 +23,7 @@ def plot_time_signal_data( Plots a single time signal on a figure. Args: - signal (DataTimeSignalData): The time signal to plot. 
+ signal (TimeSignalData): The time signal to plot. fig (matplotlib.figure.Figure, optional): Existing figure to plot on. If None, a new figure is created. axs (list[matplotlib.axes.Axes, optional]): Existing list of axes to plot on. If None, new axes are created. Plots on [0] by default. subplots_kwargs (dict, optional): Keyword arguments to pass to create_axes_per_figure. diff --git a/piel/visual/plot/signals/time/overlay.py b/piel/visual/plot/signals/time/overlay.py index c1335942..e50c62dc 100644 --- a/piel/visual/plot/signals/time/overlay.py +++ b/piel/visual/plot/signals/time/overlay.py @@ -1,5 +1,5 @@ from typing import Any -from piel.types import MultiDataTimeSignal, Unit +from piel.types import MultiTimeSignalData, Unit import numpy as np import matplotlib.pyplot as plt from piel.visual.plot.position import create_axes_per_figure @@ -10,7 +10,7 @@ def plot_multi_data_time_signal_equivalent( - multi_signal: MultiDataTimeSignal, + multi_signal: MultiTimeSignalData, fig: Any = None, axs: Any = None, subplots_kwargs: dict = None, @@ -22,7 +22,7 @@ def plot_multi_data_time_signal_equivalent( Plots all rising edge signals on the same figure with a shared x-axis. Args: - multi_signal (List[DataTimeSignalData]): List of rising edge signals. + multi_signal (List[TimeSignalData]): List of rising edge signals. subplots_kwargs (dict): Keyword arguments to pass to create_axes_per_figure. 
Returns: diff --git a/piel/visual/plot/signals/time/separate.py b/piel/visual/plot/signals/time/separate.py index 1790ec87..84504128 100644 --- a/piel/visual/plot/signals/time/separate.py +++ b/piel/visual/plot/signals/time/separate.py @@ -1,5 +1,5 @@ from typing import Any, Tuple -from piel.types import MultiDataTimeSignal, Unit +from piel.types import MultiTimeSignalData, Unit import numpy as np from piel.visual.plot.position import create_axes_per_figure from piel.visual.plot.core import save @@ -9,7 +9,7 @@ def plot_multi_data_time_signal_different( - multi_signal: MultiDataTimeSignal, + multi_signal: MultiTimeSignalData, fig: Any = None, axs: Any = None, subplots_kwargs: dict = None, @@ -24,7 +24,7 @@ def plot_multi_data_time_signal_different( Plots all rising edge signals on the same figure, but with a shared x-axis and multiple y-axes. Args: - multi_signal (MultiDataTimeSignal): List of rising edge signals. + multi_signal (MultiTimeSignalData): List of rising edge signals. fig (Any): Figure object. axs (Any): Axes object. subplots_kwargs (dict): Keyword arguments to pass to create_axes_per_figure. 
@@ -89,6 +89,11 @@ def plot_multi_data_time_signal_different( elif isinstance(title, str): fig.suptitle(title) + if time_range_s is None: + # Assumes at least one signal + time_range_s = [min(multi_signal[0].time_s), max(multi_signal[0].time_s)] + # TODO improve this + time_range_s[0] = time_range_s[0] / x_correction time_range_s[1] = time_range_s[1] / x_correction diff --git a/piel/visual/signals.py b/piel/visual/signals.py index 528eda65..36312544 100644 --- a/piel/visual/signals.py +++ b/piel/visual/signals.py @@ -1,7 +1,7 @@ -from ..types import MultiDataTimeSignal +from ..types import MultiTimeSignalData -def plot_time_signals(multi_data_time_signal: MultiDataTimeSignal): +def plot_time_signals(multi_data_time_signal: MultiTimeSignalData): """ TODO signals """ diff --git a/tests/analysis/signals/time/core/test_dimension.py b/tests/analysis/signals/time/core/test_dimension.py index 3377f511..21d7d4d9 100644 --- a/tests/analysis/signals/time/core/test_dimension.py +++ b/tests/analysis/signals/time/core/test_dimension.py @@ -400,17 +400,13 @@ def test_concatenate_metrics_collection_invalid_type(): def test_extract_mean_metrics_list_success(): """ - Test extracting mean metrics from a MultiDataTimeSignal. + Test extracting mean metrics from a MultiTimeSignalData. 
""" - # Assuming MultiDataTimeSignal is a list of DataTimeSignalData - from piel.types import DataTimeSignalData + # Assuming MultiTimeSignalData is a list of TimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData( - time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1" - ) - signal2 = DataTimeSignalData( - time_s=[0, 1, 2], data=[40, 50, 60], data_name="Signal2" - ) + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1") + signal2 = TimeSignalData(time_s=[0, 1, 2], data=[40, 50, 60], data_name="Signal2") multi_data_time_signal = [signal1, signal2] @@ -450,9 +446,9 @@ def test_extract_mean_metrics_list_empty_data(): """ Test that ValueError is raised when a signal has empty data array. """ - from piel.types import DataTimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData(time_s=[0, 1, 2], data=[], data_name="Signal1") + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[], data_name="Signal1") multi_data_time_signal = [signal1] with pytest.raises(ValueError, match="Signal 'Signal1' has an empty data array."): @@ -461,16 +457,12 @@ def test_extract_mean_metrics_list_empty_data(): def test_extract_peak_to_peak_metrics_list_success(): """ - Test extracting peak-to-peak metrics from a MultiDataTimeSignal. + Test extracting peak-to-peak metrics from a MultiTimeSignalData. 
""" - from piel.types import DataTimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData( - time_s=[0, 1, 2], data=[10, 20, 15], data_name="Signal1" - ) - signal2 = DataTimeSignalData( - time_s=[0, 1, 2], data=[40, 50, 45], data_name="Signal2" - ) + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[10, 20, 15], data_name="Signal1") + signal2 = TimeSignalData(time_s=[0, 1, 2], data=[40, 50, 45], data_name="Signal2") multi_data_time_signal = [signal1, signal2] @@ -512,9 +504,9 @@ def test_extract_peak_to_peak_metrics_list_empty_data(): """ Test that ValueError is raised when a signal has empty data array. """ - from piel.types import DataTimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData(time_s=[0, 1, 2], data=[], data_name="Signal1") + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[], data_name="Signal1") multi_data_time_signal = [signal1] with pytest.raises(ValueError, match="Signal 'Signal1' has an empty data array."): @@ -525,14 +517,10 @@ def test_extract_statistical_metrics_mean_success(): """ Test extracting statistical metrics with analysis_type 'mean'. """ - from piel.types import DataTimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData( - time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1" - ) - signal2 = DataTimeSignalData( - time_s=[0, 1, 2], data=[40, 50, 60], data_name="Signal2" - ) + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1") + signal2 = TimeSignalData(time_s=[0, 1, 2], data=[40, 50, 60], data_name="Signal2") multi_data_time_signal = [signal1, signal2] @@ -555,14 +543,10 @@ def test_extract_statistical_metrics_peak_to_peak_success(): """ Test extracting statistical metrics with analysis_type 'peak_to_peak'. 
""" - from piel.types import DataTimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData( - time_s=[0, 1, 2], data=[10, 20, 15], data_name="Signal1" - ) - signal2 = DataTimeSignalData( - time_s=[0, 1, 2], data=[40, 50, 45], data_name="Signal2" - ) + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[10, 20, 15], data_name="Signal1") + signal2 = TimeSignalData(time_s=[0, 1, 2], data=[40, 50, 45], data_name="Signal2") multi_data_time_signal = [signal1, signal2] @@ -583,11 +567,9 @@ def test_extract_statistical_metrics_invalid_analysis_type(): """ Test that TypeError is raised when an invalid analysis_type is provided. """ - from piel.types import DataTimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData( - time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1" - ) + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1") multi_data_time_signal = [signal1] with pytest.raises(TypeError, match="Undefined analysis type."): @@ -600,14 +582,10 @@ def test_extract_statistical_metrics_collection_success(): """ Test extracting statistical metrics collection with multiple analysis types. """ - from piel.types import DataTimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData( - time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1" - ) - signal2 = DataTimeSignalData( - time_s=[0, 1, 2], data=[40, 50, 60], data_name="Signal2" - ) + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1") + signal2 = TimeSignalData(time_s=[0, 1, 2], data=[40, 50, 60], data_name="Signal2") multi_data_time_signal = [signal1, signal2] @@ -633,11 +611,9 @@ def test_extract_statistical_metrics_collection_invalid_analysis_types(): """ Test that TypeError is raised when analysis_types is not a list. 
""" - from piel.types import DataTimeSignalData + from piel.types import TimeSignalData - signal1 = DataTimeSignalData( - time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1" - ) + signal1 = TimeSignalData(time_s=[0, 1, 2], data=[10, 20, 30], data_name="Signal1") multi_data_time_signal = [signal1] with pytest.raises(TypeError, match="analysis_types must be a list"): diff --git a/tests/analysis/signals/time/core/test_offset.py b/tests/analysis/signals/time/core/test_offset.py index c63cedca..08e06ba4 100644 --- a/tests/analysis/signals/time/core/test_offset.py +++ b/tests/analysis/signals/time/core/test_offset.py @@ -8,20 +8,20 @@ ) # Import necessary classes and units -from piel.types import DataTimeSignalData, Unit +from piel.types import TimeSignalData, Unit # Sample Units SECOND_UNIT = Unit(name="second", datum="time", base=1, label="s") VOLTAGE_UNIT = Unit(name="volt", datum="voltage", base=1, label="V") -# Helper function to create DataTimeSignalData +# Helper function to create TimeSignalData def create_data_time_signal( time_s: list[float], data: list[float], data_name: str = "Signal", -) -> DataTimeSignalData: - return DataTimeSignalData(time_s=time_s, data=data, data_name=data_name) +) -> TimeSignalData: + return TimeSignalData(time_s=time_s, data=data, data_name=data_name) # def test_offset_to_first_rising_edge_success(): diff --git a/tests/analysis/signals/time/core/test_split_threshold_compose.py b/tests/analysis/signals/time/core/test_split_threshold_compose.py index a248a3cc..9738345e 100644 --- a/tests/analysis/signals/time/core/test_split_threshold_compose.py +++ b/tests/analysis/signals/time/core/test_split_threshold_compose.py @@ -6,7 +6,7 @@ # Import your custom Pydantic models and functions from piel.types import ( - DataTimeSignalData, + TimeSignalData, ScalarMetricCollection, PulsedLaserMetrics, ScalarMetric, @@ -26,7 +26,7 @@ @pytest.fixture def sample_full_signal(): """ - Fixture to create a sample DataTimeSignalData object with 
predefined Gaussian pulses. + Fixture to create a sample TimeSignalData object with predefined Gaussian pulses. """ # Generate a time array from 0 to 10 ns with 1000 points time_s = np.linspace(0, 10e-9, 1000, endpoint=False) @@ -39,7 +39,7 @@ def sample_full_signal(): pulse2 = 3.0 * np.exp(-((time_s - 7.5e-9) ** 2) / (2 * (0.1e-9) ** 2)) data += pulse1 + pulse2 - full_signal = DataTimeSignalData( + full_signal = TimeSignalData( time_s=time_s.tolist(), data=data.tolist(), data_name="test_signal", @@ -52,7 +52,7 @@ def sample_full_signal(): @pytest.fixture def sample_full_signal_no_pulses(): """ - Fixture to create a DataTimeSignalData object with no pulses. + Fixture to create a TimeSignalData object with no pulses. Ensures noise does not exceed the detection threshold. """ # Generate a time array from 0 to 10 ns with 1000 points @@ -65,7 +65,7 @@ def sample_full_signal_no_pulses(): ) # Reduced std to ensure noise < 1.2 W data = noise.tolist() - full_signal = DataTimeSignalData( + full_signal = TimeSignalData( time_s=time_s.tolist(), data=data, data_name="no_pulses_signal", @@ -78,7 +78,7 @@ def sample_full_signal_no_pulses(): @pytest.fixture def sample_full_signal_multiple_pulses(): """ - Fixture to create a DataTimeSignalData object with multiple Gaussian pulses. + Fixture to create a TimeSignalData object with multiple Gaussian pulses. """ # Generate a time array from 0 to 20 ns with 2000 points time_s = np.linspace(0, 20e-9, 2000, endpoint=False) @@ -97,7 +97,7 @@ def sample_full_signal_multiple_pulses(): pulse = amplitude * np.exp(-((time_s - center) ** 2) / (2 * std_dev**2)) data += pulse - full_signal = DataTimeSignalData( + full_signal = TimeSignalData( time_s=time_s.tolist(), data=data.tolist(), data_name="multiple_pulses_signal", @@ -182,7 +182,7 @@ def test_extract_peak_to_peak_metrics_invalid_full_signal(): """ Test that providing an invalid full_signal raises an AttributeError. 
""" - invalid_signal = "this is not a DataTimeSignalData object" + invalid_signal = "this is not a TimeSignalData object" # with pytest.raises(AttributeError): # extract_peak_to_peak_metrics_after_split_pulses( @@ -325,7 +325,7 @@ def test_extract_peak_to_peak_metrics_pulse_overlap(): pulse2 = 6.0 * np.exp(-((time_s - 4.1e-9) ** 2) / (2 * (0.2e-9) ** 2)) data += pulse1 + pulse2 - full_signal = DataTimeSignalData( + full_signal = TimeSignalData( time_s=time_s.tolist(), data=data.tolist(), data_name="overlapping_pulses_signal", @@ -360,7 +360,7 @@ def test_extract_peak_to_peak_metrics_single_pulse(sample_full_signal): pulse = 4.5 * np.exp(-((time_s - 5e-9) ** 2) / (2 * (0.1e-9) ** 2)) data += pulse - full_signal = DataTimeSignalData( + full_signal = TimeSignalData( time_s=time_s.tolist(), data=data.tolist(), data_name="single_pulse_signal", diff --git a/tests/analysis/signals/time/core/test_transitions.py b/tests/analysis/signals/time/core/test_transitions.py index ca470082..d9bfbd8c 100644 --- a/tests/analysis/signals/time/core/test_transitions.py +++ b/tests/analysis/signals/time/core/test_transitions.py @@ -1,6 +1,6 @@ import numpy as np import pytest -from piel.types import DataTimeSignalData, MultiDataTimeSignal +from piel.types import TimeSignalData, MultiTimeSignalData from piel.analysis.signals.time import extract_rising_edges @@ -15,7 +15,7 @@ def square_wave_signal(): frequency = 5 # 5 Hz square wave square_wave = 0.5 * (1 + np.sign(np.sin(2 * np.pi * frequency * t))) - signal = DataTimeSignalData( + signal = TimeSignalData( time_s=t.tolist(), data=square_wave.tolist(), data_name="SquareWave" ) return signal diff --git a/tests/analysis/signals/time/core/test_transiton.py b/tests/analysis/signals/time/core/test_transiton.py index 607e3a09..e7c1bab7 100644 --- a/tests/analysis/signals/time/core/test_transiton.py +++ b/tests/analysis/signals/time/core/test_transiton.py @@ -6,7 +6,7 @@ from piel.analysis.signals.time import extract_rising_edges # Import 
necessary classes -from piel.types import DataTimeSignalData, MultiDataTimeSignal, Unit +from piel.types import TimeSignalData, MultiTimeSignalData, Unit # Configure logging for testing if necessary import logging @@ -19,9 +19,9 @@ CURRENT_UNIT = Unit(name="ampere", datum="ampere", base=1.0, label="A") -# Helper function to create DataTimeSignalData +# Helper function to create TimeSignalData def create_data_time_signal(time_s, data, data_name="Test Signal"): - return DataTimeSignalData(time_s=time_s, data=data, data_name=data_name) + return TimeSignalData(time_s=time_s, data=data, data_name=data_name) def test_offset_time_signals_normal_case(): @@ -590,7 +590,7 @@ def test_offset_time_signals_mismatched_time_data_lengths(): multi_signal = [signal] # Apply offset (should proceed without error as the function does not check lengths) - # Depending on the DataTimeSignalData definition, this might be handled elsewhere + # Depending on the TimeSignalData definition, this might be handled elsewhere # Here, we just check that the time is offset correctly offset_signals = offset_time_signals(multi_signal) @@ -648,7 +648,7 @@ def test_extract_rising_edges_signal_with_single_point(): def test_offset_time_signals_no_signals(): """ - Test offset_time_signals with an empty MultiDataTimeSignal list. + Test offset_time_signals with an empty MultiTimeSignalData list. 
""" multi_signal = [] diff --git a/tests/analysis/signals/time/integration/test_extract_pulse_metrics.py b/tests/analysis/signals/time/integration/test_extract_pulse_metrics.py index 5660ee41..bab7ef5f 100644 --- a/tests/analysis/signals/time/integration/test_extract_pulse_metrics.py +++ b/tests/analysis/signals/time/integration/test_extract_pulse_metrics.py @@ -6,7 +6,7 @@ # Import your custom Pydantic models and functions from piel.types import ( - DataTimeSignalData, + TimeSignalData, ScalarMetricCollection, PulsedLaserMetrics, ScalarMetric, @@ -26,7 +26,7 @@ @pytest.fixture def sample_full_signal(): """ - Fixture to create a sample DataTimeSignalData object with predefined pulses. + Fixture to create a sample TimeSignalData object with predefined pulses. """ # Generate a time array from 0 to 10 ns with 1000 points time_s = np.linspace(0, 10e-9, 1000, endpoint=False) @@ -38,7 +38,7 @@ def sample_full_signal(): data[(time_s >= 2e-9) & (time_s < 3e-9)] = 5.0 data[(time_s >= 7e-9) & (time_s < 8e-9)] = 3.0 - full_signal = DataTimeSignalData( + full_signal = TimeSignalData( time_s=time_s.tolist(), data=data.tolist(), data_name="test_signal", @@ -51,7 +51,7 @@ def sample_full_signal(): @pytest.fixture def sample_full_signal_no_pulses(): """ - Fixture to create a DataTimeSignalData object with no pulses. + Fixture to create a TimeSignalData object with no pulses. Ensures noise does not exceed the detection threshold. """ # Generate a time array from 0 to 10 ns with 1000 points @@ -64,7 +64,7 @@ def sample_full_signal_no_pulses(): ) # Reduced std to ensure noise < 1.5 W data = noise.tolist() - full_signal = DataTimeSignalData( + full_signal = TimeSignalData( time_s=time_s.tolist(), data=data, data_name="no_pulses_signal", @@ -77,7 +77,7 @@ def sample_full_signal_no_pulses(): @pytest.fixture def sample_full_signal_multiple_pulses(): """ - Fixture to create a DataTimeSignalData object with multiple pulses. + Fixture to create a TimeSignalData object with multiple pulses. 
""" # Generate a time array from 0 to 20 ns with 2000 points time_s = np.linspace(0, 20e-9, 2000, endpoint=False) @@ -95,7 +95,7 @@ def sample_full_signal_multiple_pulses(): for start, end, amplitude in pulse_params: data[(time_s >= start) & (time_s < end)] = amplitude - full_signal = DataTimeSignalData( + full_signal = TimeSignalData( time_s=time_s.tolist(), data=data.tolist(), data_name="multiple_pulses_signal", @@ -170,7 +170,7 @@ def test_extract_peak_to_peak_metrics_invalid_full_signal(): """ Test that providing an invalid full_signal raises an AttributeError. """ - invalid_signal = "this is not a DataTimeSignalData object" + invalid_signal = "this is not a TimeSignalData object" # with pytest.raises(AttributeError): # extract_peak_to_peak_metrics_after_split_pulses( diff --git a/tests/experimental/devices/test_DPO73304.py b/tests/experimental/devices/test_DPO73304.py index 265478b2..20007184 100644 --- a/tests/experimental/devices/test_DPO73304.py +++ b/tests/experimental/devices/test_DPO73304.py @@ -5,7 +5,7 @@ extract_to_data_time_signal, ) from piel.types import ( - DataTimeSignalData, + TimeSignalData, ) from unittest.mock import patch, MagicMock @@ -65,7 +65,7 @@ def test_extract_to_data_time_signal(): return_value=pd.DataFrame({"time_s": [1, 2, 3], "voltage_V": [4, 5, 6]}), ): signal = extract_to_data_time_signal("dummy_path.csv") - assert isinstance(signal, DataTimeSignalData) + assert isinstance(signal, TimeSignalData) assert signal.time_s.tolist() == [1, 2, 3] assert signal.data.tolist() == [4, 5, 6] assert signal.data_name == "voltage_V" @@ -82,7 +82,7 @@ def test_extract_propagation_delay_data_from_measurement(): # with patch("piel.measurement.devices.DPO73304.extract_to_signal_measurement") as mock_extract_signal_measurement, \ # patch("piel.measurement.devices.DPO73304.extract_to_data_time_signal") as mock_extract_data_time_signal: # mock_extract_signal_measurement.return_value = MagicMock(spec=SignalMetricsMeasurementCollection) - # 
mock_extract_data_time_signal.return_value = MagicMock(spec=DataTimeSignalData) + # mock_extract_data_time_signal.return_value = MagicMock(spec=TimeSignalData) # # data = extract_propagation_delay_data_from_measurement(mock_measurement) # assert isinstance(data, PropagationDelayMeasurementData) @@ -125,14 +125,12 @@ def test_combine_channel_data(): with patch( "piel.experimental.devices.DPO73304.extract_to_data_time_signal" ) as mock_extract_to_data_time_signal: - mock_extract_to_data_time_signal.return_value = MagicMock( - spec=DataTimeSignalData - ) + mock_extract_to_data_time_signal.return_value = MagicMock(spec=TimeSignalData) # signals = combine_channel_data(["dummy_path1.csv", "dummy_path2.csv"]) # # assert isinstance(signals, list) # assert len(signals) == 2 - # assert all(isinstance(signal, DataTimeSignalData) for signal in signals) + # assert all(isinstance(signal, TimeSignalData) for signal in signals) pass diff --git a/tests/experimental/types/measurements/data/test_propagation.py b/tests/experimental/types/measurements/data/test_propagation.py index fd5ce7ff..28a161ff 100644 --- a/tests/experimental/types/measurements/data/test_propagation.py +++ b/tests/experimental/types/measurements/data/test_propagation.py @@ -5,7 +5,7 @@ MeasurementDataCollection, ) from piel.types import ( - DataTimeSignalData, + TimeSignalData, ScalarMetricCollection, ) @@ -13,8 +13,8 @@ # Test PropagationDelayMeasurementData def test_propagation_delay_measurement_data_initialization(): signal_metrics = ScalarMetricCollection() - dut_waveform = DataTimeSignalData() - reference_waveform = DataTimeSignalData() + dut_waveform = TimeSignalData() + reference_waveform = TimeSignalData() measurement_data = PropagationDelayMeasurementData( measurements=signal_metrics, diff --git a/tests/models/transient/test_electro_optic_pulsed_laser.py b/tests/models/transient/test_electro_optic_pulsed_laser.py index f56f5e09..7bd15d50 100644 --- 
a/tests/models/transient/test_electro_optic_pulsed_laser.py +++ b/tests/models/transient/test_electro_optic_pulsed_laser.py @@ -7,7 +7,7 @@ from piel.types import ( PulsedLaser, PulsedLaserMetrics, - DataTimeSignalData, + TimeSignalData, ScalarMetric, ns, W, @@ -77,8 +77,8 @@ def test_generate_laser_time_data_pulses_typical(pulsed_laser_with_metrics): data_name=data_name, ) - # Assertions on the returned DataTimeSignalData - assert isinstance(signal_data, DataTimeSignalData) + # Assertions on the returned TimeSignalData + assert isinstance(signal_data, TimeSignalData) assert signal_data.data_name == data_name assert signal_data.time_s_unit == ns assert signal_data.data_unit == W