diff --git a/pyscan/__init__.py b/pyscan/__init__.py index 2d5ee542..eca55d6e 100644 --- a/pyscan/__init__.py +++ b/pyscan/__init__.py @@ -3,3 +3,5 @@ from .drivers import * from .plotting import * from .drivers.testing import * + +from itemattribute import ItemAttribute diff --git a/pyscan/drivers/agilent/agilent33500.py b/pyscan/drivers/agilent/agilent33500.py index f0b45a8a..5956d0de 100644 --- a/pyscan/drivers/agilent/agilent33500.py +++ b/pyscan/drivers/agilent/agilent33500.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from ..instrument_driver import InstrumentDriver import re from ...general.d_range import drange diff --git a/pyscan/drivers/agilent/agilent34410.py b/pyscan/drivers/agilent/agilent34410.py index 5f92642a..9afeac58 100644 --- a/pyscan/drivers/agilent/agilent34410.py +++ b/pyscan/drivers/agilent/agilent34410.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/agilent/agilent8267d.py b/pyscan/drivers/agilent/agilent8267d.py index 8663b689..be98a35f 100644 --- a/pyscan/drivers/agilent/agilent8267d.py +++ b/pyscan/drivers/agilent/agilent8267d.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/agilent/agilent8275n.py b/pyscan/drivers/agilent/agilent8275n.py index 3ae4831e..11e02e5c 100644 --- a/pyscan/drivers/agilent/agilent8275n.py +++ b/pyscan/drivers/agilent/agilent8275n.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/agilent/agilentdso900series.py b/pyscan/drivers/agilent/agilentdso900series.py index c99cc327..303b014f 100644 --- a/pyscan/drivers/agilent/agilentdso900series.py +++ b/pyscan/drivers/agilent/agilentdso900series.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import numpy as np from math import ceil from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/attocube/attocube_exceptions.py b/pyscan/drivers/attocube/attocube_exceptions.py index 5e8bdef3..968152a6 100644 --- a/pyscan/drivers/attocube/attocube_exceptions.py +++ b/pyscan/drivers/attocube/attocube_exceptions.py @@ -1,4 +1,4 @@ -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from ..exceptions.external_package_excpetion import ExternalPackageException diff --git a/pyscan/drivers/bluefors/blueforslog.py b/pyscan/drivers/bluefors/blueforslog.py index b17b846d..6921e610 100644 --- a/pyscan/drivers/bluefors/blueforslog.py +++ b/pyscan/drivers/bluefors/blueforslog.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from pathlib import Path import pandas as pd diff --git a/pyscan/drivers/heliotis/helios_exceptions.py b/pyscan/drivers/heliotis/helios_exceptions.py index 7280f983..b3afa629 100644 --- a/pyscan/drivers/heliotis/helios_exceptions.py +++ b/pyscan/drivers/heliotis/helios_exceptions.py @@ -1,4 +1,4 @@ -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from ..exceptions.external_package_excpetion import ExternalPackageException diff --git a/pyscan/drivers/heliotis/helios_sdk.py b/pyscan/drivers/heliotis/helios_sdk.py index aac3e7de..d30b3be3 100644 --- a/pyscan/drivers/heliotis/helios_sdk.py +++ b/pyscan/drivers/heliotis/helios_sdk.py @@ -1,4 +1,4 @@ -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute def sense_tqp_to_frequency(SensTqp): diff --git a/pyscan/drivers/heliotis/helioscamera.py b/pyscan/drivers/heliotis/helioscamera.py index 
8f6f3a3e..1cabcd48 100644 --- a/pyscan/drivers/heliotis/helioscamera.py +++ b/pyscan/drivers/heliotis/helioscamera.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import sys import os from time import sleep diff --git a/pyscan/drivers/hp/hp34401a.py b/pyscan/drivers/hp/hp34401a.py index dee0b715..8a199f3c 100644 --- a/pyscan/drivers/hp/hp34401a.py +++ b/pyscan/drivers/hp/hp34401a.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/instrument_driver.py b/pyscan/drivers/instrument_driver.py index 15bddddd..4b46b479 100644 --- a/pyscan/drivers/instrument_driver.py +++ b/pyscan/drivers/instrument_driver.py @@ -1,6 +1,6 @@ -# -*- coding: utf-8 -*- -from ..general.item_attribute import ItemAttribute from .new_instrument import new_instrument + +from itemattribute import ItemAttribute from collections import OrderedDict import numpy as np import re diff --git a/pyscan/drivers/keithley/keithley2400.py b/pyscan/drivers/keithley/keithley2400.py index 2c650fb3..9ef2a2e4 100644 --- a/pyscan/drivers/keithley/keithley2400.py +++ b/pyscan/drivers/keithley/keithley2400.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from pyscan.drivers.instrument_driver import InstrumentDriver import numpy as np from time import sleep diff --git a/pyscan/drivers/keysight/keysight53230a.py b/pyscan/drivers/keysight/keysight53230a.py index 86d2b538..4e4ec5df 100644 --- a/pyscan/drivers/keysight/keysight53230a.py +++ b/pyscan/drivers/keysight/keysight53230a.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/keysight/keysightm3302aawg.py b/pyscan/drivers/keysight/keysightm3302aawg.py index 44213610..699816f6 100644 --- a/pyscan/drivers/keysight/keysightm3302aawg.py +++ b/pyscan/drivers/keysight/keysightm3302aawg.py @@ -1,6 +1,6 @@ -# -*- coding: utf-8 -*- + import keysightSD1 -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute import sys sys.path.append(r'c:\Program Files (x86)\Keysight\SD1\Libraries\Python') diff --git a/pyscan/drivers/keysight/keysightm3302adaq.py b/pyscan/drivers/keysight/keysightm3302adaq.py index 314ad917..7ed1c013 100644 --- a/pyscan/drivers/keysight/keysightm3302adaq.py +++ b/pyscan/drivers/keysight/keysightm3302adaq.py @@ -1,6 +1,5 @@ -# -*- coding: utf-8 -*- -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute import sys sys.path.append(r'c:\Program Files (x86)\Keysight\SD1\Libraries\Python') diff --git a/pyscan/drivers/keysight/keysite_exceptions.py b/pyscan/drivers/keysight/keysite_exceptions.py index e6102ae4..4a90a19e 100644 --- a/pyscan/drivers/keysight/keysite_exceptions.py +++ b/pyscan/drivers/keysight/keysite_exceptions.py @@ -1,4 +1,4 @@ -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from ..exceptions.external_package_excpetion import ExternalPackageException diff --git a/pyscan/drivers/new_instrument.py b/pyscan/drivers/new_instrument.py index 078afafc..6dea257c 100644 --- a/pyscan/drivers/new_instrument.py +++ b/pyscan/drivers/new_instrument.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- import pyvisa as visa import pyscan as ps diff --git a/pyscan/drivers/oceanoptics/oceanoptics_exceptions.py b/pyscan/drivers/oceanoptics/oceanoptics_exceptions.py index 78ff21a9..9d6d1c0a 100644 --- a/pyscan/drivers/oceanoptics/oceanoptics_exceptions.py +++ b/pyscan/drivers/oceanoptics/oceanoptics_exceptions.py @@ -1,4 +1,4 @@ -from 
...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from ..exceptions.external_package_excpetion import ExternalPackageException diff --git a/pyscan/drivers/oceanoptics/oceanopticsqepro.py b/pyscan/drivers/oceanoptics/oceanopticsqepro.py index 32c6f36d..4048a67d 100644 --- a/pyscan/drivers/oceanoptics/oceanopticsqepro.py +++ b/pyscan/drivers/oceanoptics/oceanopticsqepro.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- + import numpy as np from seabreeze.spectrometers import Spectrometer -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute class OceanOpticsQEPro(ItemAttribute): diff --git a/pyscan/drivers/oxford/oxfordigh.py b/pyscan/drivers/oxford/oxfordigh.py index bcafa45f..f4675c2b 100755 --- a/pyscan/drivers/oxford/oxfordigh.py +++ b/pyscan/drivers/oxford/oxfordigh.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/oxford/oxfordips120.py b/pyscan/drivers/oxford/oxfordips120.py index eaf02d6b..a3110fae 100644 --- a/pyscan/drivers/oxford/oxfordips120.py +++ b/pyscan/drivers/oxford/oxfordips120.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from datetime import datetime from time import sleep, time diff --git a/pyscan/drivers/picoquant/picoharp300.py b/pyscan/drivers/picoquant/picoharp300.py index b1ec2ba4..63d18e13 100644 --- a/pyscan/drivers/picoquant/picoharp300.py +++ b/pyscan/drivers/picoquant/picoharp300.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- + import ctypes from ctypes import byref -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from time import sleep from pathlib import Path from time import strftime diff --git a/pyscan/drivers/picoquant/picoquant_exceptions.py b/pyscan/drivers/picoquant/picoquant_exceptions.py index 940598a0..2009a58d 100644 --- a/pyscan/drivers/picoquant/picoquant_exceptions.py +++ b/pyscan/drivers/picoquant/picoquant_exceptions.py @@ -1,4 +1,4 @@ -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from ..exceptions.external_package_excpetion import ExternalPackageException diff --git a/pyscan/drivers/princeton_instruments/princetonpimax4.py b/pyscan/drivers/princeton_instruments/princetonpimax4.py index ee4d0b72..b2166681 100644 --- a/pyscan/drivers/princeton_instruments/princetonpimax4.py +++ b/pyscan/drivers/princeton_instruments/princetonpimax4.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- + from PrincetonInstruments.LightField.AddIns import Pulse from PrincetonInstruments.LightField.AddIns import ExperimentSettings from PrincetonInstruments.LightField.AddIns import CameraSettings from PrincetonInstruments.LightField.Automation import Automation -from ...general.item_attribute import ItemAttribute - +from itemattribute import ItemAttribute import clr import sys import os diff --git a/pyscan/drivers/princeton_instruments/pylonsdk.py b/pyscan/drivers/princeton_instruments/pylonsdk.py index 68f48582..b5a773ca 100644 --- a/pyscan/drivers/princeton_instruments/pylonsdk.py +++ b/pyscan/drivers/princeton_instruments/pylonsdk.py @@ -1,5 +1,5 @@ -# -*- coding: utf-8 -*- -from ...general.item_attribute import ItemAttribute + +from itemattribute import ItemAttribute class PylonSDK(ItemAttribute): diff --git a/pyscan/drivers/spin_core/spin_core_api.py b/pyscan/drivers/spin_core/spin_core_api.py index b520063f..6a0f4f46 100644 --- a/pyscan/drivers/spin_core/spin_core_api.py +++ b/pyscan/drivers/spin_core/spin_core_api.py @@ -1,4 +1,4 @@ 
-from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute import ctypes from . import spin_api_wrapper as pb diff --git a/pyscan/drivers/spin_core/spin_core_exceptions.py b/pyscan/drivers/spin_core/spin_core_exceptions.py index b388714f..c99bc87d 100644 --- a/pyscan/drivers/spin_core/spin_core_exceptions.py +++ b/pyscan/drivers/spin_core/spin_core_exceptions.py @@ -1,4 +1,4 @@ -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from ..exceptions.external_package_excpetion import ExternalPackageException diff --git a/pyscan/drivers/stanford/stanford396.py b/pyscan/drivers/stanford/stanford396.py index 9ebb32c4..87e32568 100755 --- a/pyscan/drivers/stanford/stanford396.py +++ b/pyscan/drivers/stanford/stanford396.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/stanford/stanford400.py b/pyscan/drivers/stanford/stanford400.py index c86c1244..309a65d3 100755 --- a/pyscan/drivers/stanford/stanford400.py +++ b/pyscan/drivers/stanford/stanford400.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/stanford/stanford470.py b/pyscan/drivers/stanford/stanford470.py index d2ec5b86..32a516cf 100755 --- a/pyscan/drivers/stanford/stanford470.py +++ b/pyscan/drivers/stanford/stanford470.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/stanford/stanford620.py b/pyscan/drivers/stanford/stanford620.py index ffc0eeb0..13104144 100644 --- a/pyscan/drivers/stanford/stanford620.py +++ b/pyscan/drivers/stanford/stanford620.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver import struct import numpy as np diff --git a/pyscan/drivers/stanford/stanford830.py b/pyscan/drivers/stanford/stanford830.py index 38ba6ff1..b756bc25 100644 --- a/pyscan/drivers/stanford/stanford830.py +++ b/pyscan/drivers/stanford/stanford830.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver import numpy as np from time import sleep diff --git a/pyscan/drivers/stanford/stanford900.py b/pyscan/drivers/stanford/stanford900.py index ec04b6f4..dbf2b0a7 100644 --- a/pyscan/drivers/stanford/stanford900.py +++ b/pyscan/drivers/stanford/stanford900.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver import re import time diff --git a/pyscan/drivers/stanford/stanford928.py b/pyscan/drivers/stanford/stanford928.py index 439ae958..662c755b 100644 --- a/pyscan/drivers/stanford/stanford928.py +++ b/pyscan/drivers/stanford/stanford928.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from .stanford900 import Stanford900 from math import floor diff --git a/pyscan/drivers/swabian/pulselaser.py b/pyscan/drivers/swabian/pulselaser.py index 912a3f9f..1bec69cb 100644 --- a/pyscan/drivers/swabian/pulselaser.py +++ b/pyscan/drivers/swabian/pulselaser.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver from time import sleep import serial diff --git a/pyscan/drivers/testing/auto_test_driver.py b/pyscan/drivers/testing/auto_test_driver.py index 5c1a7bed..1db54575 100644 --- a/pyscan/drivers/testing/auto_test_driver.py +++ b/pyscan/drivers/testing/auto_test_driver.py @@ -1,10 +1,10 @@ -# -*- coding: utf-8 -*- + import pytest import math from collections import OrderedDict import typing from 
pyscan.drivers.testing.test_instrument_driver import TestInstrumentDriver -from ...general.get_pyscan_version import get_pyscan_version +from ...measurement.get_pyscan_version import get_pyscan_version import os from datetime import datetime import re @@ -580,7 +580,7 @@ def check_doc_strings(test_instrument): # write formatting test cases here. -def test_driver(device=TestInstrumentDriver(), skip_log=False, expected_attributes=None, expected_values=None, +def test_driver(device=TestInstrumentDriver(), skip_log=True, expected_attributes=None, expected_values=None, verbose=True): if expected_attributes is not None: check_has_attributes(device, expected_attributes) diff --git a/pyscan/drivers/testing/test_voltage.py b/pyscan/drivers/testing/test_voltage.py index f487c66a..29fea06f 100644 --- a/pyscan/drivers/testing/test_voltage.py +++ b/pyscan/drivers/testing/test_voltage.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from pyscan.drivers import InstrumentDriver @@ -67,7 +67,7 @@ def initialize_properties(self): 'name': 'voltage', 'write_string': 'VOLT {}', 'query_string': 'VOLT?', - 'range': [0, 10], + 'range': [-10, 10], 'return_type': float }) diff --git a/pyscan/drivers/thorlabs/thorlabs_exceptions.py b/pyscan/drivers/thorlabs/thorlabs_exceptions.py index 3a1e382d..a07d0f84 100644 --- a/pyscan/drivers/thorlabs/thorlabs_exceptions.py +++ b/pyscan/drivers/thorlabs/thorlabs_exceptions.py @@ -1,4 +1,4 @@ -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from ..exceptions.external_package_excpetion import ExternalPackageException diff --git a/pyscan/drivers/thorlabs/thorlabsbpc303.py b/pyscan/drivers/thorlabs/thorlabsbpc303.py index 6c80f14c..7b1b1509 100644 --- a/pyscan/drivers/thorlabs/thorlabsbpc303.py +++ b/pyscan/drivers/thorlabs/thorlabsbpc303.py @@ -1,5 +1,5 @@ -# -*- coding: utf-8 -*- -from ...general.item_attribute import ItemAttribute + +from itemattribute import ItemAttribute from thorlabs_kinesis import benchtop_piezo as bp from ctypes import c_char_p, c_ushort, c_ulong, c_short from time import sleep diff --git a/pyscan/drivers/thorlabs/thorlabsbsc203.py b/pyscan/drivers/thorlabs/thorlabsbsc203.py index 075a6f17..90df09df 100644 --- a/pyscan/drivers/thorlabs/thorlabsbsc203.py +++ b/pyscan/drivers/thorlabs/thorlabsbsc203.py @@ -1,6 +1,6 @@ -# -*- coding: utf-8 -*- + import numpy as np -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute from thorlabs_kinesis import benchtop_stepper_motor as bsm from ctypes import c_char_p, c_int, c_double, c_ushort, c_ulong, c_short from time import sleep diff --git a/pyscan/drivers/thorlabs/thorlabsitc4001.py b/pyscan/drivers/thorlabs/thorlabsitc4001.py index 6387b025..ac582d78 100644 --- a/pyscan/drivers/thorlabs/thorlabsitc4001.py +++ b/pyscan/drivers/thorlabs/thorlabsitc4001.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver diff --git a/pyscan/drivers/thorlabs/thorlabsmff101.py b/pyscan/drivers/thorlabs/thorlabsmff101.py index 08674448..41bde865 100644 --- a/pyscan/drivers/thorlabs/thorlabsmff101.py +++ b/pyscan/drivers/thorlabs/thorlabsmff101.py @@ -1,5 +1,5 @@ -# -*- coding: utf-8 -*- -from ...general.item_attribute import ItemAttribute + +from itemattribute import ItemAttribute from thorlabs_kinesis import filter_flipper as ff from ctypes import c_char_p, c_ushort, c_ulong from time import sleep diff --git a/pyscan/drivers/yokogawa/yokogawags200.py b/pyscan/drivers/yokogawa/yokogawags200.py index 
2ec22893..1031e9eb 100644 --- a/pyscan/drivers/yokogawa/yokogawags200.py +++ b/pyscan/drivers/yokogawa/yokogawags200.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..instrument_driver import InstrumentDriver import numpy as np from time import sleep diff --git a/pyscan/drivers/zurich_instruments/zurichhf2.py b/pyscan/drivers/zurich_instruments/zurichhf2.py index dee61b38..0e21e231 100644 --- a/pyscan/drivers/zurich_instruments/zurichhf2.py +++ b/pyscan/drivers/zurich_instruments/zurichhf2.py @@ -1,6 +1,6 @@ -# -*- coding: utf-8 -*- + import zhinst.ziPython as ziPython -from ...general.item_attribute import ItemAttribute +from itemattribute import ItemAttribute import numpy as np diff --git a/pyscan/general/__init__.py b/pyscan/general/__init__.py index b901e9c6..9db02f60 100644 --- a/pyscan/general/__init__.py +++ b/pyscan/general/__init__.py @@ -1,15 +1,10 @@ -# objects -from .item_attribute import ItemAttribute -from .pyscan_json_encoder import PyscanJSONEncoder - # methods from .d_range import drange +from .delta_product import delta_product from .first_string import first_string +from .infinite_iterator import infinite_iterator from .is_list_type import is_list_type from .is_numeric_type import is_numeric_type -from .quadrature_sum import quadrature_sum from .same_length import same_length from .set_difference import set_difference from .stack_or_append import stack_or_append -from .get_pyscan_version import get_pyscan_version -from .pyscan_json_decoder import PyscanJSONDecoder diff --git a/pyscan/general/d_range.py b/pyscan/general/d_range.py index d45c564b..12a2fe7f 100644 --- a/pyscan/general/d_range.py +++ b/pyscan/general/d_range.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import numpy as np @@ -46,4 +46,6 @@ def drange(start, delta, stop): values = [start + sign * delta * i for i in range(n + 1)] values += [stop] + values = np.array(values) + return values diff --git a/pyscan/general/delta_product.py b/pyscan/general/delta_product.py new file mode 100644 index 00000000..95b8abd8 --- /dev/null +++ b/pyscan/general/delta_product.py @@ -0,0 +1,40 @@ +from itertools import product +import numpy as np + + +def delta_product(iterator_list): + ''' + Generator that yields indices for arbitrarily deep nested for loops and + a delta for each index relative to the previous indices + + Parameters + ---------- + iterator_list : list(iterable) + + Yields + ------ + indices, delta + indices : tuple(int) + tuple of indices for the iteration of nested for loops + delta : tuple(int) + The 'delta' value between the current indices and the last indices + -1 if the index has returned to zero + 0 if it has not changed + 1 if it has incremented + ''' + + for iterable in iterator_list: + assert hasattr(iterable, '__iter__'), 'iterator_list must contain iterables' + + iterator_list = iterator_list[::-1] + + last = np.zeros(len(iterator_list)) + + for indicies in product(*iterator_list): + if indicies == tuple([0 for i in range(len(iterator_list))]): + yield indicies, tuple([-1 for length in iterator_list]) + else: + delta = np.array(indicies) - last + delta[np.where(delta < 0)] = -1 + yield indicies[::-1], tuple([int(val) for val in delta])[::-1] + last = indicies diff --git a/pyscan/general/first_string.py b/pyscan/general/first_string.py index f597ae76..76ccb70d 100644 --- a/pyscan/general/first_string.py +++ b/pyscan/general/first_string.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import numpy as np from six import string_types @@ -26,4 +26,6 @@ def first_string(obj): if isinstance(value,
string_types): return value else: - raise TypeError + raise TypeError("First string must be a string or array of strings") + else: + raise TypeError("First string must be a string or array of strings") diff --git a/pyscan/general/infinite_iterator.py b/pyscan/general/infinite_iterator.py new file mode 100644 index 00000000..2140f791 --- /dev/null +++ b/pyscan/general/infinite_iterator.py @@ -0,0 +1,5 @@ +def infinite_iterator(): + i = 0 + while True: + yield i + i += 1 diff --git a/pyscan/general/is_list_type.py b/pyscan/general/is_list_type.py index d6b4ea77..d26d2abc 100644 --- a/pyscan/general/is_list_type.py +++ b/pyscan/general/is_list_type.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import numpy as np diff --git a/pyscan/general/is_numeric_type.py b/pyscan/general/is_numeric_type.py index 203edf7e..9f29bf0f 100644 --- a/pyscan/general/is_numeric_type.py +++ b/pyscan/general/is_numeric_type.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + def is_numeric_type(obj): '''Determines if input is an `int` or `float`. @@ -13,4 +13,7 @@ def is_numeric_type(obj): `True` if `obj` is an `int` or `float`. ''' - return issubclass(type(obj), (int, float)) + if isinstance(obj, bool): + return 0 + else: + return issubclass(type(obj), (int, float)) diff --git a/pyscan/general/item_attribute.py b/pyscan/general/item_attribute.py deleted file mode 100644 index ecdada65..00000000 --- a/pyscan/general/item_attribute.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -class ItemAttribute(object): - ''' - Class that has properties which can be called like dictionary - items - - Parameters - ---------- - dictionary : dict - Dictionary object, defaults to None. - - ''' - - def __init__(self, dictionary=None): - if dictionary is not None: - for k in dictionary.keys(): - self[k] = dictionary[k] - - __getitem__ = object.__getattribute__ - __setitem__ = object.__setattr__ - __delitem__ = object.__delattr__ - - def keys(self): - '''Returns a list of keys. - ''' - return self.__dict__.keys() - - def values(self): - '''Returns a list of values. - ''' - return self.__dict__.values() - - def items(self): - '''Returns a list of key:value pairs. - ''' - return self.__dict__.items() - - def __contains__(self, item): - ''' - Overleads the `key in object` syntax to check if - `key in obj.__dict__` - ''' - - return item in self.__dict__ diff --git a/pyscan/general/quadrature_sum.py b/pyscan/general/quadrature_sum.py deleted file mode 100644 index 226f1073..00000000 --- a/pyscan/general/quadrature_sum.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np - - -def quadrature_sum(array): - '''Function applies np.sqrt(sum(arrayelements**@)) - - Parameters - ---------- - array : float - array-like object - - Returns - ------- - float - ''' - array_sum = 0 - - for value in array: - - array_sum += value ** 2 - - return np.sqrt(array_sum) diff --git a/pyscan/general/same_length.py b/pyscan/general/same_length.py index 467cf53f..80d8732e 100644 --- a/pyscan/general/same_length.py +++ b/pyscan/general/same_length.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import numpy as np diff --git a/pyscan/general/set_difference.py b/pyscan/general/set_difference.py index 58ba99dc..bceed8b0 100644 --- a/pyscan/general/set_difference.py +++ b/pyscan/general/set_difference.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + def set_difference(list1, list2): ''' Fuction that returns a list containing unique items in `list1` which are not in `list2`. 
diff --git a/pyscan/general/stack_or_append.py b/pyscan/general/stack_or_append.py index 8bc7a7b3..ad23e0a8 100644 --- a/pyscan/general/stack_or_append.py +++ b/pyscan/general/stack_or_append.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import numpy as np diff --git a/pyscan/measurement/__init__.py b/pyscan/measurement/__init__.py index bb1f81ce..5dc702ec 100644 --- a/pyscan/measurement/__init__.py +++ b/pyscan/measurement/__init__.py @@ -1,15 +1,9 @@ # Functions from .load_experiment import load_experiment +from .get_pyscan_version import get_pyscan_version # Scans/Experiments -from .chart_recorder import ChartRecorder -from .experiment import Sweep, Experiment -from .raster_experiment import RasterSweep, RasterExperiment -from .sparse_experiment import SparseSweep, SparseExperiment - -from .fast_stage_experiment import FastStageSweep, FastStageExperiment -from .fast_galvo_experiment import FastGalvoSweep, FastGalvoExperiment - +from .experiment import Experiment from .scans import PropertyScan, RepeatScan, ContinuousScan, FunctionScan, AverageScan # Other objects diff --git a/pyscan/measurement/abstract_experiment.py b/pyscan/measurement/abstract_experiment.py index a02b7b09..5df37f5b 100644 --- a/pyscan/measurement/abstract_experiment.py +++ b/pyscan/measurement/abstract_experiment.py @@ -1,40 +1,57 @@ -# -*- coding: utf-8 -*- import h5py import json from pathlib import Path import numpy as np -import pyscan as ps from threading import Thread as thread from time import strftime -from pyscan.measurement.scans import PropertyScan, RepeatScan, ContinuousScan -from ..general.pyscan_json_encoder import PyscanJSONEncoder -from ..general.item_attribute import ItemAttribute +from pyscan.measurement.scans import PropertyScan +from .pyscan_json_encoder import PyscanJSONEncoder +from itemattribute import ItemAttribute from ..general.is_list_type import is_list_type class AbstractExperiment(ItemAttribute): - '''The base class for experiments. + ''' + The abstract class for experiments. Parameters ---------- - runinfo : :class:`.RunInfo` - RunInfo instance - devices : :class:`.ItemAttribute` + runinfo : ps.RunInfo + Contains all information about the experiment + devices : ItemAttribute ItemAttribute instance containing all experiment devices data_dir : str, optional The path to save the data, defaults to './backup' Attributes ---------- - runinfo : :class:`.RunInfo` - RunInfo instance passed into :class:`.AbstractExperiment`. - devices : :class:`.ItemAttribute` - ItemAttribute instance passed into :class:`.AbstractExperiment`. 
+ runinfo : ps.RunInfo + Contains all information about the experiment + devices : ItemAttribute + ItemAttribute instance containing all experiment devices + + Methods + ------- + # Setup methods + setup_data_dir(data_dir) + check_runinfo() + save_metadata(metadata_name) + + # Data methods + preallocate(data) + reallocate(data) + rolling_average(data) + save_point(data) + + # Running experiment methods + start_thread() + stop() + run() ''' - def __init__(self, runinfo, devices, - data_dir): - '''Constructor method + def __init__(self, runinfo, devices, data_dir): + ''' + Constructor method ''' self.runinfo = runinfo @@ -51,226 +68,29 @@ def setup_data_dir(self, data_dir): ''' if data_dir is None: - data_dir = Path('./backup') + self.runinfo.data_path = Path('./backup') else: - data_dir = Path(data_dir) - self.runinfo.data_path = Path(data_dir) # seems redundant to use Path() again here + self.runinfo.data_path = Path(data_dir) if not self.runinfo.data_path.is_dir(): self.runinfo.data_path.mkdir() - def preallocate(self, data, debug=False): - '''Preallocate data based on the first value of the measurement function - - Parameters - ---------- - data : `.ItemAttribute` - ItemAttribute containing data - ''' - - skip = False - if self.runinfo.continuous: - continuous_scan = self.runinfo.scans[self.runinfo.continuous_scan_index] - if continuous_scan.i > 0: - skip = True - - if not skip: - self.runinfo.measured = [] - for key, value in data.items(): - self.runinfo.measured.append(key) - - save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.long_name) - save_name = str(save_path.absolute()) - - # Create scan arrays - with h5py.File(save_name, 'a') as f: - for s in self.runinfo.scans: - for key, values in s.scan_dict.items(): - self[key] = values - f[key] = values - - # Create arrays in self and make hdf5 version - # Possibilies 1. data is a list, dims are list - # 2. data is a float, dims are list, - # 3. data is list , dims are 0 - # 4. 
datais a float, dims are 0 - if self.runinfo.average_d == -1: - scan_dims = self.runinfo.dims # was () for rp 1, (2,) for rp 2< (3,) for rp3 - ndim = self.runinfo.ndim - - if debug is True: - print("scan dims are: ", scan_dims, " and ndim is: ", ndim) - - else: - scan_dims = self.runinfo.average_dims - ndim = self.runinfo.n_average_dim - - with h5py.File(save_name, 'a') as f: - for name in self.runinfo.measured: - if debug is True: - print(f"for {name} data is : {data[name]}, and ndim is: {ndim}") - if is_list_type(data[name]) and ndim > 0: - if debug is True: - print(f"with measured name {name} preallocate1") - dims = (*scan_dims, * np.array(data[name]).shape) - self[name] = np.zeros(dims) * np.nan - maxshape = tuple(None for _ in dims) - f.create_dataset(name, shape=dims, maxshape=maxshape, chunks=dims, - fillvalue=np.nan, dtype='float64') - elif (not is_list_type(data[name])) and (ndim > 0): - if debug is True: - print(f"with measured name {name} preallocate2") - dims = scan_dims - self[name] = np.zeros(dims) * np.nan - maxshape = tuple(None for _ in dims) - f.create_dataset(name, shape=dims, maxshape=maxshape, chunks=dims, - fillvalue=np.nan, dtype='float64') - elif is_list_type(data[name]) and (ndim == 0): - if debug is True: - print(f"with measured name {name} preallocate3") - dims = np.array(data[name]).shape - self[name] = np.zeros(dims) * np.nan - maxshape = tuple(None for _ in dims) - f.create_dataset(name, shape=dims, maxshape=maxshape, chunks=dims, - fillvalue=np.nan, dtype='float64') - else: - if debug is True: - print(f"with measured name {name} preallocate4") - self[name] = np.nan - f.create_dataset(name, shape=[1, ], maxshape=(None,), chunks=(1,), - fillvalue=np.nan, dtype='float64') - - def reallocate(self, debug=False): - ''' - Reallocates memory for continuous experiments save files and measurement attribute arrays. - ''' - save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.long_name) - save_name = str(save_path.absolute()) - - self.runinfo.new_slices = {} - - with h5py.File(save_name, 'a') as hdf: - if not self.runinfo.stop_continuous(plus_one=True): - for name in self.runinfo.measured: - if name in hdf: - dataset = hdf[name] - current_shape = dataset.shape - new_shape = list(current_shape) - - if len(current_shape) == 1: - if debug is True: - print("dataset shape is: ", dataset.shape, " dset shape[0] is: ", dataset.shape[0]) - new_shape[0] += 1 - if debug is True: - print("new size is: ", new_shape) - dataset.resize(tuple(new_shape)) - if debug is True: - print("resized dset is: ", dataset.shape, " and shape 0: ", dataset.shape[0]) - # fill the new part with nans - dataset[current_shape[0]:] = np.nan - elif len(current_shape) > 1: - # Expand the first dimension, there might be a problem here... 
- if debug is True: - print("old shape part 2 is: ", current_shape) - - dim_index = len(self.runinfo.dims) - 1 - new_shape[dim_index] += 1 - - if debug is True: - print("new shape part 2 is: ", new_shape) - - # Resize the dataset to the new shape - dataset.resize(tuple(new_shape)) - - # Create a mask for the new part - slices = tuple(slice(original_dim, new_dim) for original_dim, - new_dim in zip(current_shape, new_shape)) - mask = np.zeros(new_shape, dtype=bool) - mask[slices] = True - - # Fill the new part with NaN values - dataset[mask] = np.nan - - self.runinfo.new_slices[name] = tuple(slice(current_dim, new_dim) for current_dim, - new_dim in zip(current_shape, new_shape)) - - if debug is True: - print("Original shape:", current_shape) - print("New shape:", dataset.shape) - - else: - assert False, f"cannot reallocate dataset {name}, not found in file." - - # reallocate for the self[key] to accomodate additional data - if debug is True: - print(f"{name} original shape: {self[name].shape} with self[{name}] = {self[name]}") - self[name] = np.pad(self[name], - [(0, new_dim - original_dim) for original_dim, - new_dim in zip(current_shape, new_shape)], - mode='constant', constant_values=np.nan) - if debug is True: - print(f"new {name} shape: {self[name].shape} with self[{name}] = {self[name]}") - - elif self.runinfo.stop_continuous: - self.stop() - - # this function seems redundant/dead, since it is already accomplished by preallocate() - # consider deleting this dead code if it truly smells. - def preallocate_line(self, data): - '''Preallocate line data based on the first value of the measurement function - - Parameters - ---------- - data : :class:`~pyscan.general.itemattribute.ItemAttribute` - ItemAttribute containing data - ''' - - save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.long_name) - save_name = str(save_path.absolute()) - - # Create scan arrays - with h5py.File(save_name, 'a') as f: - for s in self.runinfo.scans: - for key, values in s.scan_dict.items(): - self[key] = values - f[key] = values - - # Create arrays in self and make hdf5 version - # Possibilies 1. data is a list, dims are list - # 2. data is a float, dims are list, - # 3. data is list , dims are 0 - # 4. datais a float, dims are 0 - scan_dims = self.runinfo.dims - - with h5py.File(save_name, 'a') as f: - for name in self.runinfo.measured: - dims = scan_dims - self[name] = np.zeros(dims) * np.nan - f.create_dataset(name, shape=dims, fillvalue=np.nan, dtype='float64') - def check_runinfo(self): ''' Function that is run at the beginning of experiment to ensure runinfo is property formatted. ''' - num_repeat_scans = 0 - num_continuous_scans = 0 + scanned_properties = [] for scan in self.runinfo.scans: scan.check_same_length() if isinstance(scan, PropertyScan): for dev in scan.device_names: prop = scan.prop assert hasattr(self.devices[dev], prop), 'Device {} does not have property {}'.format(dev, prop) - if isinstance(scan, RepeatScan): - num_repeat_scans += 1 - if isinstance(scan, ContinuousScan): - num_continuous_scans += 1 - - if num_repeat_scans > 1: - assert False, "More than one repeat scan detected. This is not allowed." - if num_continuous_scans > 1: - assert False, "More than one continuous scan detected. This is not allowed." 
+ assert f'{dev}_{prop}' not in scanned_properties, \ + 'Property {} is duplicated in the scans'.format(f'{dev}_{prop}') + scanned_properties.append(f'{dev}_{prop}') base_name = strftime("%Y%m%dT%H%M%S") save_path = self.runinfo.data_path / '{}.hdf5'.format(base_name) @@ -280,163 +100,209 @@ def check_runinfo(self): count += 1 save_path = self.runinfo.data_path / f'{base_name}-{count}.hdf5' - self.runinfo.long_name = save_path.stem - - self.runinfo.short_name = self.runinfo.long_name[8:] - + self.runinfo.file_name = save_path.stem self.runinfo.check() - assert hasattr(self.runinfo, 'average_d'), "runinfo did not have average d attribute after checking runinfo" - if self.runinfo.average_d == -1: - assert self.runinfo.has_average_scan is False - elif 0 <= self.runinfo.average_d < 4: - assert self.runinfo.has_average_scan is True - else: - assert False, "runinfo average d incorrect while has average scan is: " + str(self.runinfo.has_average_scan) - return 1 - def get_time(self): - '''Meta function intended to predict the entire time for experiment - Not implemented. + # Data methods + def preallocate(self, data): ''' + Preallocate save data based on the first value of the measurement function - pass - - def save_continuous_scan_dict(self, save_name, debug=False): - ''' - Increments continuous scan_dict to match run count for continuous experiments. Saves this change to file. + Parameters + ---------- + data : ItemAttribute + ItemAttribute instance containing data from self.runinfo.measure_function ''' - for scan in self.runinfo.scans: - if isinstance(scan, ps.ContinuousScan): - run_count = scan.n - - if run_count == 1: - with h5py.File(save_name, 'a') as f: - for s in self.runinfo.scans: - for key, values in s.scan_dict.items(): - if key == 'continuous': - del f[key] - f.create_dataset(key, shape=[1, ], maxshape=(None,), chunks=(1,), - fillvalue=np.nan, dtype='float64') - if debug is True: - print("new dataset created") - self[key] = values - f[key][...] 
= values + + # fill in what was measured + self.runinfo.measured = [] + for key, value in data.items(): + self.runinfo.measured.append(key) + + # get the file save path name + save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.file_name) + save_name = str(save_path.absolute()) + + # Create and save scan arrays with h5py.File(save_name, 'a') as f: for s in self.runinfo.scans: for key, values in s.scan_dict.items(): - if key == 'continuous': - f[key].resize((len(s.scan_dict[key]),)) - self[key] = values - f[key][values[-1]] = values[-1] - - def assign_values(self, data): - if self.runinfo.average_d == -1: - try: - sample = self.runinfo.sparse_points[self.runinfo.indicies] - except: - sample = True - if sample: - for key, value in data.items(): - if is_list_type(self[key]): - self[key][self.runinfo.indicies] = value + self[key] = values + if key == 'iteration': + print(values.shape) + f.create_dataset(key, shape=values.shape, maxshape=(None,), chunks=(100, ),) else: - self[key] = value + f.create_dataset(key, shape=values.shape, maxshape=values.shape, chunks=values.shape,) + f[key][:] = values + + # Get dimensions based off of averaging or not + if self.runinfo.average_index == -1: + scan_dims = self.runinfo.dims + ndim = self.runinfo.ndim + else: + scan_dims = self.runinfo.average_dims + ndim = self.runinfo.n_average_dim + + # Initialize the data arrays + with h5py.File(save_name, 'a') as f: + for name in self.runinfo.measured: + # array of data, at least one non average scan + if is_list_type(data[name]) and ndim > 0: + dims = (*scan_dims, * np.array(data[name]).shape) + self[name] = np.zeros(dims) * np.nan + maxshape = tuple(None for _ in dims) + f.create_dataset(name, shape=dims, maxshape=maxshape, chunks=dims, + fillvalue=np.nan, dtype='float64') + # single data point, at least on non average scan + elif (not is_list_type(data[name])) and (ndim > 0): + dims = scan_dims + self[name] = np.zeros(dims) * np.nan + maxshape = tuple(None for _ in dims) + f.create_dataset(name, shape=dims, maxshape=maxshape, chunks=dims, + fillvalue=np.nan, dtype='float64') + # data is an array, but there are no scan dimension other than average + elif is_list_type(data[name]) and (ndim == 0): + dims = np.array(data[name]).shape + self[name] = np.zeros(dims) * np.nan + maxshape = tuple(None for _ in dims) + f.create_dataset(name, shape=dims, maxshape=maxshape, chunks=dims, + fillvalue=np.nan, dtype='float64') + # data is a single point, but there are no scan dimensions other than average + else: + self[name] = np.nan + f.create_dataset(name, shape=[1, ], maxshape=(None,), chunks=(1,), + fillvalue=np.nan, dtype='float64') + + def reallocate(self, data): + ''' + Reallocates memory for continuous experiments save files and measurement attribute arrays. 
- def assign_continuous_values(self, data, save_name, run_count, continuous_indicies, debug=False): - if all(index == 0 for index in self.runinfo.indicies): - self.save_continuous_scan_dict(save_name, debug) + Parameters + ---------- + data : ItemAttribute + ItemAttribute instance containing data from self.runinfo.measure_function + ''' + save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.file_name) + save_name = str(save_path.absolute()) + + new_slices = {} + + with h5py.File(save_name, 'a') as f: + # Resize the continuous iterations array + continuous_n = self.runinfo.scans[-1].n + f['iteration'].resize((continuous_n,)) + self['iteration'] = self.runinfo.scans[-1].scan_dict['iteration'] + f['iteration'][-1] = self.runinfo.scans[-1].scan_dict['iteration'][-1] + + for name in self.runinfo.measured: + dataset = f[name] + current_shape = dataset.shape + new_shape = list(current_shape) + + if len(current_shape) == 1: + new_shape[0] += 1 + dataset.resize(tuple(new_shape)) + # fill the new part with nans + dataset[current_shape[0]:] = np.nan + elif len(current_shape) > 1: + dim_index = len(self.runinfo.dims) - 1 + new_shape[dim_index] += 1 + dataset.resize(tuple(new_shape)) + slices = tuple(slice( + original_dim, new_dim) for original_dim, + new_dim in zip(current_shape, new_shape)) + mask = np.zeros(new_shape, dtype=bool) + mask[slices] = True + dataset[mask] = np.nan + + new_slices[name] = tuple(slice( + current_dim, new_dim) for current_dim, + new_dim in zip(current_shape, new_shape)) + + self[name] = np.pad(self[name], + [(0, new_dim - original_dim) for original_dim, + new_dim in zip(current_shape, new_shape)], + mode='constant', constant_values=np.nan) + + def rolling_average(self, data): + ''' + Does a rolling average of newly measured data + Parameters + ---------- + data : + ItemAttribute instance of newly measured data point + ''' for key, value in data.items(): - if is_list_type(self[key][0]): - if run_count > 0: - if debug is True: - print(f"before saving point self[{key}] is: {self[key]}") - self[key][continuous_indicies] = value - if debug is True: - print(f"after saving point self[{key}] is: {self[key]}") + + # two cases: 1. self[key] is a list 2. self[key] is not a list + if is_list_type(self[key]): + if is_list_type(value): + value = np.array(value).astype(float) + + if self.runinfo.average_index == 0: + self[key][self.runinfo.average_indicies] = value else: - self[key][self.runinfo.indicies] = value + self[key][self.runinfo.average_indicies] *= ( + self.runinfo.average_index / (self.runinfo.average_index + 1)) + self[key][self.runinfo.average_indicies] += ( + value / (self.runinfo.average_index + 1)) else: - self[key][run_count] = value + if self.runinfo.average_index == 0: + self[key] = value + + else: + self[key] *= ( + self.runinfo.average_index / (self.runinfo.average_index + 1)) + self[key] += ( + value / (self.runinfo.average_index + 1)) def save_point(self, data): ''' Saves single point of data for current scan indicies. Does not return anything. 
''' - save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.long_name) + save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.file_name) save_name = str(save_path.absolute()) - if self.runinfo.continuous: - continuous_scan = self.runinfo.scans[self.runinfo.continuous_scan_index] - run_count = continuous_scan.i - continuous_indicies = self.runinfo.indicies + (run_count,) - if self.runinfo.average_d >= 0: - continuous_indicies = self.runinfo.average_indicies + (run_count,) + if self.runinfo.has_average_scan: + indicies = self.runinfo.average_indicies else: - run_count = 0 - stop = self.runinfo.stop_continuous() + indicies = self.runinfo.indicies - if not self.runinfo.continuous and self.runinfo.average_d == -1: - self.assign_values(data) - - elif self.runinfo.continuous and not stop and self.runinfo.average_d == -1: - self.assign_continuous_values(data, save_name, run_count, continuous_indicies) - - with h5py.File(save_name, 'a') as f: - if not stop: - for key in self.runinfo.measured: - if not is_list_type(self[key]): - f[key][:] = self[key] - else: - try: - original_file_shape = self[key].shape - original_file_shape[0] = original_file_shape[0] - run_count - except: - pass - if np.array([original_file_shape == self[key].shape]).all(): - if run_count > 0: - f[key][continuous_indicies] = self[key][continuous_indicies] - else: - f[key][:] = self[key][:] - elif self.runinfo.average_d == -1: - f[key][self.runinfo.average_indicies, ...] = self[key][self.runinfo.average_indicies, ...] - else: - f[key][self.runinfo.indicies, ...] = self[key][self.runinfo.indicies, ...] - - def save_row(self): - '''Saves full scan0 of data at once. Does not return anything. - ''' - - save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.long_name) - save_name = str(save_path.absolute()) + for key, value in data.items(): + if is_list_type(self[key]): + self[key][indicies] = value + else: + self[key] = value with h5py.File(save_name, 'a') as f: for key in self.runinfo.measured: - if not is_list_type(self[key]): - f[key][:] = self[key] - elif np.array([f[key].shape == self[key].shape]).all(): - f[key][:] = self[key][:] - elif self.runinfo.average_d == -1: - f[key][:, self.runinfo.line_indicies, ...] = self[key][self.runinfo.line_indicies, ...] + if is_list_type(self[key]): + f[key][*indicies, ...] = self[key][*indicies, ...] else: - f[key][:, self.runinfo.line_indicies, ...] = self[key][self.runinfo.line_indicies, ...] + f[key][:] = self[key] - def save_metadata(self): - '''Formats and saves metadata from self.runinfo and self.devices. Does not return anything. + def save_metadata(self, metadata_name): + ''' + Formats and saves metadata to the hdf5 file + Parameters + ---------- + metadata_name : str + Name of the metadata to be saved, ex. 
"runinfo", "devices" ''' - save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.long_name) + save_path = self.runinfo.data_path / '{}.hdf5'.format(self.runinfo.file_name) save_name = str(save_path.absolute()) with h5py.File(save_name, 'a') as f: - f.attrs['runinfo'] = json.dumps(self.runinfo, cls=PyscanJSONEncoder) - f.attrs['devices'] = json.dumps(self.devices, cls=PyscanJSONEncoder) + f.attrs[metadata_name] = json.dumps(self[metadata_name], cls=PyscanJSONEncoder) def start_thread(self): - '''Starts experiment as a background thread, this works in conjunction with live plot + ''' + Starts experiment as a background thread, this works in conjunction with live plot ''' self.expt_thread = thread(target=self.run, daemon=True) @@ -444,60 +310,20 @@ def start_thread(self): self.runinfo.running = True def stop(self): - '''Stops the experiment after the next data point is take ensuring that the data + ''' + Stops the experiment after the next data point is take ensuring that the data is saved properly. Sets the associated runinfo.complete setting to 'stopped' and runinfo.running to `False`. ''' self.runinfo.running = False self.runinfo.complete = 'stopped' - # account for redundant run in the case of a continuous expt - if self.runinfo.continuous: - self.runinfo.scans[self.runinfo.continuous_scan_index].i -= 1 - self.runinfo.scans[self.runinfo.continuous_scan_index].n -= 1 - print('Stopping Experiment') def run(self): - '''Meta function the runs the experiment. It is not implemented in AbstractExperiment, - but must be implemented by its inheriting classes such as :class:`.Experiment`. - ''' - - pass - - def setup_runinfo(self): - '''Meta function that setups runinfo based on experiment type. - It is not implemented in AbstractExperiment, but must be implemented - by its inheriting classes such as :class:`.Experiment`. ''' - - pass - - def setup_instruments(self): - '''Meta Function that sets up devices based on experiment type. - It is not implemented in AbstractExperiment, but must be implemented - by its inheriting classes such as :class:`.Experiment`. + Meta function the runs the experiment. It is not implemented in AbstractExperiment, + but must be implemented by its inheriting classes such as :class:`.Experiment`. ''' pass - - def default_trigger_function(self): - '''Default trigger function that is called every scan0 iteration - ''' - - devices = self.devices - - devices.trigger.trigger() - - -# legacy naming convention -class MetaSweep(AbstractExperiment): - ''' - Present for backwards compatibility. Renamed to :class:`.AbstractExperiment`. - ''' - - def __init__(self, runinfo, devices, data_dir): - warning_msg = ("Use of legacy nomenclature detected but no longer supported.\n" - + "You entered MetaSweep, use AbstractExperiment instead.") - raise DeprecationWarning(f"\033[93m*** WARNING! ***: {warning_msg} \033[0m") - assert False, f"\033[93m*** WARNING! ***: {warning_msg} \033[0m" diff --git a/pyscan/measurement/chart_recorder.py b/pyscan/measurement/chart_recorder.py deleted file mode 100644 index 0d497304..00000000 --- a/pyscan/measurement/chart_recorder.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -from time import sleep -from ..general.stack_or_append import stack_or_append -from pyscan.measurement.abstract_experiment import AbstractExperiment - - -class ChartRecorder(AbstractExperiment): - '''Class to run single loop repeatedly. - It inherits from :class:`.AbstractExperiment`. 
- - Parameters - ---------- - runinfo: :class:`.Runinfo` - Runinfo instance. The Runinfo loop containing the dependent variable - that you want to average should be an instance of - :class:`.AverageScan`. - There should be only one dependent variable to be averaged. - The loops representing independent variables can be instances of - :class:`.PropertyScan`. - devices : - ItemAttribute instance containing all experiment devices - data_dir : str, optional - The path to save the data, defaults to './backup' - verbose: bool, optional - Indicates whether to print status updates, defaults to `False` - - ''' - - def __init__(self, runinfo, devices, data_dir=None, verbose=False): - super().__init__(runinfo, devices, data_dir) - - def run(self): - '''Runs the experiment while locking the console - ''' - - self.check_runinfo() - self.setup_instruments() - # save instrument settings - self.save_metadata() - - sleep(self.runinfo.initial_pause) - - self.get_time() - - self.runinfo.running = True - - # Use for loop, but break if self.runinfo.running=False - - i = 0 - - if self.runinfo.loop0.n == 0: - niter = np.inf - else: - niter = self.runinfo.loop0.n - - while i < niter: - self.runinfo.loop0.i = i - self.runinfo.loop0.iterate(i, self.devices) - sleep(self.runinfo.loop0.dt) - - data = self.runinfo.measure_function(self) - - if np.all(np.array(self.runinfo.indicies) == 0): - - for key, value in data.items(): - self.runinfo.measured.append(key) - self[key] = [] - - if self.runinfo.save is True: - self.preallocate(data) - - for key, value in data.items(): - self[key] = stack_or_append(self[key], value) - - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - i += 1 - - self.runinfo.running = False - - if 'end_function' in list(self.runinfo.keys()): - self.runinfo.end_function(self) diff --git a/pyscan/measurement/experiment.py b/pyscan/measurement/experiment.py index e153e971..70023dfc 100644 --- a/pyscan/measurement/experiment.py +++ b/pyscan/measurement/experiment.py @@ -1,292 +1,70 @@ -# -*- coding: utf-8 -*- + from time import sleep -import pyscan as ps -from pyscan.measurement.abstract_experiment import AbstractExperiment -from ..general.is_list_type import is_list_type +from .abstract_experiment import AbstractExperiment +from ..general.delta_product import delta_product import numpy as np -from datetime import datetime class Experiment(AbstractExperiment): ''' - Experiment class that takes data after each scan0 iteration. Inherits from - `.AbstractExperiment`. + Experiment class that takes data after each scan0 iteration. Parameters ---------- - runinfo: :class:`pyscan.measurement.runinfo.Runinfo` - Runinfo instance. The Runinfo scan containing the dependent variable - that you want to average should be an instance of - :class:`AverageScan`. - There should be only one dependent variable to be averaged. - The scans representing independent variables can be instances of - :class:`PropertyScan`. 
- devices : + runinfo : ps.RunInfo instance + Contains all information about the experiment + devices : ItemAttribute instance ItemAttribute instance containing all experiment devices data_dir : str, optional The path to save the data, defaults to './backup' verbose: bool, optional Indicates whether to print status updates, defaults to `False` + + Methods + ------- + start_thread() + run() + stop() ''' def __init__(self, runinfo, devices, data_dir=None, verbose=False, time=False): - '''Constructor method + ''' + Constructor method ''' super().__init__(runinfo, devices, data_dir) - self.runinfo.time = time - - def generic_experiment(self): - if self.runinfo.time: - for i in range(6): - self.runinfo['t{}'.format(i)] = np.zeros(self.runinfo.dims) - - t0 = (datetime.now()).timestamp() - - # Use for scan, but break if self.runinfo.running=False - for m in self.runinfo.scan3.iterator(): - self.runinfo.scan3.i = m - self.runinfo.scan3.iterate(m, self.devices) - sleep(self.runinfo.scan3.dt) - - for k in self.runinfo.scan2.iterator(): - self.runinfo.scan2.i = k - self.runinfo.scan2.iterate(k, self.devices) - sleep(self.runinfo.scan2.dt) - - for j in self.runinfo.scan1.iterator(): - self.runinfo.scan1.i = j - self.runinfo.scan1.iterate(j, self.devices) - sleep(self.runinfo.scan1.dt) - - for i in self.runinfo.scan0.iterator(): - self.runinfo.scan0.i = i - indicies = self.runinfo.indicies - - if self.runinfo.time: - self.runinfo.t0[indicies] = (datetime.now()).timestamp() - - self.runinfo.scan0.iterate(i, self.devices) - - if self.runinfo.time: - self.runinfo.t1[indicies] = (datetime.now()).timestamp() - - sleep(self.runinfo.scan0.dt) - - if self.runinfo.time: - self.runinfo.t2[indicies] = (datetime.now()).timestamp() - - data = self.runinfo.measure_function(self) - - if self.runinfo.time: - self.runinfo.t3[indicies] = (datetime.now()).timestamp() - - if np.all(np.array(self.runinfo.indicies) == 0): - self.preallocate(data) - - if self.runinfo.time: - self.runinfo.t4[indicies] = (datetime.now()).timestamp() - - self.save_point(data) - - if self.runinfo.time: - self.runinfo.t5[indicies] = (datetime.now()).timestamp() - - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if isinstance(self.runinfo.scan0, ps.ContinuousScan): - self.reallocate() - - # Check if complete, stopped early - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if isinstance(self.runinfo.scan1, ps.ContinuousScan): - self.reallocate() - - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if isinstance(self.runinfo.scan2, ps.ContinuousScan): - self.reallocate() - - if self.runinfo.verbose: - print('Scan {}/{} Complete'.format(m + 1, self.runinfo.scan3.n)) - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if isinstance(self.runinfo.scan3, ps.ContinuousScan): - self.reallocate() - - self.runinfo.complete = True - self.runinfo.running = False - - if self.runinfo.time: - try: - self.runinfo.dt0 = [0] + [self.runinfo.t0[i] - - self.runinfo.t0[i - 1] - for i in range(1, len(self.runinfo.t0))] - except Exception: - pass - self.runinfo.dt1 = self.runinfo.t1 - self.runinfo.t0 - self.runinfo.dt2 = self.runinfo.t2 - self.runinfo.t1 - self.runinfo.dt3 = self.runinfo.t3 - self.runinfo.t2 - self.runinfo.dt4 = self.runinfo.t4 - self.runinfo.t3 - self.runinfo.dt5 = self.runinfo.t5 - self.runinfo.t4 - self.runinfo.dttotal = self.runinfo.t5 - self.runinfo.t0 - self.runinfo.total_run_time = np.sum(self.runinfo.dttotal) - 
self.runinfo.total_time = (datetime.now()).timestamp() - t0 - - if 'end_function' in list(self.runinfo.keys()): - self.runinfo.end_function(self) - - def average_experiment(self): - for scan in self.runinfo.scans: - if isinstance(scan, ps.AverageScan) and (scan.n == 1): - print("n_average for average scan is 1. Running generic experiment instead of average experiment.") - self.generic_experiment() - return - - # Use for scan, but break if self.runinfo.running=False - for m in self.runinfo.scan3.iterator(): - self.runinfo.scan3.i = m - self.runinfo.scan3.iterate(m, self.devices) - sleep(self.runinfo.scan3.dt) - - for k in self.runinfo.scan2.iterator(): - self.runinfo.scan2.i = k - self.runinfo.scan2.iterate(k, self.devices) - sleep(self.runinfo.scan2.dt) - - for j in self.runinfo.scan1.iterator(): - self.runinfo.scan1.i = j - self.runinfo.scan1.iterate(j, self.devices) - sleep(self.runinfo.scan1.dt) - - for i in self.runinfo.scan0.iterator(): - self.runinfo.scan0.i = i - self.runinfo.scan0.iterate(i, self.devices) - sleep(self.runinfo.scan0.dt) - - data = self.runinfo.measure_function(self) - - # if on the first row of data, log the data names in self.runinfo.measured - if np.all(np.array(self.runinfo.indicies) == 0): - self.preallocate(data) - - self.rolling_average(data) + def run(self): - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break + self.check_runinfo() - self.save_point(data) + self.save_metadata('runinfo') + self.save_metadata('devices') - if isinstance(self.runinfo.scan0, ps.ContinuousScan): - self.reallocate() + sleep(self.runinfo.initial_pause) - # self.save_row(data) + self.runinfo.running = True - # Check if complete, stopped early - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break + for indicies, deltas in delta_product(self.runinfo.iterators): + for scan, i, d in zip(self.runinfo.scans[::-1], indicies[::-1], deltas[::-1]): + scan.iterate(self, i, d) - if isinstance(self.runinfo.scan1, ps.ContinuousScan): - self.reallocate() + data = self.runinfo.measure_function(self) - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break + if np.all(np.array(indicies) == 0): + self.preallocate(data) + elif self.runinfo.has_continuous_scan: + self.reallocate(data) + elif self.runinfo.has_average_scan: + self.rolling_average(data) - if isinstance(self.runinfo.scan2, ps.ContinuousScan): - self.reallocate() + self.save_point(data) - print('Scan {}/{} Complete'.format(m + 1, self.runinfo.scan3.n)) if self.runinfo.running is False: self.runinfo.complete = 'stopped' break - if isinstance(self.runinfo.scan3, ps.ContinuousScan): - self.reallocate() - self.runinfo.complete = True self.runinfo.running = False if 'end_function' in list(self.runinfo.keys()): self.runinfo.end_function(self) - - def rolling_average(self, data): - '''Does a rolling average of newly measured data - - Parameters - ---------- - data : - ItemAttribute instance of newly measured data point - ''' - for key, value in data.items(): - - # two cases: 1. self[key] is a list 2. 
self[key] is not a list - if is_list_type(self[key]): - if is_list_type(value): - value = np.array(value).astype(float) - - if self.runinfo.average_index == 0: - self[key][self.runinfo.average_indicies] = value - else: - self[key][self.runinfo.average_indicies] *= ( - self.runinfo.average_index / (self.runinfo.average_index + 1)) - self[key][self.runinfo.average_indicies] += ( - value / (self.runinfo.average_index + 1)) - else: - if self.runinfo.average_index == 0: - self[key] = value - - else: - self[key] *= ( - self.runinfo.average_index / (self.runinfo.average_index + 1)) - self[key] += ( - value / (self.runinfo.average_index + 1)) - - def run(self): - '''Runs the experiment while locking the console - ''' - - self.check_runinfo() - - self.setup_instruments() - # save instrument settings - self.save_metadata() - - sleep(self.runinfo.initial_pause) - - self.get_time() - - self.runinfo.running = True - - if self.runinfo.average_d == -1: - self.generic_experiment() - - elif 0 <= self.runinfo.average_d < 4: - self.average_experiment() - - else: - assert False, "self.average_d not setup correctly by check_runinfo method" - - -# legacy naming convention -class Sweep(Experiment): - ''' - Present for backwards compatibility. Renamed to :class:`.Experiment`. - ''' - - def __init__(self, runinfo, devices, data_dir=None, verbose=False, time=False): - warning_msg = ("Use of legacy nomenclature detected but no longer supported.\n" - + "You entered Sweep, use Experiment instead.") - raise DeprecationWarning(f"\033[93m*** WARNING! ***: {warning_msg} \033[0m") - assert False, f"\033[93m*** WARNING! ***: {warning_msg} \033[0m" diff --git a/pyscan/measurement/fast_galvo_experiment.py b/pyscan/measurement/fast_galvo_experiment.py deleted file mode 100644 index 3e66e3dc..00000000 --- a/pyscan/measurement/fast_galvo_experiment.py +++ /dev/null @@ -1,159 +0,0 @@ -# -*- coding: utf-8 -*- -from time import sleep -from pyscan.measurement.abstract_experiment import AbstractExperiment -from ..general.item_attribute import ItemAttribute -import numpy as np -# import nidaqmx - - -class FastGalvoExperiment(AbstractExperiment): - '''Setup a point by point measurement. - It inherits from :class:`pyscan.measurement.abstract_experiment.AbstractExperiment`. - - Parameters - ---------- - runinfo: :class:`pyscan.measurement.runinfo.Runinfo` - Runinfo instance. The Runinfo loop containing the dependent variable - that you want to average should be an instance of - :class:`AverageScan`. - There should be only one dependent variable to be averaged. - The loops representing independent variables can be instances of - :class:`PropertyScan`. - devices : - ItemAttribute instance containing all experiment devices - data_dir : str, optional - The path to save the data, defaults to './backup' - verbose: bool, optional - Indicates whether to print status updates, defaults to `False` - - ''' - - def __init__(self, runinfo, devices, data_dir=None, verbose=False): - '''Constructor method - ''' - super().__init__(runinfo, devices, data_dir) - - self.runinfo.measure_function = self.line_counts - - def setup_instruments(self): - '''TODO - ''' - runinfo = self.runinfo - devices = self.devices - - dev = devices[self.runinfo.loop0['device_names'][0]] - xrange = list(self.runinfo.loop0.scan_dict.values())[0] - - # ######### should this be sweep or experiment? 
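# Illustrative sketch, not a definition from this PR: the new Experiment.run() shown
# earlier in this diff consumes delta_product(self.runinfo.iterators), which is defined
# elsewhere in the change set. A plausible minimal version, assuming it yields
# (indices, deltas) where deltas[k] is nonzero only when axis k advanced since the
# previous tuple (so each scan's iterate() only touches hardware when its own index
# changes), and assuming finite iterators:
from itertools import product


def delta_product_sketch(iterators):
    previous = None
    # product() varies its last argument fastest, so reverse the iterators to make
    # scan0 (the first iterator) the innermost, fastest-varying axis.
    for reversed_indices in product(*[list(it) for it in reversed(iterators)]):
        indices = tuple(reversed(reversed_indices))
        if previous is None:
            deltas = tuple(1 for _ in indices)   # first point: every scan sets its value
        else:
            deltas = tuple(int(new != old) for new, old in zip(indices, previous))
        previous = indices
        yield indices, deltas


# Example: two scans of length 2 and 3 -> 6 points; each outer axis only reports a
# nonzero delta on the iterations where its index actually changes.
for indices, deltas in delta_product_sketch([range(2), range(3)]):
    print(indices, deltas)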
- dev.legacy_sweep_mode(xrange, runinfo.srate, 5) - devices.pb.setup_single_ttl( - ['counter', 'awg'], - ['aom'], - total_time=runinfo.loop0.n / runinfo.srate * 1.05) - devices.counter.setup_timed_buffer( - 1 / runinfo.srate, runinfo.loop0.n, runinfo.loop1.n) - - sleep(0.2) - - def end_function(self): - '''TODO - ''' - devices = self.devices - - devices.x.dc_mode(0) - devices.y.dc_mode(0) - - devices.counter.get_counts(1e-6) - - def run(self): - '''Runs the experiment while locking the console - ''' - self.check_runinfo() - self.setup_instruments() - # save instrument settings - self.save_metadata() - - sleep(self.runinfo.initial_pause) - - self.get_time() - - self.runinfo.running = True - - # Use for loop, but break if self.runinfo.running=False - for m in range(self.runinfo.loop3.n): - self.runinfo.loop3.i = m - self.runinfo.loop3.iterate(m, self.devices) - sleep(self.runinfo.loop3.dt) - - for k in range(self.runinfo.loop2.n): - self.runinfo.loop2.i = k - self.runinfo.loop2.iterate(k, self.devices) - sleep(self.runinfo.loop2.dt) - - for j in range(self.runinfo.loop1.n): - self.runinfo.loop1.i = j - self.runinfo.loop1.iterate(j, self.devices) - sleep(self.runinfo.loop1.dt) - - data = self.runinfo.measure_function(self) - - if np.all(np.array(self.runinfo.indicies) == 0): - for key, value in data.items(): - self.runinfo.measured.append(key) - self.preallocate_line(data) - - for key, value in data.items(): - if self.runinfo.ndim == 1: - self[key] = np.array(value) - else: - self[key][:, self.runinfo.indicies[1::]] = np.reshape(np.array(value), (-1, 1)) - - self.save_row() - - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - # Check if complete, stopped early - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if self.runinfo.verbose: - print('Scan {}/{} Complete'.format(m + 1, self.runinfo.loop3.n)) - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - self.runinfo.complete = True - self.runinfo.running = False - - if 'end_function' in list(self.runinfo.keys()): - self.runinfo.end_function(self) - - def line_counts(self, expt): - '''TODO - ''' - runinfo = expt.runinfo - devices = expt.devices - - d = ItemAttribute() - - devices.pb.start() - - sleep(runinfo.loop0.n / runinfo.srate * 1.01) - - for i in range(5): - if int(devices.counter.query('DATA:POIN?')) == runinfo.loop0.n: - d.data = devices.counter.read_data_points() - break - else: - sleep(0.05) - devices.pb.reset() - - return d - - -# legacy naming convention -class FastGalvoSweep(FastGalvoExperiment): - pass diff --git a/pyscan/measurement/fast_stage_experiment.py b/pyscan/measurement/fast_stage_experiment.py deleted file mode 100644 index 9fd6fb9b..00000000 --- a/pyscan/measurement/fast_stage_experiment.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- -from time import sleep -from pyscan.measurement.abstract_experiment import AbstractExperiment -from ..general.item_attribute import ItemAttribute -import numpy as np -# import nidaqmx - - -class FastStageExperiment(AbstractExperiment): - '''Setup a point by point measurement. - It inherits from :class:`pyscan.measurement.abstract_experiment.AbstractExperiment`. - - Parameters - ---------- - runinfo: :class:`pyscan.measurement.runinfo.Runinfo` - Runinfo instance. The Runinfo loop containing the dependent variable - that you want to average should be an instance of - :class:`AverageScan`. - There should be only one dependent variable to be averaged. 
- The loops representing independent variables can be instances of - :class:`PropertyScan`. - devices : - ItemAttribute instance containing all experiment devices - data_dir : str, optional - The path to save the data, defaults to './backup' - verbose: bool, optional - Indicates whether to print status updates, defaults to `False` - - ''' - - def __init__(self, runinfo, devices, data_dir=None, verbose=False): - '''Constructor method - ''' - super().__init__(runinfo, devices, data_dir) - - self.runinfo.measure_function = self.line_counts - - def setup_instruments(self): - '''TODO - ''' - runinfo = self.runinfo - devices = self.devices - - chan = self.runinfo.loop0.prop - - if chan == 'x': - chan = 1 - elif chan == 'y': - chan = 2 - elif chan == 'z': - chan = 3 - - xrange = list(self.runinfo.loop0.scan_dict.values())[0] - runinfo.fast_values = xrange - - runinfo.start = xrange[0] - runinfo.stop = xrange[-1] - delta = xrange[1] - xrange[0] - d = runinfo.stop - runinfo.start - - runinfo.vel0, runinfo.acc = devices.stage.get_channel_velocity_parameters(1) # in mm/s - - n_points = int(np.abs((runinfo.start - runinfo.stop) / delta)) - - t = n_points / runinfo.srate - runinfo.vel = round(np.abs(d / t), 5) - - runinfo.scan_time = t - runinfo.fast_chan = chan - - def run(self): - '''Runs the experiment while locking the console - ''' - self.check_runinfo() - self.setup_instruments() - # save instrument settings - self.save_metadata() - - sleep(self.runinfo.initial_pause) - - self.get_time() - - self.runinfo.running = True - - # Use for loop, but break if self.runinfo.running=False - for m in range(self.runinfo.loop3.n): - self.runinfo.loop3.i = m - self.runinfo.loop3.iterate(m, self.devices) - sleep(self.runinfo.loop3.dt) - - for k in range(self.runinfo.loop2.n): - self.runinfo.loop2.i = k - self.runinfo.loop2.iterate(k, self.devices) - sleep(self.runinfo.loop2.dt) - - for j in range(self.runinfo.loop1.n): - self.runinfo.loop1.i = j - self.runinfo.loop1.iterate(j, self.devices) - sleep(self.runinfo.loop1.dt) - - data = self.runinfo.measure_function(self) - - if np.all(np.array(self.runinfo.indicies) == 0): - for key, value in data.items(): - self.runinfo.measured.append(key) - self.preallocate_line(data) - - for key, value in data.items(): - if self.runinfo.ndim == 1: - self[key] = np.array(value) - else: - self[key][:, self.runinfo.indicies[1::]] = np.reshape(np.array(value), (-1, 1)) - - self.save_row() - - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - # Check if complete, stopped early - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if self.runinfo.verbose: - print('Scan {}/{} Complete'.format(m + 1, self.runinfo.loop3.n)) - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - self.runinfo.complete = True - self.runinfo.running = False - - if 'end_function' in list(self.runinfo.keys()): - self.runinfo.end_function(self) - - def line_counts(self, expt): - '''TODO - ''' - runinfo = expt.runinfo - devices = expt.devices - - if runinfo.fast_chan == 1: - chan = 'x' - chan_fast = 'xfast' - elif runinfo.fast_chan == 2: - chan = 'y' - chan_fast = 'yfast' - elif runinfo.fast_chan == 3: - chan = 'z' - chan_fast = 'zfast' - - d = ItemAttribute() - - devices.stage.reset_speed() - - devices.stage[chan] = runinfo.start - - sleep(2) - - devices.stage.set_channel_velocity_parameters( - runinfo.fast_chan, - runinfo.vel, - runinfo.acc) - - devices.stage[chan_fast] = runinfo.stop - - if runinfo.counter == 
'picoharp': - d.counts0 = [] - # d.counts1 = [] - # d.counts_sum = [] - for i in range(runinfo.loop0.n): - counts0 = devices.ph.get_count_rate_0() - # counts1 = devices.ph.get_count_rate_1() - # counts_sum = counts0 + counts1 - d.counts0.append(counts0) - # d.counts1.append(counts1) - # d.counts_sum.append(counts_sum) - sleep(1 / runinfo.srate) - else: - d.counts = devices.counter.get_n_binary_points(runinfo.loop0.n) - - devices.stage.reset_speed() - - return d - - -# legacy naming convention -class FastStageSweep(FastStageExperiment): - pass diff --git a/pyscan/general/get_pyscan_version.py b/pyscan/measurement/get_pyscan_version.py similarity index 100% rename from pyscan/general/get_pyscan_version.py rename to pyscan/measurement/get_pyscan_version.py diff --git a/pyscan/measurement/load_experiment.py b/pyscan/measurement/load_experiment.py index cb693e2d..9e79b5f2 100644 --- a/pyscan/measurement/load_experiment.py +++ b/pyscan/measurement/load_experiment.py @@ -1,10 +1,10 @@ -# -*- coding: utf-8 -*- + import h5py import pickle import json from pathlib import Path -from ..general.item_attribute import ItemAttribute -from ..general.pyscan_json_decoder import PyscanJSONDecoder +from itemattribute import ItemAttribute +from .pyscan_json_decoder import PyscanJSONDecoder def load_experiment(file_name): diff --git a/pyscan/general/pyscan_json_decoder.py b/pyscan/measurement/pyscan_json_decoder.py similarity index 94% rename from pyscan/general/pyscan_json_decoder.py rename to pyscan/measurement/pyscan_json_decoder.py index 1d1fa0f8..8410ef9c 100644 --- a/pyscan/general/pyscan_json_decoder.py +++ b/pyscan/measurement/pyscan_json_decoder.py @@ -1,5 +1,5 @@ import json -from .item_attribute import ItemAttribute +from itemattribute import ItemAttribute class PyscanJSONDecoder(json.JSONDecoder): diff --git a/pyscan/general/pyscan_json_encoder.py b/pyscan/measurement/pyscan_json_encoder.py similarity index 66% rename from pyscan/general/pyscan_json_encoder.py rename to pyscan/measurement/pyscan_json_encoder.py index 59289476..1cbd3cf4 100644 --- a/pyscan/general/pyscan_json_encoder.py +++ b/pyscan/measurement/pyscan_json_encoder.py @@ -1,6 +1,6 @@ import json import numpy as np -from .item_attribute import ItemAttribute +from itemattribute import ItemAttribute from ..drivers.instrument_driver import InstrumentDriver from pyvisa.resources import ( # FirewireInstrument, @@ -47,35 +47,19 @@ def default(self, obj, debug=False): if type(obj) is type: return obj.__name__ elif isinstance(obj, (InstrumentDriver, ItemAttribute)): - if debug is True: - print(f"obj {obj} was instance of InstrumentDriver and or ItemAttribute.") return obj.__dict__ elif isinstance(obj, (range, tuple)): - if debug is True: - print(f"obj {obj} was instance of {type(obj)}.") return list(obj) - # Handle numpy integers elif isinstance(obj, np.integer): - if debug is True: - print(f"Object {obj} is a numpy integer, converting to int.") return int(obj) - # Handle numpy floating values elif isinstance(obj, np.floating): - if debug is True: - print(f"Object {obj} is a numpy floating value, converting to float.") return float(obj) - # Handle numpy arrays elif isinstance(obj, np.ndarray): - if debug is True: - print(f"Object {obj} is a numpy array, converting to list.") return obj.tolist() elif callable(obj): - if debug is True: - print(f"obj {obj} is a function, returning source code.") - return inspect.getsource(obj) # Talk with Andy about this and perhaps implementing in load_expt? 
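# Usage sketch (illustrative): with the trimmed default() above, numpy scalars/arrays,
# ranges, paths and ItemAttribute/driver instances serialize to JSON-friendly types,
# and anything unrecognized now becomes the string "could not serialize object"
# instead of raising. The import path follows the file rename in this diff.
import json
import numpy as np
from itemattribute import ItemAttribute
from pyscan.measurement.pyscan_json_encoder import PyscanJSONEncoder

settings = ItemAttribute()
settings.voltage = np.float64(0.5)   # numpy float -> float
settings.trace = np.arange(3)        # ndarray     -> list
settings.points = range(4)           # range       -> list

print(json.dumps(settings, cls=PyscanJSONEncoder))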
- elif isinstance(obj, (WindowsPath, Path)): # This covers both WindowsPath and PosixPath - if debug is True: - print(f"obj {obj} is a Path or WindowsPath, returning string of the path.") + print(obj) + return inspect.getsource(obj) + elif isinstance(obj, (WindowsPath, Path)): return str(obj) elif type(obj) is type(iter(range(1))): return list(obj) @@ -91,8 +75,6 @@ def default(self, obj, debug=False): # VXIInstrument, ), ): - if debug is True: - print(f"obj {obj} is a pyvisa instrument, returning resource name.") return obj.resource_name else: - return super().default(obj) + return "could not serialize object" diff --git a/pyscan/measurement/raster_experiment.py b/pyscan/measurement/raster_experiment.py deleted file mode 100644 index 55e4356a..00000000 --- a/pyscan/measurement/raster_experiment.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- -from time import sleep -from pyscan.measurement.abstract_experiment import AbstractExperiment -from ..general.is_list_type import is_list_type -import numpy as np -from datetime import datetime - - -class RasterExperiment(AbstractExperiment): - '''Experiment class that takes data after each loop0 iteration but reverses - loop0's direction after each loop1 iteration. - It inherits from :class:`pyscan.measurement.abstract_experiment.AbstractExperiment`. - - Parameters - ---------- - runinfo: :class:`pyscan.measurement.runinfo.Runinfo` - Runinfo instance. The Runinfo loop containing the dependent variable - that you want to average should be an instance of - :class:`AverageScan`. - There should be only one dependent variable to be averaged. - The loops representing independent variables can be instances of - :class:`PropertyScan`. - devices : - ItemAttribute instance containing all experiment devices - data_dir : str, optional - The path to save the data, defaults to './backup' - verbose: bool, optional - Indicates whether to print status updates, defaults to `False` - - ''' - - def __init__(self, runinfo, devices, data_dir=None, verbose=False, time=False): - super().__init__(runinfo, devices, data_dir) - - self.runinfo.time = time - - def run(self): - '''Runs the experiment while locking the console - ''' - - self.check_runinfo() - self.setup_instruments() - # save instrument settings - self.save_metadata() - - sleep(self.runinfo.initial_pause) - - self.get_time() - - self.runinfo.running = True - - if self.runinfo.time: - for i in range(6): - self.runinfo['t{}'.format(i)] = np.zeros(self.runinfo.dims) - - t0 = (datetime.now()).timestamp() - # Use for loop, but break if self.runinfo.running=False - for m in range(self.runinfo.loop3.n): - self.runinfo.loop3.i = m - self.runinfo.loop3.iterate(m, self.devices) - sleep(self.runinfo.loop3.dt) - - for k in range(self.runinfo.loop2.n): - self.runinfo.loop2.i = k - self.runinfo.loop2.iterate(k, self.devices) - sleep(self.runinfo.loop2.dt) - - for j in range(self.runinfo.loop1.n): - self.runinfo.loop1.i = j - self.runinfo.loop1.iterate(j, self.devices) - sleep(self.runinfo.loop1.dt) - - if j % 2 == 0: - range0D = range(self.runinfo.loop0.n) - else: - range0D = reversed(range(self.runinfo.loop0.n)) - for i in range0D: - self.runinfo.loop0.i = i - indicies = self.runinfo.indicies - - if self.runinfo.time: - self.runinfo.t0[indicies] = (datetime.now()).timestamp() - - self.runinfo.loop0.iterate(i, self.devices) - - if self.runinfo.time: - self.runinfo.t1[indicies] = (datetime.now()).timestamp() - - sleep(self.runinfo.loop0.dt) - - if self.runinfo.time: - self.runinfo.t2[indicies] = 
(datetime.now()).timestamp() - - data = self.runinfo.measure_function(self) - - if self.runinfo.time: - self.runinfo.t3[indicies] = (datetime.now()).timestamp() - - if np.all(np.array(self.runinfo.indicies) == 0): - for key, value in data.items(): - self.runinfo.measured.append(key) - self.preallocate(data) - - for key, value in data.items(): - if is_list_type(self[key]): - self[key][self.runinfo.indicies] = value - else: - self[key] = value - - if self.runinfo.time: - self.runinfo.t4[indicies] = (datetime.now()).timestamp() - - self.save_point() - - if self.runinfo.time: - self.runinfo.t5[indicies] = (datetime.now()).timestamp() - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - # Check if complete, stopped early - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if self.runinfo.verbose: - print('Scan {}/{} Complete'.format(m + 1, self.runinfo.loop3.n)) - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - self.runinfo.complete = True - self.runinfo.running = False - - if self.runinfo.time: - try: - self.runinfo.dt0 = [0] + [self.runinfo.t0[i] - - self.runinfo.t0[i - 1] - for i in range(1, len(self.runinfo.t0))] - except: - pass - self.runinfo.dt1 = self.runinfo.t1 - self.runinfo.t0 - self.runinfo.dt2 = self.runinfo.t2 - self.runinfo.t1 - self.runinfo.dt3 = self.runinfo.t3 - self.runinfo.t2 - self.runinfo.dt4 = self.runinfo.t4 - self.runinfo.t3 - self.runinfo.dt5 = self.runinfo.t5 - self.runinfo.t4 - self.runinfo.dttotal = self.runinfo.t5 - self.runinfo.t0 - self.runinfo.total_run_time = np.sum(self.runinfo.dttotal) - self.runinfo.total_time = (datetime.now()).timestamp() - t0 - - if 'end_function' in list(self.runinfo.keys()): - self.runinfo.end_function(self) - - -# legacy naming convention -class RasterSweep(RasterExperiment): - pass diff --git a/pyscan/measurement/run_info.py b/pyscan/measurement/run_info.py index 83f9d1b9..0674402d 100644 --- a/pyscan/measurement/run_info.py +++ b/pyscan/measurement/run_info.py @@ -1,126 +1,161 @@ -# -*- coding: utf-8 -*- -from ..general.item_attribute import ItemAttribute -from ..general.get_pyscan_version import get_pyscan_version -from .scans import PropertyScan, AverageScan + +from itemattribute import ItemAttribute +from .get_pyscan_version import get_pyscan_version +from .scans import PropertyScan, AverageScan, ContinuousScan import pyscan as ps +import re +import numpy as np class RunInfo(ItemAttribute): ''' - Object that contains information of how to run the experiment. Inherits from :class:`.ItemAttribute`. - This is generally used as an input parameter to Experiment classes. - - You must set the desired number of scans to a type of Scan before setting RunInfo as a parameter - in a Experiment class. - Set the scans in order from `scan0` to `scan3` - for example, if you are experimenting over 2 variables, - set `scan0` and `scan1`. Do not set `scan0` and `scan2`. + Object that contains information of how to run the experiment. Attributes ---------- - scan0, scan1, scan2, scan3 : :class:`.PropertyScan`, :class:`.AverageScan`, :class:`.RepeatScan` - or :class:`.FunctionScan` - Set each scan to a scan object representing one independent experimental variable. The scan property or - function will be scanned during the experiment, with scan0 being the innermost scan. 
- Defaults to :class:`PropertyScan({}, prop=None)<.PropertyScan>`, - which indicates that the scan will not be used. - measured : - Array that contains the names of collected data, defined by the `measure_function` return object. + scan<#>: ps.PropertyScan, ps.RepeatScan, ps.FunctionScan, ps.FunctionScan + Instance of ps.AbstractScan to a scan object representing one independent experimental variable. + The scan property or function will be scanned during the experiment, with scan0 being the innermost scan loop. + measured : list + List that contains the names of collected data, defined by the `measure_function` return. measure_function : func User-defined function that controls how to measure data. - It should accept a :class:`.Experiment` object as its only parameter, - and returns an :class:`.ItemAttribute` object containing the measured data. The names of the measured data, + It should accept a ps.Experiment object as its only parameter, + and returns an ItemAttribute object containing the measured data. The names of the measured data, each being an attribute of the return object, will appear as keys of the experiment after it is run. - trigger_function : func - User-defined function that controls triggering of instruments initial_pause : float Pause before first setting instruments in seconds, defaults to 0.1. - average_d : int - scan index used by an Experiment with one of its scans being :class:`.AverageScan`. - It is used to track which scan to average over, defaults to -1. - Automatically is set to the correct index by :meth:`.RunInfo.check` method, which is automatically - called by Experiment objects `run()` methods. - verbose : bool - Flag to print status information, defaults to `False`. - version : str + _pyscan_version : str Current version of pyscan to be saved as metadata. + (Properties) + scans : list + Returns list of all scans in sequential order. + dims : tuple + Returns tuple containing the length of each scan, by increasing scan number + average_dims : tuple + Returns a tuple containing the lenght of each scan, excluding the average scan + ndim : int + Returns number of scans + has_average_scan : bool + True if an average scan is present, False otherwise + indicies : tuple + Returns tuple of the current scan iteration indicies + average_indicies : tuple + Returns tuple of the current scan iteration indicies, excludingb averaged scan + average_index : int + Returns the index of the scan to be averaged, -1 if no average scan is present + + Methods + ------- + check() + check_sequential_scans() + check_property_scan() + check_repeat_scan() + check_average_scan() + check_continuous_scan() ''' def __init__(self): - """ Constructor method """ - self.scan0 = PropertyScan({}, prop=None) - self.scan1 = PropertyScan({}, prop=None) - self.scan2 = PropertyScan({}, prop=None) - self.scan3 = PropertyScan({}, prop=None) + Constructor method + """ - self.static = {} self.measured = [] - self.measure_function = None - self.trigger_function = None self.initial_pause = 0.1 - self.average_d = -1 - # Assumed not a continuous expt. If there is a continuous scan this will be set to true in self.check() - self.continuous = False - - self.verbose = False self._pyscan_version = get_pyscan_version() def check(self): - '''Checks to see if runinfo is properly formatted. Called by Experiment object's `run()` methods. + ''' + Checks to see if runinfo is properly formatted. Called by Experiment object's `run()` methods. 
- Automatically sets `self.average_d` to the correct scan index (i.e., the scan which contains an + Automatically sets `self.average_index` to the correct scan index (i.e., the scan which contains an instance of `.AverageScan`) to average over. ''' + + self.check_sequential_scans() + + self.check_property_scans() + + self.check_repeat_scan() + + self.check_average_scan() + + self.check_continuous_scan() + + def check_sequential_scans(self): + + scan_indicies = [] + for key in self.__dict__.keys(): + if 'scan' in key: + if len(re.findall(r'\d+', key)) > 0: + scan_indicies.append(int(re.findall(r'\d+', key)[0])) + + scan_indicies.sort() + delta_indices = [scan_indicies[i] - scan_indicies[i - 1] for i in range(1, len(scan_indicies))] + assert np.all(np.array(delta_indices) == 1), 'Scan indicies are not sequential' + + def check_property_scans(self): + ''' + Checks to see if there are any errors with PropertyScans + ''' + + scanned_properties = [] + for scan in self.scans: + if isinstance(scan, PropertyScan): + for dev in scan.device_names: + assert f'{dev}_{scan.prop}' not in scanned_properties, \ + 'Property {} is duplicated in the scans'.format(f'{dev}_{scan.prop}') + scanned_properties.append(f'{dev}_{scan.prop}') + + def check_average_scan(self): + ''' + Checks to see if there is an average scan present and sets `self.average_index` to the correct index. + ''' + # find the scan set to average scan (if any) and determine the index index = 0 num_av_scans = 0 for scan in self.scans: if isinstance(scan, AverageScan): - self.average_d = index num_av_scans += 1 index += 1 - # if no average scans found set average_d to -1 - if num_av_scans == 0: - self.average_d = -1 - # throw an error if more than one average scan is found if num_av_scans > 1: assert False, "More than one average scan is not allowed" - # make sure there are no empty scans inbetween used scans. - used_scan_found = False - scans = self.scans - for i in range(len(scans)): - count_down = len(scans) - i - 1 - if used_scan_found is False: - if not (isinstance(scans[count_down], PropertyScan) and len(scans[count_down].input_dict) == 0): - used_scan_found = True - used_scan_index = count_down - else: - assert not (isinstance(scans[count_down], PropertyScan) and len(scans[count_down].input_dict) == 0), \ - (f"Found empty PropertyScan (scan{count_down}) below used scan (scan{used_scan_index}).\n" - + "Scans must be populated in sequential order.") + def check_repeat_scan(self): + ''' + Checks to see if there a repeat scan present and if there are more the one repeat scans + ''' + + repeat_count = 0 + for scan in self.scans: + if isinstance(scan, ps.RepeatScan): + repeat_count += 1 + assert repeat_count <= 1, "More than one repeat scan is not allowed" + + def check_continuous_scan(self): # find the scan set to continuous scan (if any) and determine the index + n_continuous_scans = 0 for i, scan in enumerate(self.scans): if isinstance(scan, ps.ContinuousScan): - self.continuous = True - self.continuous_scan_index = i + n_continuous_scans += 1 + + assert n_continuous_scans <= 1, "More than one continuous scan detected. Only one continuous scan is allowed." # If there is a ContinuousScan, ensure it is the highest level scan - if self.continuous: - for i in range(self.continuous_scan_index + 1, len(self.scans)): - assert isinstance(self.scans[i], PropertyScan) and len(self.scans[i].input_dict) == 0, \ - f"ContinuousScan found at scan{self.continuous_scan_index} but is not the highest level scan." 
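# Illustrative sketch: the reworked check_continuous_scan() above allows at most one
# ContinuousScan and requires it to be the highest-numbered scan. Class and helper
# names (ps.RunInfo, ps.PropertyScan, ps.ContinuousScan, ps.drange) follow their use
# in the tests elsewhere in this repository.
import pyscan as ps

runinfo = ps.RunInfo()
runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage')
runinfo.scan1 = ps.ContinuousScan(n_max=10)   # outermost scan -> allowed
runinfo.check()                               # passes

bad = ps.RunInfo()
bad.scan0 = ps.ContinuousScan(n_max=10)       # continuous scan below a property scan
bad.scan1 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage')
# bad.check() -> AssertionError: 'Error, continuous scan must be the last scan'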
+ if self.has_continuous_scan: + assert self.continuous_index == (self.ndim - 1), 'Error, continuous scan must be the last scan' def stop_continuous(self, plus_one=False): stop = False - if self.continuous: + if self.has_continuous_scan: continuous_scan = self.scans[self.continuous_scan_index] if hasattr(continuous_scan, 'n_max'): if plus_one is False: @@ -132,92 +167,49 @@ def stop_continuous(self, plus_one=False): return stop + # Regular properties @property def scans(self): - ''' Returns array of all scans ''' - return [self.scan0, self.scan1, self.scan2, self.scan3] + Returns array of all scans + ''' + i = 0 + scans = [] + while hasattr(self, f'scan{i}'): + scans.append(getattr(self, f'scan{i}')) + i += 1 + return scans @property def dims(self): - ''' Returns tuple containing the length of each scan, in order from scan0 to scan3, and excludes scans of size 1 ''' - dims = (self.scan0.n, - self.scan1.n, - self.scan2.n, - self.scan3.n) - dims = [n for n in dims if n != 1] - if self.continuous: - if len(dims) - 1 == self.continuous_scan_index: - dims = dims[:-1] - dims.append(1) + Returns tuple containing the length of each scan, in order from scan0 to scan3, and excludes scans of size 1 + ''' + dims = [scan.n for scan in self.scans] self._dims = tuple(dims) return self._dims - @property - def average_dims(self): - ''' Returns tuple containing the length of each scan, excluding scans of size 1 and the averaged scan - ''' - self._average_dims = tuple(drop(self.dims, self.average_d)) - - return self._average_dims - - @property - def ndim(self): - ''' Returns number of non 1 sized scans - ''' - self._ndim = len(self.dims) - return self._ndim - - @property - def n_average_dim(self): - ''' Returns number of scans that are neither size-1 nor average scans - ''' - self._n_average_dim = len(self.average_dims) - return self._n_average_dim - @property def indicies(self): ''' Returns tuple of the current scan iteration indicies, ''' - self._indicies = (self.scan0.i, - self.scan1.i, - self.scan2.i, - self.scan3.i) - self._indicies = self._indicies[:self.ndim] - if self.continuous: - self._indicies = self._indicies[:-1] + self._indicies = [scan.i for scan in self.scans] return tuple(self._indicies) @property - def line_indicies(self): - self._line_indicies = ( - self.scan1.i, - self.scan2.i, - self.scan3.i) - self._line_indicies = self._line_indicies[:self.ndim] - return tuple(self._line_indicies) - - @property - def average_indicies(self): - ''' Returns tuple of the current scan iteration indicies, - excluding scans of size 1 and averaged scan. These are the active - scans not to be averaged. Used by `.AverageExperiment`. + def ndim(self): ''' - self._average_indicies = drop(self.indicies, self.average_d) - return tuple(self._average_indicies) - - @property - def average_index(self): - ''' Returns the index of the scan to be averaged. Used by `pyscan.AverageExperiment`. + Returns number of scans ''' - self._average_index = self.indicies[self.average_d] - return self._average_index + self._ndim = len(self.dims) + return self._ndim + # Properties modified due to the presence of an average scan @property def has_average_scan(self): - ''' Returns a boolean of whether or not an average scan is present. + ''' + Returns a boolean of whether or not an average scan is present. 
''' num_av_scans = 0 for scan in self.scans: @@ -231,79 +223,88 @@ def has_average_scan(self): return self._has_average_scan - ####################### LEGACY SECTION ######################## - # This section is set up to alert users who try to use legacy nomenclature - # of the updated naming convention that they must use instead. @property - def loop0(self): - legacy_warning() - return self.scan0 - - @loop0.setter - def loop0(self, value): - legacy_warning() - self.scan0 = value + def average_index(self): + ''' + Returns the index of the scan to be averaged. Used by `pyscan.AverageExperiment`. + ''' + if self.has_average_scan: + i = 0 + for scan in self.scans: + if isinstance(scan, AverageScan): + return i + i += 1 + self._average_index = self.indicies[self.average_dims] + return self._average_index + else: + return -1 @property - def loop1(self): - legacy_warning() - return self.scan1 - - @loop1.setter - def loop1(self, value): - legacy_warning() - self.scan1 = value + def average_dims(self): + ''' + Returns tuple containing the length of each scan, excluding scans of size 1 and the averaged scan + ''' + if self.average_index != -1: + self._average_dims = list(self.dims) + self._average_dims.pop(self.average_index) + return tuple(self._average_dims) + else: + return () @property - def loop2(self): - legacy_warning() - return self.scan2 - - @loop2.setter - def loop2(self, value): - legacy_warning() - self.scan2 = value + def average_indicies(self): + ''' + Returns tuple of the current scan iteration indicies, excluding scans of size 1 and averaged scan. + Used by `.AverageExperiment`. + ''' + if self.has_average_scan: + self._average_indicies = list(self.indicies) + self._average_indicies.pop(self.average_index) + return tuple(self._average_indicies) + else: + return () @property - def loop3(self): - legacy_warning() - return self.scan3 - - @loop3.setter - def loop3(self, value): - legacy_warning() - self.scan3 = value + def n_average_dim(self): + ''' + Returns number of scans that are neither size-1 nor average scans + ''' + self._n_average_dim = len(self.average_dims) + return self._n_average_dim + # Properties based on the presence of a continuous scan @property - def loops(self): + def has_continuous_scan(self): ''' - Returns array of all scans + Returns a boolean of whether or not an continuous scan is present. ''' - legacy_warning() - return [self.scan0, self.scan1, self.scan2, self.scan3] - - -def drop(array, index): - ''' - Drops an object at `index` in `array` + num_av_scans = 0 + for scan in self.scans: + if isinstance(scan, ContinuousScan): + num_av_scans += 1 - Parameters - ---------- - array : list or numpy.array - Array for object to be dropped - index : int - Index of object to be dropped - - Returns - list - The array minus the dropped value - ''' + if num_av_scans > 0: + self._has_continuous_scan = True + else: + self._has_continuous_scan = False - return list(array[0:index]) + list(array[index + 1:]) + return self._has_continuous_scan + @property + def continuous_index(self): + ''' + Returns the index of the scan to be averaged. Used by `pyscan.AverageExperiment`. 
+ ''' + if self.has_continuous_scan: + i = 0 + for i in range(self.ndim): + if isinstance(self.scans[i], ContinuousScan): + return i + self._continuous_index = i + else: + self._continuous_index = -1 + return self._continuous_index -def legacy_warning(): - warning_msg = ("Use of legacy nomenclature detected but no longer supported.\n" - + "You entered 'loop', use 'scan' instead.") - raise DeprecationWarning(f"\033[93m*** WARNING! ***: {warning_msg} \033[0m") - assert False, f"\033[93m*** WARNING! ***: {warning_msg} \033[0m" + @property + def iterators(self): + return [scan.iterator() for scan in self.scans] diff --git a/pyscan/measurement/scans.py b/pyscan/measurement/scans.py index 0973b5ba..c5bb5ec7 100644 --- a/pyscan/measurement/scans.py +++ b/pyscan/measurement/scans.py @@ -1,12 +1,14 @@ -# -*- coding: utf-8 -*- + import numpy as np +from time import sleep +from itemattribute import ItemAttribute from ..general.same_length import same_length -from ..general.item_attribute import ItemAttribute +from ..general.infinite_iterator import infinite_iterator class AbstractScan(ItemAttribute): ''' - Meta class for different scan types. Inherits from `.ItemAttribute`. + Abstract class for different scan types. Inherits from `.ItemAttribute`. ''' def iterate(self, index, devices): @@ -21,13 +23,6 @@ def check_same_length(self): ''' pass - # This must be a method and not an attribute as iterators can only be used once - def iterator(self): - ''' - Returns an iterator for the scan over its n range. - ''' - return iter(range(self.n)) - class PropertyScan(AbstractScan): ''' @@ -52,17 +47,17 @@ def __init__(self, input_dict, prop, dt=0): ''' self.prop = prop self.scan_dict = {} - self.input_dict = input_dict for device, array in input_dict.items(): self.scan_dict['{}_{}'.format(device, prop)] = array self.device_names = list(input_dict.keys()) self.dt = dt - self.check_same_length() self.i = 0 - def iterate(self, index, devices): + self.check_same_length() + + def iterate(self, expt, i, d): ''' Changes `prop` of the listed `devices` to the value of `PropertyScan`'s input_dict at the given `index`. @@ -70,11 +65,16 @@ def iterate(self, index, devices): :param devices: ItemAttribute instance of experimental devices :type devices: ItemAttribute ''' + + self.i = i + + if d == 0: + return 0 + for dev in self.device_names: - try: - devices[dev][self.prop] = self.scan_dict[dev + '_' + self.prop][index] - except Exception: - continue + expt.devices[dev][self.prop] = self.scan_dict[dev + '_' + self.prop][i] + + sleep(self.dt) def check_same_length(self): ''' @@ -85,10 +85,16 @@ def check_same_length(self): if same_length(list(self.scan_dict.values())): self.n = len(list(self.scan_dict.values())[0]) # self.n is the length of the input_dict arrays. else: - assert 0, 'Values are not of the same length' + raise Exception('PropertyScan Values are not of the same length') else: self.n = 1 # n=1 is required to allow the run() function to proceed atleast once. + def iterator(self): + ''' + The following iterates over n + ''' + return range(self.n) + class FunctionScan(AbstractScan): ''' @@ -111,14 +117,14 @@ def __init__(self, function, values, dt=0): self.scan_dict = {} - self.scan_dict[function.__name__] = values + self.scan_dict[function.__name__] = np.array(values) self.function = function self.dt = dt self.i = 0 self.n = len(values) - def iterate(self, index, devices): + def iterate(self, expt, i, d): ''' Executes function(self.values[index]). Used by a Experiment class's run() function. 
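# Illustrative sketch: FunctionScan drives an arbitrary callable instead of a device
# property; with the new signature, iterate(expt, i, d) (body in the following hunk)
# records the index and only calls the function, then sleeps dt, when d is nonzero.
import pyscan as ps


def set_field(value):
    # stand-in for code that talks to an instrument
    print(f'setting field to {value}')


runinfo = ps.RunInfo()
runinfo.scan0 = ps.FunctionScan(set_field, ps.drange(0, 0.5, 2), dt=0.01)

runinfo.scan0.iterate(expt=None, i=2, d=1)   # calls set_field(1.0)
runinfo.scan0.iterate(expt=None, i=3, d=0)   # delta of 0 -> index updates, no call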
@@ -130,11 +136,24 @@ def iterate(self, index, devices): devices: Not used ''' - self.function(self.scan_dict[self.function.__name__][index]) + + self.i = i + + if d == 0: + return 0 + + self.function(self.scan_dict[self.function.__name__][i]) + sleep(self.dt) def check_same_length(self): pass + def iterator(self): + ''' + The following iterates over n + ''' + return range(self.n) + class RepeatScan(AbstractScan): '''Class for repeating inner loops. @@ -154,7 +173,7 @@ def __init__(self, nrepeat, dt=0): assert nrepeat > 0, "nrepeat must be > 0" assert nrepeat != np.inf, "nrepeat is np.inf, make a continuous scan instead." self.scan_dict = {} - self.scan_dict['repeat'] = list(range(nrepeat)) + self.scan_dict['repeat'] = np.array(range(nrepeat)) self.device_names = ['repeat'] self.dt = dt @@ -163,14 +182,17 @@ def __init__(self, nrepeat, dt=0): self.i = 0 - def iterate(self, index, devices): + def iterate(self, expt, i, d): ''' Iterates repeat loop. ''' - # Need a method here to iterate infinitely/continuously. + self.i = i - pass + if d == 0: + return 0 + + sleep(self.dt) def check_same_length(self): ''' @@ -178,6 +200,12 @@ def check_same_length(self): ''' return 1 + def iterator(self): + ''' + The following iterates over n + ''' + return range(self.n) + class ContinuousScan(AbstractScan): ''' @@ -192,44 +220,42 @@ class ContinuousScan(AbstractScan): Maximum number of iterations to run. If not specified, the scan will run indefinitely. ''' - def __init__(self, dt=0, n_max=None): + def __init__(self, n_max=None, dt=0): self.scan_dict = {} - self.scan_dict['continuous'] = [] + self.scan_dict['iteration'] = np.ndarray((0)) - self.device_names = ['continuous'] + self.device_names = ['iteration'] self.dt = dt - self.run_count = 0 - # essentially run_count - self.n = 1 - # current experiment number index self.i = 0 - if n_max is not None: - self.n_max = n_max + self.n = 1 - def iterate(self, index, devices): - self.run_count += 1 + assert n_max is None or n_max > 0, "n_max must be > 0 or None" - if hasattr(self, "stop_at"): - if not self.n_max <= self.i: - self.scan_dict['continuous'].append(self.i) - else: - self.scan_dict['continuous'].append(self.i) + self.n_max = n_max + + def iterate(self, expt, i, d): + + self.i = i + self.n = i + 1 + + if d == 0: + return 0 + + self.scan_dict['iteration'] = np.append(self.scan_dict['iteration'], i) + expt.iteration = self.scan_dict['iteration'] + + sleep(self.dt) def iterator(self): ''' - The following iterator increments continuous scan i and n by one each time continuously. + The following iterates over n_max if n_max is specified, otherwise it iterates indefinitely. 
''' - def incrementing_n(): - while True: - yield self.i - self.i += 1 - self.n += 1 - iterator = iter(incrementing_n()) - - # returns an infinite iterator, overwriting Abstract scans default iterator - return iterator + if self.n_max is not None: + return range(self.n_max) + else: + return infinite_iterator() class AverageScan(AbstractScan): @@ -251,19 +277,31 @@ def __init__(self, n_average, dt=0): self.scan_dict = {} self.n = n_average - self.scan_dict['average'] = list(self.iterator()) + self.scan_dict['average'] = list(range(n_average)) self.device_names = ['average'] self.i = 0 self.dt = dt - def iterate(self, index, devices): + def iterate(self, expt, i, d): ''' Place holder, does nothing ''' - pass + + self.i = i + + if d == 0: + return 0 + + sleep(self.dt) def check_same_length(self): ''' Not used ''' return 1 + + def iterator(self): + ''' + The following iterates over n + ''' + return range(self.n) diff --git a/pyscan/measurement/sparse_experiment.py b/pyscan/measurement/sparse_experiment.py deleted file mode 100644 index fdc30098..00000000 --- a/pyscan/measurement/sparse_experiment.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- coding: utf-8 -*- -from time import sleep -from pyscan.measurement.abstract_experiment import AbstractExperiment -from ..general.is_list_type import is_list_type -import numpy as np - - -class SparseExperiment(AbstractExperiment): - '''Experiment class that takes data after each scan0 iteration if - runinfo.sparse_points[self.runinfo.indicies] = 1, allowing the experiment - to skip taking data points. Inherits from :class:`pyscan.measurement.abstract_experiment.AbstractExperiment`. - - Parameters - ---------- - runinfo: :class:`pyscan.measurement.runinfo.Runinfo` - Runinfo instance. The Runinfo scan containing the dependent variable - that you want to average should be an instance of - :class:`AverageScan`. - There should be only one dependent variable to be averaged. - The scans representing independent variables can be instances of - :class:`PropertyScan`. 
- devices : - ItemAttribute instance containing all experiment devices - data_dir : str, optional - The path to save the data, defaults to './backup' - verbose: bool, optional - Indicates whether to print status updates, defaults to `False` - - ''' - - def __init__(self, runinfo, devices, data_dir=None, verbose=False): - '''Constructor method - ''' - super().__init__(runinfo, devices, data_dir) - - def run(self): - '''Runs the experiment while locking the console - ''' - self.check_runinfo() - self.setup_instruments() - # save instrument settings - self.save_metadata() - - sleep(self.runinfo.initial_pause) - - self.get_time() - - self.runinfo.running = True - - # Use for scan, but break if self.runinfo.running=False - for m in range(self.runinfo.scan3.n): - self.runinfo.scan3.i = m - self.runinfo.scan3.iterate(m, self.devices) - sleep(self.runinfo.scan3.dt) - - for k in range(self.runinfo.scan2.n): - self.runinfo.scan2.i = k - self.runinfo.scan2.iterate(k, self.devices) - sleep(self.runinfo.scan2.dt) - - for j in range(self.runinfo.scan1.n): - self.runinfo.scan1.i = j - self.runinfo.scan1.iterate(j, self.devices) - sleep(self.runinfo.scan1.dt) - - for i in range(self.runinfo.scan0.n): - self.runinfo.scan0.i = i - sample = self.runinfo.sparse_points[self.runinfo.indicies] - - if (sample) or (np.all(np.array(self.runinfo.indicies) == 0)): - self.runinfo.scan0.iterate(i, self.devices) - sleep(self.runinfo.scan0.dt) - - data = self.runinfo.measure_function(self) - if np.all(np.array(self.runinfo.indicies) == 0): - for key in data.keys(): - self.runinfo.measured.append(key) - self.preallocate(data) - - if sample: - for key, value in data.items(): - if is_list_type(self[key]): - self[key][self.runinfo.indicies] = value - else: - self[key] = value - - self.save_point(data) - - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - # Check if complete, stopped early - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - if self.runinfo.verbose: - print('Scan {}/{} Complete'.format(m + 1, self.runinfo.scan3.n)) - if self.runinfo.running is False: - self.runinfo.complete = 'stopped' - break - - self.runinfo.complete = True - self.runinfo.running = False - - if 'end_function' in list(self.runinfo.keys()): - self.runinfo.end_function(self) - - -# legacy naming convention -class SparseSweep(SparseExperiment): - pass diff --git a/pyscan/plotting/basic_plots.py b/pyscan/plotting/basic_plots.py index 216d894a..1eba65c0 100644 --- a/pyscan/plotting/basic_plots.py +++ b/pyscan/plotting/basic_plots.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import matplotlib.pyplot as plt import numpy as np from .plot_generator import PlotGenerator @@ -27,8 +27,8 @@ def plot1D(expt, **kwarg): pg = PlotGenerator(expt, d=1, **kwarg) # this ensures that continuous expts are plotted correctly when an n_max parameter is implemented. - if expt.runinfo.continuous is True and expt.runinfo.running is False: - n_max = expt.runinfo.scans[expt.runinfo.continuous_scan_index].n_max + if expt.runinfo._has_continuous_scan is True and expt.runinfo.running is False: + n_max = expt.runinfo.scans[expt.runinfo.continuous_index].n_max if len(pg.x) - 1 == n_max: # give a time buffer to make sure the most recent state of the expt is registered. # otherwise the plot will break as it finishes. May need to extend for low performing processors. 
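# Usage sketch (illustrative): plot1D takes the experiment object and forwards keyword
# arguments to PlotGenerator; the guard above reads n_max from the continuous scan via
# the renamed runinfo.continuous_index so the final frame of a finished continuous
# experiment still renders. The direct module import is an assumption; plot1D may also
# be re-exported at the package level.
from pyscan.plotting.basic_plots import plot1D

# expt = ps.Experiment(runinfo, devices); expt.start_thread()
# while expt.runinfo.running:
#     plot1D(expt)    # redraw as data arrives; safe on the final frame as well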
@@ -67,7 +67,7 @@ def plot2D(expt, **kwarg): # this ensures that continuous expts are plotted correctly when an n_max parameter is implemented. if expt.runinfo.continuous is True and expt.runinfo.running is False: - n_max = expt.runinfo.scans[expt.runinfo.continuous_scan_index].n_max + n_max = expt.runinfo.scans[expt.runinfo.continuous_index].n_max if len(pg.y) - 1 == n_max: # give a time buffer to make sure the most recent state of the expt is registered. # otherwise the plot will break as it finishes. May need to extend for low performing processors. diff --git a/pyscan/plotting/jupyter_tools.py b/pyscan/plotting/jupyter_tools.py index cbb05186..b9dbc87c 100644 --- a/pyscan/plotting/jupyter_tools.py +++ b/pyscan/plotting/jupyter_tools.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import os import glob from pathlib import Path diff --git a/pyscan/plotting/live_plot.py b/pyscan/plotting/live_plot.py index 80fb1a13..fe8e9002 100644 --- a/pyscan/plotting/live_plot.py +++ b/pyscan/plotting/live_plot.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import matplotlib.pyplot as plt from IPython import display from time import sleep diff --git a/pyscan/plotting/oscilloscope.py b/pyscan/plotting/oscilloscope.py index 78f83f86..1e906299 100644 --- a/pyscan/plotting/oscilloscope.py +++ b/pyscan/plotting/oscilloscope.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import matplotlib.pyplot as plt import numpy as np from time import sleep, time diff --git a/pyscan/plotting/plot_generator.py b/pyscan/plotting/plot_generator.py index eabfe5df..9e75fb85 100644 --- a/pyscan/plotting/plot_generator.py +++ b/pyscan/plotting/plot_generator.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + from ..general.set_difference import set_difference from ..general.first_string import first_string import numpy as np @@ -152,27 +152,27 @@ def get_title(self): ''' if not self.expt.runinfo.running: - return '{}, {}'.format(self.data_name, self.expt.runinfo.long_name) + return '{}, {}'.format(self.data_name, self.expt.runinfo.file_name) elif self.expt.runinfo.ndim == 4: - return '{}/{}, {}, {}'.format(self.expt.runinfo.scan4.i, - self.expt.runinfo.scan4.n, - self.data_name, - self.expt.runinfo.long_name) - elif self.expt.runinfo.ndim == 3: return '{}/{}, {}, {}'.format(self.expt.runinfo.scan3.i, self.expt.runinfo.scan3.n, self.data_name, - self.expt.runinfo.long_name) - elif self.expt.runinfo.ndim == 2: + self.expt.runinfo.file_name) + elif self.expt.runinfo.ndim == 3: return '{}/{}, {}, {}'.format(self.expt.runinfo.scan2.i, self.expt.runinfo.scan2.n, self.data_name, - self.expt.runinfo.long_name) - elif self.expt.runinfo.ndim == 1: + self.expt.runinfo.file_name) + elif self.expt.runinfo.ndim == 2: return '{}/{}, {}, {}'.format(self.expt.runinfo.scan1.i, self.expt.runinfo.scan1.n, self.data_name, - self.expt.runinfo.long_name) + self.expt.runinfo.file_name) + elif self.expt.runinfo.ndim == 1: + return '{}/{}, {}, {}'.format(self.expt.runinfo.scan0.i, + self.expt.runinfo.scan0.n, + self.data_name, + self.expt.runinfo.file_name) def get_xrange(self): ''' diff --git a/test/general/test_d_range.py b/test/general/test_d_range.py deleted file mode 100644 index e4943120..00000000 --- a/test/general/test_d_range.py +++ /dev/null @@ -1,34 +0,0 @@ -import pyscan as ps -import numpy as np - - -def test_drange(): - - # If the start and end value are the same, give a single valued array - values = ps.drange(1, 0.01, 1) - assert values == [1], "drange(1, 0.01, 1) gave wrong output" - - values = ps.drange(1, 0.01, 1.005) - assert values == [1, 
1.005], "drange(1, 0.01, 1.005) gave wrong output" - - values = ps.drange(1, 0.01, 1.2) - works = np.all(np.round(np.arange(1, 1.21, 0.01), 5) - == np.round(values, 5)) - assert works, "ps.drange(1, 0.01, 1.2) gave wrong output" - - values = ps.drange(1, 0.015, 1.2) - test_values = np.append(np.round(np.arange(1, 1.195, 0.015), 5), 1.2) - assert np.all(values == test_values), "ps.drange(1, 0.015, 1.2) gave wrong output" - - # Negative direcions - values = ps.drange(1.005, 0.01, 1) - assert values == [1.005, 1], "ps.drange(1.005, 0.01, 1) failed" - - values = ps.drange(1.2, 0.01, 1) - works = np.all(np.round(np.arange(1.20, 0.99, -0.01), 5) - == np.round(values, 5)) - assert works, "ps.drange(1.2, 0.01, 1) gave wrong output" - - values = np.round(ps.drange(1.2, 0.015, 1), 5) - test_values = np.round(np.append(np.arange(1.2, 1.005, -0.015), 1), 5) - assert np.all(values == test_values), "ps.drange(1.2, 0.015, 1) gave the wrong output" diff --git a/test/general/test_first_string.py b/test/general/test_first_string.py deleted file mode 100644 index 1610dde8..00000000 --- a/test/general/test_first_string.py +++ /dev/null @@ -1,17 +0,0 @@ -import pyscan as ps -import pytest - - -def test_first_string(): - - str1 = 'a' - str2 = 'b' - str_array = [str1, str2] - - assert ps.first_string(str1) == 'a' - assert ps.first_string(str_array) == 'a' - assert ps.first_string(str_array[::-1]) == 'b' - - with pytest.raises(TypeError): - ps.first_string(0) - ps.first_string([0, 1]) diff --git a/test/general/test_is_list_type.py b/test/general/test_is_list_type.py deleted file mode 100644 index 5831dc5b..00000000 --- a/test/general/test_is_list_type.py +++ /dev/null @@ -1,19 +0,0 @@ -import pyscan as ps -import numpy as np - - -def test_is_list_type(): - - list1 = (1, 2, 3) - list2 = [1, 2, 3] - list3 = np.array([1, 2, 3]) - - notlist1 = 'string' - notlist2 = 2 - - assert ps.is_list_type(list1) - assert ps.is_list_type(list2) - assert ps.is_list_type(list3) - - assert not ps.is_list_type(notlist1) - assert not ps.is_list_type(notlist2) diff --git a/test/general/test_is_numerical_type.py b/test/general/test_is_numerical_type.py deleted file mode 100644 index a3ddbcce..00000000 --- a/test/general/test_is_numerical_type.py +++ /dev/null @@ -1,21 +0,0 @@ -import pyscan as ps -import numpy as np - - -def test_is_numeric_type(): - - num1 = 2 - num2 = 2.1 - - notnum1 = 'string' - notnum2 = (1, 2, 3) - notnum3 = [1, 2, 3] - notnum4 = np.array([1, 2, 3]) - - assert ps.is_numeric_type(num1) - assert ps.is_numeric_type(num2) - - assert not ps.is_numeric_type(notnum1) - assert not ps.is_numeric_type(notnum2) - assert not ps.is_numeric_type(notnum3) - assert not ps.is_numeric_type(notnum4) diff --git a/test/general/test_item_attribute.py b/test/general/test_item_attribute.py deleted file mode 100644 index ec386177..00000000 --- a/test/general/test_item_attribute.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyscan as ps - - -def test_ItemAttribute(): - - ia = ps.ItemAttribute() - - ia.test_prop1 = 3 - ia.test_prop2 = 'str' - - assert ia.test_prop1 == 3 - assert ia['test_prop1'] == 3 - assert ia.test_prop2 == 'str' - assert ia['test_prop2'] == 'str' - - assert list(ia.keys()) == (['test_prop1', 'test_prop2']) - assert list(ia.values()) == [3, 'str'] - assert list(ia.items()) == [('test_prop1', 3), ('test_prop2', 'str')] - - assert hasattr(ia, 'test_prop1') - assert hasattr(ia, 'test_prop2') - - del ia.test_prop1 - assert not hasattr(ia, 'test_prop1') diff --git a/test/legacy/test_legacy.py b/test/legacy/test_legacy.py deleted 
file mode 100644 index d138b545..00000000 --- a/test/legacy/test_legacy.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyscan as ps -import pytest - - -def test_legacy(): - ''' - Ensure that legacy naming convention fails. - ''' - devices = ps.ItemAttribute() - runinfo = ps.RunInfo() - with pytest.raises(Exception): - runinfo.loop0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - with pytest.raises(Exception): - runinfo.loop1 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - with pytest.raises(Exception): - runinfo.loop2 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - with pytest.raises(Exception): - runinfo.loop3 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - with pytest.raises(Exception): - sweep = ps.Sweep(runinfo, devices) - assert isinstance(sweep, ps.Experiment) - with pytest.raises(Exception): - metasweep = ps.MetaSweep(runinfo, devices, None) - assert isinstance(metasweep, ps.AbstractExperiment) diff --git a/test/measurement/test_abstract_experiment.py b/test/measurement/test_abstract_experiment.py deleted file mode 100644 index 7452daf4..00000000 --- a/test/measurement/test_abstract_experiment.py +++ /dev/null @@ -1,283 +0,0 @@ -''' -Pytest functions to test the meta sweep class -''' - - -import pyscan as ps -from pyscan.measurement.abstract_experiment import AbstractExperiment -from pathlib import Path -import random -import numpy as np -import pytest -from io import StringIO -import sys -import shutil -import re -import os - - -# for testing default trigger function with empty function -def empty_function(): - pass - - -# for setting runinfo measure_function to measure 1D data randomly -def measure_point(expt): - d = ps.ItemAttribute() - - d.x = random.random() - - return d - - -# for setting runinfo measure_function to measure (up to) 3D data randomly -def measure_up_to_3D(expt): - d = ps.ItemAttribute() - - d.x1 = random.random() # could make predictable to ensure data is saved properly - d.x2 = [random.random() for i in range(2)] - d.x3 = [[random.random() for i in range(2)] for j in range(2)] - - return d - - -def test_abstract_experiment(): - """ - Testing abstract experiment - - Returns - -------- - None - """ - - def test_ms_diff_inputs(data_dir=None, measure_function=measure_point, allocate='preallocate'): - devices = ps.ItemAttribute() - devices.v1 = ps.TestVoltage() - devices.v2 = ps.TestVoltage() - devices.v3 = ps.TestVoltage() - - runinfo = ps.RunInfo() - - # consider adding and testing for 4 scans since runinfo has 4 by default. Should 3 be allowed by Meta Sweep? 
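# Illustrative sketch: under the reworked RunInfo (earlier in this diff) scans are no
# longer pre-created as empty PropertyScans, so a setup like the removed test above now
# relies on check_sequential_scans() to reject gaps in the scan numbering:
import pyscan as ps

runinfo = ps.RunInfo()
runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage')
runinfo.scan2 = ps.PropertyScan({'v2': ps.drange(0, 0.1, 0.5)}, 'voltage')  # scan1 skipped
# runinfo.check() -> AssertionError: 'Scan indicies are not sequential'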
- runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - runinfo.scan1 = ps.PropertyScan({'v2': ps.drange(0.1, 0.1, 0.5)}, 'voltage') - runinfo.scan2 = ps.PropertyScan({'v3': ps.drange(0.5, 0.1, 0.8)}, 'voltage') - - # dictionary of scans to later verify they were saved according to scans created - scans = {'scan0': 'v1', 'scan1': 'v2', 'scan2': 'v3'} - - runinfo.measure_function = measure_function - - ms = AbstractExperiment(runinfo, devices, data_dir) - - # testing meta sweep's init - assert hasattr(ms, 'runinfo'), "Meta Sweep runinfo not set up" - assert ms.runinfo == runinfo, "Meta Sweep runinfo not set up properly" - - assert hasattr(ms, 'devices'), "Meta Sweep devices not set up" - assert ms.devices == devices, "Meta Sweep devices not set up properly" - - assert hasattr(ms.runinfo, 'data_path'), "Meta Sweep data path not set up" - - # testing meta sweep's setup data dir method - assert callable(ms.setup_data_dir) - ms.setup_data_dir(data_dir) - if data_dir is None: - assert ms.runinfo.data_path == Path('./backup'), "Meta Sweep data path not set up properly" - else: - assert ms.runinfo.data_path == Path(Path(data_dir)), "Meta Sweep data path not set up properly" - assert ms.runinfo.data_path.is_dir() - - # testing meta sweep's check runinfo method - assert callable(ms.check_runinfo) - ms.check_runinfo() - assert ms.check_runinfo() == 1 - - assert hasattr(ms.runinfo, 'long_name'), "Meta Sweep runinfo long name is not initialized by check_runinfo()" - assert isinstance(ms.runinfo.long_name, str), "Meta Sweep runinfo long name is not initialized as a string" - # check that the long name is formatted with values for YYYYMMDDTHHMMSS, and optionally a - followed by digits. - assert re.match(r'^\d{8}T\d{6}(-\d+)?$', ms.runinfo.long_name), "runinfo long_name is not properly formatted" - - assert hasattr(ms.runinfo, 'short_name'), "Meta Sweep runinfo long name is not initialized by check_runinfo()" - assert isinstance(ms.runinfo.short_name, str), "Meta Sweep runinfo short name is not initialized as a string" - assert ms.runinfo.short_name == ms.runinfo.long_name[8:], "Meta Sweep short name is not the correct value" - - # setting file name for loading later - if data_dir is None: - file_name = './backup/' + ms.runinfo.long_name - else: - file_name = data_dir + '/' + ms.runinfo.long_name - - # ############### testing meta sweeps preallocate method here? Or will we be changing to dynamic allocation? 
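# Illustrative sketch: the save/load round trip exercised by this (removed) test -
# save_point()/save_metadata() write an hdf5 file under runinfo.data_path, and
# ps.load_experiment returns its contents as an ItemAttribute. The timestamped file
# stem below is hypothetical.
import pyscan as ps

expt_back = ps.load_experiment('./backup/20240101T120000')
print(list(expt_back.keys()))       # e.g. ['runinfo', 'devices', 'v1_voltage', 'x', ...]
print(expt_back.runinfo.measured)   # names returned by the measure_function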
- data = ms.runinfo.measure_function(ms) - if np.all(np.array(ms.runinfo.indicies) == 0): - if allocate == 'preallocate': - ms.preallocate(data) - elif allocate == 'preallocate_line': - ms.preallocate_line(data) - else: - assert False, "allocate input variable for test not acceptable" - - # testing meta sweep's check runinfo method with bad scan inputs - bad_runinfo = ps.RunInfo() - bad_runinfo.scan0 = ps.PropertyScan({'v8': ps.drange(0, 0.1, 0.1)}, 'voltage') - bad_ms = AbstractExperiment(bad_runinfo, devices, data_dir) - - with pytest.raises(Exception): - bad_ms.check_runinfo(), "Metasweep's check runinfo did not ensure validation of devices and properties" - - # testing meta sweep's check runinfo method with more than 1 repeat scan - bad_runinfo2 = ps.RunInfo() - bad_runinfo2.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - bad_runinfo2.scan1 = ps.RepeatScan(3) - bad_runinfo2.scan2 = ps.RepeatScan(3) - bad_ms2 = AbstractExperiment(bad_runinfo, devices, data_dir) - - with pytest.raises(Exception): - bad_ms2.check_runinfo(), "Metasweep's check runinfo did not flag runinfo with more than one repeat scan" - - # testing meta sweep's get time method *placeholder* - assert callable(ms.get_time) - - # ############# The following saves don't seem to be saving any data to the file, not sure why... - # testing meta sweep's save point method - assert callable(ms.save_point) - ms.save_point(data) - - # testing meta sweep's save row method - assert callable(ms.save_row) - ms.save_row() - - # testing meta sweep's save meta data method - assert callable(ms.save_metadata) - ms.save_metadata() - - # now loading the experiment to check the information was saved properly - temp = ps.load_experiment(file_name) - os.remove(file_name + '.hdf5') - # print("temp dict is: ", temp.__dict__.keys()) - - # test that preallocate and saves functioned as expected based on loaded experiment - if allocate == 'preallocate': - if list(data.__dict__.keys()) == ['x']: - assert temp.x.shape == (2, 5, 5) - assert data.x in temp.x - elif list(data.__dict__.keys()) == ['x1', 'x2', 'x3']: - assert temp.x1.shape == (2, 5, 5) - assert data.x1 in temp.x1 - - assert temp.x2.shape == (2, 5, 5, 2) - assert data.x2 in temp.x2 - - assert temp.x3.shape == (2, 5, 5, 2, 2) - assert data.x3 in temp.x3 - else: - if list(data.__dict__.keys()) == ['x']: - assert temp.x.shape == (2, 5, 5) - assert data.x in temp.x - elif list(data.__dict__.keys()) == ['x1', 'x2', 'x3']: - assert temp.x1.shape == (2, 5, 5) - assert data.x1 in temp.x1 - assert temp.x2.shape == (2, 5, 5) - assert data.x2 in temp.x2 - assert temp.x3.shape == (2, 5, 5) - assert data.x3 in temp.x3 - - assert len(temp.__dict__.keys()) == 5 + len(ms.runinfo.measured) - - # check that the meta data was saved and loaded with expected attributes - assert hasattr(temp, 'runinfo'), "runinfo was not saved/could not be loaded from meta data to temp" - assert hasattr(temp, 'devices'), "devices was not saved/could not be loaded from meta data to temp" - assert hasattr(temp, 'v1_voltage'), "v1_voltage was not saved/could not be loaded from meta data to temp" - assert hasattr(temp, 'v2_voltage'), "v2_voltage was not saved/could not be loaded from meta data to temp" - assert hasattr(temp, 'v3_voltage'), "v3_voltage was not saved/could not be loaded from meta data to temp" - if list(data.__dict__.keys()) == ['x']: - assert hasattr(temp, 'x'), "x was not saved/could not be loaded from meta data to temp" - elif list(data.__dict__.keys()) == ['x1', 'x2', 'x3']: - assert 
hasattr(temp, 'x1'), "x1 was not saved/could not be loaded from meta data to temp" - assert hasattr(temp, 'x2'), "x2 was not saved/could not be loaded from meta data to temp" - assert hasattr(temp, 'x3'), "x3 was not saved/could not be loaded from meta data to temp" - - # could maybe add to and clarify this - assert hasattr(temp.runinfo, 'measured'), "save meta data didn't save runinfo.measured meta data" - assert isinstance(temp.runinfo.measured, list), "save meta data didn't save runinfo.measured as a list" - # order of list should not matter - assert set(temp.runinfo.measured) == set(data.__dict__.keys()), "save meta data failed to save runinfo.measured" - - # check that scans meta data was saved/loaded correctly - assert hasattr(temp.runinfo, 'scan0'), "save meta data didn't save scan0, or it couldn't be loaded" - assert hasattr(temp.runinfo, 'scan1'), "save meta data didn't save scan1, or it couldn't be loaded" - assert hasattr(temp.runinfo, 'scan2'), "save meta data didn't save scan2, or it couldn't be loaded" - for scan in scans: - assert temp.runinfo[scan].prop == 'voltage', "save meta data didn't save " + scan + ".prop correctly" - assert hasattr(temp.runinfo[scan], 'scan_dict'), "save meta data didn't save " + scan + "scan_dict" - assert hasattr(temp.runinfo[scan], 'input_dict'), "save meta data didn't save " + scan + "input_dict" - assert temp.runinfo[scan].device_names == [scans[scan]], "save meta data didn't save " + scan + "devicename" - assert temp.runinfo[scan].dt == 0, "save meta data didn't save " + scan + ".dt correctly" - assert temp.runinfo[scan].i == 0, "save meta data didn't save " + scan + ".i correctly" - - assert temp.runinfo.scan0.n == 2, "save meta data didn't save scan0.n, or it couldn't be loaded" - assert temp.runinfo.scan1.n == 5, "save meta data didn't save scan1.n, or it couldn't be loaded" - assert temp.runinfo.scan2.n == 5, "save meta data didn't save scan2.n, or it couldn't be loaded" - - # check that devices were saved and loaded properly - assert len(temp.devices.__dict__.keys()) == 3, "save meta data didn't save the right number of runinfo.devices" - assert list(temp.devices.__dict__.keys()) == ['v1', 'v2', 'v3'], "save meta data issue saving runinfo.devices" - - # testing meta sweep's start thread method - assert callable(ms.start_thread), "meta sweep's start thread method not callable" - assert not hasattr(ms.runinfo, 'running'), "meta sweep runinfo has running attribute before expected" - ms.start_thread() - - # try to affirm thread is running/ran here... 
threading only showed 1 thread running before and after - assert hasattr(ms.runinfo, 'running'), "meta sweep runinfo does not have running attribute after start thread" - assert ms.runinfo.running is True, "meta sweep's start thread method did not set runinfo running to true" - - # testing meta sweep's stop method - assert callable(ms.stop), "meta sweep's stop method not callable" - assert not hasattr(ms.runinfo, 'complete'), "meta sweep runinfo has complete attribute before expected" - buffer = StringIO() - sys.stdout = buffer - ms.stop() - assert hasattr(ms.runinfo, 'complete'), "meta sweep runinfo does not have complete attribute after stop()" - assert ms.runinfo.running is False, "meta sweep's start thread method did not set runinfo running to false" - assert ms.runinfo.complete == 'stopped', "meta sweep's stop method did not set runinfo complete to stopped" - print_output = buffer.getvalue() - sys.stdout = sys.__stdout__ - assert print_output.strip() == 'Stopping Experiment', "meta sweep's stop method does not print confirmation" - - # test meta sweep's run method *placeholder* - assert callable(ms.run), "meta sweep's run method not callable" - - # test meta sweep's setup runinfo method *placeholder* - assert callable(ms.setup_runinfo), "meta sweep's setup runinfo method not callable" - - # test meta sweep's setup instruments method *placeholder* - assert callable(ms.setup_instruments), "meta sweep's setup instruments method not callable" - - # test meta sweep's default trigger method - assert callable(ms.default_trigger_function), "meta sweep's default trigger method not callable" - with pytest.raises(Exception): - ms.default_trigger_function() - - ms.devices.trigger = ps.ItemAttribute() - ms.devices.trigger.trigger = empty_function - ms.default_trigger_function() - - if data_dir is None: - shutil.rmtree('./backup') - else: - shutil.rmtree(data_dir) - - test_ms_diff_inputs() - test_ms_diff_inputs(data_dir='./backeep') - with pytest.raises(Exception): - # experiments that use preallocate_line such as fast galvo and fast stage behave differenty - # in a way where without refactoring this will not/should not pass. 
- test_ms_diff_inputs(data_dir='./backup', allocate='preallocate_line') - test_ms_diff_inputs(data_dir=None, measure_function=measure_up_to_3D) - test_ms_diff_inputs(data_dir='./backup', measure_function=measure_up_to_3D) - with pytest.raises(Exception): - # This should not work with preallocate_line as is, - # because it doesn't factor data dimension into it's preallocation - test_ms_diff_inputs(data_dir='./backup', measure_function=measure_up_to_3D, allocate='preallocate_line') diff --git a/test/measurement/test_average_experiment.py b/test/measurement/test_average_experiment.py deleted file mode 100644 index 86a9ba88..00000000 --- a/test/measurement/test_average_experiment.py +++ /dev/null @@ -1,281 +0,0 @@ -''' -Pytest functions to test the AverageExperiment experiment class -''' - - -import pyscan as ps -import shutil -from pathlib import Path -from random import random -import numpy as np -import pytest -# import imp -# imp.reload(ps) - - -##################### FUNCTIONS USED BY TEST CASES ##################### - - -# for setting runinfo measure_function to measure 1D data -def measure_point(expt): - d = ps.ItemAttribute() - - d.x = random() - - return d - - -# for setting up prestring based on loaded to differentiate loaded experiment error strings -def loaded_modifier(loaded): - if (loaded is True): - return 'loaded ' - else: - return '' - - -# for checking that the experiment has data measurement attribute -def check_has_single_data(expt, loaded=False): - is_loaded = loaded_modifier(loaded) - assert hasattr(expt, 'x'), is_loaded + "experiment missing x attribute after running" - - -# for setting runinfo measure_function to measure (up to) 3D data -def measure_up_to_3D(expt): - d = ps.ItemAttribute() - - d.x1 = random() # could make predictable to ensure data is saved properly - d.x2 = [random() for i in range(2)] - d.x3 = [[random() for i in range(2)] for j in range(2)] - - return d - - -# for checking that the experiment has multidata measurement attributes -def check_has_multi_data(expt, loaded=False): - is_loaded = loaded_modifier(loaded) - pre_string = is_loaded + "experiment missing x" - post_string = " attribute after running" - assert hasattr(expt, 'x1'), pre_string + "1" + post_string - assert hasattr(expt, 'x2'), pre_string + "2" + post_string - assert hasattr(expt, 'x3'), pre_string + "3" + post_string - - -# for setting up the experiments -def set_up_experiment(num_devices, measure_function, data_dir, verbose, n_average, bad): - # set up core attributes - devices = ps.ItemAttribute() - runinfo = ps.RunInfo() - runinfo.measure_function = measure_function - - # set up based on num devices - if bad is False: - if (num_devices < 0): - assert False, "Num devices shouldn't be negative" - if (num_devices == 0): - runinfo.scan0 = ps.AverageScan(n_average, dt=0.001) - elif (num_devices == 1): - devices.v1 = ps.TestVoltage() - runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - runinfo.scan1 = ps.AverageScan(n_average, dt=0) - elif (num_devices == 2): - devices.v1 = ps.TestVoltage() - devices.v2 = ps.TestVoltage() - runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - runinfo.scan1 = ps.PropertyScan({'v2': ps.drange(0.1, 0.1, 0)}, 'voltage') - runinfo.scan2 = ps.AverageScan(n_average + 1, dt=0) - elif (num_devices == 3): - devices.v1 = ps.TestVoltage() - devices.v2 = ps.TestVoltage() - devices.v3 = ps.TestVoltage() - runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - runinfo.scan1 = ps.PropertyScan({'v2': 
ps.drange(0.1, 0.1, 0)}, 'voltage') - runinfo.scan2 = ps.PropertyScan({'v3': ps.drange(0.3, 0.1, 0.2)}, 'voltage') - runinfo.scan3 = ps.AverageScan(n_average + 2, dt=0.01) - if (num_devices > 4): - assert False, "num_devices > 4 not implemented in testing" - # if bad runinfo it will have no average scan and thus should fail - else: - devices.v1 = ps.TestVoltage() - runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - - # instantiate expt based on additional parameters - if data_dir is None: - if verbose is False: - expt = ps.Experiment(runinfo, devices) - elif verbose is True: - expt = ps.Experiment(runinfo, devices, verbose=verbose) - else: - assert False, "Invalid verbose entry. Must be boolean." - elif isinstance(data_dir, str): - if verbose is False: - expt = ps.Experiment(runinfo, devices, data_dir) - elif verbose is True: - expt = ps.Experiment(runinfo, devices, data_dir, verbose) - else: - assert False, "Invalid verbose entry. Must be boolean." - else: - assert False, "Invalid data_dir entry. Must be a string" - - return expt - - -# for checking that the meta path is initialized properly -def check_meta_path(expt): - expt.save_metadata() - meta_path = expt.runinfo.data_path / '{}.hdf5'.format(expt.runinfo.long_name) - assert meta_path.exists(), "meta_path not initialized" - assert meta_path.is_file(), "meta_path is not a file" - - -# for checking that the voltage(s) as expected -def check_voltage_results(voltage, expected_value1, expected_value2, voltage_id=1, loaded=False, string_modifier=''): - is_loaded = loaded_modifier(loaded) - - pre_string = is_loaded + "experiment v" + str(voltage_id) + string_modifier + "_voltage " - assert (isinstance(voltage, np.ndarray) or isinstance(voltage, list)), pre_string + "is not a numpy array or a list" - for i in voltage: - try: - assert i.dtype == 'float64' - except (Exception): - assert isinstance(i, float), pre_string + "data is not a float" - assert len(voltage) == 2, pre_string + "array does not have 2 elements" - assert voltage[0] == expected_value1, pre_string + "value[0] is not " + str(expected_value1) - assert voltage[1] == expected_value2, pre_string + "value[1] is not " + str(expected_value2) - - -# for checking that the data results are as expected -def check_data_results(x, id='', dtype=np.ndarray, shape=[2], loaded=False, num_devices=0): - is_loaded = loaded_modifier(loaded) - pre_string = is_loaded + str(num_devices) + " devices experiment x" + str(id) + " measurement " - - if (dtype == float or shape == [1]): - assert isinstance(x, dtype), pre_string + "is not a " + str(dtype) - else: - assert isinstance(x, dtype), pre_string + "is not a numpy array" - assert x.dtype == 'float64', pre_string + "data is not a float" - assert list(x.shape) == shape, pre_string + "array does not have " + str(shape) + " elements" - - if (shape == [2, 2] or shape == [2, 2, 2] or shape == [2, 2, 2, 2] or shape == [2, 2, 2, 2, 2]): - for i in x: - assert isinstance(i, np.ndarray), pre_string + "is not a numpy array of numpy arrays" - - -# for checking that the multi data results are as expected -def check_multi_data_results(expt, num_devices, shape1=[2], shape2=[2, 2], shape3=[2, 2, 2], shape4=[2, 2, 2, 2]): - if num_devices == 0: - check_data_results(expt.x1, id=1, dtype=float, shape=shape1) - check_data_results(expt.x2, id=2, shape=shape1) - for i in expt.x3: - assert isinstance(i, np.ndarray), "experiment x3 measurement is not a numpy array of numpy arrays" - check_data_results(expt.x3, id=3, shape=shape2) - if num_devices == 
1: - check_data_results(expt.x1, id=1, shape=shape1, num_devices=1) - check_data_results(expt.x2, id=2, shape=shape2, num_devices=1) - for i in expt.x3: - assert isinstance(i, np.ndarray), "experiment x3 measurement is not a numpy array of numpy arrays" - check_data_results(expt.x3, id=3, shape=shape3, num_devices=1) - if num_devices == 2: - check_data_results(expt.x1, id=1, shape=shape2, num_devices=2) - check_data_results(expt.x2, id=2, shape=shape3, num_devices=2) - for i in expt.x3: - assert isinstance(i, np.ndarray), "experiment x3 measurement is not a numpy array of numpy arrays" - check_data_results(expt.x3, id=3, shape=shape4, num_devices=2) - - -##################### TEST CASES BEGIN HERE ##################### - -def test_average_experiment(): - """ - Testing AverageExperiment - - Returns - -------- - None - """ - - def test_variations(num_devices=0, measure_function=measure_point, data_dir=None, verbose=False, n_average=2, - bad=False): - expt = set_up_experiment(num_devices, measure_function, data_dir, verbose, n_average, bad) - - # check the experiment core attributes are initialized correctly - assert hasattr(expt, 'runinfo'), "expt does not have runinfo attribute" - assert hasattr(expt, 'devices'), "expt does not have devices attribute" - assert expt.runinfo.data_path.exists(), "experiment data path does not exist" - assert expt.runinfo.data_path.is_dir(), "experiment data path is not a directory" - if data_dir is None: - assert str(expt.runinfo.data_path) == 'backup', "experiment data path does not equal 'backup'" - else: - assert str(expt.runinfo.data_path) == str(Path(data_dir)), "data path not setup to equal input data dir" - - # check the experiment runinfo - expt.check_runinfo() - - # if no average scan was input, make sure runinfo_averaged is set correctly - if bad is True: - assert expt.runinfo.average_d == -1, "average_d not -1 even without average scan" - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # check that the experiment has the data measurement attribute(s) - if measure_function == measure_point: - check_has_single_data(expt) - elif measure_function == measure_up_to_3D: - check_has_multi_data(expt) - - # check voltage is as expected - if num_devices >= 1: - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1) - if num_devices >= 2: - check_voltage_results(expt.v2_voltage, expected_value1=0.1, expected_value2=0, voltage_id=2) - if num_devices >= 3: - check_voltage_results(expt.v3_voltage, expected_value1=0.3, expected_value2=0.2, voltage_id=3) - - # ######### check that average scan is as expected ###### may add more test cases here? 
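A condensed sketch, assembled from the deleted set_up_experiment helper and test_variations flow above, of how an averaged experiment is built and run (one TestVoltage property scan plus an AverageScan):

import pyscan as ps
from random import random

def measure_point(expt):
    d = ps.ItemAttribute()
    d.x = random()
    return d

devices = ps.ItemAttribute()
devices.v1 = ps.TestVoltage()

runinfo = ps.RunInfo()
runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage')
runinfo.scan1 = ps.AverageScan(2, dt=0)
runinfo.measure_function = measure_point

expt = ps.Experiment(runinfo, devices)
expt.check_runinfo()
expt.run()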
- - # check the data results are as expected - if measure_function == measure_point: - if num_devices == 1: - check_data_results(expt.x) - if num_devices == 2: - check_data_results(expt.x, shape=[2, 2]) - elif measure_function == measure_up_to_3D: - check_multi_data_results(expt, num_devices) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # basic check to load the experiment we just ran - if data_dir is None: - ps.load_experiment('./backup/{}'.format(file_name)) - else: - ps.load_experiment('./' + str(Path(data_dir)) + '/{}'.format(file_name)) - - # close and delete directories created from running this test - if data_dir is None: - shutil.rmtree('./backup') - else: - shutil.rmtree(data_dir) - - test_variations() - test_variations(num_devices=1) - test_variations(num_devices=2) - test_variations(num_devices=3) - test_variations(measure_function=measure_up_to_3D) - test_variations(num_devices=1, measure_function=measure_up_to_3D) - test_variations(num_devices=2, measure_function=measure_up_to_3D) - test_variations(num_devices=3, measure_function=measure_up_to_3D) - test_variations(data_dir='./bakeep') - test_variations(verbose=True) - test_variations(n_average=1) - test_variations(n_average=10) - test_variations(bad=True) - - with pytest.raises(Exception): - test_variations(n_average=-1), "AverageExperiment's n_average must be 1 or more" - with pytest.raises(Exception): - test_variations(n_average=0), "AverageExperiment's n_average must be 1 or more" diff --git a/test/measurement/test_experiment.py b/test/measurement/test_experiment.py deleted file mode 100644 index 3704e6df..00000000 --- a/test/measurement/test_experiment.py +++ /dev/null @@ -1,1156 +0,0 @@ -''' -Pytest functions to test the Experiment experiment class and the load experiment function from loadexperiment.py -''' - - -import pyscan as ps -from random import random -import shutil -import numpy as np -import pytest -# import time -import re -import os - - -##################### FUNCTIONS USED BY TEST CASES ##################### - - -# for setting runinfo measure_function to measure 1D data -def measure_point(expt): - d = ps.ItemAttribute() - - d.x = random() - - return d - - -# for setting runinfo measure_function to measure (up to) 3D data -def measure_up_to_3D(expt): - d = ps.ItemAttribute() - - d.x1 = random() # could make predictable to ensure data is saved properly - d.x2 = [random() for i in range(2)] - d.x3 = [[random() for i in range(2)] for j in range(2)] - - return d - - -# for setting up prestring based on loaded to differentiate loaded experiment error strings -def loaded_modifier(loaded): - if (loaded is True): - return 'loaded ' - else: - return '' - - -# for setting up the experiments -def set_up_experiment(num_devices, measure_function, repeat=False, repeat_num=1): - devices = ps.ItemAttribute() - devices.v1 = ps.TestVoltage() - - runinfo = ps.RunInfo() - - if (repeat is True): - runinfo.scan0 = ps.RepeatScan(repeat_num) - else: - runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - - if (num_devices > 1): - devices.v2 = ps.TestVoltage() - runinfo.scan1 = ps.PropertyScan({'v2': ps.drange(0.1, 0.1, 0)}, 'voltage') - - if (num_devices > 2): - devices.v3 = ps.TestVoltage() - runinfo.scan2 = ps.PropertyScan({'v3': ps.drange(0.3, 0.1, 0.2)}, 'voltage') - - if (num_devices > 3): - devices.v4 = ps.TestVoltage() - runinfo.scan3 = ps.PropertyScan({'v4': ps.drange(-0.1, 0.1, 0)}, 'voltage') - - if (num_devices > 
4): - assert False, "num_devices > 4 not implemented in testing" - - runinfo.measure_function = measure_function - - expt = ps.Experiment(runinfo, devices) - return expt - - -# for checking keys, runinfo, and devices attributes -def check_has_attributes(expt, intended_keys_length, additional=None, loaded=False): - is_loaded = loaded_modifier(loaded) - assert hasattr(expt, 'keys'), is_loaded + "experiment missing attribute 'keys'" - ks = str(len(expt.keys())) - iks = str(intended_keys_length) - error_string = is_loaded + "experiment has " + ks + " keys instead of " + iks + " keys" - assert len(expt.keys()) == intended_keys_length, error_string - - assert hasattr(expt, 'runinfo'), is_loaded + "experiment missing runinfo attribute" - assert hasattr(expt.runinfo, 'time'), is_loaded + "experiment missing runinfo time attribute" - assert hasattr(expt, 'devices'), is_loaded + "experiment missing devices attribute" - - if (additional is not None): - assert hasattr(expt, additional), is_loaded + "experiment missing " + additional + " attribute" - - -# for checking the experiment (expt) upon initialization -def check_expt_init(expt): - check_has_attributes(expt, intended_keys_length=2) - - assert expt.runinfo.data_path.exists(), "experiment data path does not exist" - assert expt.runinfo.data_path.is_dir(), "experiment data path is not a directory" - assert str(expt.runinfo.data_path) == 'backup', "experiment data path does not equal 'backup'" - - assert len(expt.runinfo.measured) == 0 - assert expt.runinfo.measured == [] - - -# for checking whether the check experimental run info succeeded -def check_expt_runinfo(expt): - assert expt.check_runinfo(), "check_runinfo() failed" - - assert hasattr(expt.runinfo, 'long_name'), "experiment runinfo long name not initialized by check_runinfo" - assert hasattr(expt.runinfo, 'short_name'), "experiment runinfo long name not initialized by check_runinfo" - - -# for checking that the meta path is initialized properly -def check_meta_path(expt): - expt.save_metadata() - meta_path = expt.runinfo.data_path / '{}.hdf5'.format(expt.runinfo.long_name) - assert meta_path.exists(), "meta_path not initialized" - assert meta_path.is_file(), "meta_path is not a file" - - -# for checking that the experiment has data measurement attribute -def check_has_data(expt, loaded=False): - is_loaded = loaded_modifier(loaded) - assert hasattr(expt, 'x'), is_loaded + "experiment missing x attribute after running" - - -# for checking that the data results are as expected -def check_data_results(x, id=None, dtype=np.ndarray, shape=[2], loaded=False): - is_loaded = loaded_modifier(loaded) - pre_string = is_loaded + "experiment x" + str(id) + " measurement " - - if (dtype == float or shape == [1]): - assert isinstance(x, dtype), pre_string + "is not a float" - else: - assert isinstance(x, dtype), pre_string + "is not a numpy array" - assert x.dtype == 'float64', pre_string + "data is not a float" - assert list(x.shape) == shape, pre_string + "array does not have " + str(shape) + " elements" - - if (shape == [2, 2] or shape == [2, 2, 2] or shape == [2, 2, 2, 2] or shape == [2, 2, 2, 2, 2]): - for i in x: - assert isinstance(i, np.ndarray), pre_string + "is not a numpy array of numpy arrays" - - -# for checking that the experiment has multidata measurement attributes -def check_has_multi_data(expt, loaded=False): - is_loaded = loaded_modifier(loaded) - pre_string = is_loaded + "experiment missing x" - post_string = " attribute after running" - assert hasattr(expt, 'x1'), pre_string + "1" + 
post_string - assert hasattr(expt, 'x2'), pre_string + "2" + post_string - assert hasattr(expt, 'x3'), pre_string + "3" + post_string - - -# for checking that the multi data results are as expected -def check_multi_data_results(expt, shape1=[2], shape2=[2], shape3=[2]): - assert isinstance(expt.x1, float) - check_data_results(expt.x2, id=2, shape=shape2) - for i in expt.x3: - assert isinstance(i, np.ndarray), "experiment x3 measurement is not a numpy array of numpy arrays" - check_data_results(expt.x3, id=2, shape=shape3) - - -# for checking the experiment has voltages -def check_has_voltages(expt, num_voltages, loaded=False): - is_loaded = loaded_modifier(loaded) - - assert hasattr(expt, 'v1_voltage'), is_loaded + "experiment missing v1_voltage attribute after running" - if (num_voltages > 1): - assert hasattr(expt, 'v2_voltage'), is_loaded + "experiment missing v2_voltage attribute after running" - else: - assert not hasattr(expt, 'v2_voltage'), is_loaded + "experiment missing v2_voltage attribute after running" - if (num_voltages > 2): - assert hasattr(expt, 'v3_voltage'), is_loaded + "experiment missing v3_voltage attribute after running" - else: - assert not hasattr(expt, 'v3_voltage'), is_loaded + "experiment missing v3_voltage attribute after running" - if (num_voltages > 3): - assert hasattr(expt, 'v4_voltage'), is_loaded + "experiment missing v4_voltage attribute after running" - else: - assert not hasattr(expt, 'v4_voltage'), is_loaded + "experiment missing v4_voltage attribute after running" - if (num_voltages > 4): - assert hasattr(expt, 'v5_voltage'), is_loaded + "experiment missing v5_voltage attribute after running" - else: - assert not hasattr(expt, 'v5_voltage'), is_loaded + "experiment missing v5_voltage attribute after running" - - -# for checking that the voltage(s) as expected -def check_voltage_results(voltage, expected_value1, expected_value2, voltage_id=1, loaded=False, string_modifier=''): - is_loaded = loaded_modifier(loaded) - - pre_string = is_loaded + "experiment v" + str(voltage_id) + string_modifier + "_voltage " - assert (isinstance(voltage, np.ndarray) or isinstance(voltage, list)), pre_string + "is not a numpy array" - for i in voltage: - try: - assert i.dtype == 'float64', pre_string + "data is not a float" - except Exception: - assert isinstance(i, float), pre_string + "data is not a float" - assert len(voltage) == 2, pre_string + "array does not have 2 elements" - assert voltage[0] == expected_value1, pre_string + "value[0] is not " + str(expected_value1) - assert voltage[1] == expected_value2, pre_string + "value[1] is not " + str(expected_value2) - - -# for checking that the load experiment function is working as expected -def check_loaded_expt_further(expt): - # for checking variable devices formatting - def check_loaded_dev_attributes(dev, name, attributes): - for a in attributes: - assert hasattr(dev, a), "loaded experiment device " + name + " does not have " + a + " attribute" - assert isinstance(dev, ps.ItemAttribute), "loaded device " + name + " is not an instance of item attribute" - - # confirm these are instances of item attribute - assert isinstance(expt, ps.ItemAttribute), "loaded expt is not loaded as an instance of item attribute" - assert isinstance(expt.runinfo, ps.ItemAttribute), "expt runinfo is not loaded as an instance of item attribute" - assert isinstance(expt.devices, ps.ItemAttribute), "expt devices is not loaded as an instance of item attribute" - assert isinstance(expt.runinfo.static, ps.ItemAttribute), "runinfo static is not 
loaded as an item attribute" - - # check that the devices are as expected - v_attributes = ['debug', '_voltage'] - for device_name in expt.devices.__dict__.keys(): - device = expt.devices[device_name] - check_loaded_dev_attributes(device, device, v_attributes) - - assert isinstance(device.debug, bool), "devices " + device_name + " debug is not loaded as a boolean" - assert isinstance(device._voltage, float) or isinstance( - device._voltage, int), device_name + " voltage type error" - - # check the runinfo scans - assert hasattr(expt.runinfo, 'scan0'), "loaded expt does not have scan0" - assert hasattr(expt.runinfo, 'scan1'), "loaded expt does not have scan1" - assert hasattr(expt.runinfo, 'scan2'), "loaded expt does not have scan2" - assert hasattr(expt.runinfo, 'scan3'), "loaded expt does not have scan3" - - for key in expt.runinfo.__dict__.keys(): - if key.startswith('scan'): - scan = key - assert isinstance(expt.runinfo[scan], ps.ItemAttribute), "loaded runinfo " + scan + " not item attribute" - - # check other attributes for proper type when loaded - assert isinstance(expt.runinfo.measured, list), "runinfo measured is not loaded as a list" - assert isinstance(expt.runinfo.measure_function, str), "runinfo measure function is not loaded as a string" - assert expt.runinfo.trigger_function is None, "runinfo trigger function is not loaded as None" - assert isinstance(expt.runinfo.initial_pause, float), "runinfo initial pause is not loaded as a float" - assert isinstance(expt.runinfo.average_d, int), "runinfo average_d is not loaded as a float" - assert isinstance(expt.runinfo.verbose, bool), "runinfo verbose is not loaded as a boolean" - assert isinstance(expt.runinfo.time, bool), "runinfo time is not loaded as a boolean" - assert isinstance(expt.runinfo.long_name, str), "runinfo long_name is not loaded as a string" - assert isinstance(expt.runinfo.short_name, str), "runinfo short_name is not loaded as a string" - # ### sometimes loaded expt doesn't have runinfo.running... why? Is this supposed to be allowed? ### - if hasattr(expt.runinfo, 'running'): - assert isinstance(expt.runinfo.running, bool), "runinfo running is not loaded as a boolean" - # runinfo.complete does not seem to be saved... do we want it to be - # to know if the expt crashed before it could finish? 
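A short restatement of the save/load round trip these checks exercise; expt is assumed here to be a finished ps.Experiment whose data was written under './backup':

file_name = expt.runinfo.long_name
del expt

temp = ps.load_experiment('./backup/{}'.format(file_name))
# temp, temp.runinfo, temp.devices and temp.runinfo.static all load back as ps.ItemAttribute instances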
- - -####################### TEST CASES BEGIN HERE ####################### - - -def test_0D_multi_data(): - """ - Testing 1D scan, measuring 1D, 2D, and 3D data and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=1, measure_function=measure_up_to_3D, repeat=True, repeat_num=1) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=6, additional='repeat', loaded=loaded) - - # check the experiment has multidata measurement attributes - check_has_multi_data(expt, loaded=loaded) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - assert len(expt.repeat) == 1, "experiment repeat length is not 1" - assert expt.repeat == [0], "expt.repeat is not [0]" - assert expt.repeat[0] == 0.0, "experiment repeat[0] is not 0.0" - - # check the data results are as expected - check_data_results(expt.x1, id=1, dtype=float, shape=[1], loaded=loaded) - - check_data_results(expt.x2, id=2, shape=[2], loaded=loaded) - - check_data_results(expt.x3, id=3, shape=[2, 2], loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # test that load experiment rejects other file types - with pytest.raises(Exception): - temp = ps.load_experiment('./test/measurement/test_scans.py') - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded data results are as expected - assert len(temp.repeat) == 1, "experiment repeat length is not 1" - assert temp.repeat == [0], "expt.repeat is not [0]" - assert temp.repeat[0] == 0.0, "experiment repeat[0] is not 0.0" - - check_data_results(temp.x1, id=1, shape=[1], loaded=True) - - check_data_results(temp.x2, id=2, shape=[2], loaded=True) - - check_data_results(temp.x3, id=3, shape=[2, 2], loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_1D_data(): - """ - Testing 1D scan, measuring 1D, 2D, and 3D data and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=1, measure_function=measure_point) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment has intended keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=4, loaded=loaded) - - # check the experiment has the right number of 
voltages - check_has_voltages(expt, num_voltages=1, loaded=loaded) - - # check the experiment has data measurement attribute - check_has_data(expt) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1, loaded=loaded) - - # check the data results are as expected - check_data_results(expt.x, loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_1D_multi_data(): - """ - Testing 1D scan, measuring 1D, 2D, and 3D data and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=1, measure_function=measure_up_to_3D) - - # check the experiment was initialized correctly - check_expt_init(expt) - - # check the experiment run info was initialized successfully - expt.check_runinfo() - - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment keys, runinfo, and devices attributes - check_has_attributes(expt, 6, loaded=loaded) - - # check the experiment has multidata measurement attributes - check_has_multi_data(expt, loaded=loaded) - - # check the experiment has the right number of voltages - check_has_voltages(expt, num_voltages=1, loaded=loaded) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1, loaded=loaded) - - # check the data results are as expected - check_data_results(expt.x1, id=1, loaded=loaded) - - check_data_results(expt.x2, id=2, shape=[2, 2], loaded=loaded) - - check_data_results(expt.x3, id=3, shape=[2, 2, 2], loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_2D_data(): - """ - Testing 2D scan, measurement and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=2, measure_function=measure_point) - - # check the experiment was initialized correctly - check_expt_init(expt) - - 
expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment has intended keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=5, loaded=loaded) - - # check the experiment has the right number of voltages - check_has_voltages(expt, num_voltages=2, loaded=loaded) - - # check the experiment has data measurement attribute - check_has_data(expt) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1, voltage_id=1, loaded=loaded) - - check_voltage_results(expt.v2_voltage, expected_value1=0.1, expected_value2=0, voltage_id=2, loaded=loaded) - - # check the data results are as expected - check_data_results(expt.x, shape=[2, 2], loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_2D_multi_data(): - """ - Testing 2D scan, measurement and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=2, measure_function=measure_up_to_3D) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=7, loaded=loaded) - - # check the experiment has multidata measurement attributes - check_has_multi_data(expt, loaded=loaded) - - # check the experiment has the right number of voltages - check_has_voltages(expt, num_voltages=2, loaded=loaded) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1, voltage_id=1, loaded=loaded) - - check_voltage_results(expt.v2_voltage, expected_value1=0.1, expected_value2=0, voltage_id=2, loaded=loaded) - - # check the data results are as expected - check_data_results(expt.x1, id=1, shape=[2, 2], loaded=loaded) - - check_data_results(expt.x2, id=2, shape=[2, 2, 2], loaded=loaded) - - check_data_results(expt.x3, id=3, shape=[2, 2, 2, 2], loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes 
the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_3D_data(): - """ - Testing 3D scan, measurement and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=3, measure_function=measure_point) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment has intended keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=6, loaded=loaded) - - # check the experiment has the right number of voltages - check_has_voltages(expt, num_voltages=3, loaded=loaded) - - # check the experiment has data measurement attribute - check_has_data(expt) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1, voltage_id=1, loaded=loaded) - - check_voltage_results(expt.v2_voltage, expected_value1=0.1, expected_value2=0, voltage_id=2, loaded=loaded) - - check_voltage_results(expt.v3_voltage, expected_value1=0.3, expected_value2=0.2, voltage_id=3, loaded=loaded) - - # check the data results are as expected - check_data_results(expt.x, shape=[2, 2, 2], loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_3D_multi_data(): - """ - Testing 3D scan, measurement and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=3, measure_function=measure_up_to_3D) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=8, loaded=loaded) - - # check the experiment has multidata 
measurement attributes - check_has_multi_data(expt, loaded=loaded) - - # check the experiment has the right number of voltages - check_has_voltages(expt, num_voltages=3, loaded=loaded) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1, voltage_id=1, loaded=loaded) - - check_voltage_results(expt.v2_voltage, expected_value1=0.1, expected_value2=0, voltage_id=2, loaded=loaded) - - check_voltage_results(expt.v3_voltage, expected_value1=0.3, expected_value2=0.2, voltage_id=3, loaded=loaded) - - # check the data results are as expected - check_data_results(expt.x1, id=1, shape=[2, 2, 2], loaded=loaded) - - check_data_results(expt.x2, id=2, shape=[2, 2, 2, 2], loaded=loaded) - - check_data_results(expt.x3, id=3, shape=[2, 2, 2, 2, 2], loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_4D_data(): - """ - Testing 4D scan, measurement and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=4, measure_function=measure_point) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment has intended keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=7, loaded=loaded) - - # check the experiment has the right number of voltages - check_has_voltages(expt, num_voltages=4, loaded=loaded) - - # check the experiment has data measurement attribute - check_has_data(expt) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1, voltage_id=1, loaded=loaded) - - check_voltage_results(expt.v2_voltage, expected_value1=0.1, expected_value2=0, voltage_id=2, loaded=loaded) - - check_voltage_results(expt.v3_voltage, expected_value1=0.3, expected_value2=0.2, voltage_id=3, loaded=loaded) - - check_voltage_results(expt.v4_voltage, expected_value1=-0.1, expected_value2=0, voltage_id=4, loaded=loaded) - - # check the data results are as expected - check_data_results(expt.x, shape=[2, 2, 2, 2], loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # 
check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_4D_multi_data(): - """ - Testing 4D scan, measurement and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=4, measure_function=measure_up_to_3D) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=9, loaded=loaded) - - # check the experiment has multidata measurement attributes - check_has_multi_data(expt, loaded=loaded) - - # check the experiment has the right number of voltages - check_has_voltages(expt, num_voltages=4, loaded=loaded) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_voltage, expected_value1=0, expected_value2=0.1, voltage_id=1, loaded=loaded) - - check_voltage_results(expt.v2_voltage, expected_value1=0.1, expected_value2=0, voltage_id=2, loaded=loaded) - - check_voltage_results(expt.v3_voltage, expected_value1=0.3, expected_value2=0.2, voltage_id=3, loaded=loaded) - - check_voltage_results(expt.v4_voltage, expected_value1=-0.1, expected_value2=0, voltage_id=4, loaded=loaded) - - # check the data results are as expected - check_data_results(expt.x1, id=1, shape=[2, 2, 2, 2], loaded=loaded) - - check_data_results(expt.x2, id=2, shape=[2, 2, 2, 2, 2], loaded=loaded) - - check_data_results(expt.x3, id=3, shape=[2, 2, 2, 2, 2, 2], loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_1D_repeat(): - """ - Testing 1D repeat scan, measurement and loaded file - - Returns - -------- - None - """ - - # set up experiment - expt = set_up_experiment(num_devices=1, measure_function=measure_point, repeat=True, repeat_num=2) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - def check_expt_attributes(expt, loaded=False): - # check the experiment keys, runinfo, and devices attributes - 
check_has_attributes(expt, intended_keys_length=4, additional='repeat', loaded=loaded) - - # check the experiment has multidata measurement attributes - check_has_data(expt, loaded=loaded) - - check_expt_attributes(expt) - - def check_expt_results(expt, loaded=False): - assert len(expt.repeat) == 2, "experiment repeat length is not 2" - assert expt.repeat[0] == 0.0, "experiment repeat[0] is not 0.0" - assert expt.repeat[1] == 1.0, "experiment repeat[1] is not 1.0" - - # check the data results are as expected - check_data_results(expt.x, loaded=loaded) - - check_expt_results(expt) - - # saves file name of the saved experiment data and deletes the experiment - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - assert temp.repeat.dtype == 'float64' - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_underscore_property(): - """ - Testing property scan, measurement and loaded file - - Returns - -------- - None - """ - # set up experiment - devices = ps.ItemAttribute() - devices.v1_device = ps.TestVoltage() - - runinfo = ps.RunInfo() - - runinfo.scan0 = ps.PropertyScan({'v1_device': ps.drange(0, 0.1, 0.1)}, prop='voltage') - - runinfo.measure_function = measure_point - - expt = ps.Experiment(runinfo, devices) - - # check the experiment was initialized correctly - check_expt_init(expt) - - expt.check_runinfo() - - # check the experiment run info was initialized successfully - check_expt_runinfo(expt) - - # check the meta path was set successfully - check_meta_path(expt) - - # run the experiment - expt.run() - - # for checking the experiments attributes and output after running - def check_expt_attributes(expt, loaded=False): - # check the experiment has intended keys, runinfo, and devices attributes - check_has_attributes(expt, intended_keys_length=4, loaded=loaded) - - # check the experiment has the right voltage attribute - assert hasattr(expt, 'v1_device_voltage') - - # check the experiment has data measurement attribute - check_has_data(expt) - - check_expt_attributes(expt) - - # for checking the experiments results formatting after running - def check_expt_results(expt, loaded=False): - # check voltage(s) are as expected - check_voltage_results(expt.v1_device_voltage, expected_value1=0, expected_value2=0.1, - loaded=loaded, string_modifier='_device') - - # check the data results are as expected - check_data_results(expt.x, loaded=loaded) - - check_expt_results(expt) - - file_name = expt.runinfo.long_name - del expt - - # load the experiment we just ran - temp = ps.load_experiment('./backup/{}'.format(file_name)) - - # check that we load what we expect - def check_load_expt(temp): - # check the loaded experiment has the right attributes - check_expt_attributes(temp, loaded=True) - - # check the loaded experiment results are accurate - check_expt_results(temp, loaded=True) - - check_loaded_expt_further(temp) - - check_load_expt(temp) - - shutil.rmtree('./backup') - - -def test_fast_experiments(): - devices = ps.ItemAttribute() - - devices.v1 = ps.TestVoltage() # Device 1 - devices.v2 = ps.TestVoltage() # Device 2 - devices.v3 = ps.TestVoltage() # Device 3 - - def get_voltage_data(expt): 
- """ - Reads the voltage from v1, v2, and v3 devices. Also adds a calculated value vsum. - """ - - devices = expt.devices - - d = ps.ItemAttribute() - - d.v1_readout = devices.v1.voltage - d.v2_readout = devices.v2.voltage - d.v3_readout = devices.v3.voltage - - d.vsum = d.v1_readout + d.v2_readout + d.v3_readout - - return d - - # Create RunInfo instance and set scan0 to PropertyScan - runinfo = ps.RunInfo() - runinfo.scan0 = ps.RepeatScan(1, dt=0.0000001) - - # Set RunInfo measure_function (remember, it takes a Experiment object as a parameter and - # returns an ItemAttribute containing data). - runinfo.measure_function = get_voltage_data - - # Create a Experiment class with the RunInfo and Devices just created - expt = ps.Experiment(runinfo, devices, time=True) - - long_names = [] - - while len(long_names) < 3: - expt.run() - if len(long_names) == 0: - long_names.append(expt.runinfo.long_name) - elif (expt.runinfo.long_name[:15] == long_names[0][:15]): - long_names.append(expt.runinfo.long_name) - else: - long_names = [expt.runinfo.long_name] - - err_str = f"First long name '{long_names[0]}' does not match expected date/time format." - assert re.match(r'^\d{8}T\d{6}$', long_names[0]), err_str - err_str = f"-1 long name '{long_names[1]}' does not match expected increment or format." - assert long_names[1] == long_names[0] + '-1', err_str - err_str = f"-2 long name '{long_names[1]}' does not match expected increment or format." - assert long_names[2] == long_names[0] + '-2', err_str - - for name in long_names: - save_path = expt.runinfo.data_path / '{}.hdf5'.format(name) - assert os.path.exists(save_path), f"Expected file at path'{save_path}' was not found." diff --git a/test/measurement/test_run_info.py b/test/measurement/test_run_info.py deleted file mode 100644 index 82095f7f..00000000 --- a/test/measurement/test_run_info.py +++ /dev/null @@ -1,107 +0,0 @@ -''' -Pytest functions to test the Runinfo class -''' - -import pyscan as ps -import pytest - - -# ######## need to add tests for runinfo's different @property definitions. 
-def test_init_from_noparams(): - """ - Testing init from no paramaters in RunInfo - - Returns - ------- - None - - """ - - init_runinfo = ps.RunInfo() - - # for checking that scans have expected attributes - def check_scans_have_attribute(scans, attribute_name): - counter = 0 - for scan in scans: - err_string = "runinfo scan" + str(counter) + " (Property Scan) " + attribute_name + " not intialized" - assert hasattr(scan, attribute_name), err_string - counter += 1 - - # for checking that runinfo attributes are as expected - def check_attribute(runinfo, attribute, attribute_name, expected): - err_string1 = "runinfo " + attribute_name + " not initialized" - assert hasattr(runinfo, attribute_name), err_string1 - err_string2 = "runinfo " + attribute_name + " not " + str(expected) + " when intialized" - assert (attribute is expected or attribute == expected), err_string2 - - # check that runinfo scans are initialized correctly - def check_runinfo_scans(): - # check that scans 0 - 4 initialized - for i in range(4): - assert hasattr(init_runinfo, 'scan' + str(i)), "runinfo scan" + str(i) + " not initialized" - - # check that scans 0 - 4 initialized - for scan in init_runinfo.scans: - assert isinstance(scan, ps.PropertyScan), "runinfo scans not initialized as Property Scan" - - # check that scan attributes are initialized - check_scans_have_attribute(init_runinfo.scans, 'scan_dict') - check_scans_have_attribute(init_runinfo.scans, 'prop') - check_scans_have_attribute(init_runinfo.scans, 'dt') - check_scans_have_attribute(init_runinfo.scans, 'i') - - # check that each scans attributes are initialized correctly - counter = 0 - for scan in init_runinfo.scans: - # check that scan_dict initialized as empty {} - err_string = "runinfo scan" + str(counter) + " (Property Scan) scan_dict not empty when intialized" - assert scan.scan_dict == {}, err_string - - # check that prop initialized as None - err_string = "runinfo scan" + str(counter) + " (Property Scan) prop not None when intialized" - assert scan.prop is None, err_string - - # check that dt initialized as 0 - assert scan.dt == 0, "runinfo scan" + str(counter) + " (Property Scan) dt not 0 when intialized" - - # check that i initialized as 0 - assert scan.i == 0, "runinfo scan" + str(counter) + " (Property Scan) i not 0 when intialized" - - counter += 1 - - check_runinfo_scans() - - # check that runinfo attributes are initialized correctly - def check_runinfo_attributes(): - # check that static initialized correctly - check_attribute(runinfo=init_runinfo, attribute=init_runinfo.static, attribute_name='static', expected={}) - - # check that measured initialized correctly - check_attribute(runinfo=init_runinfo, attribute=init_runinfo.measured, attribute_name='measured', expected=[]) - - # check that measure_function initialized correctly - check_attribute(runinfo=init_runinfo, attribute=init_runinfo.measure_function, - attribute_name='measure_function', expected=None) - - # check that trigger_function initialized correctly - check_attribute(runinfo=init_runinfo, attribute=init_runinfo.trigger_function, - attribute_name='trigger_function', expected=None) - - # check that initial_pause initialized correctly - check_attribute(runinfo=init_runinfo, attribute=init_runinfo.initial_pause, - attribute_name='initial_pause', expected=0.1) - - # check that average_d initialized correctly - check_attribute(runinfo=init_runinfo, attribute=init_runinfo.average_d, attribute_name='average_d', expected=-1) - - # check that verbose initialized correctly - 
check_attribute(runinfo=init_runinfo, attribute=init_runinfo.verbose, attribute_name='verbose', expected=False) - - check_runinfo_attributes() - - init_runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage') - init_runinfo.scan1 = ps.PropertyScan({'v2': ps.drange(0, 0.1, 0.1)}, 'voltage') - init_runinfo.check() - with pytest.raises(Exception): - init_runinfo.scan3 = ps.PropertyScan({'v3': ps.drange(0, 0.1, 0.1)}, 'voltage') - init_runinfo.check() diff --git a/test/measurement/test_scans.py b/test/measurement/test_scans.py deleted file mode 100644 index 466d1a1f..00000000 --- a/test/measurement/test_scans.py +++ /dev/null @@ -1,480 +0,0 @@ -''' -Pytest functions to test the Scans class -''' - - -import pyscan as ps -import numpy as np -import pytest -from time import sleep - - -# for checking that the scans have the given attributes -def check_has_attributes(scan, scan_name, attribute_names): - for i in attribute_names: - err_string = scan_name + " scan " + i + " not intialized" - assert hasattr(scan, i), err_string - - -# for checking that the scans have the expected attribute values -def check_attribute_value(scan_name, attribute_name, attribute, expected_value): - err_string = scan_name + " scan " + attribute_name + " not " + str(expected_value) + " when intialized" - assert attribute == expected_value, err_string - - -# for checking that the iterate function is working as expected -def check_iterate_function(scan, scan_name, devices=[]): - # check that iterate is callable - assert callable(scan.iterate), scan_name + " scan iterate function not callable" - - # check that iterate functions as expected - try: - scan.iterate(0, devices) # This only tests if it runs, not if the results are desired. - except Exception: - assert False, scan_name + " scan iterate function error" - - -# for setting up devices to test scans iterate functions -def setup_devices(): - devices = ps.ItemAttribute() - devices.v1 = ps.TestVoltage() - devices.v2 = ps.TestVoltage() - devices.v3 = ps.TestVoltage() - devices.v4 = ps.TestVoltage() - return devices - - -# mostly a placeholder for now, meta scan has no init function -def test_meta_scan(): - """ - Testing function scan - - Returns - -------- - None - """ - - # set up basic scan - # scan = ps.MetaScan(0, []) - - # scan_name = 'MetaScan' - - # ensure empty scan is instance of Property Scan - # assert isinstance(scan, ps.MetaScan), scan_name + " scan not initialized as MetaScan" - - assert True - - -def test_property_scan(): - """ - Testing property scan, with both 0D and 1D inputs - - Returns - -------- - None - """ - - # for testing a property scan scan initialized as empty - def test_empty_scan(): - # set up empty scan - scan = ps.PropertyScan({}, prop=None) - - scan_name = 'Property Scan empty' - - # ensure empty scan is instance of Property Scan - assert isinstance(scan, ps.PropertyScan), scan_name + " scan not initialized as Property Scan" - - # check that the empty scan attributes are initialized - attribute_names = ['device_names', 'scan_dict', 'input_dict', 'prop', 'dt', 'i', 'n'] - check_has_attributes(scan, scan_name, attribute_names) - - # check the attributes are initialized correctly - def check_attributes(scan): - # check that device_names initialized with correct value - check_attribute_value(scan_name, 'device names', scan.device_names, expected_value=[]) - - # check that scan_dict has the expected number of keys - check_attribute_value(scan_name, 'scan_dict num keys', len(list(scan.scan_dict.keys())), expected_value=0) - - # 
check same length function is callable and passes - err_string1 = scan_name + " scan check same length not callable" - assert callable(scan.check_same_length), err_string1 - err_string2 = scan_name + " scan check same length failed" - assert scan.check_same_length, err_string2 - - # check that n has the expected value - check_attribute_value(scan_name, 'n', scan.n, expected_value=1) - - # check that scan_dict initialized correctly - check_attribute_value(scan_name, 'scan_dict', scan.scan_dict, expected_value={}) - - # check that prop initialized with correct value - err_string = "Property Scan empty scan prop not None when intialized" - assert scan.prop is None, err_string - - # check that dt initialized with correct value - check_attribute_value(scan_name, 'dt', scan.dt, expected_value=0) - - # check that i initialized with correct value - check_attribute_value(scan_name, 'i', scan.i, expected_value=0) - - # check that iterate functions as expected - check_iterate_function(scan, scan_name) - - check_attributes(scan) - - # test a property scan scan initialized as empty - test_empty_scan() - - def test_1D_property_scan_4scans(): - # set up 4 scans as "scans" to test - scans = [0, 1, 2, 3] - self = ['v1', 'v2', 'v3', 'v4'] - prop = 'voltage' - scans[0] = ps.PropertyScan({self[0]: ps.drange(0, 0.1, 0.1)}, prop, dt=.01) - scans[1] = ps.PropertyScan({self[1]: ps.drange(0.1, 0.1, 0)}, prop, dt=.01) - scans[2] = ps.PropertyScan({self[2]: ps.drange(0.3, 0.1, 0.2)}, prop, dt=.01) - scans[3] = ps.PropertyScan({self[3]: ps.drange(-0.1, 0.1, 0)}, prop, dt=.01) - - # verifying the check same length function called by property scan will fail with bad runinfo - with pytest.raises(Exception): - bad_runinfo = ps.RunInfo() - bad_runinfo.scan1 = ps.PropertyScan({'v1': ps.drange(5, 5, 5), 'diff': ps.drange(0, 0.1, 0.1)}, 'voltage') - - # setup devices for testing iterate function - devices = setup_devices() - - scan_name = "Property Scan scan" - - # check that scans 0 - 4 initialized as Property Scan - counter = 0 - for scan in scans: - assert isinstance(scan, ps.PropertyScan), "scan" + str(counter) + " not initialized as Property Scan" - counter += 1 - - # for checking that scans as scans have expected attributes - def check_scans_have_attribute(scans, attribute_name): - counter = 0 - for scan in scans: - err_string = "scan" + str(counter) + " (Property Scan) " + attribute_name + " not intialized" - assert hasattr(scan, attribute_name), err_string - counter += 1 - - # check that scan attributes are initialized - check_scans_have_attribute(scans, 'device_names') - check_scans_have_attribute(scans, 'scan_dict') - check_scans_have_attribute(scans, 'input_dict') - check_scans_have_attribute(scans, 'prop') - check_scans_have_attribute(scans, 'dt') - check_scans_have_attribute(scans, 'i') - check_scans_have_attribute(scans, 'n') - - # for checking that scans as scans attribute values are correct - def check_scan_attributes(scans, scan_num, self, prop, - expected_scan_dict1, expected_scan_dict2, expected_dt, expected_i): - # check that device_names initialized with correct value - err_string = "Property Scan scan" + str(scan_num) + " device names not " + self - assert scans[scan_num].device_names == [self] - - # check that scan_dict has the right number of keys - err_string1 = "Property Scan scan" + str(scan_num) + " scan_dict has no keys" - err_string2 = "Property Scan scan" + str(scan_num) + " scan_dict num keys not 1" - assert len(list(scans[scan_num].scan_dict.keys())) > 0, err_string1 - assert 
len(list(scans[scan_num].scan_dict.keys())) == 1, err_string2 - - # check same length and that n is right value - err_string1 = "Property Scan scan" + str(scan_num) + " check same length failed" - err_string2 = "Property Scan scan" + str(scan_num) + " n not 2" - assert scans[scan_num].check_same_length, err_string1 - assert scans[scan_num].n == 2, err_string2 - - # check that scan_dict initialized with correct values - prestring = "Property Scan scan" + str(scan_num) + " scan_dict " + self + "_" + prop - err_string1 = prestring + "[0] not " + str(expected_scan_dict1) + " when initialized" - err_string2 = prestring + "[1] not " + str(expected_scan_dict1) + " when initialized" - assert scans[scan_num].scan_dict[self + "_" + prop][0] == expected_scan_dict1, err_string1 - assert scans[scan_num].scan_dict[self + "_" + prop][1] == expected_scan_dict2, err_string2 - - # check that prop initialized with correct value - err_string = "Property Scan scan" + str(scan_num) + " prop not " + prop + " when intialized" - assert scans[scan_num].prop == prop, err_string - - # check that dt initialized with correct value - err_string = "Property Scan scan" + str(scan_num) + " dt not " + str(expected_dt) + " when intialized" - assert scans[scan_num].dt == expected_dt, err_string - - # check that i initialized with correct value - err_string = "Property Scan scan" + str(scan_num) + " i not " + str(expected_i) + " when intialized" - assert scans[scan_num].i == expected_i, err_string - - # check that iterate functions as expected (this adds a lot of runtime to the testing) - for scan in scans: - check_iterate_function(scan, scan_name) - - for m in range(scan.n): - scan.iterate(scan.i, devices) - - for dev in scan.device_names: - devices[dev][scan.prop] == scan.scan_dict[dev + '_' + scan.prop][scan.i] - - sleep(scan.dt) # Can we remove this to save time when running test cases, or is it important? - - # check each scan for expected attribute values - check_scan_attributes(scans, 0, self[0], prop, expected_scan_dict1=0.0, - expected_scan_dict2=0.1, expected_dt=.01, expected_i=0) - check_scan_attributes(scans, 1, self[1], prop, expected_scan_dict1=0.1, - expected_scan_dict2=0.0, expected_dt=.01, expected_i=0) - check_scan_attributes(scans, 2, self[2], prop, expected_scan_dict1=0.3, - expected_scan_dict2=0.2, expected_dt=.01, expected_i=0) - check_scan_attributes(scans, 3, self[3], prop, expected_scan_dict1=-0.1, - expected_scan_dict2=0.0, expected_dt=.01, expected_i=0) - - test_1D_property_scan_4scans() - - # ########are we intending to test multi dictionary input items for scans and should I add test cases for them here? 
- - -def test_function_scan(): - """ - Testing function scan, with both populated and unpopulated inputs - - Returns - -------- - None - """ - - # for testing a function scan scan initialized as empty - def test_empty_scan(): - def input_function(): - pass - - # initialize the empty function scan scan - scan = ps.FunctionScan(input_function, values=[]) - - scan_name = 'Function Scan empty' - - # ensure empty scan is instance of Function Scan - assert isinstance(scan, ps.FunctionScan), "empty scan not initialized as Function Scan" - - # check that the empty scan attributes are initialized - attribute_names = ['scan_dict', 'function', 'dt', 'i', 'n'] - check_has_attributes(scan, scan_name, attribute_names) - - # check that the attributes are initialized correctly - def check_attributes(): - # check that scan_dict has the right number of keys - check_attribute_value(scan_name, 'scan_dict num keys', len(list(scan.scan_dict.keys())), expected_value=1) - - # check that scan_dict initialized correctly - check_attribute_value(scan_name, 'scan_dict', scan.scan_dict, expected_value={'input_function': []}) - - # check that function is callable - err_string = scan_name + " scans function not a callable function" - assert callable(scan.function), err_string - - # check that dt initialized with correct value - check_attribute_value(scan_name, 'dt', scan.dt, expected_value=0) - - # check that i initialized with correct value - check_attribute_value(scan_name, 'i', scan.i, expected_value=0) - - # check that n initialized with correct value - check_attribute_value(scan_name, 'n', scan.n, expected_value=0) - - # check that iterate is callable - assert callable(scan.iterate), scan_name + " scan iterate function not callable" - - # check that running the iterate function on empty function scan with no index fails - with pytest.raises(Exception): - check_iterate_function(scan, scan_name) - - check_attributes() - - # test a function scan scan initialized as empty - test_empty_scan() - - # for testing a function scan scan initialized as populated - def test_scan(return_value=0): - # set up a basic function to pass as an input to the function scan - def input_function(num): - for i in range(num): - pass - return return_value - - # setup devices for testing iterate function - devices = setup_devices() - - # initialize the function scan scan - scan = ps.FunctionScan(input_function, values=[0, 1, 2], dt=.1) - - scan_name = 'Function Scan populated' - - # ensure scan is instance of Function Scan - assert isinstance(scan, ps.FunctionScan), "populated scan not initialized as Function Scan" - - # check that the scan attributes are initialized - attribute_names = ['scan_dict', 'function', 'dt', 'i', 'n'] - check_has_attributes(scan, scan_name, attribute_names) - - # for checking that the attributes are initialized correctly - def check_attributes(): - # check that scan_dict has the right number of keys - check_attribute_value(scan_name, 'scan_dict num keys', len(list(scan.scan_dict.keys())), expected_value=1) - - # check that scan_dict initialized correctly - check_attribute_value(scan_name, 'scan_dict', scan.scan_dict, expected_value={'input_function': [0, 1, 2]}) - - # check that function initialized correctly - err_string = scan_name + " scan function not a callable function" - assert callable(scan.function), err_string - - # check that dt initialized with correct value - check_attribute_value(scan_name, 'dt', scan.dt, expected_value=.1) - - # check that i initialized with correct value - 
check_attribute_value(scan_name, 'i', scan.i, expected_value=0) - - # check that n initialized with correct value - check_attribute_value(scan_name, 'n', scan.n, expected_value=3) - - # check that iterate functions as expected - check_iterate_function(scan, scan_name, devices=devices) - err_string = scan_name + " iterate function not as expected." - assert scan.function(scan.scan_dict[scan.function.__name__][0]) == return_value, err_string - - # check that the attributes are initialized correctly - check_attributes() - - # test a function scan scan initialized as populated - test_scan() - test_scan(return_value=1) - - -def test_repeat_scan(): - """ - Testing repeat scan, with various numbers of repeats - - Returns - -------- - None - """ - - # for testing repeat scans with different numbers of repeats - def test_num_repeat(num_repeat, dt=0): - # initialize the repeat scan scan - scan = ps.RepeatScan(num_repeat, dt=dt) - - scan_name = 'Repeat Scan with ' + str(num_repeat) + ' num repeats' - - # check that the empty scan attributes are initialized - attribute_names = ['device_names', 'scan_dict', 'dt', 'i', 'n'] - check_has_attributes(scan, scan_name, attribute_names) - - # check the attributes are initialized correctly - def check_attributes(scan, dt=0): - # check that device_names initialized with correct value - check_attribute_value(scan_name, 'device names', scan.device_names, expected_value=['repeat']) - - # check that scan_dict has the right number of keys - check_attribute_value(scan_name, 'scan_dict num keys', len(list(scan.scan_dict.keys())), expected_value=1) - - # check same length placeholder - err_string = scan_name + " check same length failed" - assert scan.check_same_length, err_string - - # check that n initialized correctly depending on if num_repeat is infinity or not - if (num_repeat is not np.inf): - check_attribute_value(scan_name, 'n', scan.n, expected_value=num_repeat) - - else: - check_attribute_value(scan_name, 'n', scan.n, expected_value=0) - - # check that scan_dict initialized correctly depending on if num_repeat is infinity or not - if (num_repeat is not np.inf): - check_attribute_value(scan_name, 'scan_dict', scan.scan_dict, - expected_value={'repeat': list(range(num_repeat))}) - else: - check_attribute_value(scan_name, 'scan_dict', scan.scan_dict, expected_value={'repeat': []}) - - # check that dt initialized with correct value - check_attribute_value(scan_name, 'dt', scan.dt, expected_value=dt) - - # check that i initialized with correct value - check_attribute_value(scan_name, 'i', scan.i, expected_value=0) - - # check that iterate functions as expected - check_iterate_function(scan, scan_name) - - check_attributes(scan, dt) - - with pytest.raises(Exception): - test_num_repeat(-1), "Repeat scan num repeats can be negative when it is not allowed" - with pytest.raises(Exception): - test_num_repeat(0), "Repeat scan num repeats can be 0 when it is not allowed" - test_num_repeat(1) - test_num_repeat(1, dt=.1) - test_num_repeat(1000000) - with pytest.raises(Exception): - test_num_repeat(np.inf), "Repeat scan num repeats can be np.inf when it is not allowed" - - -def test_average_scan(): - """ - Testing average scan, with both populated and unpopulated inputs - - Returns - -------- - None - """ - - # for testing average scans with different number of times to average data from inner scans - def test_num_average(n_average, dt=0): - # initialize the average scan scan - scan = ps.AverageScan(n_average, dt) - - scan_name = 'Average Scan with ' + str(n_average) + ' 
n_average' - - # check that the empty scan attributes are initialized - attribute_names = ['device_names', 'scan_dict', 'dt', 'i', 'n'] - check_has_attributes(scan, scan_name, attribute_names) - - # check the attributes are initialized correctly - def check_attributes(scan, dt=0): - # check that device_names initialized with correct value - check_attribute_value(scan_name, 'device names', scan.device_names, expected_value=['average']) - - # check that scan_dict has the right number of keys - check_attribute_value(scan_name, 'scan_dict num keys', len(list(scan.scan_dict.keys())), expected_value=1) - - # check same length placeholder - err_string = scan_name + " check same length failed" - assert scan.check_same_length, err_string - - # check that n initialized correctly - check_attribute_value(scan_name, 'n', scan.n, expected_value=n_average) - - # check that scan_dict initialized correctly - check_attribute_value(scan_name, 'scan_dict', scan.scan_dict, - expected_value={'average': list(scan.iterator())}) - - # check that dt initialized with correct value - check_attribute_value(scan_name, 'dt', scan.dt, expected_value=dt) - - # check that i initialized with correct value - check_attribute_value(scan_name, 'i', scan.i, expected_value=0) - - # check that iterate functions as expected - check_iterate_function(scan, scan_name) - - check_attributes(scan, dt) - - with pytest.raises(Exception): - test_num_average(-1), "Average Scan n_average can be negative when it should be 1 or more" - with pytest.raises(Exception): - test_num_average(0), "Average Scan n_average can be 0 when it should be 1 or more" - test_num_average(1) - test_num_average(2) - test_num_average(2, dt=.1) - test_num_average(100) - with pytest.raises(Exception): - test_num_average(np.inf), "Average Scan n_average can be np.inf when it is not allowed" diff --git a/test/drivers/test_driver_import_failures.py b/test/test_drivers/test_driver_import_failures.py similarity index 100% rename from test/drivers/test_driver_import_failures.py rename to test/test_drivers/test_driver_import_failures.py diff --git a/test/drivers/test_drivers_test_unit.py b/test/test_drivers/test_drivers_test_unit.py similarity index 100% rename from test/drivers/test_drivers_test_unit.py rename to test/test_drivers/test_drivers_test_unit.py diff --git a/test/drivers/test_test_instrument_driver.py b/test/test_drivers/test_test_instrument_driver.py similarity index 99% rename from test/drivers/test_test_instrument_driver.py rename to test/test_drivers/test_test_instrument_driver.py index 3bf552a5..ba99e519 100644 --- a/test/drivers/test_test_instrument_driver.py +++ b/test/test_drivers/test_test_instrument_driver.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- + import pytest import math import string diff --git a/test/drivers/test_test_voltage.py b/test/test_drivers/test_test_voltage.py similarity index 100% rename from test/drivers/test_test_voltage.py rename to test/test_drivers/test_test_voltage.py diff --git a/test/test_general/test_d_range.py b/test/test_general/test_d_range.py new file mode 100644 index 00000000..db742e5a --- /dev/null +++ b/test/test_general/test_d_range.py @@ -0,0 +1,15 @@ +import pyscan as ps +import numpy as np +import pytest + + +@pytest.mark.parametrize("start, step, end, expected", [ + (1, 0.01, 1, [1]), + (1, 0.01, 1.005, [1, 1.005]), + (1, 0.01, 1.2, np.round(np.arange(1, 1.21, 0.01), 5)), + (1, 0.015, 1.2, np.append(np.round(np.arange(1, 1.195, 0.015), 5), 1.2)), + (1.005, 0.01, 1, [1.005, 1]), + (1.2, 0.01, 1, 
np.round(np.arange(1.20, 0.99, -0.01), 5)), + (1.2, 0.015, 1, np.round(np.append(np.arange(1.2, 1.005, -0.015), 1), 5))]) +def test_drange_value(start, step, end, expected): + assert np.allclose(ps.drange(start, step, end), expected), f"drange({start}, {step}, {end}) gave wrong output" diff --git a/test/test_general/test_first_string.py b/test/test_general/test_first_string.py new file mode 100644 index 00000000..5f79adb3 --- /dev/null +++ b/test/test_general/test_first_string.py @@ -0,0 +1,19 @@ +import pyscan as ps +import pytest + + +@pytest.mark.parametrize("input,output", [ + (('a', 'b'), 'a'), + ('a', 'a')]) +def test_first_string(input, output): + assert ps.first_string(input) == output, f"First string of {input} gave incorrect output; expected {output}" + + +@pytest.mark.parametrize("input", [ + 0, + True, + [0, 1], + [0, 'b']]) +def test_first_string_error(input): + with pytest.raises(TypeError): + ps.first_string(input) diff --git a/test/test_general/test_is_list_type.py b/test/test_general/test_is_list_type.py new file mode 100644 index 00000000..2c857548 --- /dev/null +++ b/test/test_general/test_is_list_type.py @@ -0,0 +1,20 @@ +import pyscan as ps +import numpy as np +import pytest + + +@pytest.mark.parametrize("input", [ + (1, 2, 3), + [1, 2, 3], + np.array([1, 2, 3])]) +def test_is_list_type(input): + assert ps.is_list_type(input) + + +@pytest.mark.parametrize("input", [ + 'string', + 2, + {'a': 1}, + True]) +def test_is_list_type_error(input): + assert not ps.is_list_type(input) diff --git a/test/test_general/test_is_numeric_type.py b/test/test_general/test_is_numeric_type.py new file mode 100644 index 00000000..dd4c68d7 --- /dev/null +++ b/test/test_general/test_is_numeric_type.py @@ -0,0 +1,19 @@ +import pyscan as ps +import numpy as np +import pytest + + +@pytest.mark.parametrize("input", [2, 2.1, np.float64(2)]) +def test_is_numeric_type(input): + assert ps.is_numeric_type(input) + + +@pytest.mark.parametrize("input", [ + 'string', + (1, 2, 3), + [1, 2, 3], + np.array([1, 2, 3]), + {'a': 1}, + True]) +def test_is_not_numeric_type(input): + assert not ps.is_numeric_type(input), f"{input} was wrongly identified as numeric type" diff --git a/test/test_general/test_item_attribute.py b/test/test_general/test_item_attribute.py new file mode 100644 index 00000000..9024b9b8 --- /dev/null +++ b/test/test_general/test_item_attribute.py @@ -0,0 +1,45 @@ +import pyscan as ps +import pytest + + +@pytest.fixture +def ia(): + ia = ps.ItemAttribute() + ia.test_prop1 = 3 + ia.test_prop2 = 'str' + return ia + + +def test_itemattribute_property_call(ia): + assert ia.test_prop1 == 3 + assert ia.test_prop2 == 'str' + + +@pytest.mark.parametrize("key,value", [ + ('test_prop1', 3), + ('test_prop2', 'str')]) +def test_itemattribute_dict_call(ia, key, value): + assert ia[key] == value + + +@pytest.mark.parametrize("func,value", [ + ('keys', ['test_prop1', 'test_prop2']), + ('values', [3, 'str']), + ('items', [('test_prop1', 3), ('test_prop2', 'str')])]) +def test_itemattribute_dictionary_functions(ia, func, value): + assert list(getattr(ia, func)()) == value + + +@pytest.mark.parametrize("key", [ + 'test_prop1', + 'test_prop2']) +def test_itemattribute_del(ia, key): + del ia[key] + assert not hasattr(ia, key) + + +@pytest.mark.parametrize("key", [ + 'test_prop1', + 'test_prop2']) +def test_itemattribute_contains(ia, key): + assert key in ia diff --git a/test/test_measurement/test_abstract_experiment.py b/test/test_measurement/test_abstract_experiment.py new file mode 100644 index 00000000..f0cd7259 ---
/dev/null +++ b/test/test_measurement/test_abstract_experiment.py @@ -0,0 +1,353 @@ +import pytest +import pyscan as ps +import os +import h5py +import json +import numpy as np + + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage', dt=0) + return runinfo + + +@pytest.fixture() +def c_runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + runinfo.scan0 = ps.ContinuousScan(10) + return runinfo + + +@pytest.mark.parametrize("data_dir", [None, './test']) +def test_setup_data_directory(runinfo, devices, data_dir): + expt = ps.Experiment(runinfo, devices, data_dir=data_dir) + if data_dir is None: + assert os.path.isdir('./backup'), "Data Directory not correctly setup" + else: + assert os.path.isdir(data_dir), "Data Directory not correctly setup" + + +def test_check_runinfo(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.check_runinfo() + assert hasattr(expt.runinfo, 'file_name') + + +def test_save_runinfo_metadata(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.check_runinfo() + expt.save_metadata('runinfo') + + with h5py.File('./backup/{}'.format(expt.runinfo.file_name + '.hdf5'), 'r') as f: + runinfo = f.attrs['runinfo'] + + runinfo = json.loads(runinfo) + + +def test_save_devices_metadata(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.check_runinfo() + expt.save_metadata('devices') + + with h5py.File('./backup/{}'.format(expt.runinfo.file_name + '.hdf5'), 'r') as f: + devices = f.attrs['devices'] + + devices = json.loads(devices) + + expected = { + 'v1': { + 'instrument': None, + '_driver_class': 'NoneType', + 'debug': False, + '_instrument_driver_version': '0.2.0', + '_voltage_settings': { + 'name': 'voltage', + 'write_string': 'VOLT {}', + 'query_string': 'VOLT?', + 'range': [-10, 10], + 'return_type': 'float'}, + '_power_settings': { + 'name': 'power', + 'write_string': 'POW {}', + 'query_string': 'POW?', + 'values': [1, 10], + 'return_type': 'int'}, + '_output_state_settings': { + 'name': 'output_state', + 'write_string': 'OUTP {}', + 'query_string': 'OUTP?', + 'dict_values': {'on': 1, 'off': 0, '1': 1, '0': 0}, + 'return_type': 'str'}, + '_voltage': 0.0, + '_power': 1, + '_output_state': 'off', + '_version': '0.1.0', + 'black_list_for_testing': []}} + + for key in devices.keys(): + assert devices[key] == expected[key] + + +def test_preallocate(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.check_runinfo() + data = expt.runinfo.measure_function(expt) + expt.preallocate(data) + + assert expt.runinfo.measured == ['x1', 'x2', 'x3'] + + assert expt.x1.shape == (2,) + assert np.all(np.isnan(expt.x1)) + + assert expt.x2.shape == (2, 2) + assert np.all(np.isnan(expt.x2)) + + assert expt.x3.shape == (2, 2, 2) + assert np.all(np.isnan(expt.x3)) + + with h5py.File('./backup/{}'.format(expt.runinfo.file_name + '.hdf5'), 'r') as f: + scan = f['v1_voltage'] + assert scan.shape == (2,) + assert np.allclose(scan, np.array([0.0, 0.1])) + + x1 = f['x1'] + assert x1.shape == (2,) + assert np.all(np.isnan(x1)) + x2 = f['x2'] + assert x2.shape == (2, 
2) + assert np.all(np.isnan(x2)) + + x3 = f['x3'] + assert x3.shape == (2, 2, 2) + assert np.all(np.isnan(x3)) + + +def test_save_point(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.check_runinfo() + data = expt.runinfo.measure_function(expt) + expt.preallocate(data) + expt.save_point(data) + + assert expt.x1.shape == (2,) + assert np.isclose(expt.x1[0], 0.0) + assert np.isnan(expt.x1[1]) + + assert expt.x2.shape == (2, 2) + assert np.allclose(expt.x2[0], [0, 0]) + assert np.all(np.isnan(expt.x2[1])) + + assert expt.x3.shape == (2, 2, 2) + assert np.allclose(expt.x3[0], np.zeros((2, 2))) + assert np.all(np.isnan(expt.x3[1])) + + with h5py.File('./backup/{}'.format(expt.runinfo.file_name + '.hdf5'), 'r') as f: + scan = f['v1_voltage'] + assert scan.shape == (2,) + assert np.allclose(scan, np.array([0.0, 0.1])) + + x1 = f['x1'] + assert x1.shape == (2,) + assert np.isclose(x1[0], [0.0]) + assert np.all(np.isnan(x1[1])) + + x2 = f['x2'] + assert x2.shape == (2, 2) + assert np.allclose(x2[0], [0, 0]) + assert np.all(np.isnan(x2[1])) + + x3 = f['x3'] + assert x3.shape == (2, 2, 2) + assert np.allclose(x3[0], np.zeros((2, 2))) + assert np.all(np.isnan(x3[1])) + + +def test_continuous_preallocate(c_runinfo, devices): + runinfo = c_runinfo + + expt = ps.Experiment(runinfo, devices) + expt.check_runinfo() + data = expt.runinfo.measure_function(expt) + expt.runinfo.scans[-1].iterate(expt, 0, -1) + expt.preallocate(data) + + assert expt.iteration.shape == (1,) + assert np.allclose(expt.iteration, [0.0]) + + assert expt.x1.shape == (1,) + assert np.isnan(expt.x1[0]) + + assert expt.x2.shape == (1, 2) + assert np.all(np.isnan(expt.x2)) + + assert expt.x3.shape == (1, 2, 2) + assert np.all(np.isnan(expt.x3)) + + with h5py.File('./backup/{}'.format(expt.runinfo.file_name + '.hdf5'), 'r') as f: + scan = f['iteration'] + assert scan.shape == (1,) + assert np.allclose(scan, 0.0) + + x1 = f['x1'] + assert x1.shape == (1,) + assert np.all(np.isnan(x1)) + + x2 = f['x2'] + assert x2.shape == (1, 2) + assert np.all(np.isnan(x2)) + + x3 = f['x3'] + assert x3.shape == (1, 2, 2) + assert np.all(np.isnan(x3)) + + +def test_continuous_save_point(c_runinfo, devices): + runinfo = c_runinfo + + expt = ps.Experiment(runinfo, devices) + expt.check_runinfo() + data = expt.runinfo.measure_function(expt) + expt.runinfo.scans[-1].iterate(expt, 0, -1) + expt.preallocate(data) + data =
expt.runinfo.measure_function(expt) + expt.save_point(data) + + assert expt.iteration.shape == (1,) + assert np.allclose(expt.iteration, [0.0]) + + assert expt.x1.shape == (1,) + assert np.allclose(expt.x1, 0.0) + + assert expt.x2.shape == (1, 2) + assert np.allclose(expt.x2, 0.0) + + assert expt.x3.shape == (1, 2, 2) + assert np.allclose(expt.x3, 0.0) + + with h5py.File('./backup/{}'.format(expt.runinfo.file_name + '.hdf5'), 'r') as f: + scan = f['iteration'] + assert scan.shape == (1,) + assert np.allclose(scan, 0.0) + + x1 = f['x1'] + assert x1.shape == (1,) + assert np.allclose(x1, 0.0) + + x2 = f['x2'] + assert x2.shape == (1, 2) + assert np.allclose(x2, 0.0) + + x3 = f['x3'] + assert x3.shape == (1, 2, 2) + assert np.allclose(x3, 0.0) + + +def test_continuous_reallocate(c_runinfo, devices): + runinfo = c_runinfo + + expt = ps.Experiment(runinfo, devices) + expt.check_runinfo() + + data = expt.runinfo.measure_function(expt) + expt.runinfo.scans[-1].iterate(expt, 0, -1) + expt.preallocate(data) + expt.save_point(data) + + expt.runinfo.scans[-1].iterate(expt, 1, 1) + data = expt.runinfo.measure_function(expt) + expt.reallocate(data) + expt.save_point(data) + + assert expt.iteration.shape == (2,) + assert np.allclose(expt.iteration, [0, 1]) + + assert expt.x1.shape == (2,) + assert np.allclose(expt.x1, [0, 1]) + + assert expt.x2.shape == (2, 2) + assert np.allclose(expt.x2, [[0, 0], [1, 1]]) + + assert expt.x3.shape == (2, 2, 2) + assert np.allclose(expt.x3[0], [[0, 0], [0, 0]]) + assert np.allclose(expt.x3[1], [[1, 1], [1, 1]]) + + with h5py.File('./backup/{}'.format(expt.runinfo.file_name + '.hdf5'), 'r') as f: + scan = f['iteration'] + assert scan.shape == (2,) + assert np.allclose(scan, [0, 1]) + + x1 = f['x1'] + assert x1.shape == (2,) + assert np.allclose(x1, [0, 1]) + + x2 = f['x2'] + assert x2.shape == (2, 2) + assert np.allclose(x2, [[0, 0], [1, 1]]) + + x3 = f['x3'] + assert x3.shape == (2, 2, 2) + assert np.allclose(x3[0], [[0, 0], [0, 0]]) + assert np.allclose(x3[1], [[1, 1], [1, 1]]) diff --git a/test/test_measurement/test_experiment/test_continuous_scan_1D.py b/test/test_measurement/test_experiment/test_continuous_scan_1D.py new file mode 100644 index 00000000..df8d183e --- /dev/null +++ b/test/test_measurement/test_experiment/test_continuous_scan_1D.py @@ -0,0 +1,58 @@ +import pyscan as ps +import pytest +import numpy as np + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + runinfo.scan0 = ps.ContinuousScan(n_max=2) + return runinfo + + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +def test_experiment_post_measure_1D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, value, t, shape in [ + ('iteration', np.array([0, 1]), np.ndarray, (2,)), + ('x1', np.array([0.0, 1.0]), np.ndarray, (2,)), + ('x2', np.array([[0.0, 0.0], [1.0, 1.0]]), np.ndarray, (2, 2)), + ('x3', np.array([[[0., 0.], [0., 0.]], [[1., 1.], [1., 1.]]]), np.ndarray, (2, 2, 2))]: + assert hasattr(expt, key), 'Experiment does not have key {}'.format(key) + assert isinstance(expt[key], t), 'Value of {} is not {}'.format(key, t) + assert np.allclose(expt[key], value), 'Value of {} is not {}'.format(key, value) + + +# def 
test_runinfo_contents_post_measure_1D(runinfo, devices): +# expt = ps.Experiment(runinfo, devices) +# expt.run() +# for key, value in [ +# ('measured', ['x1', 'x2', 'x3']), +# ('initial_pause', 0.1), +# ('continuous', False), +# ('has_average_scan', False), +# ('running', False), +# ('_dims', (2,)), +# ('_ndim', 1), +# ('_indicies', [1]), +# ('complete', True)]: +# assert hasattr(expt.runinfo, key), 'RunInfo does not have key {}'.format(key) +# assert expt.runinfo[key] == value, 'Value of {} is not {}'.format(key, value) diff --git a/test/test_measurement/test_experiment/test_experiment_1D.py b/test/test_measurement/test_experiment/test_experiment_1D.py new file mode 100644 index 00000000..da63160e --- /dev/null +++ b/test/test_measurement/test_experiment/test_experiment_1D.py @@ -0,0 +1,118 @@ +import pyscan as ps +import numpy as np +import pytest +from pathlib import PosixPath +from typing import Callable +import re + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage', dt=0) + return runinfo + + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + devices.v2 = ps.TestVoltage() + devices.v3 = ps.TestVoltage() + devices.v4 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +## Test initial experiment +def test_experiment_init(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + for key in ['runinfo', 'devices']: + assert hasattr(expt, key), 'Experiment does not have attribute {}'.format(key) + + +def test_measure_function_returns(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + d = expt.runinfo.measure_function(expt) + for key, value, t, shape in [ + ('x1', 0, int, ()), + ('x2', [0, 0], list, (2,)), + ('x3', [[0, 0], [0, 0]], list, (2, 2))]: + assert isinstance(d, ps.ItemAttribute), 'measure_function does not return ItemAttribute' + assert hasattr(d, key), 'ItemAttribute does not have key {}'.format(key) + assert d[key] == value, 'Value of {} is not {}'.format(key, value) + + +def test_experiment_post_measure_1D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, value, t, shape in [ + ('v1_voltage', np.array([0, 0.1]), np.ndarray, (2,)), + ('x1', np.array([0.0, 1.0]), np.ndarray, (2,)), + ('x2', np.array([[0.0, 0.0], [1.0, 1.0]]), np.ndarray, (2, 2)), + ('x3', np.array([[[0., 0.], [0., 0.]], [[1., 1.], [1., 1.]]]), np.ndarray, (2, 2, 2))]: + assert hasattr(expt, key), 'Experiment does not have key {}'.format(key) + assert isinstance(expt[key], t), 'Value of {} is not {}'.format(key, t) + assert np.allclose(expt[key], value), 'Value of {} is not {}'.format(key, value) + + +## Test experiment after running + +def test_runinfo_post_measure_1D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, value, t, shape in [ + ('v1_voltage', np.array([0, 0.1]), np.ndarray, (2,)), + ('x1', np.array([0.0, 1.0]), np.ndarray, (2,)), + ('x2', np.array([[0.0, 0.0], [1.0, 1.0]]), np.ndarray, (2, 2)), + ('x3', np.array([[[0., 0.], [0., 0.]], [[1., 1.], [1., 1.]]]), np.ndarray, (2, 2, 2))]: + assert hasattr(expt, key), 'Experiment does not have key {}'.format(key) + assert isinstance(expt[key], t), 'Value of {} is not {}'.format(key, t) + assert 
np.allclose(expt[key], value), 'Value of {} is not {}'.format(key, value) + + +def test_runinfo_contents_post_measure_1D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, value in [ + ('measured', ['x1', 'x2', 'x3']), + ('initial_pause', 0.1), + ('_has_continuous_scan', False), + ('has_average_scan', False), + ('running', False), + ('_dims', (2,)), + ('_ndim', 1), + ('_indicies', [1]), + ('complete', True)]: + assert hasattr(expt.runinfo, key), 'RunInfo does not have key {}'.format(key) + assert expt.runinfo[key] == value, 'Value of {} is not {}'.format(key, value) + + +def test_runinfo_types_post_measure_1D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, t in [ + ('measure_function', Callable), + ('scan0', ps.PropertyScan), + ('data_path', PosixPath), + ('_pyscan_version', str), + ('file_name', str)]: + assert hasattr(expt.runinfo, key), 'RunInfo does not have key {}'.format(key) + assert isinstance(expt.runinfo[key], t), 'Value of {} is not {}'.format(key, t) + + +def test_file_name_format(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + assert re.match(r'^\d{8}T\d{6}(-\d+)?$', expt.runinfo.file_name), \ + "runinfo file_name is not properly formatted" diff --git a/test/test_measurement/test_experiment/test_experiment_2D.py b/test/test_measurement/test_experiment/test_experiment_2D.py new file mode 100644 index 00000000..e62b22ee --- /dev/null +++ b/test/test_measurement/test_experiment/test_experiment_2D.py @@ -0,0 +1,82 @@ +import pyscan as ps +import numpy as np +import pytest +from pathlib import PosixPath +from typing import Callable + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage', dt=0) + runinfo.scan1 = ps.PropertyScan({'v2': ps.drange(0, 0.1, 0.2)}, 'voltage', dt=0) + + return runinfo + + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + devices.v2 = ps.TestVoltage() + devices.v3 = ps.TestVoltage() + devices.v4 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +def test_experiment_post_measure_2D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + + for key, value, t, shape in [ + ('v1_voltage', np.array([0, 0.1]), np.ndarray, (2,)), + ('v2_voltage', np.array([0, 0.1, 0.2]), np.ndarray, (3,)), + ('x1', np.array([[0., 0., 0.], [1., 1., 1.]]), np.ndarray, (2, 3)), + ('x2', np.array( + [[[0., 0.], [0., 0.], [0., 0.]], + [[1., 1.], [1., 1.], [1., 1.]]]), np.ndarray, (2, 3, 2)), + ('x3', np.array((list(np.zeros((3, 2, 2))), list(np.ones((3, 2, 2))))), np.ndarray, (2, 3, 2, 2))]: + assert hasattr(expt, key), 'Experiment does not have key {}'.format(key) + assert isinstance(expt[key], t), 'Value of {} is not {}'.format(key, t) + assert np.allclose(expt[key], value), 'Value of {} is not {}'.format(key, value) + + +def test_runinfo_contents_post_measure_2D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, value in [ + ('measured', ['x1', 'x2', 'x3']), + ('initial_pause', 0.1), + ('_has_continuous_scan', False), + ('has_average_scan', False), + ('running', False), + ('_dims', (2, 3)), + ('_ndim', 2), + ('_indicies', [1, 2]), + ('complete', True)]: + assert 
expt.runinfo[key] == value, 'Value of {} is not {}'.format(key, value) + + +def test_runinfo_types_post_measure_2D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, t in [ + ('measure_function', Callable), + ('scan0', ps.PropertyScan), + ('scan1', ps.PropertyScan), + ('data_path', PosixPath), + ('_pyscan_version', str), + ('file_name', str)]: + + assert isinstance(expt.runinfo[key], t), 'Value of {} is not {}'.format(key, t) diff --git a/test/test_measurement/test_experiment/test_experiment_3D.py b/test/test_measurement/test_experiment/test_experiment_3D.py new file mode 100644 index 00000000..5846195f --- /dev/null +++ b/test/test_measurement/test_experiment/test_experiment_3D.py @@ -0,0 +1,81 @@ +import pyscan as ps +import numpy as np +import pytest +from pathlib import PosixPath +from typing import Callable + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage', dt=0) + runinfo.scan1 = ps.PropertyScan({'v2': ps.drange(0, 0.1, 0.2)}, 'voltage', dt=0) + runinfo.scan2 = ps.PropertyScan({'v3': ps.drange(0, 0.1, 0.3)}, 'voltage', dt=0) + return runinfo + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + devices.v2 = ps.TestVoltage() + devices.v3 = ps.TestVoltage() + devices.v4 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +def test_experiment_post_measure_3D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, value, t, shape in [ + ('v1_voltage', np.array([0, 0.1]), np.ndarray, (2,)), + ('v2_voltage', np.array([0, 0.1, 0.2]), np.ndarray, (3,)), + ('v3_voltage', np.array([0, 0.1, 0.2, 0.3]), np.ndarray, (4,)), + ('x1', np.array((list(np.zeros((3, 4))), list(np.ones((3, 4))))), np.ndarray, (2, 3, 4)), + ('x2', np.array((list(np.zeros((3, 4, 2))), list(np.ones((3, 4, 2))))), np.ndarray, (2, 3, 4, 2)), + ('x3', np.array((list(np.zeros((3, 4, 2, 2))), list(np.ones((3, 4, 2, 2))))), np.ndarray, (2, 3, 4, 2, 2))]: + + assert hasattr(expt, key), 'Experiment does not have key {}'.format(key) + assert isinstance(expt[key], t), 'Value of {} is not {}'.format(key, t) + assert np.allclose(expt[key], value), 'Value of {} is not {}'.format(key, value) + assert expt[key].shape == shape, 'Shape of {} is not {}'.format(key, shape) + + +def test_runinfo_contents_post_measure_3D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, value in [ + ('measured', ['x1', 'x2', 'x3']), + ('initial_pause', 0.1), + ('_has_continuous_scan', False), + ('has_average_scan', False), + ('running', False), + ('_dims', (2, 3, 4)), + ('_ndim', 3), + ('_indicies', [1, 2, 3]), + ('complete', True)]: + assert expt.runinfo[key] == value, 'Value of {} is not {}'.format(key, value) + + +def test_runinfo_types_post_measure_3D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + for key, t in [ + ('measure_function', Callable), + ('scan0', ps.PropertyScan), + ('scan1', ps.PropertyScan), + ('scan2', ps.PropertyScan), + ('data_path', PosixPath), + ('_pyscan_version', str), + ('file_name', str)]: + assert isinstance(expt.runinfo[key], t), 'Value of {} is not {}'.format(key, t) diff --git 
a/test/test_measurement/test_experiment/test_function_scan_experiment.py b/test/test_measurement/test_experiment/test_function_scan_experiment.py new file mode 100644 index 00000000..31b57c5c --- /dev/null +++ b/test/test_measurement/test_experiment/test_function_scan_experiment.py @@ -0,0 +1,57 @@ +import pyscan as ps +import numpy as np +import pytest + + + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + + def do_nothing(value): + pass + + runinfo.measure_function = measure_up_to_3D + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage', dt=0) + runinfo.scan1 = ps.FunctionScan(do_nothing, [0, 1, 2], dt=0) + + return runinfo + + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + devices.v2 = ps.TestVoltage() + devices.v3 = ps.TestVoltage() + devices.v4 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +def test_repeat_experiment_2D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + + for key, value, t, shape in [ + ('v1_voltage', np.array([0, 0.1]), np.ndarray, (2,)), + ('do_nothing', np.array([0, 1, 2]), np.ndarray, (3,)), + ('x1', np.array([[0., 0., 0.], [1., 1., 1.]]), np.ndarray, (2, 3)), + ('x2', np.array( + [[[0., 0.], [0., 0.], [0., 0.]], + [[1., 1.], [1., 1.], [1., 1.]]]), np.ndarray, (2, 3, 2)), + ('x3', np.array((list(np.zeros((3, 2, 2))), list(np.ones((3, 2, 2))))), np.ndarray, (2, 3, 2, 2))]: + assert hasattr(expt, key), 'Experiment does not have key {}'.format(key) + assert isinstance(expt[key], t), 'Value of {} is not {}'.format(key, t) + assert np.allclose(expt[key], value), 'Value of {} is not {}'.format(key, value) + assert expt[key].shape == shape, 'Shape of {} is not {}'.format(key, shape) \ No newline at end of file diff --git a/test/test_measurement/test_experiment/test_repeat_scan_experiment.py b/test/test_measurement/test_experiment/test_repeat_scan_experiment.py new file mode 100644 index 00000000..033c8ee8 --- /dev/null +++ b/test/test_measurement/test_experiment/test_repeat_scan_experiment.py @@ -0,0 +1,51 @@ +import pyscan as ps +import numpy as np +import pytest + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, 'voltage', dt=0) + runinfo.scan1 = ps.RepeatScan(3) + + return runinfo + + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + devices.v2 = ps.TestVoltage() + devices.v3 = ps.TestVoltage() + devices.v4 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +def test_repeat_experiment_2D(runinfo, devices): + expt = ps.Experiment(runinfo, devices) + expt.run() + + for key, value, t, shape in [ + ('v1_voltage', np.array([0, 0.1]), np.ndarray, (2,)), + ('repeat', np.array([0, 1, 2]), np.ndarray, (3,)), + ('x1', np.array([[0., 0., 0.], [1., 1., 1.]]), np.ndarray, (2, 3)), + ('x2', np.array( + [[[0., 0.], [0., 0.], [0., 0.]], + [[1., 1.], [1., 1.], [1., 1.]]]), np.ndarray, (2, 3, 2)), + ('x3', np.array((list(np.zeros((3, 2, 2))), list(np.ones((3, 2, 2))))), np.ndarray, (2, 3, 2, 2))]: + 
assert hasattr(expt, key), 'Experiment does not have key {}'.format(key) + assert isinstance(expt[key], t), 'Value of {} is not {}'.format(key, t) + assert np.allclose(expt[key], value), 'Value of {} is not {}'.format(key, value) + assert expt[key].shape == shape, 'Shape of {} is not {}'.format(key, shape) \ No newline at end of file diff --git a/test/general/test_get_version.py b/test/test_measurement/test_get_pyscan_version.py similarity index 80% rename from test/general/test_get_version.py rename to test/test_measurement/test_get_pyscan_version.py index 136ce121..7cb7d615 100644 --- a/test/general/test_get_version.py +++ b/test/test_measurement/test_get_pyscan_version.py @@ -1,4 +1,4 @@ -from pyscan.general.get_pyscan_version import get_pyscan_version +from pyscan.measurement.get_pyscan_version import get_pyscan_version def test_get_version(): diff --git a/test/test_measurement/test_run_info.py b/test/test_measurement/test_run_info.py new file mode 100644 index 00000000..b93e89e2 --- /dev/null +++ b/test/test_measurement/test_run_info.py @@ -0,0 +1,112 @@ +import pyscan as ps +import pytest + + +@pytest.mark.parametrize("key,value", [ + ("measured", []), + ("measure_function", None), + ("initial_pause", 0.1), + ("_pyscan_version", ps.get_pyscan_version()), + ("scans", []), + ("dims", ()), + ("average_dims", ()), + ("dims", ()), + ('ndim', 0), + ('n_average_dim', 0), + ('indicies', ()), + ('average_indicies', ()), + ('average_index', -1), + ('has_average_scan', False)]) +def test_runinfo_init_attributes_and_properties(key, value): + runinfo = ps.RunInfo() + if (value is None) or (value is False): + assert runinfo[key] is value, f"Initialized RunInfo {key} is not {value}" + else: + assert runinfo[key] == value, f"Initialized RunInfo {key} is not {value}" + + +@pytest.mark.parametrize("key,value,t", [ + ("measured", [], list), + ("measure_function", None, None), + ("initial_pause", 0.1, float), + ("_pyscan_version", ps.get_pyscan_version(), str), + ("scan0", '_', ps.PropertyScan), + ("scans", '_', list), + ("dims", (2,), tuple), + ("average_dims", (), tuple), + ("dims", (2,), tuple), + ("ndim", 1, int), + ("n_average_dim", 0, int), + ("indicies", (0,), tuple), + ("average_indicies", (), tuple), + ("average_index", -1, int), + ("has_average_scan", False, bool)]) +def test_runinfo_1D_attributes_and_properties(key, value, t): + runinfo = ps.RunInfo() + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, prop='voltage') + + if value == '_': + pass + elif (value is None) or (value is False): + assert runinfo[key] is value, f"RunInfo 1D {key} is not {value}" + else: + assert runinfo[key] == value, f"RunInfo 1D {key} is not {value}" + + if t is None: + pass + else: + assert isinstance(runinfo[key], t), f"RunInfo 1D {key} is not of type {t}" + + +def test_bad_scan_order(): + runinfo = ps.RunInfo() + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, prop='voltage') + runinfo.scan2 = ps.PropertyScan({'v2': ps.drange(0, 0.1, 0.1)}, prop='voltage') + + with pytest.raises(AssertionError): + runinfo.check() + + +def test_repeat_property_scan(): + runinfo = ps.RunInfo() + runinfo.scan0 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 1)}, prop='voltage') + runinfo.scan1 = ps.PropertyScan({'v1': ps.drange(0, 0.1, 1)}, prop='voltage') + + with pytest.raises(AssertionError): + runinfo.check() + + +def test_multi_repeat(): + runinfo = ps.RunInfo() + runinfo.scan0 = ps.RepeatScan(1) + runinfo.scan1 = ps.RepeatScan(1) + + with pytest.raises(AssertionError): + runinfo.check() + + +def 
test_multi_average(): + runinfo = ps.RunInfo() + runinfo.scan0 = ps.AverageScan(2) + runinfo.scan1 = ps.AverageScan(2) + + with pytest.raises(AssertionError): + runinfo.check() + + +def test_multi_continuous_scan(): + runinfo = ps.RunInfo() + runinfo.scan0 = ps.ContinuousScan(2) + runinfo.scan1 = ps.ContinuousScan(2) + + with pytest.raises(AssertionError): + runinfo.check() + + +def test_low_continuous_scan(): + runinfo = ps.RunInfo() + runinfo.scan0 = ps.ContinuousScan(2) + runinfo.scan1 = ps.RepeatScan(2) + + with pytest.raises(AssertionError): + runinfo.check() diff --git a/test/test_measurement/test_scans/test_average_scan.py b/test/test_measurement/test_scans/test_average_scan.py new file mode 100644 index 00000000..d3119b1e --- /dev/null +++ b/test/test_measurement/test_scans/test_average_scan.py @@ -0,0 +1,75 @@ +import pyscan as ps +import numpy as np +import pytest + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + return runinfo + + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + devices.v2 = ps.TestVoltage() + devices.v3 = ps.TestVoltage() + devices.v4 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +## Test AverageScan +@pytest.fixture +def average_scan(): + return ps.AverageScan(2) + + +@pytest.mark.parametrize('key,value', [ + ('scan_dict', {'average': np.array([0, 1])}), + ('device_names', ['average']), + ('dt', 0), + ('i', 0), + ('n', 2) +]) +def test_average_scan_init(average_scan, key, value): + if key == 'scan_dict': + for key1, value1 in value.items(): + assert np.all(average_scan.scan_dict[key1] == value[key1]), f"Average scan attribute {key} != {value}" + else: + assert average_scan[key] == value, f"Average scan attribute {key} != {value}" + + +def test_average_scan_iterate_m1(runinfo, devices, average_scan): + runinfo.scan0 = average_scan + expt = ps.Experiment(runinfo, devices) + + runinfo.scan0.iterate(expt, 0, -1) + assert expt.runinfo.scan0.i == 0 + + +def test_average_scan_no_iterate(runinfo, devices, average_scan): + runinfo.scan0 = average_scan + expt = ps.Experiment(runinfo, devices) + + runinfo.scan0.iterate(expt, 0, 0) + assert expt.runinfo.scan0.i == 0 + + +def test_average_scan_iterate_one(runinfo, devices, average_scan): + runinfo.scan0 = average_scan + expt = ps.Experiment(runinfo, devices) + + runinfo.scan0.iterate(expt, 1, 1) + assert expt.runinfo.scan0.i == 1 diff --git a/test/test_measurement/test_scans/test_continuous_scan.py b/test/test_measurement/test_scans/test_continuous_scan.py new file mode 100644 index 00000000..7920756f --- /dev/null +++ b/test/test_measurement/test_scans/test_continuous_scan.py @@ -0,0 +1,71 @@ +import pyscan as ps +import numpy as np +import pytest + + +@pytest.fixture() +def runinfo(): + runinfo = ps.RunInfo() + runinfo.measure_function = measure_up_to_3D + return runinfo + + +@pytest.fixture() +def devices(): + devices = ps.ItemAttribute() + devices.v1 = ps.TestVoltage() + return devices + + +def measure_up_to_3D(expt): + d = ps.ItemAttribute() + + d.x1 = expt.runinfo.scan0.i + d.x2 = [d.x1 for _ in range(2)] + d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)] + + return d + + +@pytest.mark.parametrize('key,value', [ + ('scan_dict', {'iteration': np.array([])}), + ('device_names',
+    ('dt', 0),
+    ('i', 0),
+    ('n', 1),
+    ('n_max', 10)
+])
+def test_continuous_scan_init(key, value):
+    continuous_scan = ps.ContinuousScan(n_max=10)
+    if key == 'scan_dict':
+        for key1, value1 in value.items():
+            assert np.all(continuous_scan.scan_dict[key1] == value[key1]), f"Continuous scan attribute {key} != {value}"
+    else:
+        assert continuous_scan[key] == value, f"Continuous scan attribute {key} != {value}"
+
+
+def test_continuous_scan_iterate_m1(runinfo, devices):
+    runinfo.scan0 = ps.ContinuousScan(n_max=10)
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 0, -1)
+    assert expt.runinfo.scan0.i == 0
+    assert expt.runinfo.scan0.n == 1
+
+
+def test_continuous_scan_no_iterate(runinfo, devices):
+    runinfo.scan0 = ps.ContinuousScan(n_max=10)
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 0, 0)
+    assert expt.runinfo.scan0.i == 0
+    assert expt.runinfo.scan0.n == 1
+
+
+def test_continuous_scan_iterate_one(runinfo, devices):
+    runinfo.scan0 = ps.ContinuousScan(n_max=10)
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 1, 1)
+    assert expt.runinfo.scan0.i == 1
+    assert expt.runinfo.scan0.n == 2
diff --git a/test/test_measurement/test_scans/test_function_scan.py b/test/test_measurement/test_scans/test_function_scan.py
new file mode 100644
index 00000000..387c2bb3
--- /dev/null
+++ b/test/test_measurement/test_scans/test_function_scan.py
@@ -0,0 +1,79 @@
+import pyscan as ps
+import numpy as np
+import pytest
+
+
+@pytest.fixture()
+def runinfo():
+    runinfo = ps.RunInfo()
+    runinfo.measure_function = measure_up_to_3D
+    return runinfo
+
+
+@pytest.fixture()
+def devices():
+    devices = ps.ItemAttribute()
+    devices.v1 = ps.TestVoltage()
+    devices.v2 = ps.TestVoltage()
+    devices.v3 = ps.TestVoltage()
+    devices.v4 = ps.TestVoltage()
+    return devices
+
+
+def measure_up_to_3D(expt):
+    d = ps.ItemAttribute()
+
+    d.x1 = expt.runinfo.scan0.i
+    d.x2 = [d.x1 for _ in range(2)]
+    d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)]
+
+    return d
+
+
+## Test FunctionScan
+def do_nothing(value):
+    pass
+
+
+@pytest.fixture
+def function_scan():
+    return ps.FunctionScan(do_nothing, np.array([0, 1]))
+
+
+@pytest.mark.parametrize('key,value', [
+    ('scan_dict', {'do_nothing': np.array([0, 1])}),
+    ('function', do_nothing),
+    ('dt', 0),
+    ('i', 0),
+    ('n', 2)
+])
+def test_function_scan_init(function_scan, key, value):
+    if key == 'scan_dict':
+        for key1, value1 in value.items():
+            assert np.all(function_scan.scan_dict[key1] == value[key1]), f"Function scan attribute {key} != {value}"
+    else:
+        assert function_scan[key] == value, f"Function scan attribute {key} != {value}"
+
+
+def test_function_scan_iterate_m1(runinfo, devices, function_scan):
+    runinfo.scan0 = function_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 0, -1)
+    assert expt.runinfo.scan0.i == 0
+
+
+def test_function_scan_no_iterate(runinfo, devices, function_scan):
+    runinfo.scan0 = function_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 0, 0)
+    assert expt.runinfo.scan0.i == 0
+
+
+def test_function_scan_iterate_one(runinfo, devices, function_scan):
+    runinfo.scan0 = function_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 1, 1)
+    assert expt.runinfo.scan0.i == 1
diff --git a/test/test_measurement/test_scans/test_property_scan.py b/test/test_measurement/test_scans/test_property_scan.py
new file mode 100644
index 00000000..2ec57f95
--- /dev/null
+++ b/test/test_measurement/test_scans/test_property_scan.py
@@ -0,0 +1,78 @@
+import pyscan as ps
+import numpy as np
+import pytest
+
+
+@pytest.fixture()
+def runinfo():
+    runinfo = ps.RunInfo()
+    runinfo.measure_function = measure_up_to_3D
+    return runinfo
+
+
+@pytest.fixture()
+def devices():
+    devices = ps.ItemAttribute()
+    devices.v1 = ps.TestVoltage()
+    devices.v2 = ps.TestVoltage()
+    devices.v3 = ps.TestVoltage()
+    devices.v4 = ps.TestVoltage()
+    return devices
+
+
+def measure_up_to_3D(expt):
+    d = ps.ItemAttribute()
+
+    d.x1 = expt.runinfo.scan0.i
+    d.x2 = [d.x1 for _ in range(2)]
+    d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)]
+
+    return d
+
+
+## Test PropertyScan
+@pytest.fixture
+def property_scan():
+    return ps.PropertyScan({'v1': ps.drange(0, 0.1, 0.1)}, prop='voltage')
+
+
+@pytest.mark.parametrize('key,value', [
+    ('prop', 'voltage'),
+    ('scan_dict', {'v1_voltage': np.array([0, 0.1])}),
+    ('device_names', ['v1']),
+    ('dt', 0),
+    ('i', 0)
+])
+def test_property_scan_init(property_scan, key, value):
+    if key == 'scan_dict':
+        for key1, value1 in value.items():
+            assert np.all(property_scan.scan_dict[key1] == value[key1]), f"Property scan attribute {key} != {value}"
+    else:
+        assert property_scan[key] == value, f"Property scan attribute {key} != {value}"
+
+
+def test_property_scan_iterate_m1(runinfo, devices, property_scan):
+    runinfo.scan0 = property_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 0, -1)
+    assert expt.runinfo.scan0.i == 0
+    assert expt.devices.v1.voltage == 0
+
+
+def test_property_scan_no_iterate(runinfo, devices, property_scan):
+    runinfo.scan0 = property_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 0, 0)
+    assert expt.devices.v1.voltage == 0
+    assert expt.runinfo.scan0.i == 0
+
+
+def test_property_scan_iterate_one(runinfo, devices, property_scan):
+    runinfo.scan0 = property_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 1, 1)
+    assert expt.devices.v1.voltage == 0.1
+    assert expt.runinfo.scan0.i == 1
diff --git a/test/test_measurement/test_scans/test_repeat_scan.py b/test/test_measurement/test_scans/test_repeat_scan.py
new file mode 100644
index 00000000..aaf891db
--- /dev/null
+++ b/test/test_measurement/test_scans/test_repeat_scan.py
@@ -0,0 +1,75 @@
+import pyscan as ps
+import numpy as np
+import pytest
+
+
+@pytest.fixture()
+def runinfo():
+    runinfo = ps.RunInfo()
+    runinfo.measure_function = measure_up_to_3D
+    return runinfo
+
+
+@pytest.fixture()
+def devices():
+    devices = ps.ItemAttribute()
+    devices.v1 = ps.TestVoltage()
+    devices.v2 = ps.TestVoltage()
+    devices.v3 = ps.TestVoltage()
+    devices.v4 = ps.TestVoltage()
+    return devices
+
+
+def measure_up_to_3D(expt):
+    d = ps.ItemAttribute()
+
+    d.x1 = expt.runinfo.scan0.i
+    d.x2 = [d.x1 for _ in range(2)]
+    d.x3 = [[expt.runinfo.scan0.i, expt.runinfo.scan0.i] for _ in range(2)]
+
+    return d
+
+
+## Test RepeatScan
+@pytest.fixture
+def repeat_scan():
+    return ps.RepeatScan(2)
+
+
+@pytest.mark.parametrize('key,value', [
+    ('scan_dict', {'repeat': np.array([0, 1])}),
+    ('device_names', ['repeat']),
+    ('dt', 0),
+    ('i', 0),
+    ('n', 2)
+])
+def test_repeat_scan_init(repeat_scan, key, value):
+    if key == 'scan_dict':
+        for key1, value1 in value.items():
+            assert np.all(repeat_scan.scan_dict[key1] == value[key1]), f"Repeat scan attribute {key} != {value}"
+    else:
+        assert repeat_scan[key] == value, f"Repeat scan attribute {key} != {value}"
+
+
+def test_repeat_scan_iterate_m1(runinfo, devices, repeat_scan):
+    runinfo.scan0 = repeat_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 0, -1)
+    assert expt.runinfo.scan0.i == 0
+
+
+def test_repeat_scan_no_iterate(runinfo, devices, repeat_scan):
+    runinfo.scan0 = repeat_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 0, 0)
+    assert expt.runinfo.scan0.i == 0
+
+
+def test_repeat_scan_iterate_one(runinfo, devices, repeat_scan):
+    runinfo.scan0 = repeat_scan
+    expt = ps.Experiment(runinfo, devices)
+
+    runinfo.scan0.iterate(expt, 1, 1)
+    assert expt.runinfo.scan0.i == 1