diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..97e598f --- /dev/null +++ b/.gitignore @@ -0,0 +1,39 @@ +*.py[cod] + +# Text editor caches and backups +*.swp +*~ + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox +nosetests.xml + +# Translations +*.mo + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject diff --git a/python/auto_setup/__init__.py b/python/auto_setup/__init__.py index 6377561..f0f5175 100644 --- a/python/auto_setup/__init__.py +++ b/python/auto_setup/__init__.py @@ -1,14 +1,15 @@ +from __future__ import absolute_import # -- Handy ruler ------------------------------------------------------| -import idl_compat -import util -import config +from . import idl_compat +from . import util +from . import config -from main import auto_setup -import series_array, sq2_servo, sq1_servo, frame_test +from .main import auto_setup +from . import series_array, sq2_servo, sq1_servo, frame_test -from series_array import SARamp -from sq2_servo import SQ2Servo -from sq1_servo import SQ1Servo, SQ1ServoSA -from sq1_ramp import SQ1Ramp, SQ1RampTes -from rs_servo import RSServo +from .series_array import SARamp +from .sq2_servo import SQ2Servo +from .sq1_servo import SQ1Servo, SQ1ServoSA +from .sq1_ramp import SQ1Ramp, SQ1RampTes +from .rs_servo import RSServo diff --git a/python/auto_setup/config/__init__.py b/python/auto_setup/config/__init__.py index 1b0a3f5..400de88 100644 --- a/python/auto_setup/config/__init__.py +++ b/python/auto_setup/config/__init__.py @@ -1,10 +1,11 @@ +from __future__ import absolute_import __all__ = ["get_exp_config", "get_exp_param", "set_exp_param", "set_exp_param_range"] -from get_exp_config import get_exp_config -from get_exp_config import get_exp_param -from get_exp_config import set_exp_param -from get_exp_config import set_exp_param_range -from get_exp_config import mas_param -from get_exp_config import configFile -from get_exp_config import get_fake_expt #transitional +from .get_exp_config import get_exp_config +from .get_exp_config import get_exp_param +from .get_exp_config import set_exp_param +from .get_exp_config import set_exp_param_range +from .get_exp_config import mas_param +from .get_exp_config import configFile +from .get_exp_config import get_fake_expt #transitional diff --git a/python/auto_setup/config/get_exp_config.py b/python/auto_setup/config/get_exp_config.py index c124a4b..8d750b0 100644 --- a/python/auto_setup/config/get_exp_config.py +++ b/python/auto_setup/config/get_exp_config.py @@ -1,3 +1,6 @@ +from __future__ import print_function +from builtins import str +from builtins import map import numpy import subprocess @@ -57,9 +60,9 @@ def mas_param(file, key, type=None, no_singles=False): stdout=subprocess.PIPE); value = p.communicate()[0]; status = p.wait(); - except OSError, (errno, strerror): - print "Failed to load parameter " + key + \ - "\n[Errno {0}] {1}".format(errno, strerror) + except OSError as e: + print("Failed to load parameter " + key + \ + "\n[Errno {0}] {1}".format(e.errno, e.strerror)) return None if (status): @@ -162,18 +165,18 @@ def get_param_descriptions(file): value, err_text = p.communicate() if p.wait() != 0: raise OSError(1, err_text) - except OSError, (errno, strerror): - print "Failed to get parameter info table from mas_param\n" \ - "[Errno {0}] {1}".format(errno, strerror) + except OSError as e: + 
print("Failed to get parameter info table from mas_param\n" \ + "[Errno {0}] {1}".format(e.errno, e.strerror)) return None params = [] info = {} for line in value.split('\n'): - w = map(str.strip, line.split(':')) + w = list(map(str.strip, line.split(':'))) if len(w) == 0 or len(w[0]) == 0 or w[0][0] == '#': continue if len(w) != 4: - raise RuntimeError, "Failed to parse mas_param info line %s" % line + raise RuntimeError("Failed to parse mas_param info line %s" % line) name, dtype, arrayness, size = w[0], w[1], w[2]=='array', int(w[3]) params.append(name) info[name] = {'type': dtype, @@ -215,7 +218,7 @@ def read_param(self, name): # Numerical data are space-delimited val = val.split() if desc['is_array']: - val = numpy.array(map(cast, val)) + val = numpy.array(list(map(cast, val))) else: val = cast(val[0]) # Update internal data and return @@ -236,7 +239,7 @@ def get_param(self, name, missing_ok=False, default=None): if not name in self: if missing_ok or default is not None: return default - raise ValueError, "key '%s' not found in config file." % name + raise ValueError("key '%s' not found in config file." % name) if hasattr(self[name], '__copy__'): # Don't expose references to mutable objects (arrays) return self[name].copy() @@ -260,7 +263,7 @@ def write(self): def merge_from(self, filename, clobber=False, verbose=False): src = configFile(filename) - for k in src.keys(): + for k in list(src.keys()): if (not k in self) or clobber: self[k] = src[k] self.info[k] = src.info[k] @@ -270,7 +273,7 @@ def merge_from(self, filename, clobber=False, verbose=False): @staticmethod def compare(c1, c2): results = [] - c2k = [x for x in c2.keys()] + c2k = [x for x in list(c2.keys())] for k in sorted(c1.keys()): if not k in c2k: results.append((k, 'left only')) @@ -286,11 +289,11 @@ def compare(c1, c2): def get_fake_expt(filename): - print """ + print(""" Creating configFile based on hard-coded experiment.cfg parameters... Please upgrade mas_param to support "info" dumping. Thanks. -""" +""") e = configFile(filename, read=False) names, info = [], {} for dtype in ['string', 'float', 'integer']: @@ -314,9 +317,9 @@ def get_fake_expt(filename): # Unit test... 
fn1 = mas_path().data_dir() + '/experiment.cfg' fn2 = './experiment.cfg' - print 'load' + print('load') c1 = configFile(fn1) c2 = configFile(fn2) - print 'scan' + print('scan') for x in c1.compare(c1, c2): - print x + print(x) diff --git a/python/auto_setup/frame_test.py b/python/auto_setup/frame_test.py index b462e1d..ebae5d1 100644 --- a/python/auto_setup/frame_test.py +++ b/python/auto_setup/frame_test.py @@ -1,14 +1,18 @@ +from __future__ import print_function +from builtins import str +from builtins import range import os, time from mce_data import MCEFile, MCERunfile from numpy import * import auto_setup.idl_compat as idl_compat import auto_setup.util as util import auto_setup.servo as servo +from functools import reduce def go(tuning, rc, filename=None): ok, frametest = acquire(tuning, rc, filename=filename) if not ok: - raise RuntimeError, frametest['error'] + raise RuntimeError(frametest['error']) lockflags = reduce(tuning, frametest) plot(tuning, frametest, lockflags) @@ -63,7 +67,7 @@ def reduce(tuning, frametest): # Read data preserving rows/cols dimensioning mcefile = MCEFile(datafile) - print mcefile.data_mode + print(mcefile.data_mode) data = mcefile.Read(field='error',row_col=True) data = data.data n_cols = data.shape[1] diff --git a/python/auto_setup/idl_compat/__init__.py b/python/auto_setup/idl_compat/__init__.py index e83adff..58f2c50 100644 --- a/python/auto_setup/idl_compat/__init__.py +++ b/python/auto_setup/idl_compat/__init__.py @@ -3,8 +3,9 @@ These routines recreate IDL library routines where native Python or NumPy/SciPy routines are inadequate. """ +from __future__ import absolute_import __all__ = ["deriv", "smooth"] -from deriv import deriv -from smooth import smooth +from .deriv import deriv +from .smooth import smooth diff --git a/python/auto_setup/idl_compat/deriv.py b/python/auto_setup/idl_compat/deriv.py index fc8e8b8..6340d18 100644 --- a/python/auto_setup/idl_compat/deriv.py +++ b/python/auto_setup/idl_compat/deriv.py @@ -1,3 +1,5 @@ +from __future__ import division +from past.utils import old_div import numpy def deriv(x, y=None): @@ -8,7 +10,7 @@ def deriv(x, y=None): returns an array of the same length as its input. """ if (x is None and y is None): - raise TypeError, "No data." + raise TypeError("No data.") if (y is None): y = x x = numpy.arange(y.size) @@ -17,7 +19,7 @@ def deriv(x, y=None): n = x.size if (n != y.size): - raise TypeError, "x and y must be the same size" + raise TypeError("x and y must be the same size") #floatify xx = x.astype("float") @@ -29,8 +31,8 @@ def deriv(x, y=None): d = numpy.empty([n], dtype="float") # middle points - d = numpy.roll(y,1) * (x12 / (x01 * x02)) + y * (1. / x12 - 1. / x01) \ - - numpy.roll(y,-1) * (x01 / (x02 * x12)) + d = numpy.roll(y,1) * (old_div(x12, (x01 * x02))) + y * (old_div(1., x12) - old_div(1., x01)) \ + - numpy.roll(y,-1) * (old_div(x01, (x02 * x12))) # formulae for the first and last points: d[0] = y[0] * (x01[1] + x02[1]) / (x01[1] * x02[1]) - y[1] * x02[1] \ diff --git a/python/auto_setup/idl_compat/smooth.py b/python/auto_setup/idl_compat/smooth.py index b2ac015..d419b21 100644 --- a/python/auto_setup/idl_compat/smooth.py +++ b/python/auto_setup/idl_compat/smooth.py @@ -1,3 +1,5 @@ +from __future__ import division +from past.utils import old_div import numpy def smooth(a, w): @@ -7,10 +9,10 @@ def smooth(a, w): something slightly less lame with the end points. 
""" if (a.ndim != 1): - raise ValueError, "Input must be single dimensional" + raise ValueError("Input must be single dimensional") if (a.size < w): - raise ValueError, "Input must be longer than the smoothing kernel" + raise ValueError("Input must be longer than the smoothing kernel") # the trivial case if (w < 3): @@ -24,6 +26,6 @@ def smooth(a, w): s = numpy.r_[2 * a[0] - a[w:1:-1], a, 2 * a[-1]-a[-1:-w:-1]] # perform the convolution - y = numpy.convolve(numpy.ones(w,'d')/w, s, mode='same') + y = numpy.convolve(old_div(numpy.ones(w,'d'),w), s, mode='same') return y[w-1:-w+1] diff --git a/python/auto_setup/main.py b/python/auto_setup/main.py index a316818..cf7dc7b 100644 --- a/python/auto_setup/main.py +++ b/python/auto_setup/main.py @@ -1,11 +1,17 @@ +from __future__ import division +from __future__ import print_function +from __future__ import absolute_import +from builtins import zip +from builtins import str +from past.utils import old_div # vim: ts=4 sw=4 et -import util -import series_array -import sq2_servo -import sq1_servo -import sq1_ramp -import frame_test -import mux11d +from . import util +from . import series_array +from . import sq2_servo +from . import sq1_servo +from . import sq1_ramp +from . import frame_test +from . import mux11d import os import time @@ -36,7 +42,7 @@ def do_init(tuning, rcs, check_bias, ramp_sa_bias, note): for c in check_rcs: exit_status = tuning.run(["check_zero", "rc%i" % (c), "sa_bias"]) if (exit_status > 8): - print "check_zero failed with code", exit_status + print("check_zero failed with code", exit_status) on_bias += exit_status # experiment.cfg setting may force a ramp_sa_bias. @@ -76,7 +82,7 @@ def do_init(tuning, rcs, check_bias, ramp_sa_bias, note): # thermalisation if (check_bias and on_bias == 0): - print "Waiting for thermalization." 
+ print("Waiting for thermalization.") time.sleep(tuning.get_exp_param('tuning_therm_time')) return {'ramp_sa_bias': ramp_sa_bias, 'sq1_bias': sq1_bias, @@ -161,7 +167,7 @@ def do_sa_ramp(tuning, rc, rc_indices, ramp_sa_bias=False, write_default=False): ok, ramp_data = series_array.acquire(tuning, rc, do_bias=ramp_sa_bias) if not ok: - raise RuntimeError, ramp_data['error'] + raise RuntimeError(ramp_data['error']) sa = series_array.SARamp(ramp_data['filename'], tuning=tuning) bias_ramp = sa.bias_style == 'ramp' @@ -242,7 +248,7 @@ def do_sq2_servo(tuning, rc, rc_indices, write_default=False): tuning.write_config() ok, servo_data = sq2_servo.acquire(tuning, rc) if not ok: - raise RuntimeError, servo_data['error'] + raise RuntimeError(servo_data['error']) sq = sq2_servo.SQ2Servo(servo_data['filename'], tuning=tuning) bias_ramp = sq.bias_style == 'ramp' @@ -321,7 +327,7 @@ def do_sq1_servo(tuning, rc, rc_indices, write_default=False): ok, servo_data = sq1_servo.acquire(tuning, rc, super_servo=super_servo) if not ok: - raise RuntimeError, servo_data['error'] + raise RuntimeError(servo_data['error']) sq = sq1_servo.SQ1Servo(servo_data['filename'], tuning=tuning) bias_ramp = sq.bias_style == 'ramp' @@ -402,8 +408,8 @@ def do_sq1_ramp(tuning, rcs, init=True, ramp_check=False): for rc in rcs: ok, info = sq1_ramp.acquire(tuning, rc, check=ramp_check) if not ok: - raise RuntimeError, 'sq1ramp failed for rc%s (%s)' % \ - (str(rc), info['error']) + raise RuntimeError('sq1ramp failed for rc%s (%s)' % \ + (str(rc), info['error'])) ramps.append(sq1_ramp.SQ1Ramp(info['filename'])) # Join into single data/analysis object @@ -428,7 +434,7 @@ def do_sq1_ramp(tuning, rcs, init=True, ramp_check=False): # Sometimes we don't actually want to change anything if not ramp_check: # Note that adc_offset_cr needs to be corrected for samp_num - tuning.set_exp_param('adc_offset_cr', adc/samp_num) + tuning.set_exp_param('adc_offset_cr', old_div(adc,samp_num)) tuning.set_exp_param('config_adc_offset_all', 1) tuning.write_config() @@ -458,8 +464,8 @@ def do_sq1_ramp_tes(tuning, rcs, init=True): for rc in rcs: ok, info = sq1_ramp.acquire_tes_ramp(tuning, rc) if not ok: - raise RuntimeError, 'sq1ramp failed for rc%s (%s)' % \ - (str(rc), info['error']) + raise RuntimeError('sq1ramp failed for rc%s (%s)' % \ + (str(rc), info['error'])) ramps.append(sq1_ramp.SQ1RampTes(info['filename'])) # Join into single data/analysis object @@ -489,12 +495,12 @@ def operate(tuning): tuning.copy_exp_param('default_%s'%param, param) # Compile dead detector mask - print "Assembling dead detector mask." + print("Assembling dead detector mask.") mask = util.get_all_dead_masks(tuning, union=True) if mask is not None: tuning.set_exp_param("dead_detectors", mask.data.transpose().reshape(-1)) - print "Assembling frail detector mask." + print("Assembling frail detector mask.") mask = util.get_all_dead_masks(tuning, union=True, frail=True) if mask is not None: tuning.set_exp_param("frail_detectors", mask.data.transpose().reshape(-1)) @@ -511,7 +517,7 @@ def frametest_check(tuning, rcs, row, column): # Permit row override, or else take it from config if (row is None): row = 9 - print "Row = %i is used for frametest_plot by default." % row + print("Row = %i is used for frametest_plot by default." 
% row) if (len(rcs) < 4): for rc in rcs: @@ -537,8 +543,8 @@ def auto_setup(rcs=None, check_bias=None, short=False, ramp_sa_bias=None, IDL auto_setup_squids.""" tuning = util.tuningData(data_dir=data_dir, reg_note=reg_note, debug=debug) - print 'Tuning ctime: %i' % tuning.the_time - print 'Tuning date : ' + tuning.date + print('Tuning ctime: %i' % tuning.the_time) + print('Tuning date : ' + tuning.date) # Create data and analysis directories tuning.make_dirs() @@ -549,7 +555,7 @@ def auto_setup(rcs=None, check_bias=None, short=False, ramp_sa_bias=None, # set rc list, if necessary if (rcs is None): - print " Tuning all available RCs." + print(" Tuning all available RCs.") rcs = ['s'] # default parameters @@ -614,12 +620,11 @@ def auto_setup(rcs=None, check_bias=None, short=False, ramp_sa_bias=None, stages.remove('sq1_ramp_check') if len(rcs) != 1: - raise RuntimeError, \ - "Tuning of misc. RCs no longer supported. Pick a single RC, or use RCS." + raise RuntimeError("Tuning of misc. RCs no longer supported. Pick a single RC, or use RCS.") else: # This indentation left in for clarity of commit diff. Remove me some day. c = rcs[0] - print "Processing rc%s" % str(c) + print("Processing rc%s" % str(c)) if c == 's': rc_indices = array(tuning.column_list()) else: @@ -671,7 +676,7 @@ def auto_setup(rcs=None, check_bias=None, short=False, ramp_sa_bias=None, if 'frametest' in stages: # lock check - print 'frametest_check to-be-implemented...' + print('frametest_check to-be-implemented...') #frametest_check(tuning, rcs, row, column) if 'operate' in stages: @@ -684,5 +689,5 @@ def auto_setup(rcs=None, check_bias=None, short=False, ramp_sa_bias=None, shutil.copy2(tuning.exp_file, tuning.data_dir) t_elapsed = time.time() - tuning.the_time - print "Tuning complete. Time elapsed: %i seconds." % t_elapsed + print("Tuning complete. Time elapsed: %i seconds." % t_elapsed) return 0 diff --git a/python/auto_setup/mux11d.py b/python/auto_setup/mux11d.py index 10807f7..0da9bf7 100644 --- a/python/auto_setup/mux11d.py +++ b/python/auto_setup/mux11d.py @@ -1,14 +1,19 @@ """ __main__ style stuff for mux11d tuning. """ - -import util -import series_array -import sq2_servo -import sq1_servo -import sq1_ramp -import rs_servo -import frame_test +from __future__ import print_function +from __future__ import absolute_import +from builtins import str +from builtins import zip +from builtins import range + +from . import util +from . import series_array +from . import sq2_servo +from . import sq1_servo +from . import sq1_ramp +from . import rs_servo +from . import frame_test import os import time @@ -46,47 +51,47 @@ def do_init_mux11d(tuning, tune_data): len(mux11d_mux_order) == \ len(tuning.get_exp_param('row_select',missing_ok=False)) == \ len(tuning.get_exp_param('row_deselect',missing_ok=False)) ): - print """!!! Warning : hybrid muxing w/ different lengths for at least one of + print("""!!! Warning : hybrid muxing w/ different lengths for at least one of row_order, mux11d_mux_order, row_select, and row_deselect in - experiment.cfg. Proceed at your own risk!""" + experiment.cfg. 
Proceed at your own risk!""") # Throw an exception if a card is in mux11d_row_select_cards 2x if len(mux11d_row_select_cards)!=len(set(mux11d_row_select_cards)): - raise ValueError, """trying to hybrid mux but there are duplicate entries in mux11d_row_select_cards - not supported.""" + raise ValueError("""trying to hybrid mux but there are duplicate entries in mux11d_row_select_cards - not supported.""") if len(mux11d_row_select_cards_row0)!=len(set(mux11d_row_select_cards_row0)): - raise ValueError, """trying to hybrid mux but there are duplicate entries in mux11d_row_select_cards_row0 - not supported.""" + raise ValueError("""trying to hybrid mux but there are duplicate entries in mux11d_row_select_cards_row0 - not supported.""") # Make sure cards in mux11d_row_select_cards are doable if not set(mux11d_row_select_cards).issubset(['ac','bc1','bc2','bc3']): - raise ValueError, """can't mux on one of the cards in + raise ValueError("""can't mux on one of the cards in mux11d_row_select_cards; can only mux on - ['ac','bc1','bc2','bc3']""" + ['ac','bc1','bc2','bc3']""") # Some functionality right now in mas requires the row0s be in strictly ascending order if not list(mux11d_row_select_cards_row0)==sorted(list(mux11d_row_select_cards_row0)): - raise ValueError, """trying to hybrid mux but the entries in mux11d_row_select_cards_row0 must be in strictly ascending order.""" + raise ValueError("""trying to hybrid mux but the entries in mux11d_row_select_cards_row0 must be in strictly ascending order.""") if len(mux11d_row_select_cards)!=len(mux11d_row_select_cards_row0): - raise ValueError, """len(mux11d_row_select_cards)!=len(mux11d_row_select_cards_row0) : every card being - hybrid muxed must be assigned a row0""" + raise ValueError("""len(mux11d_row_select_cards)!=len(mux11d_row_select_cards_row0) : every card being + hybrid muxed must be assigned a row0""") # Make sure user didn't request a starting row0 for a card in another card's block of RSes. card_nrs_dict={ 'ac' : 41, 'bc1' : 32, 'bc2' : 32, 'bc3' : 32 } for (r0,card) in zip(mux11d_row_select_cards_row0,mux11d_row_select_cards): - if len(list(set(mux11d_row_select_cards_row0).intersection(range(r0,r0+card_nrs_dict[card]))))>1: - raise ValueError, """trying to hybrid mux but overlap detected between RS blocks. Make sure row0's for cards are - correctly spaced in mux11d_row_select_cards_row0.""" + if len(list(set(mux11d_row_select_cards_row0).intersection(list(range(r0,r0+card_nrs_dict[card])))))>1: + raise ValueError("""trying to hybrid mux but overlap detected between RS blocks. 
Make sure row0's for cards are + correctly spaced in mux11d_row_select_cards_row0.""") # Make sure there's no RS requested outside of a valid RS block for rs in mux11d_mux_order: if not any([rs in card_rs_range for card_rs_range in - [range(r0,r0+card_nrs_dict[card]) for (r0,card) in + [list(range(r0,r0+card_nrs_dict[card])) for (r0,card) in zip(mux11d_row_select_cards_row0,mux11d_row_select_cards)]]): - raise ValueError, """trying to hybrid mux but + raise ValueError("""trying to hybrid mux but requested rs=%d in mux11d_mux_order, but that rs - doesn't fall into any defined hybrid rs block!"""%(rs) + doesn't fall into any defined hybrid rs block!"""%(rs)) # Make sure ac idle row is in AC's RS block (ac idle row is indexed from zero, so it's native to ac row_order) - if mux11d_ac_idle_row not in range(0,41): - raise ValueError, """trying to hybrid mux but - mux11d_ac_idle_row must be in [0:40]!""" + if mux11d_ac_idle_row not in list(range(0,41)): + raise ValueError("""trying to hybrid mux but + mux11d_ac_idle_row must be in [0:40]!""") # Build ac row_order for experiment.cfg from mux11d_mux_order hybrid_ac_row_order=[] @@ -113,10 +118,10 @@ def do_init_mux11d(tuning, tune_data): # Make sure row_order is consistent with mux11d_mux_order if any([hro!=ro for (hro,ro) in zip(hybrid_ac_row_order,row_order)]): - print """!!! Warning : hybrid muxing but row_order in + print("""!!! Warning : hybrid muxing but row_order in experiment.cfg not consistent with mux11d_mux_order. Will - overwrite row_order based on mux11d_mux_order.""" - print """!!! hybrid_ac_row_order=',hybrid_ac_row_order""" + overwrite row_order based on mux11d_mux_order.""") + print('!!! hybrid_ac_row_order=', hybrid_ac_row_order) tuning.set_exp_param('row_order', hybrid_ac_row_order) # Done w/ special hybrid-mux only setup @@ -193,13 +198,13 @@ def do_rs_servo(tuning, rc, rc_indices): tuning.write_config() if len(rc) != 1: - raise ValueError, "this module does not support weird multi-rc tunes" + raise ValueError("this module does not support weird multi-rc tunes") ok, servo_data = acquire(tuning, rc[0], action_name='rsservo', bin_name='rs_servo') if not ok: - raise RuntimeError, servo_data['error'] + raise RuntimeError(servo_data['error']) sq = rs_servo.RSServo(servo_data['filename'], tuning=tuning) bias_ramp = sq.bias_style == 'ramp' @@ -282,13 +287,13 @@ def do_sq1_servo_sa(tuning, rc, rc_indices): tuning.write_config() if len(rc) != 1: - raise ValueError, "this module does not support weird multi-rc tunes" + raise ValueError("this module does not support weird multi-rc tunes") ok, servo_data = acquire(tuning, rc[0], action_name='sq1servo_sa', bin_name='sq1servo_sa') if not ok: - raise RuntimeError, servo_data['error'] + raise RuntimeError(servo_data['error']) sq = sq1_servo.SQ1ServoSA(servo_data['filename'], tuning=tuning) bias_ramp = sq.bias_style == 'ramp' diff --git a/python/auto_setup/rs_servo.py b/python/auto_setup/rs_servo.py index 0042432..a6e91ca 100644 --- a/python/auto_setup/rs_servo.py +++ b/python/auto_setup/rs_servo.py @@ -1,3 +1,8 @@ +from __future__ import division +from __future__ import absolute_import +from builtins import zip +from builtins import range +from past.utils import old_div # vim: ts=4 sw=4 et import time, os, glob import biggles @@ -6,13 +11,13 @@ import numpy as np from mce_data import MCERunfile, MCEFile -import servo +from .
import servo def go(tuning, rc, filename=None): ok, servo_data = acquire(tuning, rc, filename=filename) if not ok: - raise RuntimeError, servo_data['error'] + raise RuntimeError(servo_data['error']) sq = RSServo(servo_data['filename'], tuning=tuning) lock_points = sq.reduce() @@ -74,7 +79,7 @@ def __init__(self, filename=None, tuning=None): mux11d_mux_order=self.tuning.get_exp_param('mux11d_mux_order',missing_ok=False) for rs in mux11d_mux_order: for (c,cr) in \ - [(card,range(r0,r0+card_nrs_dict[card])) for \ + [(card,list(range(r0,r0+card_nrs_dict[card]))) for \ (r0,card) in \ zip(mux11d_row_select_cards_row0,mux11d_row_select_cards)]: if rs in cr: @@ -171,7 +176,7 @@ def reduce1(self, slope=None): elif self.bias_style == 'ramp': (bias,row,col)=np.unravel_index(idx,self.data_shape[:-1],order='C') else: - raise RuntimeError, 'Unable to unravel by row.' + raise RuntimeError('Unable to unravel by row.') if ok_if_rs_on_upper_bndry is None or ok_if_rs_on_upper_bndry[row]==0: # Throw out ramps (max->min and min->max) @@ -416,11 +421,11 @@ def plot(self, plot_file=None, format=None, data_attr=None): for _, r, ax in pl: if r >= plot_shape[1]: continue - x = self.fb/1000. - for i in xrange(ncurves): - ax.add(biggles.Curve(x, data[r,i]/1000.)) + x = old_div(self.fb,1000.) + for i in range(ncurves): + ax.add(biggles.Curve(x, old_div(data[r,i],1000.))) if 'lock_x' in an: - ax.add(biggles.LineX(an['lock_x'][r] / 1000., type='dashed')) + ax.add(biggles.LineX(old_div(an['lock_x'][r], 1000.), type='dashed')) return { 'plot_files': pl.plot_files, diff --git a/python/auto_setup/series_array.py b/python/auto_setup/series_array.py index 4c8481f..96b0566 100644 --- a/python/auto_setup/series_array.py +++ b/python/auto_setup/series_array.py @@ -1,3 +1,7 @@ +from __future__ import division +from builtins import str +from builtins import range +from past.utils import old_div # This is a semitranslation of the IDL auto_setup_squids program. The # intent is to separate that program into three broad parts: # @@ -21,7 +25,7 @@ def go(tuning, rc, filename=None, do_bias=None, slope=None): ok, ramp_data = acquire(tuning, rc, filename=filename, do_bias=do_bias) if not ok: - raise RuntimeError, ramp_data['error'] + raise RuntimeError(ramp_data['error']) sa = SARamp(ramp_data['filename'], tuning=tuning) if sa.bias_style == 'ramp': @@ -167,14 +171,14 @@ def reduce2(self, slope=None, x_adjust=None): # Analyze all SA curves for lock-points an = servo.get_lock_points(y, start=0, slope=slope, - x_adjust=x_adjust/self.d_fb) + x_adjust=old_div(x_adjust,self.d_fb)) # Add feedback keys, with shift to counteract smoothing for k in ['lock', 'left', 'right']: an[k+'_x'] = self.fb[an[k+'_idx'] + scale] # Measure flux quantum - width = self.data.shape[-1] / 4 + width = old_div(self.data.shape[-1], 4) phi0 = servo.period(self.data, width=width) # Convert position, slope, phi0 to feedback units. 
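
[Review note -- annotation, not part of the patch] Nearly all of the arithmetic rewrites in this patch are futurize's old_div() transformation, which preserves Python 2 division semantics once "from __future__ import division" takes effect: floor division when both operands are scalar integers, true division otherwise. A minimal sketch of the behavior (the values here are illustrative, not taken from the tuning code):

    from __future__ import division
    from past.utils import old_div
    import numpy as np

    assert 7 / 2 == 3.5           # true division is now the default
    assert old_div(7, 2) == 3     # scalar ints: floor division, as on Python 2
    assert old_div(7., 2) == 3.5  # any float operand: true division, as on Python 2

    # Caveat: old_div() special-cases only scalar Integral operands. NumPy
    # integer *arrays* fall through to true division and come back as floats,
    # whereas Python 2 floor-divided them. Array index arithmetic is therefore
    # safer with the // operator, which behaves identically on 2 and 3:
    idx = (np.array([3, 4]) + np.array([4, 5])) // 2   # -> array([3, 4])

This caveat is why the lock_idx midpoint in servo.py below is written with // rather than old_div(); the other array-valued old_div() sites in the patch deserve a similar audit.
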
diff --git a/python/auto_setup/servo.py b/python/auto_setup/servo.py index 57b26d8..30c4e94 100644 --- a/python/auto_setup/servo.py +++ b/python/auto_setup/servo.py @@ -1,3 +1,9 @@ +from __future__ import division +from __future__ import print_function +from builtins import zip +from builtins import range +from past.builtins import basestring +from past.utils import old_div import auto_setup.util as util from numpy import * import numpy as np @@ -7,7 +13,7 @@ def smooth(x, scale): s = x.shape x.shape = (-1, s[-1]) - y = array([convolve(xx, [1]*scale, mode='valid') for xx in x]) / scale + y = old_div(array([convolve(xx, [1]*scale, mode='valid') for xx in x]), scale) x.shape = s y.shape = s[:-1] + (y.shape[-1],) return y @@ -49,7 +55,7 @@ def get_curve_regions(y, extremality=0.8, y0, dy = 0.5*(y1+y0), 0.5*(y1-y0) if dy == 0: dy = 1 - y = (y - y0)/dy # Now in [-1,+1] + y = old_div((y - y0),dy) # Now in [-1,+1] # Identify samples in extreme regions. n = len(y) hi = hstack(((y > extremality).nonzero()[0], n)) @@ -75,7 +81,7 @@ def get_curve_regions(y, extremality=0.8, idx = hi[0] def pairify(): - return zip(transitions, transitions[1:]+[len(y)]) + return list(zip(transitions, transitions[1:]+[len(y)])) if extrema: return pairify()[::2] @@ -100,14 +106,14 @@ def get_lock_points(y, scale=5, lock_amp=False, slope=1., # By default, we characterize the extrema ignoring the beginning # of the curve, since the servo may still be settling. if start is None: - start = y.shape[1]/8 + start = old_div(y.shape[1],8) if stop is None: stop = y.shape[1] y1, y0 = y[:,start:stop].max(axis=1).astype('float'), \ y[:,start:stop].min(axis=1).astype('float') - mids = ((y1+y0)/2).reshape(-1,1) - amps = ((y1-y0)/2).reshape(-1,1) + mids = (old_div((y1+y0),2)).reshape(-1,1) + amps = (old_div((y1-y0),2)).reshape(-1,1) amps[amps==0] = 1. # Copy data, rescaled to +-1 and corrected for slope. @@ -145,8 +151,8 @@ def get_lock_points(y, scale=5, lock_amp=False, slope=1., # Lock mid-way in y or x? if lock_amp: # y - target = array([yy[a] + yy[b] for yy,a,b in \ - zip(y, i_left, i_right)]) / 2 + target = old_div(array([yy[a] + yy[b] for yy,a,b in \ + zip(y, i_left, i_right)]), 2) lock_idx = array([a + argmin(abs(yy[a:b+1]-t)) for \ a,b,t,yy in zip(i_left, i_right, target, y)]) \ .astype('int') @@ -160,7 +166,7 @@ def get_lock_points(y, scale=5, lock_amp=False, slope=1., min_index=i_left, max_index=i_right) lock_y = array([yy[i] for i,yy in zip(lock_idx, y)]) else: # x - lock_idx = (i_left + i_right)/2 + lock_idx = (i_left + i_right)//2 if x_adjust is not None: lock_idx += x_adjust lock_slope, lock_y = get_slopes(y, lock_idx, intercept='y', @@ -206,8 +212,8 @@ def get_slopes(data, index, n_points=5, min_index=None, max_index=None, fits = [] for d, i, lo, hi in zip(data, index, min_index, max_index): - sl_idx = arange(max(lo, i-n_points/2), - min(hi, i+(n_points+1)/2)) + sl_idx = arange(max(lo, i-old_div(n_points,2)), + min(hi, i+old_div((n_points+1),2))) if len(sl_idx) < 2: fits.append([0.,0.]) else: @@ -218,10 +224,10 @@ def get_slopes(data, index, n_points=5, min_index=None, max_index=None, if intercept == 'y': return fits[0], fits[1] if intercept == 'x': - x0 = - fits[1] / fits[0] + x0 = old_div(- fits[1], fits[0]) x0[fits[0]==0] = 0.
return fits[0], x0 - raise ValueError, 'Invalid intercept request "%s"' % intercept + raise ValueError('Invalid intercept request "%s"' % intercept) def period_correlation(y, width=None, normalize=True): @@ -229,7 +235,7 @@ def period_correlation(y, width=None, normalize=True): # Remove mean! y = y - y.mean(axis=1)[:,None] if width is None: - width = nx / 2 + width = old_div(nx, 2) m = nx - width corr = zeros((n, m)) for i in range(m): @@ -252,7 +258,7 @@ def period(y, width=None): """ n0, n_x = y.shape if width is None: - width = n_x / 8 + width = old_div(n_x, 8) p = zeros(n0) # Get the correlations, and locate their second minimum corr = period_correlation(y, width=width, normalize=False) @@ -290,7 +296,7 @@ def add_curves(ax, x, y, lp, i, def plot(x, y, y_rc, lock_points, plot_file, - shape=(4,2), img_size=None, scale=1./1000, + shape=(4,2), img_size=None, scale=old_div(1.,1000), title=None, xlabel=None, ylabel=None, titles=None, rows=None, cols=None, @@ -321,7 +327,7 @@ def get(key, param): if y0 is None: # Use default y-target if separate up/dn aren't there. y0 = get('', 'y') - slopes.append(zip(m, x0, y0)) + slopes.append(list(zip(m, x0, y0))) pl = util.plotGridder(y_rc, plot_file, title=title, xlabel=xlabel, ylabel=ylabel, @@ -347,26 +353,26 @@ def get(key, param): ax.add(biggles.PlotLabel(0., 0., insets[i], halign='left',valign='bottom')) if x.shape==y.shape: - ax.add(biggles.Curve(x[i]/1000., y[i]/1000.)) + ax.add(biggles.Curve(old_div(x[i],1000.), old_div(y[i],1000.))) else: - ax.add(biggles.Curve(x/1000., y[i]/1000.)) + ax.add(biggles.Curve(old_div(x,1000.), old_div(y[i],1000.))) if scale_style == 'roll-off': # Prevent small signals from causing large tick labels - hi, lo = amax(y[i])/1000, amin(y[i])/1000 + hi, lo = old_div(amax(y[i]),1000), old_div(amin(y[i]),1000) if hi - lo < 4: - mid = (hi+lo)/2 + mid = old_div((hi+lo),2) ax.yrange = (mid-2, mid+2) elif scale_style == 'tight': - hi, lo = amax(y[i]) / 1000., amin(y[i]) / 1000. + hi, lo = old_div(amax(y[i]), 1000.), old_div(amin(y[i]), 1000.) dx = (hi - lo)*.1 if dx <= 0: # Never set a 0-size yrange. dx = 0.5 ax.yrange = lo - dx, hi + dx if x.shape==y.shape: - ax.xrange = x[i][0]/1000., x[i][-1]/1000. + ax.xrange = old_div(x[i][0],1000.), old_div(x[i][-1],1000.) else: - ax.xrange = x[0]/1000., x[-1]/1000. + ax.xrange = old_div(x[0],1000.), old_div(x[-1],1000.) pl.cleanup() return { @@ -421,18 +427,18 @@ def join(cls, args, target=None): def _check_data(self, simple=False): if self.data is None: - raise RuntimeError, '%s needs data.' % self.stage_name + raise RuntimeError('%s needs data.' % self.stage_name) if simple and self.gridded: - raise RuntimeError, 'Simple %s expected (use split?)' % \ - self.stage_name + raise RuntimeError('Simple %s expected (use split?)' % \ + self.stage_name) def _check_analysis(self, existence=False): if self.analysis is None: if existence: self.analysis = {} else: - raise RuntimeError, '%s lacks desired analysis structure.' % \ - self.stage_name + raise RuntimeError('%s lacks desired analysis structure.' 
% \ + self.stage_name) def reduce_rows(self): """ @@ -546,7 +552,7 @@ def _read_super_bias(self, filename): self.rows = index_set[2,:n_row] def read_data(self, filename, **kwargs): - raise RuntimeError, "this is a virtual method" + raise RuntimeError("this is a virtual method") def split(self): """ @@ -619,7 +625,7 @@ def select_biases(self, bias_idx=None, assoc=None, ic_factor=None): elif assoc == 'row': n_group, n_member = n_row, n_col else: - raise ValueError, "cannot select_bias with assoc='%'" % assoc + raise ValueError("cannot select_bias with assoc='%s'" % assoc) # If the user has not passed in the desired indices into the # bias array, try to find it in the analysis. @@ -649,8 +655,8 @@ def get_curves(key, idx0, idx1, assoc=None): bias = self.bias[bias_idx][i] * ic_factor s.bias[i] = max(self.bias[0], min(bias, self.bias[-1])) idx0 = (self.bias[:-1] <= bias).nonzero()[0][-1] - frac = float(s.bias[i] - self.bias[idx0]) / \ - (self.bias[idx0+1] - self.bias[idx0]) + frac = old_div(float(s.bias[i] - self.bias[idx0]), \ + (self.bias[idx0+1] - self.bias[idx0])) # chokes if bias_assoc='rowcol' # Interpolate between two curves @@ -705,7 +711,7 @@ def reduce1(self, slope=None): return self.analysis def reduce2(self, slope=None): - raise RuntimeError, "this is a virtual method." + raise RuntimeError("this is a virtual method.") def plot(self, plot_file=None, format=None, data_attr='data'): if plot_file is None: @@ -777,7 +783,7 @@ def __init__(self): def from_biases(cls, parent): # Check parent for compatibility if parent.bias_style != 'ramp': - raise ValueError, "parent is not a bias ramp!" + raise ValueError("parent is not a bias ramp!") # What do we have here n_bias, n_row, n_col, n_fb = parent.data_shape self = cls() @@ -840,15 +846,15 @@ def plot(self, plot_file=None, format=None, data_attr=None): a, b, p = random() * 1000, (random()-.5)*10000, random()*N y.append(a * sin(2*pi*x*f/N+p) + b) y = array(y) - print 'Periods: ', period(y) - print 'Expected: ', N/array(F) + print('Periods: ', period(y)) + print('Expected: ', old_div(N,array(F))) lp = get_lock_points(y, slope=array([1,1,1,-1])) - print 'Lock-x: ', lp['lock_idx'] + print('Lock-x: ', lp['lock_idx']) reg = [] for yy in y: reg.append(get_curve_regions(yy, slopes=True)) - print 'Crossings: ', reg[-1] - print 'Plotting...'
+ print('Crossings: ', reg[-1]) + print('Plotting...') fp = biggles.Table(2,2) for i in range(4): p = biggles.FramedPlot() @@ -856,5 +862,5 @@ def plot(self, plot_file=None, format=None, data_attr=None): p.add(biggles.LineX(x[lp['lock_idx'][i]],type='dashed')) p.add(biggles.LineY(lp['lock_y'][i],type='dashed')) p.yrange = y[i].min()-10, y[i].max()+10 - fp[i%2,i/2] = p + fp[i%2,old_div(i,2)] = p fp.write_img(500,500, 'check_servo.png') diff --git a/python/auto_setup/sq1_ramp.py b/python/auto_setup/sq1_ramp.py index 2478418..9753d5c 100644 --- a/python/auto_setup/sq1_ramp.py +++ b/python/auto_setup/sq1_ramp.py @@ -1,3 +1,9 @@ +from __future__ import division +from __future__ import print_function +from builtins import str +from builtins import zip +from builtins import range +from past.utils import old_div import os, time from mce_data import MCEFile, MCERunfile from numpy import * @@ -8,7 +14,7 @@ def go(tuning, rc, filename=None, slope=None, flags=None): ok, ramp_data = acquire(tuning, rc, filename=filename, do_bias=do_bias) if not ok: - raise RuntimeError, servo_data['error'] + raise RuntimeError(servo_data['error']) s = sq1_ramp(filename) s.reduce1() @@ -91,7 +97,7 @@ def get_lock_slope(data, index, slope_points=5): if len(sl_idx) == 0: return 0., 0. if len(sl_idx) == 1: return sl_idx[0], 0. p = polyfit(sl_idx, data[sl_idx], 1) - return -p[1]/p[0], p[0] + return old_div(-p[1],p[0]), p[0] def get_lock_points(data, slope=None, max_points=5, min_spacing=5): """ @@ -136,7 +142,7 @@ def lock_stats(data, target=0., range=None, slope=1., if not ok: return ok, range[0], 0 a, m = get_lock_slope(data-target, L+range[0], slope_points=slope_points) - if flag: print L, range, a, m + if flag: print(L, range, a, m) return ok, a, m @@ -175,11 +181,11 @@ def join(cls, args): def _check_data(self): if self.data is None: - raise RuntimeError, 'sq1_ramp needs data.' + raise RuntimeError('sq1_ramp needs data.') def _check_analysis(self): if self.analysis is None: - raise RuntimeError, 'sq1_ramp needs analysis.' 
+ raise RuntimeError('sq1_ramp needs analysis.') def read_data(self, filename): self.mcefile = MCEFile(filename) @@ -214,13 +220,13 @@ def reduce1(self, rule=None): if rule is None: rule = self.tuning.get_exp_param('sq1_ramp_locking_rule', default='y_space_sorted') - print rule + print(rule) self._check_data() # Analyze every single stupid rampc curve - scale = max([len(self.fb)/40, 1]) + scale = max([old_div(len(self.fb),40), 1]) y = servo.smooth(self.data, scale) - x_offset = scale/2 + x_offset = old_div(scale,2) dy = y[:,1:] - y[:,:-1] y = y[:,:-1] @@ -265,7 +271,7 @@ def reduce1(self, rule=None): idx = argmax(dz) lims.append((z[idx][1], z[idx+1][1])) - adc_offset = array([(yy[a]+yy[b])/2 for (a,b),yy in zip(lims,y)]) + adc_offset = array([old_div((yy[a]+yy[b]),2) for (a,b),yy in zip(lims,y)]) if clear_lims: lims = [(0,0)] * len(lims) @@ -275,7 +281,7 @@ def reduce1(self, rule=None): result = { 'max_y': amax(y, axis=1), 'min_y': amin(y, axis=1), - 'lock_idx': (lock_left + lock_right)/2, + 'lock_idx': old_div((lock_left + lock_right),2), 'left_idx': lock_left, 'right_idx': lock_right, 'lock_y': adc_offset, @@ -297,9 +303,9 @@ def reduce2(self): self._check_data() # Smooth - scale = max([len(self.fb)/40, 1]) + scale = max([old_div(len(self.fb),40), 1]) y = servo.smooth(self.data, scale) - x_offset = scale/2 + x_offset = old_div(scale,2) abs_lims = [-x_offset, len(self.fb)-x_offset-1] # Find lock points and slopes @@ -309,8 +315,8 @@ def reduce2(self): for word, sgn in [('up', 1), ('dn',-1)]: ok, idx, sl, nl = [zeros(y.shape[0], x) for x in ['bool','int','float','int']] for i, (yy, t) in enumerate(zip(y, targets)): - rg = (len(yy)/4, len(yy)) - o, d, s = lock_stats(yy, target=t, slope_points=scale/2, slope=sgn, + rg = (old_div(len(yy),4), len(yy)) + o, d, s = lock_stats(yy, target=t, slope_points=old_div(scale,2), slope=sgn, range=rg) d = max(0,min(len(yy)-1, d)) # Move in bounds. # Get curve regions @@ -322,12 +328,12 @@ def reduce2(self): idx[idxabs_lims[1]] = abs_lims[1] result['lock_%s_idx'%word] = idx + x_offset - result['lock_%s_slope'%word] = sl / self.d_fb + result['lock_%s_slope'%word] = old_div(sl, self.d_fb) result['lock_%s_ok'%word] = ok.astype('int') result['lock_%s_x'%word] = self.fb[result['lock_%s_idx'%word]] result['lock_%s_count'%word] = nl - width = self.data.shape[-1] / 4 + width = old_div(self.data.shape[-1], 4) phi0 = servo.period(self.data, width=width) result['phi0'] = phi0 * self.d_fb diff --git a/python/auto_setup/sq1_servo.py b/python/auto_setup/sq1_servo.py index a507b24..e68e542 100644 --- a/python/auto_setup/sq1_servo.py +++ b/python/auto_setup/sq1_servo.py @@ -1,3 +1,9 @@ +from __future__ import division +from __future__ import print_function +from __future__ import absolute_import +from builtins import str +from builtins import range +from past.utils import old_div # vim: ts=4 sw=4 et import time, os, glob import biggles @@ -7,7 +13,7 @@ import numpy as np from mce_data import MCERunfile, MCEFile -import servo +from . 
import servo def go(tuning, rc, filename=None, fb=None, slope=None, gain=None): @@ -27,7 +33,7 @@ def go(tuning, rc, filename=None, fb=None, slope=None, gain=None): ok, servo_data = acquire(tuning, rc, filename=filename, fb=fb, gain=gain, super_servo=super_servo) if not ok: - raise RuntimeError, servo_data['error'] + raise RuntimeError(servo_data['error']) sq = SQ1Servo(servo_data['filename'], tuning=tuning) lock_points = sq.reduce() @@ -116,7 +122,7 @@ def acquire_all_row_painful(tuning, rc, filename=None, fb=None, ok, r = acquire(tuning, rc, filename=filename+'.r%02i'%row, fb=fb, gain=gain, super_servo=False, old_servo=old_servo) if not ok: - print r + print(r) return ok, r results.append(r) @@ -172,7 +178,7 @@ def _read_single(self, filename): """ self.error, self.data = util.load_bias_file(filename+'.bias') # Awkward indexing... - col_idx = self.cols - (amin(self.cols) / 8)*8 + col_idx = self.cols - (old_div(amin(self.cols), 8))*8 self.rows = array(self.rf.Item('servo_init', 'row.init', type='int'))[col_idx] n_row = 1 self.gridded = False @@ -300,7 +306,7 @@ def reduce(self, slope=None, lock_amp=True): else: slope = slope[0] n_fb = len(self.fb) - an = servo.get_lock_points(self.data, scale=n_fb/40, + an = servo.get_lock_points(self.data, scale=old_div(n_fb,40), lock_amp=lock_amp, slope=slope) # Add feedback keys @@ -308,7 +314,7 @@ def reduce(self, slope=None, lock_amp=True): an[k+'_x'] = self.fb[an[k+'_idx']] # Measure flux quantum - width = self.data.shape[-1] / 4 + width = old_div(self.data.shape[-1], 4) phi0 = servo.period(self.data, width=width) # Tweak feedback values and rescale slopes and phi0 @@ -503,7 +509,7 @@ def reduce1(self, slope=None): r = [] for i in range(len(self.data)): reg = servo.get_curve_regions(self.data[i], pairs=True) - if i == 100: print reg + if i == 100: print(reg) # reg will always have at least 4 entries. Remove any # trivial ones though. while len(reg) > 0 and reg[0][0] == reg[0][1]: @@ -569,7 +575,7 @@ def reduce(self, slope=None, lock_amp=True): else: slope = slope[0] n_fb = len(self.fb) - an = servo.get_lock_points(self.data, scale=n_fb/40, + an = servo.get_lock_points(self.data, scale=old_div(n_fb,40), lock_amp=lock_amp, slope=slope) # Add feedback keys @@ -627,11 +633,11 @@ def plot(self, plot_file=None, format=None, data_attr=None): for _, r, ax in pl: if r >= plot_shape[1]: continue - x = self.fb/1000. - for i in xrange(ncurves): - ax.add(biggles.Curve(x, data[r,i]/1000.)) + x = old_div(self.fb,1000.) + for i in range(ncurves): + ax.add(biggles.Curve(x, old_div(data[r,i],1000.))) if 'lock_x' in an: - ax.add(biggles.LineX(an['lock_x'][r] / 1000., type='dashed')) + ax.add(biggles.LineX(old_div(an['lock_x'][r], 1000.), type='dashed')) return { 'plot_files': pl.plot_files, @@ -677,14 +683,14 @@ def plot_all(self, plot_file=None, format=None, data_attr=None): for r, c, ax in pl: if r >= plot_shape[(1 if transpose_row_col else 0)]: continue - x = self.fb/1000. + x = old_div(self.fb,1000.) 
#for i in xrange(ncurves): - ax.add(biggles.Curve(x, data[r,c]/1000.)) + ax.add(biggles.Curve(x, old_div(data[r,c],1000.))) if 'lock_x' in an: if self.bias_assoc=='rowcol': - ax.add(biggles.LineX((an['lock_x'].reshape(nrow, ncol))[r,c] / 1000., type='dashed')) + ax.add(biggles.LineX(old_div((an['lock_x'].reshape(nrow, ncol))[r,c], 1000.), type='dashed')) else: - ax.add(biggles.LineX(an['lock_x'][r] / 1000., type='dashed')) + ax.add(biggles.LineX(old_div(an['lock_x'][r], 1000.), type='dashed')) return { 'plot_files': pl.plot_files, diff --git a/python/auto_setup/sq2_servo.py b/python/auto_setup/sq2_servo.py index 181d620..26a77f9 100644 --- a/python/auto_setup/sq2_servo.py +++ b/python/auto_setup/sq2_servo.py @@ -1,9 +1,14 @@ +from __future__ import division +from __future__ import absolute_import +from builtins import str +from builtins import range +from past.utils import old_div import os, time import auto_setup.util as util from numpy import * from mce_data import MCERunfile, MCEFile -import servo +from . import servo def go(tuning, rc, filename=None, fb=None, slope=None, bias=None, gain=None, do_analysis=True): @@ -11,7 +16,7 @@ def go(tuning, rc, filename=None, fb=None, slope=None, bias=None, gain=None, ok, servo_data = acquire(tuning, rc, filename=filename, fb=fb, bias=bias, gain=gain) if not ok: - raise RuntimeError, servo_data['error'] + raise RuntimeError(servo_data['error']) if not do_analysis: return None @@ -201,16 +206,16 @@ def reduce2(self, slope=None, lock_amp=True, x_adjust=None): x_adjust = x_adjust[self.cols] n_fb = len(self.fb) - an = servo.get_lock_points(self.data, scale=n_fb/40, + an = servo.get_lock_points(self.data, scale=old_div(n_fb,40), lock_amp=lock_amp, slope=slope, - x_adjust=x_adjust/self.d_fb) + x_adjust=old_div(x_adjust,self.d_fb)) # Add feedback keys for k in ['lock', 'left', 'right']: an[k+'_x'] = self.fb[an[k+'_idx']] # Measure flux quantum - width = self.data.shape[-1] / 4 + width = old_div(self.data.shape[-1], 4) phi0 = servo.period(self.data, width=width) # Tweak feedback values and rescale slopes and phi0 diff --git a/python/auto_setup/util/__init__.py b/python/auto_setup/util/__init__.py index 447e1bf..432848f 100644 --- a/python/auto_setup/util/__init__.py +++ b/python/auto_setup/util/__init__.py @@ -1,12 +1,13 @@ +from __future__ import absolute_import __all__ = ["load_bias_file", "mas_path", "sign", "tuningData"] -from sign import sign -from tuning import tuningData -from load_bias_file import load_bias_file, load_super_bias_file -from plotter import plotGridder -from dead_mask import DeadMask, get_all_dead_masks -from file_set import FileSet -from rectangle import RCData -from mas_path import mas_path +from .sign import sign +from .tuning import tuningData +from .load_bias_file import load_bias_file, load_super_bias_file +from .plotter import plotGridder +from .dead_mask import DeadMask, get_all_dead_masks +from .file_set import FileSet +from .rectangle import RCData +from .mas_path import mas_path -from debug import interactive_errors +from .debug import interactive_errors diff --git a/python/auto_setup/util/dead_mask.py b/python/auto_setup/util/dead_mask.py index 2134557..91d32be 100644 --- a/python/auto_setup/util/dead_mask.py +++ b/python/auto_setup/util/dead_mask.py @@ -1,9 +1,13 @@ +from __future__ import print_function +from builtins import zip +from builtins import range +from builtins import object from auto_setup.config import mas_param import numpy import os -class DeadMask: +class DeadMask(object): def __init__(self, filename=None, 
label='', shape=None): """ Provide filename to load dead mask, or pass dimensions in 'shape' to @@ -21,7 +25,7 @@ def read(self, filename): nc = mas_param(filename, 'n_cols', 'integer') d = mas_param(filename, 'mask', 'integer') if nr is None or nc is None or d is None: - raise RuntimeError, 'Invalid or missing dead_mask file "%s"' % filename + raise RuntimeError('Invalid or missing dead_mask file "%s"' % filename) self.data = d.reshape(nc, nr).transpose() self.shape = self.data.shape @@ -87,9 +91,9 @@ def get_all_dead_masks(tuning, union=False, frail=False, silent=False): mask_files = [m for m in mask_files if os.path.exists(m)] warnos = [m for m in mask_files if not os.path.exists(m)] if len(warnos) > 0: - print 'Warning: %i of %i mask_files not found:' % (len(warnos), len(mask_files)) + print('Warning: %i of %i mask_files not found:' % (len(warnos), len(mask_files))) for w in warnos: - print ' ', w + print(' ', w) masks = [DeadMask(f, label=l) for f,l in zip(mask_files, mask_list)] if union: if len(masks) == 0: diff --git a/python/auto_setup/util/debug.py b/python/auto_setup/util/debug.py index 9426795..4c53aec 100644 --- a/python/auto_setup/util/debug.py +++ b/python/auto_setup/util/debug.py @@ -1,3 +1,4 @@ +from __future__ import print_function # # Based on # http://code.activestate.com/recipes/65287-automatically-start-the-debugger-on-an-exception/ @@ -13,7 +14,7 @@ def info(type, value, tb): import traceback, pdb # we are NOT in interactive mode, print the exception... traceback.print_exception(type, value, tb) - print + print() # ...then start the debugger in post-mortem mode. pdb.pm() diff --git a/python/auto_setup/util/file_set.py b/python/auto_setup/util/file_set.py index 43b1ae9..6e99f2e 100644 --- a/python/auto_setup/util/file_set.py +++ b/python/auto_setup/util/file_set.py @@ -1,3 +1,4 @@ +from __future__ import print_function from glob import glob import os @@ -34,7 +35,7 @@ def read(self, folder): self[name][rc] = f break else: - print 'Unmatched tuning file, %s' % f + print('Unmatched tuning file, %s' % f) cfg_file = '%s/experiment.cfg' % folder if os.path.exists(cfg_file): self['cfg_file'] = cfg_file diff --git a/python/auto_setup/util/load_bias_file.py b/python/auto_setup/util/load_bias_file.py index 747da14..f514e3d 100644 --- a/python/auto_setup/util/load_bias_file.py +++ b/python/auto_setup/util/load_bias_file.py @@ -1,3 +1,5 @@ +from __future__ import division +from past.utils import old_div from numpy import * def _load(filename): @@ -14,10 +16,10 @@ def load_bias_file(filename): columns as one array, and the second half as a second array. """ data = _load(filename) - n_cols = data.shape[0]/2 + n_cols = old_div(data.shape[0],2) return data[:n_cols,:], data[n_cols:,:] def load_super_bias_file(filename): data = _load(filename) - n_cols = (data.shape[0]-3)/2 + n_cols = old_div((data.shape[0]-3),2) return data[:3], data[3:n_cols+3,:], data[n_cols+3:,:] diff --git a/python/auto_setup/util/mas_path.py b/python/auto_setup/util/mas_path.py index 0e6d920..f5b0125 100644 --- a/python/auto_setup/util/mas_path.py +++ b/python/auto_setup/util/mas_path.py @@ -1,8 +1,11 @@ +from __future__ import print_function +from builtins import str +from builtins import object #vim: ts=4 sw=4 et import os import subprocess -class mas_path: +class mas_path(object): """ Path look-up made easy. You can optionally pass the constructor a fibre card number and/or mas.cfg path, and/or mas_var path. If the 'mas_var' @@ -193,37 +196,37 @@ def test_suite_dir(self): #test! 
if __name__ == '__main__': m = mas_path() - print "Default:" - print " Bin Dir : " + m.bin_dir() - print " Config Dir: " + m.config_dir() - print " Data Root : " + m.data_root() - print " Data Dir : " + m.data_dir() - print " Etc Dir : " + m.etc_dir() - print " Expt. File: " + m.experiment_file() - print " Hdwr. File: " + m.hardware_file() - print " IDL Dir : " + m.idl_dir() - print " MAS Prefix: " + m.mas_prefix() - print " MAS Root : " + m.mas_root() - print " Python Dir: " + m.python_dir() - print " Script Dir: " + m.script_dir() - print " Temp Dir : " + m.temp_dir() - print " Templ. Dir: " + m.template_dir() - print " TSuite Dir: " + m.test_suite_dir() - print "" + print("Default:") + print(" Bin Dir : " + m.bin_dir()) + print(" Config Dir: " + m.config_dir()) + print(" Data Root : " + m.data_root()) + print(" Data Dir : " + m.data_dir()) + print(" Etc Dir : " + m.etc_dir()) + print(" Expt. File: " + m.experiment_file()) + print(" Hdwr. File: " + m.hardware_file()) + print(" IDL Dir : " + m.idl_dir()) + print(" MAS Prefix: " + m.mas_prefix()) + print(" MAS Root : " + m.mas_root()) + print(" Python Dir: " + m.python_dir()) + print(" Script Dir: " + m.script_dir()) + print(" Temp Dir : " + m.temp_dir()) + print(" Templ. Dir: " + m.template_dir()) + print(" TSuite Dir: " + m.test_suite_dir()) + print("") m = mas_path(mas_var = "/") - print "Without MAS_VAR:" - print " Bin Dir : " + m.bin_dir() - print " Config Dir: " + m.config_dir() - print " Data Root : " + m.data_root() - print " Data Dir : " + m.data_dir() - print " Etc Dir : " + m.etc_dir() - print " Expt. File: " + m.experiment_file() - print " Hdwr. File: " + m.hardware_file() - print " IDL Dir : " + m.idl_dir() - print " MAS Prefix: " + m.mas_prefix() - print " MAS Root : " + m.mas_root() - print " Python Dir: " + m.python_dir() - print " Script Dir: " + m.script_dir() - print " Temp Dir : " + m.temp_dir() - print " Templ. Dir: " + m.template_dir() - print " TSuite Dir: " + m.test_suite_dir() + print("Without MAS_VAR:") + print(" Bin Dir : " + m.bin_dir()) + print(" Config Dir: " + m.config_dir()) + print(" Data Root : " + m.data_root()) + print(" Data Dir : " + m.data_dir()) + print(" Etc Dir : " + m.etc_dir()) + print(" Expt. File: " + m.experiment_file()) + print(" Hdwr. File: " + m.hardware_file()) + print(" IDL Dir : " + m.idl_dir()) + print(" MAS Prefix: " + m.mas_prefix()) + print(" MAS Root : " + m.mas_root()) + print(" Python Dir: " + m.python_dir()) + print(" Script Dir: " + m.script_dir()) + print(" Temp Dir : " + m.temp_dir()) + print(" Templ. 
Dir: " + m.template_dir()) + print(" TSuite Dir: " + m.test_suite_dir()) diff --git a/python/auto_setup/util/plot_reg.py b/python/auto_setup/util/plot_reg.py index fa1cee6..48e9a0a 100644 --- a/python/auto_setup/util/plot_reg.py +++ b/python/auto_setup/util/plot_reg.py @@ -1,6 +1,7 @@ +from builtins import object import os -class plot_registrar: +class plot_registrar(object): def __init__(self, root, child): self.filename = os.path.join(root, child, 'mceplots_archive') fout = open(self.filename, 'w') diff --git a/python/auto_setup/util/plotter.py b/python/auto_setup/util/plotter.py index cb127f1..39bf5a2 100644 --- a/python/auto_setup/util/plotter.py +++ b/python/auto_setup/util/plotter.py @@ -1,3 +1,10 @@ +from __future__ import division +from builtins import zip +from builtins import str +from builtins import map +from builtins import range +from builtins import object +from past.utils import old_div import os, shutil import distutils.version as dvs import biggles @@ -5,7 +12,7 @@ # assert biggles.__version__ >= 1.6.4 MIN_BIGGLES = '1.6.4' if dvs.StrictVersion(MIN_BIGGLES) > biggles.__version__: - raise RuntimeError, 'This package needs biggles %s or so.' % MIN_BIGGLES + raise RuntimeError('This package needs biggles %s or so.' % MIN_BIGGLES) def _carry(idx, lim): @@ -18,10 +25,10 @@ def _div_up(x, y): """ int(ceil(x/y)) """ - return (x + y - 1) / y + return old_div((x + y - 1), y) -class pageIndexer: +class pageIndexer(object): """ Spread a grid of a certain size over several pages of a smaller size, preserving row and column structure. @@ -40,7 +47,7 @@ def index(self, r, c): # return page number and page row, col dr, dc = self.page_shape nr, nc = self.world_shape - page_r, page_c = (r/dr), (c/dc) + page_r, page_c = (old_div(r,dr)), (old_div(c,dc)) page = page_r * self.world_pages[1] + page_c return (page, r%dr, c%dc) @@ -48,14 +55,14 @@ def __iter__(self): self.index = 0 return self - def next(self): + def __next__(self): if self.index >= len(self.indices): raise StopIteration self.index += 1 return self.indices[self.index-1] -class plotPager: +class plotPager(object): """ Generic organizer for grouping objects (e.g. plots) onto pages in a systematic way. @@ -91,8 +98,8 @@ def __iter__(self): self.canvas = None return self - def next(self): - (p, pr, pc), (r, c) = self.iter.next() + def __next__(self): + (p, pr, pc), (r, c) = next(self.iter) if p != self.last_page and self.canvas is not None: self.write_page() self.last_page = p @@ -177,7 +184,7 @@ def write_page(self): return c['filename'] -class plotGridder: +class plotGridder(object): """ Schemer for arranging curves for sets of rows and columns onto pages of 4x4 (or so) plots. 
@@ -203,9 +210,9 @@ def __init__(self, shape, filename, **kwargs): for k, v in self.props: setattr(self, k, v) keys = [a for a,_ in self.props] - for k, v in zip(kwargs.keys(), kwargs.values()): + for k, v in zip(list(kwargs.keys()), list(kwargs.values())): if not k in keys: - raise ValueError, "keyword '%s' not valid" % k + raise ValueError("keyword '%s' not valid" % k) if v is not None: setattr(self, k, v) @@ -314,13 +321,13 @@ def _write_hpage(self): def index_of(self, row, col): V, H, S, M, N = self.target_shape - vpage = row / S - hpage = col / (M*N) + vpage = old_div(row, S) + hpage = old_div(col, (M*N)) if S == 1: - major = (col - hpage*(M*N)) / M + major = old_div((col - hpage*(M*N)), M) minor = (col - hpage*(M*N)) % M else: - major = (col - (M*N) / S * (row % S)) / M - minor = (col - (M*N) / S * (row % S)) % M + major = old_div((col - old_div((M*N), S) * (row % S)), M) + minor = (col - old_div((M*N), S) * (row % S)) % M return vpage, hpage, major, minor @@ -338,7 +345,7 @@ def __iter__(self): self.reset() return self - def next(self): + def __next__(self): """ Returns row, column, and biggles plot object. Use them wisely. """ @@ -396,8 +403,8 @@ def hack_svg_viewbox(src, dest): x, y = float(x), float(y) break else: - raise RuntimeError, "Could not find scale argument" - xsize, ysize = int(round(1/x)), int(round(-1/y)) + raise RuntimeError("Could not find scale argument") + xsize, ysize = int(round(old_div(1,x))), int(round(old_div(-1,y))) # New coordinate description svg_g.setAttribute('transform', 'translate(0 %i) scale(1 -1)' % ysize) svg.setAttribute('viewBox', '0 0 %i %i' % (xsize, ysize)) @@ -408,7 +415,7 @@ def hack_svg_viewbox(src, dest): del fout -class pdfCollator: +class pdfCollator(object): """ Combine some SVGs into a single PDF. """ @@ -424,7 +431,7 @@ def collate(self, remove_temp=True, remove_sources=False): # Make temporary folder dest_dir, _ = os.path.split(self.dest) if not os.path.exists(dest_dir): - raise RuntimeError, "output place %s d.n.e."% dest_dir + raise RuntimeError("output place %s d.n.e."% dest_dir) temp_dir = dest_dir + '/tmp' if not os.path.exists(temp_dir): os.mkdir(temp_dir) @@ -451,5 +458,6 @@ def collate(self, remove_temp=True, remove_sources=False): shutil.rmtree(temp_dir) # Remove the source images if remove_sources: - map(os.remove, self.sources) + for f in self.sources: + os.remove(f) return True diff --git a/python/auto_setup/util/rectangle.py b/python/auto_setup/util/rectangle.py index 4b1620d..ce96041 100644 --- a/python/auto_setup/util/rectangle.py +++ b/python/auto_setup/util/rectangle.py @@ -19,10 +19,12 @@ len(rows)==len(cols) is the number of spatial elements. """ +from builtins import zip +from builtins import object import numpy -class RCData: +class RCData(object): def __init__(self, gridded=False, row_attr='rows', col_attr='cols', data_attrs=None): if data_attrs is None: @@ -40,7 +42,7 @@ def join(self, items): self.gridded = items[0].gridded for i in items[1:]: if i.gridded != self.gridded: - raise RuntimeError, 'items for synthesis differ in structure.' + raise RuntimeError('items for synthesis differ in structure.') if self.gridded: # Compute new row and col arrays offsets = self._synth_rect_gridded(items) @@ -51,7 +53,7 @@ def join(self, items): # Check row/col listings for consistency for i in items: if len(i.rows) != len(i.cols): - raise RuntimeError, 'some items have inconsistent gridding.'
diff --git a/python/auto_setup/util/rectangle.py b/python/auto_setup/util/rectangle.py
index 4b1620d..ce96041 100644
--- a/python/auto_setup/util/rectangle.py
+++ b/python/auto_setup/util/rectangle.py
@@ -19,10 +19,12 @@
 len(rows)==len(cols) is the number of spatial elements.
 """
 
+from builtins import zip
+from builtins import object
 import numpy
 
-class RCData:
+class RCData(object):
     def __init__(self, gridded=False, row_attr='rows', col_attr='cols',
                  data_attrs=None):
         if data_attrs is None:
@@ -40,7 +42,7 @@ def join(self, items):
         self.gridded = items[0].gridded
         for i in items[1:]:
             if i.gridded != self.gridded:
-                raise RuntimeError, 'items for synthesis differ in structure.'
+                raise RuntimeError('items for synthesis differ in structure.')
         if self.gridded:
             # Compute new row and col arrays
             offsets = self._synth_rect_gridded(items)
@@ -51,7 +53,7 @@ def join(self, items):
             # Check row/col listings for consistency
             for i in items:
                 if len(i.rows) != len(i.cols):
-                    raise RuntimeError, 'some items have inconsistent gridding.'
+                    raise RuntimeError('some items have inconsistent gridding.')
             # Merge row and col arrays
             offsets = self._synth_rect(items)
         # Merge data
@@ -87,7 +89,7 @@ def _synth_rect_gridded(self, items):
                 offsets.append((len(rows), 0))
                 rows += r
             else:
-                raise RuntimeError, 'incompatible row/col structures.'
+                raise RuntimeError('incompatible row/col structures.')
         self.rows, self.cols = numpy.array(rows), numpy.array(cols)
         return offsets
 
@@ -98,9 +100,9 @@ def _synth_data(self, items, offsets, attr):
         for i in items:
             d = getattr(i, attr)
             if d.dtype != dtype:
-                raise RuntimeError, 'data array types differ.'
+                raise RuntimeError('data array types differ.')
             if tuple(i.data_shape[:-3]) != shape_s:
-                raise RuntimeError, 'secret data structure incompatible'
+                raise RuntimeError('secret data structure incompatible')
         # Super data
         nd, nt = len(self.rows), d.shape[-1]
         data = numpy.zeros(shape_s + (nd, nt), dtype=dtype)
@@ -123,9 +125,9 @@ def _synth_data_gridded(self, items, offsets, attr):
         for i in items:
             d = getattr(i, attr)
             if d.dtype != dtype:
-                raise RuntimeError, 'data array types differ.'
+                raise RuntimeError('data array types differ.')
             if tuple(i.data_shape[:-3]) != shape_s:
-                raise RuntimeError, 'secret data structure incompatible'
+                raise RuntimeError('secret data structure incompatible')
         # Super data
         nr, nc, nt = len(self.rows), len(self.cols), d.shape[-1]
         data = numpy.zeros(shape_s + (nr, nc, nt), dtype=dtype)
diff --git a/python/auto_setup/util/tuning.py b/python/auto_setup/util/tuning.py
index 148cd13..5e1e9af 100644
--- a/python/auto_setup/util/tuning.py
+++ b/python/auto_setup/util/tuning.py
@@ -1,9 +1,14 @@
+from __future__ import print_function
+from __future__ import absolute_import
+from builtins import str
+from builtins import range
+from builtins import object
 # vi: ts=4:sw=4:et
 import os, subprocess, time
 import auto_setup.config as config
-from mas_path import mas_path
+from .mas_path import mas_path
 
-class tuningData:
+class tuningData(object):
     """
     Generic, static data useful to all methods.
     """
@@ -101,10 +106,10 @@ def run(self, args, no_log=False):
                         + time.asctime(time.gmtime(self.the_time)) + " UTC\n")
                 self.log.write("Dir:  " + self.base_dir + "\n")
                 self.log.write("Name: " + self.name + "\n")
-            except IOError, (errno, strerror):
-                print "Unable to create logfile \"{0}\" (errno: {1}; {2})".\
-                        format(self.log_file, errno, strerror)
-                print "Logging disabled."
+            except IOError as e:
+                print("Unable to create logfile \"{0}\" (errno: {1}; {2})".\
+                        format(self.log_file, e.errno, e.strerror))
+                print("Logging disabled.")
                 self.openlog_failed = True
 
         log = self.log
@@ -114,7 +120,7 @@ def run(self, args, no_log=False):
             log.flush()
 
         if (self.debug):
-            print "Executing: " + str(args)
+            print("Executing: " + str(args))
 
         s = subprocess.call([str(x) for x in args], stdout=log,
                 stderr=log)
@@ -158,8 +164,8 @@ def write_config(self, run_now=True):
         try:
             status = self.run(["mce_make_config", self.exp_file,
                 self.config_mce_file])
-        except OSError, e:
-            print "Config creation failed:", e
+        except OSError as e:
+            print("Config creation failed:", e)
             return -1
 
         if (status > 0):
@@ -169,8 +175,8 @@ def write_config(self, run_now=True):
         if (run_now):
             try:
                 status = self.run([self.config_mce_file])
-            except OSError, e:
-                print "Config run failed:", e
+            except OSError as e:
+                print("Config run failed:", e)
                 return -1
 
             if (status > 0):
@@ -236,7 +242,7 @@ def compose_vector(label, data, format='%f'):
             elif s == 'raw':
                 f.write(item['data'])
             else:
-                raise RuntimeError, 'unknown item style "%s"' % s
+                raise RuntimeError('unknown item style "%s"' % s)
         f.write("</%s>\n" % block)
         f.close()
@@ -271,7 +277,7 @@ def register(self, ctime, type, filename, numpts, note=None):
 
     def register_plots(self, *args, **kwargs):
         if kwargs.get('init', False):
-            from plot_reg import plot_registrar
+            from .plot_reg import plot_registrar
             self.plot_reg = plot_registrar(self.base_dir+'/analysis', self.name)
         if self.plot_reg is None:
             return
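Note: futurize cannot mechanically translate Python 2's tuple-unpacking except clause and emits a placeholder binding (`except IOError as xxx_todo_changeme` plus an `.args` unpack); the tuningData.run() hunk above uses the attribute style instead, matching the OSError handlers elsewhere in this patch. A minimal sketch of the two idioms:

# Python 2 only:
#     except IOError, (errno, strerror):
#         print "[Errno %i] %s" % (errno, strerror)
# Python 2 and 3:
try:
    open('/no/such/file')
except IOError as e:
    errno, strerror = e.args   # the tuple the old syntax unpacked
    print("[Errno {0}] {1}".format(e.errno, e.strerror))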
diff --git a/python/interservo.py b/python/interservo.py
index b0d4853..28e3faa 100755
--- a/python/interservo.py
+++ b/python/interservo.py
@@ -1,6 +1,13 @@
 #!/usr/bin/python
 # vim: ts=4 sw=4 et
 
+from __future__ import division
+from __future__ import print_function
+from future import standard_library
+standard_library.install_aliases()
+from builtins import zip
+from builtins import range
+from past.utils import old_div
 from auto_setup.util import mas_path
 
 ## This is an old script... is it still in use?  We should at least
@@ -10,7 +17,7 @@
 from mce_data import *
 from glob import glob
 
-import sys, commands
+import sys, subprocess
 from numpy import *
 
 import optparse
@@ -20,7 +27,7 @@
 
 def expt_param(key, dtype=None):
     src = mas_path().experiment_file()
-    line = commands.getoutput('mas_param -s %s get %s' % (src, key)).rstrip()
+    line = subprocess.getoutput('mas_param -s %s get %s' % (src, key)).rstrip()
     s = line.split(' ')
     if dtype is None or dtype == 'string':
         return s
@@ -28,7 +35,7 @@
         return [ int(ss) for ss in s if ss != '']
     if dtype == 'float':
         return [ float(ss) for ss in s if ss != '']
-    raise ValueError, 'can\'t handle dtype=\'%s\' '%dtype
+    raise ValueError('can\'t handle dtype=\'%s\' '%dtype)
 
 
 def reservo(m, param, gains=None, rows=None, steps=None, verbose=False):
@@ -45,12 +52,12 @@ def reservo(m, param, gains=None, rows=None, steps=None, verbose=False):
         data = m.read_frame(data_only=True)
         dy = [data[NCOLS*r + c] for (c,r) in enumerate(rows)]
         if verbose:
-            print 'Measured: ', dy
+            print('Measured: ', dy)
         dx = [g*d for d,g in zip(dy, gains)]
         x = m.read(param[0], param[1])
         x_new = [int(a+b) for (a,b) in zip(x,dx)]
         if verbose:
-            print 'Applied: ', x_new
+            print('Applied: ', x_new)
         m.write(param[0], param[1], x_new)
         if steps is not None:
             count += 1
@@ -78,7 +85,7 @@ def get_historical_offset(folder, stage='ssa', rows=None):
 
 def write_adc_offset(m, ofs, fill=True, n_rows=33):
     for c in range(N_RC*8):
-        m.write('rc%i'%((c/8)+1), 'adc_offset%i'%(c%8), [ofs[c]]*41)
+        m.write('rc%i'%((old_div(c,8))+1), 'adc_offset%i'%(c%8), [ofs[c]]*41)
 
 
 def get_line(m, rows=None):
@@ -103,12 +110,12 @@ def main():
     opts, args = process_options()
 
     if len(args) != 1:
-        print 'Specify exactly one stage argument'
+        print('Specify exactly one stage argument')
         sys.exit(10)
     stage = args[0]
 
     if opts.tuning is None:
-        print 'Using most recent tuning...'
+        print('Using most recent tuning...')
         try:
             data_root = mas_path().data_root()
             w = [s.strip() for s in
@@ -117,8 +124,8 @@ def main():
             assert(len(glob('%s/*ssa'%tuning)) > 0)
             opts.tuning = tuning
         except:
-            print 'Could not find a recent tuning, or most recent tuning was ' \
-                'not a full tune (specify the tuning folder manualy)!'
+            print('Could not find a recent tuning, or most recent tuning was ' \
+                'not a full tune (specify the tuning folder manually)!')
             sys.exit(11)
 
     # Get basic system description
@@ -128,7 +135,7 @@ def main():
         # This has no analog in the tuning... sq1_fb hardware servo'd
         param = ['sq1', 'fb_const']
         g = expt_param('default_servo_i', dtype='float')
-        gains = [gg/4096. for gg in g]
+        gains = [old_div(gg,4096.) for gg in g]
         rows = expt_param('sq2_rows', dtype='int')
     elif stage == 's2' or stage == 'sq2':
         # This is like sq1servo, but the sq1 are off
@@ -142,10 +149,10 @@ def main():
         rows = None
 
     if not opts.quiet:
-        print 'Source tuning: %s' % opts.tuning
-        print 'Servo control: %s %s' % (param[0],param[1])
-        print 'Servo steps: %i' % opts.steps
-        print ''
+        print('Source tuning: %s' % opts.tuning)
+        print('Servo control: %s %s' % (param[0],param[1]))
+        print('Servo steps: %i' % opts.steps)
+        print('')
 
     # Get an mce
     m = mce()
@@ -164,18 +171,18 @@ def main():
     if not opts.quiet:
         err = [ get_line(m, rows) for i in range(n_check)]
         err = array(err)
-        print 'Initial error set (%i):' % n_check
-        print mean(err, axis=0)
+        print('Initial error set (%i):' % n_check)
+        print(mean(err, axis=0))
 
     if not opts.quiet:
-        print 'Servoing...'
+        print('Servoing...')
     reservo(m, param, gains=gains, steps=n_servo, verbose=opts.verbose)
 
     if not opts.quiet:
         err = [ get_line(m, rows) for i in range(n_check)]
         err = array(err)
-        print 'Final error set (%i):' % n_check
-        print mean(err, axis=0)
+        print('Final error set (%i):' % n_check)
+        print(mean(err, axis=0))
 
 if __name__ == '__main__':
     main()
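Note: the commands module is gone in Python 3; subprocess.getoutput() is its drop-in replacement (and standard_library.install_aliases() makes the name available on Python 2 too). Like the old function it runs the command through a shell and returns combined stdout/stderr with the trailing newline stripped, so expt_param() above keeps its behaviour. A small sketch:

import subprocess

line = subprocess.getoutput('echo 1 2 3')            # -> '1 2 3'
values = [int(s) for s in line.split(' ') if s != '']
# Where arguments are not fixed strings, the list form avoids the shell:
out = subprocess.check_output(['echo', '1 2 3']).decode()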
diff --git a/python/iv_analysis.py b/python/iv_analysis.py
index d9b60b6..4a331d3 100644
--- a/python/iv_analysis.py
+++ b/python/iv_analysis.py
@@ -1,5 +1,11 @@
 #!/usr/bin/python
 
+from __future__ import division
+from __future__ import print_function
+from builtins import map
+from builtins import zip
+from builtins import range
+from past.utils import old_div
 import os, sys, time
 import numpy as np
 from numpy import *
@@ -82,14 +88,14 @@
     fmt = ar_par['Rshunt_format'].split()
     if fmt[0] == 'detector_list':
         # Args are column indices (col, row, Rshunt)
-        cols = map(int, fmt[1:])
+        cols = list(map(int, fmt[1:]))
         Rshunt = iv_tools.TESShunts.from_columns_file(
            (n_row,n_col), ar_par['Rshunt_filename'], data_cols=cols)
         Rshunt.R[~Rshunt.ok] = ar_par['default_Rshunt']
         printv('Read %i shunt resistances from %s' % \
            (Rshunt.ok.sum(), ar_par['Rshunt_filename']), 2)
     else:
-        raise ValueError, "unknown shunt_format = '%s'" % fmt[0]
+        raise ValueError("unknown shunt_format = '%s'" % fmt[0])
 else:
     Rshunt = iv_tools.TESShunts((n_row, n_col))
     Rshunt.R[:,:] = ar_par['default_Rshunt']
@@ -105,7 +111,7 @@
     deriv_thresh=supercond_thresh, scale=supercond_scale,
     smoother_mode=smoother_mode)
 
-ok_rc = zip(*iv_data.ok.nonzero())
+ok_rc = list(zip(*iv_data.ok.nonzero()))
 
 # Using the branch analysis in iv_data, and the resistances in Rshunt
 # and ar_par, compute loading properties of each TES.
@@ -140,7 +146,7 @@
 
 # Round.
 bstep = ar_par['bias_step']
-bias_points_dac = (bias_points_dac/bstep).round().astype('int')*bstep
+bias_points_dac = (old_div(bias_points_dac,bstep)).round().astype('int')*bstep
 
 # Evaluate perRn of each det at the chosen bias point
 bias_points_dac_ar = bias_points_dac[bias_map.phys_line]
@@ -181,17 +187,17 @@
 if printv.v >= 1:
     if opts.with_rshunt_bug:
-        print 'Rshunt bug is in!.'
-    print 'Recommended biases for target of %10.4f Rn' % ar_par['per_Rn_bias']
+        print('Rshunt bug is in!')
+    print('Recommended biases for target of %10.4f Rn' % ar_par['per_Rn_bias'])
     for l in range(bias_map.n_line):
-        print 'Line %2i = %6i' % (l, bias_points_dac[l])
-    print
-    print 'Cut limits at recommended biases:'
-    print '%% R_n %10.6f %10.6f' % (r0,r1)
-    print 'Po (pW) %10.6f %10.6f' % (p0, p1)
-    print
-    print 'Total good normal branches = %4i' % iv_data.ok.sum()
-    print 'Number of detectors within cut limits = %4i' % sum(set_data.keep_rec)
+        print('Line %2i = %6i' % (l, bias_points_dac[l]))
+    print()
+    print('Cut limits at recommended biases:')
+    print('%% R_n %10.6f %10.6f' % (r0,r1))
+    print('Po (pW) %10.6f %10.6f' % (p0, p1))
+    print()
+    print('Total good normal branches = %4i' % iv_data.ok.sum())
+    print('Number of detectors within cut limits = %4i' % sum(set_data.keep_rec))
 
 #
 # Runfile block !
@@ -287,8 +293,8 @@
     fr.xlabel = 'TES BIAS (DAC/1000)'
     fr.ylabel = 'FB (DAC/1000)'
     # Show data with analysis regions
-    y = filedata.mcedata[r,c] / 1000
-    x = filedata.bias_dac / 1000
+    y = old_div(filedata.mcedata[r,c], 1000)
+    x = old_div(filedata.bias_dac, 1000)
     # Shaded regions; normal, transition, supercond.
     regions = [(iv_data.norm_idx0[r,c],iv_data.norm_idx1[r,c]),
                (iv_data.trans_idx[r,c],iv_data.super_idx0[r,c]),
@@ -306,7 +312,7 @@
         yl = max(yl[0], 2*y_norm[0]-y_norm[1]), \
             min(yl[1], 2*y_norm[1]-y_norm[0])
         # Selected bias value
-        xb = bias_points_dac[bias_map.phys_line[r,c]] / 1000.
+        xb = old_div(bias_points_dac[bias_map.phys_line[r,c]], 1000.)
         p.add(bg.Curve([xb,xb],yl,type='dashed'))
     elif pc == 9:
         # Shunt I vs shunt V (super-cond)
@@ -381,9 +387,9 @@ def get_R_crossing(i):
 targets = ar_par.get('perRn_plot_target_bins')
 if targets is None:
     # Base targets on the dets-on-transition results.
-    lo, hi = (n > n.max() / 10).nonzero()[0][[-1,0]]
+    lo, hi = (n > old_div(n.max(), 10)).nonzero()[0][[-1,0]]
     lo, hi = filedata.bias_dac[bi[lo]], filedata.bias_dac[bi[hi]]
-    targets = (lo, hi, (hi-lo) / 4.99)
+    targets = (lo, hi, old_div((hi-lo), 4.99))
 # To a range
 targets = arange(*targets)
diff --git a/python/iv_tools.py b/python/iv_tools.py
index 8db5872..8cd570b 100644
--- a/python/iv_tools.py
+++ b/python/iv_tools.py
@@ -1,3 +1,9 @@
+from __future__ import division
+from __future__ import print_function
+from builtins import zip
+from builtins import range
+from past.utils import old_div
+from builtins import object
 import os
 import subprocess as sp
 
@@ -8,7 +14,7 @@
 from auto_setup import config
 
 
-class runfile_block:
+class runfile_block(object):
     """
     Write (especially) numpy arrays to a runfile-block file.
     """
@@ -39,7 +45,7 @@ def close(self):
         self.fout
 
 
-class adict:
+class adict(object):
     """
     Just a holder for arrays.  Members of a particular types, but
     with the same shape are added with the "define" method.
@@ -56,7 +62,7 @@ def define(self, keys, types, shape):
             setattr(self, k, np.zeros(shape, dtype=t))
             self.keys.append(k)
     def add_item(self, index, source):
-        for k, v in source.iteritems():
+        for k, v in source.items():
             if k in self.keys:
                 getattr(self,k)[index] = v
 
@@ -69,7 +75,7 @@ def read_ascii(filename, data_start=0, comment_chars=[]):
     return np.transpose(data)
 
 
-class IVData:
+class IVData(object):
     """
     Container for the vectors pertaining to IV curves.  Raw data can
     be loaded from an MCE flatfile, or something.  Based on array
@@ -107,7 +113,7 @@ def read(self, filename, biasfile=None, runfile=True):
             self.bias_dac = read_ascii(biasfile, comment_chars=['<', '#'])[0]
         self.n_row, self.n_col, self.n_pts = self.mcedata.shape
         if self.bias_dac.shape[0] != self.n_pts:
-            raise RuntimeError, 'weird .bias file'
+            raise RuntimeError('weird .bias file')
 
     def compute_physical(self, ar_par):
         """
@@ -116,13 +122,13 @@ def compute_physical(self, ar_par):
         """
         # Differentials
         ## TES bias DAC voltage, per DAC unit
-        self.dbias_ddac = ar_par['bias_DAC_volts'] / 2**ar_par['bias_DAC_bits']
+        self.dbias_ddac = old_div(ar_par['bias_DAC_volts'], 2**ar_par['bias_DAC_bits'])
         R33, Rfb = 49.9, ar_par['Rfb_total']
         fb_DAC_volts = ar_par['fb_DAC_amps'] * Rfb * R33 / (R33 + Rfb)
         ## SQ1 FB DAC voltage, per DAC unit
-        self.dfb_ddac = fb_DAC_volts / 2**ar_par['fb_DAC_bits']
+        self.dfb_ddac = old_div(fb_DAC_volts, 2**ar_par['fb_DAC_bits'])
         ## TES current, per unit FB DAC voltage
-        self.di_dfb = 1 / (ar_par['M_ratio']*Rfb)
+        self.di_dfb = old_div(1, (ar_par['M_ratio']*Rfb))
         ## Get the bias configuration, which includes per-channel sign
         ## of the feedback -> power conversion.  Another way to do
@@ -146,13 +152,13 @@ def compute_tes(self, iv_data, ar_par, Rshunt, bias_map,
         ## Use shunt to get TES voltage
         Rb = ar_par['Rbias_arr_total'][bias_map.virt_line]
         self.tes_v = 1e6 * Rshunt.R[:,:,None] * \
-            (self.bias_v[None,None,:]/Rb[:,:,None] - self.tes_i*1e-6)
+            (old_div(self.bias_v[None,None,:],Rb[:,:,None]) - self.tes_i*1e-6)
         ## The resistance vector; just the ratio of voltage to current.
-        self.tes_R = self.tes_v / self.tes_i
+        self.tes_R = old_div(self.tes_v, self.tes_i)
         ## The power
         self.tes_P = self.tes_v * self.tes_i
         ## More branch analysis...
-        self.ok_rc = zip(*iv_data.ok.nonzero())
+        self.ok_rc = list(zip(*iv_data.ok.nonzero()))
         if update_iv_data:
             ## Estimate R_normal from tes_R
             for r, c in self.ok_rc:
@@ -174,10 +180,10 @@ def compute_tes(self, iv_data, ar_par, Rshunt, bias_map,
                 i0 = norm_region.max()
                 iv_data.psat[r,c] = self.tes_P[r,c,i0]
         ## Curves of fraction of R_normal (formerly percent of R normal)
-        self.tes_fracRn = self.tes_R / iv_data.R_norm.reshape((nr, nc, 1))
+        self.tes_fracRn = old_div(self.tes_R, iv_data.R_norm.reshape((nr, nc, 1)))
         ## Responsivity as function of bias, including FB sign correction.
         self.resp = self.di_dfb * self.dfb_ddac * 1e-6*self.tes_v * \
-            (1 - Rshunt.R.reshape((nr, nc, 1))/self.tes_R)
+            (1 - old_div(Rshunt.R.reshape((nr, nc, 1)),self.tes_R))
         if ar_par.get('preserve_resp_sign', False):
             self.resp *= bias_map.sign[:,:,None]
 
@@ -214,9 +220,9 @@ def __init__(self, shape):
     def analyze_curves(self, filedata, rows=None, cols=None, **kwargs):
         ## Analyze only the requested rows and columns...
         if cols is None:
-            cols = range(self.n_col)
+            cols = list(range(self.n_col))
         if rows is None:
-            rows = range(self.n_row)
+            rows = list(range(self.n_row))
         for c in cols:
             for r in rows:
                 det = analyze_IV_curve(filedata.bias_v, filedata.fb_v[r,c],
@@ -224,7 +230,7 @@ def analyze_curves(self, filedata, rows=None, cols=None, **kwargs):
                 self.add_item((r, c), det)
 
 
-class TESShunts:
+class TESShunts(object):
     def __init__(self, shape):
         self.R = np.zeros(shape, 'float')
         self.ok = np.zeros(shape, 'bool')
@@ -326,8 +332,8 @@ def from_array_params(cls, ar_par):
                     self.optim[row,col] = optim
 
         else:
-            raise ValueError, "unknown bias_line_scheme = '%s'" % \
-                ar_par['bias_line_scheme']
+            raise ValueError("unknown bias_line_scheme = '%s'" % \
+                ar_par['bias_line_scheme'])
 
         # Also allow a per-column sign correction to the signal.
         if 'fb_normalize' in ar_par:
@@ -336,15 +342,15 @@ def from_array_params(cls, ar_par):
         return self
 
 
-class logger:
+class logger(object):
     def __init__(self, verbosity=0, indent=True):
         self.v = verbosity
         self.indent = indent
     def write(self, s, level=0):
         if level <= self.v:
             if self.indent:
-                print ' '*level,
-            print s
+                print(' '*level, end=' ')
+            print(s)
     def __call__(self, *args, **kwargs):
         return self.write(*args, **kwargs)
@@ -394,8 +400,8 @@ def analyze_IV_curve(bias0, fb0,
     n = bias.shape[0]
     i = 0
     dbias = -np.mean(bias[1:] - bias[:-1])
-    dy = (fb[1:] - fb[:-1]) / dbias
-    span = max(5, int(scale/dbias))
+    dy = old_div((fb[1:] - fb[:-1]), dbias)
+    span = max(5, int(old_div(scale,dbias)))
     transend = None
     # Look at all places where the derivative is positive.
     pos_idx = (dy[:-span]>0).nonzero()[0]
@@ -432,17 +438,17 @@ def analyze_IV_curve(bias0, fb0,
     ok = len(normal_idx) > 1
     if not ok:
         return results
-    results = dict(zip(['ok', 'trans_begin', 'trans_end', 'trans_bias'],
-                       [ok, trans, transend, trans_bias]))
+    results = dict(list(zip(['ok', 'trans_begin', 'trans_end', 'trans_bias'],
+                       [ok, trans, transend, trans_bias])))
     # Fit normal branch
     normfit = np.polyfit(bias[normal_idx], fb[normal_idx], 1)
     Rnorm, offset = normfit
-    results.update(zip(['norm_offset', 'Rnorm', 'norm_idx0', 'norm_idx1'], \
-                       [offset, Rnorm, min(normal_idx), max(normal_idx)]))
+    results.update(list(zip(['norm_offset', 'Rnorm', 'norm_idx0', 'norm_idx1'], \
+                       [offset, Rnorm, min(normal_idx), max(normal_idx)])))
     # Fit super-conducting branch
     superfit = np.polyfit(bias[transend:], fb[transend:], 1)
-    results.update(zip(['super_offset', 'Rsuper', 'super_idx0', 'super_idx1'],
-                       [superfit[1], superfit[0], transend, fb.shape[0]]))
+    results.update(list(zip(['super_offset', 'Rsuper', 'super_idx0', 'super_idx1'],
+                       [superfit[1], superfit[0], transend, fb.shape[0]])))
     return results
@@ -473,7 +479,7 @@ def smooth(fb, target_segs=3, max_kernel=None):
         max_kernel -= 1
     best = None
     for klen in range(0, max_kernel//2):
-        fb1 = np.convolve(fb, np.ones(klen*2+1), 'valid') / (klen*2+1)
+        fb1 = old_div(np.convolve(fb, np.ones(klen*2+1), 'valid'), (klen*2+1))
         dy = np.diff(fb1)
        sc = (dy[1:]*dy[:-1] < 0).sum()
        if best is None or sc < best[0]:
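Note: with print_function in effect, Python 2's trailing-comma print (suppress the newline, separate with a space) becomes the end=' ' keyword, which is how logger.write() above keeps its indent-then-message layout on one output line. A minimal sketch:

from __future__ import print_function   # a no-op on Python 3

print('  ', end=' ')   # Python 2 spelling: print '  ',
print('payload')       # both calls land on the same output line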
diff --git a/python/mce_auto_assist.py b/python/mce_auto_assist.py
index fe65ef6..429b30d 100644
--- a/python/mce_auto_assist.py
+++ b/python/mce_auto_assist.py
@@ -1,3 +1,7 @@
+from __future__ import print_function
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
 import sys
 import os
 import time
@@ -5,7 +9,7 @@
 from auto_setup.util.mas_path import mas_path
 mas_path = mas_path()
 
-from ConfigParser import SafeConfigParser
+from configparser import SafeConfigParser
 
 simple_delist = lambda x: x.split()
 
@@ -23,8 +27,8 @@ def get_type(self, type_caster, key, default=None):
             return type_caster(self.get(section, key))
         if default is not None:
             return default
-        raise ValueError, 'Unknown config parameter %s:%s' % \
-            (section, key)
+        raise ValueError('Unknown config parameter %s:%s' % \
+            (section, key))
     def getfloat(self, key, default=None):
         return self.get_type(float, key, default)
     def getint(self, key, default=None):
@@ -34,7 +38,7 @@ def getstr(self, key, default=None):
     def getlist(self, key, default=None):
         return self.get_type(simple_delist, key, default)
 
-class AutoLogger:
+class AutoLogger(object):
     log_file = None
     format_str = '{info} : {script_id} : {msg}'
     start_time = None
@@ -64,7 +68,7 @@ def write(self, msg, info='INFO'):
         try:
             fout = open(self.log_file, 'a')
         except IOError:
-            print 'Failed to open %s, disabling auto-log.' % self.log_file
+            print('Failed to open %s, disabling auto-log.' % self.log_file)
             self.log_file = None
             return
         kw = {'msg': msg,
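Note: the ConfigParser module was renamed configparser in Python 3, and standard_library.install_aliases() makes the new name importable on Python 2 as well, so the single py3-style import above works on both. One follow-up worth flagging: in Python 3, SafeConfigParser survives only as a deprecated alias of ConfigParser (removed in 3.12), so this import may eventually want updating. A sketch under those assumptions:

from future import standard_library
standard_library.install_aliases()
from configparser import ConfigParser   # py3 name for SafeConfigParser

cfg = ConfigParser()
cfg.read_string(u'[main]\nmas_etc = /etc/mce\n')
print(cfg.get('main', 'mas_etc'))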
diff --git a/python/mce_control.py b/python/mce_control.py
index 2753839..a3cc4f5 100644
--- a/python/mce_control.py
+++ b/python/mce_control.py
@@ -1,3 +1,7 @@
+from __future__ import division
+from __future__ import print_function
+from builtins import range
+from past.utils import old_div
 import numpy as np
 import pymce
 
@@ -63,7 +67,7 @@ def read_row(self, n=None, avg=False):
     def write_columns(self, param, data):
         # Duplicate values across all rows in each column parameter
         for c, d in enumerate(data):
-            rc, chan = c/MCE_CHANS + 1, c%MCE_CHANS
+            rc, chan = old_div(c,MCE_CHANS) + 1, c%MCE_CHANS
             self.write('rc%i'%rc, param+'%i' % chan, [int(d)]*41)
 
     def io_readwrite(self, card, param, data=None):
@@ -81,8 +85,8 @@ def io_sys_sync(self, param, data=None):
         if data is None:
             vals = np.array(self.read('sys', param))
             if not np.all(vals==vals[0]):
-                print '(Warning: inconsistent data for "%s" across sys.)' % \
-                    param
+                print('(Warning: inconsistent data for "%s" across sys.)' % \
+                    param)
             return vals[0]
         else:
             self.write('sys', param, data)
@@ -97,8 +101,8 @@ def io_rc_sync(self, param, data=None):
         if data is None:
             vals = np.array(self.read('rca', param))
             if not np.all(vals==vals[0]):
-                print '(Warning: inconsistent data for "%s" across RCs.)' % \
-                    param
+                print('(Warning: inconsistent data for "%s" across RCs.)' % \
+                    param)
             return vals[0]
         else:
             self.write('rca', param, data)
@@ -215,13 +219,13 @@ def sa_fb(self, fb):
     def dt(self):
         nr, dr, rl = [self.read('cc', k)[0] for k in
                       ['num_rows', 'data_rate', 'row_len']]
-        return float(nr * dr * rl) / 5e7
+        return old_div(float(nr * dr * rl), 5e7)
 
     def mux_rate(self):
         return pymce.const.FREQ / self.row_len() / self.num_rows()
 
     def readout_rate(self):
-        return self.mux_rate() / self.data_rate()
+        return old_div(self.mux_rate(), self.data_rate())
diff --git a/python/mce_data.py b/python/mce_data.py
index feea0d5..8007ccf 100644
--- a/python/mce_data.py
+++ b/python/mce_data.py
@@ -1,3 +1,9 @@
+from __future__ import division
+from __future__ import print_function
+from builtins import zip
+from builtins import range
+from past.utils import old_div
+from builtins import object
 import numpy
 import sys
 import os
@@ -16,7 +22,7 @@
 # This block read maximum (bytes) is to keep memory usage reasonable.
 MAX_READ_SIZE = int(1e9)
 
-class HeaderFormat:
+class HeaderFormat(object):
     """
     Contains description of MCE header content and structure.
     """
@@ -61,7 +67,7 @@ def extract(self, data, rescale=True, unwrap=False, **kwargs):
         unwrap = deprecate_arg(unwrap, kwargs, 'unwrap', 'do_unwrap')
         if len(kwargs) > 0:
             raise TypeError("%s: got an expected keyword argument '%s'" % \
-                (sys._getframe().f_code.co_name, kwargs.keys()[0]))
+                (sys._getframe().f_code.co_name, list(kwargs.keys())[0]))
 
         if self.signed:
             # Integer division preserves sign
@@ -70,7 +76,7 @@ def extract(self, data, rescale=True, unwrap=False, **kwargs):
             if left != 0:
                 data = numpy.array(data).astype('int32') * 2**left
             if right != 0:
-                data = numpy.array(data).astype('int32') / 2**right
+                data = old_div(numpy.array(data).astype('int32'), 2**right)
         else:
             # For unsigned fields, bit operations should be used
             data = (data >> self.start) & ((1 << self.count)-1)
@@ -86,8 +92,8 @@ def deprecate_arg(new_val, kwargs, new_arg, old_arg):
     # Note this pops the bad value from kwargs
     if old_arg in kwargs:
         if deprecation_warnings:
-            print 'Use of argument "%s" is deprecated, the new word is "%s".' % \
-                (old_arg, new_arg)
+            print('Use of argument "%s" is deprecated, the new word is "%s".' % \
+                (old_arg, new_arg))
         return kwargs.pop(old_arg)
     return new_val
@@ -105,7 +111,7 @@ def define(self, *args, **kargs):
         for a in args:
             self.fields.append(a.name)
             self[a.name] = a
-        for k in kargs.keys():
+        for k in list(kargs.keys()):
             if k == 'raw':
                 self.raw = True
                 self.raw_info = kargs[k]
@@ -135,7 +141,7 @@ def define(self, *args, **kargs):
 }
 
 
-class MCEData:
+class MCEData(object):
     """
     Container for MCE data (single channel) and associated header
     and origin information.
     """
@@ -159,21 +165,21 @@ def _rangify(start, count, n, name='items'):
     if start < 0:
         start = n + start
     if start > n:
-        print 'Warning: %s requested at %i, beyond available %s.' %\
-            (name, start, name)
+        print('Warning: %s requested at %i, beyond available %s.' %\
+            (name, start, name))
         start = n
     if count is None:
         count = n - start
     if count < 0:
         count = n - start + count
     if start + count > n:
-        print 'Warning: %i %s requested, exceeding available %s.' %\
-            (count, name, name)
+        print('Warning: %i %s requested, exceeding available %s.' %\
+            (count, name, name))
         count = n - start
     return start, count
 
 
-class SmallMCEFile:
+class SmallMCEFile(object):
     """
     Facilitate the loading of (single channels from) raw MCE
     flat-files.  Extraction and rescaling of data content is performed
@@ -266,12 +272,12 @@ def _GetRCAItem(self, param):
         vals = [ self._rfMCEParam('rc%i'%r, param) for r in rcs ]
         for r,v in zip(rcs[1:], vals[1:]):
             if v is None and vals[0] is not None:
-                print 'Warning: param \'%s\' not found on rc%i.' % \
-                    (param, r)
+                print('Warning: param \'%s\' not found on rc%i.' % \
+                    (param, r))
                 continue
             if vals[0] != v:
-                print 'Warning: param \'%s\' is not consistent across RCs.' % \
-                    (param)
+                print('Warning: param \'%s\' is not consistent across RCs.' % \
+                    (param))
                 break
         return vals[0]
 
@@ -284,13 +290,11 @@ def _GetContentInfo(self):
         """
         if self.runfile is None:
             if self.runfilename == False:
-                raise RuntimeError, \
-                    'Can\'t determine content params without runfile.'
+                raise RuntimeError('Can\'t determine content params without runfile.')
             self._ReadRunfile()
         # In a pinch we could get these params from the runfile.
         if self.size_ro == 0:
-            raise RuntimeError, \
-                'Can\'t determine content params without data file.'
+            raise RuntimeError('Can\'t determine content params without data file.')
         # Switch on firmware revision to determine 'num_cols_reported' support
         fw_rev = self._GetRCAItem('fw_rev')
         if fw_rev >= 0x5000001:
@@ -322,23 +326,23 @@ def _GetContentInfo(self):
 
         # Check 1: Warn if count_rc does not fit evenly into count_cc
         if count_cc % count_rc != 0:
-            print 'Warning: imperfect RC->CC frame packing (%i->%i).' % \
-                (count_rc, count_cc)
+            print('Warning: imperfect RC->CC frame packing (%i->%i).' % \
+                (count_rc, count_cc))
 
         # Check 2: Warn if decimation/packing is such that samples are
         # not evenly spaced in time.
         if count_rc != count_cc:
             if count_rc * self.divid != count_cc:
-                print 'Warning: bizarro uneven RC->CC frame packing.'
+                print('Warning: bizarro uneven RC->CC frame packing.')
 
         # Determine the final data count, per channel.  Any times
         # that are not represented in all channels are lost.
-        self.n_frames = (count_cc / count_rc) * self.n_ro
+        self.n_frames = (old_div(count_cc, count_rc)) * self.n_ro
 
         # Store mean sampling frequency
         nr, rl, dr = [self._rfMCEParam('cc', s) for s in \
                       ['num_rows', 'row_len', 'data_rate']]
-        self.freq = (50.e6 / nr / rl / dr) * (count_cc / count_rc)
+        self.freq = (50.e6 / nr / rl / dr) * (old_div(count_cc, count_rc))
 
 
     def _GetPayloadInfo(self):
@@ -367,17 +371,17 @@ def _GetPayloadInfo(self):
         if self.filename is not None:
             # This conditional caginess is for subclassing to MCEBinaryData.
             file_size = stat(self.filename).st_size
-            self.n_ro = file_size / self.frame_bytes
+            self.n_ro = old_div(file_size, self.frame_bytes)
             if file_size % self.frame_bytes != 0:
-                print 'Warning: partial frame at end of file.'
+                print('Warning: partial frame at end of file.')
 
     def _UpdateNFrames(self):
         # Partial GetInfo... no error checking.
         file_size = stat(self.filename).st_size
-        self.n_ro = file_size / self.frame_bytes
+        self.n_ro = old_div(file_size, self.frame_bytes)
         count_rc = self.n_rows * self.n_cols
         count_cc = self.size_ro
-        self.n_frames = (count_cc / count_rc) * self.n_ro
+        self.n_frames = (old_div(count_cc, count_rc)) * self.n_ro
 
     def _ReadHeader(self, offset=None, head_binary=None):
         """
@@ -388,7 +392,7 @@ def _ReadHeader(self, offset=None, head_binary=None):
         format = HeaderFormat()
         if head_binary is None:
             if self.filename is None:
-                raise RuntimeError, 'Can\'t read header without data file.'
+                raise RuntimeError('Can\'t read header without data file.')
             fin = open(self.filename)
             if offset is not None:
                 fin.seek(offset)
@@ -434,7 +438,7 @@ def ReadHeaders(self, count=None, start=0):
               fn in range(start, start + count)]
 
         # Return the transpose: a dictionary of lists:
-        return dict(zip(header_list[0],zip(*[d.values() for d in header_list])))
+        return dict(list(zip(header_list[0],list(zip(*[list(d.values()) for d in header_list])))))
 
     def ReadRaw(self, count=None, start=0, raw_frames=False):
         """
@@ -450,19 +454,19 @@ def ReadRaw(self, count=None, start=0, raw_frames=False):
         # Check max frame size
         if count * self.frame_bytes > MAX_READ_SIZE:
             # Users: override this by changing the value of mce_data.MAX_READ_SIZE
-            print 'Warning: maximum read of %i bytes exceeded; limiting.' % \
-                MAX_READ_SIZE
-            count = MAX_READ_SIZE / self.frame_bytes
+            print('Warning: maximum read of %i bytes exceeded; limiting.' % \
+                MAX_READ_SIZE)
+            count = old_div(MAX_READ_SIZE, self.frame_bytes)
 
         # Open, seek, read.
-        f_dwords = self.frame_bytes / MCE_DWORD
+        f_dwords = old_div(self.frame_bytes, MCE_DWORD)
         fin = open(self.filename)
         fin.seek(start*self.frame_bytes)
         a = numpy.fromfile(file=fin, dtype='<i4', count=count*f_dwords)
@@ ... @@ def Read(self, count=None, start=0,
         if len(kwargs) > 0:
             raise TypeError("%s: got an expected keyword argument '%s'" % \
-                (sys._getframe().f_code.co_name, kwargs.keys()[0]))
+                (sys._getframe().f_code.co_name, list(kwargs.keys())[0]))
 
         # When raw_frames is passed, count and start are passed directly to ReadRaw.
         if raw_frames:
@@ -652,13 +656,13 @@ def Read(self, count=None, start=0,
         if self.raw_data:
             # Raw data is contiguous and uninterrupted
             cc_start = start * self.n_cols / self.size_ro
-            cc_count = ((count+start)*self.n_cols + self.size_ro-1) / \
-                self.size_ro - cc_start
+            cc_count = old_div(((count+start)*self.n_cols + self.size_ro-1), \
+                self.size_ro) - cc_start
         else:
             # For packed data, trim excess frame words
-            pack_factor = self.size_ro / (self.n_rows * self.n_cols)
-            cc_start = start / pack_factor
-            cc_count = (count + start + pack_factor-1) / pack_factor - cc_start
+            pack_factor = old_div(self.size_ro, (self.n_rows * self.n_cols))
+            cc_start = old_div(start, pack_factor)
+            cc_count = old_div((count + start + pack_factor-1), pack_factor) - cc_start
 
         # Get detector data as (n_ro x (size_ro*n_rc)) array
         data_in = self.ReadRaw(count=cc_count, start=cc_start)
@@ -666,7 +670,7 @@ def Read(self, count=None, start=0,
         # Check data mode for processing instructions
         dm_data = MCE_data_modes.get('%i'%self.data_mode)
         if dm_data is None:
-            print 'Warning: unimplemented data mode %i, treating as 0.'%self.data_mode
+            print('Warning: unimplemented data mode %i, treating as 0.'%self.data_mode)
             dm_data = MCE_data_modes['0']
 
         # Handle data packing
@@ -720,12 +724,11 @@ def Read(self, count=None, start=0,
                     new_data /= filt.gain()
                 elif unfilter == True:
                     new_data = filt.apply_filter(new_data, inverse=True,
-                        decimation=1./self.divid)
+                        decimation=old_div(1.,self.divid))
                 elif unfilter == False:
                     pass
                 else:
-                    raise ValueError, \
-                        "unexpected value for unfilter= argument to MCEFile.Read"
+                    raise ValueError("unexpected value for unfilter= argument to MCEFile.Read")
                 if data_out.data_is_dict:
                     data_out.data[f] = new_data
                 else:
@@ -748,7 +751,7 @@ def __init__(self, value):
     def __str__(self):
         return repr(self.value)
 
-class MCERunfile:
+class MCERunfile(object):
     def __init__(self, filename=None):
         self.filename = filename
         self.data = {}
@@ -776,7 +779,7 @@ def Read(self, filename):
                     block_data = {}
             elif block_name is None:
                 if data is None or data == '':
-                    if self.data.has_key(key):
+                    if key in self.data:
                         raise BadRunfile('duplicate block \'%s\''%key)
                     block_name = key
                 else:
@@ -786,7 +789,7 @@ def Read(self, filename):
         return self.data
 
     def Item(self, block, key, array=True, type='string'):
-        if not self.data.has_key(block) or not self.data[block].has_key(key):
+        if block not in self.data or key not in self.data[block]:
             return None
         data = self.data[block][key]
         if type=='float':
@@ -798,7 +801,7 @@ def Item(self, block, key, array=True, type='string'):
             if not array and len(f) <= 1: return f[0]
             return f
         if type!='string':
-            print 'Unknown type "%s", returning string.' % type
+            print('Unknown type "%s", returning string.' % type)
         if array:
             return data.split()
         return data
@@ -894,8 +897,8 @@ def unwrap_array(data, period, in_place=False):
     creating a new buffer for the unwrapped data.
     """
     ddata = data[...,1:] - data[...,:-1]
-    ups = (ddata > period/2).astype('int').cumsum(axis=-1)
-    dns = (ddata < -period/2).astype('int').cumsum(axis=-1)
+    ups = (ddata > old_div(period,2)).astype('int').cumsum(axis=-1)
+    dns = (ddata < old_div(-period,2)).astype('int').cumsum(axis=-1)
     if not in_place:
         data = data.astype('float')
     data[...,1:] += float(period) * (dns - ups)
@@ -906,15 +909,15 @@ def unwrap(*args, **kwargs):
     This in a alias for unwrap_array, which you should use now.
     """
     if deprecation_warnings:
-        print 'Use of "unwrap" function is deprecated, the new name '\
-            ' is "unwrap_array".'
+        print('Use of "unwrap" function is deprecated, the new name '\
+            ' is "unwrap_array".')
     return unwrap_array(*args, **kwargs)
 
 #
 # MCE low-pass filters
 #
 
-class MCEFilter:
+class MCEFilter(object):
     @staticmethod
     def from_runfile(runfile):
         """
@@ -942,8 +945,8 @@ def transfer(self, f, f_samp=1., power=False):
         Setting power=True will return the power window function
         (square of the modulus of the transfer function).
         """
-        f = f / f_samp
-        K = 1./2**14
+        f = old_div(f, f_samp)
+        K = old_div(1.,2**14)
         scalars = [K, K, K, K, 1., 1.]
         b11, b12, b21, b22, k1, k2 = [s*p for s,p in zip(scalars, self.params)]
         z = numpy.exp(-2j*numpy.pi*f)
@@ -954,7 +957,7 @@ def transfer(self, f, f_samp=1., power=False):
         return H
 
     def spectrum(self, *args, **kwargs):
-        print '*** please use "transfer" method instead of "spectrum" method.'
+        print('*** please use "transfer" method instead of "spectrum" method.')
         return self.transfer(*args, **kwargs)
 
     def gain(self):
@@ -976,7 +979,7 @@ def _spec(x):
                 x = 0.5 - x #flip
             if x < 0:
                 return (1.-x) * g0
-            return abs(cutoff - abs(self.transfer(x)/g0)**2)
+            return abs(cutoff - abs(old_div(self.transfer(x),g0))**2)
         return fmin(_spec,0.1,disp=0)[0] * f_samp
 
     # Filter application
@@ -999,13 +1002,13 @@ def apply_filter(self, data, decimation=1., inverse=False, gain0=1.
         """
         n = data.shape[-1]
-        freqs = numpy.arange(float(n))/n
-        freqs[int((n+1)/2):] -= 1.
-        spec = self.transfer(freqs, f_samp=1./decimation)
+        freqs = old_div(numpy.arange(float(n)),n)
+        freqs[int(old_div((n+1),2)):] -= 1.
+        spec = self.transfer(freqs, f_samp=old_div(1.,decimation))
         if gain0 is not None:
-            spec *= gain0 / self.gain()
+            spec *= old_div(gain0, self.gain())
         if inverse:
-            spec = 1./spec
+            spec = old_div(1.,spec)
         return numpy.fft.ifft(numpy.fft.fft(data)*spec).real
 
     def apply_filter_fir(self, data, truncate=False,
@@ -1024,14 +1027,14 @@ def apply_filter_fir(self, data, truncate=False,
         b = [1., 2., 1.]
         # First filter
         if stages is None or 0 in stages:
-            a = [1., -self.params[0]/2.**14, self.params[1]/2.**14]
-            data = scs.lfilter(b, a, data) / 2**self.params[5]
+            a = [1., old_div(-self.params[0],2.**14), old_div(self.params[1],2.**14)]
+            data = old_div(scs.lfilter(b, a, data), 2**self.params[5])
             if truncate:
                 data = numpy.floor(data)
         # Second filter
         if stages is None or 1 in stages:
-            a = [1., -self.params[2]/2.**14, self.params[3]/2.**14]
-            data = scs.lfilter(b, a, data) / 2**self.params[4]
+            a = [1., old_div(-self.params[2],2.**14), old_div(self.params[3],2.**14)]
+            data = old_div(scs.lfilter(b, a, data), 2**self.params[4])
             if truncate:
                 data = numpy.floor(data)
         return data
@@ -1052,8 +1055,8 @@ def from_params(cls, ftype, fparams):
             params = fparams
         # Did this all work out?
         if params is None or len(params) != 6:
-            raise ValueError, "Invalid filter parameters for ftype='%i'" %\
-                ftype
+            raise ValueError("Invalid filter parameters for ftype='%i'" %\
+                ftype)
         return cls(params)
 
     @classmethod
+ print('Could not load mce module; mce commanding will be disabled.') def abort_msg(text, error=20): sys.stderr.write('Error: %s\n' % text) @@ -31,7 +34,7 @@ def abort_msg(text, error=20): } -class physicalMap: +class physicalMap(object): def __init__(self, card_name, param_name, param_id, card_ids): self.card, self.param, self.p_id, self.c_ids = \ card_name, param_name, param_id, card_ids @@ -43,7 +46,7 @@ def decode(cls, line): c_ids = [int(x,0) for x in w[6:]] return cls(card, param, p_id, c_ids) -class virtualMap: +class virtualMap(object): def __init__(self, card_name, param_name, maps): self.card, self.param, self.maps = \ card_name, param_name, maps @@ -65,7 +68,7 @@ def decode(cls, line): maps.append((int(m[0],0), int(m[1],0), c, p, int(m[3],0))) return cls(card, param, maps) -class configAnalysis: +class configAnalysis(object): """ Decode output of mce_status -g. """ @@ -142,7 +145,7 @@ def get_ramp_params(self, card, param, start=0, count=None, _recursion=0): AWG_MAX_DATA = 8192 AWG_BLOCK_SIZE = 32 -class awgAccessor: +class awgAccessor(object): """ Read and write to the CC AWG data area. """ @@ -153,7 +156,7 @@ def read(self, n=None, reset=True): n = self.mce.read('cc', 'awg_sequence_length')[0] if n > AWG_MAX_DATA: n = AWG_MAX_DATA - print 'Limiting read to %i words' % n + print('Limiting read to %i words' % n) addr = 0 if not reset: addr = self.get_address() @@ -167,11 +170,11 @@ def read(self, n=None, reset=True): addr += _n return data def write(self, data, reset=True, set_length=True): - data = map(int, data) # cast for safety + data = list(map(int, data)) # cast for safety n = len(data) if n > AWG_MAX_DATA: n = AWG_MAX_DATA - print 'Limiting write to %i words' % n + print('Limiting write to %i words' % n) if set_length: self.mce.write('cc', 'awg_sequence_length', [int(n)]) if reset: diff --git a/python/measure_quanta.py b/python/measure_quanta.py index 67b3807..a35115a 100644 --- a/python/measure_quanta.py +++ b/python/measure_quanta.py @@ -1,3 +1,7 @@ +from __future__ import division +from __future__ import print_function +from builtins import range +from past.utils import old_div import auto_setup as aset from auto_setup.util import mas_path import numpy as np @@ -27,13 +31,13 @@ # Protect user if not tuning_dir[0] == '/' and not os.path.exists(tuning_dir): - print 'Cannot find "%s", looking in $MAS_DATA...' % tuning_dir + print('Cannot find "%s", looking in $MAS_DATA...' % tuning_dir) tuning_dir = mas_path().data_dir() + tuning_dir fs = aset.util.FileSet(tuning_dir) files = fs.stage_all(stage) if len(files) == 0: - print 'No files found for stage %s' % stage + print('No files found for stage %s' % stage) sys.exit(1) if stage == 'sa_ramp': @@ -49,19 +53,19 @@ ramps = [aset.SQ1RampTes(f) for f in files] out_format = 'array' else: - print 'Unsupported stage argument "%s".' % stage + print('Unsupported stage argument "%s".' % stage) sq = ramps[0].join(ramps) periods = [] if sq.bias_style == 'ramp': - print 'Multi-bias detected' + print('Multi-bias detected') if opts.sub_ramp is None: - print 'Selecting best bias values for each column (use --sub-ramp to force one).' 
+ print('Selecting best bias values for each column (use --sub-ramp to force one).') sq.reduce1() sq = sq.select_biases(ic_factor=1) else: - print 'Selecting bias at index %i' % opts.sub_ramp + print('Selecting bias at index %i' % opts.sub_ramp) sq = sq.split()[opts.sub_ramp] if stage == 'sa_ramp': @@ -73,13 +77,13 @@ # Set the scan width so that there is at least another phi0 available n = data.shape[1] -n_phi0 = int(n / opts.span) +n_phi0 = int(old_div(n, opts.span)) width = n - n_phi0 # -width = max(width, n/8) # at least n/8! +width = max(width, old_div(n,8)) # at least n/8! width = min(width, n_phi0*2) # at most 2*phi0 -print 'Keeping %i of %i points' % (n, sq.data.shape[-1]) -print 'Traveling segment of length %i' % width +print('Keeping %i of %i points' % (n, sq.data.shape[-1])) +print('Traveling segment of length %i' % width) p = aset.servo.period(sq.data, width=width) @@ -88,11 +92,11 @@ if opts.rescale is not None: MAX_QUANTUM = 2**14 * 0.75 to_adjust = (periods != 0) - n_fit = np.floor(MAX_QUANTUM / periods[to_adjust]) + n_fit = np.floor(old_div(MAX_QUANTUM, periods[to_adjust])) to_adjust[to_adjust] *= (n_fit >= opts.rescale) periods[to_adjust] = (opts.rescale * periods[to_adjust]) -n_rc = len(periods)/8 +n_rc = old_div(len(periods),8) stage_keys = { 'sa_ramp': 'sa_flux_quanta', 'sq2_servo': 'sq2_flux_quanta', @@ -113,7 +117,7 @@ elif out_format == 'array': n_row, n_col = sq.data_shape[-3:-1] if periods.size != n_row*n_col: - print 'I do not know how to reduce this multi-bias data...' + print('I do not know how to reduce this multi-bias data...') sys.exit(1) periods.shape = (n_row, n_col) if opts.do_median: @@ -135,4 +139,4 @@ else: s += ',\n' -print s +print(s) diff --git a/python/raw_power/logbin.py b/python/raw_power/logbin.py index 12ea3bd..3e95400 100644 --- a/python/raw_power/logbin.py +++ b/python/raw_power/logbin.py @@ -1,3 +1,5 @@ +from __future__ import division +from past.utils import old_div from numpy import * def logbin(f, y, bins=400): @@ -17,8 +19,8 @@ def logbin(f, y, bins=400): df = f[1] - f[0] f_max = f[-1] + df f_min = f[1] - N = log(f_max / f_min) - dN = N / bins + N = log(old_div(f_max, f_min)) + dN = old_div(N, bins) edges = f_min * exp(dN * arange(bins+1)) # Frequency counts for norming nf = histogram(f, bins=edges)[0] @@ -26,8 +28,8 @@ def logbin(f, y, bins=400): new_f = histogram(f, weights=f, bins=edges)[0] new_y = histogram(f, weights=abs(y)**2, bins=edges)[0] # Reduce - new_f = new_f[nf!=0] / nf[nf!=0] - new_y = sqrt(new_y[nf!=0]/nf[nf!=0]) + new_f = old_div(new_f[nf!=0], nf[nf!=0]) + new_y = sqrt(old_div(new_y[nf!=0],nf[nf!=0])) return new_f, new_y @@ -38,8 +40,8 @@ def logbin(f, y, bins=400): from todUtils import TOD filename = '/scr/queequeg1/colossus/season2/merlin/20081013/1223886429.1234568432.ar1' tod = TOD.read(filename, camCol=[5], camRow=[25]) - x, y = arange(tod.ndata/2, dtype='float') / tod.ndata * 400., \ - abs(fft(tod.data[0]))[:tod.ndata/2] + x, y = arange(old_div(tod.ndata,2), dtype='float') / tod.ndata * 400., \ + abs(fft(tod.data[0]))[:old_div(tod.ndata,2)] x1, y1 = logbin(x, y) x2, y2 = logbin(200., y) loglog(x, y) diff --git a/python/raw_power/spectrum_fit.py b/python/raw_power/spectrum_fit.py index fde505a..c24f75f 100644 --- a/python/raw_power/spectrum_fit.py +++ b/python/raw_power/spectrum_fit.py @@ -1,10 +1,13 @@ #!/usr/bin/python +from __future__ import division +from __future__ import print_function +from past.utils import old_div try: from pylab import * plotter = 'pylab' except: - print 'pylab absent, trying biggles.' 
+ print('pylab absent, trying biggles.') import biggles as bg plotter = 'biggles' @@ -28,10 +31,10 @@ def load_raw_file(fn, kill_partial=33, drop_initial=1): def power(data): n = len(data) - df = 50.e6 / n - p = abs(fft.fft(data)[0:n/2]) / sqrt(50.e6*n) + df = old_div(50.e6, n) + p = old_div(abs(fft.fft(data)[0:old_div(n,2)]), sqrt(50.e6*n)) p[0] = 0. - f = df * arange(n/2) + f = df * arange(old_div(n,2)) return f, p def time_series(files, column=0): @@ -39,7 +42,7 @@ def time_series(files, column=0): for fn in files: d = load_raw_file(fn) if not check_data(d.data): - print 'Weird data in %s'%fn + print('Weird data in %s'%fn) ts.append(d.data[column,:]) return ts @@ -48,7 +51,7 @@ def spectra(files, column=0): for fn in files: d = load_raw_file(fn) if not check_data(d.data): - print 'Weird data in %s'%fn + print('Weird data in %s'%fn) f, p = power(d.data[column,:65536]) spectra.append(p) return f, array(spectra) @@ -72,7 +75,7 @@ def model(x, p): A, B, f0, beta0, f1, beta1 = p # A, B, f0, f1, f2, f3 = p # return (T_1(B + A*abs(1./(1+1j*(x/f0))/(1+1j*(x/f1))/(1+1j*(x/f2))/(1+1j*(x/f3))))) - return (T_1(B + A*abs(1./(1+1j*(x/f0)**beta0)/(1+1j*(x/f1)**beta1)))) # / (1+(x/f2)**beta2)) + return (T_1(B + A*abs(1./(1+1j*(old_div(x,f0))**beta0)/(1+1j*(old_div(x,f1))**beta1)))) # / (1+(x/f2)**beta2)) def resid(*args): p = args[0] @@ -81,7 +84,7 @@ def resid(*args): def model_3db(p): f = arange(.1e6, 10e6, .1e6) - y = model(f, p) / model(f[:1], p)[0] + y = old_div(model(f, p), model(f[:1], p)[0]) idx = (y < 0.5).nonzero()[0][0] return f[idx] @@ -95,12 +98,12 @@ def model_3db(p): if len(args) == 1 and os.path.isdir(args[0]): files = glob('%s/*raw' % args[0]) if len(files) == 0: - print 'No files found in directory "%s"' % args[0] + print('No files found in directory "%s"' % args[0]) sys.exit(1) else: files = args if len(files) == 0: - print 'Give directory or filenames.' + print('Give directory or filenames.') sys.exit(1) # Column is always 0 for one-RC raw grabs. @@ -108,9 +111,9 @@ def model_3db(p): n_files = len(files) ts = time_series(files, column) - print 'Computing RMS for %i files.' % n_files + print('Computing RMS for %i files.' % n_files) rr = [t.std() for t in ts] - print 'RMS: ', mean(rr), ' +- ', std(rr) + print('RMS: ', mean(rr), ' +- ', std(rr)) # Load spectra, average bin. f, p = spectra(files, column) @@ -125,16 +128,16 @@ def model_3db(p): white_level = sqrt(mean(p[:,fhigh_cut]**2)) - print 'White noise (ADC/rtHz): ', white_level - print 'Noise floor (ADC/rtHz): ', high_level - print 'Ratio: ', high_level / white_level + print('White noise (ADC/rtHz): ', white_level) + print('Noise floor (ADC/rtHz): ', high_level) + print('Ratio: ', old_div(high_level, white_level)) # Find 3dB - f3db_level = white_level / sqrt(2.) + f3db_level = old_div(white_level, sqrt(2.)) search_cut = 0.5e6 f3db = f2[((f2 > search_cut)*(y2 <= f3db_level)).nonzero()][0] - print 'f_3db (MHz): ', f3db/1e6, 'MHz' + print('f_3db (MHz): ', old_div(f3db,1e6), 'MHz') # Plot plot plot. 
if plotter == 'pylab': diff --git a/python/rect_check.py b/python/rect_check.py index 88c13d2..0eb52b7 100755 --- a/python/rect_check.py +++ b/python/rect_check.py @@ -1,5 +1,9 @@ #!/usr/bin/python +from __future__ import division +from __future__ import print_function +from builtins import object +from past.utils import old_div USAGE="""%prog [options] [runfiles] Query the MCE, or parse a runfile, to evaluate the sanity of the @@ -14,7 +18,7 @@ MCE_CLOCK = 5e7 # Hz MCE_OVERHEAD = 44 -class frameConfig: +class frameConfig(object): mce_params = [ \ ('cc_rcs', ('cc', 'rcs_to_report_data')), ('cc_dec', ('cc', 'data_rate')), @@ -60,7 +64,7 @@ def derive(self): # d = {} d['f_mux'] = MCE_CLOCK / self.params['cc_cmux'] / self.params['cc_nmux'] - d['f_ro'] = d['f_mux'] / self.params['cc_dec'] + d['f_ro'] = old_div(d['f_mux'], self.params['cc_dec']) d['n_mux'] = self.params['rc_nr'] * self.params['rc_nc'] d['n_ro'] = self.params['cc_nr'] * self.params['cc_nc'] # @@ -70,51 +74,51 @@ def derive(self): d['in_bounds'] = self.params['rc_r0'] + self.params['rc_nr'] <= \ self.params['cc_nmux'] # - d['f_sam'] = d['f_ro'] * (d['n_ro'] / d['n_mux']) + d['f_sam'] = d['f_ro'] * (old_div(d['n_ro'], d['n_mux'])) # d['dup_bug'] = (self.params['rc_fw'] < 0x5010007) and ( self.params['rc_nmux']*self.params['rc_cmux'] < 230 + d['n_ro']*2) self.derived = d def report(self): - print 'MCE configuration:' + print('MCE configuration:') for k, (c,p) in self.mce_params: if k == 'barrier': break - print ' %-8s %-30s %3i' % \ - (k, '( %s, %s ):'%(c,p), self.params[k]) - print - print 'Framing:' + print(' %-8s %-30s %3i' % \ + (k, '( %s, %s ):'%(c,p), self.params[k])) + print() + print('Framing:') d = self.derived def yn(x): if x: return 'yes' return 'no' - print ' RC words stored per mux cycle: %4i' % d['n_mux'] - print ' CC words per read-out frame: %4i' % d['n_ro'] - print ' CC decimation: %4i' % self.params['cc_dec'] - print - print 'Sanity summary:' - print ' Contiguous (for high-rate readout)? %4s' % yn(d['contiguous']) - print ' Complete (all-detector readout)? %4s' % yn(d['complete']) - print ' Bizarro (a bad thing)? %4s' % yn(d['bizarro']) - print ' Bounded (a good thing)? %4s' % yn(d['in_bounds']) + print(' RC words stored per mux cycle: %4i' % d['n_mux']) + print(' CC words per read-out frame: %4i' % d['n_ro']) + print(' CC decimation: %4i' % self.params['cc_dec']) + print() + print('Sanity summary:') + print(' Contiguous (for high-rate readout)? %4s' % yn(d['contiguous'])) + print(' Complete (all-detector readout)? %4s' % yn(d['complete'])) + print(' Bizarro (a bad thing)? %4s' % yn(d['bizarro'])) + print(' Bounded (a good thing)? %4s' % yn(d['in_bounds'])) if d['dup_bug']: - print ' Duplicate data bug! %4s' % yn(d['dup_bug']) - print - print 'Timing:' - print ' Mux freq: %9.2f' % d['f_mux'] - print ' Mean sampling freq: %9.2f' % d['f_sam'] - print ' Read-out freq: %9.2f' % d['f_ro'] - print - print 'Data volume:' - print ' Frame size (bytes/RC): %9i' % \ - (4*(MCE_OVERHEAD + d['n_ro'])) - print ' Data rate (MB/s/RC): %9.2f' % \ - (1.e-6*d['f_ro'] * 4*(MCE_OVERHEAD + d['n_ro'])) + print(' Duplicate data bug! 
%4s' % yn(d['dup_bug'])) + print() + print('Timing:') + print(' Mux freq: %9.2f' % d['f_mux']) + print(' Mean sampling freq: %9.2f' % d['f_sam']) + print(' Read-out freq: %9.2f' % d['f_ro']) + print() + print('Data volume:') + print(' Frame size (bytes/RC): %9i' % \ + (4*(MCE_OVERHEAD + d['n_ro']))) + print(' Data rate (MB/s/RC): %9.2f' % \ + (1.e-6*d['f_ro'] * 4*(MCE_OVERHEAD + d['n_ro']))) if d['dup_bug']: - print - print 'Warning!! your RC firmware has the duplicate data bug;' - print ' and this configuration will suffer from it.' + print() + print('Warning!! your RC firmware has the duplicate data bug;') + print(' and this configuration will suffer from it.') def from_runfile(self, filename): rf = MCERunfile(filename) @@ -126,7 +130,7 @@ def from_runfile(self, filename): if c == 'rca': c = rc item = rf.Item('HEADER', 'RB %s %s' % (c, p), type='int') if item is None: - raise RuntimeError, "Failed to find key for %s %s" % (c,p) + raise RuntimeError("Failed to find key for %s %s" % (c,p)) self.params[k] = item[0] self.derive() @@ -152,7 +156,7 @@ def from_mce(self, mce=None): if opts.mce: from mce_control import MCE as mce if len(args) > 0: - print 'Pass runfiles or --mce, not both!' + print('Pass runfiles or --mce, not both!') sys.exit(1) f = frameConfig(mce=mce()) f.from_mce() diff --git a/python/runfile2cfg.py b/python/runfile2cfg.py index 123642a..1a29e03 100644 --- a/python/runfile2cfg.py +++ b/python/runfile2cfg.py @@ -2,6 +2,9 @@ Sometimes you might want to dump a runfile into config options, or mas_param calls, or something. """ +from __future__ import division +from builtins import range +from past.utils import old_div import sys from mce_data import MCERunfile @@ -34,7 +37,7 @@ fout.write(' '.join(['%i'%x for x in d]) + '\n') else: fout.write('%s = [\n' % k1) - nr = (len(d)+7)/8 + nr = old_div((len(d)+7),8) for i in range(nr): fout.write(' ' * 10 + ', '.join(['%6i'%x for x in d[i*8:i*8+8]])) if i != nr-1: diff --git a/python/runfile_checks.py b/python/runfile_checks.py index 339a614..7ae8d85 100644 --- a/python/runfile_checks.py +++ b/python/runfile_checks.py @@ -1,3 +1,5 @@ +from __future__ import print_function +from builtins import range import sys from mce_runfile import * @@ -18,7 +20,7 @@ def check_multilock(filename): c = (rc-1)*8 + ch for r in range(num_rows): if multi[c][r]*deads[rci][ch][r] != 0: - print c,r,multi[c][r],deads[rci][ch][r] + print(c,r,multi[c][r],deads[rci][ch][r]) broken = broken + 1 return (broken == 0) @@ -26,6 +28,6 @@ def check_multilock(filename): for arg in sys.argv[1:]: if check_multi(arg): - print 'Ok ' + arg + print('Ok ' + arg) else: - print 'Fail! ' + arg + print('Fail! ' + arg) diff --git a/python/servo_gains.py b/python/servo_gains.py index 9e3eedd..83fd833 100644 --- a/python/servo_gains.py +++ b/python/servo_gains.py @@ -2,6 +2,10 @@ Estimate the critical gains for sq2servo and sq1servo based on the measured SA Ramp and SQ2 servo locking slopes. 
""" +from __future__ import division +from __future__ import print_function +from builtins import range +from past.utils import old_div import auto_setup as aset @@ -12,7 +16,7 @@ def pretty(x, n=8, fmt='%8.4f'): out = '' - for i in range((len(x)+n-1)/n): + for i in range(old_div((len(x)+n-1),n)): out += ' '.join([fmt % _x for _x in x[i*n:i*n+n]]) + '\n' return out @@ -36,9 +40,9 @@ def pretty(x, n=8, fmt='%8.4f'): sa = aset.SARamp.join([aset.SARamp(f) for f in fs.stage_all('sa_ramp')]) sa.tuning = tuning sa.reduce(slope=1) - s2s_gains = 1./sa.analysis['lock_slope'] - print 'SQ2 servo critical gain: ' - print pretty(s2s_gains) + s2s_gains = old_div(1.,sa.analysis['lock_slope']) + print('SQ2 servo critical gain: ') + print(pretty(s2s_gains)) if 'sq2_servo' in opts.stage: sq2 = aset.SQ2Servo.join([aset.SQ2Servo(f) \ @@ -47,9 +51,9 @@ def pretty(x, n=8, fmt='%8.4f'): for s in [-1, +1]: sq2.reduce(slope=s) s1s_gains = 1./sq2.analysis['lock_slope'] / s2s_gains - print 'SQ1 servo critical gain (sq2_slope=%i):' % s - print pretty(s1s_gains) - print + print('SQ1 servo critical gain (sq2_slope=%i):' % s) + print(pretty(s1s_gains)) + print() if 'sq1_ramp' in opts.stage: sq = aset.SQ1Ramp.join([aset.SQ1Ramp(f) @@ -57,14 +61,14 @@ def pretty(x, n=8, fmt='%8.4f'): sq.tuning = tuning sq.reduce() for direction in ['up', 'dn']: - crit_gains = -4096./sq.analysis['lock_%s_slope' % direction] + crit_gains = old_div(-4096.,sq.analysis['lock_%s_slope' % direction]) crit_gains[np.isnan(crit_gains) + np.isinf(crit_gains)] = 0. # Reformat n_row, n_col = sq.data_shape[:2] crit_gains.shape = (n_row, n_col) - print 'Full chain critical gains by column:' + print('Full chain critical gains by column:') for col, col_data in enumerate(crit_gains.transpose()): - print 'Column %i' % col - print pretty(col_data, fmt='%8.2f') - print + print('Column %i' % col) + print(pretty(col_data, fmt='%8.2f')) + print() diff --git a/python/special_ops.py b/python/special_ops.py index 4cb5d60..295d466 100644 --- a/python/special_ops.py +++ b/python/special_ops.py @@ -1,3 +1,9 @@ +from __future__ import division +from __future__ import print_function +from builtins import chr +from builtins import range +from builtins import object +from past.utils import old_div usage=""" %prog [options] action [action, ...] @@ -18,7 +24,7 @@ # Kinds of readout card -class RC_revE: +class RC_revE(object): # Overall gain of SA output to ADC input output_gain = 202.36 # SA output voltage per ADC bit @@ -26,11 +32,11 @@ class RC_revE: # SA output voltage per bit of offset DAC dVdX_offset = 2.5/2**16 / 45.92 # SA bias voltage, per bit of bias DAC - dVdX_bias = 2.5/2**16 + dVdX_bias = old_div(2.5,2**16) # SA bias resistance (not including cable) R_bias = 15000. -class RC_revB: +class RC_revB(object): # Overall gain of SA output to ADC input output_gain = 198.9 # SA output voltage per ADC bit @@ -38,7 +44,7 @@ class RC_revB: # SA output voltage per bit of offset DAC dVdX_offset = 2.5/2**16 / 33.90 # SA bias voltage, per bit of bias DAC - dVdX_bias = 2.5/2**16 + dVdX_bias = old_div(2.5,2**16) # SA bias resistance (not including cable) R_bias = 15000. @@ -57,7 +63,7 @@ class RC_revB: for action in args: if action == 'measure_sa_offset_ratio': m = mce() - print 'Saving current configuration...' + print('Saving current configuration...') sa_bias0 = m.read('sa', 'bias') sa_offset0 = m.read('sa', 'offset') sample_num = m.read('rca', 'sample_num')[0] @@ -66,12 +72,12 @@ class RC_revB: card_rev = 2 servo0, data0 = m.servo_mode(), m.data_mode() - print 'Setting up...' 
+ print('Setting up...') n_sa = len(sa_bias0) m.servo_mode(0) m.data_mode(0) - print 'Measuring SA bias response...' + print('Measuring SA bias response...') step = 500 m.write('sa', 'bias', [0]*n_sa) m.write('sa', 'offset', [0]*n_sa) @@ -86,7 +92,7 @@ class RC_revB: m.write('sa', 'bias', [0]*n_sa) y3 = m.read_row() - print 'Restoring...' + print('Restoring...') m.write('sa', 'offset', sa_offset0) m.write('sa', 'bias', sa_bias0) m.servo_mode(servo0) @@ -97,79 +103,79 @@ class RC_revB: #d_offset = y1 - y2 ## This is better because it catches open circuits d_offset = (y0 - y3).astype('float') / step / sample_num - ratio = d_bias.astype('float') / d_offset + ratio = old_div(d_bias.astype('float'), d_offset) if any(d_offset==0): ratio[d_offset==0] = 0 - print 'SA response (dADC/dBIAS), by column:' - for r in range((n_sa+7)/8): - print ' ', ' '.join(['%7.3f' % x - for x in d_bias[r*8:(r+1)*8]]) - print - - print 'Offset response (dDC/dOFFSET), by column:' - for r in range((n_sa+7)/8): - print ' ', ' '.join(['%7.3f' % x - for x in d_offset[r*8:(r+1)*8]]) - print + print('SA response (dADC/dBIAS), by column:') + for r in range(old_div((n_sa+7),8)): + print(' ', ' '.join(['%7.3f' % x + for x in d_bias[r*8:(r+1)*8]])) + print() + + print('Offset response (dDC/dOFFSET), by column:') + for r in range(old_div((n_sa+7),8)): + print(' ', ' '.join(['%7.3f' % x + for x in d_offset[r*8:(r+1)*8]])) + print() # Analyze those signals rc = RC_revs[card_rev] - d_offset_pred = rc.dVdX_offset / rc.dVdX_adc + d_offset_pred = old_div(rc.dVdX_offset, rc.dVdX_adc) # Convert d_bias to voltage ratio between SA output (at # pre-amp input) and SA bias (at DAC output) dVdV_bias = (d_bias * rc.dVdX_adc / rc.dVdX_bias)**-1 ## Using R_bias, determine R_cable. - R_cable = rc.R_bias / (dVdV_bias - 1) + R_cable = old_div(rc.R_bias, (dVdV_bias - 1)) ## Flags - offset_good = abs(d_offset/d_offset_pred - 1) < .01 + offset_good = abs(old_div(d_offset,d_offset_pred) - 1) < .01 - print 'Estimated SA cable resistance (rev%s readout card):' % \ - chr(ord('A')-1+card_rev) - for r in range((n_sa+7)/8): - print ' ', ' '.join(['%7.1f' % x for x in R_cable[r*8:(r+1)*8]]) - print + print('Estimated SA cable resistance (rev%s readout card):' % \ + chr(ord('A')-1+card_rev)) + for r in range(old_div((n_sa+7),8)): + print(' ', ' '.join(['%7.1f' % x for x in R_cable[r*8:(r+1)*8]])) + print() - print 'Apparent resistance ratios, by column:' - for r in range((n_sa+7)/8): - print ' ', ' '.join(['%7.3f' % x for x in ratio[r*8:(r+1)*8]]) + print('Apparent resistance ratios, by column:') + for r in range(old_div((n_sa+7),8)): + print(' ', ' '.join(['%7.3f' % x for x in ratio[r*8:(r+1)*8]])) ratio_mean = ratio[ratio!=0].mean() - print - print 'The typical ratio is: %.3f' % ratio_mean - print + print() + print('The typical ratio is: %.3f' % ratio_mean) + print() # This ratio isn't necessarily what you want to use, you might # want to let the floor drop out gradually - sa_max = 64000. / step + sa_max = old_div(64000., step) sa_fall = -8000*sample_num - y0 - ratio1 = -(sa_fall / sa_max - d_bias) / d_offset + ratio1 = old_div(-(old_div(sa_fall, sa_max) - d_bias), d_offset) ratio1[d_offset==0] = 0. 
- print 'Suggested sa_offset_bias_ratios, by column:' - for r in range((n_sa+7)/8): - print ' ', ' '.join(['%7.3f' % x for x in ratio1[r*8:(r+1)*8]]) + print('Suggested sa_offset_bias_ratios, by column:') + for r in range(old_div((n_sa+7),8)): + print(' ', ' '.join(['%7.3f' % x for x in ratio1[r*8:(r+1)*8]])) ratio1_min = ratio1[ratio1!=0].min() - print 'Smallest range-filling ratio: %.3f' % ratio1_min + print('Smallest range-filling ratio: %.3f' % ratio1_min) # Rough badness diagnostic if ratio1_min < ratio_mean: - print 'You have weird ratios. Seek advice.' - print 'Recommended sa_offset_bias_ratio: %.3f' % ratio1_min + print('You have weird ratios. Seek advice.') + print('Recommended sa_offset_bias_ratio: %.3f' % ratio1_min) elif action == 'test_syncbox': m = mce() for it in [0,1]: sc0 = m.read('cc', 'select_clk') if sc0 is None: - print 'MCE error.' + print('MCE error.') break elif sc0[0] == 1: - print 'Sync box is connected and select_clk=1.' + print('Sync box is connected and select_clk=1.') break - elif i == 0: + elif it == 0: - print 'Writing select_clk=1' + print('Writing select_clk=1') m.write('cc', 'select_clk', [1]) time.sleep(1) diff --git a/template/auto_acq.py b/template/auto_acq.py index b33a0e4..b5b70a4 100644 --- a/template/auto_acq.py +++ b/template/auto_acq.py @@ -178,7 +178,7 @@ def is_and_is_symlink(linkname): opts['include'] = my_det['incfile']; # pass any remaining options to mce_cmd - for key,val in opts.items(): + for key,val in list(opts.items()): lines.append('acq_option %s %s %s' % (ftype, key, val)) # Construct init line for this output type @@ -188,7 +188,7 @@ def is_and_is_symlink(linkname): elif ftype == 'dirfile': line = 'acq_config_dirfile' else: - raise ValueError, "unknown type '%s'" % ftype + raise ValueError("unknown type '%s'" % ftype) if my_det['seq_int'] > 0: line += '_fs %(filename)s %(rc)s %(seq_int)i' % my_det else: diff --git a/test_suite/dry_lock.py b/test_suite/dry_lock.py index 60bd7c6..0354484 100644 --- a/test_suite/dry_lock.py +++ b/test_suite/dry_lock.py @@ -2,6 +2,11 @@ Set up locked servo (looped-back) RC. Manipulate the equilibrium feedback by setting adc_offset. """ +from __future__ import division +from __future__ import print_function +from builtins import zip +from builtins import range +from past.utils import old_div from mce import mce import numpy @@ -13,7 +18,7 @@ def __init__(self, *args, **kwargs): mce.__init__(self, *args, **kwargs) self.n_rc = len(self.read('rca', 'fw_rev')) # The col_map might need tweaking depending on what rcs are present.
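The list(range(...)) change that follows is the standard futurize treatment: in Python 3, range() returns a lazy, immutable sequence rather than a list, so futurize conservatively wraps any result that might later be mutated or concatenated. A short illustration of the difference (hypothetical values, not part of the patch):

    cols = range(2 * 8)        # Python 3: a range object, not a list
    # cols[0] = 7 would raise TypeError: ranges do not support item assignment
    cols = list(range(2 * 8))
    cols[0] = 7                # a real list accepts item assignment
    assert cols[:3] == [7, 1, 2]

Plain indexing and len() still work on a bare range, so the wrapper only matters where col_map might be reassigned element-wise.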
- self.col_map = range(self.n_rc*8) + self.col_map = list(range(self.n_rc*8)) def read_row(self, n=1, avg=False): d = numpy.array(self.read_frames(n, data_only=True))[:,:self.n_rc*8] @@ -24,7 +29,7 @@ def read_row(self, n=1, avg=False): def write_columns(self, param, data): # Duplicate values across all rows in each column parameter for c, d in enumerate(data): - rc, chan = c/8 + 1, c%8 + rc, chan = old_div(c,8) + 1, c%8 self.write('rc%i'%rc, param+'%i' % chan, [int(d)]*41) def servo_mode(self, mode=None): @@ -55,7 +60,7 @@ def flux_jumping(self, mode=None): def dt(self): nr, dr, rl = [self.read('cc', k)[0] for k in ['num_rows', 'data_rate', 'row_len']] - return float(nr * dr * rl) / 5e7 + return old_div(float(nr * dr * rl), 5e7) class column(super_mce): """ @@ -153,12 +158,12 @@ def permute(items): # Measure SQ1 FB response def col_avg(): time.sleep(0.1) # let any recent settings set - return mean(m.read_col()) / SAMPLE_NUM + return old_div(mean(m.read_col()), SAMPLE_NUM) def check(): - z1 = m.read_col() / SAMPLE_NUM + z1 = old_div(m.read_col(), SAMPLE_NUM) time.sleep(0.5) - z2 = m.read_col() / SAMPLE_NUM + z2 = old_div(m.read_col(), SAMPLE_NUM) return (abs(array(z1) - z2) > 100).astype('int') # Zero point @@ -172,40 +177,40 @@ def check(): m.fb_const(8191) adc1 = col_avg() m.fb_const(-8192) - print 'SQ1FB range coverage: %8.2f to %8.2f = %8.2f' % (adc0, adc1, adc1-adc0) - dadc_dfb = (adc1-adc0) / (8191 + 8192) - print ' dADC / dFB: %8.4f' % (dadc_dfb) - print ' Critical gain: %8.2f' % (4096. / dadc_dfb / SAMPLE_NUM) + print('SQ1FB range coverage: %8.2f to %8.2f = %8.2f' % (adc0, adc1, adc1-adc0)) + dadc_dfb = old_div((adc1-adc0), (8191 + 8192)) + print(' dADC / dFB: %8.4f' % (dadc_dfb)) + print(' Critical gain: %8.2f' % (4096. / dadc_dfb / SAMPLE_NUM)) # Probe ADC response to SA bias m.sa_bias(2000) adc1 = col_avg() m.sa_bias(0) - print 'SA range coverage: %8.2f to %8.2f' % (adc0, adc1) - dadc_dsa = (adc1 - adc0) / 2000 - print ' dADC / dSA: %8.4f' % (dadc_dsa) + print('SA range coverage: %8.2f to %8.2f' % (adc0, adc1)) + dadc_dsa = old_div((adc1 - adc0), 2000) + print(' dADC / dSA: %8.4f' % (dadc_dsa)) # Probe ADC response to SA offset m.sa_offset(2000) adc1 = col_avg() m.sa_offset(0) - print 'SA offset coverage: %8.2f to %8.2f' % (adc0, adc1) - dadc_doff = (adc1 - adc0) / 2000 - print ' dADC / dOffset: %8.4f' % (dadc_doff) + print('SA offset coverage: %8.2f to %8.2f' % (adc0, adc1)) + dadc_doff = old_div((adc1 - adc0), 2000) + print(' dADC / dOffset: %8.4f' % (dadc_doff)) if INIT_ONLY: - raise RuntimeError, 'stopping' + raise RuntimeError('stopping') # Set offset/bias to lock near FB=0 adc = adc0 + dadc_dfb * 8192 if adc > 0: - m.sa_offset(-int(adc / dadc_doff)) + m.sa_offset(-int(old_div(adc, dadc_doff))) else: - m.sa_bias(-int(adc / dadc_dsa)) + m.sa_bias(-int(old_div(adc, dadc_dsa))) m.fb_const(0) # Those should work fine. 
Pick a set of target feedbacks DAC_OK = 16000*.93 # don't flux jump immediately - targets = -DAC_OK/2 + DAC_OK*arange(ROWS+1)/ROWS + targets = old_div(-DAC_OK,2) + DAC_OK*arange(ROWS+1)/ROWS targets = array(permute(targets)) targets[SOURCE_ROW] = -4000 @@ -228,7 +233,7 @@ def check(): m.init_servo() m.data_mode(1) time.sleep(0.1) - print 'Lock points: ', m.read_col()[0]/2**12 + print('Lock points: ', old_div(m.read_col()[0],2**12)) def trace(delay=0.01, steps=100, step_fn=None): t = 0 @@ -242,19 +247,19 @@ def trace(delay=0.01, steps=100, step_fn=None): return data def curve(i): - NW = STEPS/8 - ao = ADC0 + DEPTH*exp(-(float((i-STEPS/2))/NW)**2) + NW = old_div(STEPS,8) + ao = ADC0 + DEPTH*exp(-(old_div(float((i-old_div(STEPS,2))),NW))**2) a = m.adc_offset() a[SOURCE_ROW] = ao m.adc_offset(a) # Run a curve - print 'Running a curve...' + print('Running a curve...') m.data_mode(1) m.flux_jumping(1) ADC0 = m.adc_offset()[SOURCE_ROW] - DEPTH = (DAC_OK/2 - targets[SOURCE_ROW]) * dadc_dfb + DEPTH = (old_div(DAC_OK,2) - targets[SOURCE_ROW]) * dadc_dfb STEPS = 200 data = trace(delay=0.01, steps=STEPS, step_fn=curve) fj = None @@ -262,27 +267,27 @@ def curve(i): # Unravel for data mode: data_mode = m.data_mode() if data_mode == 1: - data = data / 2**12 + data = old_div(data, 2**12) elif data_mode == 2: - data = data / 1218. + data = old_div(data, 1218.) elif data_mode == 10: data = data / 1218 / 16 fj = (2**16 + data) % 128 - print 'Reached grand extreme of ', data[:,0].max() + print('Reached grand extreme of ', data[:,0].max()) # idx = arange(ROWS-1) idx = idx + (idx >= SOURCE_ROW).astype('int') # rows that aren't the source ddata = (data - data[:10,:].mean(axis=0))[:,idx] - print 'Source row: ', SOURCE_ROW - print 'Top 10 departures:' + print('Source row: ', SOURCE_ROW) + print('Top 10 departures:') deps = abs(ddata).max(axis=0) deps = sorted(zip(deps, idx)) for dep, i in deps[-1:-11:-1]: - print ' row %2i %5f' % (i,dep) - print + print(' row %2i %5f' % (i,dep)) + print() subplot(211) plot(data[:,idx]) diff --git a/test_suite/find_dups.py b/test_suite/find_dups.py index e5da7b1..f5330e9 100644 --- a/test_suite/find_dups.py +++ b/test_suite/find_dups.py @@ -14,6 +14,8 @@ length prev_frame frame 6 -6 0 """ +from __future__ import print_function +from builtins import range from pylab import * from mce_data import * @@ -61,7 +63,7 @@ def hunt(raw): for filename in args: # Load file - print 'Filename: %s' % filename + print('Filename: %s' % filename) m = MCEFile(filename, runfile=False) # Load raw frames, 10 k should be enough. rawd = m.Read(raw_frames=1)[:10000] @@ -70,9 +72,9 @@ def hunt(raw): # Look for duplicates runs = hunt(raw) n = raw.shape[-1] - print ' readout size: %5i' % n - print ' duplicate runs: %5i' % len(runs) + print(' readout size: %5i' % n) + print(' duplicate runs: %5i' % len(runs)) if len(runs)>0: - print ' length prev_frame frame' + print(' length prev_frame frame') for a,b,c, in runs: - print ' %3i %3i %3i' % (c, a-n,b) + print(' %3i %3i %3i' % (c, a-n,b)) diff --git a/test_suite/fj_test_lock.py b/test_suite/fj_test_lock.py index 202b020..0a390b8 100644 --- a/test_suite/fj_test_lock.py +++ b/test_suite/fj_test_lock.py @@ -3,6 +3,11 @@ feedback by setting adc_offset. MA@UBC This script is modified to apply an ever-increasing ramp to adc_offset and hence sq1fb, so it can be used to test flux-jumping. 
""" +from __future__ import division +from __future__ import print_function +from builtins import zip +from builtins import range +from past.utils import old_div from mce import mce import numpy @@ -14,7 +19,7 @@ def __init__(self, *args, **kwargs): mce.__init__(self, *args, **kwargs) self.n_rc = len(self.read('rca', 'fw_rev')) # The col_map might need tweaking depending on what rcs are present. - self.col_map = range(self.n_rc*8) + self.col_map = list(range(self.n_rc*8)) def read_row(self, n=1, avg=False): d = numpy.array(self.read_frames(n, data_only=True))[:,:self.n_rc*8] @@ -25,7 +30,7 @@ def read_row(self, n=1, avg=False): def write_columns(self, param, data): # Duplicate values across all rows in each column parameter for c, d in enumerate(data): - rc, chan = c/8 + 1, c%8 + rc, chan = old_div(c,8) + 1, c%8 self.write('rc%i'%rc, param+'%i' % chan, [int(d)]*41) def servo_mode(self, mode=None): @@ -56,7 +61,7 @@ def flux_jumping(self, mode=None): def dt(self): nr, dr, rl = [self.read('cc', k)[0] for k in ['num_rows', 'data_rate', 'row_len']] - return float(nr * dr * rl) / 5e7 + return old_div(float(nr * dr * rl), 5e7) class column(super_mce): """ @@ -154,12 +159,12 @@ def permute(items): # Measure SQ1 FB response def col_avg(): time.sleep(0.1) # let any recent settings set - return mean(m.read_col()) / SAMPLE_NUM + return old_div(mean(m.read_col()), SAMPLE_NUM) def check(): - z1 = m.read_col() / SAMPLE_NUM + z1 = old_div(m.read_col(), SAMPLE_NUM) time.sleep(0.5) - z2 = m.read_col() / SAMPLE_NUM + z2 = old_div(m.read_col(), SAMPLE_NUM) return (abs(array(z1) - z2) > 100).astype('int') # Zero point @@ -173,40 +178,40 @@ def check(): m.fb_const(8191) adc1 = col_avg() m.fb_const(-8192) - print 'SQ1FB range coverage: %8.2f to %8.2f = %8.2f' % (adc0, adc1, adc1-adc0) - dadc_dfb = (adc1-adc0) / (8191 + 8192) - print ' dADC / dFB: %8.4f' % (dadc_dfb) - print ' Critical gain: %8.2f' % (4096. / dadc_dfb / SAMPLE_NUM) + print('SQ1FB range coverage: %8.2f to %8.2f = %8.2f' % (adc0, adc1, adc1-adc0)) + dadc_dfb = old_div((adc1-adc0), (8191 + 8192)) + print(' dADC / dFB: %8.4f' % (dadc_dfb)) + print(' Critical gain: %8.2f' % (4096. / dadc_dfb / SAMPLE_NUM)) # Probe ADC response to SA bias m.sa_bias(2000) adc1 = col_avg() m.sa_bias(0) - print 'SA range coverage: %8.2f to %8.2f' % (adc0, adc1) - dadc_dsa = (adc1 - adc0) / 2000 - print ' dADC / dSA: %8.4f' % (dadc_dsa) + print('SA range coverage: %8.2f to %8.2f' % (adc0, adc1)) + dadc_dsa = old_div((adc1 - adc0), 2000) + print(' dADC / dSA: %8.4f' % (dadc_dsa)) # Probe ADC response to SA offset m.sa_offset(2000) adc1 = col_avg() m.sa_offset(0) - print 'SA offset coverage: %8.2f to %8.2f' % (adc0, adc1) - dadc_doff = (adc1 - adc0) / 2000 - print ' dADC / dOffset: %8.4f' % (dadc_doff) + print('SA offset coverage: %8.2f to %8.2f' % (adc0, adc1)) + dadc_doff = old_div((adc1 - adc0), 2000) + print(' dADC / dOffset: %8.4f' % (dadc_doff)) if INIT_ONLY: - raise RuntimeError, 'stopping' + raise RuntimeError('stopping') # Set offset/bias to lock near FB=0 adc = adc0 + dadc_dfb * 8192 if adc > 0: - m.sa_offset(-int(adc / dadc_doff)) + m.sa_offset(-int(old_div(adc, dadc_doff))) else: - m.sa_bias(-int(adc / dadc_dsa)) + m.sa_bias(-int(old_div(adc, dadc_dsa))) m.fb_const(0) # Those should work fine. 
Pick a set of target feedbacks DAC_OK = 16000# *.93 # don't flux jump immediately - targets = -DAC_OK/2 + DAC_OK*arange(ROWS+1)/ROWS + targets = old_div(-DAC_OK,2) + DAC_OK*arange(ROWS+1)/ROWS targets = array(permute(targets)) targets[SOURCE_ROW] = -4000 @@ -229,7 +234,7 @@ def check(): m.init_servo() m.data_mode(1) time.sleep(0.1) - print 'Lock points: ', m.read_col()[0]/2**12 + print('Lock points: ', old_div(m.read_col()[0],2**12)) def trace(delay=0.01, steps=100, step_fn=None): t = 0 @@ -243,20 +248,20 @@ def trace(delay=0.01, steps=100, step_fn=None): return data def curve(i): - NW = STEPS/8 + NW = old_div(STEPS,8) ao = ADC0 + 500*dadc_dfb*i/10 #exp(-(float((i-STEPS/2))/NW)**2) a = m.adc_offset() a[SOURCE_ROW] = ao m.adc_offset(a) # Run a curve - print 'Running a curve...' + print('Running a curve...') m.n_rc = 1 m.data_mode(10) m.flux_jumping(1) ADC0 = m.adc_offset()[SOURCE_ROW] - DEPTH = (DAC_OK/2 - targets[SOURCE_ROW]) * dadc_dfb + DEPTH = (old_div(DAC_OK,2) - targets[SOURCE_ROW]) * dadc_dfb STEPS = 1000 data = trace(delay=0.01, steps=STEPS, step_fn=curve) fj = None @@ -265,32 +270,32 @@ def curve(i): # Unravel for data mode: data_mode = m.data_mode() if data_mode == 1: - data = data / 2**12 + data = old_div(data, 2**12) elif data_mode == 2: - data = data / 1218. + data = old_div(data, 1218.) elif data_mode == 10: fj = data%128 - data = ((data/128.)*8.)/ 1218.#drop the fj bits, then account for fitler gain and filter scaling of datamode 10 + data = old_div(((old_div(data,128.))*8.), 1218.) # drop the fj bits, then account for filter gain and filter scaling of data mode 10 # signed 7 bits #for i in range(len(fj[:,SOURCE_ROW])): # if fj[i,SOURCE_ROW] > 63: # fj[i,SOURCE_ROW] -= 128 - print 'Reached grand extreme of ', data[:,0].max() + print('Reached grand extreme of ', data[:,0].max()) # idx = arange(ROWS-1) idx = idx + (idx >= SOURCE_ROW).astype('int') # rows that aren't the source ddata = (data - data[:10,:].mean(axis=0))[:,idx] - print 'Source row: ', SOURCE_ROW - print 'Top 10 departures:' + print('Source row: ', SOURCE_ROW) + print('Top 10 departures:') deps = abs(ddata).max(axis=0) deps = sorted(zip(deps, idx)) for dep, i in deps[-1:-11:-1]: - print ' row %2i %5f' % (i,dep) - print + print(' row %2i %5f' % (i,dep)) + print() # This is added to also acquire feedback and compare. # Convert to adc_offsets @@ -300,12 +305,12 @@ def curve(i): m.init_servo() m.data_mode(1) ADC0 = m.adc_offset()[SOURCE_ROW] - DEPTH = (DAC_OK/2 - targets[SOURCE_ROW]) * dadc_dfb + DEPTH = (old_div(DAC_OK,2) - targets[SOURCE_ROW]) * dadc_dfb STEPS = 1000 data_fb = trace(delay=0.01, steps=STEPS, step_fn=curve) data_mode = m.data_mode() if data_mode == 1: - data_fb1 = data_fb / 2**12 + data_fb1 = old_div(data_fb, 2**12) data_mode = 10 ######################################################### diff --git a/test_suite/mce_bin2asc.py b/test_suite/mce_bin2asc.py index 1a26b7f..a5114df 100755 --- a/test_suite/mce_bin2asc.py +++ b/test_suite/mce_bin2asc.py @@ -1,11 +1,12 @@ #!/usr/bin/python +from __future__ import print_function from mce_data import MCEFile import sys from numpy import * if len(sys.argv) <= 1: - print 'Give me raw file names.' + print('Give me raw file names.') sys.exit(1) for f in sys.argv[1:]: diff --git a/test_suite/slow_fb_ramp.py b/test_suite/slow_fb_ramp.py index 6406edb..01ae129 100644 --- a/test_suite/slow_fb_ramp.py +++ b/test_suite/slow_fb_ramp.py @@ -2,6 +2,9 @@ Set up feedback servo ramp on (disconnected) RC.
It's best to jumper the amp inputs so that there is no drift in the error voltage level. """ +from __future__ import division +from __future__ import print_function +from past.utils import old_div from mce import mce import numpy @@ -22,7 +25,7 @@ def read_row(self, n=1, avg=False): def write_columns(self, param, data): # Duplicate values across all rows in each column parameter for c, d in enumerate(data): - rc, chan = c/8 + 1, c%8 + rc, chan = old_div(c,8) + 1, c%8 self.write('rc%i'%rc, param+'%i' % chan, [int(d)]*41) def servo_mode(self, mode=None): @@ -51,7 +54,7 @@ def flux_jumping(self, mode=None): def dt(self): nr, dr, rl = [self.read('cc', k)[0] for k in ['num_rows', 'data_rate', 'row_len']] - return float(nr * dr * rl) / 5e7 + return old_div(float(nr * dr * rl), 5e7) if __name__ == '__main__': # Get MCE @@ -66,15 +69,15 @@ def dt(self): # Sample d1 = m.read_row(100, True) - print '0-sample:' - print d1 + print('0-sample:') + print(d1) # ADC_offset - adc0 = d1 / 10 + adc0 = old_div(d1, 10) m.write_columns('adc_offset', adc0) # Re-sample d2 = m.read_row(100, True) - print 'Check lock:' - print d2 + print('Check lock:') + print(d2) # Set gains to servo at ~ 1000 FB / second target = -16300 @@ -85,14 +88,14 @@ def dt(self): m.write_columns('gaini', ([0]*8)+([1]*8)) # RC1 # m.write_columns('gaini', ([1]*8)+([0]*8)) - m.write_columns('adc_offset', adc0 - e_gain/10) + m.write_columns('adc_offset', adc0 - old_div(e_gain,10)) m.data_mode(1) m.servo_mode(3) m.init_flux() - d3 = m.read_row(100) / 4096 + d3 = old_div(m.read_row(100), 4096) chan = d3[:,8] - print chan + print(chan) plot(chan) show() diff --git a/test_suite/utils.py b/test_suite/utils.py index eb3f7cb..23e62e1 100644 --- a/test_suite/utils.py +++ b/test_suite/utils.py @@ -1,3 +1,5 @@ +from __future__ import division +from past.utils import old_div from numpy import * def logbin(f, y, bins=400): @@ -17,8 +19,8 @@ def logbin(f, y, bins=400): df = f[1] - f[0] f_max = f[-1] + df f_min = f[1] - N = log(f_max / f_min) - dN = N / bins + N = log(old_div(f_max, f_min)) + dN = old_div(N, bins) edges = f_min * exp(dN * arange(bins+1)) # Frequency counts for norming nf = histogram(f, bins=edges)[0] @@ -26,8 +28,8 @@ def logbin(f, y, bins=400): new_f = histogram(f, weights=f, bins=edges)[0] new_y = histogram(f, weights=abs(y)**2, bins=edges)[0] # Reduce - new_f = new_f[nf!=0] / nf[nf!=0] - new_y = sqrt(new_y[nf!=0]/nf[nf!=0]) + new_f = old_div(new_f[nf!=0], nf[nf!=0]) + new_y = sqrt(old_div(new_y[nf!=0],nf[nf!=0])) return new_f, new_y @@ -40,6 +42,6 @@ def spectrum(data, dt=1., rebin=False, axis=0): f = 1./dt * arange(nt)/nt if rebin: if y.ndim > 1: - raise ValueError, 'Can only rebin 1d transforms.' 
- return logbin(f[:nt/2], y[:nt/2]) + raise ValueError('Can only rebin 1d transforms.') + return logbin(f[:old_div(nt,2)], y[:old_div(nt,2)]) return f, y diff --git a/utilities/expresspost/expresspost.py b/utilities/expresspost/expresspost.py index af15ba2..6f6b03e 100755 --- a/utilities/expresspost/expresspost.py +++ b/utilities/expresspost/expresspost.py @@ -1,3 +1,5 @@ #!/usr/bin/python +from __future__ import print_function +from builtins import object import os, time @@ -5,7 +7,7 @@ import stat import optparse -class WatchSet: +class WatchSet(object): """Track a set of files matching a certain pattern.""" def __init__(self, base_dir, pattern, recursive = False, @@ -50,13 +52,13 @@ def FindNew(self, sources): def MarkProcessed(self, sources): self.processed = self.processed.union(sources) -class ListState: +class ListState(object): unknown = 0 open = 1 closed = 2 processed = 3 -class ArchiveList: +class ArchiveList(object): def __init__(self, filename, read_now = True): self.filename = filename self.path = os.path.dirname(filename) @@ -113,7 +115,7 @@ def FullPath(self, files=None): return [ self.path + '/' + f for f in files ] -class Rsyncer: +class Rsyncer(object): def __init__(self, dest, key=None): self.dest = dest self.key = key @@ -126,7 +128,7 @@ def Sync(self, sources, suffix, extra_permissions=None): err = os.spawnv(os.P_WAIT, '/usr/bin/rsync', args) if (err != 0): - print 'rsync didn\'t like: ', args + print('rsync didn\'t like: ', args) return # split dest into host and folder... @@ -136,7 +138,7 @@ def Sync(self, sources, suffix, extra_permissions=None): (self.key, host, extra_permissions, folder) err = os.spawnv(os.P_WAIT, '/usr/bin/ssh', argstr.split()) if (err != 0): - print 'ssh didn\'t like: ', argstr + print('ssh didn\'t like: ', argstr) def process_options(): @@ -152,7 +154,7 @@ def process_options(): (op, ar) = opts.parse_args() if op.source_dir is None or op.dest_location is None: - print 'Source directory or destination location not specified!' + print('Source directory or destination location not specified!') return None return op diff --git a/utilities/headsup/mheadsup/__init__.py b/utilities/headsup/mheadsup/__init__.py index c672d5e..f653588 100644 --- a/utilities/headsup/mheadsup/__init__.py +++ b/utilities/headsup/mheadsup/__init__.py @@ -1,12 +1,13 @@ +from __future__ import absolute_import __all__ = ['analyzer', 'constants', 'clients', 'gfx', 'geometries', 'nets', 'streams', 'plotters', 'util'] -import analyzer -import clients -import constants -import gfx -import geometries -import nets -import plotters -import streams -import util +from . import analyzer +from . import clients +from . import constants +from . import gfx +from . import geometries +from . import nets +from . import plotters +from . import streams +from . import util diff --git a/utilities/headsup/mheadsup/analyzer.py b/utilities/headsup/mheadsup/analyzer.py index b8514d5..5d50be3 100644 --- a/utilities/headsup/mheadsup/analyzer.py +++ b/utilities/headsup/mheadsup/analyzer.py @@ -1,11 +1,14 @@ """ Implement a data consumer using pylab. """ +from __future__ import print_function +from __future__ import absolute_import +from builtins import object import time -import clients, nets, util +from . 
import clients, nets, util -class x: #dataAnalyzer(clients.dataConsumer): +class x(object): #dataAnalyzer(clients.dataConsumer): def __init__(self, addr=None, name='pylab'): clients.dataConsumer.__init__(self, addr, name) self.config = {} @@ -17,7 +20,7 @@ def go(self, rate=10.): while self.connected: op, data = self.process() if op == 'ctrl': - print 'ctrl: updated_keys=', data + print('ctrl: updated_keys=', data) elif op == 'data': dshape = self.controls.get('data_shape', None) if dshape is not None: @@ -33,5 +36,5 @@ def go(self, rate=10.): pp = dataAnalyzer(opts.server) pp.go() - print 'disconnected.' + print('disconnected.') diff --git a/utilities/headsup/mheadsup/clients.py b/utilities/headsup/mheadsup/clients.py index 30a2a17..b645879 100644 --- a/utilities/headsup/mheadsup/clients.py +++ b/utilities/headsup/mheadsup/clients.py @@ -1,10 +1,13 @@ +from builtins import str +from builtins import range +from builtins import object from mheadsup import nets, streams, constants, util import time import numpy -class HeadsupClient: +class HeadsupClient(object): name = '' connected = False client_control_handler = None @@ -198,7 +201,7 @@ def update_info(self, new_info, replace=False, trigger_notify=True): self.info[0] = True self.info[1].update(new_info) if trigger_notify: - self.log('issuing notify' + ' '.join(self.info[1].keys())) + self.log('issuing notify' + ' '.join(list(self.info[1].keys()))) self.send_json(self.stream.name, {'info_update': self.info[1]}) def get_info(self): diff --git a/utilities/headsup/mheadsup/colormap.py b/utilities/headsup/mheadsup/colormap.py index d3deab5..72055c8 100644 --- a/utilities/headsup/mheadsup/colormap.py +++ b/utilities/headsup/mheadsup/colormap.py @@ -1,3 +1,8 @@ +from __future__ import division +from builtins import zip +from builtins import range +from builtins import object +from past.utils import old_div # # Round here, colors are always 4-tuples. R,G,B,alpha. And for the @@ -30,7 +35,7 @@ DEFAULT_COLORMAP_SIZE = 256 -class ColorMap: +class ColorMap(object): size = DEFAULT_COLORMAP_SIZE pivots = DEFAULT_COLORMAP_PIVOTS colors = None @@ -54,7 +59,7 @@ def make_colors(self, size=None): self.size = size if self.size is None: self.size = DEFAULT_COLORMAP_SIZE - speed = float(len(self.pivots)-1)/(self.size-1) + speed = old_div(float(len(self.pivots)-1),(self.size-1)) self.colors = [] for i in range(self.size-1): x = speed*i @@ -87,7 +92,7 @@ def decode(cls, data): @classmethod def get_builtin(cls, name, size=None, scale=None): if not name in COLORMAP_BUILTIN_DICT: - raise ValueError, "invalid built-in colormap builtin '%s'"%name + raise ValueError("invalid built-in colormap builtin '%s'"%name) return cls(pivots=COLORMAP_BUILTIN_DICT[name], size=size, scale=scale) diff --git a/utilities/headsup/mheadsup/encoders.py b/utilities/headsup/mheadsup/encoders.py index ad4f510..92cef85 100644 --- a/utilities/headsup/mheadsup/encoders.py +++ b/utilities/headsup/mheadsup/encoders.py @@ -1,3 +1,5 @@ +from builtins import map +from builtins import object import numpy as np """ @@ -8,7 +10,7 @@ def encode_array_row(data, cast): if data.ndim == 1: - return map(cast, data) + return list(map(cast, data)) return [encode_array_row(d, cast) for d in data] def encode_array(data): @@ -33,7 +35,7 @@ def decode_array(data): return np.array(data['data'], dtype=data['dtype']) -class arrayInfoEncoder: +class arrayInfoEncoder(object): """ Classlet for serializing simple classes to json-ready dictionaries. 
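The list(map(...)) rewrites in encoders.py above follow the same logic: Python 3's map() returns a lazy iterator with no len() and no indexing, and it is exhausted after a single pass. A sketch of the behavior difference under Python 3 (illustration only, not part of the patch):

    vals = map(int, ['1', '2', '3'])   # lazy iterator in Python 3
    assert list(vals) == [1, 2, 3]
    assert list(vals) == []            # a second pass finds it exhausted

    vals = list(map(int, ['1', '2', '3']))
    assert len(vals) == 3 and vals[0] == 1

A list comprehension such as [int(s) for s in strings] is usually the more idiomatic end point; the mechanical list(map(...)) form simply keeps the diff minimal.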
diff --git a/utilities/headsup/mheadsup/geometries.py b/utilities/headsup/mheadsup/geometries.py index 0915276..e653819 100644 --- a/utilities/headsup/mheadsup/geometries.py +++ b/utilities/headsup/mheadsup/geometries.py @@ -1,3 +1,8 @@ +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import map +from builtins import range import os import numpy as np @@ -7,7 +12,7 @@ Also tracking of multiple layouts, encoding, decoding... """ -from encoders import arrayInfoEncoder as aie +from .encoders import arrayInfoEncoder as aie class pixelSetGeometry(aie): """ @@ -78,7 +83,7 @@ def read_ascii_columns(self, filename, 'names': str, } data = [] - for k in casts.keys(): + for k in list(casts.keys()): if k in columns: data.append((k, columns[k], casts[k], translators.get(k,{}), @@ -116,14 +121,14 @@ def from_ascii_columns(cls, filename, name='file_geom', @staticmethod def get_cp_list(filename): - from ConfigParser import ConfigParser + from configparser import ConfigParser cp = ConfigParser() cp.read(filename) return cp.sections() @classmethod def from_cp(cls, filename, section='geometry'): - from ConfigParser import ConfigParser + from configparser import ConfigParser cp = ConfigParser() cp.read(filename) name = cp.get(section, 'name') @@ -167,13 +172,13 @@ def get_trans_table(lines): # Rescale pixel coordinates? if cp.has_option(section, 'rescale'): - rescale = map(float, cp.get(section, 'rescale').split()) + rescale = list(map(float, cp.get(section, 'rescale').split())) for i in range(len(rescale)): self.coords[i] *= rescale[i] # Read the data shape, or set it to a reasonable thing if cp.has_option(section, 'shape'): - self.set_shape(map(int, cp.get(section, 'shape').split())) + self.set_shape(list(map(int, cp.get(section, 'shape').split()))) else: self.set_shape(len(self.coords[0])) # That it all diff --git a/utilities/headsup/mheadsup/gfx.py b/utilities/headsup/mheadsup/gfx.py index 4214e53..1c14a42 100644 --- a/utilities/headsup/mheadsup/gfx.py +++ b/utilities/headsup/mheadsup/gfx.py @@ -1,3 +1,5 @@ +from __future__ import division +from past.utils import old_div import numpy as np def grid_coords(nrow, ncol): @@ -10,8 +12,8 @@ def grid_coords(nrow, ncol): def circle_coords(nrow, ncol, spacing=1.4): N = nrow*ncol - R = (N/np.pi)**.5 - a = np.arange(float(N))/N + R = (old_div(N,np.pi))**.5 + a = old_div(np.arange(float(N)),N) r = a**.5 * R R, PHI = spacing * (np.floor(r)+1), (r - np.floor(r))*2*np.pi return R*np.cos(PHI), R*np.sin(PHI) diff --git a/utilities/headsup/mheadsup/masks.py b/utilities/headsup/mheadsup/masks.py index 0377b54..69e8792 100644 --- a/utilities/headsup/mheadsup/masks.py +++ b/utilities/headsup/mheadsup/masks.py @@ -1,10 +1,13 @@ +from __future__ import print_function +from __future__ import absolute_import +from builtins import range import numpy as np """ Masks. An enum the same shape as the data. 
""" -from encoders import arrayInfoEncoder as aie +from .encoders import arrayInfoEncoder as aie class pixelMask(aie): """ @@ -28,7 +31,7 @@ def get_mask(self, state): if state in self.states: state = self.states.index(state) else: - print 'unknown state "%s"' % state + print('unknown state "%s"' % state) return np.zeros(self.data.shape, 'bool') return self.data == state @@ -69,12 +72,12 @@ def load_ascii(cls, filename): i, val = [int(w.pop(0)) for i in range(2)] data[:,i] = val else: - print 'unknown command %s on line %i of %s' % \ - (cmd,line_num,filename) + print('unknown command %s on line %i of %s' % \ + (cmd,line_num,filename)) continue if len(w) > 0 and w[0][0] != '#': - print 'stray stuff on line %i of %s' % \ - (line_num,filename) + print('stray stuff on line %i of %s' % \ + (line_num,filename)) # Enough states? i_max = data.max() while len(states) <= i_max: diff --git a/utilities/headsup/mheadsup/nets.py b/utilities/headsup/mheadsup/nets.py index e04e24a..f8e3371 100644 --- a/utilities/headsup/mheadsup/nets.py +++ b/utilities/headsup/mheadsup/nets.py @@ -1,3 +1,6 @@ +from __future__ import print_function +from past.builtins import basestring +from builtins import object import socket import array import errno @@ -100,8 +103,8 @@ def decode_address(addr): # a JSON structure and a binary block. # -class packetFormatV1: - class addressBlock: +class packetFormatV1(object): + class addressBlock(object): def __init__(self, type='', name='', source='', dest=''): self.type = type self.name = name @@ -111,7 +114,7 @@ def __init__(self, type='', name='', source='', dest=''): def decode(cls, packet): words = packet.split('\x00') if len(words) != 5: - print 'fail address words' + print('fail address words') return None t, n, s, d = words[:4] self = cls(t,n,s,d) @@ -119,15 +122,15 @@ def decode(cls, packet): def encode(self): t, n, s, d = self.type, self.name, self.source, self.dest return ''.join([x+'\x00' for x in [t,n,s,d]]) - class payloadBlock: + class payloadBlock(object): @classmethod def decode(cls, packet): if len(packet) < 8: - print 'payload header' + print('payload header') return None n1, n2 = array.array('i', packet[:8]) if len(packet) != n1 + n2 + 8: - print 'payload size' + print('payload size') return None d1, d2 = packet[8:8+n1], packet[8+n1:8+n1+n2] self = cls() @@ -177,13 +180,13 @@ def decode_packet(cls, data, dahi_header=False): # Pre-amble if dahi_header: if len(data) < 8: - print 'header fail' + print('header fail') return False, None, None code, size = data[:4], array.array('i', data[4:8])[0] data = data[8:] # Validate... if len(data) < 8: - print 'no data' + print('no data') return False, None, None # Addressing addr_len = array.array('i', data[0:4])[0] diff --git a/utilities/headsup/mheadsup/plotters.py b/utilities/headsup/mheadsup/plotters.py index dcbd17a..0907d82 100644 --- a/utilities/headsup/mheadsup/plotters.py +++ b/utilities/headsup/mheadsup/plotters.py @@ -1,7 +1,14 @@ +from __future__ import division +from __future__ import print_function +from __future__ import absolute_import +from builtins import str +from builtins import range +from builtins import object +from past.utils import old_div import json import time -import clients +from . 
import clients import numpy as np display_defaults = { @@ -59,9 +66,9 @@ def _get_scale(self, data, update_texts=True): def _norm_data(self, data, black, white, mask=None, mask_val=None): if mask is not None and mask.sum() > 0: if mask_val is None: - mask_val = (black+white)/2 + mask_val = old_div((black+white),2) data[~mask] = mask_val - data = (data - black) / (white-black) + data = old_div((data - black), (white-black)) data[data<0] = 0 data[data>1] = 1 return data @@ -81,7 +88,7 @@ def update_texts(self, redraw=None): # # Needs a rewrite... -class displayController: #(clients.dataProducer, dict): +class displayController(object): #(clients.dataProducer, dict): """ This class can be used to control a plot window from a client of some kind. It exposes high-level methods that send display @@ -147,7 +154,7 @@ def mask_area(self, row=None, col=None, shape=None, unmask=False): if shape is not None: mask = np.zeros(shape, 'bool') else: - print 'Set mask shape first.' + print('Set mask shape first.') return False else: mask = np.asarray(self['mask'], 'bool').transpose() @@ -184,12 +191,12 @@ def restore(self, filename, update=False): self.clear() d = json.loads(open(filename).read()) self.update(d) - self.post_some(d.keys()) + self.post_some(list(d.keys())) def get_controls(self, timeout=2.): self.set_client_var('poll_controls', 1) dt = .1 - for i in range(min(timeout/dt, 1)): + for i in range(min(old_div(timeout,dt), 1)): op, data = self.process() if op == 'ctrl': break @@ -211,7 +218,7 @@ def watch_controls(self, enable=None, loop=False): # Assistance for plotters # -class textItem: +class textItem(object): def __init__(self, name, label, text=None): self.name = name self.label = label @@ -244,7 +251,7 @@ def set_text(self, name, text): # # -class dataScaleProps: +class dataScaleProps(object): z_offset = None z_range = (None, None) @@ -308,7 +315,7 @@ def rescale_data(self, data, mask=None, scale_hi = scale_lo + 1 self.last_limits = (scale_lo, scale_hi) # Transform - data = (data-scale_lo)/(scale_hi-scale_lo) + data = old_div((data-scale_lo),(scale_hi-scale_lo)) if clip_vals is None: clip_vals = (0,1) data[data<0] = clip_vals[0] diff --git a/utilities/headsup/mheadsup/qtgfx.py b/utilities/headsup/mheadsup/qtgfx.py index 826a1e5..1f783bd 100644 --- a/utilities/headsup/mheadsup/qtgfx.py +++ b/utilities/headsup/mheadsup/qtgfx.py @@ -1,3 +1,9 @@ +from __future__ import division +from builtins import zip +from builtins import range +from past.builtins import basestring +from past.utils import old_div +from builtins import object from PyQt4 import QtCore, QtGui import numpy as np @@ -95,7 +101,7 @@ def fromTextItemList(self, pltexts): self.add_item(t.name, t.label) -class mutexHolder: +class mutexHolder(object): def __init__(self, mutex): self.mutex = mutex def __enter__(self): @@ -247,13 +253,13 @@ def is_scalar(x): # Create the blips if mask is None: self.data_mask = None - indices = range(len(x)) + indices = list(range(len(x))) else: self.data_mask = mask indices = mask.nonzero()[0] for i in indices: con = default_shapes.get(form[i*form_mul]) - item = con(-w/2, -h/2, w, h) + item = con(old_div(-w,2), old_div(-h,2), w, h) item.setRotation(rotation[i*rotation_mul]) item.setPos(x[i], -y[i]) item.setPen(self.blip_pen) @@ -360,7 +366,7 @@ def animateMove(self, new_x=None, new_y=None, t=1.): self._anim_data['timer'] = timer timer.timeout.connect(self._anim) x0,x1,y0,y1 = new_x.min()-2, new_x.max()+2, new_y.min()-2, new_y.max()-2 - timer.start(t / self._anim_data['n_step']) + timer.start(old_div(t, 
self._anim_data['n_step'])) def _anim(self): ad = self._anim_data @@ -377,7 +383,7 @@ def _anim(self): self.scene().views()[0].rebound() -class BlipColorPalette: +class BlipColorPalette(object): # Manages a multi-color brush set; e.g. 'red' and 'blue' colormaps # from 0 to 255. def __init__(self, resolution=256, scale=255.): diff --git a/utilities/headsup/mheadsup/streams.py b/utilities/headsup/mheadsup/streams.py index da8fbf8..c00dacc 100644 --- a/utilities/headsup/mheadsup/streams.py +++ b/utilities/headsup/mheadsup/streams.py @@ -1,9 +1,14 @@ +from __future__ import print_function +from __future__ import absolute_import +from builtins import zip +from builtins import str +from builtins import object from mheadsup import constants -import util +from . import util import numpy -class HeadsupStream: +class HeadsupStream(object): props = ['name', 'provider', 'fun_name', 'local_provider', 'properties'] def __init__(self, **kwargs): @@ -41,7 +46,7 @@ def add_stream(self, stream): self.stream_hash[name] = stream self.streams.append(stream) def remove_stream(self, stream=None, name=None): - print 'remove ', stream, name + print('remove ', stream, name) if stream is None: stream = self.stream_hash.pop(name) else: @@ -79,9 +84,9 @@ def parse(self, data): for stream in msg['stream_list']: name = stream['name'] self.streams[name] = HeadsupStream(**stream) - print "streams are now: " + print("streams are now: ") for s in self.streams: - print s['name'], s['fun_name'] + print(s['name'], s['fun_name']) # @@ -92,7 +97,7 @@ def parse(self, data): # They should expose the interface defined in _MessageHandler # -class _MessageHandler: +class _MessageHandler(object): def handle(self, addr, data): """ Accepts a decoded address and data objects. Determines @@ -107,7 +112,7 @@ def handle(self, addr, data): return False, addr, data -class ServerMessageHandler: +class ServerMessageHandler(object): """ Handler for clients that processes messages from the server. Includes initial connection negotiation, termination requests, @@ -134,11 +139,11 @@ def handle(self, addr, data): self.status = 'server_close' else: self.weirds += 1 - print 'unhandled client control message' + print('unhandled client control message') return True, addr, data -class StreamListStreamHandler: +class StreamListStreamHandler(object): """ Maintain a list of streams available on a given server. 
""" @@ -155,7 +160,7 @@ def handle(self, addr, data): current_active = [False for s in self.streams] for stream in msg['stream_list']: stream = HeadsupStream(**dict([(str(k), v) - for k,v in stream.items()])) + for k,v in list(stream.items())])) if stream.name in current_names: i = current_names.index(stream.name) @@ -171,7 +176,7 @@ def handle(self, addr, data): return True, addr, data -class DataHandler: +class DataHandler(object): def __init__(self, stream_name=None): self.frames = [] self.stream_name = stream_name @@ -201,7 +206,7 @@ def handle(self, addr, data): return True, addr, data -class DataSourceControlHandler: +class DataSourceControlHandler(object): def __init__(self, stream_name): self.stream_name = stream_name self.do_update = False diff --git a/utilities/headsup/mheadsup/util.py b/utilities/headsup/mheadsup/util.py index 688166e..5767a9c 100644 --- a/utilities/headsup/mheadsup/util.py +++ b/utilities/headsup/mheadsup/util.py @@ -1,3 +1,11 @@ +from __future__ import print_function +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import zip +from builtins import range +from past.builtins import basestring +from builtins import object import os, sys, time import numpy as np import subprocess @@ -33,7 +41,7 @@ _defaults = defaults -import ConfigParser as cp +import configparser as cp class MainConfig(cp.ConfigParser): filename = None @@ -46,7 +54,7 @@ def __init__(self, filename=None): self.filename = _defaults['config_file'] if self.filename is not None: if not os.path.exists(self.filename): - print 'Config file %s not found' % self.filename + print('Config file %s not found' % self.filename) else: self.read(self.filename) @@ -63,7 +71,7 @@ def get_server_config(self, key=None): key = self.get('Servers', 'default_server') server_list = [x.split() for x in self.get('Servers', 'server_list').split('\n')] - keys, filenames = zip(*[w for w in server_list if len(w) != 0]) + keys, filenames = list(zip(*[w for w in server_list if len(w) != 0])) if not key in keys: return 'Unknown server_profile %s' % key filename = filenames[keys.index(key)] @@ -205,7 +213,7 @@ def load_columns(fin, cols=None, skip=0): -class logger: +class logger(object): def __init__(self, default_priority=0, verbosity=None, prefix=''): if verbosity is None: