code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
# Start with a page object for unknown entries if the HAR data has
# any entries with no page ID
pages = []
if any('pageref' not in entry for entry in self.har_data['entries']):
pages.append(HarPage('unknown', har_parser=self))
for har_page in self.har_data['pages']:
page = HarPage(har_page['id'], har_parser=self)
pages.append(page)
return pages
|
def pages(self)
|
This is a list of HarPage objects, each of which represents a page
from the HAR file.
| 5.474901 | 4.593095 | 1.191985 |
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_size_trans(assets)
|
def _get_asset_size_trans(self, asset_type)
|
Helper function to dynamically create *_size properties.
| 4.904286 | 4.843109 | 1.012632 |
if asset_type == 'page':
assets = self.entries
else:
assets = getattr(self, '{0}_files'.format(asset_type), None)
return self.get_total_size(assets)
|
def _get_asset_size(self, asset_type)
|
Helper function to dynamically create *_size properties.
| 4.806556 | 4.623459 | 1.039602 |
if asset_type == 'initial':
return self.actual_page['time']
elif asset_type == 'content':
return self.pageTimings['onContentLoad']
elif asset_type == 'page':
if self.page_id == 'unknown':
return None
return self.pageTimings['onLoad']
# TODO - should we return a slightly fake total load time to
# accomodate HAR data that cannot understand things like JS
# rendering or just throw a warning?
#return self.get_load_time(request_type='.*',content_type='.*', status_code='.*', asynchronous=False)
else:
return self.get_load_time(
content_type=self.asset_types[asset_type]
)
|
def _get_asset_load(self, asset_type)
|
Helper function to dynamically create *_load_time properties. Return
value is in ms.
| 7.457193 | 7.069923 | 1.054777 |
results = []
for entry in self.entries:
valid_entry = True
p = self.parser
if request_type is not None and not p.match_request_type(
entry, request_type, regex=regex):
valid_entry = False
if content_type is not None:
if not self.parser.match_content_type(entry, content_type,
regex=regex):
valid_entry = False
if status_code is not None and not p.match_status_code(
entry, status_code, regex=regex):
valid_entry = False
if http_version is not None and not p.match_http_version(
entry, http_version, regex=regex):
valid_entry = False
if valid_entry:
results.append(entry)
return results
|
def filter_entries(self, request_type=None, content_type=None,
status_code=None, http_version=None, regex=True)
|
Returns a ``list`` of entry objects based on the filter criteria.
:param request_type: ``str`` of request type (i.e. - GET or POST)
:param content_type: ``str`` of regex to use for finding content type
:param status_code: ``int`` of the desired status code
:param http_version: ``str`` of HTTP version of request
:param regex: ``bool`` indicating whether to use regex or exact match.
| 1.736422 | 1.826858 | 0.950496 |
entries = self.filter_entries(
request_type=request_type, content_type=content_type,
status_code=status_code
)
if "async" in kwargs:
asynchronous = kwargs['async']
if not asynchronous:
time = 0
for entry in entries:
time += entry['time']
return time
else:
return len(self.parser.create_asset_timeline(entries))
|
def get_load_time(self, request_type=None, content_type=None,
status_code=None, asynchronous=True, **kwargs)
|
This method can return the TOTAL load time for the assets or the ACTUAL
load time, the difference being that the actual load time takes
asynchronous transactions into account. So, if you want the total load
time, set asynchronous=False.
EXAMPLE:
I want to know the load time for images on a page that has two images,
each of which took 2 seconds to download, but the browser downloaded
them at the same time.
self.get_load_time(content_types=['image']) (returns 2)
self.get_load_time(content_types=['image'], asynchronous=False) (returns 4)
| 3.65544 | 4.057529 | 0.900903 |
size = 0
for entry in entries:
if entry['response']['bodySize'] > 0:
size += entry['response']['bodySize']
return size
|
def get_total_size(self, entries)
|
Returns the total size of a collection of entries.
:param entries: ``list`` of entries to calculate the total size of.
| 3.556794 | 3.945307 | 0.901525 |
size = 0
for entry in entries:
if entry['response']['_transferSize'] > 0:
size += entry['response']['_transferSize']
return size
|
def get_total_size_trans(self, entries)
|
Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of.
| 3.744767 | 3.46745 | 1.079977 |
# The unknown page is just a placeholder for entries with no page ID.
# As such, it would not have a TTFB
if self.page_id == 'unknown':
return None
ttfb = 0
for entry in self.entries:
if entry['response']['status'] == 200:
for k, v in iteritems(entry['timings']):
if k != 'receive':
if v > 0:
ttfb += v
break
else:
ttfb += entry['time']
return ttfb
|
def time_to_first_byte(self)
|
Time to first byte of the page request in ms
| 5.157498 | 4.894196 | 1.053799 |
fields = self._expand_group_by_fields(self.model, fields)
return self._clone(klass=GroupByQuerySet, setup=True, _fields=fields)
|
def group_by(self, *fields)
|
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
| 6.37343 | 6.223231 | 1.024135 |
d = {}
# Iterate all keys and values
for k, v in self._row_values.items():
# Split related model fields
attrs = k.rsplit('__', 1)
# Set value depending case
if len(attrs) == 2:
# Related model field, store nested
fk, fn = attrs
if fk not in d:
d[fk] = {}
d[fk][fn] = v
else:
# Own model field, store directly
d[k] = v
# Return (+cache) data
return d
|
def _data(self)
|
Cached data built from instance raw _values as a dictionary.
| 6.108637 | 5.475259 | 1.11568 |
# Iterate all keys and values in data
for k, v in self._data.items():
# If it's a dict, process it (it's probably instance data)
if isinstance(v, dict):
try:
# Get related model from field (follow path)
rel_model = self._model
for attr in k.split('__'):
rel_model = getattr(rel_model, attr).field.related_model
except AttributeError:
# Not a model, maybe it is a dict field (?)
pass
else:
# Model, first shorten field name
k = k.replace('__', '_')
# Now init instance if required (not if we got ID None)
if 'id' in v and v['id'] is None:
# This means we grouped by ID, if it's none then FK is None
v = None
else:
# Either we have ID or we didn't group by ID, use instance
v = rel_model(**v)
# Set value
setattr(self, k, v)
|
def _set_values(self)
|
Populate instance with given.
| 6.66587 | 6.472426 | 1.029887 |
fields = self._expand_group_by_fields(self.model, fields)
clone = self._values(*fields)
clone._iterable_class = GroupByIterable
return clone
|
def group_by(self, *fields)
|
Clone the queryset using GroupByQuerySet.
:param fields:
:return:
| 8.639045 | 7.389331 | 1.169124 |
# Containers for resulting fields and related model fields
res = []
related = {}
# Add own fields and populate related fields
for field_name in fields:
if '__' in field_name:
# Related model field: append to related model's fields
fk_field_name, related_field = field_name.split('__', 1)
if fk_field_name not in related:
related[fk_field_name] = [related_field]
else:
related[fk_field_name].append(related_field)
else:
# Simple field, get the field instance
model_field = model._meta.get_field(field_name)
if isinstance(model_field, (ForeignKey, ManyToManyField)):
# It's a related field, get model
related_model = model_field.related_model
# Append all its fields with the correct prefix
res.extend('{}__{}'.format(field_name, f.column)
for f in related_model._meta.fields)
else:
# It's a common field, just append it
res.append(field_name)
# Resolve all related fields
for fk_field_name, field_names in related.items():
# Get field
fk = model._meta.get_field(fk_field_name)
# Get all fields for that related model
related_fields = cls._expand_group_by_fields(fk.related_model,
field_names)
# Append them with the correct prefix
res.extend('{}__{}'.format(fk_field_name, f) for f in related_fields)
# Return all fields
return res
|
def _expand_group_by_fields(cls, model, fields)
|
Expand FK fields into all related object's fields to avoid future
lookups.
:param fields: fields to "group by"
:return: expanded fields
| 2.690863 | 2.761378 | 0.974464 |
'''Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
Returns
-------
nothing
'''
logger.debug(
"Dummy SiTransferLayer.write addr: %s data: %s" % (hex(addr), data))
for curr_addr, d in enumerate(data, start=addr):
self.mem[curr_addr] = array.array('B', [d])[0]
|
def write(self, addr, data)
|
Write to dummy memory
Parameters
----------
addr : int
The register address.
data : list, tuple
Data (byte array) to be written.
Returns
-------
nothing
| 6.007807 | 3.748829 | 1.602582 |
'''
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to.
'''
logger.debug("Dummy SiTransferLayer.read addr: %s size: %s" % (hex(addr), size))
return array.array('B', [self.mem[curr_addr] if curr_addr in self.mem else 0 for curr_addr in range(addr, addr + size)])
|
def read(self, addr, size)
|
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to.
| 5.33501 | 2.650667 | 2.012704 |
def set_value(self, value, addr, size, offset, **kwargs):
    '''Write a value of any arbitrary bit size (max. unsigned int 64) and bit offset to a register.

    Performs a read-modify-write when the target bit field is not byte-aligned,
    so neighbouring bits in the touched bytes are preserved.

    Parameters
    ----------
    value : int, str
        The register value (int, long, bit string) to be written.
    addr : int
        The register address.
    size : int
        Bit size/length of the value to be written to the register.
    offset : int
        Offset of the value to be written to the register (in number of bits).

    Returns
    -------
    nothing
    '''
    # Split the bit offset into whole bytes (div_offset) and remaining bits (mod_offset).
    div_offset, mod_offset = divmod(offset, 8)
    # Number of whole bytes spanned by the offset-shifted value; mod_size is the
    # number of bits spilling into a partial trailing byte.
    div_size, mod_size = divmod(size + mod_offset, 8)
    if mod_size:
        div_size += 1  # a partial trailing byte still has to be written
    if mod_offset == 0 and mod_size == 0:
        # Byte-aligned write covering only whole bytes: no read-back needed,
        # start from an all-zero bit field of the exact byte size.
        reg = BitLogic.from_value(0, size=div_size * 8)
    else:
        # Unaligned write: read the affected bytes first so untouched bits survive.
        ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)
        reg = BitLogic()
        reg.frombytes(tobytes(ret))
    # Verilog-style slice [msb:lsb]: place the value at its bit offset.
    reg[size + mod_offset - 1:mod_offset] = value
    self._intf.write(self._base_addr + addr + div_offset, data=array.array('B', reg.tobytes()))
|
Writing a value of any arbitrary size (max. unsigned int 64) and offset to a register
Parameters
----------
value : int, str
The register value (int, long, bit string) to be written.
addr : int
The register address.
size : int
Bit size/length of the value to be written to the register.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
nothing
| null | null | null |
|
def get_value(self, addr, size, offset, **kwargs):
    '''Read a value of any arbitrary bit size (max. unsigned int 64) and bit offset from a register.

    Parameters
    ----------
    addr : int
        The register address.
    size : int
        Bit size/length of the value.
    offset : int
        Offset of the value to be read from the register (in number of bits).

    Returns
    -------
    reg : int
        Register value.
    '''
    # Split the bit offset into whole bytes (div_offset) and remaining bits (mod_offset).
    div_offset, mod_offset = divmod(offset, 8)
    # Number of whole bytes spanned by the offset-shifted field.
    div_size, mod_size = divmod(size + mod_offset, 8)
    if mod_size:
        div_size += 1  # a partial trailing byte must be read as well
    ret = self._intf.read(self._base_addr + addr + div_offset, size=div_size)
    reg = BitLogic()
    reg.frombytes(tobytes(ret))
    # Verilog-style slice [msb:lsb] extracts the requested bit field as an int.
    return reg[size + mod_offset - 1:mod_offset].tovalue()
|
Reading a value of any arbitrary size (max. unsigned int 64) and offset from a register
Parameters
----------
addr : int
The register address.
size : int
Bit size/length of the value.
offset : int
Offset of the value to be written to the register (in number of bits).
Returns
-------
reg : int
Register value.
| null | null | null |
|
def set_bytes(self, data, addr, **kwargs):
    '''Write bytes of any arbitrary size to the device memory.

    Parameters
    ----------
    data : iterable
        The data (byte array) to be written.
    addr : int
        The register address.

    Returns
    -------
    nothing
    '''
    # Absolute target address = configured base address + register offset.
    target = self._conf['base_addr'] + addr
    self._intf.write(target, data)
|
Writing bytes of any arbitrary size
Parameters
----------
data : iterable
The data (byte array) to be written.
addr : int
The register address.
Returns
-------
nothing
| null | null | null |
|
def get_bytes(self, addr, size, **kwargs):
    '''Read bytes of any arbitrary size from the device memory.

    Parameters
    ----------
    addr : int
        The register address.
    size : int
        Byte length of the value.

    Returns
    -------
    data : iterable
        Byte array.
    '''
    # Absolute source address = configured base address + register offset.
    source = self._conf['base_addr'] + addr
    return self._intf.read(source, size)
|
Reading bytes of any arbitrary size
Parameters
----------
addr : int
The register address.
size : int
Byte length of the value.
Returns
-------
data : iterable
Byte array.
| null | null | null |
|
if size is None:
self._drv.set_data(self.tobytes())
else:
self._drv.set_data(self.tobytes()[:size])
if "auto_start" in self._conf:
if self._conf["auto_start"]:
self._drv.start()
|
def write(self, size=None)
|
to call start() automatically, set yaml file as follows:
registers:
- name : CCPD_PCB
type : StdRegister
hw_driver : CCPD_PCB_SPI
size : 32
auto_start : True <------ add this
fields: ......
| 3.687523 | 3.654974 | 1.008905 |
'''
Factory method
For format characters see: https://docs.python.org/2/library/struct.html
'''
bl = cls(**kwargs) # size is 0 by default
bl.fromvalue(value=value, size=size, fmt=fmt)
return bl
|
def from_value(cls, value, size=None, fmt='Q', **kwargs)
|
Factory method
For format characters see: https://docs.python.org/2/library/struct.html
| 7.115271 | 4.795238 | 1.48382 |
'''
Append from a int/long number.
'''
if size and value.bit_length() > size:
raise TypeError('Value is too big for given size')
self.frombytes(struct.pack(fmt, value))
if size:
if not isinstance(size, integer_types) or not size > 0:
raise TypeError('Size must be greater than zero')
if size > self.length():
bitarray.extend(self, (size - self.length()) * [0])
else:
bitarray.__delitem__(self, slice(size, self.length()))
|
def fromvalue(self, value, size=None, fmt='Q')
|
Append from a int/long number.
| 4.033095 | 3.409796 | 1.182797 |
'''
Convert bitstring to a int/long number.
'''
format_size = struct.calcsize(fmt)
if self.length() > format_size * 8:
raise TypeError('Cannot convert to number')
ba = self.copy()
ba.extend((format_size * 8 - self.length()) * [0])
return struct.unpack_from(fmt, ba.tobytes())[0]
|
def tovalue(self, fmt='Q')
|
Convert bitstring to a int/long number.
| 4.767033 | 3.486871 | 1.367138 |
'''Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
'''
try:
start = slc.start
stop = slc.stop
slc_step = slc.step
except AttributeError:
if make_slice:
if slc < 0:
slc += self.length()
return slice(slc, slc + 1)
else:
return slc
else:
if not start and start != 0:
slc_stop = self.length()
elif start < 0:
slc_stop = self.length() + start + 1
else:
slc_stop = start + 1
if not stop and stop != 0:
slc_start = 0
elif stop < 0:
slc_start = self.length() + stop
else:
slc_start = stop
return slice(slc_start, slc_stop, slc_step)
|
def _swap_slice_indices(self, slc, make_slice=False)
|
Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
| 2.299333 | 1.895222 | 1.213226 |
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) #write pattern to memory
self['SEQ'].set_size(size) # set size
self['SEQ'].set_repeat(1) # set repeat
for _ in range(1):
self['SEQ'].start() # start
while not self['SEQ'].get_done():
#time.sleep(0.1)
print("Wait for done...")
|
def _run_seq(self, size)
|
Send the contents of self['SEQ'] to the chip and wait until it finishes.
| 7.548768 | 6.079092 | 1.241759 |
#reset some stuff
self['SEQ']['GLOBAL_SHIFT_EN'].setall(False)
self['SEQ']['GLOBAL_CTR_LD'].setall(False)
self['SEQ']['GLOBAL_DAC_LD'].setall(False)
self['SEQ']['PIXEL_SHIFT_EN'].setall(False)
self['SEQ']['INJECTION'].setall(False)
|
def _clear_strobes(self)
|
Resets the "enable" and "load" output streams to all 0.
| 6.388837 | 6.06926 | 1.052655 |
'''
Sets data for outgoing stream
'''
if self._mem_bytes < len(data):
raise ValueError('Size of data (%d bytes) is too big for memory (%d bytes)' % (len(data), self._mem_bytes))
self._intf.write(self._conf['base_addr'] + self._spi_mem_offset + addr, data)
|
def set_data(self, data, addr=0)
|
Sets data for outgoing stream
| 6.572054 | 5.298197 | 1.240432 |
'''
Gets data for incoming stream
'''
# readback memory offset
if addr is None:
addr = self._mem_bytes
if size and self._mem_bytes < size:
raise ValueError('Size is too big')
if size is None:
return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, self._mem_bytes)
else:
return self._intf.read(self._conf['base_addr'] + self._spi_mem_offset + addr, size)
|
def get_data(self, size=None, addr=None)
|
Gets data for incoming stream
| 5.00161 | 4.383761 | 1.14094 |
'''
Initialize serial device.
Parameters of serial.Serial: http://pyserial.sourceforge.net/pyserial_api.html
Plus termination string parameter eol
'''
super(Serial, self).init()
self.read_termination = self._init.get('read_termination', None)
self.write_termination = self._init.get('write_termination', self.read_termination)
try:
self.read_termination = bytes(self.read_termination, 'utf-8')
self.write_termination = bytes(self.write_termination, 'utf-8')
except TypeError as e:
logger.debug(e)
self.timeout = self._init.get('timeout', None) # timeout of 0 returns immediately
self._port = serial.Serial(**{key: value for key, value in self._init.items() if key not in ("read_termination", "write_termination")})
|
def init(self)
|
Initialize serial device.
Parameters of serial.Serial: http://pyserial.sourceforge.net/pyserial_api.html
Plus termination string parameter eol
| 3.443893 | 2.27935 | 1.51091 |
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX11644_ADD, array('B', pack('B', flags | self.MAX11644_SETUP)))
|
def _setup_adc(self, flags)
|
Initialize ADC
| 12.430573 | 12.524244 | 0.992521 |
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_GPAC_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_GPAC_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_GPAC_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_GPAC_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['min'] = values[2]
self._ch_cal[channel]['max'] = values[3]
self._ch_cal[channel]['ADCI']['gain'] = -values[4] # fix gain sign
self._ch_cal[channel]['ADCI']['offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DAC']['gain'] = values[8]
self._ch_cal[channel]['DAC']['offset'] = values[9]
self._ch_cal[channel]['limit'] = values[10]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header)
|
def read_eeprom_calibration(self): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for sources and regulators
'''
header = self.get_format()
if header == self.HEADER_GPAC
|
Reading EEPROM calibration for sources and regulators
| 2.354744 | 2.345181 | 1.004078 |
'''Reading voltage
'''
adc_ch = self._ch_map[channel]['ADCV']['adc_ch']
address = self._ch_map[channel]['ADCV']['address']
raw = self._get_adc_value(address=address)[adc_ch]
dac_offset = self._ch_cal[channel]['ADCV']['offset']
dac_gain = self._ch_cal[channel]['ADCV']['gain']
voltage = ((raw - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'V':
return voltage / 1000
elif unit == 'mV':
return voltage
else:
raise TypeError("Invalid unit type.")
|
def get_voltage(self, channel, unit='V')
|
Reading voltage
| 3.458099 | 3.384417 | 1.021771 |
'''Reading current
'''
values = self._get_adc_value(address=self._ch_map[channel]['ADCI']['address'])
raw = values[self._ch_map[channel]['ADCI']['adc_ch']]
dac_offset = self._ch_cal[channel]['ADCI']['offset']
dac_gain = self._ch_cal[channel]['ADCI']['gain']
if 'PWR' in channel:
current = ((raw - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'A':
return current / 1000
elif unit == 'mA':
return current
elif unit == 'uA':
return current * 1000
else:
raise TypeError("Invalid unit type.")
else:
voltage = values[self._ch_map[channel]['ADCV']['adc_ch']]
current = (((raw - voltage) - dac_offset) / dac_gain)
if unit == 'raw':
return raw
elif unit == 'A':
return current / 1000000
elif unit == 'mA':
return current / 1000
elif unit == 'uA':
return current
else:
raise TypeError("Invalid unit type.")
|
def get_current(self, channel, unit='A')
|
Reading current
| 2.566535 | 2.518448 | 1.019094 |
'''Enable/Disable output of power channel
'''
try:
bit = self._ch_map[channel]['GPIOEN']['bit']
except KeyError:
raise ValueError('set_enable() not supported for channel %s' % channel)
self._set_power_gpio_value(bit=bit, value=value)
|
def set_enable(self, channel, value)
|
Enable/Disable output of power channel
| 8.119631 | 6.748576 | 1.203162 |
'''Reading over current status of power channel
'''
try:
bit = self._ch_map[channel]['GPIOOC']['bit']
except KeyError:
raise ValueError('get_over_current() not supported for channel %s' % channel)
return not self._get_power_gpio_value(bit)
|
def get_over_current(self, channel)
|
Reading over current status of power channel
| 10.176256 | 7.92554 | 1.283983 |
'''Setting current limit
Note: same limit for all channels.
'''
# TODO: add units / calibration
if unit == 'raw':
value = value
elif unit == 'A':
value = int(value * 1000 * self.CURRENT_LIMIT_GAIN)
elif unit == 'mA':
value = int(value * self.CURRENT_LIMIT_GAIN)
elif unit == 'uA':
value = int(value / 1000 * self.CURRENT_LIMIT_GAIN)
else:
raise TypeError("Invalid unit type.")
I2cAnalogChannel._set_dac_value(self, address=self.CURRENT_LIMIT_DAC_SLAVE_ADD, dac_ch=self.CURRENT_LIMIT_DAC_CH, value=value)
|
def set_current_limit(self, channel, value, unit='A')
|
Setting current limit
Note: same limit for all channels.
| 4.348742 | 3.865375 | 1.12505 |
'''Setting current of current source
'''
dac_offset = self._ch_cal[channel]['DAC']['offset']
dac_gain = self._ch_cal[channel]['DAC']['gain']
if unit == 'raw':
value = value
elif unit == 'A':
value = int((-value * 1000000 - dac_offset) / dac_gain) # fix sign of output
elif unit == 'mA':
value = int((-value * 1000 - dac_offset) / dac_gain) # fix sign of output
elif unit == 'uA':
value = int((-value - dac_offset) / dac_gain) # fix sign of output
else:
raise TypeError("Invalid unit type.")
self._set_dac_value(channel=channel, value=value)
|
def set_current(self, channel, value, unit='A')
|
Setting current of current source
| 2.97823 | 2.824954 | 1.054258 |
q_d_f = 0
'''
fout = fref * (p_total / q_total) * (1 / div)
p_total = 2 * ((p_counter + 4) + p_0) [16..1023]
q_total = q_counter + 2 [2..129]
div = [2,(3),4..127]
constraints:
f_ref * p_total / q_total = [100..400] MHz
f_ref / q_total > 0.25 MHz
'''
for self.q_counter in range(128):
self.q_total = self.q_counter + 2
if (self.fref / self.q_total) < 0.25: # PLL constraint
break
for self.div in range(2, 128):
q_d_f = self.q_total * self.div * fout
if isinstance(q_d_f, six.integer_types) and q_d_f > (15 * self.fref): # = f0 * p
if int(q_d_f) % int(self.fref) == 0: # p, q, and d found
self.p_total = q_d_f / self.fref
while self.p_total <= 16: # counter constraint
self.p_total = self.p_total * 2
self.div = self.div * 2
if self.div > 127:
break
if self.p_total > 1023:
break
if ((self.fref * self.p_total / self.q_total) < 100 or (self.fref * self.p_total / self.q_total) > 400): # PLL constraint
break
if int(self.p_total) % 2 == 0:
self.p_0 = 0
else:
self.p_0 = 1
self.p_counter = ((int(self.p_total) - self.p_0) / 2) - 4 # set p counter value
if self.div == 2:
self.clk1SRC = 0x02
self.div1N = 4
else:
if self.div == 3:
self.clk1SRC = 0x03
self.div1N = 6
else:
self.clk1SRC = 0x01
self.div1N = self.div
if self.p_total <= 44:
self.chg_pump = 0
else:
if self.p_total <= 479:
self.chg_pump = 1
else:
if self.p_total <= 639:
self.chg_pump = 2
else:
if self.p_total <= 799:
self.chg_pump = 3
else:
if self.p_total <= 1023:
self.chg_pump = 4
ftest = self.fref * self.p_total / self.q_total * 1 / self.div
fvco = self.fref * self.p_total / self.q_total
logger.info('PLL frequency set to ' + str(ftest) + ' MHz' + ' (VCO @ ' + str(fvco) + ' MHz)')
return True
logger.error('MIO_PLL: Could not find PLL parameters')
return False
|
def _calculateParameters(self, fout)
|
fout = fref * (p_total / q_total) * (1 / div)
p_total = 2 * ((p_counter + 4) + p_0) [16..1023]
q_total = q_counter + 2 [2..129]
div = [2,(3),4..127]
constraints:
f_ref * p_total / q_total = [100..400] MHz
f_ref / q_total > 0.25 MHz
| 3.324664 | 2.380946 | 1.396363 |
'''
Initialize the device.
Parameters of visa.ResourceManager().open_resource()
'''
super(Visa, self).init()
backend = self._init.get('backend', '') # Empty string means std. backend (NI VISA)
rm = visa.ResourceManager(backend)
try:
logger.info('BASIL VISA TL with %s backend found the following devices: %s', backend, ", ".join(rm.list_resources()))
except NotImplementedError: # some backends do not always implement the list_resources function
logger.info('BASIL VISA TL with %s backend', backend)
self._resource = rm.open_resource(**{key: value for key, value in self._init.items() if key not in ("backend",)})
|
def init(self)
|
Initialize the device.
Parameters of visa.ResourceManager().open_resource()
| 6.672321 | 5.314055 | 1.255599 |
''' Reading data in BRAM.
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
'''
fifo_int_size_1 = self.FIFO_INT_SIZE
fifo_int_size_2 = self.FIFO_INT_SIZE
if fifo_int_size_1 > fifo_int_size_2:
fifo_int_size = fifo_int_size_2 # use smaller chunk
logger.warning("Reading wrong FIFO size. Expected: %d <= %d" % (fifo_int_size_1, fifo_int_size_2))
else:
fifo_int_size = fifo_int_size_1 # use smaller chunk
return np.frombuffer(self._intf.read(self._conf['base_data_addr'], size=4 * fifo_int_size), dtype=np.dtype('<u4'))
|
def get_data(self)
|
Reading data in BRAM.
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
| 4.181918 | 3.355338 | 1.246348 |
self.bus.RD_B <= 1
self.bus.ADD <= self._x
self.bus.BUS_DATA <= self._high_impedence
for _ in range(5):
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.ADD <= address + 0x4000
yield RisingEdge(self.clock)
self.bus.RD_B <= 0
yield RisingEdge(self.clock)
self.bus.RD_B <= 0
yield ReadOnly()
result = self.bus.BUS_DATA.value.integer
yield RisingEdge(self.clock)
self.bus.RD_B <= 1
yield RisingEdge(self.clock)
self.bus.RD_B <= 1
self.bus.ADD <= self._x
yield RisingEdge(self.clock)
for _ in range(5):
yield RisingEdge(self.clock)
raise ReturnValue(result)
|
def read_external(self, address)
|
Copied from silusb.sv testbench interface
| 2.663103 | 2.596129 | 1.025798 |
self.bus.WR_B <= 1
self.bus.ADD <= self._x
for _ in range(5):
yield RisingEdge(self.clock)
yield RisingEdge(self.clock)
self.bus.ADD <= address + 0x4000
self.bus.BUS_DATA <= int(value)
yield Timer(1) # This is hack for iverilog
self.bus.ADD <= address + 0x4000
self.bus.BUS_DATA <= int(value)
yield RisingEdge(self.clock)
self.bus.WR_B <= 0
yield Timer(1) # This is hack for iverilog
self.bus.BUS_DATA <= int(value)
self.bus.WR_B <= 0
yield RisingEdge(self.clock)
self.bus.WR_B <= 0
yield Timer(1) # This is hack for iverilog
self.bus.BUS_DATA <= int(value)
self.bus.WR_B <= 0
yield RisingEdge(self.clock)
self.bus.WR_B <= 1
self.bus.BUS_DATA <= self._high_impedence
yield Timer(1) # This is hack for iverilog
self.bus.WR_B <= 1
self.bus.BUS_DATA <= self._high_impedence
yield RisingEdge(self.clock)
self.bus.WR_B <= 1
self.bus.ADD <= self._x
for _ in range(5):
yield RisingEdge(self.clock)
|
def write_external(self, address, value)
|
Copied from silusb.sv testbench interface
| 2.110327 | 2.075956 | 1.016557 |
'''Initialize ADC
'''
self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', flags)))
|
def _setup_adc(self, flags)
|
Initialize ADC
| 16.941944 | 16.839325 | 1.006094 |
'''Read ADC
'''
conf = self.SCAN_OFF | self.SINGLE_ENDED | ((0x1e) & (channel << 1))
self._intf.write(self._base_addr + self.MAX_1239_ADD, array('B', pack('B', conf)))
def read_data():
ret = self._intf.read(self._base_addr + self.MAX_1239_ADD | 1, size=2)
ret.reverse()
ret[1] = ret[1] & 0x0f # 12-bit ADC
return unpack_from('H', ret)[0]
if average:
raw = 0
for _ in range(average):
raw += read_data()
raw /= average
else:
raw = read_data()
return raw
|
def _get_adc_value(self, channel, average=None)
|
Read ADC
| 4.697688 | 4.695701 | 1.000423 |
'''Read EEPROM
'''
self._intf.write(self._base_addr + self.CAL_EEPROM_ADD, array('B', pack('>H', address & 0x3FFF))) # 14-bit address, 16384 bytes
n_pages, n_bytes = divmod(size, self.CAL_EEPROM_PAGE_SIZE)
data = array('B')
for _ in range(n_pages):
data.extend(self._intf.read(self._base_addr + self.CAL_EEPROM_ADD | 1, size=self.CAL_EEPROM_PAGE_SIZE))
if n_bytes > 0:
data.extend(self._intf.read(self._base_addr + self.CAL_EEPROM_ADD | 1, size=n_bytes))
return data
|
def _read_eeprom(self, address, size)
|
Read EEPROM
| 2.977413 | 2.918365 | 1.020233 |
'''Setting default voltage
'''
if not channels:
channels = self._ch_cal.keys()
for channel in channels:
self.set_voltage(channel, self._ch_cal[channel]['default'], unit='V')
|
def set_default(self, channels=None)
|
Setting default voltage
| 5.594316 | 4.563143 | 1.225979 |
'''Reading voltage
'''
kwargs = self._ch_map[channel]['ADCV']
voltage_raw = self._get_adc_value(**kwargs)
voltage = (voltage_raw - self._ch_cal[channel]['ADCV']['offset']) / self._ch_cal[channel]['ADCV']['gain']
if unit == 'raw':
return voltage_raw
elif unit == 'V':
return voltage
elif unit == 'mV':
return voltage * 1000
else:
raise TypeError("Invalid unit type.")
|
def get_voltage(self, channel, unit='V')
|
Reading voltage
| 3.884186 | 3.749127 | 1.036024 |
'''Reading current
'''
kwargs = self._ch_map[channel]['ADCI']
current_raw = self._get_adc_value(**kwargs)
voltage = self.get_voltage(channel)
current_raw_iq = current_raw - (self._ch_cal[channel]['ADCI']['iq_offset'] + self._ch_cal[channel]['ADCI']['iq_gain'] * voltage) # quiescent current (IQ) compensation
current = (current_raw_iq - self._ch_cal[channel]['ADCI']['offset']) / self._ch_cal[channel]['ADCI']['gain']
if unit == 'raw':
return current_raw
elif unit == 'raw_iq':
return current_raw_iq
elif unit == 'A':
return current
elif unit == 'mA':
return current * 1000
elif unit == 'uA':
return current * 1000000
else:
raise TypeError("Invalid unit type.")
|
def get_current(self, channel, unit='A')
|
Reading current
| 3.225586 | 3.135879 | 1.028607 |
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V1_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_V1_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V1_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_V1_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['ADCI']['gain'] = values[2]
self._ch_cal[channel]['ADCI']['offset'] = values[3]
self._ch_cal[channel]['ADCI']['iq_gain'] = values[4]
self._ch_cal[channel]['ADCI']['iq_offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DACV']['gain'] = values[8]
self._ch_cal[channel]['DACV']['offset'] = values[9]
const_data = data[-calcsize(self.CAL_DATA_CONST_V1_FORMAT):]
values = unpack_from(self.CAL_DATA_CONST_V1_FORMAT, const_data)
if temperature:
for channel in self._ch_cal.keys():
self._ch_cal[channel]['VNTC']['B_NTC'] = values[0]
self._ch_cal[channel]['VNTC']['R1'] = values[1]
self._ch_cal[channel]['VNTC']['R2'] = values[2]
self._ch_cal[channel]['VNTC']['R4'] = values[3]
self._ch_cal[channel]['VNTC']['R_NTC_25'] = values[4]
self._ch_cal[channel]['VNTC']['VREF'] = values[5]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header)
|
def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for power regulators and temperature
'''
header = self.get_format()
if header == self.HEADER_V1
|
Reading EEPROM calibration for power regulators and temperature
| 2.109205 | 2.096719 | 1.005955 |
'''Reading temperature
'''
# NTC type SEMITEC 103KT1608 http://www.semitec.co.jp/english/products/pdf/KT_Thermistor.pdf
#
# R_NTC = R_25 * exp(B_NTC * (1/T - 1/T_25))
#
# R_NTC measured NTC resistance
# R_NTC_25 resistance @ 25C
# B_NTC temperature coefficient
# Temperature current temperature (Kelvin)
# T_25 298,15 K (25C)
#
# B_NTC NTC 'b' coefficient, NTC Semitec 103KT1608-1P
# R_NTC_25 NTC 25C resistance, NTC Semitec 103KT1608-1P
# R1 resistor value for NTC voltage divider
# R2 value of R2 in the reference voltage divider
# R4 value of R4 in the reference voltage divider
# VREF supply voltage of the resistor bridge
#
kwargs = self._ch_map[channel][sensor]
temp_raw = self._get_adc_value(**kwargs)
v_adc = ((temp_raw - self._ch_cal.items()[0][1]['ADCV']['offset']) / self._ch_cal.items()[0][1]['ADCV']['gain']) # voltage, VDDA1
k = self._ch_cal[channel][sensor]['R4'] / (self._ch_cal[channel][sensor]['R2'] + self._ch_cal[channel][sensor]['R4']) # reference voltage divider
r_ntc = self._ch_cal[channel][sensor]['R1'] * (k - v_adc / self._ch_cal[channel][sensor]['VREF']) / (1 - k + v_adc / self._ch_cal[channel][sensor]['VREF']) # NTC resistance
return (self._ch_cal[channel][sensor]['B_NTC'] / (log(r_ntc) - log(self._ch_cal[channel][sensor]['R_NTC_25']) + self._ch_cal[channel][sensor]['B_NTC'] / self.T_KELVIN_25)) - self.T_KELVIN_0
|
def get_temperature(self, channel, sensor='VNTC')
|
Reading temperature
| 4.402864 | 4.384647 | 1.004155 |
def convert_data_array(arr, filter_func=None, converter_func=None):
    '''Filter and convert any given data array of any dtype.
    Parameters
    ----------
    arr : numpy.array
        Data array of any dtype.
    filter_func : function
        Function that takes array and returns true or false for each item in array.
    converter_func : function
        Function that takes array and returns an array or tuple of arrays.
    Returns
    -------
    array of specified dimension (converter_func) and content (filter_func)
    '''
    if filter_func is not None:
        if not hasattr(filter_func, '__call__'):
            raise ValueError('Filter is not callable')
        # indexing with boolean arrays keeps only items for which filter_func is True
        arr = arr[filter_func(arr)]
    if converter_func is not None:
        if not hasattr(converter_func, '__call__'):
            raise ValueError('Converter is not callable')
        # BUG FIX: the converted result was previously discarded and an
        # undefined name ("array") was returned when no filter was given
        arr = converter_func(arr)
    return arr
|
Filter and convert any given data array of any dtype.
Parameters
----------
arr : numpy.array
Data array of any dtype.
filter_func : function
Function that takes array and returns true or false for each item in array.
converter_func : function
Function that takes array and returns an array or tuple of arrays.
Returns
-------
array of specified dimension (converter_func) and content (filter_func)
| null | null | null |
|
def logical_and(f1, f2):  # function factory
    '''Logical and from functions.
    Parameters
    ----------
    f1, f2 : function
        Function that takes array and returns true or false for each item in array.
    Returns
    -------
    Function
    Examples
    --------
    filter_func=logical_and(is_data_record, is_data_from_channel(4)) # new filter function
    filter_func(array) # array that has Data Records from channel 4
    '''
    def combined(arr):
        # element-wise conjunction of both predicate results
        return np.logical_and(f1(arr), f2(arr))
    combined.__name__ = "%s_and_%s" % (f1.__name__, f2.__name__)
    return combined
|
Logical and from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
Examples
--------
filter_func=logical_and(is_data_record, is_data_from_channel(4)) # new filter function
filter_func(array) # array that has Data Records from channel 4
| null | null | null |
|
def logical_or(f1, f2):  # function factory
    '''Logical or from functions.
    Parameters
    ----------
    f1, f2 : function
        Function that takes array and returns true or false for each item in array.
    Returns
    -------
    Function
    '''
    def combined(arr):
        # element-wise disjunction of both predicate results
        return np.logical_or(f1(arr), f2(arr))
    combined.__name__ = "%s_or_%s" % (f1.__name__, f2.__name__)
    return combined
|
Logical or from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
| null | null | null |
|
def logical_not(f):  # function factory
    '''Logical not from a function.
    Parameters
    ----------
    f : function
        Function that takes array and returns true or false for each item in array.
    Returns
    -------
    Function
    '''
    # NOTE: docstring previously documented "f1, f2" although only one
    # function is accepted; corrected to match the actual signature.
    def f_not(arr):
        return np.logical_not(f(arr))
    f_not.__name__ = "not_" + f.__name__
    return f_not
|
Logical not from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
| null | null | null |
|
def logical_xor(f1, f2):  # function factory
    '''Logical xor from functions.
    Parameters
    ----------
    f1, f2 : function
        Function that takes array and returns true or false for each item in array.
    Returns
    -------
    Function
    '''
    def combined(arr):
        # True where exactly one of the two predicates holds
        return np.logical_xor(f1(arr), f2(arr))
    combined.__name__ = "%s_xor_%s" % (f1.__name__, f2.__name__)
    return combined
|
Logical xor from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function
| null | null | null |
|
def arr_select(value):  # function factory
    '''Selecting array elements by bitwise and comparison to a given value.
    Parameters:
    value : int
        Value to which array elements are compared to.
    Returns:
    array : np.array
    '''
    def selector(arr):
        # an element matches when all bits of "value" are set in it
        masked = np.bitwise_and(arr, value)
        return np.equal(masked, value)
    selector.__name__ = "arr_bitwise_and_" + str(value)
    return selector
|
Selecting array elements by bitwise and comparison to a given value.
Parameters:
value : int
Value to which array elements are compared to.
Returns:
array : np.array
| null | null | null |
|
def arr_astype(arr_type):  # function factory
    '''Change dtype of array.
    Parameters:
    arr_type : str, np.dtype
        Character codes (e.g. 'b', '>H'), type strings (e.g. 'i4', 'f8'), Python types (e.g. float, int) and numpy dtypes (e.g. np.uint32) are allowed.
    Returns:
    array : np.array
    '''
    def caster(arr):
        # delegate the conversion to numpy; returns a new array
        return arr.astype(arr_type)
    caster.__name__ = "arr_astype_" + str(arr_type)
    return caster
|
Change dtype of array.
Parameters:
arr_type : str, np.dtype
Character codes (e.g. 'b', '>H'), type strings (e.g. 'i4', 'f8'), Python types (e.g. float, int) and numpy dtypes (e.g. np.uint32) are allowed.
Returns:
array : np.array
| null | null | null |
|
assert isinstance(obj, ProtocolBase)
string = pickle.dumps(obj)
length = len(string)
self.sock.sendall(struct.pack("<I", length) + string)
|
def send(self, obj)
|
Prepend a 4-byte length to the string
| 4.066068 | 4.060127 | 1.001463 |
length = struct.unpack("<I", self.sock.recv(4))[0]
return self._get_next_obj(length)
|
def recv(self, blocking=True)
|
Receive the next object from the socket
| 6.225574 | 4.983139 | 1.249328 |
try:
lenstr = self.sock.recv(4, socket.MSG_DONTWAIT)
except socket.error:
return None
if len(lenstr) < 4:
raise EOFError("Socket closed")
length = struct.unpack("<I", lenstr)[0]
return self._get_next_obj(length)
|
def try_recv(self)
|
Return None immediately if nothing is waiting
| 3.200877 | 3.229785 | 0.99105 |
data = b''
while len(data) < length:
data += self.sock.recv(length - len(data))
return pickle.loads(data)
|
def _get_next_obj(self, length)
|
Assumes we've already read the object length
| 3.012752 | 2.480268 | 1.214688 |
'''Getting modules by type name.
Parameters
----------
type_name : string
Type name of the modules to be returned.
Returns
-------
List of modules of given type name else empty list.
'''
modules = []
for module in self:
if module.__class__.__name__ == type_name:
modules.append(module)
return modules
|
def get_modules(self, type_name)
|
Getting modules by type name.
Parameters
----------
type_name : string
Type name of the modules to be returned.
Returns
-------
List of modules of given type name else empty list.
| 3.244356 | 1.925458 | 1.684979 |
'''Read response to command and convert it to 16-bit integer.
Returns : list of values
'''
self.write(command)
time.sleep(0.1)
return self.read()
|
def ask(self, command)
|
Read response to command and convert it to 16-bit integer.
Returns : list of values
| 8.041972 | 2.999694 | 2.680931 |
if isinstance(channel, str):
cmd = "OPALL %d" % int(on)
elif isinstance(channel, int):
cmd = "OP%d %d" % (channel, int(on))
self.write(cmd)
|
def set_enable(self, on, channel=1)
|
channel: 1=OP1, 2=OP2, 3=AUX, ALL=all channels
| 3.616395 | 3.067724 | 1.178853 |
ret = self.ask("I%dO?" % channel)
if ret[-1] != "A":
print("ttiQl355tp.get_current() format error", ret)
return None
return float(ret[:-1])
|
def get_current(self, channel)
|
channel: 1=OP1, 2=OP2, AUX is not supported
| 13.145668 | 12.10553 | 1.085923 |
ret = self.ask("V%dO?" % channel)
if ret[-1] != "V":
print("ttiQl355tp.get_voltage() format error", ret)
return None
return float(ret[:-1])
|
def get_voltage(self, channel)
|
channel: 1=OP1, 2=OP2, AUX is not supported
| 11.365613 | 10.637447 | 1.068453 |
ret = self.ask("V%d?" % channel)
if ret[:3] != "V%d " % channel:
print("ttiQl355tp.get_voltage() format error", ret)
return None
return float(ret[3:])
|
def get_set_voltage(self, channel)
|
channel: 1=OP1, 2=OP2, AUX is not supported
| 10.287136 | 9.548592 | 1.077346 |
ret = self.ask("I%d?" % channel)
if ret[:3] != "I%d " % channel:
print("ttiQl355tp.get_current_limit() format error", ret)
return None
return float(ret[3:])
|
def get_current_limit(self, channel)
|
channel: 1=OP1, 2=OP2, AUX is not supported
| 9.928011 | 8.91201 | 1.114004 |
cmd = "V%d %f" % (channel, value)
self.write(cmd)
|
def set_voltage(self, value, channel=1)
|
channel: 1=OP1, 2=OP2, AUX is not supported
| 4.982796 | 4.869675 | 1.02323 |
cmd = "I%d %f" % (channel, value)
self.write(cmd)
|
def set_current_limit(self, value, channel=1)
|
channel: 1=OP1, 2=OP2, AUX is not supported
| 6.16416 | 6.221117 | 0.990845 |
'''Determine the ready state of the device and wait until device is ready.
Parameters
----------
timeout : int, float
The maximum amount of time to wait in seconds. Reaching the timeout will raise a RuntimeError.
times : int
Maximum number of times reading the ready state.
delay : int, float
The number of seconds to sleep before checks. Defaults to 0.
delay_between : int, float
The number of seconds to sleep between each check. Defaults to 0.
abort : Threading.Event
Breaking the loop from other threads.
Returns
-------
True if state is ready, else False.
'''
if delay:
try:
sleep(delay)
except IOError: # negative values
pass
if timeout is not None:
if timeout < 0:
raise ValueError("timeout is smaller than 0")
else:
stop = time() + timeout
times_checked = 0
while not self.is_ready:
now = time()
times_checked += 1
if abort and abort.is_set():
False
if timeout is not None and stop <= now:
raise RuntimeError('Time out while waiting for ready in %s, module %s' % (self.name, self.__class__.__module__))
if times and times > times_checked:
False
if delay_between:
try:
sleep(delay_between)
except IOError: # negative values
pass
return True
|
def wait_for_ready(self, timeout=None, times=None, delay=None, delay_between=None, abort=None)
|
Determine the ready state of the device and wait until device is ready.
Parameters
----------
timeout : int, float
The maximum amount of time to wait in seconds. Reaching the timeout will raise a RuntimeError.
times : int
Maximum number of times reading the ready state.
delay : int, float
The number of seconds to sleep before checks. Defaults to 0.
delay_between : int, float
The number of seconds to sleep between each check. Defaults to 0.
abort : Threading.Event
Breaking the loop from other threads.
Returns
-------
True if state is ready, else False.
| 3.609323 | 2.1273 | 1.696669 |
'''Write DAC
'''
self._intf.write(self._base_addr + self.MAX_5380_ADD, array('B', pack('B', value)))
|
def _set_dac_value(self, channel, value)
|
Write DAC
| 19.642979 | 16.562611 | 1.185983 |
'''Write DAC
'''
# DAC value cannot be -128
if value == -128:
value = -127
if value < 0:
sign = 1
else:
sign = 0
value = (sign << 7) | (0x7F & abs(value))
self._intf.write(self._base_addr + self.DS_4424_ADD, array('B', pack('BB', channel, value)))
|
def _set_dac_value(self, channel, value)
|
Write DAC
| 5.627642 | 5.357318 | 1.050459 |
data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V2_FORMAT))
for idx, channel in enumerate(self._ch_cal.iterkeys()):
ch_data = data[idx * calcsize(self.CAL_DATA_CH_V2_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V2_FORMAT)]
values = unpack_from(self.CAL_DATA_CH_V2_FORMAT, ch_data)
self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip()
self._ch_cal[channel]['default'] = values[1]
self._ch_cal[channel]['ADCI']['gain'] = values[2]
self._ch_cal[channel]['ADCI']['offset'] = values[3]
self._ch_cal[channel]['ADCI']['iq_gain'] = values[4]
self._ch_cal[channel]['ADCI']['iq_offset'] = values[5]
self._ch_cal[channel]['ADCV']['gain'] = values[6]
self._ch_cal[channel]['ADCV']['offset'] = values[7]
self._ch_cal[channel]['DACV']['gain'] = values[8]
self._ch_cal[channel]['DACV']['offset'] = values[9]
if temperature:
self._ch_cal[channel]['NTC']['B_NTC'] = values[10]
self._ch_cal[channel]['NTC']['R1'] = values[11]
self._ch_cal[channel]['NTC']['R2'] = values[12]
self._ch_cal[channel]['NTC']['R4'] = values[13]
self._ch_cal[channel]['NTC']['R_NTC_25'] = values[14]
self._ch_cal[channel]['NTC']['VREF'] = values[15]
else:
raise ValueError('EEPROM data format not supported (header: %s)' % header)
|
def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random
'''Reading EEPROM calibration for power regulators and temperature
'''
header = self.get_format()
if header == self.HEADER_V2
|
Reading EEPROM calibration for power regulators and temperature
| 2.086566 | 2.073246 | 1.006425 |
'''Setting current limit
Note: same limit for all channels.
'''
dac_offset = self._ch_cal[channel]['DACI']['offset']
dac_gain = self._ch_cal[channel]['DACI']['gain']
if unit == 'raw':
value = value
elif unit == 'A':
value = int((value - dac_offset) / dac_gain)
elif unit == 'mA':
value = int((value / 1000 - dac_offset) / dac_gain)
else:
raise TypeError("Invalid unit type.")
DacMax5380._set_dac_value(self, channel, value)
|
def set_current_limit(self, channel, value, unit='A')
|
Setting current limit
Note: same limit for all channels.
| 3.95143 | 3.469574 | 1.13888 |
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self.simulation_host, self.simulation_port))
self._iface = PickleInterface(self._sock)
return True
|
def DownloadXilinx(self, bitfile)
|
We hijack this call to perform the socket connect
| 4.454513 | 3.662002 | 1.216415 |
''' Move chuck to absolute position in um'''
if speed:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y %d' % (x, y, speed))
else:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y' % (x, y))
|
def set_position(self, x, y, speed=None)
|
Move chuck to absolute position in um
| 5.598204 | 4.128745 | 1.355909 |
''' Move chuck relative to actual position in um'''
if speed:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y %d' % (dx, dy, speed))
else:
self._intf.write('MoveChuckPosition %1.1f %1.1f R Y' % (dx, dy))
|
def move_position(self, dx, dy, speed=None)
|
Move chuck relative to actual position in um
| 5.100121 | 3.406385 | 1.497224 |
''' Read chuck position (x, y, z)'''
reply = self._intf.query('ReadChuckPosition Y H')[2:]
return [float(i) for i in reply.split()]
|
def get_position(self)
|
Read chuck position (x, y, z)
| 15.936893 | 8.614533 | 1.850001 |
''' Read the current wafer map chip index as a (column, row)-style integer pair.

NOTE(review): the previous docstring said "Move chuck to ..." but the code
only queries 'ReadMapPosition' and parses the reply; it moves nothing.
'''
reply = self._intf.query('ReadMapPosition')
values = reply[2:].split(' ')
return (int(values[0]), int(values[1]))
|
def get_die(self)
|
Move chuck to wafer map chip index
| 20.937895 | 6.806087 | 3.076348 |
'Clear tracks in memory - all zero'
for track in self._tracks:
self._tracks[track].setall(False)
|
def clear(self)
|
Clear tracks in memory - all zero
| 20.090542 | 7.151941 | 2.809103 |
''' Reading data from SiTCP FIFO (via TCP).
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
'''
fifo_size = self._intf._get_tcp_data_size()
fifo_int_size = int((fifo_size - (fifo_size % 4)) / 4)
data = self._intf._get_tcp_data(fifo_int_size * 4)
return np.frombuffer(data, dtype=np.dtype('<u4'))
|
def get_data(self)
|
Reading data from SiTCP FIFO (via TCP).
Returns
-------
array : numpy.ndarray
Array of unsigned integers (32 bit).
| 5.646588 | 3.08537 | 1.830117 |
''' Sending data to via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit).
'''
data = array.array('B', struct.unpack("{}B".format(len(data) * 4), struct.pack("{}I".format(len(data)), *data)))
self._intf._send_tcp_data(data)
|
def set_data(self, data)
|
Sending data to via TCP.
Parameters
----------
data : array
Array of unsigned integers (32 bit).
| 6.342872 | 3.89448 | 1.628683 |
self._clear_strobes()
gr_size = len(self['GLOBAL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:gr_size] = self['GLOBAL_REG'][:] # this will be shifted out
self['SEQ']['GLOBAL_SHIFT_EN'][0:gr_size] = bitarray(gr_size * '1') # this is to enable clock
self['SEQ']['GLOBAL_CTR_LD'][gr_size + 1:gr_size + 2] = bitarray("1") # load signals
self['SEQ']['GLOBAL_DAC_LD'][gr_size + 1:gr_size + 2] = bitarray("1")
# Execute the program (write bits to output pins)
# + 1 extra 0 bit so that everything ends on LOW instead of HIGH
self._run_seq(gr_size + 3)
|
def program_global_reg(self)
|
Send the global register to the chip.
Loads the values of self['GLOBAL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
| 7.60861 | 6.109038 | 1.245468 |
self._clear_strobes()
# enable receiver it work only if pixel register is enabled/clocked
self['PIXEL_RX'].set_en(enable_receiver)
px_size = len(self['PIXEL_REG'][:]) # get the size
self['SEQ']['SHIFT_IN'][0:px_size] = self['PIXEL_REG'][:] # this will be shifted out
self['SEQ']['PIXEL_SHIFT_EN'][0:px_size] = bitarray(px_size * '1') # this is to enable clock
self._run_seq(px_size + 1)
|
def program_pixel_reg(self, enable_receiver=True)
|
Send the pixel register to the chip and store the output.
Loads the values of self['PIXEL_REG'] onto the chip.
Includes enabling the clock, and loading the Control (CTR)
and DAC shadow registers.
if(enable_receiver), stores the output (by byte) in
self['DATA'], retrievable via `chip['DATA'].get_data()`.
| 9.827022 | 9.03128 | 1.08811 |
# Write the sequence to the sequence generator (hw driver)
self['SEQ'].write(size) # write pattern to memory
self['SEQ'].set_size(size) # set size
self['SEQ'].set_repeat(1) # set repeat
self['SEQ'].start() # start
while not self['SEQ'].get_done():
time.sleep(0.01)
|
def _run_seq(self, size)
|
Send the contents of self['SEQ'] to the chip and wait until it finishes.
| 5.944578 | 4.629991 | 1.283929 |
def write(self, addr, data):
    '''Write access.
    :param addr: i2c slave address
    :type addr: char
    :param data: array/list of bytes
    :type data: iterable
    :rtype: None
    '''
    # clear the R/W bit (LSB) to select a write transfer
    self.set_addr(addr & 0xfe)
    self.set_data(data)
    self.set_size(len(data))
    self.start()
    # busy-wait until the hardware signals completion
    while True:
        if self.is_ready:
            break
|
Write access.
:param addr: i2c slave address
:type addr: char
:param data: array/list of bytes
:type data: iterable
:rtype: None
| null | null | null |
|
def read(self, addr, size):
    '''Read access.
    :param addr: i2c slave address
    :type addr: char
    :param size: size of transfer
    :type size: int
    :returns: data byte array
    :rtype: array.array('B')
    '''
    # set the R/W bit (LSB) to select a read transfer
    self.set_addr(addr | 0x01)
    self.set_size(size)
    self.start()
    # busy-wait until the hardware signals completion
    while True:
        if self.is_ready:
            break
    return self.get_data(size)
|
Read access.
:param addr: i2c slave address
:type addr: char
:param size: size of transfer
:type size: int
:returns: data byte array
:rtype: array.array('B')
| null | null | null |
|
global _full_parser
if _full_parser:
return _full_parser
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
# build the full argument parser with luigi helpers
root_task = luigi_parser.known_args.root_task
_full_parser = luigi_parser._build_parser(root_task)
logger.debug("build full luigi argument parser")
return _full_parser
|
def full_parser()
|
Returns the full *ArgumentParser* used by the luigi ``CmdlineParser``. The returned instance is
cached.
| 3.807879 | 3.508128 | 1.085445 |
global _root_task_parser
if _root_task_parser:
return _root_task_parser
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
root_task = luigi_parser.known_args.root_task
# get all root task parameter destinations
root_dests = []
for task_name, _, param_name, _ in luigi.task_register.Register.get_all_params():
if task_name == root_task:
root_dests.append(param_name)
# create a new parser and add all root actions
_root_task_parser = ArgumentParser(add_help=False)
for action in list(full_parser()._actions):
if not action.option_strings or action.dest in root_dests:
_root_task_parser._add_action(action)
logger.debug("build luigi argument parser for root task {}".format(root_task))
return _root_task_parser
|
def root_task_parser()
|
Returns a new *ArgumentParser* instance that only contains paremeter actions of the root task.
The returned instance is cached.
| 3.114275 | 2.926191 | 1.064276 |
global _global_cmdline_args
if _global_cmdline_args:
return _global_cmdline_args
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
_global_cmdline_args = root_task_parser().parse_known_args(luigi_parser.cmdline_args)[1]
return _global_cmdline_args
|
def global_cmdline_args()
|
Returns the list of command line arguments that do not belong to the root task. The returned
list is cached. Example:
.. code-block:: python
global_cmdline_args()
# -> ["--local-scheduler"]
| 3.28628 | 3.669453 | 0.895578 |
global _global_cmdline_values
if _global_cmdline_values:
return _global_cmdline_values
luigi_parser = luigi.cmdline_parser.CmdlineParser.get_instance()
if not luigi_parser:
return None
# go through all actions of the full luigi parser and compare option strings
# with the global cmdline args
parser = full_parser()
global_args = global_cmdline_args()
_global_cmdline_values = {}
for action in parser._actions:
if any(arg in action.option_strings for arg in global_args):
_global_cmdline_values[action.dest] = getattr(luigi_parser.known_args, action.dest)
return _global_cmdline_values
|
def global_cmdline_values()
|
Returns a dictionary of global command line arguments (computed with
:py:func:`global_cmdline_args`) to their current values. The returnd dictionary is cached.
Example:
.. code-block:: python
global_cmdline_values()
# -> {"core_local_scheduler": True}
| 3.228689 | 3.584693 | 0.900688 |
if arg not in args:
args = list(args) + [arg] + list(values)
return args
|
def add_cmdline_arg(args, arg, *values)
|
Adds a command line argument *arg* to a list of argument *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *arg* exists, *args* is returned unchanged. Otherwise,
*arg* is appended to the end with optional argument *values*. Example:
.. code-block:: python
args = global_cmdline_values()
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--local-scheduler")
# -> ["--local-scheduler"]
add_cmdline_arg(args, "--workers", 4)
# -> ["--local-scheduler", "--workers", "4"]
| 3.320477 | 5.29269 | 0.62737 |
if arg in args:
idx = args.index(arg)
args = list(args)
del args[idx:idx + max(n, 1)]
return args
|
def remove_cmdline_arg(args, arg, n=1)
|
Removes the command line argument *args* from a list of arguments *args*, e.g. as returned from
:py:func:`global_cmdline_args`. When *n* is 1 or less, only the argument is removed. Otherwise,
the following *n-1* values are removed. Example:
.. code-block:: python
args = global_cmdline_values()
# -> ["--local-scheduler", "--workers", "4"]
remove_cmdline_arg(args, "--local-scheduler")
# -> ["--workers", "4"]
remove_cmdline_arg(args, "--workers", 2)
# -> ["--local-scheduler"]
| 2.741941 | 3.580343 | 0.765832 |
skip = make_list(skip) if skip else []
transfer_kwargs = {
name: kwargs.pop(name)
for name in ["cache", "prefer_cache", "retries", "retry_delay"]
if name in kwargs and name not in skip
}
return transfer_kwargs, kwargs
|
def split_transfer_kwargs(kwargs, skip=None)
|
Takes keyword arguments *kwargs*, splits them into two separate dictionaries depending on their
content, and returns them in a tuple. The first one will contain arguments related to potential
file transfer operations (e.g. ``"cache"`` or ``"retries"``), while the second one will contain
all remaining arguments. This function is used internally to decide which arguments to pass to
target formatters. *skip* can be a list of argument keys that are ignored.
| 4.036435 | 4.486943 | 0.899596 |
return dict(job_id=job_id, branches=branches or [])
|
def job_data(cls, job_id=dummy_job_id, branches=None, **kwargs)
|
Returns a dictionary containing default job submission information such as the *job_id* and
task *branches* covered by the job.
| 4.990068 | 4.729575 | 1.055077 |
return dict(job_id=job_id, status=status, code=code, error=error)
|
def job_data(cls, job_id=dummy_job_id, status=None, code=None, error=None, **kwargs)
|
Returns a dictionary containing default job status information such as the *job_id*, a job
*status* string, a job return code, and an *error* message.
| 2.700625 | 2.587208 | 1.043838 |
attr = "{}_{}".format(self.workflow_type, name)
if not fallback:
return getattr(self.task, attr)
else:
value = getattr(self.task, attr, no_value)
return value if value != no_value else getattr(self.task, name)
|
def _get_task_attribute(self, name, fallback=False)
|
Return an attribute of the actial task named ``<workflow_type>_<name>``.
When the attribute does not exist and *fallback* is *True*, try to return the task attribute
simply named *name*. In any case, if a requested task attribute is eventually not found, an
AttributeError is raised.
| 3.248037 | 2.769601 | 1.172745 |
task = self.task
# get the directory where the control outputs are stored
out_dir = self._get_task_attribute("output_directory")()
# define outputs
outputs = OrderedDict()
postfix = self._get_task_attribute("output_postfix")()
# a file containing the submission data, i.e. job ids etc
submission_file = "{}_submission{}.json".format(self.workflow_type, postfix)
outputs["submission"] = out_dir.child(submission_file, type="f")
outputs["submission"].optional = True
# a file containing status data when the jobs are done
if not task.no_poll:
status_file = "{}_status{}.json".format(self.workflow_type, postfix)
outputs["status"] = out_dir.child(status_file, type="f")
outputs["status"].optional = True
# update with upstream output when not just controlling running jobs
if not task.is_controlling_remote_jobs():
outputs.update(super(BaseRemoteWorkflowProxy, self).output())
return outputs
|
def output(self)
|
Returns the default workflow outputs in an ordered dictionary. At the moment, this is the
collection of outputs of the branch tasks (key ``"collection"``), the submission file (key
``"submission"``), and the status file (key ``"status"``). These two *control outputs* are
optional, i.e., they are not considered when checking the task's completeness.
| 5.490766 | 4.891549 | 1.1225 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.