code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
with self.notifications_lock:
if clear_if_dupl and message in self.message_widget_dict.keys():
logger.debug("notification %r is already displayed", message)
return
logger.debug("display notification %r", message)
widget = urwid.AttrMap(urwid.Text(message), "notif_{}".format(level))
return self.notify_widget(widget, message=message, clear_in=clear_in) | def notify_message(self, message, level="info", clear_if_dupl=True,
clear_in=CLEAR_NOTIF_BAR_MESSAGE_IN) | :param message: str
:param level: str, {info, error}
:param clear_if_dupl: bool, if True, don't display the notification again
:param clear_in: seconds, remove the notification after some time
opens notification popup. | 3.855778 | 3.966267 | 0.972143 |
@log_traceback
def clear_notification(*args, **kwargs):
# the point here is the log_traceback
self.remove_widget(widget, message=message)
if not widget:
return
logger.debug("display notification widget %s", widget)
with self.notifications_lock:
self.widget_message_dict[widget] = message
if message:
self.message_widget_dict[message] = widget
self.reload_footer(rebuild_statusbar=False)
self.loop.set_alarm_in(clear_in, clear_notification)
return widget | def notify_widget(self, widget, message=None, clear_in=CLEAR_NOTIF_BAR_MESSAGE_IN) | opens notification popup.
:param widget: instance of Widget, widget to display
:param message: str, message to remove from list of notifications
:param clear_in: int, time in seconds after which the notification should be removed | 5.610969 | 5.822168 | 0.963725 |
logger.debug("refresh user interface")
try:
with self.refresh_lock:
self.draw_screen()
except AssertionError:
logger.warning("application is not running")
pass | def refresh(self) | explicitly refresh user interface; useful when changing widgets dynamically | 8.943417 | 6.861283 | 1.303461 |
max_cols_lengths = {}
for row in table:
col_index = 0
for idx, widget in enumerate(row.widgets):
l = widget.pack((size[0], ))[0]
max_cols_lengths[idx] = max(max_cols_lengths.get(idx, 0), l)
col_index += 1
max_cols_lengths.setdefault(0, 1) # in case table is empty
return max_cols_lengths | def calculate_max_cols_length(table, size) | :param table: list of lists:
[["row 1 column 1", "row 1 column 2"],
["row 2 column 1", "row 2 column 2"]]
each item consists of instance of urwid.Text
:returns dict, {index: width} | 3.365085 | 3.292686 | 1.021988 |
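The width calculation above records, per column index, the widest rendered cell. A minimal standalone sketch of the same idea, using plain strings so that `len()` stands in for urwid's `widget.pack()`:

```python
# Minimal sketch of the per-column width scan above; len() stands in for
# the width returned by urwid's widget.pack().
def max_col_lengths(table):
    widths = {}
    for row in table:
        for idx, cell in enumerate(row):
            widths[idx] = max(widths.get(idx, 0), len(cell))
    widths.setdefault(0, 1)  # keep one column even when the table is empty
    return widths

print(max_col_lengths([["row 1 column 1", "x"], ["a", "row 2 column 2"]]))
# -> {0: 14, 1: 14}
```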
rows = []
max_lengths = {}
ignore_columns = ignore_columns or []
# shitty performance, here we go
# it would be way better to do a single double loop and provide a mutable variable
# FIXME: merge this code with calculate() from above
for row in data:
col_index = 0
for widget in row:
if col_index in ignore_columns:
col_index += 1
continue
l = len(widget.text)
if max_allowed_lengths:
if col_index in max_allowed_lengths and max_allowed_lengths[col_index] < l:
# l is bigger than what is allowed
l = max_allowed_lengths[col_index]
max_lengths.setdefault(col_index, l)
max_lengths[col_index] = max(l, max_lengths[col_index])
col_index += 1
for row in data:
row_widgets = []
for idx, item in enumerate(row):
if idx in ignore_columns:
row_widgets.append(item)
else:
row_widgets.append((max_lengths[idx], item))
rows.append(
RowWidget(row_widgets, dividechars=dividechars)
)
return rows | def assemble_rows(data, max_allowed_lengths=None, dividechars=1,
ignore_columns=None) | :param data: list of lists:
[["row 1 column 1", "row 1 column 2"],
["row 2 column 1", "row 2 column 2"]]
each item consists of instance of urwid.Text
:param max_allowed_lengths: dict:
{col_index: maximum_allowed_length}
:param ignore_columns: list of ints, indexes which should not be calculated | 3.347738 | 3.284307 | 1.019313 |
now = datetime.datetime.now()
if t + datetime.timedelta(hours=3) > now:
return get_map("main_list_white")
if t + datetime.timedelta(days=3) > now:
return get_map("main_list_lg")
else:
return get_map("main_list_dg") | def get_time_attr_map(t) | now -> |
hour ago -> |
day ago -> |
|--------------|--------------------|---------------------| | 4.141431 | 4.214817 | 0.982589 |
# esc[ + values + control character
# h, l, p commands are complicated, let's ignore them
seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]"
regex = re.compile(seq_regex)
start = 0
response = ""
for match in regex.finditer(text):
end = match.start()
response += text[start:end]
start = match.end()
response += text[start:len(text)]
return response | def strip_from_ansi_esc_sequences(text) | find ANSI escape sequences in text and remove them
:param text: str
:return: str, the text with escape sequences removed | 7.792358 | 8.572723 | 0.908971 |
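A self-contained sketch of the same stripping, using `re.sub` with the character class from the function above (h, l and p commands are still ignored):

```python
import re

# Drop ANSI CSI sequences (colors, cursor movement) before handing text
# to a plain-text widget; equivalent to the finditer/rebuild loop above.
ANSI_RE = re.compile(r"\x1b\[[0-9;]*[mKJusDCBAfH]")

def strip_ansi(text):
    return ANSI_RE.sub("", text)

print(strip_ansi("\x1b[31mred\x1b[0m plain"))  # -> "red plain"
```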
# TODO: make this available for every buffer
logger.info("starting receiving events from docker")
it = self.d.realtime_updates()
while True:
try:
event = next(it)
except NotifyError as ex:
self.ui.notify_message("error when receiving realtime events from docker: %s" % ex,
level="error")
return
# FIXME: we should pass events to all buffers
# ATM the buffers can't be rendered since they are not displayed
# and hence traceback like this: ListBoxError("Listbox contents too short! ...
logger.debug("pass event to current buffer %s", self.ui.current_buffer)
try:
self.ui.current_buffer.process_realtime_event(event)
except Exception as ex:
# swallow any exc
logger.error("error while processing runtime event: %r", ex) | def realtime_updates(self) | fetch realtime events from docker and pass them to buffers
:return: None | 10.530281 | 9.172829 | 1.147986 |
try:
top_dir = os.path.abspath(os.path.expanduser(os.environ["XDG_CACHE_HOME"]))
except KeyError:
top_dir = os.path.abspath(os.path.expanduser("~/.cache"))
our_cache_dir = os.path.join(top_dir, PROJECT_NAME)
os.makedirs(our_cache_dir, mode=0o775, exist_ok=True)
return our_cache_dir | def setup_dirs() | Make required directories to hold logfile.
:returns: str | 2.145495 | 2.333652 | 0.919372 |
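A standalone sketch of the same XDG lookup; the project name below is a placeholder for the `PROJECT_NAME` constant referenced above:

```python
import os

PROJECT_NAME = "myproject"  # placeholder for the constant used above

def cache_dir():
    # prefer $XDG_CACHE_HOME, fall back to ~/.cache, then create our subdir
    top = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
    path = os.path.join(os.path.abspath(top), PROJECT_NAME)
    os.makedirs(path, mode=0o775, exist_ok=True)
    return path
```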
abbrevs = (
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'kB'),
(1, 'bytes')
)
if bytesize == 1:
return '1 byte'
for factor, suffix in abbrevs:
if bytesize >= factor:
break
if factor == 1:
precision = 0
return '%.*f %s' % (precision, bytesize / float(factor), suffix) | def humanize_bytes(bytesize, precision=2) | Humanize byte size figures
https://gist.github.com/moird/3684595 | 1.466892 | 1.504933 | 0.974722 |
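A compact standalone sketch of the same thresholding: pick the largest binary prefix that fits, then format with the requested precision (the original's "1 byte" special case is noted in a comment):

```python
def humanize(nbytes, precision=2):
    # pick the largest binary prefix not exceeding the value
    for factor, suffix in ((1 << 50, 'PB'), (1 << 40, 'TB'), (1 << 30, 'GB'),
                           (1 << 20, 'MB'), (1 << 10, 'kB'), (1, 'bytes')):
        if nbytes >= factor:
            break
    if factor == 1:
        precision = 0  # whole bytes only
    return '%.*f %s' % (precision, nbytes / float(factor), suffix)

print(humanize(1234567))   # -> 1.18 MB
print(humanize(1))         # -> 1 bytes (the original special-cases '1 byte')
```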
args = args or ()
kwargs = kwargs or {}
t = 1.0
for x in range(retries):
try:
return call(*args, **kwargs)
except APIError as ex:
logger.error("query #%d: docker returned an error: %r", x, ex)
except Exception as ex:
# this may be pretty bad
log_last_traceback()
logger.error("query #%d: generic error: %r", x, ex)
t *= 2
time.sleep(t) | def repeater(call, args=None, kwargs=None, retries=4) | repeat call x-times: docker API is just awesome
:param call: function
:param args: tuple, args for function
:param kwargs: dict, kwargs for function
:param retries: int, how many times to retry
:return: response of the call | 3.965621 | 4.05707 | 0.977459 |
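A generic sketch of the same retry-with-exponential-backoff pattern; the commented usage line is hypothetical:

```python
import time

def retry(call, retries=4, exceptions=(Exception,)):
    delay = 1.0
    for attempt in range(retries):
        try:
            return call()
        except exceptions as ex:
            print("attempt #%d failed: %r" % (attempt, ex))
        delay *= 2          # double the wait after every failure
        time.sleep(delay)

# retry(lambda: client.images(), exceptions=(APIError,))  # hypothetical usage
```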
try:
value = graceful_chain_get(self.inspect(cached=cached).response, *path)
except docker.errors.NotFound:
logger.warning("object %s is not available anymore", self)
raise NotAvailableAnymore()
return value | def metadata_get(self, path, cached=True) | get metadata from inspect, specified by path
:param path: list of str
:param cached: bool, use cached version of inspect if available | 10.149285 | 9.995164 | 1.01542 |
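`graceful_chain_get` is not defined in this excerpt; a plausible implementation of such a safe nested lookup might look like the sketch below (an assumption, not the project's actual helper):

```python
def graceful_chain_get(source, *keys, default=None):
    # walk nested dicts/lists, returning `default` instead of raising
    current = source
    for key in keys:
        try:
            current = current[key]
        except (KeyError, IndexError, TypeError):
            return default
    return current

inspect_data = {"NetworkSettings": {"Ports": {"80/tcp": [{"HostPort": "8080"}]}}}
print(graceful_chain_get(inspect_data, "NetworkSettings", "Ports", "80/tcp", 0, "HostPort"))  # 8080
print(graceful_chain_get(inspect_data, "Config", "Image", default="n/a"))                     # n/a
```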
# sample output:
# {
# "Created": 1457116802,
# "Id": "sha256:507cb13a216097710f0d234668bf64a4c92949c573ba15eba13d05aad392fe04",
# "Size": 204692029,
# "Tags": [
# "docker.io/fedora:latest"
# ],
# "Comment": "",
# "CreatedBy": "/bin/sh -c #(nop) ADD file:bcb5e5c... in /"
# }
try:
response = self.d.history(self.image_id)
except docker.errors.NotFound:
raise NotAvailableAnymore()
layers = []
for l in response:
layer_id = l["Id"]
if layer_id == "<missing>":
layers.append(DockerImage(l, self.docker_backend))
else:
layers.append(self.docker_backend.get_image_by_id(layer_id))
return layers | def layers(self) | similar as parent images, except that it uses /history API endpoint
:return: | 6.921929 | 6.497313 | 1.065353 |
self._virtual_size = self._virtual_size or \
graceful_chain_get(self.data, "VirtualSize", default=0)
try:
return self._virtual_size - self._shared_size
except TypeError:
return 0 | def unique_size(self) | Size of ONLY this particular layer
:return: int or None | 6.406429 | 6.882638 | 0.93081 |
try:
# docker >= 1.9
image_id = self.data["ImageID"]
except KeyError:
# docker <= 1.8
image_id = self.metadata_get(["Image"])
return image_id | def image_id(self) | this container is created from image with id... | 5.081625 | 4.247422 | 1.196402 |
try:
return NetData(self.inspect(cached=True).response)
except docker.errors.NotFound:
raise NotAvailableAnymore() | def net(self) | get ACTIVE port mappings of a container
:return: dict:
{
"host_port": "container_port"
} | 17.372025 | 16.088539 | 1.079776 |
# let's get resources from .stats()
ps_args = "-eo pid,ppid,wchan,args"
# returns {"Processes": [values], "Titles": [values]}
# it's easier to play with list of dicts: [{"pid": 1, "ppid": 0}]
try:
response = self.d.top(self.container_id, ps_args=ps_args)
except docker.errors.APIError as ex:
logger.warning("error getting processes: %r", ex)
return []
# TODO: sort?
logger.debug(json.dumps(response, indent=2))
return [dict(zip(response["Titles"], process))
for process in response["Processes"] or []] | def top(self) | list of processes in a running container
:return: list of dicts (empty on error) | 7.654895 | 6.566599 | 1.165732 |
content = []
containers_o = None
images_o = None
# return containers when containers=False and running=True
if containers or not stopped:
containers_o = self.get_containers(cached=cached, stopped=stopped)
content += containers_o.response
if images:
images_o = self.get_images(cached=cached)
content += images_o.response
if sort_by_created:
content.sort(key=attrgetter("natural_sort_value"), reverse=True)
return content, containers_o, images_o | def filter(self, containers=True, images=True, stopped=True, cached=False, sort_by_created=True) | since django is so awesome, let's use their ORM API
:return: | 3.492326 | 3.613465 | 0.966476 |
maxcol = size[0]
self._cache_maxcol = maxcol
widths = [width for i, (w, (t, width, b)) in enumerate(self.contents)]
self._cache_column_widths = widths
return widths | def column_widths(self, size, focus=False) | Return a list of column widths.
0 values in the list mean hide corresponding column completely | 8.011147 | 8.414714 | 0.95204 |
a4 = None
if network_name == "host":
a4 = "127.0.0.1"
n = {}
a4 = graceful_chain_get(network_data, "IPAddress") or a4
if a4:
n["ip_address4"] = a4
a6 = graceful_chain_get(network_data, "GlobalIPv6Address")
if a6:
n["ip_address4"] = a6
return n | def extract_data_from_inspect(network_name, network_data) | :param network_name: str
:param network_data: dict
:return: dict:
{
"ip_address4": "12.34.56.78"
"ip_address6": "ff:fa:..."
} | 4.040465 | 3.29396 | 1.226629 |
if self._ports is None:
self._ports = {}
if self.net_settings["Ports"]:
for key, value in self.net_settings["Ports"].items():
cleaned_port = key.split("/")[0]
self._ports[cleaned_port] = graceful_chain_get(value, 0, "HostPort")
# in case of --net=host, there's nothing in network settings, let's get it from "Config"
exposed_ports_section = graceful_chain_get(self.inspect_data, "Config", "ExposedPorts")
if exposed_ports_section:
for key, value in exposed_ports_section.items():
cleaned_port = key.split("/")[0]
self._ports[cleaned_port] = None # extremely docker specific
return self._ports | def ports(self) | :return: dict
{
# container -> host
"1234": "2345"
} | 4.588004 | 3.942307 | 1.163787 |
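The keys in the Ports section look like "80/tcp"; the property strips the protocol suffix and keeps the first HostPort, or None when the port is only exposed. A standalone sketch on an illustrative snippet of inspect data:

```python
ports_section = {                      # illustrative `docker inspect` snippet
    "80/tcp": [{"HostIp": "0.0.0.0", "HostPort": "8080"}],
    "443/tcp": None,                   # exposed but not published
}

mapping = {}
for key, bindings in ports_section.items():
    container_port = key.split("/")[0]           # "80/tcp" -> "80"
    mapping[container_port] = bindings[0]["HostPort"] if bindings else None

print(mapping)  # -> {'80': '8080', '443': None}
```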
if self._ips is None:
self._ips = {}
default_net = extract_data_from_inspect("default", self.net_settings)
if default_net:
self._ips["default"] = default_net
# this can be None
networks = self.inspect_data["NetworkSettings"]["Networks"]
if networks:
for network_name, network_data in networks.items():
self._ips[network_name] = extract_data_from_inspect(network_name, network_data)
return self._ips | def ips(self) | :return: dict:
{
"default": {
"ip_address4": "12.34.56.78"
"ip_address6": "ff:fa:..."
}
"other": {
...
}
} | 3.808851 | 3.305983 | 1.152108 |
logger.info("refresh listing")
focus_on_top = len(self.body) == 0 # focus if empty
with self.refresh_lock:
self.query(query_string=query)
if focus_on_top:
try:
self.set_focus(0)
except IndexError:
pass | def refresh(self, query=None) | refresh listing, also apply filters
:return: | 6.748354 | 6.541352 | 1.031645 |
def query_notify(operation):
w = get_operation_notify_widget(operation, display_always=False)
if w:
self.ui.notify_widget(w)
if query_string is not None:
self.filter_query = query_string.strip()
# FIXME: this could be part of filter command since it's command line
backend_query = {
"cached": False,
"containers": True,
"images": True,
}
def containers():
backend_query["containers"] = True
backend_query["images"] = not backend_query["images"]
backend_query["cached"] = True
def images():
backend_query["containers"] = not backend_query["containers"]
backend_query["images"] = True
backend_query["cached"] = True
def running():
backend_query["stopped"] = False
backend_query["cached"] = True
backend_query["images"] = False
query_conf = [
{
"query_keys": ["t", "type"],
"query_values": ["c", "container", "containers"],
"callback": containers
}, {
"query_keys": ["t", "type"],
"query_values": ["i", "images", "images"],
"callback": images
}, {
"query_keys": ["s", "state"],
"query_values": ["r", "running"],
"callback": running
},
]
query_list = re.split(r"[\s,]", self.filter_query)
unprocessed = []
for query_str in query_list:
if not query_str:
continue
# process here x=y queries and pass rest to parent filter()
try:
query_key, query_value = query_str.split("=", 1)
except ValueError:
unprocessed.append(query_str)
else:
logger.debug("looking up query key %r and query value %r", query_key, query_value)
for c in query_conf:
if query_key in c["query_keys"] and query_value in c["query_values"]:
c["callback"]()
break
else:
raise NotifyError("Invalid query string: %r", query_str)
widgets = []
logger.debug("doing query %s", backend_query)
query, c_op, i_op = self.d.filter(**backend_query)
for o in query:
try:
line = MainLineWidget(o)
except NotAvailableAnymore:
continue
widgets.append(line)
if unprocessed:
new_query = " ".join(unprocessed)
logger.debug("doing parent query for unprocessed string: %r", new_query)
super().filter(new_query, widgets_to_filter=widgets)
else:
self.set_body(widgets)
self.ro_content = widgets
query_notify(i_op)
query_notify(c_op) | def query(self, query_string="") | query and display, also apply filters
:param query_string: str
:return: None | 3.530866 | 3.495225 | 1.010197 |
arg_index = 0
for a in argument_list:
opt_and_val = a.split("=", 1)
opt_name = opt_and_val[0]
try:
# option
argument = self.options[opt_name]
except KeyError:
# argument
try:
argument = self.arguments[arg_index]
except IndexError:
logger.error("option/argument %r not specified", a)
raise NoSuchOptionOrArgument("No such option or argument: %r" % opt_name)
logger.info("argument found: %s", argument)
safe_arg_name = normalize_arg_name(argument.name) # so we can access names-with-dashes
logger.info("argument is available under name %r", safe_arg_name)
if isinstance(argument, Argument):
arg_index += 1
value = (a, )
else:
try:
value = (opt_and_val[1], )
except IndexError:
value = tuple()
arg_val = argument.action(*value)
logger.info("argument %r has value %r", safe_arg_name, arg_val)
self.given_arguments[safe_arg_name] = arg_val
return self.given_arguments | def process(self, argument_list) | :param argument_list: list of str, input from user
:return: dict:
{"cleaned_arg_name": "value"} | 3.456715 | 3.404826 | 1.01524 |
logger.debug("get command for command input %r", command_input)
if not command_input:
# noop, don't do anything
return
if command_input[0] in ["/"]: # we could add here !, @, ...
command_name = command_input[0]
unparsed_command_args = shlex.split(command_input[1:])
else:
command_input_list = shlex.split(command_input)
command_name = command_input_list[0]
unparsed_command_args = command_input_list[1:]
try:
CommandClass = commands_mapping[command_name]
except KeyError:
logger.info("no such command: %r", command_name)
raise NoSuchCommand("There is no such command: %s" % command_name)
else:
cmd = CommandClass(ui=self.ui, docker_backend=self.docker_backend,
docker_object=docker_object, buffer=buffer, size=size)
cmd.process_args(unparsed_command_args)
return cmd | def get_command(self, command_input, docker_object=None, buffer=None, size=None) | return command instance which is the actual command to be executed
:param command_input: str, command name and its args: "command arg arg2=val opt"
:param docker_object:
:param buffer:
:param size: tuple, so we can call urwid.keypress(size, ...)
:return: instance of Command | 3.017115 | 2.998193 | 1.006311 |
if wav:
f = wave.open(wav, 'rb')
rate = f.getframerate()
channels = f.getnchannels()
width = f.getsampwidth()
def gen(w):
d = w.readframes(CHUNK_SIZE)
while d:
yield d
d = w.readframes(CHUNK_SIZE)
w.close()
data = gen(f)
self.stop_event.clear()
if block:
self._play(data, rate, channels, width, spectrum)
else:
thread = threading.Thread(target=self._play, args=(data, rate, channels, width, spectrum))
thread.start() | def play(self, wav=None, data=None, rate=16000, channels=1, width=2, block=True, spectrum=None) | play wav file or raw audio (string or generator)
Args:
wav: wav file path
data: raw audio data, str or iterator
rate: sample rate, only for raw audio
channels: channel number, only for raw data
width: raw audio data width, 16 bit is 2, only for raw data
block: if true, block until audio is played.
spectrum: if true, use a spectrum analyzer thread to analyze data | 2.067215 | 2.111332 | 0.979105 |
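The wav branch above wraps `wave.readframes()` in a generator so audio can be streamed chunk by chunk. A self-contained sketch of that generator; the chunk size and file name are assumptions:

```python
import wave

CHUNK_SIZE = 1024  # frames per read; the real constant is defined elsewhere

def wav_chunks(path):
    w = wave.open(path, 'rb')
    try:
        data = w.readframes(CHUNK_SIZE)
        while data:
            yield data
            data = w.readframes(CHUNK_SIZE)
    finally:
        w.close()

# for chunk in wav_chunks("hello.wav"):  # hypothetical file
#     stream.write(chunk)
```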
if platform.machine() == 'mips':
command = 'madplay -o wave:- - | aplay -M'
else:
command = 'ffplay -autoexit -nodisp -'
if mp3:
def gen(m):
with open(m, 'rb') as f:
d = f.read(1024)
while d:
yield d
d = f.read(1024)
data = gen(mp3)
if isinstance(data, types.GeneratorType):
p = subprocess.Popen(command, stdin=subprocess.PIPE, shell=True)
for d in data:
p.stdin.write(d)
p.stdin.close()
else:
with tempfile.NamedTemporaryFile(mode='w+b') as f:
f.write(data)
f.flush()
f.seek(0)
p = subprocess.Popen(command, stdin=f, shell=True)
if block:
p.wait() | def play_mp3(self, mp3=None, data=None, block=True) | It supports GeneratorType mp3 stream or mp3 data string
Args:
mp3: mp3 file
data: mp3 generator or data
block: if true, block until audio is played. | 2.563278 | 2.487761 | 1.030355 |
val = self._fd.read()
self._fd.seek(0)
return int(val) | def read(self) | Read pin value
@rtype: int
@return: I{0} when LOW, I{1} when HIGH | 6.713024 | 7.172066 | 0.935996 |
# find all devices matching the vid/pid specified
dev = usb.core.find(idVendor=0x2886, idProduct=0x0007)
if not dev:
logging.debug("No device connected")
return []
interface_number = -1
# get active config
config = dev.get_active_configuration()
# iterate on all interfaces:
# - if we found a HID interface
for interface in config:
if interface.bInterfaceClass == 0x03:
interface_number = interface.bInterfaceNumber
break
if interface_number == -1:
return []
try:
if dev.is_kernel_driver_active(interface_number):
dev.detach_kernel_driver(interface_number)
except Exception as e:
print(e)
ep_in, ep_out = None, None
for ep in interface:
if ep.bEndpointAddress & 0x80:
ep_in = ep
else:
ep_out = ep
if not ep_in:
logging.error('Endpoints not found')
return []
board = PyUSB()
board.ep_in = ep_in
board.ep_out = ep_out
board.dev = dev
board.intf_number = interface_number
board.start_rx()
return [board] | def getAllConnectedInterface() | returns all the connected devices that match PyUSB.vid/PyUSB.pid.
returns an array of PyUSB (Interface) objects | 2.932176 | 2.728386 | 1.074692 |
# report_size = 64
# if self.ep_out:
# report_size = self.ep_out.wMaxPacketSize
#
# for _ in range(report_size - len(data)):
# data.append(0)
self.read_sem.release()
if not self.ep_out:
bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface
bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0)
wValue = 0x200 #Issuing an OUT report
wIndex = self.intf_number #mBed Board interface number for HID
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
#raise ValueError('EP_OUT endpoint is NULL')
self.ep_out.write(data)
#logging.debug('sent: %s', data)
return | def write(self, data) | write data on the OUT endpoint associated to the HID interface | 7.221054 | 6.314714 | 1.143528 |
logging.debug("closing interface")
self.closed = True
self.read_sem.release()
self.thread.join()
usb.util.dispose_resources(self.dev) | def close(self) | close the interface | 5.437877 | 4.84621 | 1.122089 |
all_devices = hid.find_all_hid_devices()
# find devices with good vid/pid
all_mbed_devices = []
for d in all_devices:
if (d.product_name.find("MicArray") >= 0):
all_mbed_devices.append(d)
boards = []
for dev in all_mbed_devices:
try:
dev.open(shared=False)
report = dev.find_output_reports()
if (len(report) == 1):
new_board = PyWinUSB()
new_board.report = report[0]
new_board.vendor_name = dev.vendor_name
new_board.product_name = dev.product_name
new_board.serial_number = dev.serial_number
new_board.vid = dev.vendor_id
new_board.pid = dev.product_id
new_board.device = dev
new_board.device.set_raw_data_handler(new_board.rx_handler)
boards.append(new_board)
except Exception as e:
logging.error("Receiving Exception: %s", e)
dev.close()
return boards | def getAllConnectedInterface() | returns all the connected CMSIS-DAP devices | 3.040233 | 3.060113 | 0.993504 |
for _ in range(64 - len(data)):
data.append(0)
#logging.debug("send: %s", data)
self.report.send(bytearray([0]) + data)
return | def write(self, data) | write data on the OUT endpoint associated to the HID interface | 6.178428 | 5.407749 | 1.142514 |
start = time()
while len(self.rcv_data) == 0:
if time() - start > timeout:
# Read operations should typically take ~1-2ms.
# If this exception occurs, then it could indicate
# a problem in one of the following areas:
# 1. Bad usb driver causing either a dropped read or write
# 2. CMSIS-DAP firmware problem cause a dropped read or write
# 3. CMSIS-DAP is performing a long operation or is being
# halted in a debugger
raise Exception("Read timed out")
return self.rcv_data.popleft() | def read(self, timeout=1.0) | read data on the IN endpoint associated to the HID interface | 8.427095 | 8.427782 | 0.999919 |
devices = hid.enumerate()
if not devices:
logging.debug("No Mbed device connected")
return []
boards = []
for deviceInfo in devices:
product_name = deviceInfo['product_string']
if (product_name.find("MicArray") < 0):
# Skip non cmsis-dap devices
continue
try:
dev = hid.device(vendor_id=deviceInfo['vendor_id'], product_id=deviceInfo['product_id'],
path=deviceInfo['path'])
except IOError:
logging.debug("Failed to open Mbed device")
continue
# Create the USB interface object for this device.
new_board = HidApiUSB()
new_board.vendor_name = deviceInfo['manufacturer_string']
new_board.product_name = deviceInfo['product_string']
new_board.serial_number = deviceInfo['serial_number']
new_board.vid = deviceInfo['vendor_id']
new_board.pid = deviceInfo['product_id']
new_board.device_info = deviceInfo
new_board.device = dev
try:
dev.open_path(deviceInfo['path'])
except AttributeError:
pass
except IOError:
# Ignore failure to open a device by skipping the device.
continue
boards.append(new_board)
return boards | def getAllConnectedInterface() | returns all the connected devices that match HidApiUSB.vid/HidApiUSB.pid.
returns an array of HidApiUSB (Interface) objects | 3.181755 | 2.959294 | 1.075174 |
for _ in range(64 - len(data)):
data.append(0)
#logging.debug("send: %s", data)
self.device.write(bytearray([0]) + data)
return | def write(self, data) | write data on the OUT endpoint associated to the HID interface | 5.00316 | 4.607761 | 1.085811 |
return CDFepoch.encode_tt2000(epochs, iso_8601)
elif (isinstance(epochs, float) or isinstance(epochs, np.float64)):
return CDFepoch.encode_epoch(epochs, iso_8601)
elif (isinstance(epochs, complex) or isinstance(epochs, np.complex128)):
return CDFepoch.encode_epoch16(epochs, iso_8601)
elif (isinstance(epochs, list) or isinstance(epochs, np.ndarray)):
if (isinstance(epochs[0], int) or isinstance(epochs[0], np.int64)):
return CDFepoch.encode_tt2000(epochs, iso_8601)
elif (isinstance(epochs[0], float) or
isinstance(epochs[0], np.float64)):
return CDFepoch.encode_epoch(epochs, iso_8601)
elif (isinstance(epochs[0], complex) or
isinstance(epochs[0], np.complex128)):
return CDFepoch.encode_epoch16(epochs, iso_8601)
else:
print('Bad input')
return None
else:
print('Bad input')
return None | def encode(epochs, iso_8601=True): # @NoSelf
if (isinstance(epochs, int) or isinstance(epochs, np.int64)) | Encodes the epoch(s) into UTC string(s).
For CDF_EPOCH:
The input should be either a float or list of floats
(in numpy, a np.float64 or a np.ndarray of np.float64)
Each epoch is encoded, by default to a ISO 8601 form:
2004-05-13T15:08:11.022
Or, if iso_8601 is set to False,
13-May-2004 15:08:11.022
For CDF_EPOCH16:
The input should be either a complex or list of
complex(in numpy, a np.complex128 or a np.ndarray of np.complex128)
Each epoch is encoded, by default to a ISO 8601 form:
2004-05-13T15:08:11.022033044055
Or, if iso_8601 is set to False,
13-May-2004 15:08:11.022.033.044.055
For TT2000:
The input should be either a int or list of ints
(in numpy, a np.int64 or a np.ndarray of np.int64)
Each epoch is encoded, by default to a ISO 8601 form:
2008-02-02T06:08:10.012014016
Or, if iso_8601 is set to False,
02-Feb-2008 06:08:10.012.014.016 | 1.634838 | 1.465975 | 1.115188 |
if len(cdf_time) == 1:
time_list = [time_list]
else:
time_list = [time_list]
unixtime = []
for t in time_list:
date = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
for i in range(0, len(t)):
if i > 7:
continue
elif i == 6:
date[i] = 1000*t[i]
elif i == 7:
date[i-1] += t[i]
else:
date[i] = t[i]
unixtime.append(datetime.datetime(*date).replace(tzinfo=datetime.timezone.utc).timestamp())
return np.array(unixtime) if to_np else unixtime | def unixtime(cdf_time, to_np=False): # @NoSelf
import datetime
time_list = CDFepoch.breakdown(cdf_time, to_np=False)
#Check if only one time was input into unixtime.
#If so, turn the output of breakdown into a list for this function to work
if hasattr(cdf_time, '__len__') | Encodes the epoch(s) into seconds after 1970-01-01. Precision is only
kept to the nearest microsecond.
If to_np is True, then the values will be returned in a numpy array. | 2.199254 | 2.297651 | 0.957175 |
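Per the indexing in the loop above, the breakdown list is [year, month, day, hour, minute, second, millisecond, microsecond, ...]: milliseconds are scaled to microseconds, microseconds are added on top, and the result is interpreted as UTC. A sketch for a single breakdown:

```python
import datetime

def breakdown_to_unixtime(t):
    year, month, day, hour, minute, second = t[:6]
    microsecond = 1000 * t[6] + (t[7] if len(t) > 7 else 0)
    dt = datetime.datetime(year, month, day, hour, minute, second, microsecond,
                           tzinfo=datetime.timezone.utc)
    return dt.timestamp()

print(breakdown_to_unixtime([2004, 5, 13, 15, 8, 11, 22, 33]))  # -> 1084460891.022033
```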
raise TypeError('datetime must be in list form')
if isinstance(datetimes[0], numbers.Number):
items = len(datetimes)
elif isinstance(datetimes[0], (list, tuple, np.ndarray)):
items = len(datetimes[0])
else:
print('Unknown input')
return
if (items == 7):
return CDFepoch.compute_epoch(datetimes, to_np)
elif (items == 10):
return CDFepoch.compute_epoch16(datetimes, to_np)
elif (items == 9):
return CDFepoch.compute_tt2000(datetimes, to_np)
else:
print('Unknown input')
return | def compute(datetimes, to_np=None): # @NoSelf
if not isinstance(datetimes, (list, tuple, np.ndarray)) | Computes the provided date/time components into CDF epoch value(s).
For CDF_EPOCH:
For computing into CDF_EPOCH value, each date/time elements should
have exactly seven (7) components, as year, month, day, hour, minute,
second and millisecond, in a list. For example:
[[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to seven) components. The last component, if
not the 7th, can be a float that can have a fraction of the unit.
For CDF_EPOCH16:
They should have exactly ten (10) components, as year,
month, day, hour, minute, second, millisecond, microsecond, nanosecond
and picosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to ten) components. The last component, if
not the 10th, can be a float that can have a fraction of the unit.
For TT2000:
Each TT2000 typed date/time should have exactly nine (9) components, as
year, month, day, hour, minute, second, millisecond, microsecond,
and nanosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]]
Or, call function compute_tt2000 directly, instead, with at least three
(3) first (up to nine) components. The last component, if
not the 9th, can be a float that can have a fraction of the unit.
Specify to_np to True, if the result should be in numpy class. | 3.02936 | 2.189235 | 1.383753 |
return CDFepoch.epochrange_epoch(epochs, starttime, endtime)
elif (isinstance(epochs, int) or isinstance(epochs, np.int64)):
return CDFepoch.epochrange_tt2000(epochs, starttime, endtime)
elif isinstance(epochs, (complex, np.complex128)):
return CDFepoch.epochrange_epoch16(epochs, starttime, endtime)
elif isinstance(epochs, (list, tuple, np.ndarray)):
if (isinstance(epochs[0], float) or
isinstance(epochs[0], np.float64)):
return CDFepoch.epochrange_epoch(epochs, starttime, endtime)
elif (isinstance(epochs[0], int) or
isinstance(epochs[0], np.int64)):
return CDFepoch.epochrange_tt2000(epochs, starttime, endtime)
elif (isinstance(epochs[0], complex) or
isinstance(epochs[0], np.complex128)):
return CDFepoch.epochrange_epoch16(epochs, starttime, endtime)
else:
print('Bad input')
return None
else:
print('Bad input')
return None | def findepochrange(epochs, starttime=None, endtime=None): # @NoSelf
if (isinstance(epochs, float) or isinstance(epochs, np.float64)) | Finds the record range within the start and end time from values
of a CDF epoch data type. It returns a list of record numbers.
If the start time is not provided, then it is
assumed to be the minimum possible value. If the end time is not
provided, then the maximum possible value is assumed. The epoch is
assumed to be in the chronological order. The start and end times
should have the proper number of date/time components, corresponding
to the epoch's data type.
The start/end times should either be in epoch units, or in the list
format described in "compute_epoch/epoch16/tt2000" section. | 1.741809 | 1.690424 | 1.030398 |
print('Invalid value... should be a string or a list of string')
return None
elif ((not (isinstance(value, list))) and
(not (isinstance(value, tuple))) and
(not (isinstance(value, str)))):
print('Invalid value... should be a string or a list of string')
return None
else:
if (isinstance(value, list) or isinstance(value, tuple)):
num = len(value)
epochs = []
for x in range(0, num):
epochs.append(CDFepoch._parse_epoch(value[x]))
if (to_np == None):
return epochs
else:
return np.array(epochs)
else:
if (to_np == None):
return CDFepoch._parse_epoch(value)
else:
return np.array(CDFepoch._parse_epoch(value)) | def parse(value, to_np=None): # @NoSelf
if ((isinstance(value, list) or isinstance(value, tuple)) and
not (isinstance(value[0], str))) | Parses the provided date/time string(s) into CDF epoch value(s).
For CDF_EPOCH:
The string has to be in the form of 'dd-mmm-yyyy hh:mm:ss.xxx' or
'yyyy-mm-ddThh:mm:ss.xxx' (in iso_8601). The string is the output
from encode function.
For CDF_EPOCH16:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn.ppp' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnnppp' (in iso_8601). The string is
the output from encode function.
For TT2000:
The string has to be in the form of
'dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn' or
'yyyy-mm-ddThh:mm:ss.mmmuuunnn' (in iso_8601). The string is
the output from encode function.
Specify to_np to True, if the result should be in numpy class. | 2.489784 | 2.223766 | 1.119625 |
def getVersion(): # @NoSelf
print('epochs version:', str(CDFepoch.version) + '.' +
str(CDFepoch.release) + '.'+str(CDFepoch.increment)) | Shows the code version. | null | null | null |
|
def getLeapSecondLastUpdated(): # @NoSelf
print('Leap second last updated:', str(CDFepoch.LTS[-1][0]) + '-' +
str(CDFepoch.LTS[-1][1]) + '-' + str(CDFepoch.LTS[-1][2])) | Shows the latest date a leap second was added to the leap second table. | null | null | null |
|
'''
Closes the CDF Class.
1. If compression was set, this is where the compressed file is
written.
2. If a checksum is needed, this will place the checksum at the end
of the file.
'''
if self.compressed_file is None:
with self.path.open('rb+') as f:
f.seek(0, 2)
eof = f.tell()
self._update_offset_value(f, self.gdr_head+36, 8, eof)
if self.checksum:
f.write(self._md5_compute(f))
return
# %%
with self.path.open('rb+') as f:
f.seek(0, 2)
eof = f.tell()
self._update_offset_value(f, self.gdr_head+36, 8, eof)
with self.compressed_file.open('wb+') as g:
g.write(bytearray.fromhex(CDF.V3magicNUMBER_1))
g.write(bytearray.fromhex(CDF.V3magicNUMBER_2c))
self._write_ccr(f, g, self.compression)
if self.checksum:
g.seek(0, 2)
g.write(self._md5_compute(g))
self.path.unlink() # NOTE: for Windows this is necessary
self.compressed_file.rename(self.path) | def close(self) | Closes the CDF Class.
1. If compression was set, this is where the compressed file is
written.
2. If a checksum is needed, this will place the checksum at the end
of the file. | 4.362647 | 3.111916 | 1.401917 |
'''
Writes ADRs and AEDRs for variables
Parameters:
f : file
The open CDF file
varNum : int
The variable number for adding attributes
var_attrs : dict
A dictionary object full of variable attributes
zVar : bool
True if varNum is referencing a z variable
Returns: None
'''
if (not isinstance(var_attrs, dict)):
raise TypeError('Variable attribute(s) should be in dictionary form.')
for attr, entry in var_attrs.items():
if (attr in self.gattrs):
print('Attribute: ', attr,
' already defined as a global attribute... Skip')
continue
if not (attr in self.attrs):
attrNum, offset = self._write_adr(f, False, attr)
if (len(self.attrs) == 0):
# GDR's ADRhead
self._update_offset_value(f, self.gdr_head+28, 8, offset)
else:
attrNum = self.attrs.index(attr)
offset = self.attrsinfo[attrNum][2]
if (entry is None):
continue
# Check if dataType was provided
dataType = 0
if (isinstance(entry, list) or isinstance(entry, tuple)):
items = len(entry)
if (items == 2):
dataType = CDF._datatype_token(entry[1])
if (dataType > 0):
# CDF data type defined in entry
data = entry[0]
if (CDF._checklistofNums(data)):
# All are numbers
if (isinstance(data, list) or isinstance(data, tuple)):
numElems = len(data)
else:
numElems = 1
else:
# Then string(s) -- either in CDF_type or epoch in string(s)
if (dataType == CDF.CDF_CHAR or dataType == CDF.CDF_UCHAR):
if isinstance(data, (list, tuple)):
items = len(data)
odata = data
data = str('')
for x in range(0, items):
if (x > 0):
data += str('\\N ')
data += odata[x]
else:
data = odata[x]
numElems = len(data)
elif (dataType == CDF.CDF_EPOCH or dataType == CDF.CDF_EPOCH16
or dataType == CDF.CDF_TIME_TT2000):
cvalue = []
if isinstance(data, (list, tuple)):
numElems = len(data)
for x in range(0, numElems):
cvalue.append(cdfepoch.CDFepoch.parse(data[x]))
data = cvalue
else:
data = cdfepoch.CDFepoch.parse(data)
numElems = 1
else:
# No data type defined...
data = entry
if isinstance(entry, (list, tuple)):
numElems, dataType = CDF._datatype_define(entry[0])
if (dataType == CDF.CDF_CHAR or dataType == CDF.CDF_UCHAR):
data = str('')
for x in range(0, len(entry)):
if (x > 0):
data += str('\\N ')
data += entry[x]
else:
data = entry[x]
numElems = len(data)
else:
numElems, dataType = CDF._datatype_define(entry)
offset = self._write_aedr(f, False, attrNum, varNum, data, dataType, numElems, zVar)
self._update_aedr_link(f, attrNum, zVar, varNum, offset) | def _write_var_attrs(self, f, varNum, var_attrs, zVar) | Writes ADRs and AEDRs for variables
Parameters:
f : file
The open CDF file
varNum : int
The variable number for adding attributes
var_attrs : dict
A dictionary object full of variable attributes
zVar : bool
True if varNum is referencing a z variable
Returns: None | 3.620916 | 3.111914 | 1.163566 |
'''
Writes a VVR and a VXR for this block of sparse data
Parameters:
f : file
The open CDF file
zVar : bool
True if this is for a z variable
var : int
The variable number
dataType : int
The CDF data type of this variable
numElems : str
The number of elements in each record
recVary : bool
True if the value varies across records
oneblock: list
A list of data in the form [startrec, endrec, [data]]
Returns:
recend : int
Just the "endrec" value input by the user in "oneblock"
'''
rec_start = oneblock[0]
rec_end = oneblock[1]
indata = oneblock[2]
numValues = self._num_values(zVar, var)
# Convert oneblock[2] into a byte stream
_, data = self._convert_data(dataType, numElems, numValues, indata)
# Gather dimension information
if zVar:
vdr_offset = self.zvarsinfo[var][1]
else:
vdr_offset = self.rvarsinfo[var][1]
# Write one VVR
offset = self._write_vvr(f, data)
f.seek(vdr_offset+28, 0)
# Get first VXR
vxrOne = int.from_bytes(f.read(8), 'big', signed=True)
foundSpot = 0
usedEntries = 0
currentVXR = 0
# Search through VXRs to find an open one
while foundSpot == 0 and vxrOne > 0:
# have a VXR
f.seek(vxrOne, 0)
currentVXR = f.tell()
f.seek(vxrOne+12, 0)
vxrNext = int.from_bytes(f.read(8), 'big', signed=True)
nEntries = int.from_bytes(f.read(4), 'big', signed=True)
usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
if (usedEntries == nEntries):
# all entries are used -- check the next vxr in link
vxrOne = vxrNext
else:
# found a vxr with an available entry spot
foundSpot = 1
# vxrOne == 0 from vdr's vxrhead vxrOne == -1 from a vxr's vxrnext
if (vxrOne == 0 or vxrOne == -1):
# no available vxr... create a new one
currentVXR = self._create_vxr(f, rec_start, rec_end, vdr_offset,
currentVXR, offset)
else:
self._use_vxrentry(f, currentVXR, rec_start, rec_end, offset)
# Modify the VDR's MaxRec if needed
f.seek(vdr_offset+24, 0)
recNumc = int.from_bytes(f.read(4), 'big', signed=True)
if (rec_end > recNumc):
self._update_offset_value(f, vdr_offset+24, 4, rec_end)
return rec_end | def _write_var_data_sparse(self, f, zVar, var, dataType, numElems, recVary,
oneblock) | Writes a VVR and a VXR for this block of sparse data
Parameters:
f : file
The open CDF file
zVar : bool
True if this is for a z variable
var : int
The variable number
dataType : int
The CDF data type of this variable
numElems : str
The number of elements in each record
recVary : bool
True if the value varies across records
oneblock: list
A list of data in the form [startrec, endrec, [data]]
Returns:
recend : int
Just the "endrec" value input by the user in "oneblock" | 3.93887 | 2.867701 | 1.373529 |
'''
Create a VXR AND use a VXR
Parameters:
f : file
The open CDF file
recStart : int
The start record of this block
recEnd : int
The ending record of this block
currentVDR : int
The byte location of the variables VDR
priorVXR : int
The byte location of the previous VXR
vvrOffset : int
The byte location of the VVR
Returns:
vxroffset : int
The byte location of the created vxr
'''
# add a VXR, use an entry, and link it to the prior VXR if it exists
vxroffset = self._write_vxr(f)
self._use_vxrentry(f, vxroffset, recStart, recEnd, vvrOffset)
if (priorVXR == 0):
# VDR's VXRhead
self._update_offset_value(f, currentVDR+28, 8, vxroffset)
else:
# VXR's next
self._update_offset_value(f, priorVXR+12, 8, vxroffset)
# VDR's VXRtail
self._update_offset_value(f, currentVDR+36, 8, vxroffset)
return vxroffset | def _create_vxr(self, f, recStart, recEnd, currentVDR, priorVXR, vvrOffset) | Create a VXR AND use a VXR
Parameters:
f : file
The open CDF file
recStart : int
The start record of this block
recEnd : int
The ending record of this block
currentVDR : int
The byte location of the variables VDR
priorVXR : int
The byte location of the previous VXR
vvrOffset : int
The byte location of the VVR
Returns:
vxroffset : int
The byte location of the created vxr | 3.847107 | 2.302433 | 1.670888 |
'''
Adds a VVR pointer to a VXR
'''
# Select the next unused entry in a VXR for a VVR/CVVR
f.seek(VXRoffset+20)
# num entries
numEntries = int.from_bytes(f.read(4), 'big', signed=True)
# used entries
usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
# VXR's First
self._update_offset_value(f, VXRoffset+28+4*usedEntries, 4, recStart)
# VXR's Last
self._update_offset_value(f, VXRoffset+28+4*numEntries+4*usedEntries,
4, recEnd)
# VXR's Offset
self._update_offset_value(f, VXRoffset+28+2*4*numEntries+8*usedEntries,
8, offset)
# VXR's NusedEntries
usedEntries += 1
self._update_offset_value(f, VXRoffset+24, 4, usedEntries)
return usedEntries | def _use_vxrentry(self, f, VXRoffset, recStart, recEnd, offset) | Adds a VVR pointer to a VXR | 3.364803 | 3.050399 | 1.10307 |
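The offset arithmetic above follows the VXR layout written by `_write_vxr`: a 28-byte header followed by three parallel arrays (First, Last, Offset). A small sketch computing where entry k of an n-entry VXR lives:

```python
def vxr_entry_offsets(vxr_offset, n_entries, k):
    first = vxr_offset + 28 + 4 * k                       # 4-byte first record
    last = vxr_offset + 28 + 4 * n_entries + 4 * k        # 4-byte last record
    offset = vxr_offset + 28 + 2 * 4 * n_entries + 8 * k  # 8-byte VVR location
    return first, last, offset

print(vxr_entry_offsets(1000, 7, 0))  # -> (1028, 1056, 1084)
```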
'''
Build a new level of VXRs... make VXRs more tree-like
From:
VXR1 -> VXR2 -> VXR3 -> VXR4 -> ... -> VXRn
To:
new VXR1
/ | \
VXR2 VXR3 VXR4
/ | \
...
VXR5 .......... VXRn
Parameters:
f : file
The open CDF file
vxrhead : int
The byte location of the first VXR for a variable
numVXRs : int
The total number of VXRs
Returns:
newVXRhead : int
The byte location of the newest VXR head
newvxroff : int
The byte location of the last VXR head
'''
newNumVXRs = int(numVXRs / CDF.NUM_VXRlvl_ENTRIES)
remaining = int(numVXRs % CDF.NUM_VXRlvl_ENTRIES)
vxroff = vxrhead
prevxroff = -1
if (remaining != 0):
newNumVXRs += 1
CDF.level += 1
for x in range(0, newNumVXRs):
newvxroff = self._write_vxr(f, numEntries=CDF.NUM_VXRlvl_ENTRIES)
if (x > 0):
self._update_offset_value(f, prevxroff+12, 8, newvxroff)
else:
newvxrhead = newvxroff
prevxroff = newvxroff
if (x == (newNumVXRs - 1)):
if (remaining == 0):
endEntry = CDF.NUM_VXRlvl_ENTRIES
else:
endEntry = remaining
else:
endEntry = CDF.NUM_VXRlvl_ENTRIES
for _ in range(0, endEntry):
recFirst, recLast = self._get_recrange(f, vxroff)
self._use_vxrentry(f, newvxroff, recFirst, recLast, vxroff)
vxroff = self._read_offset_value(f, vxroff+12, 8)
vxroff = vxrhead
# Break the horizontal links
for x in range(0, numVXRs):
nvxroff = self._read_offset_value(f, vxroff+12, 8)
self._update_offset_value(f, vxroff+12, 8, 0)
vxroff = nvxroff
# Iterate this process if we're over NUM_VXRlvl_ENTRIES
if (newNumVXRs > CDF.NUM_VXRlvl_ENTRIES):
return self._add_vxr_levels_r(f, newvxrhead, newNumVXRs)
else:
return newvxrhead, newvxroff | def _add_vxr_levels_r(self, f, vxrhead, numVXRs) | Build a new level of VXRs... make VXRs more tree-like
From:
VXR1 -> VXR2 -> VXR3 -> VXR4 -> ... -> VXRn
To:
new VXR1
/ | \
VXR2 VXR3 VXR4
/ | \
...
VXR5 .......... VXRn
Parameters:
f : file
The open CDF file
vxrhead : int
The byte location of the first VXR for a variable
numVXRs : int
The total number of VXRs
Returns:
newVXRhead : int
The byte location of the newest VXR head
newvxroff : int
The byte location of the last VXR head | 3.294071 | 2.126454 | 1.549091 |
'''
This sets a VXR to be the first and last VXR in the VDR
'''
# VDR's VXRhead
self._update_offset_value(f, vdr_offset+28, 8, VXRoffset)
# VDR's VXRtail
self._update_offset_value(f, vdr_offset+36, 8, VXRoffset) | def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset) | This sets a VXR to be the first and last VXR in the VDR | 4.047579 | 2.735396 | 1.479705 |
'''
Finds the first and last record numbers pointed by the VXR
Assumes the VXRs are in order
'''
f.seek(VXRoffset+20)
# Num entries
numEntries = int.from_bytes(f.read(4), 'big', signed=True)
# used entries
usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
# VXR's First record
firstRec = int.from_bytes(f.read(4), 'big', signed=True)
# VXR's Last record
f.seek(VXRoffset+28+(4*numEntries+4*(usedEntries-1)))
lastRec = int.from_bytes(f.read(4), 'big', signed=True)
return firstRec, lastRec | def _get_recrange(self, f, VXRoffset) | Finds the first and last record numbers pointed by the VXR
Assumes the VXRs are in order | 3.148989 | 2.29282 | 1.373413 |
datatype : int
CDF variable data type
numElms : int
number of elements
Returns:
numBytes : int
The number of bytes for the data
'''
sizes = {1: 1,
2: 2,
4: 4,
8: 8,
11: 1,
12: 2,
14: 4,
21: 4,
22: 8,
31: 8,
32: 16,
33: 8,
41: 1,
44: 4,
45: 8,
51: 1,
52: 1}
try:
if (isinstance(datatype, int)):
if (datatype == 51 or datatype == 52):
return numElms
else:
return sizes[datatype]
else:
datatype = datatype.upper()
if (datatype == 'CDF_INT1' or datatype == 'CDF_UINT1' or
datatype == 'CDF_BYTE'):
return 1
elif (datatype == 'CDF_INT2' or datatype == 'CDF_UINT2'):
return 2
elif (datatype == 'CDF_INT4' or datatype == 'CDF_UINT4'):
return 4
elif (datatype == 'CDF_INT8' or datatype == 'CDF_TIME_TT2000'):
return 8
elif (datatype == 'CDF_REAL4' or datatype == 'CDF_FLOAT'):
return 4
elif (datatype == 'CDF_REAL8' or datatype == 'CDF_DOUBLE' or
datatype == 'CDF_EPOCH'):
return 8
elif (datatype == 'CDF_EPOCH16'):
return 16
elif (datatype == 'CDF_CHAR' or datatype == 'CDF_UCHAR'):
return numElms
else:
return -1
except Exception:
return -1 | def _datatype_size(datatype, numElms): # @NoSelf
'''
Gets datatype size
Parameters | Gets datatype size
Parameters:
datatype : int
CDF variable data type
numElms : int
number of elements
Returns:
numBytes : int
The number of bytes for the data | 2.040994 | 1.912844 | 1.066994 |
'''
Writes and ADR to the end of the file.
Additionally, it will update the offset values to either the previous ADR
or the ADRhead field in the GDR.
Parameters:
f : file
The open CDF file
gORv : bool
True if a global attribute, False if variable attribute
name : str
name of the attribute
Returns:
num : int
The attribute number
byte_loc : int
The current location in file f
'''
f.seek(0, 2)
byte_loc = f.tell()
block_size = CDF.ADR_BASE_SIZE64
section_type = CDF.ADR_
nextADR = 0
headAgrEDR = 0
if (gORv == True):
scope = 1
else:
scope = 2
num = len(self.attrs)
ngrEntries = 0
maxgrEntry = -1
rfuA = 0
headAzEDR = 0
nzEntries = 0
maxzEntry = -1
rfuE = -1
adr = bytearray(block_size)
adr[0:8] = struct.pack('>q', block_size)
adr[8:12] = struct.pack('>i', section_type)
adr[12:20] = struct.pack('>q', nextADR)
adr[20:28] = struct.pack('>q', headAgrEDR)
adr[28:32] = struct.pack('>i', scope)
adr[32:36] = struct.pack('>i', num)
adr[36:40] = struct.pack('>i', ngrEntries)
adr[40:44] = struct.pack('>i', maxgrEntry)
adr[44:48] = struct.pack('>i', rfuA)
adr[48:56] = struct.pack('>q', headAzEDR)
adr[56:60] = struct.pack('>i', nzEntries)
adr[60:64] = struct.pack('>i', maxzEntry)
adr[64:68] = struct.pack('>i', rfuE)
tofill = 256 - len(name)
adr[68:324] = (name+'\0'*tofill).encode()
f.write(adr)
info = []
info.append(name)
info.append(scope)
info.append(byte_loc)
self.attrsinfo[num] = info
if (scope == 1):
self.gattrs.append(name)
else:
self.vattrs.append(name)
self.attrs.append(name)
if (num > 0):
# ADR's ADRnext
self._update_offset_value(f, self.attrsinfo[num-1][2]+12, 8,
byte_loc)
else:
# GDR's ADRhead
self._update_offset_value(f, self.gdr_head+28, 8, byte_loc)
# GDR's NumAttr
self._update_offset_value(f, self.gdr_head+48, 4, num+1)
return num, byte_loc | def _write_adr(self, f, gORv, name) | Writes and ADR to the end of the file.
Additionally, it will update the offset values to either the previous ADR
or the ADRhead field in the GDR.
Parameters:
f : file
The open CDF file
gORv : bool
True if a global attribute, False if variable attribute
name : str
name of the attribute
Returns:
num : int
The attribute number
byte_loc : int
The current location in file f | 3.198464 | 2.405978 | 1.329382 |
'''
Creates a VXR at the end of the file.
Returns byte location of the VXR
The First, Last, and Offset fields will need to be filled in later
'''
f.seek(0, 2)
byte_loc = f.tell()
section_type = CDF.VXR_
nextVXR = 0
if (numEntries == None):
nEntries = CDF.NUM_VXR_ENTRIES
else:
nEntries = int(numEntries)
block_size = CDF.VXR_BASE_SIZE64 + (4 + 4 + 8) * nEntries
nUsedEntries = 0
firsts = [-1] * nEntries
lasts = [-1] * nEntries
offsets = [-1] * nEntries
vxr = bytearray(block_size)
vxr[0:8] = struct.pack('>q', block_size)
vxr[8:12] = struct.pack('>i', section_type)
vxr[12:20] = struct.pack('>q', nextVXR)
vxr[20:24] = struct.pack('>i', nEntries)
vxr[24:28] = struct.pack('>i', nUsedEntries)
estart = 28 + 4*nEntries
vxr[28:estart] = struct.pack('>%si' % nEntries, *firsts)
eend = estart + 4*nEntries
vxr[estart:eend] = struct.pack('>%si' % nEntries, *lasts)
vxr[eend:block_size] = struct.pack('>%sq' % nEntries, *offsets)
f.write(vxr)
return byte_loc | def _write_vxr(self, f, numEntries=None) | Creates a VXR at the end of the file.
Returns byte location of the VXR
The First, Last, and Offset fields will need to be filled in later | 2.731781 | 2.177579 | 1.254504 |
'''
Writes a vvr to the end of file "f" with the byte stream "data".
'''
f.seek(0, 2)
byte_loc = f.tell()
block_size = CDF.VVR_BASE_SIZE64 + len(data)
section_type = CDF.VVR_
vvr1 = bytearray(12)
vvr1[0:8] = struct.pack('>q', block_size)
vvr1[8:12] = struct.pack('>i', section_type)
f.write(vvr1)
f.write(data)
return byte_loc | def _write_vvr(self, f, data) | Writes a vvr to the end of file "f" with the byte stream "data". | 4.28067 | 3.178108 | 1.346924 |
'''
Write compression info to the end of the file in a CPR.
'''
f.seek(0, 2)
byte_loc = f.tell()
block_size = CDF.CPR_BASE_SIZE64 + 4
section_type = CDF.CPR_
rfuA = 0
pCount = 1
cpr = bytearray(block_size)
cpr[0:8] = struct.pack('>q', block_size)
cpr[8:12] = struct.pack('>i', section_type)
cpr[12:16] = struct.pack('>i', cType)
cpr[16:20] = struct.pack('>i', rfuA)
cpr[20:24] = struct.pack('>i', pCount)
cpr[24:28] = struct.pack('>i', parameter)
f.write(cpr)
return byte_loc | def _write_cpr(self, f, cType, parameter) -> int | Write compression info to the end of the file in a CPR. | 3.271953 | 2.819006 | 1.160676 |
'''
Write compressed "data" variable to the end of the file in a CVVR
'''
f.seek(0, 2)
byte_loc = f.tell()
cSize = len(data)
block_size = CDF.CVVR_BASE_SIZE64 + cSize
section_type = CDF.CVVR_
rfuA = 0
cvvr1 = bytearray(24)
cvvr1[0:8] = struct.pack('>q', block_size)
cvvr1[8:12] = struct.pack('>i', section_type)
cvvr1[12:16] = struct.pack('>i', rfuA)
cvvr1[16:24] = struct.pack('>q', cSize)
f.write(cvvr1)
f.write(data)
return byte_loc | def _write_cvvr(self, f, data) | Write compressed "data" variable to the end of the file in a CVVR | 3.886512 | 3.267636 | 1.189396 |
'''
Write a CCR to file "g" from file "f" with level "level".
Currently, only handles gzip compression.
Parameters:
f : file
Uncompressed file to read from
g : file
File to read the compressed file into
level : int
The level of the compression from 0 to 9
Returns: None
'''
f.seek(8)
data = f.read()
uSize = len(data)
section_type = CDF.CCR_
rfuA = 0
cData = gzip.compress(data, level)
block_size = CDF.CCR_BASE_SIZE64 + len(cData)
cprOffset = 0
ccr1 = bytearray(32)
#ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1)
#ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c)
ccr1[0:8] = struct.pack('>q', block_size)
ccr1[8:12] = struct.pack('>i', section_type)
ccr1[12:20] = struct.pack('>q', cprOffset)
ccr1[20:28] = struct.pack('>q', uSize)
ccr1[28:32] = struct.pack('>i', rfuA)
g.seek(0, 2)
g.write(ccr1)
g.write(cData)
cprOffset = self._write_cpr(g, CDF.GZIP_COMPRESSION, level)
self._update_offset_value(g, 20, 8, cprOffset) | def _write_ccr(self, f, g, level: int) | Write a CCR to file "g" from file "f" with level "level".
Currently, only handles gzip compression.
Parameters:
f : file
Uncompressed file to read from
g : file
File to read the compressed file into
level : int
The level of the compression from 0 to 9
Returns: None | 4.001733 | 2.997882 | 1.334853 |
'''
Determines which symbol to use for numpy conversions
> : a little endian system to big endian ordering
< : a big endian system to little endian ordering
= : No conversion
'''
data_endian = 'little'
if (self._encoding == 1 or self._encoding == 2 or self._encoding == 5 or
self._encoding == 7 or self._encoding == 9 or self._encoding == 11 or
self._encoding == 12 or self._encoding == 18):
data_endian = 'big'
if sys.byteorder == 'little' and data_endian == 'big':
# big->little
order = '>'
elif sys.byteorder == 'big' and data_endian == 'little':
# little->big
order = '<'
else:
# no conversion
order = '='
return order | def _convert_option(self) | Determines which symbol to use for numpy conversions
> : a little endian system to big endian ordering
< : a big endian system to little endian ordering
= : No conversion | 3.994522 | 2.228859 | 1.792183 |
dt_string = 'b'
elif data_type == 2:
dt_string = 'h'
elif data_type == 4:
dt_string = 'i'
elif data_type in (8, 33):
dt_string = 'q'
elif data_type == 11:
dt_string = 'B'
elif data_type == 12:
dt_string = 'H'
elif data_type == 14:
dt_string = 'I'
elif data_type in (21, 44):
dt_string = 'f'
elif data_type in (22, 45, 31):
dt_string = 'd'
elif data_type == 32:
dt_string = 'd'
elif data_type in (51, 52):
dt_string = 's'
else:
dt_string = ''
return dt_string | def _convert_type(data_type): # @NoSelf
'''
Converts CDF data types into python types
'''
if data_type in (1, 41) | Converts CDF data types into python types | 1.781768 | 1.746619 | 1.020124 |
return np.int8(data).tobytes()
elif data_type == 2:
return np.int16(data).tobytes()
elif data_type == 4:
return np.int32(data).tobytes()
elif (data_type == 8) or (data_type == 33):
return np.int64(data).tobytes()
elif data_type == 11:
return np.uint8(data).tobytes()
elif data_type == 12:
return np.uint16(data).tobytes()
elif data_type == 14:
return np.uint32(data).tobytes()
elif (data_type == 21) or (data_type == 44):
return np.float32(data).tobytes()
elif (data_type == 22) or (data_type == 45) or (data_type == 31):
return np.float64(data).tobytes()
elif (data_type == 32):
return np.complex128(data).tobytes()
else:
return data | def _convert_nptype(data_type, data): # @NoSelf
'''
Converts "data" of CDF type "data_type" into a numpy array
'''
if data_type in (1, 41) | Converts "data" of CDF type "data_type" into a numpy array | 1.510528 | 1.48051 | 1.020275 |
'''
Determines the default pad data for a "data_type"
'''
order = self._convert_option()
if (data_type == 1) or (data_type == 41):
pad_value = struct.pack(order+'b', -127)
elif data_type == 2:
pad_value = struct.pack(order+'h', -32767)
elif data_type == 4:
pad_value = struct.pack(order+'i', -2147483647)
elif (data_type == 8) or (data_type == 33):
pad_value = struct.pack(order+'q', -9223372036854775807)
elif data_type == 11:
pad_value = struct.pack(order+'B', 254)
elif data_type == 12:
pad_value = struct.pack(order+'H', 65534)
elif data_type == 14:
pad_value = struct.pack(order+'I', 4294967294)
elif (data_type == 21) or (data_type == 44):
pad_value = struct.pack(order+'f', -1.0E30)
elif (data_type == 22) or (data_type == 45):
pad_value = struct.pack(order+'d', -1.0E30)
elif (data_type == 31):
pad_value = struct.pack(order+'d', 0.0)
elif (data_type == 32):
pad_value = struct.pack(order+'2d', *[0.0, 0.0])
elif (data_type == 51) or (data_type == 52):
tmpPad = str(' '*numElems).encode()
form = str(numElems)
pad_value = struct.pack(form+'b', *tmpPad)
return pad_value | def _default_pad(self, data_type, numElems) | Determines the default pad data for a "data_type" | 1.984705 | 1.885772 | 1.052463 |
'''
Determines the number of values in a record.
Set zVar=True if this is a zvariable.
'''
values = 1
if (zVar == True):
numDims = self.zvarsinfo[varNum][2]
dimSizes = self.zvarsinfo[varNum][3]
dimVary = self.zvarsinfo[varNum][4]
else:
numDims = self.rvarsinfo[varNum][2]
dimSizes = self.rvarsinfo[varNum][3]
dimVary = self.rvarsinfo[varNum][4]
if (numDims < 1):
return values
else:
for x in range(0, numDims):
if (zVar == True):
values = values * dimSizes[x]
else:
if (dimVary[x] != 0):
values = values * dimSizes[x]
return values | def _num_values(self, zVar, varNum) | Determines the number of values in a record.
Set zVar=True if this is a zvariable. | 2.575518 | 2.089302 | 1.232717 |
'''
Reads an integer value from file "f" at location "offset".
'''
f.seek(offset, 0)
if (size == 8):
return int.from_bytes(f.read(8), 'big', signed=True)
else:
return int.from_bytes(f.read(4), 'big', signed=True) | def _read_offset_value(self, f, offset, size) | Reads an integer value from file "f" at location "offset". | 2.809583 | 2.106357 | 1.333859 |
'''
Writes "value" into location "offset" in file "f".
'''
f.seek(offset, 0)
if (size == 8):
f.write(struct.pack('>q', value))
else:
f.write(struct.pack('>i', value)) | def _update_offset_value(self, f, offset, size, value) | Writes "value" into location "offset" in file "f". | 3.263273 | 2.210963 | 1.475951 |
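All offsets in the file are patched big-endian: '>q' for 8-byte fields and '>i' for 4-byte ones. A sketch of the same patching against an in-memory buffer:

```python
import io
import struct

def update_offset_value(f, offset, size, value):
    f.seek(offset, 0)
    f.write(struct.pack('>q' if size == 8 else '>i', value))

buf = io.BytesIO(bytearray(16))
update_offset_value(buf, 0, 8, 300)   # 8-byte big-endian integer
update_offset_value(buf, 8, 4, 42)    # 4-byte big-endian integer
print(buf.getvalue().hex())           # -> 000000000000012c0000002a00000000
```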
'''
Computes the checksum of the file
'''
md5 = hashlib.md5()
block_size = 16384
f.seek(0, 2)
remaining = f.tell()
f.seek(0)
while (remaining > block_size):
data = f.read(block_size)
remaining = remaining - block_size
md5.update(data)
if remaining > 0:
data = f.read(remaining)
md5.update(data)
return md5.digest() | def _md5_compute(self, f) | Computes the checksum of the file | 2.365093 | 2.236064 | 1.057703 |
records: list
A list of records that there is data for
Returns:
sparse_blocks: list of list
A list of ranges we have physical values for.
Example:
Input: [1,2,3,4,10,11,12,13,50,51,52,53]
Output: [[1,4],[10,13],[50,53]]
'''
sparse_blocks = []
total = len(records)
if (total == 0):
return []
x = 0
while (x < total):
recstart = records[x]
y = x
recnum = recstart
# Find the location in the records before the next gap
# Call this value "y"
while ((y+1) < total):
y = y + 1
nextnum = records[y]
diff = nextnum - recnum
if (diff == 1):
recnum = nextnum
else:
y = y - 1
break
# Put the values of the records into "ablock", append to sparse_blocks
ablock = []
ablock.append(recstart)
if ((y+1) == total):
recend = records[total-1]
else:
recend = records[y]
x = y + 1
ablock.append(recend)
sparse_blocks.append(ablock)
return sparse_blocks | def _make_blocks(records): # @NoSelf
'''
Organizes the physical records into blocks in a list by
placing consecutive physical records into a single block, so
fewer VXRs will be created.
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Parameters | Organizes the physical records into blocks in a list by
placing consecutive physical records into a single block, so
fewer VXRs will be created.
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Parameters:
records: list
A list of records that there is data for
Returns:
sparse_blocks: list of list
A list of ranges we have physical values for.
Example:
Input: [1,2,3,4,10,11,12,13,50,51,52,53]
Output: [[1,4],[10,13],[50,53]] | 3.407831 | 2.47679 | 1.375906 |
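The grouping rule above can also be sketched independently with itertools.groupby; this is an illustration of the idea only, not the library's implementation, and consecutive_runs is a made-up name:
from itertools import groupby

def consecutive_runs(records):
    # records whose value minus its position is constant form one consecutive run
    runs = []
    for _, grp in groupby(enumerate(records), key=lambda p: p[1] - p[0]):
        members = [rec for _, rec in grp]
        runs.append([members[0], members[-1]])
    return runs

print(consecutive_runs([1, 2, 3, 4, 10, 11, 12, 13, 50, 51, 52, 53]))
# -> [[1, 4], [10, 13], [50, 53]]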
'''
Handles the data for the variable with sparse records.
Organizes the physical record numbers into blocks in a list:
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Place consecutive physical records into a single block
Parameters:
variable: dict
the variable, returned from varinq('variable', expand=True)
records: list
a list of physical records
data: varies
bytes array, numpy.ndarray or list of str form with virtual data
embedded, returned from varget('variable') call
'''
# Gather the ranges for which we have physical data
sparse_blocks = CDF._make_blocks(records)
sparse_data = []
if (isinstance(data, np.ndarray)):
for sblock in sparse_blocks:
# each block in this list: [starting_rec#, ending_rec#, data]
asparse = []
asparse.append(sblock[0])
asparse.append(sblock[1])
starting = sblock[0]
ending = sblock[1]+1
asparse.append(data[starting:ending])
sparse_data.append(asparse)
return sparse_data
elif (isinstance(data, bytes)):
y = 1
for z in range(0, variable['Num_Dims']):
y = y * variable['Dim_Sizes'][z]
y = y * CDF._datatype_size(variable['Data_Type'], variable['Num_Elements'])
for sblock in sparse_blocks:
# each block in this list: [starting_rec#, ending_rec#, data]
asparse = []
asparse.append(sblock[0])
asparse.append(sblock[1])
starting = sblock[0]*y
ending = (sblock[1]+1)*y
asparse.append(data[starting:ending])
sparse_data.append(asparse)
return sparse_data
elif (isinstance(data, list)):
for sblock in sparse_blocks:
# each block in this list: [starting_rec#, ending_rec#, data]
asparse = []
asparse.append(sblock[0])
asparse.append(sblock[1])
records = sblock[1] - sblock[0] + 1
datax = []
ist = sblock[0]
for z in range(0, records):
datax.append(data[ist+z])
asparse.append(datax)
sparse_data.append(asparse)
return sparse_data
else:
print('Can not handle data... Skip')
return None | def _make_sparse_blocks_with_virtual(self, variable, records, data) | Handles the data for the variable with sparse records.
Organizes the physical record numbers into blocks in a list:
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Place consecutive physical records into a single block
Parameters:
variable: dict
the variable, returned from varinq('variable', expand=True)
records: list
a list of physical records
data: varies
bytes array, numpy.ndarray or list of str form with vitual data
embedded, returned from varget('variable') call | 3.857735 | 2.117481 | 1.821851 |
mycdf_info = {}
mycdf_info['CDF'] = self.file
mycdf_info['Version'] = self._version
mycdf_info['Encoding'] = self._encoding
mycdf_info['Majority'] = self._majority
mycdf_info['rVariables'], mycdf_info['zVariables'] = self._get_varnames()
mycdf_info['Attributes'] = self._get_attnames()
mycdf_info['Copyright'] = self._copyright
mycdf_info['Checksum'] = self._md5
mycdf_info['Num_rdim'] = self._num_rdim
mycdf_info['rDim_sizes'] = self._rdim_sizes
mycdf_info['Compressed'] = self._compressed
if (self.cdfversion > 2):
mycdf_info['LeapSecondUpdated'] = self._leap_second_updated
return mycdf_info | def cdf_info(self) | Returns a dictionary that shows the basic CDF information.
This information includes
+---------------+--------------------------------------------------------------------------------+
| ['CDF'] | the name of the CDF |
+---------------+--------------------------------------------------------------------------------+
| ['Version'] | the version of the CDF |
+---------------+--------------------------------------------------------------------------------+
| ['Encoding'] | the endianness of the CDF |
+---------------+--------------------------------------------------------------------------------+
| ['Majority'] | the row/column majority |
+---------------+--------------------------------------------------------------------------------+
| ['zVariables']| the dictionary for zVariable numbers and their corresponding names |
+---------------+--------------------------------------------------------------------------------+
| ['rVariables']| the dictionary for rVariable numbers and their corresponding names |
+---------------+--------------------------------------------------------------------------------+
| ['Attributes']| the dictionary for attribute numbers and their corresponding names and scopes |
+---------------+--------------------------------------------------------------------------------+
| ['Checksum'] | the checksum indicator |
+---------------+--------------------------------------------------------------------------------+
| ['Num_rdim'] | the number of dimensions, applicable only to rVariables |
+---------------+--------------------------------------------------------------------------------+
| ['rDim_sizes'] | the dimensional sizes, applicable only to rVariables |
+---------------+--------------------------------------------------------------------------------+
| ['Compressed']| CDF is compressed at the file-level |
+---------------+--------------------------------------------------------------------------------+
| ['LeapSecondUpdated']| The last updated date of the leap second table, if applicable |
+---------------+--------------------------------------------------------------------------------+ | 3.487556 | 1.974495 | 1.766302 |
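A minimal usage sketch for the reader API described above (it assumes a local file named 'example.cdf' and a cdflib version in which cdf_info() returns this dict):
import cdflib

cdf = cdflib.CDF('example.cdf')   # hypothetical input file
info = cdf.cdf_info()
print(info['Version'], info['Majority'], info['Encoding'])
print(info['zVariables'])         # zVariable names, as described in the table above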
vdr_info = self.varget(variable=variable, inq=True)
if vdr_info is None:
raise KeyError("Variable {} not found.".format(variable))
var = {}
var['Variable'] = vdr_info['name']
var['Num'] = vdr_info['variable_number']
var['Var_Type'] = CDF._variable_token(vdr_info['section_type'])
var['Data_Type'] = vdr_info['data_type']
var['Data_Type_Description'] = CDF._datatype_token(vdr_info['data_type'])
var['Num_Elements'] = vdr_info['num_elements']
var['Num_Dims'] = vdr_info['num_dims']
var['Dim_Sizes'] = vdr_info['dim_sizes']
var['Sparse'] = CDF._sparse_token(vdr_info['sparse'])
var['Last_Rec'] = vdr_info['max_records']
var['Rec_Vary'] = vdr_info['record_vary']
var['Dim_Vary'] = vdr_info['dim_vary']
if ('pad' in vdr_info):
var['Pad'] = vdr_info['pad']
var['Compress'] = vdr_info['compression_level']
if ('blocking_factor' in vdr_info):
var['Block_Factor'] = vdr_info['blocking_factor']
return var | def varinq(self, variable) | Returns a dictionary that shows the basic variable information.
This information includes
+-----------------+--------------------------------------------------------------------------------+
| ['Variable'] | the name of the variable |
+-----------------+--------------------------------------------------------------------------------+
| ['Num'] | the variable number |
+-----------------+--------------------------------------------------------------------------------+
| ['Var_Type'] | the variable type: zVariable or rVariable |
+-----------------+--------------------------------------------------------------------------------+
| ['Data_Type'] | the variable's CDF data type |
+-----------------+--------------------------------------------------------------------------------+
| ['Num_Elements']| the number of elements of the variable |
+-----------------+--------------------------------------------------------------------------------+
| ['Num_Dims'] | the dimensionality of the variable record |
+-----------------+--------------------------------------------------------------------------------+
| ['Dim_Sizes'] | the shape of the variable record |
+-----------------+--------------------------------------------------------------------------------+
| ['Sparse'] | the variable's record sparseness |
+-----------------+--------------------------------------------------------------------------------+
| ['Last_Rec'] | the maximum written record number (0-based) |
+-----------------+--------------------------------------------------------------------------------+
| ['Dim_Vary'] | the dimensional variance(s) |
+-----------------+--------------------------------------------------------------------------------+
| ['Rec_Vary'] | the record variance |
+-----------------+--------------------------------------------------------------------------------+
| ['Pad'] | the padded value if set |
+-----------------+--------------------------------------------------------------------------------+
| ['Compress'] | the GZIP compression level, 0 to 9. 0 if not compressed |
+-----------------+--------------------------------------------------------------------------------+
| ['Block_Factor']| the blocking factor if the variable is compressed |
+-----------------+--------------------------------------------------------------------------------+
Parameters
----------
variable : | 2.795602 | 2.003342 | 1.395469 |
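A short, hedged example of varinq (reusing the cdf object from the sketch above; the variable name 'Epoch' is an assumption):
var = cdf.varinq('Epoch')
print(var['Data_Type_Description'], var['Num_Dims'], var['Dim_Sizes'])
print('last record:', var['Last_Rec'])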
position = self._first_adr
if isinstance(attribute, str):
for _ in range(0, self._num_att):
name, next_adr = self._read_adr_fast(position)
if name.strip().lower() == attribute.strip().lower():
return self._read_adr(position)
position = next_adr
raise KeyError('No attribute {}'.format(attribute))
elif isinstance(attribute, int):
if (attribute < 0 or attribute >= self._num_att):
raise KeyError('No attribute {}'.format(attribute))
for _ in range(0, attribute):
name, next_adr = self._read_adr_fast(position)
position = next_adr
return self._read_adr(position)
else:
print('Please set attribute keyword equal to the name or ',
'number of an attribute')
attrs = self._get_attnames()
print(attrs)
for x in range(0, self._num_att):
name = list(attrs[x].keys())[0]
print('NAME: ' + name + ', NUMBER: ' + str(x) + ', SCOPE: ' + attrs[x][name])
return attrs | def attinq(self, attribute=None) | Get attribute information.
Returns
-------
dict
Dictionary of attribution infromation. | 3.788441 | 3.92543 | 0.965102 |
return self.varget(variable=epoch, starttime=starttime,
endtime=endtime, record_range_only=True) | def epochrange(self, epoch=None, starttime=None, endtime=None) | Get epoch range.
Returns a list of the record numbers, representing the
corresponding starting and ending records within the time
range from the epoch data. A None is returned if there is no
data either written or found in the time range. | 10.747846 | 8.978678 | 1.197041 |
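A hedged call sketch; start_epoch and end_epoch stand for values already expressed in the epoch variable's own CDF epoch type (how they are computed is outside this row):
rng = cdf.epochrange('Epoch', starttime=start_epoch, endtime=end_epoch)
# e.g. [120, 480] -- first and last record numbers inside the window, or None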
byte_loc = self._first_adr
return_dict = {}
for _ in range(0, self._num_att):
adr_info = self._read_adr(byte_loc)
if (adr_info['scope'] != 1):
byte_loc = adr_info['next_adr_location']
continue
if (adr_info['num_gr_entry'] == 0):
if (expand is not False):
return_dict[adr_info['name']] = None
byte_loc = adr_info['next_adr_location']
continue
if (expand is False):
entries = []
else:
entries = {}
aedr_byte_loc = adr_info['first_gr_entry']
for _ in range(0, adr_info['num_gr_entry']):
if (self.cdfversion == 3):
aedr_info = self._read_aedr(aedr_byte_loc, to_np=to_np)
else:
aedr_info = self._read_aedr2(aedr_byte_loc, to_np=to_np)
entryData = aedr_info['entry']
if (expand is False):
entries.append(entryData)
else:
entryWithType = []
if (isinstance(entryData, str)):
entryWithType.append(entryData)
else:
dataType = aedr_info['data_type']
if (len(entryData.tolist()) == 1):
if (dataType != 31 and dataType != 32 and dataType != 33):
entryWithType.append(entryData.tolist()[0])
else:
if (dataType != 33):
entryWithType.append(epoch.CDFepoch.encode(entryData.tolist()[0],
iso_8601=False))
else:
entryWithType.append(epoch.CDFepoch.encode(entryData.tolist()[0]))
else:
if (dataType != 31 and dataType != 32 and dataType != 33):
entryWithType.append(entryData.tolist())
else:
if (dataType != 33):
entryWithType.append(epoch.CDFepoch.encode(entryData.tolist(),
iso_8601=False))
else:
entryWithType.append(epoch.CDFepoch.encode(entryData.tolist()))
entryWithType.append(CDF._datatype_token(aedr_info['data_type']))
entries[aedr_info['entry_num']] = entryWithType
aedr_byte_loc = aedr_info['next_aedr']
if (len(entries) != 0):
if (expand is False):
if (len(entries) == 1):
return_dict[adr_info['name']] = entries[0]
else:
return_dict[adr_info['name']] = entries
else:
return_dict[adr_info['name']] = entries
byte_loc = adr_info['next_adr_location']
return return_dict | def globalattsget(self, expand=False, to_np=True) | Gets all global attributes.
This function returns all of the global attribute entries,
in a dictionary (in the form of 'attribute': {entry: value}
pair) from a CDF. If there is no entry found, None is
returned. If expand is entered with non-False, then each
entry's data type is also returned in a list form as
[entry, 'CDF_xxxx']. For attributes without any entries,
they will also return with None value. | 2.240016 | 2.125495 | 1.05388 |
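A small usage sketch for globalattsget (again reusing the cdf object from above; attribute contents are file-dependent):
gatts = cdf.globalattsget(expand=True)
for name, entries in gatts.items():
    # with expand=True each attribute maps to {entry_number: [value, 'CDF_xxxx']},
    # or None when the attribute has no entries
    print(name, entries)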
if (isinstance(variable, int) and self._num_zvariable > 0 and self._num_rvariable > 0):
print('This CDF has both r and z variables. Use variable name')
return None
if isinstance(variable, str):
position = self._first_zvariable
num_variables = self._num_zvariable
for zVar in [1, 0]:
for _ in range(0, num_variables):
if (self.cdfversion == 3):
name, vdr_next = self._read_vdr_fast(position)
else:
name, vdr_next = self._read_vdr_fast2(position)
if name.strip().lower() == variable.strip().lower():
if (self.cdfversion == 3):
vdr_info = self._read_vdr(position)
else:
vdr_info = self._read_vdr2(position)
return self._read_varatts(vdr_info['variable_number'], zVar, expand, to_np=to_np)
position = vdr_next
position = self._first_rvariable
num_variables = self._num_rvariable
print('No variable by this name:', variable)
return None
elif isinstance(variable, int):
if self._num_zvariable > 0:
num_variable = self._num_zvariable
zVar = True
else:
num_variable = self._num_rvariable
zVar = False
if (variable < 0 or variable >= num_variable):
print('No variable by this number:', variable)
return None
return self._read_varatts(variable, zVar, expand, to_np=to_np)
else:
print('Please set variable keyword equal to the name or ',
'number of a variable')
rvars, zvars = self._get_varnames()
print("RVARIABLES: ")
for x in rvars:
print("NAME: " + str(x))
print("ZVARIABLES: ")
for x in zvars:
print("NAME: " + str(x))
return | def varattsget(self, variable=None, expand=False, to_np=True) | Gets all variable attributes.
Unlike attget, which returns a single attribute entry value,
this function returns all of the variable attribute entries,
in a dictionary (in the form of 'attribute': value pair) for
a variable. If there is no entry found, None is returned.
If no variable name is provided, a list of variables is printed.
If expand is entered with non-False, then each entry's data
type is also returned in a list form as [entry, 'CDF_xxxx'].
For attributes without any entries, they will also return with
None value. | 3.041607 | 2.995127 | 1.015519 |
'''
Writes the current file into a file in the temporary directory.
If that doesn't work, create a new file in the CDFs directory.
'''
with self.file.open('rb') as f:
if (self.cdfversion == 3):
data_start, data_size, cType, _ = self._read_ccr(8)
else:
data_start, data_size, cType, _ = self._read_ccr2(8)
if cType != 5:
return
f.seek(data_start)
decompressed_data = gzip.decompress(f.read(data_size))
newpath = pathlib.Path(tempfile.NamedTemporaryFile(suffix='.cdf').name)
with newpath.open('wb') as g:
g.write(bytearray.fromhex('cdf30001'))
g.write(bytearray.fromhex('0000ffff'))
g.write(decompressed_data)
return newpath | def _uncompress_file(self, path) | Writes the current file into a file in the temporary directory.
If that doesn't work, create a new file in the CDFs directory. | 4.120082 | 3.015412 | 1.366341 |
'''
Verifies the MD5 checksum.
Only used in the __init__() function
'''
fn = self.file if self.compressed_file is None else self.compressed_file
md5 = hashlib.md5()
block_size = 16384
with fn.open('rb') as f:
f.seek(-16, 2)
remaining = f.tell() # File size minus checksum size
f.seek(0)
while (remaining > block_size):
data = f.read(block_size)
remaining = remaining - block_size
md5.update(data)
if (remaining > 0):
data = f.read(remaining)
md5.update(data)
existing_md5 = f.read(16).hex()
return md5.hexdigest() == existing_md5 | def _md5_validation(self) -> bool | Verifies the MD5 checksum.
Only used in the __init__() function | 3.203528 | 2.593454 | 1.235236 |
'''
Determines how to convert CDF byte ordering to the system
byte ordering.
'''
if sys.byteorder == 'little' and self._endian() == 'big-endian':
# big->little
order = '>'
elif sys.byteorder == 'big' and self._endian() == 'little-endian':
# little->big
order = '<'
else:
# no conversion
order = '='
return order | def _convert_option(self) | Determines how to convert CDF byte ordering to the system
byte ordering. | 5.336882 | 3.026851 | 1.76318 |
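The returned prefix plugs straight into struct (or numpy dtype strings); a tiny self-contained illustration:
import struct

big = struct.pack('>i', 1)            # b'\x00\x00\x00\x01', as a big-endian CDF would store it
print(struct.unpack('>i', big)[0])    # 1        -- matching prefix round-trips
print(struct.unpack('<i', big)[0])    # 16777216 -- mismatched prefix byte-swaps the value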
'''
Determines the endianness of the CDF file
Only used in __init__
'''
if (self._encoding == 1 or self._encoding == 2 or self._encoding == 5 or
self._encoding == 7 or self._encoding == 9 or self._encoding == 11 or
self._encoding == 12):
return 'big-endian'
else:
return 'little-endian' | def _endian(self) -> str | Determines the endianness of the CDF file
Only used in __init__ | 4.119227 | 2.60265 | 1.582705 |
'''
Returns the number of values in a record, using a given VDR
dictionary. Multiplies the dimension sizes of each dimension,
if it is varying.
'''
values = 1
for x in range(0, vdr_dict['num_dims']):
if (vdr_dict['dim_vary'][x] != 0):
values = values * vdr_dict['dim_sizes'][x]
return values | def _num_values(self, vdr_dict) | Returns the number of values in a record, using a given VDR
dictionary. Multiplies the dimension sizes of each dimension,
if it is varying. | 5.153208 | 2.217364 | 2.324024 |
'''
CDF data types to python struct data types
'''
if (data_type == 1) or (data_type == 41):
dt_string = 'b'
elif data_type == 2:
dt_string = 'h'
elif data_type == 4:
dt_string = 'i'
elif (data_type == 8) or (data_type == 33):
dt_string = 'q'
elif data_type == 11:
dt_string = 'B'
elif data_type == 12:
dt_string = 'H'
elif data_type == 14:
dt_string = 'I'
elif (data_type == 21) or (data_type == 44):
dt_string = 'f'
elif (data_type == 22) or (data_type == 45) or (data_type == 31):
dt_string = 'd'
elif (data_type == 32):
dt_string = 'd'
elif (data_type == 51) or (data_type == 52):
dt_string = 's'
return dt_string | def _convert_type(self, data_type) | CDF data types to python struct data types | 1.866621 | 1.713274 | 1.089505 |
return str(' '*num_elms)
if (data_type == 1) or (data_type == 41):
pad_value = struct.pack(order+'b', -127)
dt_string = 'i1'
elif data_type == 2:
pad_value = struct.pack(order+'h', -32767)
dt_string = 'i2'
elif data_type == 4:
pad_value = struct.pack(order+'i', -2147483647)
dt_string = 'i4'
elif (data_type == 8) or (data_type == 33):
pad_value = struct.pack(order+'q', -9223372036854775807)
dt_string = 'i8'
elif data_type == 11:
pad_value = struct.pack(order+'B', 254)
dt_string = 'u1'
elif data_type == 12:
pad_value = struct.pack(order+'H', 65534)
dt_string = 'u2'
elif data_type == 14:
pad_value = struct.pack(order+'I', 4294967294)
dt_string = 'u4'
elif (data_type == 21) or (data_type == 44):
pad_value = struct.pack(order+'f', -1.0E30)
dt_string = 'f'
elif (data_type == 22) or (data_type == 45) or (data_type == 31):
pad_value = struct.pack(order+'d', -1.0E30)
dt_string = 'd'
else:
# (data_type == 32):
pad_value = struct.pack(order+'2d', *[-1.0E30, -1.0E30])
dt_string = 'c16'
dt = np.dtype(dt_string)
ret = np.frombuffer(pad_value, dtype=dt, count=1)
ret.setflags('WRITEABLE')
return ret | def _default_pad(self, data_type, num_elms): # @NoSelf
'''
The default pad values by CDF data type
'''
order = self._convert_option()
if (data_type == 51 or data_type == 52) | The default pad values by CDF data type | 1.711444 | 1.715233 | 0.997791 |
if (data == ''):
return ('\x00'*num_elems).encode()
else:
return data.ljust(num_elems, '\x00').encode('utf-8')
elif (data_type == 32):
data_stream = data.real.tobytes()
data_stream += data.imag.tobytes()
return data_stream
else:
return data.tobytes() | def _convert_np_data(data, data_type, num_elems): # @NoSelf
'''
Converts a single np data into byte stream.
'''
if (data_type == 51 or data_type == 52) | Converts a single np data into byte stream. | 3.097059 | 2.925126 | 1.058778 |
'''
Returns a VVR or decompressed CVVR block
'''
with self.file.open('rb') as f:
f.seek(offset, 0)
block_size = int.from_bytes(f.read(8), 'big')
block = f.read(block_size-8)
section_type = int.from_bytes(block[0:4], 'big')
if section_type == 13:
# a CVVR
compressed_size = int.from_bytes(block[12:16], 'big')
return gzip.decompress(block[16:16+compressed_size])
elif section_type == 7:
# a VVR
return block[4:] | def _read_vvr_block(self, offset) | Returns a VVR or decompressed CVVR block | 2.913638 | 2.318245 | 1.256829 |
cur_block = 0
for x in range(cur_block, total):
if (starts[x] <= rec_num and ends[x] >= rec_num):
return x, x
if (starts[x] > rec_num):
break
return -1, x-1 | def _find_block(starts, ends, cur_block, rec_num): # @NoSelf
'''
Finds the block that rec_num is in if it is found. Otherwise it returns -1.
It also returns the block that has the physical data either at or
preceding the rec_num.
It could be -1 if the preceding block does not exist.
'''
total = len(starts)
if (cur_block == -1) | Finds the block that rec_num is in if it is found. Otherwise it returns -1.
It also returns the block that has the physical data either at or
preceding the rec_num.
It could be -1 if the preceeding block does not exists. | 2.570799 | 2.72754 | 0.942534 |
'''
Converts data to the appropriate type using the struct.unpack method,
rather than using numpy.
'''
if (data_type == 51 or data_type == 52):
return [data[i:i+num_elems].decode('utf-8') for i in
range(0, num_recs*num_values*num_elems, num_elems)]
else:
tofrom = self._convert_option()
dt_string = self._convert_type(data_type)
form = tofrom + str(num_recs*num_values*num_elems) + dt_string
value_len = CDF._type_size(data_type, num_elems)
return list(struct.unpack_from(form,
data[0:num_recs*num_values*value_len])) | def _convert_data(self, data, data_type, num_recs, num_values, num_elems) | Converts data to the appropriate type using the struct.unpack method,
rather than using numpy. | 4.396858 | 3.54434 | 1.240529 |
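The format string it builds is just '<order><count><code>'; a standalone illustration with made-up counts:
import struct

num_recs, num_values, num_elems = 3, 2, 1
form = '>' + str(num_recs * num_values * num_elems) + 'f'   # '>6f'
buf = b'\x3f\x80\x00\x00' * 6                               # six big-endian float32 1.0s
print(struct.unpack_from(form, buf))                        # (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)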
def getVersion(): # @NoSelf
print('CDFread version:', str(CDF.version) + '.' + str(CDF.release) +
'.' + str(CDF.increment))
print('Date: 2018/01/11') | Shows the code version and last modified date. | null | null | null |
|
'''
Returns an access token for the specified subscription.
This method uses a cache to limit the number of requests to the token service.
A fresh token can be re-used during its lifetime of 10 minutes. After a successful
request to the token service, this method caches the access token. Subsequent
invocations of the method return the cached token for the next 5 minutes. After
5 minutes, a new token is fetched from the token service and the cache is updated.
'''
if (self.token is None) or (datetime.utcnow() > self.reuse_token_until):
headers = {'Ocp-Apim-Subscription-Key': self.client_secret}
response = requests.post(self.base_url, headers=headers)
response.raise_for_status()
self.token = response.content
self.reuse_token_until = datetime.utcnow() + timedelta(minutes=5)
return self.token.decode('utf-8') | def get_access_token(self) | Returns an access token for the specified subscription.
This method uses a cache to limit the number of requests to the token service.
A fresh token can be re-used during its lifetime of 10 minutes. After a successful
request to the token service, this method caches the access token. Subsequent
invocations of the method return the cached token for the next 5 minutes. After
5 minutes, a new token is fetched from the token service and the cache is updated. | 3.881863 | 1.666245 | 2.329707 |
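The caching rule described above, sketched as a standalone helper (the class name, the fetch callable and the 5-minute window are assumptions taken from the docstring, not this library's API):
from datetime import datetime, timedelta

class TokenCache:
    def __init__(self, fetch_token):
        self._fetch_token = fetch_token        # callable that requests a fresh token
        self._token = None
        self._reuse_until = datetime.utcnow()

    def get(self):
        # refresh only when there is no token yet or the reuse window has expired
        if self._token is None or datetime.utcnow() > self._reuse_until:
            self._token = self._fetch_token()
            self._reuse_until = datetime.utcnow() + timedelta(minutes=5)
        return self._token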
text = ' '.join(text)
kwargs = dict(from_lang=from_lang, to_lang=to_lang, provider=provider)
if provider != DEFAULT_PROVIDER:
kwargs['secret_access_key'] = secret_access_key
translator = Translator(**kwargs)
translation = translator.translate(text)
if sys.version_info.major == 2:
translation = translation.encode(locale.getpreferredencoding())
if output_only:
click.echo(translation)
return translation
click.echo('\nTranslation: {}'.format(translation))
click.echo('-' * 25)
click.echo('Translated by: {}'.format(translator.provider.name))
return translation | def main(from_lang, to_lang, provider, secret_access_key, output_only, text) | Python command line tool to make on line translations
\b
Example:
\b
\t $ translate-cli -t zh the book is on the table
\t 碗是在桌子上。
\b
Available languages:
\b
\t https://en.wikipedia.org/wiki/ISO_639-1
\t Examples: (e.g. en, ja, ko, pt, zh, zh-TW, ...) | 2.420907 | 2.737742 | 0.884271 |
server_configs = (
{'url': url, 'auth': ('admin', 'changeme'), 'verify': False}
for url
in ('https://sat1.example.com', 'https://sat2.example.com')
)
for server_config in server_configs:
response = requests.post(
server_config['url'] + '/api/v2/users',
json.dumps({
'user': {
'auth_source_id': 1,
'login': 'Alice',
'mail': '[email protected]',
'organization_ids': [get_organization_id(
server_config,
'Default_Organization'
)],
'password': 'hackme',
}
}),
auth=server_config['auth'],
headers={'content-type': 'application/json'},
verify=server_config['verify'],
)
response.raise_for_status()
pprint(response.json()) | def main() | Create an identical user account on a pair of satellites. | 3.494028 | 3.218478 | 1.085615 |
response = requests.get(
server_config['url'] + '/katello/api/v2/organizations',
data=json.dumps({'search': 'label={}'.format(label)}),
auth=server_config['auth'],
headers={'content-type': 'application/json'},
verify=server_config['verify'],
)
response.raise_for_status()
decoded = response.json()
if decoded['subtotal'] != 1:
print(
'Expected to find one organization, but instead found {0}. Search '
'results: {1}'.format(decoded['subtotal'], decoded['results'])
)
exit(1)
return decoded['results'][0]['id'] | def get_organization_id(server_config, label) | Return the ID of the organization with label ``label``.
:param server_config: A dict of information about the server being talked
to. The dict should include the keys "url", "auth" and "verify".
:param label: A string label that will be used when searching. Every
organization should have a unique label.
:returns: An organization ID. (Typically an integer.) | 2.738744 | 2.751678 | 0.995299 |
org = Organization(name='junk org').create()
pprint(org.get_values()) # e.g. {'name': 'junk org', …}
org.delete() | def main() | Create an organization, print out its attributes and delete it. | 10.756598 | 6.430594 | 1.672722 |
if poll_rate is None:
poll_rate = TASK_POLL_RATE
if timeout is None:
timeout = TASK_TIMEOUT
# Implement the timeout.
def raise_task_timeout(): # pragma: no cover
thread.interrupt_main()
timer = threading.Timer(timeout, raise_task_timeout)
# Poll until the task finishes. The timeout prevents an infinite loop.
path = '{0}/foreman_tasks/api/tasks/{1}'.format(server_config.url, task_id)
try:
timer.start()
while True:
response = client.get(path, **server_config.get_client_kwargs())
response.raise_for_status()
task_info = response.json()
if task_info['state'] in ('paused', 'stopped'):
break
time.sleep(poll_rate)
except KeyboardInterrupt: # pragma: no cover
# raise_task_timeout will raise a KeyboardInterrupt when the timeout
# expires. Catch the exception and raise TaskTimedOutError
raise TaskTimedOutError(
'Timed out polling task {0}. Task information: {1}'
.format(task_id, task_info)
)
finally:
timer.cancel()
# Check for task success or failure.
if task_info['result'] != 'success':
raise TaskFailedError(
'Task {0} did not succeed. Task information: {1}'
.format(task_id, task_info)
)
return task_info | def _poll_task(task_id, server_config, poll_rate=None, timeout=None) | Implement :meth:`nailgun.entities.ForemanTask.poll`.
See :meth:`nailgun.entities.ForemanTask.poll` for a full description of how
this method acts. Other methods may also call this method, such as
:meth:`nailgun.entity_mixins.EntityDeleteMixin.delete`.
Certain mixins benefit from being able to poll the server after performing
an operation. However, this module cannot use
:meth:`nailgun.entities.ForemanTask.poll`, as that would be a circular
import. Placing the implementation of
:meth:`nailgun.entities.ForemanTask.poll` here allows both that method and
the mixins in this module to use the same logic. | 2.897755 | 2.950463 | 0.982136 |
if isinstance(entity_obj_or_id, entity_cls):
return entity_obj_or_id
return entity_cls(server_config, id=entity_obj_or_id) | def _make_entity_from_id(entity_cls, entity_obj_or_id, server_config) | Given an entity object or an ID, return an entity object.
If the value passed in is an object that is a subclass of :class:`Entity`,
return that value. Otherwise, create an object of the type that ``field``
references, give that object an ID of ``field_value``, and return that
object.
:param entity_cls: An :class:`Entity` subclass.
:param entity_obj_or_id: Either a :class:`nailgun.entity_mixins.Entity`
object or an entity ID.
:returns: An ``entity_cls`` object.
:rtype: nailgun.entity_mixins.Entity | 2.050203 | 2.443733 | 0.838964 |
return [
_make_entity_from_id(entity_cls, entity_or_id, server_config)
for entity_or_id
in entity_objs_and_ids
] | def _make_entities_from_ids(entity_cls, entity_objs_and_ids, server_config) | Given an iterable of entities and/or IDs, return a list of entities.
:param entity_cls: An :class:`Entity` subclass.
:param entity_obj_or_id: An iterable of
:class:`nailgun.entity_mixins.Entity` objects and/or entity IDs. All of
the entities in this iterable should be of type ``entity_cls``.
:returns: A list of ``entity_cls`` objects. | 2.487909 | 3.359149 | 0.740637 |
for field_name, field in fields.items():
if field_name in values:
if isinstance(field, OneToOneField):
values[field_name + '_id'] = (
getattr(values.pop(field_name), 'id', None)
)
elif isinstance(field, OneToManyField):
values[field_name + '_ids'] = [
entity.id for entity in values.pop(field_name)
]
elif isinstance(field, ListField):
def parse(obj):
if isinstance(obj, Entity):
return _payload(obj.get_fields(), obj.get_values())
return obj
values[field_name] = [
parse(obj) for obj in values[field_name]]
return values | def _payload(fields, values) | Implement the ``*_payload`` methods.
It's frequently useful to create a dict of values that can be encoded to
JSON and sent to the server. Unfortunately, there are mismatches between
the field names used by NailGun and the field names the server expects.
This method provides a default translation that works in many cases. For
example:
>>> from nailgun.entities import Product
>>> product = Product(name='foo', organization=1)
>>> set(product.get_fields())
{
'description',
'gpg_key',
'id',
'label',
'name',
'organization',
'sync_plan',
}
>>> set(product.get_values())
{'name', 'organization'}
>>> product.create_payload()
{'organization_id': 1, 'name': 'foo'}
:param fields: A value like what is returned by
:meth:`nailgun.entity_mixins.Entity.get_fields`.
:param values: A value like what is returned by
:meth:`nailgun.entity_mixins.Entity.get_values`.
:returns: A dict mapping field names to field values. | 2.664506 | 2.657631 | 1.002587 |
field_name_id = field_name + '_id'
if field_name in attrs:
if attrs[field_name] is None:
return None
elif 'id' in attrs[field_name]:
return attrs[field_name]['id']
if field_name_id in attrs:
return attrs[field_name_id]
else:
raise MissingValueError(
'Cannot find a value for the "{0}" field. Searched for keys named '
'{1}, but available keys are {2}.'
.format(field_name, (field_name, field_name_id), attrs.keys())
) | def _get_entity_id(field_name, attrs) | Find the ID for a one to one relationship.
The server may return JSON data in the following forms for a
:class:`nailgun.entity_fields.OneToOneField`::
'user': None
'user': {'name': 'Alice Hayes', 'login': 'ahayes', 'id': 1}
'user_id': 1
'user_id': None
Search ``attrs`` for a one to one ``field_name`` and return its ID.
:param field_name: A string. The name of a field.
:param attrs: A dict. A JSON payload as returned from a server.
:returns: Either an entity ID or None. | 2.908531 | 3.149478 | 0.923496 |
field_name_ids = field_name + '_ids'
plural_field_name = pluralize(field_name)
if field_name_ids in attrs:
return attrs[field_name_ids]
elif field_name in attrs:
return [entity['id'] for entity in attrs[field_name]]
elif plural_field_name in attrs:
return [entity['id'] for entity in attrs[plural_field_name]]
else:
raise MissingValueError(
'Cannot find a value for the "{0}" field. Searched for keys named '
'{1}, but available keys are {2}.'
.format(
field_name,
(field_name_ids, field_name, plural_field_name),
attrs.keys()
)
) | def _get_entity_ids(field_name, attrs) | Find the IDs for a one to many relationship.
The server may return JSON data in the following forms for a
:class:`nailgun.entity_fields.OneToManyField`::
'user': [{'id': 1, …}, {'id': 42, …}]
'users': [{'id': 1, …}, {'id': 42, …}]
'user_ids': [1, 42]
Search ``attrs`` for a one to many ``field_name`` and return its ID.
:param field_name: A string. The name of a field.
:param attrs: A dict. A JSON payload as returned from a server.
:returns: An iterable of entity IDs. | 2.609215 | 2.8092 | 0.928811 |
if isinstance(obj, Entity):
return obj.to_json_dict()
if isinstance(obj, dict):
return {k: to_json_serializable(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple)):
return [to_json_serializable(v) for v in obj]
elif isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
return obj | def to_json_serializable(obj) | Transforms obj into a json serializable object.
:param obj: entity or any json serializable object
:return: serializable object | 1.648701 | 1.722392 | 0.957216 |
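For example, the recursion over containers and dates behaves like this (a quick illustration):
from datetime import datetime

print(to_json_serializable({'when': datetime(2020, 1, 2, 3, 4, 5), 'ids': [1, 2]}))
# {'when': '2020-01-02 03:04:05', 'ids': [1, 2]}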
# It is OK that member ``self._meta`` is not found. Subclasses are
# required to set that attribute if they wish to use this method.
#
# Beware of leading and trailing slashes:
#
# urljoin('example.com', 'foo') => 'foo'
# urljoin('example.com/', 'foo') => 'example.com/foo'
# urljoin('example.com', '/foo') => '/foo'
# urljoin('example.com/', '/foo') => '/foo'
#
base = urljoin(
self._server_config.url + '/',
self._meta['api_path'] # pylint:disable=no-member
)
if which == 'base' or (which is None and not hasattr(self, 'id')):
return base
elif (which == 'self' or which is None) and hasattr(self, 'id'):
return urljoin(base + '/', str(self.id)) # pylint:disable=E1101
raise NoSuchPathError | def path(self, which=None) | Return the path to the current entity.
Return the path to base entities of this entity's type if:
* ``which`` is ``'base'``, or
* ``which`` is ``None`` and instance attribute ``id`` is unset.
Return the path to this exact entity if instance attribute ``id`` is
set and:
* ``which`` is ``'self'``, or
* ``which`` is ``None``.
Raise :class:`NoSuchPathError` otherwise.
Child classes may choose to extend this method, especially if a child
entity offers more than the two URLs supported by default. If extended,
then the extending class should check for custom parameters before
calling ``super``::
def path(self, which):
if which == 'custom':
return urljoin(…)
super(ChildEntity, self).__init__(which)
This will allow the extending method to accept a custom parameter
without accidentally raising a :class:`NoSuchPathError`.
:param which: A string. Optional. Valid arguments are 'self' and
'base'.
:return: A string. A fully qualified URL.
:raises nailgun.entity_mixins.NoSuchPathError: If no path can be built. | 4.152833 | 3.517249 | 1.180705 |