code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
self.__prepare()
total = 0
for i in self._json_data:
total += i.get(property)
return total
|
def sum(self, property)
|
Getting the sum according to the given property
:@param property
:@type property: string
:@return int/float
| 6.606742 | 6.706946 | 0.98506 |
self.__prepare()
try:
return max(self._json_data, key=lambda x: x[property]).get(property)
except KeyError:
raise KeyError("Key is not exists")
|
def max(self, property)
|
Getting the maximum value from the prepared data
:@param property
:@type property: string
:@return object
:@throws KeyError
| 5.873838 | 5.248673 | 1.119109 |
self.__prepare()
return self.sum(property) / self.count()
|
def avg(self, property)
|
Getting average according to given property
:@param property
:@type property: string
:@return average: int/float
| 9.635018 | 14.461207 | 0.666267 |
if size == 0:
raise ValueError('Invalid chunk size')
self.__prepare()
_new_content = []
while(len(self._json_data) > 0):
_new_content.append(self._json_data[0:size])
self._json_data = self._json_data[size:]
self._json_data = _new_content
return self._json_data
|
def chunk(self, size=0)
|
Group the resulting collection into multiple chunks
:@param size: 0
:@type size: integer
:@return Chunked List
| 3.211971 | 3.052901 | 1.052105 |
self.__prepare()
group_data = {}
for data in self._json_data:
if data[property] not in group_data:
group_data[data[property]] = []
group_data[data[property]].append(data)
self._json_data = group_data
return self
|
def group_by(self, property)
|
Getting the grouped result by the given property
:@param property
:@type property: string
:@return self
| 2.687525 | 2.781546 | 0.966198 |
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(self._json_data)
else:
self._json_data = sorted(self._json_data, reverse=True)
return self
|
def sort(self, order="asc")
|
Getting the sorted result of the given list
:@param order: "asc"
:@type order: string
:@return self
| 2.635971 | 2.912572 | 0.905032 |
self.__prepare()
if isinstance(self._json_data, list):
if order == "asc":
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property)
)
else:
self._json_data = sorted(
self._json_data,
key=lambda x: x.get(property),
reverse=True
)
return self
|
def sort_by(self, property, order="asc")
|
Getting the sorted result by the given property
:@param property, order: "asc"
:@type property, order: string
:@return self
| 2.102407 | 2.33539 | 0.900238 |
if (op not in self.condition_mapper):
raise ValueError('Invalid where condition given')
func = getattr(self, self.condition_mapper.get(op))
return func(x, y)
|
def _match(self, x, op, y)
|
Compare the given `x` and `y` based on `op`
:@param x, y, op
:@type x, y: mixed
:@type op: string
:@return bool
:@throws ValueError
| 6.551592 | 6.581097 | 0.995517 |
for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
if hasattr(super_class, method.__name__):
super_method = getattr(super_class, method.__name__)
if hasattr(super_method, "__finalized__"):
finalized = getattr(super_method, "__finalized__")
if finalized:
raise AssertionError('Method "%s" is finalized' %
method.__name__)
if not method.__doc__:
method.__doc__ = super_method.__doc__
return method
raise AssertionError('No super class method found for "%s"' %
method.__name__)
|
def overrides(method)
|
Decorator to indicate that the decorated method overrides a method in
superclass.
The decorator code is executed while loading class. Using this method
should have minimal runtime performance implications.
This is based on my idea about how to do this and fwc's highly improved
algorithm for the implementation.
fwc's algorithm: http://stackoverflow.com/a/14631397/308189
my answer: http://stackoverflow.com/a/8313042/308189
How to use:
from overrides import overrides
class SuperClass(object):
def method(self):
return 2
class SubClass(SuperClass):
@overrides
def method(self):
return 1
:raises AssertionError if no match in super classes for the method name
:return method with possibly added (if the method doesn't have one)
docstring from super class
| 2.917456 | 2.863685 | 1.018777 |
co, lasti = frame.f_code, frame.f_lasti
code = co.co_code
extends = []
for (op, oparg) in op_stream(code, lasti):
if op in dis.hasconst:
if type(co.co_consts[oparg]) == str:
extends = []
elif op in dis.hasname:
if dis.opname[op] == 'LOAD_NAME':
extends.append(('name', co.co_names[oparg]))
if dis.opname[op] == 'LOAD_ATTR':
extends.append(('attr', co.co_names[oparg]))
if dis.opname[op] == 'LOAD_GLOBAL':
extends.append(('name', co.co_names[oparg]))
items = []
previous_item = []
for t, s in extends:
if t == 'name':
if previous_item:
items.append(previous_item)
previous_item = [s]
else:
previous_item += [s]
if previous_item:
items.append(previous_item)
return items
|
def _get_base_class_names(frame)
|
Get baseclass names from the code object
| 2.286297 | 2.278569 | 1.003392 |
if message.get_content_charset():
return message.get_content_charset()
if message.get_charset():
return message.get_charset()
return default
|
def get_charset(message, default="utf-8")
|
Get the message charset
| 2.288245 | 2.085369 | 1.097285 |
file = os.path.join(os.path.dirname(__file__),
'assets',
'tlds-alpha-by-domain.txt')
with open(file) as fobj:
return [elem for elem in fobj.read().lower().splitlines()[1:]
if "--" not in elem]
|
def load_tlds()
|
Load all legal TLD extensions from assets
| 3.908259 | 3.496309 | 1.117824 |
rval = []
loc = 0
for match in URLRE.finditer(mesg):
if loc < match.start():
rval.append(Chunk(mesg[loc:match.start()], None))
# Turn email addresses into mailto: links
email = match.group("email")
if email and "mailto" not in email:
mailto = "mailto:{}".format(email)
else:
mailto = match.group(1)
rval.append(Chunk(None, mailto))
loc = match.end()
if loc < len(mesg):
rval.append(Chunk(mesg[loc:], None))
return rval
|
def parse_text_urls(mesg)
|
Parse a block of text, splitting it into its url and non-url
components.
| 2.807629 | 2.66979 | 1.051629 |
rval = []
start = 0
length = 0
while start < len(lst):
usedfirst = False
usedlast = False
# Extend to the next match.
while start + length < len(lst) and length < before_context + 1 \
and not pred(lst[start + length]):
length += 1
# Slide to the next match.
while start + length < len(lst) and not pred(lst[start + length]):
start += 1
# If there was no next match, abort here (it's easier
# to do this than to try to detect this case later).
if start + length == len(lst):
break
# Now extend repeatedly until we can't find anything.
while start + length < len(lst) and pred(lst[start + length]):
extendlength = 1
# Read in the 'after' context and see if it holds a URL.
while extendlength < after_context + 1 and start + length + \
extendlength < len(lst) and \
not pred(lst[start + length + extendlength]):
extendlength += 1
length += extendlength
if start + length < len(lst) and not pred(lst[start + length]):
# Didn't find a matching line, so we either
# hit the end or extended to after_context + 1..
#
# Now read in possible 'before' context
# from the next URL; if we don't find one,
# we discard the readahead.
extendlength = 1
while extendlength < before_context and start + length + \
extendlength < len(lst) and \
not pred(lst[start + length + extendlength]):
extendlength += 1
if start + length + extendlength < len(lst) and \
pred(lst[start + length + extendlength]):
length += extendlength
if length > 0 and start + length <= len(lst):
if start == 0:
usedfirst = True
if start + length == len(lst):
usedlast = True
rval.append((lst[start:start + length], usedfirst, usedlast))
start += length
length = 0
return rval
|
def extract_with_context(lst, pred, before_context, after_context)
|
Extract URL and context from a given chunk.
| 3.141823 | 3.081106 | 1.019706 |
lines = NLRE.split(mesg)
# The number of lines of context above to provide.
# above_context = 1
# The number of lines of context below to provide.
# below_context = 1
# Plan here is to first transform lines into the form
# [line_fragments] where each fragment is a chunk as
# seen by parse_text_urls. Note that this means that
# lines with more than one entry or one entry that's
# a URL are the only lines containing URLs.
linechunks = [parse_text_urls(l) for l in lines]
return extract_with_context(linechunks,
lambda chunk: len(chunk) > 1 or
(len(chunk) == 1 and chunk[0].url is not None),
1, 1)
|
def extracturls(mesg)
|
Given a text message, extract all the URLs found in the message, along
with their surrounding context. The output is a list of sequences of Chunk
objects, corresponding to the contextual regions extracted from the string.
| 7.787115 | 7.299427 | 1.066812 |
chunk = HTMLChunker()
chunk.feed(mesg)
chunk.close()
# above_context = 1
# below_context = 1
def somechunkisurl(chunks):
for chnk in chunks:
if chnk.url is not None:
return True
return False
return extract_with_context(chunk.rval, somechunkisurl, 1, 1)
|
def extracthtmlurls(mesg)
|
Extract URLs with context from html type message. Similar to extracturls.
| 6.473584 | 6.050926 | 1.06985 |
try:
strg = byt.decode(enc)
except UnicodeDecodeError as err:
strg = "Unable to decode message:\n{}\n{}".format(str(byt), err)
except (AttributeError, UnicodeEncodeError):
# If byt is already a string, just return it
return byt
return strg
|
def decode_bytes(byt, enc='utf-8')
|
Given a string or bytes input, return a string.
Args: byt - bytes or string
enc - encoding to use for decoding the byte string.
| 3.511358 | 3.768011 | 0.931886 |
# We avoid the get_payload decoding machinery for raw
# content-transfer-encodings potentially containing non-ascii characters,
# such as 8bit or binary, as these are encoded using raw-unicode-escape which
# seems to prevent subsequent utf-8 decoding.
cte = str(msg.get('content-transfer-encoding', '')).lower()
decode = cte not in ("8bit", "7bit", "binary")
res = msg.get_payload(decode=decode)
return decode_bytes(res, enc)
|
def decode_msg(msg, enc='utf-8')
|
Decodes a message fragment.
Args: msg - A Message object representing the fragment
enc - The encoding to use for decoding the message
| 9.067189 | 9.484307 | 0.95602 |
# Written as a generator so I can easily choose only
# one subpart in the future (e.g., for
# multipart/alternative). Actually, I might even add
# a browser for the message structure?
enc = get_charset(msg)
if msg.is_multipart():
for part in msg.get_payload():
for chunk in msgurls(part, urlidx):
urlidx += 1
yield chunk
elif msg.get_content_type() == "text/plain":
decoded = decode_msg(msg, enc)
for chunk in extracturls(decoded):
urlidx += 1
yield chunk
elif msg.get_content_type() == "text/html":
decoded = decode_msg(msg, enc)
for chunk in extracthtmlurls(decoded):
urlidx += 1
yield chunk
|
def msgurls(msg, urlidx=1)
|
Main entry function for urlscan.py
| 4.15348 | 4.221483 | 0.983891 |
cols = ((cols - 6) * .85) # 6 cols for urlref and don't use whole line
if shorten is False or len(url) < cols:
return url
split = int(cols * .5)
return url[:split] + "..." + url[-split:]
|
def shorten_url(url, cols, shorten)
|
Shorten long URLs to fit on one line.
| 8.965005 | 8.307626 | 1.07913 |
grp = []
res = []
for item in items:
if isinstance(item, urwid.Divider):
res.append(grp)
grp = [items[0]]
else:
grp.append(item)
res.append(grp)
return res[1:]
|
def grp_list(items)
|
Organize list of items [a,2,3,4,a,4,2,a,1, etc...] like:
[[a,2,3,4], [a,4,2], [a,1]], where 'a' is a urwid.Divider
| 3.269502 | 2.634083 | 1.24123 |
if search:
pat = re.compile("({})".format(re.escape(search)), re.IGNORECASE)
else:
return text
final = pat.split(text)
final = [(attr, i) if i.lower() == search.lower() else i for i in final]
return final
|
def splittext(text, search, attr)
|
Split a text string by search string and add Urwid display attribute to
the search term.
Args: text - string
search - search string
attr - attribute string to add
Returns: urwid markup list ["string", ("default", " mo"), "re string"]
for search="mo", text="string more string" and attr="default"
| 3.292381 | 4.410905 | 0.746419 |
self.loop = urwid.MainLoop(self.top, self.palettes[self.palette_names[0]], screen=self.tui,
handle_mouse=False, input_filter=self.handle_keys,
unhandled_input=self.unhandled)
self.loop.run()
|
def main(self)
|
Urwid main event loop
| 4.88419 | 4.094387 | 1.192899 |
for j, k in enumerate(keys):
if self.search is True:
text = "Search: {}".format(self.search_string)
if k == 'enter':
# Catch 'enter' key to prevent opening URL in mkbrowseto
self.enter = True
if not self.items:
self.search = False
self.enter = False
if self.search_string:
footer = 'search'
else:
footer = 'default'
text = ""
footerwid = urwid.AttrMap(urwid.Text(text), footer)
self.top.footer = footerwid
elif k in self.activate_keys:
self.search_string += k
self._search()
elif k == 'backspace':
self.search_string = self.search_string[:-1]
self._search()
elif k in self.activate_keys and \
self.urls and \
self.search is False and \
self.help_menu is False:
self._open_url()
elif self.help_menu is True:
self._help_menu()
return []
if k == 'up':
# Works around bug where the up arrow goes higher than the top list
# item and unintentionally triggers context and palette switches.
# Remaps 'up' to 'k'
keys[j] = 'k'
if k == 'home':
# Remap 'home' to 'g'. Works around small bug where 'home' takes the cursor
# above the top list item.
keys[j] = 'g'
# filter backspace out before the widget, it has a weird interaction
return [i for i in keys if i != 'backspace']
|
def handle_keys(self, keys, raw)
|
Handle widget default keys
- 'Enter' or 'space' to load URL
- 'Enter' to end search mode
- add 'space' to search string in search mode
- Workaround some small positioning bugs
| 6.229872 | 5.881489 | 1.059234 |
self.key = key
self.size = self.tui.get_cols_rows()
if self.search is True:
if self.enter is False and self.no_matches is False:
if len(key) == 1 and key.isprintable():
self.search_string += key
self._search()
elif self.enter is True and not self.search_string:
self.search = False
self.enter = False
return
if not self.urls and key not in "Qq":
return # No other actions are useful with no URLs
if self.help_menu is False:
try:
self.keys[key]()
except KeyError:
pass
|
def unhandled(self, key)
|
Handle other keyboard actions not handled by the ListBox widget.
| 5.694319 | 5.460797 | 1.042763 |
load_text = "Loading URL..." if not self.run else "Executing: {}".format(self.run)
if os.environ.get('BROWSER') not in ['elinks', 'links', 'w3m', 'lynx']:
self._footer_start_thread(load_text, 5)
|
def _open_url(self)
|
<Enter> or <space>
| 12.242072 | 11.194412 | 1.093588 |
if self.help_menu is False:
self.focus_pos_saved = self.top.body.focus_position
help_men = "\n".join(["{} - {}".format(i, j.__name__.strip('_'))
for i, j in self.keys.items() if j.__name__ !=
'_digits'])
help_men = "KEYBINDINGS\n" + help_men + "\n<0-9> - Jump to item"
docs = ("OPTIONS\n"
"all_escape -- toggle unescape all URLs\n"
"all_shorten -- toggle shorten all URLs\n"
"bottom -- move cursor to last item\n"
"clear_screen -- redraw screen\n"
"clipboard -- copy highlighted URL to clipboard using xsel/xclip\n"
"clipboard_pri -- copy highlighted URL to primary selection using xsel/xclip\n"
"config_create -- create ~/.config/urlscan/config.json\n"
"context -- show/hide context\n"
"down -- cursor down\n"
"help_menu -- show/hide help menu\n"
"open_url -- open selected URL\n"
"palette -- cycle through palettes\n"
"quit -- quit\n"
"shorten -- toggle shorten highlighted URL\n"
"top -- move to first list item\n"
"up -- cursor up\n")
self.top.body = \
urwid.ListBox(urwid.SimpleListWalker([urwid.Columns([(30, urwid.Text(help_men)),
urwid.Text(docs)])]))
else:
self.top.body = urwid.ListBox(self.items)
self.top.body.focus_position = self.focus_pos_saved
self.help_menu = not self.help_menu
|
def _help_menu(self)
|
F1
| 4.604392 | 4.536884 | 1.01488 |
if self.urls:
self.search = True
if self.compact is True:
self.compact = False
self.items, self.items_com = self.items_com, self.items
else:
return
self.no_matches = False
self.search_string = ""
# Reset the search highlighting
self._search()
footerwid = urwid.AttrMap(urwid.Text("Search: "), 'footer')
self.top.footer = footerwid
self.items = self.items_orig
self.top.body = urwid.ListBox(self.items)
|
def _search_key(self)
|
/
| 6.677633 | 6.470256 | 1.032051 |
self.number += self.key
try:
if self.compact is False:
self.top.body.focus_position = \
self.items.index(self.items_com[max(int(self.number) - 1, 0)])
else:
self.top.body.focus_position = \
self.items.index(self.items[max(int(self.number) - 1, 0)])
except IndexError:
self.number = self.number[:-1]
self.top.keypress(self.size, "") # Trick urwid into redisplaying the cursor
if self.number:
self._footer_start_thread("Selection: {}".format(self.number), 1)
|
def _digits(self)
|
0-9
| 5.957046 | 5.735246 | 1.038673 |
# Goto top of the list
self.top.body.focus_position = 2 if self.compact is False else 0
self.top.keypress(self.size, "")
|
def _top(self)
|
g
| 18.141279 | 18.307993 | 0.990894 |
# Goto bottom of the list
self.top.body.focus_position = len(self.items) - 1
self.top.keypress(self.size, "")
|
def _bottom(self)
|
G
| 12.95962 | 13.759462 | 0.94187 |
# Toggle shortened URL for selected item
fpo = self.top.body.focus_position
url_idx = len([i for i in self.items[:fpo + 1]
if isinstance(i, urwid.Columns)]) - 1
if self.compact is False and fpo <= 1:
return
url = self.urls[url_idx]
short = not "..." in self.items[fpo][1].label
self.items[fpo][1].set_label(shorten_url(url, self.size[0], short))
|
def _shorten(self)
|
s
| 6.963365 | 6.646252 | 1.047713 |
# Toggle all shortened URLs
self.shorten = not self.shorten
urls = iter(self.urls)
for item in self.items:
# Each Column has (Text, Button). Update the Button label
if isinstance(item, urwid.Columns):
item[1].set_label(shorten_url(next(urls),
self.size[0],
self.shorten))
|
def _all_shorten(self)
|
S
| 7.652315 | 7.632544 | 1.00259 |
# Toggle all escaped URLs
self.unesc = not self.unesc
self.urls, self.urls_unesc = self.urls_unesc, self.urls
urls = iter(self.urls)
for item in self.items:
# Each Column has (Text, Button). Update the Button label
if isinstance(item, urwid.Columns):
item[1].set_label(shorten_url(next(urls),
self.size[0],
self.shorten))
|
def _all_escape(self)
|
u
| 7.875914 | 7.780757 | 1.01223 |
# Show/hide context
if self.search_string:
# Reset search when toggling compact mode
footerwid = urwid.AttrMap(urwid.Text(""), 'default')
self.top.footer = footerwid
self.search_string = ""
self.items = self.items_orig
fpo = self.top.body.focus_position
self.items, self.items_com = self.items_com, self.items
self.top.body = urwid.ListBox(self.items)
self.top.body.focus_position = self._cur_focus(fpo)
self.compact = not self.compact
|
def _context(self)
|
c
| 6.28883 | 6.133616 | 1.025305 |
# Copy highlighted url to clipboard
fpo = self.top.body.focus_position
url_idx = len([i for i in self.items[:fpo + 1]
if isinstance(i, urwid.Columns)]) - 1
if self.compact is False and fpo <= 1:
return
url = self.urls[url_idx]
if pri is True:
cmds = ("xsel -i", "xclip -i")
else:
cmds = ("xsel -ib", "xclip -i -selection clipboard")
for cmd in cmds:
try:
proc = Popen(shlex.split(cmd), stdin=PIPE)
proc.communicate(input=url.encode(sys.getdefaultencoding()))
self._footer_start_thread("Copied url to {} selection".format(
"primary" if pri is True else "clipboard"), 5)
except OSError:
continue
break
|
def _clipboard(self, pri=False)
|
C
| 5.041403 | 4.98852 | 1.010601 |
# Loop through available palettes
self.palette_idx += 1
try:
self.loop.screen.register_palette(self.palettes[self.palette_names[self.palette_idx]])
except IndexError:
self.loop.screen.register_palette(self.palettes[self.palette_names[0]])
self.palette_idx = 0
self.loop.screen.clear()
|
def _palette(self)
|
p
| 3.156497 | 3.17129 | 0.995335 |
# Create ~/.config/urlscan/config.json if it doesn't exist
if not exists(self.conf):
try:
# Python 2/3 compatible recursive directory creation
os.makedirs(dirname(expanduser(self.conf)))
except OSError as err:
if errno.EEXIST != err.errno:
raise
keys = dict(zip(self.keys.keys(),
[i.__name__.strip('_') for i in self.keys.values()]))
with open(expanduser(self.conf), 'w') as pals:
pals.writelines(json.dumps({"palettes": self.palettes, "keys": keys},
indent=4))
print("Created ~/.config/urlscan/config.json")
else:
print("~/.config/urlscan/config.json already exists")
|
def _config_create(self)
|
--genconf
| 4.090739 | 3.920522 | 1.043417 |
footerwid = urwid.AttrMap(urwid.Text(text), 'footer')
self.top.footer = footerwid
load_thread = Thread(target=self._loading_thread, args=(time,))
load_thread.daemon = True
load_thread.start()
|
def _footer_start_thread(self, text, time)
|
Display given text in the footer. Clears after <time> seconds
| 3.698993 | 3.789637 | 0.976081 |
sleep(time)
self.number = "" # Clear URL selection number
text = "Search: {}".format(self.search_string)
if self.search_string:
footer = 'search'
else:
footer = 'default'
text = ""
footerwid = urwid.AttrMap(urwid.Text(text), footer)
self.top.footer = footerwid
size = self.tui.get_cols_rows()
self.draw_screen(size)
|
def _loading_thread(self, time)
|
Simple thread to wait <time> seconds after launching a URL or
displaying a URL selection number, clearing the screen and clearing the
footer loading message.
| 8.109984 | 6.557828 | 1.236687 |
text = "Search: {}".format(self.search_string)
footerwid = urwid.AttrMap(urwid.Text(text), 'footer')
self.top.footer = footerwid
search_items = []
for grp in self.items_org:
done = False
for idx, item in enumerate(grp):
if isinstance(item, urwid.Columns):
for col_idx, col in enumerate(item.contents):
if isinstance(col[0], urwid.decoration.AttrMap):
grp[idx][col_idx].set_label(splittext(col[0].base_widget.label,
self.search_string,
''))
if self.search_string.lower() in col[0].base_widget.label.lower():
grp[idx][col_idx].set_label(splittext(col[0].base_widget.label,
self.search_string,
'search'))
done = True
elif isinstance(item, urwid.Text):
grp[idx].set_text(splittext(item.text, self.search_string, ''))
if self.search_string.lower() in item.text.lower():
grp[idx].set_text(splittext(item.text, self.search_string, 'search'))
done = True
if done is True:
search_items.extend(grp)
self.items = search_items
self.top.body = urwid.ListBox(self.items)
if self.items:
self.top.body.focus_position = 2 if self.compact is False else 0
# Trick urwid into redisplaying the cursor
self.top.keypress(self.tui.get_cols_rows(), "")
self.no_matches = False
else:
self.no_matches = True
footerwid = urwid.AttrMap(urwid.Text(text + " No Matches"), 'footer')
self.top.footer = footerwid
|
def _search(self)
|
Search - search URLs and text.
| 2.799625 | 2.753879 | 1.016611 |
self.tui.clear()
canvas = self.top.render(size, focus=True)
self.tui.draw_screen(size, canvas)
|
def draw_screen(self, size)
|
Render curses screen
| 7.332654 | 6.412956 | 1.143413 |
# Try-except block to work around webbrowser module bug
# https://bugs.python.org/issue31014
try:
browser = os.environ['BROWSER']
except KeyError:
pass
else:
del os.environ['BROWSER']
webbrowser.register(browser, None, webbrowser.GenericBrowser(browser))
try_idx = webbrowser._tryorder.index(browser)
webbrowser._tryorder.insert(0, webbrowser._tryorder.pop(try_idx))
def browse(*args):
# double ()() to ensure self.search evaluated at runtime, not when
# browse() is _created_. [0] is self.search, [1] is self.enter
# self.enter prevents opening URL when in search mode
if self._get_search()[0]() is True:
if self._get_search()[1]() is True:
self.search = False
self.enter = False
elif not self.run:
webbrowser.open(url)
elif self.run and self.pipe:
process = Popen(shlex.split(self.run), stdout=PIPE, stdin=PIPE)
process.communicate(input=url.encode(sys.getdefaultencoding()))
else:
Popen(self.run.format(url), shell=True).communicate()
size = self.tui.get_cols_rows()
self.draw_screen(size)
return browse
|
def mkbrowseto(self, url)
|
Create the urwid callback function to open the web browser or call
another function with the URL.
| 5.073786 | 4.824681 | 1.051631 |
cols, _ = urwid.raw_display.Screen().get_cols_rows()
items = []
urls = []
first = True
for group, usedfirst, usedlast in extractedurls:
if first:
first = False
items.append(urwid.Divider(div_char='-', top=1, bottom=1))
if dedupe is True:
# If no unique URLs exist, then skip the group completely
if not [chunk for chunks in group for chunk in chunks
if chunk.url is not None and chunk.url not in urls]:
continue
groupurls = []
markup = []
if not usedfirst:
markup.append(('msgtext:ellipses', '...\n'))
for chunks in group:
i = 0
while i < len(chunks):
chunk = chunks[i]
i += 1
if chunk.url is None:
markup.append(('msgtext', chunk.markup))
else:
if (dedupe is True and chunk.url not in urls) \
or dedupe is False:
urls.append(chunk.url)
groupurls.append(chunk.url)
# Collect all immediately adjacent
# chunks with the same URL.
tmpmarkup = []
if chunk.markup:
tmpmarkup.append(('msgtext', chunk.markup))
while i < len(chunks) and \
chunks[i].url == chunk.url:
if chunks[i].markup:
tmpmarkup.append(chunks[i].markup)
i += 1
url_idx = urls.index(chunk.url) + 1 if dedupe is True else len(urls)
markup += [tmpmarkup or '<URL>',
('urlref:number:braces', ' ['),
('urlref:number', repr(url_idx)),
('urlref:number:braces', ']')]
markup += '\n'
if not usedlast:
markup += [('msgtext:ellipses', '...\n\n')]
items.append(urwid.Text(markup))
i = len(urls) - len(groupurls)
for url in groupurls:
i += 1
markup = [(6, urwid.Text([('urlref:number:braces', '['),
('urlref:number', repr(i)),
('urlref:number:braces', ']'),
' '])),
urwid.AttrMap(urwid.Button(shorten_url(url,
cols,
shorten),
self.mkbrowseto(url),
user_data=url),
'urlref:url', 'url:sel')]
items.append(urwid.Columns(markup))
return items, urls
|
def process_urls(self, extractedurls, dedupe, shorten)
|
Process the 'extractedurls' and ready them for either the curses browser
or non-interactive output
Args: extractedurls
dedupe - Remove duplicate URLs from list
Returns: items - List of widgets for the ListBox
urls - List of all URLs
| 3.894762 | 3.811526 | 1.021838 |
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
|
def _get_key_file_path()
|
Return the key file path.
| 2.622473 | 2.303931 | 1.13826 |
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
|
def load_key_file(self)
|
Try to load the client key for the current ip.
| 2.360444 | 2.127733 | 1.109371 |
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
|
def save_key_file(self)
|
Save the current client key.
| 2.522074 | 2.350014 | 1.073216 |
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
|
def _send_register_payload(self, websocket)
|
Send the register payload.
| 2.963712 | 2.908913 | 1.018839 |
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
|
def _register(self)
|
Register wrapper.
| 2.566332 | 2.542282 | 1.00946 |
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
|
def register(self)
|
Pair client with tv.
| 2.489926 | 2.351658 | 1.058796 |
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
|
def _command(self, msg)
|
Send a command to the tv.
| 2.988117 | 3.003161 | 0.994991 |
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
|
def command(self, request_type, uri, payload)
|
Build and send a command.
| 2.60436 | 2.57375 | 1.011893 |
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
|
def send_message(self, message, icon_path=None)
|
Show a floating message.
| 2.40607 | 2.240726 | 1.073791 |
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
|
def get_apps(self)
|
Return all apps.
| 11.190392 | 9.336445 | 1.198571 |
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
|
def get_current_app(self)
|
Get the current app id.
| 7.216088 | 6.054976 | 1.191762 |
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
|
def get_services(self)
|
Get all services.
| 7.635472 | 6.302168 | 1.211563 |
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
|
def get_software_info(self)
|
Return the current software status.
| 7.784943 | 6.449805 | 1.207004 |
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
|
def get_inputs(self)
|
Get all inputs.
| 9.17455 | 7.697268 | 1.191923 |
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
|
def get_audio_status(self)
|
Get the current audio status
| 7.514224 | 6.586007 | 1.140938 |
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
|
def get_volume(self)
|
Get the current volume.
| 6.758111 | 5.575374 | 1.212136 |
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
|
def set_volume(self, volume)
|
Set volume.
| 7.109165 | 6.439437 | 1.104004 |
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
|
def get_channels(self)
|
Get all tv channels.
| 7.733974 | 5.447042 | 1.419849 |
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
|
def get_current_channel(self)
|
Get the current tv channel.
| 7.376833 | 6.024404 | 1.224492 |
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
|
def get_channel_info(self)
|
Get the current channel info.
| 7.430581 | 6.038619 | 1.23051 |
for res in pkg_resources.iter_entry_points(pgroup):
try:
yield res.load()
except ImportError:
if safe:
continue
raise
|
def _load_plugins_itr(pgroup, safe=True)
|
.. seealso:: the doc of :func:`load_plugins`
| 2.99735 | 2.804958 | 1.06859 |
itr = anyconfig.utils.concat(((k, v) for k in ks) for ks, v in items)
return list((k, sort_fn(t[1] for t in g))
for k, g
in anyconfig.utils.groupby(itr, operator.itemgetter(0)))
|
def select_by_key(items, sort_fn=sorted)
|
:param items: A list of tuples of keys and values, [([key], val)]
:return: A list of tuples of key and values, [(key, [val])]
>>> select_by_key([(["a", "aaa"], 1), (["b", "bb"], 2), (["a"], 3)])
[('a', [1, 3]), ('aaa', [1]), ('b', [2]), ('bb', [2])]
| 6.849983 | 6.813782 | 1.005313 |
if key == "type":
kfn = operator.methodcaller(key)
res = sorted(((k, sort_by_prio(g)) for k, g
in anyconfig.utils.groupby(prs, kfn)),
key=operator.itemgetter(0))
elif key == "extensions":
res = select_by_key(((p.extensions(), p) for p in prs),
sort_fn=sort_by_prio)
else:
raise ValueError("Argument 'key' must be 'type' or "
"'extensions' but it was '%s'" % key)
return res
|
def list_by_x(prs, key)
|
:param key: Grouping key, "type" or "extensions"
:return:
A list of :class:`Processor` or its children classes grouped by
given 'item', [(cid, [:class:`Processor`)]] by default
| 4.847967 | 4.339031 | 1.117293 |
return sorted((p for p in prs if predicate(p)),
key=operator.methodcaller("priority"), reverse=True)
|
def findall_with_pred(predicate, prs)
|
:param predicate: any callable to filter results
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:return: A list of appropriate processor classes or []
| 4.71887 | 5.915987 | 0.797647 |
if isinstance(type_or_id, cls):
return type_or_id
if type(type_or_id) == type(cls) and issubclass(type_or_id, cls):
return type_or_id()
return None
|
def maybe_processor(type_or_id, cls=anyconfig.models.processor.Processor)
|
:param type_or_id:
Type of the data to process or ID of the processor class or
:class:`anyconfig.models.processor.Processor` class object or its
instance
:param cls: A class object to compare with 'type_or_id'
:return: Processor instance or None
| 2.233312 | 2.439941 | 0.915314 |
def pred(pcls):
return pcls.cid() == type_or_id or pcls.type() == type_or_id
pclss = findall_with_pred(pred, prs)
if not pclss:
raise UnknownProcessorTypeError(type_or_id)
return pclss
|
def find_by_type_or_id(type_or_id, prs)
|
:param type_or_id: Type of the data to process or ID of the processor class
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:return:
A list of processor classes to process files of given data type or
processor 'type_or_id' found by its ID
:raises: UnknownProcessorTypeError
| 5.471818 | 4.378311 | 1.249756 |
def pred(pcls):
return fileext in pcls.extensions()
pclss = findall_with_pred(pred, prs)
if not pclss:
raise UnknownFileTypeError("file extension={}".format(fileext))
return pclss
|
def find_by_fileext(fileext, prs)
|
:param fileext: File extension
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:return: A list of processor class to processor files with given extension
:raises: UnknownFileTypeError
| 7.635281 | 5.982654 | 1.276236 |
if not isinstance(obj, IOInfo):
obj = anyconfig.ioinfo.make(obj)
return find_by_fileext(obj.extension, prs)
|
def find_by_maybe_file(obj, prs)
|
:param obj:
a file path, file or file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param cps_by_ext: A list of processor classes
:return: A list of processor classes to process given (maybe) file
:raises: UnknownFileTypeError
| 9.8114 | 7.752546 | 1.265571 |
if (obj is None or not obj) and forced_type is None:
raise ValueError("The first argument 'obj' or the second argument "
"'forced_type' must be something other than "
"None or False.")
if forced_type is None:
pclss = find_by_maybe_file(obj, prs) # :: [Processor], never []
else:
pclss = find_by_type_or_id(forced_type, prs) # Do.
return pclss
|
def findall(obj, prs, forced_type=None,
cls=anyconfig.models.processor.Processor)
|
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo` (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or None
:param cls: A class object to compare with 'forced_type' later
:return: A list of instances of processor classes to process 'obj' data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
| 5.654773 | 5.463917 | 1.03493 |
if forced_type is not None:
processor = maybe_processor(forced_type, cls=cls)
if processor is not None:
return processor
pclss = findall(obj, prs, forced_type=forced_type, cls=cls)
return pclss[0]()
|
def find(obj, prs, forced_type=None, cls=anyconfig.models.processor.Processor)
|
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or :class:`anyconfig.models.processor.Processor` class object or
its instance itself
:param cls: A class object to compare with 'forced_type' later
:return: an instance of processor class to process 'obj' data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
| 3.668751 | 4.20105 | 0.873294 |
for pcls in pclss:
if pcls.cid() not in self._processors:
self._processors[pcls.cid()] = pcls
|
def register(self, *pclss)
|
:param pclss: A list of :class:`Processor` or its children classes
| 3.691627 | 3.249989 | 1.135889 |
prs = self._processors.values()
if sort:
return sorted(prs, key=operator.methodcaller("cid"))
return prs
|
def list(self, sort=False)
|
:param sort: Result will be sorted if it's True
:return: A list of :class:`Processor` or its children classes
| 9.259993 | 7.874139 | 1.176001 |
prs = self._processors
return sorted(((cid, [prs[cid]]) for cid in sorted(prs.keys())),
key=operator.itemgetter(0))
|
def list_by_cid(self)
|
:return:
A list of :class:`Processor` or its children classes grouped by
each cid, [(cid, [:class:`Processor`)]]
| 10.567949 | 5.14639 | 2.053468 |
prs = self._processors
if item is None or item == "cid": # Default.
res = [(cid, [prs[cid]]) for cid in sorted(prs.keys())]
elif item in ("type", "extensions"):
res = list_by_x(prs.values(), item)
else:
raise ValueError("keyword argument 'item' must be one of "
"None, 'cid', 'type' and 'extensions' "
"but it was '%s'" % item)
return res
|
def list_by_x(self, item=None)
|
:param item: Grouping key, one of "cid", "type" and "extensions"
:return:
A list of :class:`Processor` or its children classes grouped by
given 'item', [(cid, [:class:`Processor`)]] by default
| 5.94496 | 3.711585 | 1.601731 |
if key in ("cid", "type"):
return sorted(set(operator.methodcaller(key)(p)
for p in self._processors.values()))
if key == "extension":
return sorted(k for k, _v in self.list_by_x("extensions"))
raise ValueError("keyword argument 'key' must be one of "
"None, 'cid', 'type' and 'extension' "
"but it was '%s'" % key)
|
def list_x(self, key=None)
|
:param key: Which of key to return from "cid", "type", and "extention"
:return: A list of x 'key'
| 6.33558 | 5.960032 | 1.063011 |
return [p() for p in findall(obj, self.list(),
forced_type=forced_type, cls=cls)]
|
def findall(self, obj, forced_type=None,
cls=anyconfig.models.processor.Processor)
|
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param forced_type: Forced processor type to find
:param cls: A class object to compare with 'ptype'
:return: A list of instances of processor classes to process 'obj'
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
| 6.426969 | 8.566407 | 0.750253 |
return find(obj, self.list(), forced_type=forced_type, cls=cls)
|
def find(self, obj, forced_type=None,
cls=anyconfig.models.processor.Processor)
|
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param forced_type: Forced processor type to find
:param cls: A class object to compare with 'ptype'
:return: an instance of processor class to process 'obj'
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
| 5.169708 | 12.703199 | 0.406961 |
options = common.filter_from_options("ac_dict", options)
if "ac_safe" in options:
options["typ"] = "safe" # Override it.
iopts = anyconfig.utils.filter_options(_YAML_INIT_KWARGS, options)
oopts = anyconfig.utils.filter_options(_YAML_INSTANCE_MEMBERS, options)
yml = ryaml.YAML(**iopts)
for attr, val in oopts.items():
setattr(yml, attr, val) # e.g. yml.preserve_quotes = True
return getattr(yml, fname)(*args)
|
def yml_fnc(fname, *args, **options)
|
:param fname:
"load" or "dump", not checked but it should be OK.
see also :func:`yml_load` and :func:`yml_dump`
:param args: [stream] for load or [cnf, stream] for dump
:param options: keyword args may contain "ac_safe" to load/dump safely
| 6.664323 | 6.216505 | 1.072037 |
ret = yml_fnc("load", stream, **options)
if ret is None:
return container()
return ret
|
def yml_load(stream, container, **options)
|
.. seealso:: :func:`anyconfig.backend.yaml.pyyaml.yml_load`
| 8.651622 | 8.541088 | 1.012941 |
try:
return ET.iterparse(xmlfile, events=("start-ns", ))
except TypeError:
return ET.iterparse(xmlfile, events=(b"start-ns", ))
|
def _iterparse(xmlfile)
|
Avoid bug in python 3.{2,3}. See http://bugs.python.org/issue9257.
:param xmlfile: XML file or file-like object
| 2.901761 | 3.201723 | 0.906312 |
nspaces = options.get("nspaces", None)
if nspaces is not None:
matched = _ET_NS_RE.match(tag)
if matched:
(uri, tag) = matched.groups()
prefix = nspaces.get(uri, False)
if prefix:
return "%s:%s" % (prefix, tag)
return tag
|
def _tweak_ns(tag, **options)
|
:param tag: XML tag element
:param nspaces: A namespaces dict, {uri: prefix}
:param options: Extra keyword options may contain 'nspaces' keyword option
provide a namespace dict, {uri: prefix}
>>> _tweak_ns("a", nspaces={})
'a'
>>> _tweak_ns("a", nspaces={"http://example.com/ns/val/": "val"})
'a'
>>> _tweak_ns("{http://example.com/ns/val/}a",
... nspaces={"http://example.com/ns/val/": "val"})
'val:a'
| 3.286595 | 3.783993 | 0.868552 |
key_itr = anyconfig.compat.from_iterable(d.keys() for d in dics)
return len(set(key_itr)) == sum(len(d) for d in dics)
|
def _dicts_have_unique_keys(dics)
|
:param dics: [<dict or dict-like object>], must not be [] or [{...}]
:return: True if all keys of each dict of 'dics' are unique
# Enable the followings if to allow dics is [], [{...}]:
# >>> all(_dicts_have_unique_keys([d]) for [d]
# ... in ({}, {'a': 0}, {'a': 1, 'b': 0}))
# True
# >>> _dicts_have_unique_keys([{}, {'a': 1}, {'b': 2, 'c': 0}])
# True
>>> _dicts_have_unique_keys([{}, {'a': 1}, {'a': 2}])
False
>>> _dicts_have_unique_keys([{}, {'a': 1}, {'b': 2}, {'b': 3, 'c': 0}])
False
>>> _dicts_have_unique_keys([{}, {}])
True
| 6.706601 | 8.21352 | 0.816532 |
dic_itr = anyconfig.compat.from_iterable(d.items() for d in dics)
return container(anyconfig.compat.OrderedDict(dic_itr))
|
def _merge_dicts(dics, container=dict)
|
:param dics: [<dict/-like object must not have same keys each other>]
:param container: callble to make a container object
:return: <container> object
>>> _merge_dicts(({}, ))
{}
>>> _merge_dicts(({'a': 1}, ))
{'a': 1}
>>> sorted(kv for kv in _merge_dicts(({'a': 1}, {'b': 2})).items())
[('a', 1), ('b', 2)]
| 8.607315 | 11.131159 | 0.773263 |
if val and options.get("ac_parse_value", False):
return anyconfig.parser.parse_single(val)
return val
|
def _parse_text(val, **options)
|
:return: Parsed value or value itself depends on 'ac_parse_value'
| 12.935771 | 6.247931 | 2.070409 |
elem.text = elem.text.strip()
if elem.text:
etext = _parse_text(elem.text, **options)
if len(elem) or elem.attrib:
subdic[text] = etext
else:
dic[elem.tag] = etext
|
def _process_elem_text(elem, dic, subdic, text="@text", **options)
|
:param elem: ET Element object which has elem.text
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating elem.text, dic and subdic as side effects
| 3.022506 | 3.4173 | 0.884472 |
adic = dict((_tweak_ns(a, **options), v) for a, v in elem.attrib.items())
if options.get("ac_parse_value", False):
return container(dict((k, anyconfig.parser.parse_single(v))
for k, v in adic.items()))
return container(adic)
|
def _parse_attrs(elem, container=dict, **options)
|
:param elem: ET Element object has attributes (elem.attrib)
:param container: callble to make a container object
:return: Parsed value or value itself depends on 'ac_parse_value'
| 7.007162 | 5.573742 | 1.257174 |
adic = _parse_attrs(elem, container=container, **options)
if not elem.text and not len(elem) and options.get("merge_attrs"):
dic[elem.tag] = adic
else:
subdic[attrs] = adic
|
def _process_elem_attrs(elem, dic, subdic, container=dict, attrs="@attrs",
**options)
|
:param elem: ET Element object or None
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating dic and subdic as side effects
| 4.57564 | 5.893813 | 0.776346 |
cdics = [elem_to_container(c, container=container, **options)
for c in elem]
merge_attrs = options.get("merge_attrs", False)
sdics = [container(elem.attrib) if merge_attrs else subdic] + cdics
if _dicts_have_unique_keys(sdics): # ex. <a><b>1</b><c>c</c></a>
dic[elem.tag] = _merge_dicts(sdics, container)
elif not subdic: # There are no attrs nor text and only these children.
dic[elem.tag] = cdics
else:
subdic[children] = cdics
|
def _process_children_elems(elem, dic, subdic, container=dict,
children="@children", **options)
|
:param elem: ET Element object or None
:param dic: <container> (dict[-like]) object converted from elem
:param subdic: Sub <container> object converted from elem
:param container: callble to make a container object
:param children: Tag for children nodes
:param options:
Keyword options, see the description of :func:`elem_to_container` for
more details.
:return: None but updating dic and subdic as side effects
| 5.178242 | 5.189109 | 0.997906 |
dic = container()
if elem is None:
return dic
elem.tag = _tweak_ns(elem.tag, **options) # {ns}tag -> ns_prefix:tag
subdic = dic[elem.tag] = container()
options["container"] = container
if elem.text:
_process_elem_text(elem, dic, subdic, **options)
if elem.attrib:
_process_elem_attrs(elem, dic, subdic, **options)
if len(elem):
_process_children_elems(elem, dic, subdic, **options)
elif not elem.text and not elem.attrib: # ex. <tag/>.
dic[elem.tag] = None
return dic
|
def elem_to_container(elem, container=dict, **options)
|
Convert XML ElementTree Element to a collection of container objects.
Elements are transformed to a node under special tagged nodes, attrs, text
and children, to store the type of these elements basically, however, in
some special cases like the followings, these nodes are attached to the
parent node directly for later convenience.
- There is only text element
- There are only children elements each has unique keys among all
:param elem: ET Element object or None
:param container: callble to make a container object
:param options: Keyword options
- nspaces: A namespaces dict, {uri: prefix} or None
- attrs, text, children: Tags for special nodes to keep XML info
- merge_attrs: Merge attributes and mix with children nodes, and the
information of attributes are lost after its transformation.
| 3.759933 | 3.868504 | 0.971935 |
if not all(nt in options for nt in _TAGS):
tags = options.get("tags", {})
for ntype, tag in _TAGS.items():
options[ntype] = tags.get(ntype, tag)
return options
|
def _complement_tag_options(options)
|
:param options: Keyword options :: dict
>>> ref = _TAGS.copy()
>>> ref["text"] = "#text"
>>> opts = _complement_tag_options({"tags": {"text": ref["text"]}})
>>> del opts["tags"] # To simplify comparison.
>>> sorted(opts.items())
[('attrs', '@attrs'), ('children', '@children'), ('text', '#text')]
| 4.839284 | 4.711285 | 1.027169 |
tree = container()
if root is None:
return tree
if nspaces is not None:
for uri, prefix in nspaces.items():
root.attrib["xmlns:" + prefix if prefix else "xmlns"] = uri
return elem_to_container(root, container=container, nspaces=nspaces,
**_complement_tag_options(options))
|
def root_to_container(root, container=dict, nspaces=None, **options)
|
Convert XML ElementTree Root Element to a collection of container objects.
:param root: etree root object or None
:param container: callble to make a container object
:param nspaces: A namespaces dict, {uri: prefix} or None
:param options: Keyword options,
- tags: Dict of tags for special nodes to keep XML info, attributes,
text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"}
| 4.388606 | 4.494826 | 0.976368 |
for attr, val in anyconfig.compat.iteritems(obj):
parent.set(attr, to_str(val))
|
def _elem_set_attrs(obj, parent, to_str)
|
:param obj: Container instance gives attributes of XML Element
:param parent: XML ElementTree parent node object
:param to_str: Callable to convert value to string or None
:param options: Keyword options, see :func:`container_to_etree`
:return: None but parent will be modified
| 5.255966 | 8.171075 | 0.64324 |
for child in children_nodes: # child should be a dict-like object.
for ckey, cval in anyconfig.compat.iteritems(child):
celem = ET.Element(ckey)
container_to_etree(cval, parent=celem, **options)
yield celem
|
def _elem_from_descendants(children_nodes, **options)
|
:param children_nodes: A list of child dict objects
:param options: Keyword options, see :func:`container_to_etree`
| 6.826037 | 5.625661 | 1.213375 |
elem = ET.Element(key)
vals = val if anyconfig.utils.is_iterable(val) else [val]
for val_ in vals:
container_to_etree(val_, parent=elem, to_str=to_str, **options)
if parent is None: # 'elem' is the top level etree.
return elem
parent.append(elem)
return parent
|
def _get_or_update_parent(key, val, to_str, parent=None, **options)
|
:param key: Key of current child (dict{,-like} object)
:param val: Value of current child (dict{,-like} object or [dict{,...}])
:param to_str: Callable to convert value to string
:param parent: XML ElementTree parent node object or None
:param options: Keyword options, see :func:`container_to_etree`
| 5.463691 | 4.569892 | 1.195584 |
if to_str is None:
to_str = _to_str_fn(**options)
if not anyconfig.utils.is_dict_like(obj):
if parent is not None and obj:
parent.text = to_str(obj) # Parent is a leaf text node.
return parent # All attributes and text should be set already.
options = _complement_tag_options(options)
(attrs, text, children) = operator.itemgetter(*_ATC)(options)
for key, val in anyconfig.compat.iteritems(obj):
if key == attrs:
_elem_set_attrs(val, parent, to_str)
elif key == text:
parent.text = to_str(val)
elif key == children:
for celem in _elem_from_descendants(val, **options):
parent.append(celem)
else:
parent = _get_or_update_parent(key, val, to_str, parent=parent,
**options)
return ET.ElementTree(parent)
|
def container_to_etree(obj, parent=None, to_str=None, **options)
|
Convert a dict-like object to XML ElementTree.
:param obj: Container instance to convert to
:param parent: XML ElementTree parent node object or None
:param to_str: Callable to convert value to string or None
:param options: Keyword options,
- tags: Dict of tags for special nodes to keep XML info, attributes,
text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"}
| 4.793162 | 4.814039 | 0.995663 |
try:
tree.write(stream, encoding="utf-8", xml_declaration=True)
except TypeError:
tree.write(stream, encoding="unicode", xml_declaration=True)
|
def etree_write(tree, stream)
|
Write XML ElementTree 'root' content into 'stream'.
:param tree: XML ElementTree object
:param stream: File or file-like object can write to
| 2.002206 | 2.593668 | 0.771959 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.