code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def tc(g):
    """Given a graph @g, returns the transitive closure of @g"""
    closure = {}
    # Process components in the (reverse topological) order produced by
    # tarjan(), so every successor component is already in `closure`.
    for component in tarjan(g):
        successors = set()
        for vertex in component:
            successors.update(g[vertex])
        reachable = set()
        for succ in successors:
            assert succ in closure or succ in component
            reachable.add(succ)
            reachable.update(closure.get(succ, ()))
        # Vertices of a non-trivial SCC reach each other.
        if len(component) > 1:
            reachable.update(component)
        frozen = tuple(reachable)
        for vertex in component:
            closure[vertex] = frozen
    return closure
3.538766
3.45786
1.023398
def list_feeds(self):
    """Output a list of all feed names"""
    parser = configparser.ConfigParser()
    parser.read(self.data_filename)
    return parser.sections()
6.400015
5.612605
1.140293
def retrieve_config_file(self):
    """Retrieve config file"""
    # A --configfile flag on the command line wins over the default path.
    configfile = self.args.get("configfile")
    if configfile:
        return configfile
    return os.path.expanduser('~/.config/greg/greg.conf')
4.657415
4.262559
1.092634
def retrieve_data_directory(self):
    """Retrieve the data directory

    Look first into config_filename_global then into
    config_filename_user. The latter takes preeminence.
    """
    args = self.args
    # A command-line flag overrides anything in the config files.
    try:
        if args['datadirectory']:
            aux.ensure_dir(args['datadirectory'])
            return args['datadirectory']
    except KeyError:
        pass
    config = configparser.ConfigParser()
    config.read([config_filename_global, self.config_filename_user])
    section = config.default_section
    data_path = config.get(section, 'Data directory',
                           fallback='~/.local/share/greg')
    data_path_expanded = os.path.expanduser(data_path)
    aux.ensure_dir(data_path_expanded)
    # The path is already expanded; the old code expanded it a second
    # time on return, which is redundant (expanduser is idempotent here).
    return data_path_expanded
3.599406
3.105933
1.158881
def retrieve_config(self, value, default):
    """Retrieves a value (with a certain fallback) from the config files
    (looks first into config_filename_global then into
    config_filename_user. The latest takes preeminence) if the command
    line flag for the value is used, that overrides everything else
    """
    # Command-line flags win over anything found in the config files.
    try:
        if self.args[value]:
            return self.args[value]
    except KeyError:
        pass
    # A feed-specific section beats the [DEFAULT] section.
    if self.config.has_section(self.name):
        section = self.name
    else:
        section = self.config.default_section
    return self.config.get(section, value, fallback=default)
3.78509
3.462694
1.093106
def retrieve_download_path(self):
    """Retrieves the download path (looks first into
    config_filename_global then into the [DEFAULT], then the [feed],
    section of config_filename_user. The latest takes preeminence)
    """
    if self.config.has_section(self.name):
        section = self.name
    else:
        section = self.config.default_section
    download_path = self.config.get(
        section, 'Download directory', fallback='~/Podcasts')
    subdirectory = self.config.get(
        section, 'Create subdirectories', fallback='no')
    return [os.path.expanduser(download_path), subdirectory]
4.47022
3.920224
1.140297
def will_tag(self):
    """Check whether the feed should be tagged"""
    # Tag only when both requested and the Stagger module is available.
    if self.retrieve_config('Tag', 'no') != 'yes':
        return False
    if aux.staggerexists:
        return True
    print(("You want me to tag {0}, but you have not installed "
           "the Stagger module. I cannot honour your request.").
          format(self.name), file=sys.stderr, flush=True)
    return False
7.134449
6.777534
1.052662
def how_many(self):
    """Ascertain where to start downloading, and how many entries."""
    if self.linkdates != []:
        # Sanity check: an entry date in the future is probably a
        # mistake, so fall back to the current local time instead.
        latest = max(self.linkdates)
        now = list(time.localtime())
        if latest <= now:
            currentdate = latest
        else:
            currentdate = now
            print(("This entry has its date set in the future. "
                   "I will use your current local time as its date "
                   "instead."), file=sys.stderr, flush=True)
        stop = sys.maxsize
    else:
        # Never synced before: download from the beginning of time,
        # honouring the 'firstsync' limit.
        currentdate = [1, 1, 1, 0, 0]
        firstsync = self.retrieve_config('firstsync', '1')
        stop = sys.maxsize if firstsync == 'all' else int(firstsync)
    return currentdate, stop
5.686975
5.295127
1.074002
def fix_linkdate(self, entry):
    """Give a date for the entry, depending on feed.sync_by_date

    Save it as feed.linkdate
    """
    if not self.sync_by_date:
        entry.linkdate = list(time.localtime())
        return
    # Prefer the published date, then the updated date; fall back to
    # the local time when neither parses.
    for attribute in ("published_parsed", "updated_parsed"):
        try:
            parsed = list(getattr(entry, attribute))
        except (AttributeError, TypeError):
            continue
        entry.linkdate = parsed
        self.linkdate = list(parsed)
        return
    print(("This entry doesn't seem to have a parseable date. "
           "I will use your local time instead."),
          file=sys.stderr, flush=True)
    entry.linkdate = list(time.localtime())
    self.linkdate = list(time.localtime())
2.599968
2.129679
1.220826
def retrieve_mime(self):
    """Check the mime-type to download"""
    mime = self.retrieve_config('mime', 'audio')
    # parse_for_download expects a dict carrying a "number" key.
    return aux.parse_for_download({"number": mime})
23.486034
19.710093
1.191574
def download_entry(self, entry):
    """Find entry link and download entry

    Collects candidate download links from the entry's enclosures (or,
    when 'ignoreenclosures' is set, from the entry link itself), then
    downloads every link not already recorded in this feed's info file.
    Returns True iff the last considered entry was actually downloaded.
    """
    downloadlinks = {}
    downloaded = False
    ignoreenclosures = self.retrieve_config('ignoreenclosures', 'no')
    notype = self.retrieve_config('notype', 'no')
    if ignoreenclosures == 'no':
        for enclosure in entry.enclosures:
            if notype == 'yes':
                # Accept every enclosure regardless of its mime-type.
                downloadlinks[urlparse(enclosure["href"]).path.split(
                    "/")[-1]] = enclosure["href"]  # preserve original name
            else:
                try:
                    # We will download all enclosures of the desired
                    # mime-type
                    if any([mimetype in enclosure["type"] for mimetype in
                            self.mime]):
                        downloadlinks[urlparse(
                            enclosure["href"]).path.split(
                            "/")[-1]] = enclosure["href"]
                        # preserve original name
                except KeyError:
                    print("This podcast carries no information about "
                          "enclosure types. Try using the notype "
                          "option in your greg.conf", file=sys.stderr,
                          flush=True)
    else:
        # No enclosures wanted: treat the entry link itself as the file.
        # NOTE(review): this keys on the URL *query* (not path) — looks
        # deliberate for feeds that encode the name there; confirm.
        downloadlinks[urlparse(entry.link).query.split(
            "/")[-1]] = entry.link
    for podname in downloadlinks:
        # Skip (name, date) pairs that were already downloaded.
        if (podname, entry.linkdate) not in zip(self.entrylinks,
                                                self.linkdates):
            try:
                title = entry.title
            except:
                title = podname
            try:
                sanitizedsummary = aux.html_to_text(entry.summary)
                if sanitizedsummary == "":
                    sanitizedsummary = "No summary available"
            except:
                sanitizedsummary = "No summary available"
            try:
                placeholders = Placeholders(
                    self, entry, downloadlinks[podname], podname, title,
                    sanitizedsummary)
                placeholders = aux.check_directory(placeholders)
                condition = aux.filtercond(placeholders)
                if condition:
                    print("Downloading {} -- {}".format(title, podname))
                    aux.download_handler(self, placeholders)
                    if self.willtag:
                        aux.tag(placeholders)
                    downloaded = True
                else:
                    print("Skipping {} -- {}".format(title, podname))
                    downloaded = False
                if self.info:
                    with open(self.info, 'a') as current:
                        # We write to file this often to ensure that
                        # downloaded entries count as downloaded.
                        current.write(''.join([podname, ' ',
                                               str(entry.linkdate), '\n']))
            except URLError:
                sys.exit(("... something went wrong. "
                          "Are you connected to the internet?"))
    return downloaded
4.74614
4.674875
1.015244
def main():
    """Parse the args and call whatever function was selected"""
    args = parser.parse_args()
    # argparse only sets .func when a subcommand was given.
    if not hasattr(args, "func"):
        parser.print_usage()
        parser.exit(1)
    args.func(vars(args))
3.486479
2.685604
1.29821
def add(args):
    """Add a new feed"""
    session = c.Session(args)
    if args["name"] in session.feeds.sections():
        sys.exit("You already have a feed with that name.")
    if args["name"] in ["all", "DEFAULT"]:
        # BUG FIX: the old message lacked a space between sentences
        # ("...purpose.Please choose...").
        sys.exit(
            ("greg uses ""{}"" for a special purpose. "
             "Please choose another name for your feed.").format(
                 args["name"]))
    # Store every explicit option except the dispatch/bookkeeping keys.
    entry = {}
    for key, value in args.items():
        if value is not None and key != "func" and key != "name":
            entry[key] = value
    session.feeds[args["name"]] = entry
    with open(session.data_filename, 'w') as configfile:
        session.feeds.write(configfile)
5.360404
5.011939
1.069527
def remove(args):
    """Remove the feed given in <args>"""
    session = c.Session(args)
    if args["name"] not in session.feeds:
        sys.exit("You don't have a feed with that name.")
    prompt = ("Are you sure you want to remove the {} "
              " feed? (y/N) ").format(args["name"])
    # Anything but an explicit yes aborts.
    if input(prompt) not in ("y", "Y"):
        return 0
    session.feeds.remove_section(args["name"])
    with open(session.data_filename, 'w') as configfile:
        session.feeds.write(configfile)
    # The per-feed data file may never have been created.
    try:
        os.remove(os.path.join(session.data_dir, args["name"]))
    except FileNotFoundError:
        pass
3.404809
3.174767
1.072459
def info(args):
    """Provide information of a number of feeds"""
    session = c.Session(args)
    if "all" in args["names"]:
        targets = session.list_feeds()
    else:
        targets = args["names"]
    for feed in targets:
        aux.pretty_print(session, feed)
6.439721
5.465878
1.178168
def sync(args):
    """Implement the 'greg sync' command"""
    import operator
    session = c.Session(args)
    if "all" in args["names"]:
        targetfeeds = session.list_feeds()
    else:
        targetfeeds = []
        for name in args["names"]:
            if name not in session.feeds:
                print("You don't have a feed called {}."
                      .format(name), file=sys.stderr, flush=True)
            else:
                targetfeeds.append(name)
    for target in targetfeeds:
        feed = c.Feed(session, target, None)
        if not feed.wentwrong:
            # NOTE(review): `feed.podcast.target.title` looks odd —
            # presumably the podcast's own title attribute; confirm.
            try:
                title = feed.podcast.target.title
            except AttributeError:
                title = target
            print("Checking", title, end="...\n")
            currentdate, stop = feed.how_many()
            entrycounter = 0
            entries_to_download = feed.podcast.entries
            for entry in entries_to_download:
                feed.fix_linkdate(entry)
            # Sort entries_to_download, but only if you want to download as
            # many as there are
            if stop >= len(entries_to_download):
                entries_to_download.sort(key=operator.attrgetter("linkdate"),
                                         reverse=False)
            for entry in entries_to_download:
                if entry.linkdate > currentdate:
                    downloaded = feed.download_entry(entry)
                    entrycounter += downloaded
                if entrycounter >= stop:
                    break
            print("Done")
        else:
            # BUG FIX: the old code passed the Feed object itself to
            # str.join (TypeError at runtime); use the feed's name.
            msg = ''.join(["I cannot sync ", target,
                           " just now. Are you connected to the internet?"])
            print(msg, file=sys.stderr, flush=True)
4.274459
4.233695
1.009629
def check(args):
    """Implement the 'greg check' command"""
    session = c.Session(args)
    if str(args["url"]) != 'None':
        url = args["url"]
        name = "DEFAULT"
    else:
        try:
            url = session.feeds[args["feed"]]["url"]
            name = args["feed"]
        except KeyError:
            sys.exit("You don't appear to have a feed with that name.")
    podcast = aux.parse_podcast(url)
    # Print an index of the entries so the user can pick issues for
    # `greg download`.
    for number, entry in enumerate(podcast.entries):
        print(number, end=": ")
        # Entries behave like dicts; a missing key means the field is
        # absent (narrowed from bare `except` to KeyError).
        try:
            print(entry["title"], end=" (")
        except KeyError:
            print(entry["link"], end=" (")
        try:
            print(entry["updated"], end=")")
        except KeyError:
            print("", end=")")
        print()
    # Persist the parsed feed so `greg download` can reuse it.
    dumpfilename = os.path.join(session.data_dir, 'feeddump')
    with open(dumpfilename, mode='wb') as dumpfile:
        pickle.dump([name, podcast], dumpfile)
3.755402
3.819617
0.983188
def download(args):
    """Implement the 'greg download' command"""
    session = c.Session(args)
    issues = aux.parse_for_download(args)
    if issues == ['']:
        sys.exit(
            "You need to give a list of issues, of the form ""a, b-c, d...""")
    dumpfilename = os.path.join(session.data_dir, 'feeddump')
    if not os.path.isfile(dumpfilename):
        sys.exit(
            ("You need to run ""greg check"
             "<feed>"" before using ""greg download""."))
    with open(dumpfilename, mode='rb') as dumpfile:
        dump = pickle.load(dumpfile)
    try:
        feed = c.Feed(session, dump[0], dump[1])
    except Exception:
        sys.exit((
            "... something went wrong."
            "Are you sure your last ""greg check"" went well?"))
    for number in issues:
        # SECURITY FIX: int() instead of eval() — issue numbers come
        # from the command line and must not be executed as code.
        entry = dump[1].entries[int(number)]
        feed.info = []
        feed.entrylinks = []
        feed.fix_linkdate(entry)
        feed.download_entry(entry)
7.535344
7.089897
1.062828
def parse_podcast(url):
    """Try to parse podcast"""
    # feedparser never raises on bad input; it records the problem in
    # "bozo_exception" instead. Only network-level (urlopen) failures
    # are reported to the user.
    try:
        podcast = feedparser.parse(url)
        wentwrong = "urlopen" in str(podcast["bozo_exception"])
    except KeyError:
        wentwrong = False
    if wentwrong:
        print("Error: ", url, ": ", str(podcast["bozo_exception"]))
    return podcast
4.828053
4.768523
1.012484
def check_directory(placeholders):
    """Find out, and create if needed, the directory in which the feed
    will be downloaded"""
    feed = placeholders.feed
    args = feed.args
    placeholders.directory = "This very directory"  # wink, wink
    placeholders.fullpath = os.path.join(
        placeholders.directory, placeholders.filename)
    # A command-line download directory, when given, is created first.
    try:
        if args["downloaddirectory"]:
            ensure_dir(args["downloaddirectory"])
            placeholders.directory = args["downloaddirectory"]
    except KeyError:
        pass
    download_path = os.path.expanduser(
        feed.retrieve_config("Download Directory", "~/Podcasts"))
    subdirectory = feed.retrieve_config("Create subdirectory", "no")
    if "no" in subdirectory:
        placeholders.directory = download_path
    elif "yes" in subdirectory:
        # Each feed gets its own subdirectory, named from a template.
        subdnametemplate = feed.retrieve_config(
            "subdirectory_name", "{podcasttitle}")
        subdname = substitute_placeholders(subdnametemplate, placeholders)
        placeholders.directory = os.path.join(download_path, subdname)
    ensure_dir(placeholders.directory)
    placeholders.fullpath = os.path.join(
        placeholders.directory, placeholders.filename)
    return placeholders
4.391221
4.220084
1.040553
def parse_for_download(args):
    """Turn an argument such as 4, 6-8, 10 into a list such as [4,6,7,8,10]"""
    # Concatenate all the number arguments and drop every space, so
    # that "4, 6 - 8" and "4,6-8" are treated identically.
    joined = "".join(args["number"]).replace(" ", "")
    list_of_feeds = []
    for group in joined.split(sep=","):
        if "-" not in group:
            list_of_feeds.append(group)
        else:
            # A range such as "6-8" expands to ["6", "7", "8"].
            extremes = group.split(sep="-")
            # SECURITY FIX: int() instead of eval() — these strings
            # come straight from the command line and must not be
            # executed as code.
            list_of_feeds += [str(x) for x in range(
                int(extremes[0]), int(extremes[1]) + 1)]
    return list_of_feeds
4.611574
4.150566
1.111071
def tag(placeholders):
    """Tag the file at podpath with the information in podcast and entry"""
    # We first recover the name of the file to be tagged...
    template = placeholders.feed.retrieve_config("file_to_tag", "(unknown)")
    filename = substitute_placeholders(template, placeholders)
    podpath = os.path.join(placeholders.directory, filename)
    # ... then build a dictionary of tags and values, starting from the
    # feed's defaults.
    tagdict = placeholders.feed.defaulttagdict
    try:
        # Any "tag_*" option in the feed's own section overrides the
        # default for that tag.
        feedoptions = placeholders.feed.config.options(placeholders.name)
        for option in feedoptions:
            if "tag_" in option:
                tagdict[option.replace("tag_", "")] = \
                    placeholders.feed.config[placeholders.name][option]
    except configparser.NoSectionError:
        pass
    # Write (or remove, when the value is empty) each frame.
    for tagname in tagdict:
        metadata = substitute_placeholders(tagdict[tagname], placeholders)
        if metadata:
            stagger.util.set_frames(podpath, {tagname: metadata})
        else:
            stagger.util.remove_frames(podpath, tagname)
7.724276
7.294119
1.058973
def download_handler(feed, placeholders):
    """Parse and execute the download handler"""
    import shlex
    handler = feed.retrieve_config('downloadhandler', 'greg')
    if handler == 'greg':
        # Never overwrite an existing file: keep appending underscores
        # until the name is free.
        while os.path.isfile(placeholders.fullpath):
            placeholders.fullpath = placeholders.fullpath + '_'
            placeholders.filename = placeholders.filename + '_'
        urlretrieve(placeholders.link, placeholders.fullpath)
    else:
        # A custom handler: substitute placeholders into each word and
        # run it; a nonzero exit status is reported as a download error.
        instruction_list = [substitute_placeholders(part, placeholders)
                            for part in shlex.split(handler)]
        if subprocess.call(instruction_list):
            raise URLError
6.383
6.178995
1.033016
def parse_feed_info(infofile):
    """Take a feed file in .local/share/greg/data and return a list of
    links and of dates"""
    import ast
    entrylinks = []
    linkdates = []
    try:
        with open(infofile, 'r') as previous:
            for line in previous:
                # Each line is "<link> <date-list>": the link of an
                # already-downloaded entry followed by its date.
                entrylinks.append(line.split(sep=' ')[0])
                # The date is stored as the repr of a list (converted
                # from a time.struct_time object).
                # SECURITY FIX: ast.literal_eval instead of eval, so a
                # tampered data file cannot execute arbitrary code.
                linkdates.append(ast.literal_eval(
                    line.split(sep=' ', maxsplit=1)[1]))
    except FileNotFoundError:
        pass
    return entrylinks, linkdates
4.768693
4.422469
1.078288
def pretty_print(session, feed):
    """Print the dictionary entry of a feed in a nice way."""
    if feed not in session.feeds:
        print("You don't have a feed called {}.".format(feed),
              file=sys.stderr, flush=True)
        return
    print()
    feed_info = os.path.join(session.data_dir, feed)
    entrylinks, linkdates = parse_feed_info(feed_info)
    print(feed)
    print("-" * len(feed))
    print(''.join([" url: ", session.feeds[feed]["url"]]))
    # Only feeds that have been synced before have recorded dates.
    if linkdates != []:
        print(''.join([" Next sync will download from: ", time.strftime(
            "%d %b %Y %H:%M:%S", tuple(max(linkdates))), "."]))
5.234652
5.047662
1.037045
def substitute_placeholders(inputstring, placeholders):
    """Take a string with placeholders, and return the strings with
    substitutions."""
    return inputstring.format(
        link=placeholders.link,
        filename=placeholders.filename,
        directory=placeholders.directory,
        fullpath=placeholders.fullpath,
        title=placeholders.title,
        filename_title=placeholders.filename_title,
        date=placeholders.date_string(),
        podcasttitle=placeholders.podcasttitle,
        filename_podcasttitle=placeholders.filename_podcasttitle,
        name=placeholders.name,
        subtitle=placeholders.sanitizedsubtitle,
        entrysummary=placeholders.entrysummary)
4.431804
4.632111
0.956757
ctx.index[v] = len(ctx.index) ctx.lowlink[v] = ctx.index[v] ctx.S.append(v) ctx.S_set.add(v) it = iter(ctx.g.get(v, ())) ctx.T.append((it,False,v,None))
def _tarjan_head(ctx, v)
Used by @tarjan and @tarjan_iter. This is the head of the main iteration
4.273627
4.355569
0.981187
def _tarjan_body(ctx, it, v):
    """Used by @tarjan and @tarjan_iter. This is the body of the main
    iteration"""
    for w in it:
        if w not in ctx.index:
            # Simulate recursing into w: save a frame to resume later.
            ctx.T.append((it, True, v, w))
            _tarjan_head(ctx, w)
            return
        if w in ctx.S_set:
            ctx.lowlink[v] = min(ctx.lowlink[v], ctx.index[w])
    if ctx.lowlink[v] == ctx.index[v]:
        # v is the root of a strongly connected component: pop it off.
        scc = []
        w = None
        while v != w:
            w = ctx.S.pop()
            scc.append(w)
            ctx.S_set.remove(w)
        ctx.ret.append(scc)
3.267499
3.512084
0.930359
def tarjan_iter(g):
    """Returns the strongly connected components of the graph @g in a
    topological order.

    @g is the graph represented as a dictionary
    { <vertex> : <successors of vertex> }.

    This function does not recurse. It returns an iterator.
    """
    ctx = TarjanContext(
        g = g,
        S = [],
        S_set = set(),
        index = {},
        lowlink = {},
        T = [],
        ret = [])
    main_iter = iter(g)
    while True:
        try:
            v = next(main_iter)
        except StopIteration:
            return
        if v not in ctx.index:
            _tarjan_head(ctx, v)
        # Drain the explicit recursion stack; each frame is
        # (iterator, resumed?, vertex, child) as pushed by _tarjan_head
        # and _tarjan_body.
        while ctx.T:
            it, inside, v, w = ctx.T.pop()
            if inside:
                # Returning from the simulated recursion into w.
                ctx.lowlink[v] = min(ctx.lowlink[w], ctx.lowlink[v])
            _tarjan_body(ctx, it, v)
        # Yield components one at a time as they complete, keeping
        # memory bounded.
        if ctx.ret:
            assert len(ctx.ret) == 1
            yield ctx.ret.pop()
3.874801
4.171292
0.928921
def tarjan_recursive(g):
    """Returns the strongly connected components of the graph @g in a
    topological order.

    @g is the graph represented as a dictionary
    { <vertex> : <successors of vertex> }.

    This function recurses --- large graphs may cause a stack overflow.
    """
    stack = []
    on_stack = set()
    index = {}
    lowlink = {}
    components = []

    def strongconnect(v):
        # Standard Tarjan bookkeeping: DFS index and lowlink start equal.
        index[v] = len(index)
        lowlink[v] = index[v]
        stack.append(v)
        on_stack.add(v)
        for w in g.get(v, ()):
            if w not in index:
                strongconnect(w)
                lowlink[v] = min(lowlink[w], lowlink[v])
            elif w in on_stack:
                lowlink[v] = min(lowlink[v], index[w])
        if lowlink[v] == index[v]:
            # v roots a strongly connected component: pop it off.
            scc = []
            w = None
            while v != w:
                w = stack.pop()
                scc.append(w)
                on_stack.remove(w)
            components.append(scc)

    for v in g:
        if v not in index:
            strongconnect(v)
    return components
1.914935
1.924737
0.994907
def symm_block_tridiag_matmul(H_diag, H_upper_diag, v):
    """Compute matrix-vector product with a symmetric block tridiagonal
    matrix H and vector v.

    :param H_diag: (T, D, D) block diagonal terms of H
    :param H_upper_diag: (T-1, D, D) upper block diagonal terms of H
    :param v: (T, D) vector to multiply
    :return: H * v as a (T, D) array
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    assert v.shape == (T, D)
    # Batched block-times-vector products; the trailing singleton axis
    # turns each v[t] into a column vector.
    result = np.matmul(H_diag, v[..., None])[..., 0]
    # Contribution of the upper-diagonal blocks...
    result[:-1] += np.matmul(H_upper_diag, v[1:][..., None])[..., 0]
    # ...and of their transposes (the lower-diagonal blocks, by symmetry).
    lower_blocks = np.swapaxes(H_upper_diag, -2, -1)
    result[1:] += np.matmul(lower_blocks, v[:-1][..., None])[..., 0]
    return result
2.065769
2.290086
0.902049
def convert_block_tridiag_to_banded(H_diag, H_upper_diag, lower=True):
    """convert blocks to banded matrix representation required for scipy.
    we are using the "lower form."
    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html

    :param H_diag: (T, D, D) diagonal blocks
    :param H_upper_diag: (T-1, D, D) upper-diagonal blocks
    :param lower: return the lower band form (row d holds the d-th
        subdiagonal); otherwise the band matrix is transposed via
        transpose_lower_banded_matrix.
    :return: (2*D, T*D) banded array
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    H_lower_diag = np.swapaxes(H_upper_diag, -2, -1)
    # A symmetric block tridiagonal matrix has bandwidth 2*D - 1, so 2*D
    # band rows suffice in lower form.
    ab = np.zeros((2 * D, T * D))
    # Fill in blocks along the diagonal
    for d in range(D):
        # Get indices of (-d)-th diagonal of H_diag
        i = np.arange(d, D)
        j = np.arange(0, D - d)
        # Zero-pad each block's diagonal to length D so rows concatenate
        # with a fixed stride across the T blocks.
        h = np.column_stack((H_diag[:, i, j], np.zeros((T, d))))
        ab[d] = h.ravel()
    # Fill in lower left corner of blocks below the diagonal
    for d in range(0, D):
        # Get indices of (-d)-th diagonal of H_diag
        i = np.arange(d, D)
        j = np.arange(0, D - d)
        h = np.column_stack((H_lower_diag[:, i, j], np.zeros((T - 1, d))))
        ab[D + d, :D * (T - 1)] = h.ravel()
    # Fill in upper corner of blocks below the diagonal
    for d in range(1, D):
        # Get indices of (+d)-th diagonal of H_lower_diag
        i = np.arange(0, D - d)
        j = np.arange(d, D)
        h = np.column_stack((np.zeros((T - 1, d)), H_lower_diag[:, i, j]))
        ab[D - d, :D * (T - 1)] += h.ravel()
    return ab if lower else transpose_lower_banded_matrix(ab)
2.20881
2.2193
0.995273
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
    """use scipy.linalg.solve_banded to solve a symmetric block
    tridiagonal system

    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
    """
    from scipy.linalg import solveh_banded
    # Build the banded representation unless the caller supplied one.
    if ab is None:
        ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag)
    x = solveh_banded(ab, v.ravel(), lower=True)
    return x.reshape(v.shape)
2.490966
2.529546
0.984748
def sample_block_tridiag(H_diag, H_upper_diag):
    """helper function for sampling block tridiag gaussians.
    this is only for speed comparison with the solve approach.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    # Zero potentials everywhere except the node and pairwise precisions.
    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))
    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    J_node = H_diag
    h_node = np.zeros((T, D))
    return info_sample(J_init, h_init, 0,
                       J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
                       J_node, h_node, np.zeros(T))
2.930783
2.945758
0.994916
def logdet_symm_block_tridiag(H_diag, H_upper_diag):
    """compute the log determinant of a positive definite,
    symmetric block tridiag matrix.  Use the Kalman
    info filter to do so.  Specifically, the KF computes
    the normalizer:

        log Z = 1/2 h^T J^{-1} h -1/2 log |J| +n/2 log 2 \pi

    We set h=0 to get -1/2 log |J| + n/2 log 2 \pi and from
    this we solve for log |J|.
    """
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    # All linear potentials (h) and constants are zero; only the
    # precision blocks carry information.
    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))
    log_Z_init = 0
    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    log_Z_pair = 0
    J_node = H_diag
    h_node = np.zeros((T, D))
    log_Z_node = 0
    logZ, _, _ = kalman_info_filter(J_init, h_init, log_Z_init,
                                    J_11, J_21, J_22, h_1, h_2, log_Z_pair,
                                    J_node, h_node, log_Z_node)
    # With h = 0:  logZ = -1/2 log |J| + n/2 log 2 \pi,  so solve for log |J|.
    return -2 * (logZ - (T * D) / 2 * np.log(2 * np.pi))
3.011415
2.642969
1.139406
def compute_symm_block_tridiag_covariances(H_diag, H_upper_diag):
    """use the info smoother to solve a symmetric block tridiagonal system"""
    T, D, _ = H_diag.shape
    assert H_diag.ndim == 3 and H_diag.shape[2] == D
    assert H_upper_diag.shape == (T - 1, D, D)
    # Zero linear potentials: only the precision blocks matter here.
    J_init = J_11 = J_22 = np.zeros((D, D))
    h_init = h_1 = h_2 = np.zeros((D,))
    J_21 = np.swapaxes(H_upper_diag, -1, -2)
    J_node = H_diag
    h_node = np.zeros((T, D))
    _, _, sigmas, E_xt_xtp1 = \
        info_E_step(J_init, h_init, 0,
                    J_11, J_21, J_22, h_1, h_2, np.zeros((T - 1)),
                    J_node, h_node, np.zeros(T))
    return sigmas, E_xt_xtp1
3.28748
3.282142
1.001626
def resample_zeroinflation_variables(self):
    """There's no way around the fact that we have to look at every
    data point, even the zeros here.

    Resamples the per-(time, neuron) exposure mask and stores the
    exposed observations as a CSR matrix in self.masked_data.
    """
    # TODO: move this to cython?
    T, N, C, D, b = self.T, self.D_emission, self.C, self.D, self.emission_distn.b
    # CSR pieces of the new masked-data matrix, built row by row.
    indptr = [0]
    indices = []
    vals = []
    offset = 0
    X = np.hstack((self.gaussian_states, self.inputs))
    for t in range(T):
        # Evaluate probability of data
        y_t = np.zeros(N)
        # Scatter the stored nonzeros of row t into a dense vector.
        ns_t = self.data.indices[self.data.indptr[t]:self.data.indptr[t+1]]
        y_t[ns_t] = self.data.data[self.data.indptr[t]:self.data.indptr[t+1]]
        ll = self.emission_distn._elementwise_log_likelihood((X[t], y_t))
        ll = ll.ravel()
        # Evaluate the probability that each emission was "exposed",
        # i.e. p(z_tn = 1 | y_tn, x_tn)
        log_p_exposed = np.log(self.rho) + ll
        # Normalize against the zero-inflation alternative; a nonzero
        # observation can only come from an exposed emission.
        log_p_exposed -= np.log(np.exp(log_p_exposed)
                                + (1 - self.rho) * (y_t == 0))
        # Sample zero inflation mask
        z_t = np.random.rand(N) < np.exp(log_p_exposed)
        # Construct the sparse matrix
        t_inds = np.where(z_t)[0]
        indices.append(t_inds)
        vals.append(y_t[t_inds])
        offset += t_inds.size
        indptr.append(offset)
    # Construct a sparse matrix
    vals = np.concatenate(vals)
    indices = np.concatenate(indices)
    indptr = np.array(indptr)
    self.masked_data = csr_matrix((vals, indices, indptr), shape=(T, N))
3.349323
3.308909
1.012213
def expected_log_likelihood(self, mus, sigmas, y):
    """Compute the expected log likelihood for a mean and
    covariance of x and an observed value of y.

    For each output n, the (Poisson-style) expected log likelihood is
    y_n * E[log lambda_n] - E[lambda_n], where
    E[lambda_n] = exp(a_n^T mu + 1/2 a_n^T Sigma a_n).
    """
    # Flatten the covariance
    T = mus.shape[0]
    D = self.D_in
    sigs_vec = sigmas.reshape((T, D ** 2))

    # Compute the log likelihood of each column
    ll = np.zeros((T, self.D_out))
    for n in range(self.D_out):
        an = self.A[n]
        E_loglmbda = np.dot(mus, an)
        ll[:, n] += y[:, n] * E_loglmbda

        # Vectorized log likelihood calculation
        aa_vec = np.outer(an, an).reshape((D ** 2,))
        # BUG FIX: the old code *assigned* here (ll[:,n] = -np.exp(...)),
        # discarding the y * E[log lambda] term added above; it must be
        # subtracted instead.
        ll[:, n] -= np.exp(E_loglmbda + 0.5 * np.dot(sigs_vec, aa_vec))

    return ll
4.596652
4.766949
0.964276
def max_likelihood(self, data, weights=None, stats=None):
    """Maximize the likelihood for given data

    Fits one logistic regression (without intercept) per output
    dimension and stores the coefficients in self.A.

    :param data: list of (x, y) pairs, an (x, y) tuple, or a single
        array with x in the first D_in columns
    :param weights: unused
    :param stats: unused
    :return: None
    """
    if isinstance(data, list):
        x = np.vstack([d[0] for d in data])
        y = np.vstack([d[1] for d in data])
    elif isinstance(data, tuple):
        assert len(data) == 2
        # BUG FIX: the tuple case previously never unpacked the data,
        # leaving x and y undefined.
        x, y = data
    elif isinstance(data, np.ndarray):
        x, y = data[:, :self.D_in], data[:, self.D_in:]
    else:
        raise Exception("Invalid data type")

    from sklearn.linear_model import LogisticRegression
    for n in progprint_xrange(self.D_out):
        lr = LogisticRegression(fit_intercept=False)
        lr.fit(x, y[:, n])
        self.A[n] = lr.coef_
2.466755
2.531273
0.974512
def log_conditional_likelihood(self, x):
    r"""likelihood \sum_t log p(y_t | x_t)

    Optionally override this in base classes
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)
    # Sum the per-timestep local likelihoods.
    return sum(
        self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
        for t in range(T))
3.690728
4.239261
0.870606
def grad_local_log_likelihood(self, x):
    """return d/dxt log p(yt | xt)  evaluated at xt

    Optionally override this in base classes
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)
    # Autodiff the local likelihood w.r.t. its first argument, once.
    dll_dx = grad(self.local_log_likelihood)
    out = np.zeros((T, D))
    for t in range(T):
        out[t] += dll_dx(x[t], self.data[t], self.inputs[t])
    return out
3.652872
4.029223
0.906595
def hessian_local_log_likelihood(self, x):
    """return d^2/dxt^2 log p(y | x) for each time bin

    Optionally override this in base classes
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)
    # Autodiff the local likelihood's Hessian w.r.t. x_t, once.
    d2ll_dx2 = hessian(self.local_log_likelihood)
    H_diag = np.zeros((T, D, D))
    for t in range(T):
        H_diag[t] = d2ll_dx2(x[t], self.data[t], self.inputs[t])
    return H_diag
3.438576
3.842897
0.894787
def log_joint(self, x):
    """Compute the log joint probability p(x, y)"""
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # Gaussian prior, quadratic part: -1/2 x^T J x
    J_diag, J_upper_diag = self.sparse_J_prior
    lp = -0.5 * np.sum(x * symm_block_tridiag_matmul(J_diag, J_upper_diag, x))

    # Gaussian prior, linear part: h^T x
    _, h_init, log_Z_init = self.info_init_params
    _, _, _, h1, h2, log_Z_dyn = self.info_dynamics_params
    lp += x[0].dot(h_init)
    lp += np.sum(x[:-1] * h1)
    lp += np.sum(x[1:] * h2)

    # Normalization constants of the prior.
    lp += log_Z_init
    lp += np.sum(log_Z_dyn)

    # Conditional likelihood log p(y | x).
    return lp + self.log_conditional_likelihood(x)
4.398901
4.427735
0.993488
def sparse_hessian_log_joint(self, x):
    """The Hessian includes the quadratic terms of the Gaussian LDS prior
    as well as the Hessian of the local log likelihood.
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # Gaussian prior contributes -J to the Hessian.
    J_diag, J_upper_diag = self.sparse_J_prior
    H_diag, H_upper_diag = -J_diag, -J_upper_diag

    # Likelihood contribution.
    H_diag += self.hessian_local_log_likelihood(x)

    # Subtract a little bit to ensure negative definiteness
    H_diag -= 1e-8 * np.eye(D)

    return H_diag, H_upper_diag
5.526696
4.456088
1.240257
def gradient_log_joint(self, x):
    """The gradient of the log joint probability.

    For the Gaussian terms, this is
        d/dx [-1/2 x^T J x + h^T x] = -Jx + h.

    For the likelihood terms, we have for each time t
        d/dx log p(yt | xt)
    """
    T, D = self.T, self.D_latent
    assert x.shape == (T, D)

    # Gaussian prior: gradient of the quadratic term is -J x ...
    J_diag, J_upper_diag = self.sparse_J_prior
    g = -1 * symm_block_tridiag_matmul(J_diag, J_upper_diag, x)

    # ... plus the linear terms h.
    _, h_init, _ = self.info_init_params
    _, _, _, h1, h2, _ = self.info_dynamics_params
    g[0] += h_init
    g[:-1] += h1
    g[1:] += h2

    # Likelihood contribution.
    g += self.grad_local_log_likelihood(x)
    return g
6.321738
6.788263
0.931275
def _laplace_approximation_newton(self, tol=1e-6, stepsz=0.9, verbose=False):
    """Solve a block tridiagonal system with message passing.

    Finds the mode of the log joint with damped Newton steps, using the
    banded solver for the (scaled) Hessian system at each iteration.

    :param tol: stop when the mean absolute change in x falls below this
    :param stepsz: damping factor in (0, 1] for each Newton step
    :param verbose: print per-iteration objective and gradient info
    :return: the optimized latent state array x
    """
    from pylds.util import solve_symm_block_tridiag, scipy_solve_symm_block_tridiag
    # Scale by the problem size to keep the solver well conditioned.
    scale = self.T * self.D_emission

    def newton_step(x, stepsz):
        # One damped Newton update: x - stepsz * H^{-1} g.
        assert 0 <= stepsz <= 1
        g = self.gradient_log_joint(x)
        H_diag, H_upper_diag = self.sparse_hessian_log_joint(x)
        # Negate H so the banded solver sees a positive definite system
        # (H is negative definite at the mode).
        Hinv_g = -scipy_solve_symm_block_tridiag(-H_diag / scale,
                                                 -H_upper_diag / scale,
                                                 g / scale)
        return x - stepsz * Hinv_g

    if verbose:
        print("Fitting Laplace approximation")
    # Mutable cell so the callback can count iterations.
    itr = [0]

    def cbk(x):
        print("Iteration: ", itr[0],
              "\tObjective: ", (self.log_joint(x) / scale).round(4),
              "\tAvg Grad: ", (self.gradient_log_joint(x).mean() / scale).round(4))
        itr[0] += 1

    # Solve for optimal x with Newton's method
    x = self.gaussian_states
    dx = np.inf
    while dx >= tol:
        xnew = newton_step(x, stepsz)
        dx = np.mean(abs(xnew - x))
        x = xnew
        if verbose:
            cbk(x)

    assert np.all(np.isfinite(x))
    if verbose:
        print("Done")
    return x
3.658657
3.655121
1.000967
def grad_local_log_likelihood(self, x):
    """d/dx  y^T Cx + y^T d - exp(Cx+d)
        = y^T C - exp(Cx+d)^T C
        = (y - lmbda)^T C
    """
    # Poisson rate under the current latent states and inputs.
    rate = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
    return (self.data - rate).dot(self.C)
5.086603
4.356154
1.167682
def hessian_local_log_likelihood(self, x):
    """d/dx  (y - lmbda)^T C = d/dx -exp(Cx + d)^T C
        = -C^T exp(Cx + d)^T C
    """
    # Poisson rate under the current latent states and inputs.
    rate = np.exp(np.dot(x, self.C.T) + np.dot(self.inputs, self.D.T))
    # Per-timestep Hessian: -sum_n rate_tn * c_n c_n^T.
    return np.einsum('tn, ni, nj ->tij', -rate, self.C, self.C)
5.43904
4.393054
1.2381
def grad_local_log_likelihood(self, x):
    """d/d \psi  y \psi - log (1 + exp(\psi))
        = y - exp(\psi) / (1 + exp(\psi))
        = y - sigma(psi)
        = y - p

    d \psi / dx = C

    d / dx = (y - sigma(psi)) * C
    """
    C, D, u, y = self.C, self.D, self.inputs, self.data
    psi = x.dot(C.T) + u.dot(D.T)
    # Logistic sigmoid of the linear predictor.
    prob = 1. / (1 + np.exp(-psi))
    return (y - prob).dot(C)
4.238722
4.100004
1.033834
def hessian_local_log_likelihood(self, x):
    """d/dx (y - p) * C
        = -dpsi/dx (dp/d\psi) C
        = -C p (1-p) C
    """
    C, D, u, y = self.C, self.D, self.inputs, self.data
    psi = x.dot(C.T) + u.dot(D.T)
    prob = 1. / (1 + np.exp(-psi))
    # Derivative of the sigmoid: p (1 - p).
    dprob_dpsi = prob * (1 - prob)
    return np.einsum('tn, ni, nj ->tij', -dprob_dpsi, self.C, self.C)
4.47473
3.950068
1.132824
def resample_emission_distn(self):
    """Now for the expensive part... the data is stored in a sparse row
    format, which is good for updating the latent states (since we
    primarily rely on dot products with the data, which can be
    efficiently performed for CSR matrices).

    However, in order to update the n-th row of the emission matrix, we
    need to know which counts are observed in the n-th column of data.
    This involves converting the data to a sparse column format, which
    can require (time) intensive re-indexing.
    """
    # CSC gives cheap per-column (per-neuron) access below.
    masked_datas = [s.masked_data.tocsc() for s in self.states_list]
    xs = [np.hstack((s.gaussian_states, s.inputs)) for s in self.states_list]

    for n in range(self.D_obs):
        # Get the nonzero values of the nth column
        rowns = [md.indices[md.indptr[n]:md.indptr[n+1]] for md in masked_datas]
        # Latent states at the time bins where neuron n was observed.
        xns = [x[r] for x, r in zip(xs, rowns)]
        yns = [s.masked_data.getcol(n).data for s in self.states_list]
        # Every retained entry is observed by construction.
        maskns = [np.ones_like(y, dtype=bool) for y in yns]
        # Matching Polya-gamma auxiliary variables for column n.
        omegans = [s.omega.getcol(n).data for s in self.states_list]
        self.emission_distn._resample_row_of_emission_matrix(
            n, xns, yns, maskns, omegans)
3.88808
3.498064
1.111495
@conv_func def my_conv(n_messages, messages, p_response, app_data): # Create an array of n_messages response objects addr = calloc(n_messages, sizeof(PamResponse)) response = cast(addr, POINTER(PamResponse)) p_response[0] = response for i in range(n_messages): if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF: dst = calloc(len(password)+1, sizeof(c_char)) memmove(dst, cpassword, len(password)) response[i].resp = dst response[i].resp_retcode = 0 return 0 # python3 ctypes prefers bytes if sys.version_info >= (3,): if isinstance(username, str): username = username.encode(encoding) if isinstance(password, str): password = password.encode(encoding) if isinstance(service, str): service = service.encode(encoding) else: if isinstance(username, unicode): username = username.encode(encoding) if isinstance(password, unicode): password = password.encode(encoding) if isinstance(service, unicode): service = service.encode(encoding) if b'\x00' in username or b'\x00' in password or b'\x00' in service: self.code = 4 # PAM_SYSTEM_ERR in Linux-PAM self.reason = 'strings may not contain NUL' return False # do this up front so we can safely throw an exception if there's # anything wrong with it cpassword = c_char_p(password) handle = PamHandle() conv = PamConv(my_conv, 0) retval = pam_start(service, username, byref(conv), byref(handle)) if retval != 0: # This is not an authentication error, something has gone wrong starting up PAM self.code = retval self.reason = "pam_start() failed" return False retval = pam_authenticate(handle, 0) auth_success = retval == 0 if auth_success and resetcreds: retval = pam_setcred(handle, PAM_REINITIALIZE_CRED); # store information to inform the caller why we failed self.code = retval self.reason = pam_strerror(handle, retval) if sys.version_info >= (3,): self.reason = self.reason.decode(encoding) if hasattr(libpam, 'pam_end'): pam_end(handle, retval) return auth_success
def authenticate(self, username, password, service='login', encoding='utf-8', resetcreds=True)
username and password authentication for the given service. Returns True for success, or False for failure. self.code (integer) and self.reason (string) are always stored and may be referenced for the reason why authentication failed. 0/'Success' will be stored for success. Python3 expects bytes() for ctypes inputs. This function will make necessary conversions using the supplied encoding. Inputs: username: username to authenticate password: password in plain text service: PAM service to authenticate against, defaults to 'login' Returns: success: True failure: False
3.776961
3.539263
1.06716
found_names = [] with open(self.filename) as f: content = f.readlines() for line in content: line = line.strip() if " " in line: attribute, value = line.split(" ", 1) attribute = attribute.strip() value = value.strip() if attribute.lower() in ['host']: found_names.append(value) return found_names
def names(self)
The names defined in ~/.ssh/config :return: the names
2.776166
2.592266
1.070942
with open(self.filename) as f: content = f.readlines() content = [" ".join(x.split()).strip('\n').lstrip().split(' ', 1) for x in content] # removes duplicated spaces, and splits in two fields, removes leading spaces hosts = {} host = "NA" for line in content: if line[0].startswith('#') or line[0] is '': pass # ignore line else: attribute = line[0] value = line[1] if attribute.lower().strip() in ['Host', 'host']: host = value hosts[host] = {'host': host} else: # In case of special configuration lines, such as port # forwarding, # there would be no 'Host india' line. if host in hosts: hosts[host][attribute] = value # pass self.hosts = hosts
def load(self)
list the hosts defined in the ssh config file
5.875459
5.392901
1.08948
if name in ["localhost"]: r = '\n'.join(Shell.sh("-c", command).split()[-1:]) else: r = '\n'.join(Shell.ssh(name, command).split()[-1:]) return r
def execute(self, name, command)
execute the command on the named host :param name: the name of the host in config :param command: the command to be executed :return:
5.50182
5.95015
0.924652
data = { "host": host, "key": key, "username": username } if verbose and key in self.names(): Console.error("{key} already in ~/.ssh/config".format(**data), traceflag=False) return "" else: entry = dedent(.format(**data)) try: with open(self.filename, "a") as config_ssh: config_ssh.write(entry) config_ssh.close() self.load() if verbose: Console.ok("Added india to ~/.ssh/config") except Exception as e: if verbose: Console.error(e.message)
def generate(self, key="india", host="india.futuresystems.org", username=None, force=False, verbose=False)
adds a host to the config file with given parameters. #TODO: make sure this is better documented :param key: the key :param host: the host :param username: the username :param force: not used :param verbose: prints debug messages :return:
4.162916
4.03613
1.031413
pwd = os.getcwd() name = filename.replace(pwd, "$PWD") try: (first, name) = name.split("site-packages") name += "... site" except: pass loglevel = logging.CRITICAL try: level = grep("loglevel:", config_file( "/cloudmesh_debug.yaml")).strip().split(":")[1].strip().lower() if level.upper() == "DEBUG": loglevel = logging.DEBUG elif level.upper() == "INFO": loglevel = logging.INFO elif level.upper() == "WARNING": loglevel = logging.WARNING elif level.upper() == "ERROR": loglevel = logging.ERROR else: level = logging.CRITICAL except: # print "LOGLEVEL NOT FOUND" loglevel = logging.DEBUG log = logging.getLogger(name) log.setLevel(loglevel) formatter = logging.Formatter( 'CM {0:>50}:%(lineno)s: %(levelname)6s - %(message)s'.format(name)) # formatter = logging.Formatter( # 'CM {0:>50}: %(levelname)6s - %(module)s:%(lineno)s %funcName)s: %(message)s'.format(name)) handler = logging.StreamHandler() handler.setFormatter(formatter) log.addHandler(handler) return log
def LOGGER(filename)
creates a logger with the given name. You can use it as follows:: log = cloudmesh.common.LOGGER(__file__) log.error("this is an error") log.info("this is an info") log.warning("this is a warning")
3.267518
3.216836
1.015755
items = [] for k, v in d.items(): new_key = k for p in prefix: new_key = new_key.replace(p, new_prefix, 1) items.append((new_key, v)) return dict(items)
def key_prefix_replace(d, prefix, new_prefix="")
replaces the list of prefix in keys of a flattened dict :param d: the flattened dict :param prefix: a list of prefixes that are replaced with a new prefix. Typically this will be "" :type prefix: list of str :param new_prefix: The new prefix. By default it is set to "" :return: the dict with the keys replaced as specified
1.964287
2.223795
0.883304
# http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys if type(d) == list: flat = [] for entry in d: flat.append(flatten(entry, parent_key=parent_key, sep=sep)) return flat else: items = [] for k, v in d.items(): new_key = parent_key + sep + k if parent_key else k if isinstance(v, collectionsAbc.MutableMapping): items.extend(flatten(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items)
def flatten(d, parent_key='', sep='__')
flattens the dict into a one dimensional dictionary :param d: multidimensional dict :param parent_key: replaces from the parent key :param sep: the separation character used when fattening. the default is __ :return: the flattened dict
1.747977
1.769336
0.987928
dict_result = cls.object_to_dict(obj) if flatten: dict_result = FlatDict(dict_result) return dict_result
def convert(cls, obj, flatten=True)
This function converts object into a Dict optionally Flattening it :param obj: Object to be converted :param flatten: boolean to specify if the dict has to be flattened :return dict: the dict of the object (Flattened or Un-flattened)
3.35487
4.167583
0.804992
dict_obj = dict() if obj is not None: if type(obj) == list: dict_list = [] for inst in obj: dict_list.append(cls.object_to_dict(inst)) dict_obj["list"] = dict_list elif not cls.is_primitive(obj): for key in obj.__dict__: # is an object if type(obj.__dict__[key]) == list: dict_list = [] for inst in obj.__dict__[key]: dict_list.append(cls.object_to_dict(inst)) dict_obj[key] = dict_list elif not cls.is_primitive(obj.__dict__[key]): temp_dict = cls.object_to_dict(obj.__dict__[key]) dict_obj[key] = temp_dict else: dict_obj[key] = obj.__dict__[key] elif cls.is_primitive(obj): return obj return dict_obj
def object_to_dict(cls, obj)
This function converts Objects into Dictionary
1.83619
1.820173
1.0088
if debug and error is not None: print(error) # TODO: BUG: trace should only be printed if debug is true if trace: print(traceback.format_exc())
def msg(cls, error=None, debug=True, trace=True)
prints the error message :param error: the error message :param debug: only prints it if debug is set to true :param trace: if true prints the trace :return:
5.046156
5.729718
0.880699
# TODO: if debug: Error.msg(error=error, debug=debug, trace=trace)
def traceback(cls, error=None, debug=True, trace=True)
prints the trace :param error: a message preceding the trace :param debug: prints it if debug is set to true :param trace: :return:
9.943754
9.983443
0.996025
return "\n".join( textwrap.wrap(text, width=width, initial_indent=" " * indent, subsequent_indent=" " * indent))
def indent(text, indent=2, width=128)
indents the given text by the indent specified and wrapping to the given width :param text: the text to print :param indent: indent characters :param width: the width of the text :return:
2.387379
3.388829
0.704485
if color: Console.theme = Console.theme_color else: Console.theme = Console.theme_bw Console.color = color
def set_theme(color=True)
defines if the console messages are printed in color :param color: if True its printed in color :return:
4.760948
5.375376
0.885696
# print (message, prefix) message = message or "" if prefix: text = "ERROR: " else: text = "" if cls.color: cls.cprint('FAIL', text, str(message)) else: print(cls.txt_msg(text + str(message))) if traceflag and cls.debug: trace = traceback.format_exc().strip() if trace: print() print("Trace:") print("\n ".join(str(trace).splitlines())) print()
def error(cls, message, prefix=True, traceflag=False)
prints an error message :param message: the message :param prefix: a prefix for the message :param traceflag: if true the stack trace is retrieved and printed :return:
4.43711
4.724435
0.939183
message = message or "" if prefix: text = "TODO: " else: text = "" if Console.color: Console.cprint('FAIL', text, str(message)) else: print(Console.msg(text + str(message))) trace = traceback.format_exc().strip() if traceflag and trace != "None": print() print("\n".join(str(trace).splitlines())) print()
def TODO(message, prefix=True, traceflag=True)
prints an TODO message :param message: the message :param prefix: if set to true it prints TODO: as prefix :param traceflag: if true the stack trace is retrieved and printed :return:
4.427874
4.918327
0.90028
message = message or "" if Console.color: Console.cprint('RED', 'DEBUG: ', message) else: print(Console.msg('DEBUG: ' + message))
def debug_msg(message)
print a debug message :param message: the message :return:
8.234778
9.71784
0.847388
message = message or "" if Console.color: Console.cprint('OKBLUE', "INFO: ", message) else: print(Console.msg("INFO: " + message))
def info(message)
prints an informational message :param message: the message :return:
8.16234
9.449832
0.863755
message = message or "" if Console.color: Console.cprint('WARNING', "WARNING: ", message) else: print(Console.msg("WARNING: " + message))
def warning(message)
prints a warning :param message: the message :return:
7.834002
8.774437
0.892821
message = message or "" if Console.color: Console.cprint('OKGREEN', "", message) else: print(Console.msg(message))
def ok(message)
prints an ok message :param message: the message< :return:
11.049846
12.710223
0.869367
message = message or "" prefix = prefix or "" print((Console.theme[color] + prefix + message + Console.theme['ENDC']))
def cprint(color, prefix, message)
prints a message in a given color :param color: the color as defined in the theme :param prefix: the prefix (a string) :param message: the message :return:
7.370386
7.179491
1.026589
if cls.debug: print("Timer", name, "started ...") cls.timer_start[name] = time.time()
def start(cls, name)
starts a timer with the given name. :param name: the name of the timer :type name: string
5.664026
6.02048
0.940793
cls.timer_end[name] = time.time() if cls.debug: print("Timer", name, "stopped ...")
def stop(cls, name)
stops the timer with a given name. :param name: the name of the timer :type name: string
6.890841
7.906579
0.871533
if name in cls.timer_end: cls.timer_elapsed[name] = cls.timer_end[name] - \ cls.timer_start[name] return cls.timer_elapsed[name] else: return "undefined"
def get(cls, name)
returns the time of the timer. :param name: the name of the timer :type name: string :rtype: the elapsed time
4.049394
3.642614
1.111673
if cls.verbose: if len(args) == 2: print(args[0], str("{0:.2f}".format(cls.get(args[1]))), "s") else: raise Exception("StopWatch: wrong number of arguments")
def print(cls, *args)
prints a timer. The first argument is the label if it exists, the last is the timer :param args: label, name :return:
5.215424
4.461234
1.169054
# # PRINT PLATFORM # if sysinfo: data_platform = systeminfo() print(Printer.attribute(data_platform, ["Machine Arribute", "Time/s"])) # # PRINT TIMERS # timers = StopWatch.keys() data_timers = {} for timer in timers: data_timers[timer] = { 'time': round(StopWatch.get(timer), 2), 'timer': timer } for attribute in ["node", "system", "machine", "mac_version", "win_version"]: data_timers[timer][attribute] = data_platform[attribute] # print(Printer.attribute(data_timers, header=["Command", "Time/s"])) print(Printer.write( data_timers, order=["timer", "time", "node", "system", "mac_version", "win_version"] )) print() print(Printer.write( data_timers, order=["timer", "time", "node", "system", "mac_version", "win_version"], output="csv" ))
def benchmark(cls, sysinfo=True)
prints out all timers in a convenient benchmark tabble :return: :rtype:
4.054155
3.869802
1.047639
def is_number(s): try: float(s) return True except ValueError: return False def fstr(s): return s if is_number(s) else '"%s"' % s if mode != 'dict': kv_tpl = '("%s", %s)' ST = 'OrderedDict([\n' END = '])' else: kv_tpl = '"%s": %s' ST = '{\n' END = '}' for i, k in enumerate(OD.keys()): if type(OD[k]) in [dict, OrderedDict]: level += 1 s += (level - 1) * indent + kv_tpl % ( k, ST + dprint(OD[k], mode=mode, indent=indent, level=level) + ( level - 1) * indent + END) level -= 1 else: s += level * indent + kv_tpl % (k, fstr(OD[k])) if i != len(OD) - 1: s += "," s += "\n" return s
def dprint(OD, mode='dict', s="", indent=' ' * 4, level=0)
a recursive dict printer method that adds indentations TODO: needs better explanation and test example :param OD: the ordered dict :param mode: the mode is dict :param s: TODO :param indent: the indentation characters. default is 4 :param level: the level :return:
2.65048
2.624744
1.009805
d = ConfigDict("cloudmesh.yaml") print(d, end='') d.info() print(d["meta"]) print(d["meta.kind"]) print(d["meta"]["kind"]) # this does not yet work d.data["cloudmesh"]["profile"]["firstname"] = 'ABC' print(d) d.save() import os os.system("cat cmd3.yaml") print(d.json) print(d.filename) print("YAML") print(d.yaml)
def main()
TODO: A test which should actually be moved into a nosetest :return:
6.896792
6.949791
0.992374
filename = path_expand(filename) file_contains_tabs = False with open(filename, 'r') as f: lines = f.read().split("\n") line_no = 1 for line in lines: if "\t" in line: file_contains_tabs = True location = [ i for i in range(len(line)) if line.startswith('\t', i)] if verbose: print("Tab found in line", line_no, "and column(s)", location) line_no += 1 return file_contains_tabs
def check_file_for_tabs(cls, filename, verbose=True)
identifies if the file contains tabs and returns True if it does. It also prints the location of the lines and columns. If verbose is set to False, the location is not printed. :param verbose: if true prints issues :param filename: the filename :type filename: str :rtype: True if there are tabs in the file
3.030993
3.254096
0.93144
current_dir = "." + os.path.sep if path.startswith(current_dir): cwd = str(os.getcwd()) path = path.replace(current_dir, cwd, 1) location = os.path.expandvars(os.path.expanduser(path)) return location
def path_expand(cls, path)
expands the path while replacing environment variables, ./, and ~/ :param path: the path to be expanded :type path: string :return:the new path :rtype: string
2.982096
2.971663
1.003511
if load_order is None: load_order = [".", os.path.join("~", ".cloudmesh")] for path in load_order: name = Config.path_expand(path + os.path.sep + filename) if verbose: print("try finding file", name) if os.path.isfile(name): if verbose: print("Found File", name) return name return None
def find_file(cls, filename, load_order=None, verbose=False)
find the specified file in the list of directories that are given in the array load_order :param filename: the file name :type filename: str :param load_order: an array with path names in with the filename is looked for. :type load_order: list of str :param verbose: :type verbose: bool :return: file name if successful :rtype: string if the file exists or None otherwise
3.281407
3.495209
0.93883
# print ("LOAD CONFIGDICT", filename) self.data = BaseConfigDict(filename=Config.path_expand(filename)) try: version = str(self.data["meta"]["version"]) if version not in self.versions: Console.error("The yaml file version must be {}".format( ', '.join(self.versions))) sys.exit(1) except Exception as e: Console.error( "Your yaml file ~/.cloudmesh/cloudmesh.yaml is not up to date.", traceflag=False) Console.error(e.message, traceflag=False) sys.exit(1)
def load(self, filename)
loads the configuration from the yaml filename :param filename: :type filename: string :return:
5.436296
5.286393
1.028356
if filename is not None: location = path_expand(filename) else: location = self['meta']['location'] # with open('data.yml', 'w') as outfile: # outfile.write( yaml.dump(data, default_flow_style=True) ) # Make a backup self.make_a_copy(location) f = os.open(location, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, stat.S_IRUSR | stat.S_IWUSR) if output == "json": os.write(f, self.json()) elif output in ['yml', 'yaml']: # d = dict(self) # os.write(f, yaml.dump(d, default_flow_style=False)) os.write(f, ordered_dump(OrderedDict(self), Dumper=yaml.SafeDumper, default_flow_style=False, indent=attribute_indent)) elif output == "print": os.write(f, str(custom_print(self, attribute_indent))) else: os.write(f, self.dump()) os.close(f)
def write(self, filename=None, output="dict", attribute_indent=4)
This method writes the dict into various output formats. This includes a dict, json, and yaml :param filename: the file in which the dict is written :param output: is a string that is either "dict", "json", "yaml" :param attribute_indent: character indentation of nested attributes in
3.005613
3.202969
0.938383
import shutil destination = backup_name(location) shutil.copyfile(location, destination)
def make_a_copy(self, location=None)
Creates a backup of the file specified in the location. The backup filename appends a .bak.NO where number is a number that is not yet used in the backup directory. TODO: This function should be moved to another file maybe XShell :param location: the location of the file to be backed up
8.803351
8.178579
1.076391
content = self.data.yaml() with open(Config.path_expand(ConfigDict.filename), 'w') as f: f.write(content)
def save(self, filename=None)
saves the configuration in the given filename, if it is none the filename at load time is used. :param filename: the file name :type filename: string :return:
12.149126
11.809415
1.028766
if start is not None: data = self.data[start] return json.dumps(self.data, indent=4)
def json(self, start=None)
:param start: start key in dot notation returns the dict in json format :return: json string version :rtype: string
4.272286
3.598769
1.187152
try: config = d = ConfigDict("cloudmesh.yaml") d = ConfigDict("cloudmesh.yaml") # # bug: cloud is none when adding a group # config = d["cloudmesh"]["clouds"][cloud] credentials = config["credentials"] cloud_type = config["cm_type"] if cloud_type == "openstack": return credentials["OS_USERNAME"] else: raise ValueError("getUser for this cloud type not yet " "supported: {}".format(cloud)) except Exception as ex: Console.error("problem getting user")
def getUser(cls, cloud)
gets the username for a specified cloud. TODO: works currently only for opensatck. :param cloud: the name of the cloud :return:
6.289949
6.089598
1.032901
path = os.path.dirname(filename) if not os.path.isdir(path): Shell.mkdir(path)
def config_dir_setup(filename)
sets the config file and makes sure the directory exists if it has not yet been created. :param filename: :return:
3.283219
3.734871
0.879072
shell = Shell() print(shell.terminal_type()) r = shell.execute('pwd') # copy line replace print(r) # shell.list() # print json.dumps(shell.command, indent=4) # test some commands without args r = shell.execute('ls', ["-l", "-a"]) print(r) r = shell.execute('ls', "-l -a") print(r) r = shell.ls("-aux") print(r) r = shell.ls("-a", "-u", "-x") print(r) r = shell.pwd() print(r)
def main()
a test that should actually be added into a nosetest :return:
5.313158
5.418611
0.980539
python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] python_version_s = '.'.join(v_string) # pip_version = pip.__version__ pip_version = Shell.pip("--version").split()[1] return python_version_s, pip_version
def get_python(cls)
returns the python and pip version :return: python version, pip version
4.090477
3.470997
1.178473
python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] if python_version[0] == 2: python_version_s = '.'.join(v_string) if (python_version[0] == 2) and (python_version[1] >= 7) and ( python_version[2] >= 9): print( "You are running a supported version of python: {:}".format( python_version_s)) else: print( "WARNING: You are running an unsupported version of python: {:}".format( python_version_s)) print(" We recommend you update your python") elif python_version[0] == 3: python_version_s = '.'.join(v_string) if (python_version[0] == 3) and (python_version[1] >= 7) and ( python_version[2] >= 0): print( "You are running a supported version of python: {:}".format( python_version_s)) else: print( "WARNING: You are running an unsupported version of python: {:}".format( python_version_s)) print(" We recommend you update your python") # pip_version = pip.__version__ python_version, pip_version = cls.get_python() if int(pip_version.split(".")[0]) >= 18: print("You are running a supported version of pip: " + str( pip_version)) else: print("WARNING: You are running an old version of pip: " + str( pip_version)) print(" We recommend you update your pip with \n") print(" pip install -U pip\n")
def check_python(cls)
checks if the python version is supported :return: True if it is supported
2.1294
2.116966
1.005874
if platform.system().lower() == "darwin": command = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage" else: command = 'VBoxManage' return cls.execute(command, args)
def VBoxManage(cls, *args)
executes VboxManage with the given arguments :param args: :return:
3.193145
2.926686
1.091044
option = '-n' if platform.system().lower() == 'windows' else '-c' return cls.execute('ping', "{option} {count} {host}".format(option=option, count=count, host=host))
def ping(cls, host=None, count=1)
execute ping :param host: the host to ping :param count: the number of pings :return:
3.017302
3.626808
0.831944
result = [] for line in lines: if what not in line: result = result + [line] return result
def remove_line_with(cls, lines, what)
returns all lines that do not contain what :param lines: :param what: :return:
3.205172
2.999191
1.068679
result = [] for line in lines: if what in line: result = result + [line] return result
def find_lines_with(cls, lines, what)
returns all lines that contain what :param lines: :param what: :return:
3.060723
3.016303
1.014727
exe_paths = glob.glob(cls.cygwin_path + r'\*.exe') # print cls.cygwin_path # list all *.exe in cygwin path, use glob for c in exe_paths: exe = c.split('\\') name = exe[1].split('.')[0] # command['windows'][name] = "{:}\{:}.exe".format(cygwin_path, c) cls.command['windows'][name] = c
def find_cygwin_executables(cls)
find the executables in cygwin
5.493496
5.308055
1.034936
what = sys.platform kind = 'UNDEFINED_TERMINAL_TYPE' if 'linux' in what: kind = 'linux' elif 'darwin' in what: kind = 'darwin' elif 'cygwin' in what: kind = 'cygwin' elif 'windows' in what: kind = 'windows' return kind
def terminal_type(cls)
returns darwin, cygwin, cmd, or linux
2.942514
2.401763
1.225147
# print "--------------" result = None terminal = cls.terminal_type() # print cls.command os_command = [cmd] if terminal in ['linux', 'windows']: os_command = [cmd] elif 'cygwin' in terminal: if not cls.command_exists(cmd): print("ERROR: the command could not be found", cmd) return else: os_command = [cls.command[cls.operating_system()][cmd]] if isinstance(arguments, list): os_command = os_command + arguments elif isinstance(arguments, tuple): os_command = os_command + list(arguments) elif isinstance(arguments, str): os_command = os_command + arguments.split() else: print("ERROR: Wrong parameter type", type(arguments)) if cwd is None: cwd = os.getcwd() try: if shell: result = subprocess.check_output( os_command, stderr=subprocess.STDOUT, shell=True, cwd=cwd) else: result = subprocess.check_output( os_command, # shell=True, stderr=subprocess.STDOUT, cwd=cwd) except: if witherror: Console.error("problem executing subprocess", traceflag=traceflag) if result is not None: result = result.strip().decode() return result
def execute(cls, cmd, arguments="", shell=False, cwd=None, traceflag=True, witherror=True)
Run Shell command :param witherror: if set to False the error will not be printed :param traceflag: if set to true the trace is printed in case of an error :param cwd: the current working directory in whcih the command is supposed to be executed. :param shell: if set to true the subprocess is called as part of a shell :param cmd: command to run :param arguments: we do not know yet :return:
2.742003
2.823976
0.970973
directory = path_expand(directory) try: os.makedirs(directory) except OSError as e: # EEXIST (errno 17) occurs under two conditions when the path exists: # - it is a file # - it is a directory # # if it is a file, this is a valid error, otherwise, all # is fine. if e.errno == errno.EEXIST and os.path.isdir(directory): pass else: raise
def mkdir(cls, directory)
creates a directory with all its parents in ots name :param directory: the path of the directory :return:
4.20015
4.92029
0.853639