code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
if hasattr(self, 'sampled_topics') and append:
sampled_topics = \
samplers_lda.sampler(self.docid, self.tokens,
self.sampled_topics[self.samples-1, :],
self.N, self.V, self.K, self.D,
self.alpha, self.beta,
burnin, thinning, samples)
tt_temp = self.tt_comp(sampled_topics)
dt_temp = self.dt_comp(sampled_topics)
self.sampled_topics = \
np.concatenate((self.sampled_topics, sampled_topics))
self.tt = np.concatenate((self.tt, tt_temp), axis=2)
self.dt = np.concatenate((self.dt, dt_temp), axis=2)
self.samples = self.samples + samples
else:
self.samples = samples
seed = np.copy(self.topic_seed)
self.sampled_topics = \
samplers_lda.sampler(self.docid, self.tokens, seed,
self.N, self.V, self.K, self.D,
self.alpha, self.beta,
burnin, thinning, self.samples)
self.tt = self.tt_comp(self.sampled_topics)
self.dt = self.dt_comp(self.sampled_topics) | def sample(self, burnin, thinning, samples, append=True) | Estimate topics via Gibbs sampling.
burnin: number of iterations to allow chain to burn in before sampling.
thinning: thinning interval between samples.
samples: number of samples to take.
Total number of Gibbs iterations = burnin + thinning * samples
If sampled topics already exist and append = True, extend chain from
last sample.
If append = False, start new chain from the seed. | 2.576304 | 2.489226 | 1.034982 |
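A minimal usage sketch; the `lda` object and its construction are hypothetical, only the `sample` call itself comes from the code above:

```python
# 1000 burn-in iterations, then 50 samples taken every 10th iteration,
# i.e. 1000 + 10 * 50 = 1500 Gibbs iterations in total.
lda.sample(burnin=1000, thinning=10, samples=50)

# Extend the existing chain with 25 more samples (no new burn-in needed).
lda.sample(burnin=0, thinning=10, samples=25, append=True)
```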
samples = sampled_topics.shape[0]
dt = np.zeros((self.D, self.K, samples))
for s in range(samples):
dt[:, :, s] = \
samplers_lda.dt_comp(self.docid, sampled_topics[s, :],
self.N, self.K, self.D, self.alpha)
return dt | def dt_comp(self, sampled_topics) | Compute document-topic matrix from sampled_topics. | 4.739536 | 4.422411 | 1.071709 |
samples = sampled_topics.shape[0]
tt = np.zeros((self.V, self.K, samples))
for s in range(samples):
tt[:, :, s] = \
samplers_lda.tt_comp(self.tokens, sampled_topics[s, :],
self.N, self.V, self.K, self.beta)
return tt | def tt_comp(self, sampled_topics) | Compute term-topic matrix from sampled_topics. | 4.540506 | 4.35603 | 1.04235 |
topic_top_probs = []
topic_top_words = []
tt = self.tt_avg(False)
for t in range(self.K):
top_word_indices = list(tt[:, t].argsort()[-W:][::-1])
topic_top_probs.append(np.round(np.sort(tt[:, t])[-W:][::-1], 3))
topic_top_words.append([list(self.token_key.keys())
[list(self.token_key.values()).index(i)]
for i in top_word_indices])
with codecs.open(output_file, "w", "utf-8") as f:
for t in range(self.K):
words = ','.join(topic_top_words[t])
probs = ','.join([str(i) for i in topic_top_probs[t]])
f.write("topic" + str(t) + ',')
f.write("%s\n" % words)
f.write(" " + ',')
f.write("%s\n" % probs) | def topic_content(self, W, output_file="topic_description.csv") | Print top W words in each topic to file. | 2.951396 | 2.885641 | 1.022787 |
return samplers_lda.perplexity_comp(self.docid, self.tokens,
self.tt, self.dt, self.N,
self.K, self.samples) | def perplexity(self) | Compute perplexity for each sample. | 15.193329 | 13.780662 | 1.102511 |
try:
if isinstance(index, (int, long)):
index = range(self.samples)[-index:]
except NameError:
# Python 3 has no `long`; a plain int check covers it
if isinstance(index, int):
index = range(self.samples)[-index:]
self.sampled_topics = np.take(self.sampled_topics, index, axis=0)
self.tt = np.take(self.tt, index, axis=2)
self.dt = np.take(self.dt, index, axis=2)
self.samples = len(index) | def samples_keep(self, index) | Keep subset of samples. If index is an integer, keep last N=index
samples. If index is a list, keep the samples
corresponding to the index values in the list. | 3.611043 | 3.271998 | 1.10362 |
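A short usage sketch (the `lda` object is hypothetical):

```python
# Keep only the last 20 samples of the chain.
lda.samples_keep(20)

# Or keep an explicit subset by sample index.
lda.samples_keep([0, 5, 10, 15])
```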
avg = self.tt.mean(axis=2)
if print_output:
np.savetxt(output_file, avg, delimiter=",")
return avg | def tt_avg(self, print_output=True, output_file="tt.csv") | Compute average term-topic matrix, and print to file if
print_output=True. | 3.207466 | 2.862032 | 1.120696 |
with codecs.open(output_file, "w", encoding='utf-8') as f:
for (v, k) in self.token_key.items():
f.write("%s,%d\n" % (v, k)) | def dict_print(self, output_file="dict.csv") | Print mapping from tokens to numeric indices. | 3.753522 | 3.246714 | 1.156099 |
assert np.issubdtype(seed.dtype, np.integer) and seed.shape == (self.samples, self.N)
self.topic_seed = seed | def set_seed(self, seed) | Override default values for random initial topic assignment,
set to "seed" instead.
seed is 2-d array (number of samples in LDA model x number
of tokens in LDA model) | 10.888622 | 5.936428 | 1.834205 |
self.sampled_topics = np.zeros((self.samples, self.N),
                               dtype=int)
for s in range(self.samples):
self.sampled_topics[s, :] = \
samplers_lda.sampler_query(self.docid, self.tokens,
self.topic_seed,
np.ascontiguousarray(
self.tt[:, :, s],
dtype=float),
self.N, self.K, self.D,
self.alpha, query_samples)
print("Sample %d queried" % s)
self.dt = np.zeros((self.D, self.K, self.samples))
for s in range(self.samples):
self.dt[:, :, s] = \
samplers_lda.dt_comp(self.docid, self.sampled_topics[s, :],
self.N, self.K, self.D, self.alpha) | def query(self, query_samples) | Query docs with query_samples number of Gibbs
sampling iterations. | 4.10959 | 4.161216 | 0.987593 |
avg = self.dt.mean(axis=2)
if print_output:
np.savetxt(output_file, avg, delimiter=",")
return avg | def dt_avg(self, print_output=True, output_file="dt_query.csv") | Compute average document-topic matrix,
and print to file if print_output=True. | 3.360257 | 3.069152 | 1.094849 |
def r(tokens):
text = ' ' + ' '.join(tokens)
for k, v in replace_dict.items():
text = text.replace(" " + k + " ", " " + v + " ")
return text.split()
self.stems = list(map(r, self.stems)) | def phrase_replace(self, replace_dict) | Replace phrases with single token, mapping defined in replace_dict | 4.311321 | 4.57553 | 0.942256 |
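A hedged usage sketch; the mapping below is an invented example, not from the source:

```python
# Each key is matched as a whole token (note the space padding in `r`)
# and replaced by its value.
replace_dict = {"new_york": "nyc", "united_states": "us"}
corpus.phrase_replace(replace_dict)
```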
def clean1(tokens):
return [t for t in tokens if t.isalpha() and len(t) > length]
def clean2(tokens):
return [t for t in tokens if t.isalnum() and len(t) > length]
if numbers:
self.tokens = list(map(clean1, self.tokens))
else:
self.tokens = list(map(clean2, self.tokens)) | def token_clean(self, length, numbers=True) | Strip out non-alphanumeric tokens.
length: remove tokens of length "length" or less.
numbers: if True, keep only alphabetic tokens (strip tokens containing digits). | 2.288106 | 2.100243 | 1.089449 |
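Usage sketch (the `corpus` object is hypothetical):

```python
# Drop tokens of length 2 or less; with numbers=True only purely
# alphabetic tokens survive, with numbers=False alphanumerics survive.
corpus.token_clean(length=2, numbers=True)
```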
def s(tokens):
return [PorterStemmer().stem(t) for t in tokens]
self.stems = list(map(s, self.tokens)) | def stem(self) | Stem tokens with Porter Stemmer. | 5.406704 | 4.829383 | 1.119543 |
def bigram_join(tok_list):
text = nltk.bigrams(tok_list)
return list(map(lambda x: x[0] + '.' + x[1], text))
if items == "tokens":
self.bigrams = list(map(bigram_join, self.tokens))
elif items == "stems":
self.bigrams = list(map(bigram_join, self.stems))
else:
raise ValueError("Items must be either \'tokens\' or \'stems\'.") | def bigram(self, items) | generate bigrams of either items = "tokens" or "stems" | 3.039166 | 2.565049 | 1.184837 |
def remove(tokens):
return [t for t in tokens if t not in self.stopwords]
if items == 'tokens':
self.tokens = list(map(remove, self.tokens))
elif items == 'stems':
self.stems = list(map(remove, self.stems))
else:
raise ValueError("Items must be either \'tokens\' or \'stems\'.") | def stopword_remove(self, items, threshold=False) | Remove stopwords from either tokens (items = "tokens")
or stems (items = "stems") | 2.755227 | 2.181547 | 1.262969 |
if items == 'stems':
v = self.stems
elif items == 'tokens':
v = self.tokens
elif items == 'bigrams':
v = self.bigrams
else:
raise ValueError("Items must be either \'tokens\' , \'bigrams\' or \'stems\'.")
agg = itertools.chain(*v)
counts = collections.Counter(agg)
v_unique = list(map(lambda x: set(x), v))
agg_d = itertools.chain(*v_unique)
counts_d = collections.Counter(agg_d)
unique_tokens = set(itertools.chain(*v))
def tf_idf_compute(t):
return (1 + np.log(counts[t]))*np.log(self.N/counts_d[t])
unsorted_df = [counts[t] for t in unique_tokens]
unsorted_tf_idf = [tf_idf_compute(t) for t in unique_tokens]
self.df_ranking = sorted(zip(unique_tokens, unsorted_df),
key=lambda x: x[1], reverse=True)
self.tfidf_ranking = sorted(zip(unique_tokens, unsorted_tf_idf),
key=lambda x: x[1], reverse=True)
if print_output:
with codecs.open('df_ranking.csv', 'w', 'utf-8') as f:
for p in self.df_ranking:
f.write("%s,%d\n" % (p[0], p[1]))
with codecs.open('tfidf_ranking.csv', 'w', 'utf-8') as f:
for p in self.tfidf_ranking:
f.write("%s,%f\n" % (p[0], p[1])) | def term_rank(self, items, print_output=True) | Calculate corpus-level df and tf-idf scores on either tokens (items =
"tokens") bigrams (items = 'bigrams') or stems (items = "stems"). Print to file if
print_output = True. | 2.237531 | 2.00683 | 1.114958 |
def remove(tokens):
return [t for t in tokens if t not in to_remove]
if rank == "df":
to_remove = set([t[0] for t in self.df_ranking if t[1] <= cutoff])
elif rank == "tfidf":
to_remove = set([t[0] for t in self.tfidf_ranking
if t[1] <= cutoff])
else:
raise ValueError("Rank must be either \'df\' or \'tfidf\'.")
if items == 'tokens':
self.tokens = list(map(remove, self.tokens))
elif items == 'bigrams':
self.bigrams = list(map(remove, self.bigrams))
elif items == 'stems':
self.stems = list(map(remove, self.stems))
else:
raise ValueError("Items must be either \'tokens\', \'bigrams\' or \'stems\'.") | def rank_remove(self, rank, items, cutoff) | remove tokens or stems (specified in items) based on rank's (df or
tfidf) value being less than cutoff to remove all words with rank R or
less, specify cutoff = self.xxx_ranking[R][1] | 2.335614 | 1.910595 | 1.222454 |
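Following the docstring's recipe, a sketch that keeps roughly the top 5000 stems by tf-idf (the 5000 is an arbitrary illustrative choice, and `corpus` is hypothetical):

```python
cutoff = corpus.tfidf_ranking[5000][1]
# Everything scoring at or below the 5000th-ranked stem is dropped.
corpus.rank_remove(rank="tfidf", items="stems", cutoff=cutoff)
```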
self.rho = rho_seed
self.mu = mu_seed | def set_seed(self, rho_seed, mu_seed) | set seeds manually (should add dimensionality check) | 3.176009 | 2.936857 | 1.081431 |
# on the first iteration of estimation, this will be called
if not hasattr(self, 'type_prob'):
self.type_prob = np.empty((self.N, self.K))
temp_probs = np.zeros((self.N, self.K))
for i in range(self.N):
for k in range(self.K):
temp_probs[i, k] = \
np.log(self.rho[k]) + np.dot(self.feature_counts[i, :],
np.log(self.mu[k, :]))
temp_probsZ = temp_probs - np.max(temp_probs, axis=1)[:, np.newaxis]
self.type_prob = np.exp(temp_probsZ) / \
np.exp(temp_probsZ).sum(axis=1)[:, np.newaxis]
return np.log(np.exp(temp_probsZ).sum(axis=1)).sum() + \
np.max(temp_probs, axis=1).sum() | def E_step(self) | compute type probabilities given current parameter estimates. | 2.723883 | 2.53211 | 1.075737 |
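The row-max subtraction above is the standard log-sum-exp stabilization; a self-contained sketch of the identity it relies on:

```python
import numpy as np

# log(sum_k exp(x_k)) = m + log(sum_k exp(x_k - m)) with m = max_k x_k,
# so the exponentials never overflow even for large log-probabilities.
def log_sum_exp(x):
    m = np.max(x, axis=1, keepdims=True)
    return (m + np.log(np.sum(np.exp(x - m), axis=1, keepdims=True))).ravel()
```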
for k in range(self.K):
self.rho[k] = self.type_prob[:, k].sum() / self.N
for k in range(self.K):
for m in range(self.M):
temp_prob = np.dot(self.type_prob[:, k],
self.feature_counts[:, m])
if temp_prob < 1e-99:
temp_prob = 1e-99
self.mu[k, m] = temp_prob / np.dot(self.type_prob[:, k],
self.observations) | def M_step(self) | generate new parameter estimates given updated type distribution | 2.951182 | 2.719994 | 1.084996 |
self.loglik = np.zeros(maxiter)
iter = 0
while iter < maxiter:
self.loglik[iter] = self.E_step()
if np.isnan(self.loglik[iter]):
print("undefined log-likelihood")
break
self.M_step()
if self.loglik[iter] - self.loglik[iter - 1] < 0 and iter > 0:
print("log-likelihood decreased by %f at iteration %d"
% (self.loglik[iter] - self.loglik[iter - 1],
iter))
elif self.loglik[iter] - self.loglik[iter - 1] < convergence \
and iter > 0:
print("convergence at iteration %d, loglik = %f" %
(iter, self.loglik[iter]))
self.loglik = self.loglik[self.loglik < 0]
break
iter += 1 | def estimate(self, maxiter=250, convergence=1e-7) | run EM algorithm until convergence, or until maxiter reached | 2.413409 | 2.380908 | 1.013651 |
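A hypothetical driver for the EM loop above (`model`, `rho_seed` and `mu_seed` are assumed, not from the source):

```python
model.set_seed(rho_seed, mu_seed)
model.estimate(maxiter=500, convergence=1e-8)
print("final log-likelihood: %f" % model.loglik[-1])
```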
with codecs.open(abs_path(path), encoding='utf-8') as f:
return f.read() | def get_content(path) | Get content of file. | 3.513714 | 3.57021 | 0.984176 |
import pandas as pd
import numpy as np
df = df.copy()
if isinstance(df.index, pd.MultiIndex):
raise ValueError('Hierarchical indices not supported')
if isinstance(df.columns, pd.MultiIndex):
raise ValueError('Hierarchical indices not supported')
def to_list_if_array(val):
if isinstance(val, np.ndarray):
return val.tolist()
else:
return val
for col_name, dtype in df.dtypes.items():
if str(dtype) == 'category':
# XXXX: work around bug in to_json for categorical types
# https://github.com/pydata/pandas/issues/10778
df[col_name] = df[col_name].astype(str)
elif str(dtype) == 'bool':
# convert numpy bools to objects; np.bool is not JSON serializable
df[col_name] = df[col_name].astype(object)
elif np.issubdtype(dtype, np.integer):
# convert integers to objects; np.int is not JSON serializable
df[col_name] = df[col_name].astype(object)
elif np.issubdtype(dtype, np.floating):
# For floats, convert to Python float: np.float is not JSON serializable
# Also convert NaN/inf values to null, as they are not JSON serializable
col = df[col_name]
bad_values = col.isnull() | np.isinf(col)
df[col_name] = col.astype(object).where(~bad_values, None)
elif str(dtype).startswith('datetime'):
# Convert datetimes to strings
# astype(str) will choose the appropriate resolution
df[col_name] = df[col_name].astype(str).replace('NaT', '')
elif dtype == object:
# Convert numpy arrays saved as objects to lists
# Arrays are not JSON serializable
col = df[col_name].apply(to_list_if_array, convert_dtype=False)
df[col_name] = col.where(col.notnull(), None)
return df | def sanitize_dataframe(df) | Sanitize a DataFrame to prepare it for serialization.
* Make a copy
* Raise ValueError if it has a hierarchical index.
* Convert categoricals to strings.
* Convert np.bool_ dtypes to Python bool objects
* Convert np.int dtypes to Python int objects
* Convert floats to objects and replace NaNs/infs with None.
* Convert DateTime dtypes into appropriate string representations | 2.603574 | 2.445388 | 1.064687 |
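A quick demonstration of the sanitization (assumes `sanitize_dataframe` is importable as defined above):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "n": np.array([1, 2, 3], dtype=np.int64),
    "x": [0.5, np.nan, np.inf],
    "flag": np.array([True, False, True]),
})
clean = sanitize_dataframe(df)
# NaN and inf have become None, so this is JSON-serializable.
print(clean.to_dict(orient="records"))
```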
import pandas as pd
if isinstance(data, pd.DataFrame):
# We have to do the isinstance test first because we can't
# compare a DataFrame to None.
data = sanitize_dataframe(data)
spec['data'] = {'values': data.to_dict(orient='records')}
elif data is None:
# Assume data is within spec & do nothing
# It may be deep in the spec rather than at the top level
pass
else:
# As a last resort try to pass the data to a DataFrame and use it
data = pd.DataFrame(data)
data = sanitize_dataframe(data)
spec['data'] = {'values': data.to_dict(orient='records')}
return spec | def prepare_spec(spec, data=None) | Prepare a Vega-Lite spec for sending to the frontend.
This allows data to be passed in either as part of the spec
or separately. If separately, the data is assumed to be a
pandas DataFrame or object that can be converted to a DataFrame.
Note that if data is not None, this modifies spec in-place | 4.859159 | 4.404653 | 1.103188 |
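Example of passing the data separately; the spec layout follows the usual Vega-Lite shape:

```python
import pandas as pd

spec = {
    "mark": "bar",
    "encoding": {
        "x": {"field": "a", "type": "ordinal"},
        "y": {"field": "b", "type": "quantitative"},
    },
}
data = pd.DataFrame({"a": ["A", "B"], "b": [28, 55]})
full_spec = prepare_spec(spec, data)
# full_spec["data"]["values"] == [{"a": "A", "b": 28}, {"a": "B", "b": 55}]
```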
id = uuid.uuid4()
return (
{'application/javascript': self._generate_js(id)},
{'jupyter-vega': '#{0}'.format(id)},
) | def _repr_mimebundle_(self, include=None, exclude=None) | Display the visualization in the Jupyter notebook. | 9.05208 | 8.910396 | 1.015901 |
# If you haven't already connected with Google, this won't work.
if not google.authorized:
return "Error: not yet connected with Google!", 400
if "refresh_token" not in google.token:
# We're missing the refresh token from Google, and the only way to get
# a new one is to force reauthentication. That's annoying.
return (
(
"Error: missing Google refresh token. "
"Uncomment the `reprompt_consent` line in the code to fix this."
),
500,
)
# Look up the user's name and email address from Google.
google_resp = google.get("/oauth2/v2/userinfo?fields=name,email")
assert google_resp.ok, "Received failure response from Google userinfo API"
google_userinfo = google_resp.json()
# Start the connection process by looking up all the information that
# Nylas needs in order to connect, and sending it to the authorize API.
nylas_authorize_data = {
"client_id": app.config["NYLAS_OAUTH_CLIENT_ID"],
"name": google_userinfo["name"],
"email_address": google_userinfo["email"],
"provider": "gmail",
"settings": {
"google_client_id": app.config["GOOGLE_OAUTH_CLIENT_ID"],
"google_client_secret": app.config["GOOGLE_OAUTH_CLIENT_SECRET"],
"google_refresh_token": google.token["refresh_token"],
},
}
nylas_authorize_resp = requests.post(
"https://api.nylas.com/connect/authorize", json=nylas_authorize_data
)
assert nylas_authorize_resp.ok, "Received failure response from Nylas authorize API"
nylas_code = nylas_authorize_resp.json()["code"]
# Now that we've got the `code` from the authorize response,
# pass it to the token response to complete the connection.
nylas_token_data = {
"client_id": app.config["NYLAS_OAUTH_CLIENT_ID"],
"client_secret": app.config["NYLAS_OAUTH_CLIENT_SECRET"],
"code": nylas_code,
}
nylas_token_resp = requests.post(
"https://api.nylas.com/connect/token", json=nylas_token_data
)
assert nylas_token_resp.ok, "Received failure response from Nylas token API"
nylas_access_token = nylas_token_resp.json()["access_token"]
# Great, we've connected Google to Nylas! In the process, Nylas gave us
# an OAuth access token, which we'll need in order to make API requests
# to Nylas in the future. We'll save that access token in the Flask session,
# so we can pick it up later and use it when we need it.
session["nylas_access_token"] = nylas_access_token
# We're all done here. Redirect the user back to the home page,
# which will pick up the access token we just saved.
return redirect(url_for("index")) | def pass_creds_to_nylas() | This view loads the credentials from Google and passes them to Nylas,
to set up native authentication. | 2.751932 | 2.744007 | 1.002888 |
try:
ngrok_resp = requests.get("http://localhost:4040/api/tunnels")
except requests.ConnectionError:
# I guess ngrok isn't running.
return None
ngrok_data = ngrok_resp.json()
secure_urls = [
tunnel["public_url"]
for tunnel in ngrok_data["tunnels"]
if tunnel["proto"] == "https"
]
return secure_urls[0] | def ngrok_url() | If ngrok is running, it exposes an API on port 4040. We can use that
to figure out what URL it has assigned, and suggest that to the user.
https://ngrok.com/docs#list-tunnels | 2.437332 | 2.30266 | 1.058486 |
# Start the connection process by looking up all the information that
# Nylas needs in order to connect, and sending it to the authorize API.
nylas_authorize_data = {
"client_id": app.config["NYLAS_OAUTH_CLIENT_ID"],
"name": name,
"email_address": email,
"provider": "exchange",
"settings": {"username": email, "password": password},
}
if server_host:
nylas_authorize_data["settings"]["eas_server_host"] = server_host
nylas_authorize_resp = requests.post(
"https://api.nylas.com/connect/authorize", json=nylas_authorize_data
)
if not nylas_authorize_resp.ok:
message = nylas_authorize_resp.json()["message"]
raise APIError(message)
nylas_code = nylas_authorize_resp.json()["code"]
# Now that we've got the `code` from the authorize response,
# pass it to the token response to complete the connection.
nylas_token_data = {
"client_id": app.config["NYLAS_OAUTH_CLIENT_ID"],
"client_secret": app.config["NYLAS_OAUTH_CLIENT_SECRET"],
"code": nylas_code,
}
nylas_token_resp = requests.post(
"https://api.nylas.com/connect/token", json=nylas_token_data
)
if not nylas_token_resp.ok:
message = nylas_token_resp.json()["message"]
raise APIError(message)
nylas_access_token = nylas_token_resp.json()["access_token"]
# Great, we've connected the Exchange account to Nylas!
# In the process, Nylas gave us an OAuth access token, which we'll need
# in order to make API requests to Nylas in the future.
# We'll save that access token in the Flask session, so we can pick it up
# later and use it when we need it.
session["nylas_access_token"] = nylas_access_token
# We're all done here. Redirect the user back to the success page,
# which will pick up the access token we just saved.
return redirect(url_for("success")) | def pass_creds_to_nylas(name, email, password, server_host=None) | Passes Exchange credentials to Nylas, to set up native authentication. | 2.52525 | 2.496069 | 1.011691 |
headers = headers or {}
headers.update(self.session.headers)
postfix = "/{}".format(extra) if extra else ""
if cls.api_root != "a":
url = "{}/{}/{}{}".format(self.api_server, cls.collection_name, id, postfix)
else:
url = "{}/a/{}/{}/{}{}".format(
self.api_server, self.app_id, cls.collection_name, id, postfix
)
converted_filters = convert_datetimes_to_timestamps(
filters, cls.datetime_filter_attrs
)
url = str(URLObject(url).add_query_params(converted_filters.items()))
response = self._get_http_session(cls.api_root).get(
url, headers=headers, stream=stream
)
return _validate(response) | def _get_resource_raw(
self, cls, id, extra=None, headers=None, stream=False, **filters
) | Get an individual REST resource | 3.913163 | 3.905966 | 1.001843 |
if cls.api_root != "a":
url_path = "/{name}/{id}/{method}".format(
name=cls.collection_name, id=id, method=method_name
)
else:
# Management method.
url_path = "/a/{app_id}/{name}/{id}/{method}".format(
app_id=self.app_id, name=cls.collection_name, id=id, method=method_name
)
url = URLObject(self.api_server).with_path(url_path)
converted_data = convert_datetimes_to_timestamps(data, cls.datetime_attrs)
session = self._get_http_session(cls.api_root)
response = session.post(url, json=converted_data)
result = _validate(response).json()
return cls.create(self, **result) | def _call_resource_method(self, cls, id, method_name, data) | POST a dictionary to an API method,
for example /a/.../accounts/id/upgrade | 3.667682 | 3.607694 | 1.016628 |
delta = dt - epoch
# return delta.total_seconds()
return delta.seconds + delta.days * 86400 | def timestamp_from_dt(dt, epoch=datetime(1970, 1, 1)) | Convert a datetime to a timestamp.
https://stackoverflow.com/a/8778548/141395 | 3.269647 | 3.040181 | 1.075478 |
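A worked example: Unix time 1,000,000,000 falls on 2001-09-09 01:46:40 UTC, i.e. 11574 days and 6400 seconds after the epoch (11574 * 86400 + 6400 = 1000000000):

```python
from datetime import datetime

assert timestamp_from_dt(datetime(2001, 9, 9, 1, 46, 40)) == 1000000000
```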
if not data:
return data
new_data = {}
for key, value in data.items():
if key in datetime_attrs and isinstance(value, datetime):
new_key = datetime_attrs[key]
new_data[new_key] = timestamp_from_dt(value)
else:
new_data[key] = value
return new_data | def convert_datetimes_to_timestamps(data, datetime_attrs) | Given a dictionary of data, and a dictionary of datetime attributes,
return a new dictionary that converts any datetime attributes that may
be present to their timestamped equivalent. | 2.110873 | 2.084596 | 1.012605 |
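Usage sketch; the attribute mapping below is hypothetical:

```python
from datetime import datetime

# Suppose the API expects 'since' renamed to 'received_after' as a Unix timestamp.
datetime_attrs = {"since": "received_after"}
data = {"since": datetime(2001, 9, 9, 1, 46, 40), "limit": 10}
print(convert_datetimes_to_timestamps(data, datetime_attrs))
# {'received_after': 1000000000, 'limit': 10}
```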
# When you first tell Nylas about your webhook, it will test that webhook
# URL with a GET request to make sure that it responds correctly.
# We just need to return the `challenge` parameter to indicate that this
# is a valid webhook URL.
if request.method == "GET" and "challenge" in request.args:
print(" * Nylas connected to the webhook!")
return request.args["challenge"]
# Alright, this is a POST request, which means it's a webhook notification.
# The question is, is it genuine or fake? Check the signature to find out.
is_genuine = verify_signature(
message=request.data,
key=app.config["NYLAS_OAUTH_CLIENT_SECRET"].encode("utf8"),
signature=request.headers.get("X-Nylas-Signature"),
)
if not is_genuine:
return "Signature verification failed!", 401
# Alright, we have a genuine webhook notification from Nylas!
# Let's find out what it says...
data = request.get_json()
for delta in data["deltas"]:
# Processing the data might take awhile, or it might fail.
# As a result, instead of processing it right now, we'll push a task
# onto the Celery task queue, to handle it later. That way,
# we've got the data saved, and we can return a response to the
# Nylas webhook notification right now.
process_delta.delay(delta)
# Now that all the `process_delta` tasks have been queued, we can
# return an HTTP response to Nylas, to let them know that we processed
# the webhook notification successfully.
return "Deltas have been queued", 200 | def webhook() | When the Flask server gets a request at the `/webhook` URL, it will run
this function. Most of the time, that request will be a genuine webhook
notification from Nylas. However, it's possible that the request could
be a fake notification from someone else, trying to fool our app. This
function needs to verify that the webhook is genuine! | 5.484877 | 5.05183 | 1.085721 |
digest = hmac.new(key, msg=message, digestmod=hashlib.sha256).hexdigest()
return digest == signature | def verify_signature(message, key, signature) | This function will verify the authenticity of a digital signature.
For security purposes, Nylas includes a digital signature in the headers
of every webhook notification, so that clients can verify that the
webhook request came from Nylas and no one else. The signing key
is your OAuth client secret, which only you and Nylas know. | 2.509451 | 2.685081 | 0.934591 |
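The sender's side of the same scheme, for reference (the key and message are made up):

```python
import hashlib
import hmac

key = b"client-secret"
message = b'{"deltas": []}'
signature = hmac.new(key, msg=message, digestmod=hashlib.sha256).hexdigest()
assert verify_signature(message, key, signature)
```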
kwargs = {
"type": delta["type"],
"date": datetime.datetime.utcfromtimestamp(delta["date"]),
"object_id": delta["object_data"]["id"],
}
print(" * {type} at {date} with ID {object_id}".format(**kwargs)) | def process_delta(delta) | This is the part of the code where you would process the information
from the webhook notification. Each delta is one change that happened,
and might require fetching message IDs, updating your database,
and so on.
However, because this is just an example project, we'll just print
out information about the notification, so you can see what
information is being sent. | 4.62934 | 3.877311 | 1.193956 |
response = JSONRPCErrorResponse()
response.error = self.message
response.unique_id = None
response._jsonrpc_error_code = self.jsonrpc_error_code
if hasattr(self, 'data'):
response.data = self.data
return response | def error_respond(self) | Converts the error to an error response object.
:return: An error response object ready to be serialized and sent to the client.
:rtype: :py:class:`JSONRPCErrorResponse` | 4.940766 | 4.572605 | 1.080514 |
if self.unique_id is None:
return None
response = JSONRPCErrorResponse()
response.unique_id = None if self.one_way else self.unique_id
code, msg, data = _get_code_message_and_data(error)
response.error = msg
response._jsonrpc_error_code = code
if data:
response.data = data
return response | def error_respond(self, error) | Create an error response to this request.
When processing the request produces an error condition this method can be used to
create the error response object.
:param error: Specifies what error occurred.
:type error: str or Exception
:returns: An error response object that can be serialized and sent to the client.
:rtype: :py:class:`JSONRPCErrorResponse` | 5.191506 | 4.739413 | 1.09539 |
if self.one_way or self.unique_id is None:
return None
response = JSONRPCSuccessResponse()
response.result = result
response.unique_id = self.unique_id
return response | def respond(self, result) | Create a response to this request.
When processing the request completed successfully this method can be used to
create a response object.
:param result: The result of the invoked method.
:type result: Anything that can be encoded by JSON.
:returns: A response object that can be serialized and sent to the client.
:rtype: :py:class:`JSONRPCSuccessResponse` | 6.310999 | 4.855834 | 1.299674 |
if args and kwargs:
raise InvalidRequestError(
'Does not support args and kwargs at '
'the same time'
)
request = self.request_factory()
request.one_way = one_way
if not one_way:
request.unique_id = self._get_unique_id()
request.method = method
request.args = args
request.kwargs = kwargs
return request | def create_request(self, method, args=None, kwargs=None, one_way=False) | Creates a new :py:class:`JSONRPCRequest` object.
Called by the client when constructing a request.
JSON RPC allows either the ``args`` or ``kwargs`` argument to be set.
:param str method: The method name to invoke.
:param list args: The positional arguments to call the method with.
:param dict kwargs: The keyword arguments to call the method with.
:param bool one_way: The request is an update, i.e. it does not expect a reply.
:return: A new request instance.
:rtype: :py:class:`JSONRPCRequest`
:raises InvalidRequestError: when ``args`` and ``kwargs`` are both defined. | 3.34995 | 3.528579 | 0.949377 |
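For reference, a request built this way serializes to a standard JSON-RPC 2.0 envelope; the id value here is illustrative, assigned by `_get_unique_id`:

```python
req = protocol.create_request("subtract", kwargs={"minuend": 42, "subtrahend": 23})
print(req.serialize())
# {"jsonrpc": "2.0", "method": "subtract",
#  "params": {"minuend": 42, "subtrahend": 23}, "id": 1}
```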
if isinstance(data, bytes):
data = data.decode()
try:
rep = json.loads(data)
except Exception as e:
raise InvalidReplyError(e)
for k in rep.keys():
if k not in self._ALLOWED_REPLY_KEYS:
raise InvalidReplyError('Key not allowed: %s' % k)
if 'jsonrpc' not in rep:
raise InvalidReplyError('Missing jsonrpc (version) in response.')
if rep['jsonrpc'] != self.JSON_RPC_VERSION:
raise InvalidReplyError('Wrong JSONRPC version')
if 'id' not in rep:
raise InvalidReplyError('Missing id in response')
if ('error' in rep) and ('result' in rep):
raise InvalidReplyError(
'Reply must contain exactly one of result and error.'
)
if 'error' in rep:
response = JSONRPCErrorResponse()
error = rep['error']
response.error = error["message"]
response._jsonrpc_error_code = error["code"]
if "data" in error:
response.data = error["data"]
else:
response = JSONRPCSuccessResponse()
response.result = rep.get('result', None)
response.unique_id = rep['id']
return response | def parse_reply(self, data) | Deserializes and validates a response.
Called by the client to reconstruct the serialized :py:class:`JSONRPCResponse`.
:param bytes data: The data stream received by the transport layer containing the
serialized request.
:return: A reconstructed response.
:rtype: :py:class:`JSONRPCSuccessResponse` or :py:class:`JSONRPCErrorResponse`
:raises InvalidReplyError: if the response is not valid JSON or does not conform
to the standard. | 2.491838 | 2.328412 | 1.070188 |
if isinstance(data, bytes):
data = data.decode()
try:
req = json.loads(data)
except Exception as e:
raise JSONRPCParseError()
if isinstance(req, list):
# batch request
requests = JSONRPCBatchRequest()
for subreq in req:
try:
requests.append(self._parse_subrequest(subreq))
except RPCError as e:
requests.append(e)
except Exception as e:
requests.append(JSONRPCInvalidRequestError())
if not requests:
raise JSONRPCInvalidRequestError()
return requests
else:
return self._parse_subrequest(req) | def parse_request(self, data) | Deserializes and validates a request.
Called by the server to reconstruct the serialized :py:class:`JSONRPCRequest`.
:param bytes data: The data stream received by the transport layer containing the
serialized request.
:return: A reconstructed request.
:rtype: :py:class:`JSONRPCRequest`
:raises JSONRPCParseError: if the ``data`` cannot be parsed as valid JSON.
:raises JSONRPCInvalidRequestError: if the request does not comply with the standard. | 2.51193 | 2.599931 | 0.966152 |
exc = JSONRPCError(error)
if self.raises_errors:
raise exc
return exc | def raise_error(self, error) | Recreates the exception.
Creates a :py:class:`~tinyrpc.protocols.jsonrpc.JSONRPCError` instance
and raises it.
This allows the error, message and data attributes of the original
exception to propagate into the client code.
The :py:attr:`~tinyrpc.protocols.RPCProtocol.raises_errors` flag controls if the exception object is
raised or returned.
:returns: the exception object if it is not allowed to raise it.
:raises JSONRPCError: when the exception can be raised.
The exception object will contain ``message``, ``code`` and optionally a
``data`` property. | 8.393204 | 7.559541 | 1.11028 |
request = Request(environ)
request.max_content_length = self.max_content_length
access_control_headers = {
'Access-Control-Allow-Methods': 'POST',
'Access-Control-Allow-Origin': self.allow_origin,
'Access-Control-Allow-Headers': \
'Content-Type, X-Requested-With, Accept, Origin'
}
if request.method == 'OPTIONS':
response = Response(headers=access_control_headers)
elif request.method == 'POST':
# message is encoded in POST, read it...
msg = request.stream.read()
# create new context
context = self._queue_class()
self.messages.put((context, msg))
# ...and send the reply
response = Response(context.get(), headers=access_control_headers)
else:
# nothing else supported at the moment
response = Response('Only POST supported', 405)
return response(environ, start_response) | def handle(self, environ, start_response) | WSGI handler function.
The transport will serve a request by reading the message and putting
it into an internal buffer. It will then block until another
concurrently running function sends a reply using :py:meth:`send_reply`.
The reply will then be sent to the client being handled and handle will
return. | 3.35556 | 3.474704 | 0.965711 |
context, message = self.transport.receive_message()
if callable(self.trace):
self.trace('-->', context, message)
# assuming protocol is threadsafe and dispatcher is theadsafe, as
# long as its immutable
def handle_message(context, message):
try:
request = self.protocol.parse_request(message)
except tinyrpc.exc.RPCError as e:
response = e.error_respond()
else:
response = self.dispatcher.dispatch(request)
# send reply
if response is not None:
result = response.serialize()
if callable(self.trace):
self.trace('<--', context, result)
self.transport.send_reply(context, result)
self._spawn(handle_message, context, message) | def receive_one_message(self) | Handle a single request.
Polls the transport for a new message.
After a new message has arrived :py:meth:`_spawn` is called with a handler
function and arguments to handle the request.
The handler function will try to decode the message using the supplied
protocol, if that fails, an error response will be sent. After decoding
the message, the dispatcher will be asked to handle the resulting
request and the return value (either an error or a result) will be sent
back to the client using the transport. | 4.476917 | 4.210273 | 1.063332 |
ex = exc.RPCError('Error calling remote procedure: %s' % error.error['message'])
if self.raises_errors:
raise ex
return ex | def raise_error(self, error) | Raises the exception in the client.
Called by the client to convert the :py:class:`RPCErrorResponse` into an Exception
and raise or return it depending on the :py:attr:`raises_errors` attribute.
:param error: The error response received from the server.
:type error: :py:class:`RPCResponse`
:rtype: :py:exc:`~tinyrpc.exc.RPCError` when :py:attr:`raises_errors` is False.
:raises: :py:exc:`~tinyrpc.exc.RPCError` when :py:attr:`raises_errors` is True. | 10.33454 | 7.577388 | 1.363866 |
socket = zmq_context.socket(zmq.ROUTER)
socket.bind(endpoint)
return cls(socket) | def create(cls, zmq_context, endpoint) | Create new server transport.
Instead of creating the socket yourself, you can call this function and
merely pass the :py:class:`zmq.core.context.Context` instance.
By passing a context imported from :py:mod:`zmq.green`, you can use
green (gevent) 0mq sockets as well.
:param zmq_context: A 0mq context.
:param endpoint: The endpoint clients will connect to. | 2.61935 | 3.809238 | 0.687631 |
socket = zmq_context.socket(zmq.REQ)
socket.connect(endpoint)
return cls(socket) | def create(cls, zmq_context, endpoint) | Create new client transport.
Instead of creating the socket yourself, you can call this function and
merely pass the :py:class:`zmq.core.context.Context` instance.
By passing a context imported from :py:mod:`zmq.green`, you can use
green (gevent) 0mq sockets as well.
:param zmq_context: A 0mq context.
:param endpoint: The endpoint the server is bound to. | 2.504656 | 4.014641 | 0.62388 |
if not ('REQUEST_METHOD' in os.environ
and os.environ['REQUEST_METHOD'] == 'POST'):
print("Status: 405 Method not Allowed; only POST is accepted")
exit(0)
# POST
content_length = int(os.environ['CONTENT_LENGTH'])
request_json = sys.stdin.read(content_length)
request_json = urlparse.unquote(request_json)
# context isn't used with cgi
return None, request_json | def receive_message(self) | Receive a message from the transport.
Blocks until a message has been received. May return a context
opaque to clients that should be passed to :py:func:`send_reply`
to identify the client later on.
:return: A tuple consisting of ``(context, message)``. | 5.410532 | 5.408107 | 1.000448 |
# context isn't used with cgi
# Using sys.stdout.buffer.write() fails as stdout is on occasion monkey patched
# to AsyncFile which doesn't support the buffer attribute.
print("Status: 200 OK")
print("Content-Type: application/json")
print("Cache-Control: no-cache")
print("Pragma: no-cache")
print("Content-Length: %d" % len(reply))
print()
print(reply.decode()) | def send_reply(self, context, reply) | Sends a reply to a client.
The client is usually identified by passing ``context`` as returned
from the original :py:func:`receive_message` call.
Messages must be bytes, it is up to the sender to convert the message
beforehand. A non-bytes value raises a :py:exc:`TypeError`.
:param any context: A context returned by :py:func:`receive_message`.
:param bytes reply: A binary to send back as the reply. | 6.754403 | 7.613605 | 0.887149 |
gevent.spawn(func, *args, **kwargs) | def _spawn(self, func, *args, **kwargs) | Spawn a handler function.
Spawns the supplied ``func`` with ``*args`` and ``**kwargs``
as a gevent greenlet.
:param func: A callable to call.
:param args: Arguments to ``func``.
:param kwargs: Keyword arguments to ``func``. | 4.855917 | 6.725318 | 0.722035 |
req = self.protocol.create_request(method, args, kwargs, one_way)
rep = self._send_and_handle_reply(req, one_way)
if one_way:
return
return rep.result | def call(self, method, args, kwargs, one_way=False) | Calls the requested method and returns the result.
If an error occured, an :py:class:`~tinyrpc.exc.RPCError` instance
is raised.
:param method: Name of the method to call.
:param args: Arguments to pass to the method.
:param kwargs: Keyword arguments to pass to the method.
:param one_way: Whether or not a reply is desired. | 4.006077 | 5.329321 | 0.751705 |
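A minimal client wiring sketch, following tinyrpc's usual pattern (the endpoint URL is hypothetical):

```python
from tinyrpc import RPCClient
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport

client = RPCClient(JSONRPCProtocol(),
                   HttpPostClientTransport("http://localhost:5000/"))
result = client.call("add", [2, 3], {})
```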
threads = []
if 'gevent' in sys.modules:
# assume that gevent is available and functional, make calls in parallel
import gevent
for r in requests:
req = self.protocol.create_request(r.method, r.args, r.kwargs)
tr = r.transport.transport if len(r) == 4 else None
threads.append(
gevent.spawn(
self._send_and_handle_reply, req, False, tr, True
)
)
gevent.joinall(threads)
return [t.value for t in threads]
else:
# call serially
for r in requests:
req = self.protocol.create_request(r.method, r.args, r.kwargs)
tr = r.transport.transport if len(r) == 4 else None
threads.append(
self._send_and_handle_reply(req, False, tr, True)
)
return threads | def call_all(self, requests) | Calls the methods in the request in parallel.
When the :py:mod:`gevent` module is already loaded it is assumed to be
correctly initialized, including monkey patching if necessary.
In that case the RPC calls defined by ``requests`` are performed in
parallel, otherwise the methods are called sequentially.
:param requests: A list of either :py:class:`~tinyrpc.client.RPCCall` or :py:class:`~tinyrpc.client.RPCCallTo` elements.
When RPCCallTo is used each element defines a transport.
Otherwise the default transport set when RPCClient is
created is used.
:return: A list with replies matching the order of the requests. | 3.08744 | 2.672525 | 1.155252 |
req = self.protocol.create_batch_request()
for call_args in calls:
req.append(self.protocol.create_request(*call_args))
return self._send_and_handle_reply(req) | def batch_call(self, calls) | Experimental, use at your own peril. | 3.984079 | 3.872722 | 1.028754 |
if callable(name):
f = name
f._rpc_public_name = f.__name__
return f
def _(f):
f._rpc_public_name = name or f.__name__
return f
return _ | def public(name=None) | Decorator. Mark a method as eligible for registration by a dispatcher.
The dispatchers :py:func:`~tinyrpc.dispatch.RPCDispatcher.register_instance` function
will do the actual registration of the marked method.
The difference with :py:func:`~tinyrpc.dispatch.RPCDispatcher.public` is that this decorator does
not register with a dispatcher, therefore binding the marked methods with a dispatcher is delayed
until runtime.
It also becomes possible to bind with multiple dispatchers.
:param name: The name to register the function with.
:type name: str or None
Example:
.. code-block:: python
class Baz(object):
def not_exposed(self):
# ...
@public('do_something')
def visible_method(arg1):
# ...
baz = Baz()
dispatch = RPCDispatcher()
dispatch.register_instance(baz, 'bazzies`)
# Baz.visible_method is now callable via RPC as bazzies.do_something
``@public`` is a shortcut for ``@public()``. | 3.56724 | 4.955073 | 0.719917 |
if callable(name):
self.add_method(name)
return name
def _(f):
self.add_method(f, name=name)
return f
return _ | def public(self, name=None) | Convenient decorator.
Allows easy registering of functions to this dispatcher. Example:
.. code-block:: python
dispatch = RPCDispatcher()
@dispatch.public
def foo(bar):
# ...
class Baz(object):
def not_exposed(self):
# ...
@dispatch.public(name='do_something')
def visible_method(arg1):
# ...
:param str name: Name to register callable with. | 3.738772 | 5.499763 | 0.679806 |
self.subdispatchers.setdefault(prefix, []).append(dispatcher) | def add_subdispatch(self, dispatcher, prefix='') | Adds a subdispatcher, possibly in its own namespace.
:param dispatcher: The dispatcher to add as a subdispatcher.
:type dispatcher: RPCDispatcher
:param str prefix: A prefix. All of the new subdispatchers methods will be
available as prefix + their original name. | 4.752705 | 6.069945 | 0.78299 |
assert callable(f), "method argument must be callable"
# catches a few programming errors that are
# commonly silently swallowed otherwise
if not name:
name = f.__name__
if name in self.method_map:
raise exc.RPCError('Name %s already registered' % name)
self.method_map[name] = f | def add_method(self, f, name=None) | Add a method to the dispatcher.
:param f: Callable to be added.
:type f: callable
:param str name: Name to register it with. If ``None``, ``f.__name__`` will
be used.
:raises ~tinyrpc.exc.RPCError: When the `name` is already registered. | 7.619345 | 6.729074 | 1.132302 |
if name in self.method_map:
return self.method_map[name]
for prefix, subdispatchers in self.subdispatchers.items():
if name.startswith(prefix):
for sd in subdispatchers:
try:
return sd.get_method(name[len(prefix):])
except exc.MethodNotFoundError:
pass
raise exc.MethodNotFoundError(name) | def get_method(self, name) | Retrieve a previously registered method.
Checks if a method matching ``name`` has been registered.
If :py:func:`get_method` cannot find a method, every subdispatcher
with a prefix matching the method name is checked as well.
:param str name: Function to find.
:returns: The callable implementing the function.
:rtype: callable
:raises: :py:exc:`~tinyrpc.exc.MethodNotFoundError` | 2.582802 | 2.525277 | 1.02278 |
dispatch = self.__class__()
for name, f in inspect.getmembers(
obj, lambda f: callable(f) and hasattr(f, '_rpc_public_name')):
dispatch.add_method(f, f._rpc_public_name)
# add to dispatchers
self.add_subdispatch(dispatch, prefix) | def register_instance(self, obj, prefix='') | Create new subdispatcher and register all public object methods on
it.
To be used in conjunction with the :py:func:`public`
decorator (*not* :py:func:`RPCDispatcher.public`).
:param obj: The object whose public methods should be made available.
:type obj: object
:param str prefix: A prefix for the new subdispatcher. | 5.660397 | 4.948985 | 1.143749 |
if hasattr(request, 'create_batch_response'):
results = [self._dispatch(req) for req in request]
response = request.create_batch_response()
if response is not None:
response.extend(results)
return response
else:
return self._dispatch(request) | def dispatch(self, request) | Fully handle request.
The dispatch method determines which method to call, calls it and
returns a response containing a result.
No exceptions will be thrown, rather, every exception will be turned
into a response using :py:func:`~tinyrpc.RPCRequest.error_respond`.
If a method isn't found, a :py:exc:`~tinyrpc.exc.MethodNotFoundError`
response will be returned. If any error occurs outside of the requested
method, a :py:exc:`~tinyrpc.exc.ServerError` without any error
information will be returned.
If the method is found and called but throws an exception, the
exception thrown is used as a response instead. This is the only case
in which information from the exception is possibly propagated back to
the client, as the exception is part of the requested method.
:py:class:`~tinyrpc.RPCBatchRequest` instances are handled by handling
all its children in order and collecting the results, then returning an
:py:class:`~tinyrpc.RPCBatchResponse` with the results.
:param request: The request containing the function to be called and its parameters.
:type request: ~tinyrpc.protocols.RPCRequest
:return: The result produced by calling the requested function.
:rtype: ~tinyrpc.protocols.RPCResponse
:raises ~exc.MethodNotFoundError: If the requested function is not published.
:raises ~exc.ServerError: If some other error occurred.
.. Note::
The :py:exc:`~tinyrpc.exc.ServerError` is raised for any kind of exception not
raised by the called function itself or :py:exc:`~tinyrpc.exc.MethodNotFoundError`. | 4.056031 | 4.170275 | 0.972605 |
if hasattr(method, '__code__'):
try:
inspect.getcallargs(method, *args, **kwargs)
except TypeError:
raise exc.InvalidParamsError() | def validate_parameters(self, method, args, kwargs) | Verify that `*args` and `**kwargs` are appropriate parameters for `method`.
:param method: A callable.
:param args: List of positional arguments for `method`
:param kwargs: Keyword arguments for `method`
:raises ~tinyrpc.exc.InvalidParamsError:
Raised when the provided arguments are not acceptable for `method`. | 4.209444 | 3.370588 | 1.248875 |
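The underlying check, in isolation:

```python
import inspect

def add(a, b):
    return a + b

inspect.getcallargs(add, 1, 2)        # OK: binds a=1, b=2
try:
    inspect.getcallargs(add, 1, 2, 3) # too many positional arguments
except TypeError:
    print("would raise InvalidParamsError")
```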
with self.lock:
try:
item = OrderedDict.__getitem__(self, key)
del self[key]
return item[0]
except KeyError:
return default | def pop(self, key, default=None) | Get item from the dict and remove it.
Return default if expired or does not exist. Never raise KeyError. | 3.718346 | 3.378292 | 1.100659 |
key_value, key_age = self.get(key, with_age=True)
if key_age:
key_ttl = self.max_age - key_age
if key_ttl > 0:
return key_ttl
return None | def ttl(self, key) | Return TTL of the `key` (in seconds).
Returns None for non-existent or expired keys. | 3.490492 | 3.322447 | 1.050579 |
" Return the value for key if key is in the dictionary, else default. "
try:
return self.__getitem__(key, with_age)
except KeyError:
if with_age:
return default, None
else:
return default | def get(self, key, default=None, with_age=False) | Return the value for key if key is in the dictionary, else default. | 4.070056 | 3.018361 | 1.348433 |
r = []
for key in self._safe_keys():
try:
r.append((key, self[key]))
except KeyError:
pass
return r | def items(self) | Return a copy of the dictionary's list of (key, value) pairs. | 3.900872 | 3.264914 | 1.194786 |
r = []
for key in self._safe_keys():
try:
r.append(self[key])
except KeyError:
pass
return r | def values(self) | Return a copy of the dictionary's list of values.
See the note for dict.items(). | 4.15469 | 3.825667 | 1.086004 |
return self.load(reload=True, callback=callback, errback=errback) | def reload(self, callback=None, errback=None) | Reload record data from the API. | 4.157424 | 3.82271 | 1.087559 |
if not reload and self.data:
raise RecordException('record already loaded')
def success(result, *args):
self._parseModel(result)
if callback:
return callback(self)
else:
return self
return self._rest.retrieve(self.parentZone.zone,
self.domain, self.type,
callback=success, errback=errback) | def load(self, callback=None, errback=None, reload=False) | Load record data from the API. | 6.259106 | 5.5982 | 1.118057 |
if not self.data:
raise RecordException('record not loaded')
def success(result, *args):
if callback:
return callback(result)
else:
return result
return self._rest.delete(self.parentZone.zone,
self.domain, self.type,
callback=success, errback=errback) | def delete(self, callback=None, errback=None) | Delete the record from the zone, including all advanced configuration,
meta data, etc. | 5.537387 | 4.958311 | 1.116789 |
if not self.data:
raise RecordException('record not loaded')
def success(result, *args):
self._parseModel(result)
if callback:
return callback(self)
else:
return self
return self._rest.update(self.parentZone.zone, self.domain, self.type,
callback=success, errback=errback, **kwargs) | def update(self, callback=None, errback=None, **kwargs) | Update record configuration. Pass list of keywords and their values to
update. For the list of keywords available for zone configuration, see
:attr:`ns1.rest.records.Records.INT_FIELDS`,
:attr:`ns1.rest.records.Records.PASSTHRU_FIELDS`,
:attr:`ns1.rest.records.Records.BOOL_FIELDS` | 5.532206 | 4.868431 | 1.136343 |
if not self.data:
raise RecordException('record not loaded')
stats = Stats(self.parentZone.config)
return stats.qps(zone=self.parentZone.zone,
domain=self.domain,
type=self.type,
callback=callback,
errback=errback) | def qps(self, callback=None, errback=None) | Return the current QPS for this record
:rtype: dict
:return: QPS information | 5.793743 | 5.355618 | 1.081807 |
if not self.data:
raise RecordException('record not loaded')
orig_answers = self.data['answers']
new_answers = self._rest._getAnswersForBody(answers)
orig_answers.extend(new_answers)
return self.update(answers=orig_answers, callback=callback,
errback=errback, **kwargs) | def addAnswers(self, answers, callback=None, errback=None, **kwargs) | Add answers to the record.
:param answers: answers structure. See the class note on answer format. | 4.663947 | 4.833802 | 0.964861 |
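A hedged sketch of calling it; the exact answer shapes accepted by `_getAnswersForBody` depend on the record type, so these literals are illustrative only:

```python
# Simple answers for an A record...
record.addAnswers(["1.2.3.4", "1.2.3.5"])
# ...or the richer dict form carrying answer metadata.
record.addAnswers([{"answer": ["1.2.3.6"], "meta": {"up": True}}])
```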
self._data = {
'default_key': 'default',
'keys': {
'default': {
'key': apikey,
'desc': 'imported API key'
}
}
}
self._keyID = 'default'
self._doDefaults()
if maybeWriteDefault:
path = os.path.expanduser(self.DEFAULT_CONFIG_FILE)
self.write(path) | def createFromAPIKey(self, apikey, maybeWriteDefault=False) | Create a basic config from a single API key
:param str apikey: NS1 API Key, as created in the NS1 portal
:param bool maybeWriteDefault: If True and DEFAULT_CONFIG_FILE doesn't\
exist write out the resulting config there. | 4.33495 | 4.564543 | 0.949701 |
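Usage sketch; the key string is a placeholder, and the Config import path follows the ns1-python layout:

```python
from ns1.config import Config

config = Config()
config.createFromAPIKey("<your-api-key>", maybeWriteDefault=True)
config.getAPIKey()  # -> '<your-api-key>' under the 'default' keyID
```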
try:
self._data = json.loads(body)
except Exception as e:
raise ConfigException('%s: invalid config body: %s' %
(self._path, e))
self._doDefaults() | def loadFromString(self, body) | Load config data (i.e. JSON text) from the given string
:param str body: config data in JSON format | 5.149556 | 5.573858 | 0.923876 |
if '~' in path:
path = os.path.expanduser(path)
f = open(path)
body = f.read()
f.close()
self._path = path
self.loadFromString(body) | def loadFromFile(self, path) | Load JSON config file from disk at the given path
:param str path: path to config file | 3.249639 | 3.860337 | 0.841802 |
if not self._path and not path:
raise ConfigException('no config path given')
if path:
self._path = path
if '~' in self._path:
self._path = os.path.expanduser(self._path)
f = open(self._path, 'w')
f.write(json.dumps(self._data))
f.close() | def write(self, path=None) | Write config data to disk. If this config object already has a path,
it will write to it. If it doesn't, one must be passed during this
call.
:param str path: path to config file | 2.550004 | 2.569205 | 0.992527 |
if keyID not in self._data['keys']:
raise ConfigException('keyID does not exist: %s' % keyID)
self._keyID = keyID | def useKeyID(self, keyID) | Use the given API key config specified by `keyID` during subsequent
API calls
:param str keyID: an index into the 'keys' maintained in this config | 4.731286 | 4.825326 | 0.980511 |
k = keyID if keyID is not None else self._keyID
if not k or k not in self._data['keys']:
raise ConfigException('request key does not exist: %s' % k)
return self._data['keys'][k] | def getKeyConfig(self, keyID=None) | Get key configuration specified by `keyID`, or current keyID.
:param str keyID: optional keyID to retrieve, or current if not passed
:return: a dict of the request (or current) key config | 4.166235 | 4.842136 | 0.860413 |
kcfg = self.getKeyConfig(keyID)
return 'writeLock' in kcfg and kcfg['writeLock'] is True | def isKeyWriteLocked(self, keyID=None) | Determine if a key config is write locked.
:param str keyID: optional keyID to retrieve, or current if not passed
:return: True if the given (or current) keyID is writeLocked | 5.62477 | 7.536951 | 0.746292 |
kcfg = self.getKeyConfig(keyID)
if 'key' not in kcfg:
raise ConfigException('invalid config: missing api key')
return kcfg['key'] | def getAPIKey(self, keyID=None) | Retrieve the NS1 API Key for the given keyID
:param str keyID: optional keyID to retrieve, or current if not passed
:return: API Key for the given keyID | 5.519058 | 6.824574 | 0.808704 |
port = ''
endpoint = ''
keyConfig = self.getKeyConfig()
if 'port' in keyConfig:
port = ':' + keyConfig['port']
elif self._data['port'] != self.PORT:
port = ':' + self._data['port']
if 'endpoint' in keyConfig:
endpoint = keyConfig['endpoint']
else:
endpoint = self._data['endpoint']
return 'https://%s%s/%s/' % (endpoint,
port,
self._data['api_version']) | def getEndpoint(self) | Retrieve the NS1 API Endpoint URL that will be used for requests.
:return: URL of the NS1 API that will be used for requests | 2.949158 | 3.099818 | 0.951397 |
if not reload and self.data:
raise NetworkException('Network already loaded')
def success(result, *args):
self.data = result
self.id = result['id']
self.name = result['name']
self.report = self._rest.report(self.id)
if callback:
return callback(self)
else:
return self
if self.id is None:
if self.name is None:
raise NetworkException('Must at least specify an id or name')
else:
self.id = [network for network in self._rest.list()
if network['name'] == self.name][0]['id']
return self._rest.retrieve(self.id, callback=success,
errback=errback) | def load(self, callback=None, errback=None, reload=False) | Load network data from the API. | 3.059239 | 2.901727 | 1.054282 |
return self._rest.delete(self.id, callback=callback, errback=errback) | def delete(self, callback=None, errback=None) | Delete the Network and all associated addresses | 4.305116 | 4.987143 | 0.863243 |
if not self.data:
raise NetworkException('Network not loaded')
def success(result, *args):
self.data = result
self.id = result['id']
self.name = result['name']
self.report = self._rest.report(self.id)
if callback:
return callback(self)
else:
return self
return self._rest.update(self.id, callback=success, errback=errback,
**kwargs) | def update(self, callback=None, errback=None, **kwargs) | Update Network configuration. Pass a list of keywords and their values to update.
For the list of keywords available for zone configuration, see :attr:`ns1.rest.ipam.Networks.INT_FIELDS` and :attr:`ns1.rest.ipam.Networks.PASSTHRU_FIELDS` | 3.552522 | 3.510397 | 1.012 |
if not self.data:
raise NetworkException('Network not loaded')
return Address(self.config, prefix, type, self).create(**kwargs) | def new_address(self, prefix, type, callback=None, errback=None, **kwargs) | Create a new address space in this Network
:param str prefix: The CIDR prefix of the address to add
:param str type: planned, assignment, host
:return: The newly created Address object | 9.551927 | 9.246963 | 1.03298 |
if not reload and self.data:
raise AddressException('Address already loaded')
def success(result, *args):
self.data = result
self.id = result['id']
self.prefix = result['prefix']
self.type = result['type']
self.network = Network(self.config, id=result['network_id'])
# self.scope_group = Scopegroup(config=self.config, id=result['scope_group_id']) NYI
if self.type != 'host':
self.report = self._rest.report(self.id)
children = self._rest.retrieve_children(self.id)
self.children = [Address(self.config, id=child['id']) for child in
children if len(children) > 0]
try:
parent = self._rest.retrieve_parent(self.id)
self.parent = Address(self.config, id=parent['id'])
except ResourceException:
pass
if callback:
return callback(self)
else:
return self
if self.id is None:
if self.prefix is None or self.type is None or self.network is None:
raise AddressException('Must at least specify an id or prefix, type and network')
else:
network_id = self.network.id
try:
self.id = [address for address in self._rest.list() if address['prefix'] == self.prefix and address[
'type'] == self.type and address['network_id'] == network_id][0]['id']
except IndexError:
raise AddressException("Could not find address by prefix. It may not exist, or is a child address. "
"Use the topmost parent prefix or specify ID")
return self._rest.retrieve(self.id, callback=success,
errback=errback) | def load(self, callback=None, errback=None, reload=False) | Load address data from the API. | 3.232389 | 3.15101 | 1.025826 |
if not self.data:
    raise AddressException('Address not loaded')

def success(result, *args):
    self.data = result
    self.id = result['id']
    self.prefix = result['prefix']
    self.type = result['type']
    self.network = Network(self.config, id=result['network_id'])
    # self.scope_group = Scopegroup(config=self.config, id=result['scope_group_id'])
    if self.type != 'host':
        self.report = self._rest.report(self.id)
    children = self._rest.retrieve_children(self.id)
    self.children = [Address(self.config, id=child['id'])
                     for child in children]
    try:
        parent = self._rest.retrieve_parent(self.id)
        self.parent = Address(self.config, id=parent['id'])
    except ResourceException:
        pass
    if callback:
        return callback(self)
    else:
        return self

return self._rest.update(self.id, callback=success, errback=errback,
                         parent=parent,
                         **kwargs) | def update(self, callback=None, errback=None, parent=True, **kwargs) | Update address configuration. Pass a list of keywords and their values to
update. For the list of keywords available for address configuration, see :attr:`ns1.rest.ipam.Addresses.INT_FIELDS` and :attr:`ns1.rest.ipam.Addresses.PASSTHRU_FIELDS` | 3.037159 | 2.917121 | 1.04115 |
if not reload and self.data:
    raise ScopegroupException('Scope Group already loaded')

def success(result, *args):
    self.data = result
    self.id = result['id']
    self.dhcp4 = result['dhcp4']
    self.dhcp6 = result['dhcp6']
    self.name = result['name']
    self.service_group_id = result['service_group_id']
    if callback:
        return callback(self)
    else:
        return self

if self.id is None:
    if self.name is None or self.service_group_id is None:
        raise ScopegroupException('Must specify either an id, or a name '
                                  'and service_group_id')
    else:
        try:
            self.id = [scope_group for scope_group in self._rest.list()
                       if scope_group['name'] == self.name
                       and scope_group['service_group_id'] == self.service_group_id][0]['id']
        except IndexError:
            raise ScopegroupException("Could not find Scope Group by name "
                                      "and service_group_id. It may not exist.")
return self._rest.retrieve(self.id, callback=success,
                           errback=errback) | def load(self, callback=None, errback=None, reload=False) | Load Scopegroup data from the API. | 2.917121 | 2.683546 | 1.08704 |
if not self.data:
    raise ScopegroupException('Scope Group not loaded')

def success(result, *args):
    self.data = result
    self.id = result['id']
    self.dhcp4 = result['dhcp4']
    self.dhcp6 = result['dhcp6']
    self.name = result['name']
    self.service_group_id = result['service_group_id']
    if callback:
        return callback(self)
    else:
        return self

return self._rest.update(self.id, callback=success,
                         errback=errback, **kwargs) | def update(self, callback=None, errback=None, **kwargs) | Update scope group configuration. Pass a list of keywords and their values to
update. For the list of keywords available for scope group configuration, see :attr:`ns1.rest.ipam.Scopegroups.INT_FIELDS` and :attr:`ns1.rest.ipam.Scopegroups.PASSTHRU_FIELDS` | 3.371105 | 2.93021 | 1.150465 |
if self.data:
    raise ScopegroupException('Scope Group already loaded')

def success(result, *args):
    self.data = result
    self.id = result['id']
    self.dhcp4 = result['dhcp4']
    self.dhcp6 = result['dhcp6']
    self.name = result['name']
    self.service_group_id = result['service_group_id']
    if callback:
        return callback(self)
    else:
        return self

return self._rest.create(dhcp4=dhcp4.option_list, dhcp6=dhcp6.option_list,
                         name=self.name,
                         service_group_id=self.service_group_id,
                         callback=success, errback=errback) | def create(self, dhcp4, dhcp6, callback=None, errback=None) | :param DHCPOptions dhcp4: DHCPOptions object that contains the settings for dhcp4
:param DHCPOptions dhcp6: DHCPOptions object that contains the settings for dhcp6
Create a new Scope Group. Pass a list of keywords and their values to
configure. For the list of keywords available for scope group configuration, see :attr:`ns1.rest.ipam.Scopegroups.INT_FIELDS` and :attr:`ns1.rest.ipam.Scopegroups.PASSTHRU_FIELDS` | 3.035637 | 2.991901 | 1.014618 |
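For illustration, a sketch of creating a scope group. The `DHCPOptions` constructor arguments and the `Scopegroup` keywords shown here are assumptions; the snippet above only guarantees that the options objects expose `option_list`.

```python
from ns1 import NS1
from ns1.ipam import DHCPOptions, Scopegroup

config = NS1(apiKey='<key>').config
dhcp4 = DHCPOptions('dhcpv4', {})  # constructor form is an assumption
dhcp6 = DHCPOptions('dhcpv6', {})
sg = Scopegroup(config, name='office-dhcp', service_group_id=1)  # kwargs assumed
sg.create(dhcp4, dhcp6)
```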
if not self.data:
    raise ZoneException('zone not loaded')
return self._rest.search(self.zone, q, has_geo, callback, errback) | def search(self, q=None, has_geo=False, callback=None, errback=None) | Search within a zone for specific metadata. Zone must already be loaded. | 7.013545 | 5.08637 | 1.37889 |
return self._rest.delete(self.zone, callback=callback, errback=errback) | def delete(self, callback=None, errback=None) | Delete the zone and ALL records it contains. | 6.703889 | 4.380071 | 1.530543 |
if not self.data:
    raise ZoneException('zone not loaded')

def success(result, *args):
    self.data = result
    if callback:
        return callback(self)
    else:
        return self

return self._rest.update(self.zone, callback=success, errback=errback,
                         **kwargs) | def update(self, callback=None, errback=None, **kwargs) | Update zone configuration. Pass a list of keywords and their values to
update. For the list of keywords available for zone configuration, see
:attr:`ns1.rest.zones.Zones.INT_FIELDS` and
:attr:`ns1.rest.zones.Zones.PASSTHRU_FIELDS` | 4.527104 | 4.184634 | 1.08184 |
if self.data:
    raise ZoneException('zone already loaded')

def success(result, *args):
    self.data = result
    if callback:
        return callback(self)
    else:
        return self

if zoneFile:
    return self._rest.import_file(self.zone, zoneFile,
                                  callback=success, errback=errback,
                                  **kwargs)
else:
    return self._rest.create(self.zone, callback=success,
                             errback=errback, **kwargs) | def create(self, zoneFile=None, callback=None, errback=None, **kwargs) | Create a new zone. Pass a list of keywords and their values to
configure. For the list of keywords available for zone configuration,
see :attr:`ns1.rest.zones.Zones.INT_FIELDS` and
:attr:`ns1.rest.zones.Zones.PASSTHRU_FIELDS`
If zoneFile is passed, it should be the path to a zone text file on the local disk whose contents will be used to populate the new zone. | 3.253715 | 3.395135 | 0.958346 |
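A brief usage sketch; the zone name, TTL, and file path are illustrative.

```python
from ns1 import NS1
from ns1.zones import Zone

config = NS1(apiKey='<key>').config
zone = Zone(config, 'example.com')
zone.create(ttl=3600)  # keyword arguments pass through to the API
# Alternatively, seed the new zone from a file on disk:
# zone.create(zoneFile='/path/to/example.com.db')
```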
zone = Zone(self.config, new_zone)
kwargs['link'] = self.data['zone']
return zone.create(callback=callback, errback=errback, **kwargs) | def createLinkToSelf(self, new_zone, callback=None, errback=None,
**kwargs) | Create a new linked zone, linking to ourselves. All records in this
zone will then be available as "linked records" in the new zone.
:param str new_zone: the new zone name to link to this one
:return: new Zone | 3.981486 | 5.466256 | 0.728375 |
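Continuing the same assumed setup, a sketch of linking a new zone to an existing one; both zone names are invented.

```python
from ns1 import NS1
from ns1.zones import Zone

config = NS1(apiKey='<key>').config
zone = Zone(config, 'example.com')
zone.load()
# Every record in example.com becomes a linked record in the new zone.
linked = zone.createLinkToSelf('mirror.example.com')
```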
if '.' not in existing_domain:
    existing_domain = existing_domain + '.' + self.zone
record = Record(self, new_domain, rtype)
return record.create(answers=[],
                     link=existing_domain,
                     callback=callback,
                     errback=errback,
                     **kwargs) | def linkRecord(self, existing_domain, new_domain, rtype,
callback=None, errback=None, **kwargs) | Create a new linked record in this zone. These records use the
configuration (answers, ttl, filters, etc.) from an existing record
in the NS1 platform.
:param str existing_domain: FQDN of the target record whose config should be used. Does not have to be in the same zone.
:param str new_domain: Name of the new (linked) record. Zone name is appended automatically.
:param str rtype: DNS record type, which must match the target record.
:rtype: ns1.records.Record
:return: new Record | 3.790527 | 3.84804 | 0.985054 |
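A hedged sketch of `linkRecord`, assuming a loaded zone that already has a 'www' A record; all names are illustrative.

```python
from ns1 import NS1
from ns1.zones import Zone

config = NS1(apiKey='<key>').config
zone = Zone(config, 'example.com')
zone.load()
# Per the code above, 'www' is expanded to 'www.example.com', and the new
# 'alias.example.com' record serves the target record's configuration.
linked_rec = zone.linkRecord('www', 'alias', 'A')
```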
if zone is None:
    zone = self.zone
if not new_domain.endswith(zone):
    new_domain = new_domain + '.' + zone

def onSaveNewRecord(new_data):
    if zone != self.zone:
        pZone = Zone(self.config, zone)
    else:
        pZone = self
    new_rec = Record(pZone, new_domain, rtype)
    new_rec._parseModel(new_data)
    if callback:
        return callback(new_rec)
    else:
        return new_rec

def onLoadRecord(old_rec):
    data = old_rec.data
    data['zone'] = zone
    data['domain'] = new_domain
    restapi = Records(self.config)
    return restapi.create_raw(zone, new_domain, rtype, data,
                              callback=onSaveNewRecord,
                              errback=errback)

return self.loadRecord(existing_domain, rtype,
                       callback=onLoadRecord,
                       errback=errback) | def cloneRecord(self, existing_domain, new_domain, rtype,
zone=None, callback=None, errback=None) | Clone the given record to a new record such that their configs are
identical.
:param str existing_domain: The existing record to clone
:param str new_domain: The name of the new cloned record
:param str rtype: DNS record type
:param str zone: Optional zone name, if the new record should exist in a different zone than the original record.
:rtype: ns1.records.Record
:return: new Record | 2.898897 | 3.064229 | 0.946045 |
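A usage sketch with invented record names; passing `zone=` would place the clone in a different zone instead.

```python
from ns1 import NS1
from ns1.zones import Zone

config = NS1(apiKey='<key>').config
zone = Zone(config, 'example.com')
zone.load()
# Copies the full record config (answers, ttl, filters, ...) to the new name.
clone = zone.cloneRecord('www.example.com', 'www2.example.com', 'A')
```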
rec = Record(self, domain, rtype)
return rec.load(callback=callback, errback=errback) | def loadRecord(self, domain, rtype, callback=None, errback=None) | Load a high-level Record object from a domain within this Zone.
:param str domain: The name of the record to load
:param str rtype: The DNS record type
:rtype: ns1.records.Record
:return: new Record | 3.195494 | 6.442309 | 0.496017 |
stats = Stats(self.config)
return stats.qps(zone=self.zone, callback=callback, errback=errback) | def qps(self, callback=None, errback=None) | Return the current QPS for this zone
:rtype: dict
:return: QPS information | 5.681317 | 6.143332 | 0.924794 |
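A final sketch for `qps`; the exact keys of the returned dict depend on the NS1 API response.

```python
from ns1 import NS1
from ns1.zones import Zone

config = NS1(apiKey='<key>').config
zone = Zone(config, 'example.com')
# No load() needed: qps() only uses the zone name set in the constructor.
print(zone.qps())  # current queries-per-second observed for this zone
```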