code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---|
'''
Get the rating of certain post and user.
'''
try:
recs = TabRating.select().where(
(TabRating.post_id == postid) & (TabRating.user_id == userid)
)
except Exception:
return False
if recs.count() > 0:
return recs.get().rating
else:
return False
|
def get_rating(postid, userid)
|
Get the rating of certain post and user.
| 3.24976 | 2.786354 | 1.166313 |
'''
Update the rating of certain post and user.
The record will be created if no record exists.
'''
rating_recs = TabRating.select().where(
(TabRating.post_id == postid) & (TabRating.user_id == userid)
)
if rating_recs.count() > 0:
MRating.__update_rating(rating_recs.get().uid, rating)
else:
MRating.__insert_data(postid, userid, rating)
|
def update(postid, userid, rating)
|
Update the rating of certain post and user.
The record will be created if no record exists.
| 4.2146 | 3.067679 | 1.373873 |
'''
Update rating.
'''
entry = TabRating.update(
rating=rating
).where(TabRating.uid == uid)
entry.execute()
|
def __update_rating(uid, rating)
|
Update rating.
| 8.912249 | 7.635521 | 1.167209 |
'''
Insert new record.
'''
uid = tools.get_uuid()
TabRating.create(
uid=uid,
post_id=postid,
user_id=userid,
rating=rating,
timestamp=tools.timestamp(),
)
return uid
|
def __insert_data(postid, userid, rating)
|
Insert new record.
| 6.273236 | 4.41549 | 1.420734 |
'''
Update the category of the post.
'''
catid = kwargs['catid'] if ('catid' in kwargs and MCategory.get_by_uid(kwargs['catid'])) else None
post_data = postdata
current_infos = MPost2Catalog.query_by_entity_uid(uid, kind='').objects()
new_category_arr = []
# Used to update post2category, to keep order.
def_cate_arr = ['gcat{0}'.format(x) for x in range(10)]
# for old page.
def_cate_arr.append('def_cat_uid')
# Used to update post extinfo.
cat_dic = {}
for key in def_cate_arr:
if key not in post_data:
continue
if post_data[key] == '' or post_data[key] == '0':
continue
# Duplicates may have been selected; keep the first one.
if post_data[key] in new_category_arr:
continue
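# Pad the category id with spaces to a fixed width of four characters.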
new_category_arr.append(post_data[key] + ' ' * (4 - len(post_data[key])))
cat_dic[key] = post_data[key] + ' ' * (4 - len(post_data[key]))
if catid:
def_cat_id = catid
elif new_category_arr:
def_cat_id = new_category_arr[0]
else:
def_cat_id = None
if def_cat_id:
cat_dic['def_cat_uid'] = def_cat_id
cat_dic['def_cat_pid'] = MCategory.get_by_uid(def_cat_id).pid
print('=' * 40)
print(uid)
print(cat_dic)
MPost.update_jsonb(uid, cat_dic)
for index, catid in enumerate(new_category_arr):
MPost2Catalog.add_record(uid, catid, index)
# Delete the old category if not in post requests.
for cur_info in current_infos:
if cur_info.tag_id not in new_category_arr:
MPost2Catalog.remove_relation(uid, cur_info.tag_id)
|
def update_category(uid, postdata, kwargs)
|
Update the category of the post.
| 3.619904 | 3.579818 | 1.011198 |
'''
Command entry
'''
command_dic = {
'init': run_init,
}
try:
# Here 'h' means the option takes no argument; 'i:' means the -i option requires an argument.
opts, args = getopt.getopt(argv, "hi:")
except getopt.GetoptError:
print('Error: helper.py -i cmd')
sys.exit(2)
for opt, arg in opts:
if opt == "-h":
print('helper.py -i cmd')
print('cmd list ----------------------')
print(' init: ')
sys.exit()
elif opt == "-i":
if arg in command_dic:
command_dic[arg](args)
print('QED!')
else:
print('Wrong Command.')
|
def entry(argv)
|
Command entry
| 5.862247 | 5.744451 | 1.020506 |
'''
Adding relation between two posts.
'''
recs = TabRel.select().where(
(TabRel.post_f_id == app_f) & (TabRel.post_t_id == app_t)
)
if recs.count() > 1:
for record in recs:
MRelation.delete(record.uid)
if recs.count() == 0:
uid = tools.get_uuid()
entry = TabRel.create(
uid=uid,
post_f_id=app_f,
post_t_id=app_t,
count=1,
)
return entry.uid
elif recs.count() == 1:
MRelation.update_relation(app_f, app_t, weight)
else:
return False
|
def add_relation(app_f, app_t, weight=1)
|
Adding relation between two posts.
| 3.296803 | 2.978205 | 1.106976 |
'''
Get the related infos.
'''
info_tag = MInfor2Catalog.get_first_category(app_id)
if info_tag:
return TabPost2Tag.select(
TabPost2Tag,
TabPost.title.alias('post_title'),
TabPost.valid.alias('post_valid')
).join(
TabPost, on=(TabPost2Tag.post_id == TabPost.uid)
).where(
(TabPost2Tag.tag_id == info_tag.tag_id) &
(TabPost.kind == kind)
).order_by(
peewee.fn.Random()
).limit(num)
return TabPost2Tag.select(
TabPost2Tag,
TabPost.title.alias('post_title'),
TabPost.valid.alias('post_valid')
).join(
TabPost, on=(TabPost2Tag.post_id == TabPost.uid)
).where(
TabPost.kind == kind
).order_by(peewee.fn.Random()).limit(num)
|
def get_app_relations(app_id, num=20, kind='1')
|
Get the related infos.
| 2.927569 | 2.551429 | 1.147423 |
'''
/label/s/view
'''
url_arr = self.parse_url(args[0])
if len(url_arr) == 2:
if url_arr[0] == 'remove':
self.remove_redis_keyword(url_arr[1])
else:
self.list(url_arr[0], url_arr[1])
elif len(url_arr) == 3:
self.list(url_arr[0], url_arr[1], url_arr[2])
else:
return False
|
def get(self, *args, **kwargs)
|
/label/s/view
| 3.171481 | 2.603822 | 1.21801 |
'''
Remove the keyword from redis.
'''
redisvr.srem(CMS_CFG['redis_kw'] + self.userinfo.user_name, keyword)
return json.dump({}, self)
|
def remove_redis_keyword(self, keyword)
|
Remove the keyword from redis.
| 27.565809 | 20.353609 | 1.354345 |
'''
Based on def view_cat_new(self, cat_slug, cur_p='') in cat_handler.py.
'''
# The following uses keywords to filter information; do not enable it unless the site has a large amount of content.
# Todo:
# if self.get_current_user():
# redisvr.sadd(config.redis_kw + self.userinfo.user_name, tag_slug)
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MPost2Label.total_number(tag_slug, kind) / CMS_CFG['list_num'])
tag_info = MLabel.get_by_slug(tag_slug)
if tag_info:
tag_name = tag_info.name
else:
tag_name = 'Label search results'
kwd = {'tag_name': tag_name,
'tag_slug': tag_slug,
'title': tag_name,
'current_page': current_page_number,
'router': router_post[kind],
'kind': kind
}
the_list_file = './templates/list/label_{kind}.html'.format(kind=kind)
if os.path.exists(the_list_file):
tmpl = 'list/label_{kind}.html'.format(kind=kind)
else:
tmpl = 'list/label.html'
self.render(tmpl,
infos=MPost2Label.query_pager_by_slug(
tag_slug,
kind=kind,
current_page_num=current_page_number
),
kwd=kwd,
userinfo=self.userinfo,
pager=self.gen_pager(kind, tag_slug, pager_num, current_page_number),
cfg=CMS_CFG)
|
def list(self, kind, tag_slug, cur_p='')
|
Based on def view_cat_new(self, cat_slug, cur_p='') in cat_handler.py.
| 4.763379 | 4.094854 | 1.16326 |
'''
cat_slug: category slug
page_num: total number of pages
current: current page number
'''
if page_num == 1:
return ''
pager_shouye = '''<li class="{0}"> <a href="/label/{1}/{2}"><< 首页</a>
</li>'''.format(
'hidden' if current <= 1 else '', kind, cat_slug
)
pager_pre = '''<li class="{0}"><a href="/label/{1}/{2}/{3}">< 前页</a>
</li>'''.format(
'hidden' if current <= 1 else '', kind, cat_slug, current - 1
)
pager_mid = ''
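# Build one numbered page link per page, marking the current page as active.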
for ind in range(0, page_num):
tmp_mid = '''<li class="{0}"><a href="/label/{1}/{2}/{3}">{3}</a>
</li>'''.format(
'active' if ind + 1 == current else '', kind, cat_slug, ind + 1
)
pager_mid += tmp_mid
pager_next = '''<li class=" {0}"><a href="/label/{1}/{2}/{3}">后页 ></a>
</li>'''.format(
'hidden' if current >= page_num else '', kind, cat_slug, current + 1
)
pager_last = '''<li class=" {0}"><a href="/label/{1}/{2}/{3}">末页>></a>
</li>'''.format(
'hidden' if current >= page_num else '', kind, cat_slug, page_num
)
pager = pager_shouye + pager_pre + pager_mid + pager_next + pager_last
return pager
|
def gen_pager(self, kind, cat_slug, page_num, current)
|
cat_slug: category slug
page_num: total number of pages
current: current page number
| 2.033223 | 1.854803 | 1.096194 |
'''
Build the directory for Whoosh database, and locale.
'''
if os.path.exists('locale'):
pass
else:
os.mkdir('locale')
if os.path.exists(WHOOSH_DB_DIR):
pass
else:
os.makedirs(WHOOSH_DB_DIR)
|
def build_directory()
|
Build the directory for Whoosh database, and locale.
| 3.993767 | 2.252644 | 1.772925 |
'''
Running the script.
'''
for kindv in router_post:
for rec_cat in MCategory.query_all(kind=kindv):
catid = rec_cat.uid
catinfo = MCategory.get_by_uid(catid)
for rec_post2tag in MPost2Catalog.query_by_catid(catid):
postinfo = MPost.get_by_uid(rec_post2tag.post_id)
if postinfo.kind == catinfo.kind:
pass
else:
print(postinfo.uid)
|
def run_check_kind(_)
|
Running the script.
| 5.857761 | 5.357235 | 1.09343 |
'''
creating the default administrator.
'''
post_data = {
'user_name': 'giser',
'user_email': '[email protected]',
'user_pass': '131322',
'role': '3300',
}
if MUser.get_by_name(post_data['user_name']):
print('User {user_name} already exists.'.format(user_name='giser'))
else:
MUser.create_user(post_data)
|
def run_create_admin(*args)
|
creating the default administrator.
| 5.557511 | 4.793005 | 1.159505 |
'''
Update the category.
'''
recs = MPost2Catalog.query_all().objects()
for rec in recs:
if rec.tag_kind != 'z':
print('-' * 40)
print(rec.uid)
print(rec.tag_id)
print(rec.par_id)
MPost2Catalog.update_field(rec.uid, par_id=rec.tag_id[:2] + "00")
|
def run_update_cat(_)
|
Update the category.
| 7.817377 | 6.83538 | 1.143664 |
'''
The rating of the post should be updated if the count is greater than 10.
'''
voted_recs = MRating.query_by_post(postid)
if voted_recs.count() > 10:
rating = MRating.query_average_rating(postid)
else:
rating = 5
logger.info('Get post rating: {rating}'.format(rating=rating))
# MPost.__update_rating(postid, rating)
MPost.update_misc(postid, rating=rating)
|
def update_post(self, postid)
|
The rating of the post should be updated if the count is greater than 10.
| 6.271246 | 4.340787 | 1.444726 |
'''
Only a user who has logged in can vote.
'''
post_data = self.get_post_data()
rating = float(post_data['rating'])
postinfo = MPost.get_by_uid(postid)
if postinfo and self.userinfo:
MRating.update(postinfo.uid, self.userinfo.uid, rating=rating)
self.update_post(postid)
else:
return False
|
def update_rating(self, postid)
|
Only a user who has logged in can vote.
| 6.435644 | 4.051852 | 1.588321 |
'''
Generate the diff of recently edited posts.
'''
diff_str = ''
for key in router_post:
recent_posts = MPost.query_recent_edited(tools.timestamp() - TIME_LIMIT, kind=key)
for recent_post in recent_posts:
hist_rec = MPostHist.get_last(recent_post.uid)
if hist_rec:
raw_title = hist_rec.title
new_title = recent_post.title
infobox = diff_table(raw_title, new_title)
diff_str = diff_str + '''
<h2 style="color:red;font-size:larger;font-weight:70;">TITLE: {0}</h2>
'''.format(recent_post.title) + infobox
infobox = diff_table(hist_rec.cnt_md, recent_post.cnt_md)
diff_str = diff_str + '<h3>CONTENT:{0}</h3>'.format(
recent_post.title
) + infobox + '</hr>'
else:
continue
return diff_str
|
def __get_diff_recent()
|
Generate the diff of recently edited posts.
| 5.587201 | 4.80312 | 1.163244 |
'''
Review for wikis.
'''
recent_posts = MWiki.query_recent_edited(tools.timestamp() - TIME_LIMIT, kind='2')
for recent_post in recent_posts:
hist_rec = MWikiHist.get_last(recent_post.uid)
if hist_rec:
foo_str = '''
<tr><td>{0}</td><td>{1}</td><td class="diff_chg">Edit</td><td>{2}</td>
<td><a href="{3}">{3}</a></td></tr>
'''.format(idx, recent_post.user_name, recent_post.title,
os.path.join(SITE_CFG['site_url'], 'page', recent_post.uid))
email_cnt = email_cnt + foo_str
else:
foo_str = '''
<tr><td>{0}</td><td>{1}</td><td class="diff_add">New </td><td>{2}</td>
<td><a href="{3}">{3}</a></td></tr>
'''.format(idx, recent_post.user_name, recent_post.title,
os.path.join(SITE_CFG['site_url'], 'page', recent_post.uid))
email_cnt = email_cnt + foo_str
idx = idx + 1
email_cnt = email_cnt + '</table>'
return email_cnt, idx
|
def __get_wiki_review(email_cnt, idx)
|
Review for wikis.
| 2.896073 | 2.863606 | 1.011338 |
'''
Review for posts.
'''
for key in router_post:
recent_posts = MPost.query_recent_edited(tools.timestamp() - TIME_LIMIT, kind=key)
for recent_post in recent_posts:
hist_rec = MPostHist.get_last(recent_post.uid)
if hist_rec:
foo_str = '''
<tr><td>{0}</td><td>{1}</td><td class="diff_chg">Edit</td><td>{2}</td>
<td><a href="{3}">{3}</a></td></tr>
'''.format(idx, recent_post.user_name, recent_post.title,
os.path.join(SITE_CFG['site_url'], router_post[key],
recent_post.uid))
email_cnt = email_cnt + foo_str
else:
foo_str = '''
<tr><td>{0}</td><td>{1}</td><td class="diff_add">New </td><td>{2}</td>
<td><a href="{3}">{3}</a></td></tr>
'''.format(idx, recent_post.user_name, recent_post.title,
os.path.join(SITE_CFG['site_url'], router_post[key],
recent_post.uid))
email_cnt = email_cnt + foo_str
idx = idx + 1
return email_cnt, idx
|
def __get_post_review(email_cnt, idx)
|
Review for posts.
| 2.951959 | 2.951179 | 1.000264 |
'''
Get the diff of recent modifications and send the email.
For wiki, page, and post.
'''
email_cnt = '''<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title></title>
<style type="text/css">
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
</style></head><body>'''
idx = 1
email_cnt = email_cnt + '<table border=1>'
email_cnt, idx = __get_post_review(email_cnt, idx) # post
email_cnt, idx = __get_page_review(email_cnt, idx) # page.
email_cnt, idx = __get_wiki_review(email_cnt, idx) # wiki
###########################################################
diff_str = __get_diff_recent()
if len(diff_str) < 20000:
email_cnt = email_cnt + diff_str
email_cnt = email_cnt + '''</body></html>'''
if idx > 1:
send_mail(post_emails, "{0}|{1}|{2}".format(SMTP_CFG['name'], '文档更新情况', DATE_STR), email_cnt)
|
def run_review(*args)
|
Get the diff of recent modifications and send the email.
For wiki, page, and post.
| 3.910794 | 3.085149 | 1.267619 |
'''
Used for the home page. Find all major and minor categories by the first two characters of the category id.
:param qian2: the first two characters of the category id
:return: an array of the categories found
'''
return TabTag.select().where(
TabTag.uid.startswith(qian2)
).order_by(TabTag.order)
|
def get_qian2(qian2)
|
Used for the home page. Find all major and minor categories by the first two characters of the category id.
:param qian2: the first two characters of the category id
:return: an array of the categories found
| 10.563153 | 2.893014 | 3.651263 |
'''
Query all the categories, ordered by count or by the defined order.
'''
if by_count:
recs = TabTag.select().where(TabTag.kind == kind).order_by(TabTag.count.desc())
elif by_order:
recs = TabTag.select().where(TabTag.kind == kind).order_by(TabTag.order)
else:
recs = TabTag.select().where(TabTag.kind == kind).order_by(TabTag.uid)
return recs
|
def query_all(kind='1', by_count=False, by_order=True)
|
Query all the categories, ordered by count or by the defined order.
| 2.857178 | 2.0727 | 1.378482 |
'''
Query the posts count of certain category.
'''
return TabTag.select().where(
TabTag.kind == kind
).order_by(
TabTag.count.desc()
).limit(limit_num)
|
def query_field_count(limit_num, kind='1')
|
Query the posts count of certain category.
| 6.320704 | 3.828983 | 1.650752 |
'''
Return the category record.
'''
rec = TabTag.select().where(TabTag.slug == slug)
if rec.count() > 0:
return rec.get()
return None
|
def get_by_slug(slug)
|
Return the category record.
| 8.55152 | 5.317359 | 1.608227 |
'''
Update the count of certain category.
'''
# Todo: records that are not valid should not be counted.
entry2 = TabTag.update(
count=TabPost2Tag.select().where(
TabPost2Tag.tag_id == cat_id
).count()
).where(TabTag.uid == cat_id)
entry2.execute()
|
def update_count(cat_id)
|
Update the count of certain category.
| 8.443494 | 7.499804 | 1.125829 |
'''
Update the category.
'''
raw_rec = TabTag.get(TabTag.uid == uid)
entry = TabTag.update(
name=post_data['name'] if 'name' in post_data else raw_rec.name,
slug=post_data['slug'] if 'slug' in post_data else raw_rec.slug,
order=post_data['order'] if 'order' in post_data else raw_rec.order,
kind=post_data['kind'] if 'kind' in post_data else raw_rec.kind,
pid=post_data['pid'],
).where(TabTag.uid == uid)
entry.execute()
|
def update(uid, post_data)
|
Update the category.
| 2.460521 | 2.29569 | 1.0718 |
'''
Add or update the data by the given ID of post.
'''
catinfo = MCategory.get_by_uid(uid)
if catinfo:
MCategory.update(uid, post_data)
else:
TabTag.create(
uid=uid,
name=post_data['name'],
slug=post_data['slug'],
order=post_data['order'],
kind=post_data['kind'] if 'kind' in post_data else '1',
pid=post_data['pid'],
)
return uid
|
def add_or_update(uid, post_data)
|
Add or update the data by the given ID of post.
| 4.244738 | 3.340489 | 1.270694 |
'''
List the categories.
'''
kwd = {
'pager': '',
'title': '最近文档',
'kind': kind,
'router': config.router_post[kind]
}
self.render('admin/{0}/category_list.html'.format(self.tmpl_router),
kwd=kwd,
view=MCategory.query_all(kind, by_order=True),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=config.CMS_CFG)
|
def list_catalog(self, kind)
|
List the categories.
| 9.186623 | 7.950053 | 1.155542 |
'''
List posts that recent edited.
'''
kwd = {
'pager': '',
'title': 'Recent posts.',
'with_catalog': with_catalog,
'with_date': with_date,
}
self.render('list/post_list.html',
kwd=kwd,
view=MPost.query_recent(num=20),
postrecs=MPost.query_recent(num=2),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG, )
|
def recent(self, with_catalog=True, with_date=True)
|
List posts that recent edited.
| 6.949336 | 5.564491 | 1.248872 |
'''
List the posts to be modified.
'''
post_recs = MPost.query_random(limit=1000)
outrecs = []
errrecs = []
idx = 0
for postinfo in post_recs:
if idx > 16:
break
cat = MPost2Catalog.get_first_category(postinfo.uid)
if cat:
if 'def_cat_uid' in postinfo.extinfo:
if postinfo.extinfo['def_cat_uid'] == cat.tag_id:
pass
else:
errrecs.append(postinfo)
idx += 1
else:
errrecs.append(postinfo)
idx += 1
else:
outrecs.append(postinfo)
idx += 1
self.render('list/errcat.html',
kwd={},
norecs=outrecs,
errrecs=errrecs,
userinfo=self.userinfo)
|
def errcat(self)
|
List the posts to be modified.
| 4.065889 | 3.555264 | 1.143625 |
'''
List the dated posts.
'''
kwd = {
'pager': '',
'title': '',
}
self.render('list/post_list.html',
kwd=kwd,
userinfo=self.userinfo,
view=MPost.query_dated(10),
postrecs=MPost.query_dated(10),
format_date=tools.format_date,
cfg=CMS_CFG)
|
def refresh(self)
|
List the dated posts.
| 10.20849 | 6.605485 | 1.545456 |
'''
Build the directory used for templates.
'''
tag_arr = ['add', 'edit', 'view', 'list', 'infolist']
path_arr = [os.path.join(CRUD_PATH, x) for x in tag_arr]
for wpath in path_arr:
if os.path.exists(wpath):
continue
os.makedirs(wpath)
|
def build_dir()
|
Build the directory used for templates.
| 5.284147 | 4.436556 | 1.191047 |
'''
Create the reply.
'''
uid = tools.get_uuid()
TabReply.create(
uid=uid,
post_id=post_data['post_id'],
user_name=post_data['user_name'],
user_id=post_data['user_id'],
timestamp=tools.timestamp(),
date=datetime.datetime.now(),
cnt_md=tornado.escape.xhtml_escape(post_data['cnt_reply']),
cnt_html=tools.markdown2html(post_data['cnt_reply']),
vote=0
)
return uid
|
def create_reply(post_data)
|
Create the reply.
| 3.814854 | 3.572667 | 1.067789 |
'''
Get reply list of certain post.
'''
return TabReply.select().where(
TabReply.post_id == postid
).order_by(TabReply.timestamp.desc())
|
def query_by_post(postid)
|
Get reply list of certain post.
| 6.263393 | 4.221696 | 1.48362 |
'''
Return the filter dict for a certain column.
'''
row1_val = wk_sheet['{0}1'.format(column)].value
row2_val = wk_sheet['{0}2'.format(column)].value
row3_val = wk_sheet['{0}3'.format(column)].value
row4_val = wk_sheet['{0}4'.format(column)].value
if row1_val and row1_val.strip() != '':
row2_val = row2_val.strip()
slug_name = row1_val.strip()
c_name = row2_val.strip()
tags1 = [x.strip() for x in row3_val.split(',')]
tags_dic = {}
# if only one tag,
if len(tags1) == 1:
xx_1 = row2_val.split(':') # 'text' # HTML text input control.
if xx_1[0].lower() in INPUT_ARR:
xx_1[0] = xx_1[0].lower()
else:
xx_1[0] = 'text'
if len(xx_1) == 2:
ctr_type, unit = xx_1
else:
ctr_type = xx_1[0]
unit = ''
tags_dic[1] = unit
else:
ctr_type = 'select' # HTML selection control.
for index, tag_val in enumerate(tags1):
# the index of tags_dic starts from 1.
tags_dic[index + 1] = tag_val.strip()
outkey = 'html_{0}'.format(slug_name)
outval = {
'en': slug_name,
'zh': c_name,
'dic': tags_dic,
'type': ctr_type,
'display': row4_val,
}
return (outkey, outval)
else:
return (None, None)
|
def __write_filter_dic(wk_sheet, column)
|
Return the filter dict for a certain column.
| 3.21087 | 3.139067 | 1.022874 |
'''
Get the last wiki in history.
'''
recs = TabWikiHist.select().where(
TabWikiHist.wiki_id == postid
).order_by(TabWikiHist.time_update.desc())
return None if recs.count() == 0 else recs.get()
|
def get_last(postid)
|
Get the last wiki in history.
| 5.847881 | 4.217745 | 1.386495 |
'''
Compare two role strings.
'''
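# Compare the four permission digits one by one; a '0' in def_rule means that digit is not checked.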
for iii in range(4):
if def_rule[iii] == '0':
continue
if usr_rule[iii] >= def_rule[iii]:
return True
return False
|
def is_prived(usr_rule, def_rule)
|
Compare two role strings.
| 6.79377 | 4.059762 | 1.673441 |
'''
role for view.
'''
def wrapper(self, *args, **kwargs):
'''
wrapper.
'''
if ROLE_CFG['view'] == '':
return method(self, *args, **kwargs)
elif self.current_user:
if is_prived(self.userinfo.role, ROLE_CFG['view']):
return method(self, *args, **kwargs)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html',
kwd=kwd,
userinfo=self.userinfo)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html',
kwd=kwd,
userinfo=self.userinfo)
return wrapper
|
def auth_view(method)
|
role for view.
| 3.117863 | 3.060359 | 1.01879 |
'''
Running the script.
'''
print('--')
drop_the_table(TabPost)
drop_the_table(TabTag)
drop_the_table(TabMember)
drop_the_table(TabWiki)
drop_the_table(TabLink)
drop_the_table(TabEntity)
drop_the_table(TabPostHist)
drop_the_table(TabWikiHist)
drop_the_table(TabCollect)
drop_the_table(TabPost2Tag)
drop_the_table(TabRel)
drop_the_table(TabEvaluation)
drop_the_table(TabUsage)
drop_the_table(TabReply)
drop_the_table(TabUser2Reply)
drop_the_table(TabRating)
|
def run_drop_tables(_)
|
Running the script.
| 3.194784 | 2.975304 | 1.073767 |
result = dict(platform=dict(name=None, version=None))
_suggested_detectors = []
for info_type in detectorshub:
detectors = _suggested_detectors or detectorshub[info_type]
for detector in detectors:
try:
detector.detect(agent, result)
except Exception as _err:
pass
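# Optionally make sure the 'os' and 'browser' entries exist, with 'name' and 'version' keys set to None when not detected.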
if fill_none:
for outer_key in ('os', 'browser'):
outer_value = result.setdefault(outer_key, dict())
for inner_key in ('name', 'version'):
outer_value.setdefault(inner_key, None)
return result
|
def detect(agent, fill_none=False)
|
fill_none: if name/version is not detected, the respective key is still added to the result with value None
| 4.183916 | 3.709771 | 1.12781 |
result = detect(agent)
os_list = []
if 'flavor' in result:
os_list.append(result['flavor']['name'])
if 'dist' in result:
os_list.append(result['dist']['name'])
if 'os' in result:
os_list.append(result['os']['name'])
os = os_list and " ".join(os_list) or "Unknown OS"
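# Pick the first available OS version string: flavor, then dist, then os.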
os_version = os_list and (result.get('flavor') and result['flavor'].get('version')) or \
(result.get('dist') and result['dist'].get('version')) or (result.get('os') and result['os'].get('version')) or ""
browser = 'browser' in result and result['browser'].get('name') or 'Unknown Browser'
browser_version = 'browser' in result and result['browser'].get('version') or ""
if browser_version:
browser = " ".join((browser, browser_version))
if os_version:
os = " ".join((os, os_version))
return os, browser
|
def simple_detect(agent)
|
-> (os, browser) # tuple of strings
| 2.197132 | 2.04982 | 1.071866 |
version_markers = self.version_markers if \
isinstance(self.version_markers[0], (list, tuple)) else [self.version_markers]
version_part = agent.split(word, 1)[-1]
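# Scan the configured (start, end) marker pairs to cut the version string out of the text that follows the word.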
for start, end in version_markers:
if version_part.startswith(start) and end in version_part:
version = version_part[1:]
if end: # end could be empty string
version = version.split(end)[0]
if not self.allow_space_in_version:
version = version.split()[0]
return version
|
def getVersion(self, agent, word)
|
=> version string /None
| 3.622267 | 3.688323 | 0.98209 |
''' Compiles the files and runs memory tests
if needed.
PARAM args: list of files passed as CMD args
to be compiled.
PARAM mem_test: Whether to perform a memory test.
'''
for filename in args:
if not os.path.isfile(filename):
print('The file doesn\'t exist')
return
build_and_run_file(filename)
print("")
|
def compile_files(args, mem_test=False)
|
Compiles the files and runs memory tests
if needed.
PARAM args: list of files passed as CMD args
to be compiled.
PARAM mem_test: Whether to perform a memory test.
| 11.428141 | 3.101248 | 3.685014 |
''' Builds and runs the filename specified
according to the extension
PARAM filename: name of file to build and run
'''
(directory, name, extension) = get_file_tuple(filename)
if extension == 'c':
print(" = = = = = = ", YELLOW, "GCC: Compiling " + filename + " file", \
RESET, " = = = = = =\n")
compiler = Compiler(filename)
out = compiler.compile()
if out != 0:
print('Error while compiling. Code:', out, 'Please retry.')
return out
print("")
out = compiler.run()
return out
elif extension == 'cpp':
print(" = = = = = = ", YELLOW, "GPP: Compiling " + filename + " file", \
RESET, " = = = = = =\n")
compiler = Compiler(filename)
out = compiler.compile()
if out != 0:
print('Error while compiling. Code:', out, 'Please retry.')
return out
print("")
out = compiler.run()
return out
elif extension == 'py':
print(" = = = = = = ", YELLOW, "PYTHON: Executing " + filename + " file", \
RESET, " = = = = = =\n")
compiler = Compiler(filename)
out = compiler.run()
return out
elif extension == 'java':
command = EXECUTABLE_JAVAC + ' ' + filename
perform_system_command(command)
command_run = EXECUTABLE_JAVA + ' ' + name
test_file = directory + "/" + name + ".input"
if os.path.exists(test_file):
command_run += " < " + test_file
return perform_system_command(command_run)
else:
print("Language yet not supported")
return -1
|
def build_and_run_file(filename)
|
Builds and runs the filename specified
according to the extension
PARAM filename: name of file to build and run
| 2.984838 | 2.777492 | 1.074652 |
all_installed = True
for exe in exec_list:
if not is_tool(exe):
print("Executable: " + exe + " is not installed")
all_installed = False
return all_installed
|
def check_exec_installed(exec_list)
|
Check the required programs are
installed.
PARAM exec_list: list of programs to check
RETURN: True if all installed else False
| 2.860482 | 3.121136 | 0.916487 |
parser = ArgumentParser()
parser.add_argument("-l", "--loop", type=int, help="Loop every X seconds")
parser.add_argument('-V', '--version',
action='store_true',
dest='version',
help='Print the version number and exit')
parser.add_argument("-u", "--update",
action='store_true',
dest="update",
help="Update the software from online repo")
parser.add_argument("-p", "--problem",
action='store_true',
dest="problem",
help="Report a problem")
parser.add_argument("-m", "--memory",
action='store_true',
dest="memory",
help="Run memory tests")
args, otherthings = parser.parse_known_args()
return args, otherthings, parser
|
def parse_known_args()
|
Parse command line arguments
| 2.655092 | 2.643262 | 1.004476 |
'''Function to use multiprocessing to process pandas Dataframe.
This function applies a function on each row of the input DataFrame by
multiprocessing.
Args:
func (function): The function to apply on each row of the input
Dataframe. The func must accept pandas.Series as the first
positional argument and return a pandas.Series.
data (pandas.DataFrame): A DataFrame to be processed.
num_process (int, optional): The number of processes to run in
parallel. Defaults to be the number of CPUs of the computer.
verbose (bool, optional): Set to False to disable verbose output.
args (dict): Keyword arguments to pass as keywords arguments to `func`
return:
A dataframe containing the results
'''
# Check arguments value
assert isinstance(data, pd.DataFrame), \
'Input data must be a pandas.DataFrame instance'
if num_process is None:
num_process = multiprocessing.cpu_count()
# Establish communication queues
tasks = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
error_queue = multiprocessing.Queue()
start_time = time.time()
# Enqueue tasks
num_task = len(data)
for i in range(num_task):
tasks.put(data.iloc[i, :])
# Add a poison pill for each consumer
for i in range(num_process):
tasks.put(None)
logger.info('Create {} processes'.format(num_process))
consumers = [Consumer(func, tasks, results, error_queue, **args)
for i in range(num_process)]
for w in consumers:
w.start()
# Add a task tracking process
task_tracker = TaskTracker(tasks, verbose)
task_tracker.start()
# Wait for all input data to be processed
tasks.join()
# If there is any error in any process, output the error messages
num_error = error_queue.qsize()
if num_error > 0:
for i in range(num_error):
logger.error(error_queue.get())
raise RuntimeError('Multi process jobs failed')
else:
# Collect results
result_table = []
while num_task:
result_table.append(results.get())
num_task -= 1
df_results = pd.DataFrame(result_table)
logger.info("Jobs finished in {0:.2f}s".format(
time.time()-start_time))
return df_results
|
def multi_process(func, data, num_process=None, verbose=True, **args)
|
Function to use multiprocessing to process pandas Dataframe.
This function applies a function on each row of the input DataFrame by
multiprocessing.
Args:
func (function): The function to apply on each row of the input
Dataframe. The func must accept pandas.Series as the first
positional argument and return a pandas.Series.
data (pandas.DataFrame): A DataFrame to be processed.
num_process (int, optional): The number of processes to run in
parallel. Defaults to be the number of CPUs of the computer.
verbose (bool, optional): Set to False to disable verbose output.
args (dict): Keyword arguments to pass as keywords arguments to `func`
return:
A dataframe containing the results
| 2.887987 | 2.043098 | 1.413533 |
'''Define the job of each process to run.
'''
while True:
next_task = self._task_queue.get()
# If there is any error, only consume data but not run the job
if self._error_queue.qsize() > 0:
self._task_queue.task_done()
continue
if next_task is None:
# Poison pill means shutdown
self._task_queue.task_done()
break
try:
answer = self._func(next_task, **self._args)
self._task_queue.task_done()
self._result_queue.put(answer)
except Exception as e:
self._task_queue.task_done()
self._error_queue.put((os.getpid(), e))
logger.error(e)
continue
|
def run(self)
|
Define the job of each process to run.
| 3.182369 | 2.769375 | 1.149129 |
'''Define the job of each process to run.
'''
if self.verbose:
pbar = tqdm(total=100)
while True:
task_remain = self._task_queue.qsize()
task_finished = int((float(self.total_task - task_remain) /
float(self.total_task)) * 100)
if task_finished % 20 == 0 and task_finished != self.current_state:
self.current_state = task_finished
logger.info('{0}% done'.format(task_finished))
if self.verbose and task_finished > 0:
pbar.update(20)
if task_remain == 0:
break
logger.debug('All task data cleared')
|
def run(self)
|
Define the job of each process to run.
| 3.866326 | 3.325693 | 1.162562 |
''' A sample function
It takes 'wait' seconds to calculate the sum of each row
'''
time.sleep(wait)
data_row['sum'] = data_row['col_1'] + data_row['col_2']
return data_row
|
def func(data_row, wait)
|
A sample function
It takes 'wait' seconds to calculate the sum of each row
| 4.85151 | 2.744774 | 1.767545 |
# Treating missing values
simulated_array, observed_array = treat_values(simulated_array, observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero)
return np.mean(simulated_array - observed_array)
|
def me(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the mean error of the simulated and observed data.
.. image:: /pictures/ME.png
**Range:** -inf < ME < inf, data units, closer to zero is better, indicates bias.
**Notes:** The mean error (ME) measures the difference between the simulated data and the
observed data. For the mean error, a smaller number indicates a better fit to the original
data. Note that if the error is in the form of random noise, the mean error will be very small,
which can skew the accuracy of this metric. ME is cumulative and will be small even if there
are large positive and negative errors that balance.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean error value.
Examples
--------
Note that in this example the random noise cancels, leaving a very small ME.
>>> import HydroErr as he
>>> import numpy as np
>>> # Seed for reproducibility
>>> np.random.seed(54839)
>>> x = np.arange(100) / 20
>>> sim = np.sin(x) + 2
>>> obs = sim * (((np.random.rand(100) - 0.5) / 10) + 1)
>>> he.me(sim, obs)
-0.006832220968967168
References
----------
- Fisher, R.A., 1920. A Mathematical Examination of the Methods of Determining the Accuracy of
an Observation by the Mean Error, and by the Mean Square Error. Monthly Notices of the Royal
Astronomical Society 80 758 - 770.
| 2.211433 | 4.160319 | 0.531554 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
return np.mean(np.absolute(simulated_array - observed_array))
|
def mae(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the mean absolute error of the simulated and observed data.
.. image:: /pictures/MAE.png
**Range:** 0 ≤ MAE < inf, data units, smaller is better.
**Notes:** The MAE measures the absolute difference between the simulated data and the observed
data. For the mean absolute error, a smaller number indicates a better fit to the original data.
Also note that random errors do not cancel. Also referred to as an L1-norm.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean absolute error value.
References
----------
- Willmott, Cort J., and Kenji Matsuura. “Advantages of the Mean Absolute Error (MAE) over the
Root Mean Square Error (RMSE) in Assessing Average Model Performance.” Climate Research 30,
no. 1 (2005): 79–82.
- Willmott, Cort J., and Kenji Matsuura. “On the Use of Dimensioned Measures of Error to
Evaluate the Performance of Spatial Interpolators.” International Journal of Geographical
Information Science 20, no. 1 (2006): 89–102.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 6.8])
>>> he.mae(sim, obs)
0.5666666666666665
| 2.312146 | 3.773028 | 0.612809 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
sim_log = np.log1p(simulated_array)
obs_log = np.log1p(observed_array)
return np.mean(sim_log - obs_log)
|
def mle(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the mean log error of the simulated and observed data.
.. image:: /pictures/MLE.png
**Range:** -inf < MLE < inf, data units, closer to zero is better.
**Notes** Same as the mean error (ME), only it uses log ratios as the error term. This limits the impact of outliers and more
evenly weights high and low data values.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean log error value.
Examples
--------
Note that the value is very small because it is in log space.
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 6.8])
>>> he.mle(sim, obs)
0.002961767058151136
References
----------
- Törnqvist, Leo, Pentti Vartia, and Yrjö O. Vartia. “How Should Relative Changes Be Measured?”
The American Statistician 39, no. 1 (1985): 43–46.
| 2.372983 | 3.157461 | 0.751548 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
return np.median(simulated_array - observed_array)
|
def mde(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the median error (MdE) between the simulated and observed data.
.. image:: /pictures/MdE.png
**Range** -inf < MdE < inf, closer to zero is better.
**Notes** This metric indicates bias. It is similar to the mean error (ME), only it takes the
median rather than the mean. Median measures reduces the impact of outliers.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Examples
--------
Note that the last outlier residual in the time series is negated using the median.
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 100])
>>> he.mde(sim, obs)
-0.10000000000000009
Returns
-------
float
The median error value.
| 2.736243 | 3.924681 | 0.697189 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
return np.median(np.abs(simulated_array - observed_array))
|
def mdae(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the median absolute error (MdAE) between the simulated and observed data.
.. image:: /pictures/MdAE.png
**Range** 0 ≤ MdAE < inf, closer to zero is better.
**Notes** Random errors (noise) do not cancel. It is the same as the mean absolute error (MAE), only it takes the
median rather than the mean. Median measures reduces the impact of outliers.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Examples
--------
Note that the last outlier residual in the time series is negated using the median.
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 100])
>>> he.mdae(sim, obs)
0.75
Returns
-------
float
The median absolute error value.
| 2.655432 | 3.659323 | 0.725662 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
return np.linalg.norm(observed_array - simulated_array)
|
def ed(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the Euclidean distance between predicted and observed values in vector space.
.. image:: /pictures/ED.png
**Range** 0 ≤ ED < inf, smaller is better.
**Notes** Also sometimes referred to as the L2-norm.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.ed(sim, obs)
1.63707055437449
Returns
-------
float
The euclidean distance error value.
References
----------
- Kennard, M. J., Mackay, S. J., Pusey, B. J., Olden, J. D., & Marsh, N. (2010). Quantifying
uncertainty in estimation of hydrologic metrics for ecohydrological studies. River Research
and Applications, 26(2), 137-156.
| 2.721691 | 3.954014 | 0.688336 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = observed_array / np.mean(observed_array)
b = simulated_array / np.mean(simulated_array)
return np.linalg.norm(a - b)
|
def ned(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the normalized Euclidian distance between the simulated and observed data in vector
space.
.. image:: /pictures/NED.png
**Range** 0 ≤ NED < inf, smaller is better.
**Notes** Also sometimes referred to as the squared L2-norm.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The normalized euclidean distance value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.ned(sim, obs)
0.2872053604165771
References
----------
- Kennard, M. J., Mackay, S. J., Pusey, B. J., Olden, J. D., & Marsh, N. (2010). Quantifying
uncertainty in estimation of hydrologic metrics for ecohydrological studies. River Research
and Applications, 26(2), 137-156.
| 2.589914 | 3.344381 | 0.774408 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
rmse_value = np.sqrt(np.mean((simulated_array - observed_array) ** 2))
obs_max = np.max(observed_array)
obs_min = np.min(observed_array)
return rmse_value / (obs_max - obs_min)
|
def nrmse_range(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the range normalized root mean square error between the simulated and observed data.
.. image:: /pictures/NRMSE_Range.png
**Range:** 0 ≤ NRMSE < inf.
**Notes:** This metric is the RMSE normalized by the range of the observed time series (x).
Normalizing allows comparison between data sets with different scales. The NRMSErange is the
most sensitive to outliers of the three normalized rmse metrics.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The range normalized root mean square error value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.nrmse_range(sim, obs)
0.0891108340256152
References
----------
- Pontius, R.G., Thontteh, O., Chen, H., 2008. Components of information for multiple
resolution comparison between maps that share a real variable. Environmental and Ecological
Statistics 15(2) 111-142.
| 2.32821 | 2.900135 | 0.802794 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
rmse_value = np.sqrt(np.mean((simulated_array - observed_array) ** 2))
obs_mean = np.mean(observed_array)
return rmse_value / obs_mean
|
def nrmse_mean(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the mean normalized root mean square error between the simulated and observed data.
.. image:: /pictures/NRMSE_Mean.png
**Range:** 0 ≤ NRMSE < inf.
**Notes:** This metric is the RMSE normalized by the mean of the observed time series (x).
Normalizing allows comparison between data sets with different scales.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean normalized root mean square error.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.nrmse_mean(sim, obs)
0.11725109740212526
References
----------
- Pontius, R.G., Thontteh, O., Chen, H., 2008. Components of information for multiple
resolution comparison between maps that share a real variable. Environmental and Ecological
Statistics 15(2) 111-142.
| 2.517659 | 3.345991 | 0.752441 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
rmse_value = np.sqrt(np.mean((simulated_array - observed_array) ** 2))
q1 = np.percentile(observed_array, 25)
q3 = np.percentile(observed_array, 75)
iqr = q3 - q1
return rmse_value / iqr
|
def nrmse_iqr(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the IQR normalized root mean square error between the simulated and observed data.
.. image:: /pictures/NRMSE_IQR.png
**Range:** 0 ≤ NRMSE < inf.
**Notes:** This metric is the RMSE normalized by the interquartile range of the observed time
series (x). Normalizing allows comparison between data sets with different scales.
The NRMSEquartile is the least sensitive to outliers of the three normalized rmse metrics.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The IQR normalized root mean square error.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.nrmse_iqr(sim, obs)
0.2595461185212093
References
----------
- Pontius, R.G., Thontteh, O., Chen, H., 2008. Components of information for multiple
resolution comparison between maps that share a real variable. Environmental and Ecological
Statistics 15(2) 111-142.
| 2.204738 | 2.724058 | 0.809358 |
# Checking and cleaning the data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
start = m
end = simulated_array.size - m
a = np.mean(np.abs(simulated_array - observed_array))
b = np.abs(observed_array[start:observed_array.size] - observed_array[:end])
return a / (np.sum(b) / end)
|
def mase(simulated_array, observed_array, m=1, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the mean absolute scaled error between the simulated and observed data.
.. image:: /pictures/MASE.png
**Range:**
**Notes:**
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
m: int
If given, indicates the seasonal period m. If not given, the default is 1.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean absolute scaled error.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.mase(sim, obs)
0.17341040462427745
References
----------
- Hyndman, R.J., Koehler, A.B., 2006. Another look at measures of forecast accuracy.
International Journal of Forecasting 22(4) 679-688.
| 2.968049 | 3.655899 | 0.811852 |
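A minimal added sketch of the MASE computation mirrored from the record above: the mean absolute error is scaled by the in-sample MAE of a seasonal naive forecast with lag m:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])
m = 1  # seasonal period

mae = np.mean(np.abs(sim - obs))
naive_mae = np.sum(np.abs(obs[m:] - obs[:-m])) / (obs.size - m)
print(mae / naive_mae)  # ~0.1734, matching he.mase(sim, obs)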
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = simulated_array - observed_array
b = np.abs(a / observed_array)
return np.mean(np.arctan(b))
|
def maape(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the Mean Arctangent Absolute Percentage Error (MAAPE).
.. image:: /pictures/MAAPE.png
**Range:** 0 ≤ MAAPE < π/2, does not indicate bias, smaller is better.
**Notes:** Represents the mean absolute error as a percentage of the observed values. Handles
0s in the observed data. This metric is not as biased as MAPE by under-over predictions.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean arctangent absolute percentage error.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.mape(sim, obs)
11.639226612630866
References
----------
- Kim, S., Kim, H., 2016. A new metric of absolute percentage error for intermittent demand
forecasts. International Journal of Forecasting 32(3) 669-679.
| 2.55794 | 3.705882 | 0.690238 |
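A minimal added sketch of the MAAPE formula itself (the mean of the arctangent of absolute relative errors); by construction the result is bounded by π/2 even for large relative errors:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])

print(np.mean(np.arctan(np.abs((sim - obs) / obs))))  # ~0.1155 for these arrays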
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = ((simulated_array - observed_array) / observed_array) ** 2
b = np.abs(simulated_array - np.mean(observed_array))
c = np.abs(observed_array - np.mean(observed_array))
e = ((b + c) / np.mean(observed_array)) ** 2
return 1 - (np.sum(a) / np.sum(e))
|
def drel(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the relative index of agreement (drel).
.. image:: /pictures/drel.png
**Range:** 0 ≤ drel < 1, does not indicate bias, larger is better.
**Notes:** Instead of absolute differences, this metric uses relative differences.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The relative index of agreement.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.drel(sim, obs)
0.9740868625579597
References
----------
- Krause, P., Boyle, D., Bäse, F., 2005. Comparison of different efficiency criteria for
hydrological model assessment. Advances in geosciences 5 89-97.
| 2.239629 | 2.71759 | 0.824123 |
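A minimal added sketch of the relative index of agreement, mirroring the arithmetic in the record above:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])

num = np.sum(((sim - obs) / obs) ** 2)
den = np.sum(((np.abs(sim - obs.mean()) + np.abs(obs - obs.mean())) / obs.mean()) ** 2)
print(1 - num / den)  # ~0.9741, matching he.drel(sim, obs)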
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = 2 / np.pi
b = np.mean((simulated_array - observed_array) ** 2) # MSE
c = np.std(observed_array, ddof=1) ** 2 + np.std(simulated_array, ddof=1) ** 2
e = (np.mean(simulated_array) - np.mean(observed_array)) ** 2
f = c + e
return a * np.arcsin(1 - (b / f))
|
def watt_m(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute Watterson's M (M).
.. image:: /pictures/M.png
**Range:** -1 ≤ M < 1, does not indicate bias, larger is better.
**Notes:**
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
Watterson's M value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.watt_m(sim, obs)
0.8307913876595929
References
----------
- Watterson, I.G., 1996. Non‐dimensional measures of climate model performance. International
Journal of Climatology 16(4) 379-391.
| 2.452559 | 2.920174 | 0.839867 |
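A minimal added sketch of Watterson's M, mirroring the record above: the MSE is rescaled by the two sample variances plus the squared mean bias, then mapped through arcsin:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])

mse = np.mean((sim - obs) ** 2)
denom = np.std(obs, ddof=1) ** 2 + np.std(sim, ddof=1) ** 2 + (sim.mean() - obs.mean()) ** 2
print((2 / np.pi) * np.arcsin(1 - mse / denom))  # ~0.8308, matching he.watt_m(sim, obs)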
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = np.dot(simulated_array, observed_array)
b = np.linalg.norm(simulated_array) * np.linalg.norm(observed_array)
return np.arccos(a / b)
|
def sa(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the Spectral Angle (SA).
.. image:: /pictures/SA.png
**Range:** -π/2 ≤ SA < π/2, closer to 0 is better.
**Notes:** The spectral angle metric measures the angle between the two vectors in hyperspace.
It indicates how well the shape of the two series match – not magnitude.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral Angle value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sa(sim, obs)
0.10816831366492945
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166.
| 2.216929 | 3.007225 | 0.737201 |
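A minimal added sketch of the spectral angle: the arccosine of the cosine similarity between the two series treated as vectors:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])

cos_angle = np.dot(sim, obs) / (np.linalg.norm(sim) * np.linalg.norm(obs))
print(np.arccos(cos_angle))  # ~0.1082 rad, matching he.sa(sim, obs)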
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
a = np.dot(observed_array - np.mean(observed_array), simulated_array - np.mean(simulated_array))
b = np.linalg.norm(observed_array - np.mean(observed_array))
c = np.linalg.norm(simulated_array - np.mean(simulated_array))
e = b * c
return np.arccos(a / e)
|
def sc(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the Spectral Correlation (SC).
.. image:: /pictures/SC.png
**Range:** -π/2 ≤ SC < π/2, closer to 0 is better.
**Notes:** The spectral correlation metric measures the angle between the two vectors in
hyperspace. It indicates how well the shape of the two series match – not magnitude.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral Correlation value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sc(sim, obs)
0.27991341383646606
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166.
| 2.047213 | 2.594803 | 0.788967 |
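A minimal added sketch of the spectral correlation: the same angle computation as SA, but applied to mean-centered series:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])

sim_c = sim - sim.mean()
obs_c = obs - obs.mean()
cos_angle = np.dot(obs_c, sim_c) / (np.linalg.norm(obs_c) * np.linalg.norm(sim_c))
print(np.arccos(cos_angle))  # ~0.2799 rad, matching he.sc(sim, obs)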
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
first = (observed_array / np.mean(observed_array)) - (
simulated_array / np.mean(simulated_array))
second1 = np.log10(observed_array) - np.log10(np.mean(observed_array))
second2 = np.log10(simulated_array) - np.log10(np.mean(simulated_array))
return np.dot(first, second1 - second2)
|
def sid(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the Spectral Information Divergence (SID).
.. image:: /pictures/SID.png
**Range:** -π/2 ≤ SID < π/2, closer to 0 is better.
**Notes:** The spectral information divergence measures the angle between the two vectors in
hyperspace. It indicates how well the shape of the two series match – not magnitude.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral information divergence value.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sid(sim, obs)
0.03517616895318012
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166.
| 2.244293 | 2.695316 | 0.832664 |
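A minimal added sketch of the spectral information divergence as computed in the record above (mean-scaled differences dotted with log-ratio differences):

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])

first = obs / obs.mean() - sim / sim.mean()
second = (np.log10(obs) - np.log10(obs.mean())) - (np.log10(sim) - np.log10(sim.mean()))
print(np.dot(first, second))  # ~0.0352, matching he.sid(sim, obs)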
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
sgx = observed_array[1:] - observed_array[:observed_array.size - 1]
sgy = simulated_array[1:] - simulated_array[:simulated_array.size - 1]
a = np.dot(sgx, sgy)
b = np.linalg.norm(sgx) * np.linalg.norm(sgy)
return np.arccos(a / b)
|
def sga(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the Spectral Gradient Angle (SGA).
.. image:: /pictures/SGA.png
**Range:** -π/2 ≤ SGA < π/2, closer to 0 is better.
**Notes:** The spectral gradient angle measures the angle between the two vectors in
hyperspace. It indicates how well the shape of the two series match – not magnitude.
SG is the gradient of the simulated or observed time series.
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The Spectral Gradient Angle.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.sga(sim, obs)
0.26764286472739834
References
----------
- Robila, S.A., Gershman, A., 2005. Spectral matching accuracy in processing hyperspectral
data, Signals, Circuits and Systems, 2005. ISSCS 2005. International Symposium on. IEEE,
pp. 163-166.
| 2.244489 | 2.698692 | 0.831695 |
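A minimal added sketch of the spectral gradient angle: the SA computation applied to the first differences (gradients) of the two series:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])

sg_obs = np.diff(obs)  # lag-1 differences of the observed series
sg_sim = np.diff(sim)  # lag-1 differences of the simulated series
cos_angle = np.dot(sg_obs, sg_sim) / (np.linalg.norm(sg_obs) * np.linalg.norm(sg_sim))
print(np.arccos(cos_angle))  # ~0.2676 rad, matching he.sga(sim, obs)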
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
h = (simulated_array - observed_array) / observed_array
return np.mean(h)
|
def h1_mhe(simulated_array, observed_array, replace_nan=None, replace_inf=None,
remove_neg=False, remove_zero=False)
|
Compute the H1 mean error.
.. image:: /pictures/H1.png
.. image:: /pictures/MHE.png
**Range:**
**Notes:**
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean H1 error.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.h1_mhe(sim, obs)
0.002106551840594386
References
----------
- Tornquist, L., Vartia, P., Vartia, Y.O., 1985. How Should Relative Changes be Measured?
The American Statistician 43-46.
| 2.592821 | 3.724207 | 0.696208 |
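A minimal added sketch: the H1 mean error is simply the mean signed relative error:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])

print(np.mean((sim - obs) / obs))  # ~0.0021, matching he.h1_mhe(sim, obs)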
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
top = (simulated_array / observed_array - 1)
bot = np.power(0.5 * (1 + np.power(simulated_array / observed_array, k)), 1 / k)
h = top / bot
return np.mean(np.abs(h))
|
def h6_mahe(simulated_array, observed_array, k=1, replace_nan=None, replace_inf=None,
remove_neg=False,
remove_zero=False)
|
Compute the H6 mean absolute error.
.. image:: /pictures/H6.png
.. image:: /pictures/AHE.png
**Range:**
**Notes:**
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
k: int or float
If given, sets the value of k. If not given, the default is k=1.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean absolute H6 error.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.h6_mahe(sim, obs)
0.11743831388794852
References
----------
- Tornquist, L., Vartia, P., Vartia, Y.O., 1985. How Should Relative Changes be Measured?
The American Statistician 43-46.
| 2.793965 | 3.436494 | 0.813028 |
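A minimal added sketch of the H6 absolute error with k=1, mirroring the arithmetic in the record above:

import numpy as np

sim = np.array([5, 7, 9, 2, 4.5, 6.7])
obs = np.array([4.7, 6, 10, 2.5, 4, 7])
k = 1

ratio = sim / obs
h6 = (ratio - 1) / (0.5 * (1 + ratio ** k)) ** (1 / k)
print(np.mean(np.abs(h6)))  # ~0.1174, matching he.h6_mahe(sim, obs)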
def decorator(function):
@wraps(function)
def wrapper(request, *args, **kwargs):
# We know the user has been authenticated via a canvas page if a signed request is set.
canvas = request.facebook is not False and hasattr(request.facebook, "signed_request")
# The user has already authorized the application, but the given view requires
# permissions besides the defaults listed in ``FACEBOOK_APPLICATION_DEFAULT_PERMISSIONS``.
#
# Derive a list of outstanding permissions and prompt the user to grant them.
if request.facebook and request.facebook.user and permissions:
outstanding_permissions = [p for p in permissions if p not in request.facebook.user.permissions]
if outstanding_permissions:
return authorize_application(
request = request,
redirect_uri = redirect_uri or get_post_authorization_redirect_url(request, canvas=canvas),
permissions = outstanding_permissions
)
# The user has not authorized the application yet.
#
# Concatenate the default permissions with permissions required for this particular view.
if not request.facebook or not request.facebook.user:
return authorize_application(
request = request,
redirect_uri = redirect_uri or get_post_authorization_redirect_url(request, canvas=canvas),
permissions = (FACEBOOK_APPLICATION_INITIAL_PERMISSIONS or []) + (permissions or [])
)
return function(request, *args, **kwargs)
return wrapper
if callable(redirect_uri):
function = redirect_uri
redirect_uri = None
return decorator(function)
else:
return decorator
|
def facebook_authorization_required(redirect_uri=FACEBOOK_AUTHORIZATION_REDIRECT_URL, permissions=None)
|
Require the user to authorize the application.
:param redirect_uri: A string describing an URL to redirect to after authorization is complete.
If ``None``, redirects to the current URL in the Facebook canvas
(e.g. ``http://apps.facebook.com/myapp/current/path``). Defaults to
``FACEBOOK_AUTHORIZATION_REDIRECT_URL`` (which, in turn, defaults to ``None``).
:param permissions: A list of strings describing Facebook permissions.
| 3.333399 | 3.282995 | 1.015353 |
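A hypothetical usage sketch of the decorator above in a Django view; the import path, view name, and permission are illustrative assumptions, not taken from the record:

from django.http import HttpResponse
from fandjango.decorators import facebook_authorization_required  # assumed import path

@facebook_authorization_required(permissions=['email'])
def canvas_home(request):
    # request.facebook.user is available once the user has authorized the application.
    return HttpResponse("Hello, %s" % request.facebook.user.first_name)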
if self.first_name and self.middle_name and self.last_name:
return "%s %s %s" % (self.first_name, self.middle_name, self.last_name)
if self.first_name and self.last_name:
return "%s %s" % (self.first_name, self.last_name)
|
def full_name(self)
|
Return the user's full name.
| 1.501155 | 1.460351 | 1.027941 |
records = self.graph.get('me/permissions')['data']
permissions = []
for record in records:
if record['status'] == 'granted':
permissions.append(record['permission'])
return permissions
|
def permissions(self)
|
A list of strings describing `permissions`_ the user has granted your application.
.. _permissions: http://developers.facebook.com/docs/reference/api/permissions/
| 4.381281 | 3.212752 | 1.363716 |
profile = graph_data or self.graph.get('me')
self.facebook_username = profile.get('username')
self.first_name = profile.get('first_name')
self.middle_name = profile.get('middle_name')
self.last_name = profile.get('last_name')
self.birthday = datetime.strptime(profile['birthday'], '%m/%d/%Y') if 'birthday' in profile else None
self.email = profile.get('email')
self.locale = profile.get('locale')
self.gender = profile.get('gender')
self.extra_data = profile
self.save()
|
def synchronize(self, graph_data=None)
|
Synchronize ``facebook_username``, ``first_name``, ``middle_name``,
``last_name`` and ``birthday`` with Facebook.
:param graph_data: Optional pre-fetched graph data
| 2.474469 | 2.094841 | 1.181221 |
if self.expires_at:
return self.expires_at - self.issued_at > timedelta(days=30)
else:
return False
|
def extended(self)
|
Determine whether the OAuth token has been extended.
| 4.388203 | 3.142649 | 1.396339 |
graph = GraphAPI()
response = graph.get('oauth/access_token',
client_id = FACEBOOK_APPLICATION_ID,
client_secret = FACEBOOK_APPLICATION_SECRET_KEY,
grant_type = 'fb_exchange_token',
fb_exchange_token = self.token
)
components = parse_qs(response)
self.token = components['access_token'][0]
self.expires_at = now() + timedelta(seconds = int(components['expires'][0]))
self.save()
|
def extend(self)
|
Extend the OAuth token.
| 3.080121 | 2.678004 | 1.150156 |
# User has already been authed by alternate middleware
if hasattr(request, "facebook") and request.facebook:
return
request.facebook = False
if not self.is_valid_path(request):
return
if self.is_access_denied(request):
return authorization_denied_view(request)
# No signed request found in either GET, POST nor COOKIES...
if 'signed_request' not in request.REQUEST and 'signed_request' not in request.COOKIES:
return
# If the request method is POST and its body only contains the signed request,
# chances are it's a request from the Facebook platform and we'll override
# the request method to HTTP GET to rectify their misinterpretation
# of the HTTP standard.
#
# References:
# "POST for Canvas" migration at http://developers.facebook.com/docs/canvas/post/
# "Incorrect use of the HTTP protocol" discussion at http://forum.developers.facebook.net/viewtopic.php?id=93554
if request.method == 'POST' and 'signed_request' in request.POST:
request.POST = QueryDict('')
request.method = 'GET'
request.facebook = Facebook()
try:
request.facebook.signed_request = SignedRequest(
signed_request = request.REQUEST.get('signed_request') or request.COOKIES.get('signed_request'),
application_secret_key = FACEBOOK_APPLICATION_SECRET_KEY
)
except SignedRequest.Error:
request.facebook = False
# Valid signed request and user has authorized the application
if request.facebook \
and request.facebook.signed_request.user.has_authorized_application \
and not request.facebook.signed_request.user.oauth_token.has_expired:
# Initialize a User object and its corresponding OAuth token
try:
user = User.objects.get(facebook_id=request.facebook.signed_request.user.id)
except User.DoesNotExist:
oauth_token = OAuthToken.objects.create(
token = request.facebook.signed_request.user.oauth_token.token,
issued_at = request.facebook.signed_request.user.oauth_token.issued_at.replace(tzinfo=tzlocal()),
expires_at = request.facebook.signed_request.user.oauth_token.expires_at.replace(tzinfo=tzlocal())
)
user = User.objects.create(
facebook_id = request.facebook.signed_request.user.id,
oauth_token = oauth_token
)
user.synchronize()
# Update the user's details and OAuth token
else:
user.last_seen_at = now()
if 'signed_request' in request.REQUEST:
user.authorized = True
if request.facebook.signed_request.user.oauth_token:
user.oauth_token.token = request.facebook.signed_request.user.oauth_token.token
user.oauth_token.issued_at = request.facebook.signed_request.user.oauth_token.issued_at.replace(tzinfo=tzlocal())
user.oauth_token.expires_at = request.facebook.signed_request.user.oauth_token.expires_at.replace(tzinfo=tzlocal())
user.oauth_token.save()
user.save()
if not user.oauth_token.extended:
# Attempt to extend the OAuth token, but ignore exceptions raised by
# bug #102727766518358 in the Facebook Platform.
#
# http://developers.facebook.com/bugs/102727766518358/
try:
user.oauth_token.extend()
except:
pass
request.facebook.user = user
|
def process_request(self, request)
|
Process the signed request.
| 3.064272 | 3.051425 | 1.00421 |
response['P3P'] = 'CP="IDC CURa ADMa OUR IND PHY ONL COM STA"'
if FANDJANGO_CACHE_SIGNED_REQUEST:
if hasattr(request, "facebook") and request.facebook and request.facebook.signed_request:
response.set_cookie('signed_request', request.facebook.signed_request.generate())
else:
response.delete_cookie('signed_request')
return response
|
def process_response(self, request, response)
|
Set compact P3P policies and save signed request to cookie.
P3P is a W3C standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most
browsers it is considered by IE before accepting third-party cookies (i.e. cookies set by
documents in iframes). If they are not set correctly, IE will not set these cookies.
| 6.322426 | 5.425866 | 1.165238 |
# User has already been authed by alternate middleware
if hasattr(request, "facebook") and request.facebook:
return
request.facebook = False
if not self.is_valid_path(request):
return
if self.is_access_denied(request):
return authorization_denied_view(request)
request.facebook = Facebook()
oauth_token = False
# Is there a token cookie already present?
if 'oauth_token' in request.COOKIES:
try:
# Check if the current token is already in DB
oauth_token = OAuthToken.objects.get(token=request.COOKIES['oauth_token'])
except OAuthToken.DoesNotExist:
request.facebook = False
return
# Is there a code in the GET request?
elif 'code' in request.GET:
try:
graph = GraphAPI()
# Exchange code for an access_token
response = graph.get('oauth/access_token',
client_id = FACEBOOK_APPLICATION_ID,
redirect_uri = get_post_authorization_redirect_url(request, canvas=False),
client_secret = FACEBOOK_APPLICATION_SECRET_KEY,
code = request.GET['code'],
)
components = parse_qs(response)
# Save new OAuth-token in DB
oauth_token, new_oauth_token = OAuthToken.objects.get_or_create(
token = components['access_token'][0],
issued_at = now(),
expires_at = now() + timedelta(seconds = int(components['expires'][0]))
)
except GraphAPI.OAuthError:
pass
# There isn't a valid access_token
if not oauth_token or oauth_token.expired:
request.facebook = False
return
# Is there a user already connected to the current token?
try:
user = oauth_token.user
if not user.authorized:
request.facebook = False
return
user.last_seen_at = now()
user.save()
except User.DoesNotExist:
graph = GraphAPI(oauth_token.token)
profile = graph.get('me')
# Either the user already exists and its just a new token, or user and token both are new
try:
user = User.objects.get(facebook_id = profile.get('id'))
if not user.authorized:
if new_oauth_token:
user.last_seen_at = now()
user.authorized = True
else:
request.facebook = False
return
except User.DoesNotExist:
# Create a new user to go with token
user = User.objects.create(
facebook_id = profile.get('id'),
oauth_token = oauth_token
)
user.synchronize(profile)
# Delete old access token if there is any and only if the new one is different
old_oauth_token = None
if user.oauth_token != oauth_token:
old_oauth_token = user.oauth_token
user.oauth_token = oauth_token
user.save()
if old_oauth_token:
old_oauth_token.delete()
if not user.oauth_token.extended:
# Attempt to extend the OAuth token, but ignore exceptions raised by
# bug #102727766518358 in the Facebook Platform.
#
# http://developers.facebook.com/bugs/102727766518358/
try:
user.oauth_token.extend()
except:
pass
request.facebook.user = user
request.facebook.oauth_token = oauth_token
|
def process_request(self, request)
|
Process the web-based auth request.
| 3.354633 | 3.37613 | 0.993633 |
if hasattr(request, "facebook") and request.facebook and request.facebook.oauth_token:
if "code" in request.REQUEST:
path = get_full_path(request, remove_querystrings=['code', 'web_canvas'])
response = HttpResponseRedirect(path)
response.set_cookie('oauth_token', request.facebook.oauth_token.token)
else:
response.delete_cookie('oauth_token')
response['P3P'] = 'CP="IDC CURa ADMa OUR IND PHY ONL COM STA"'
return response
|
def process_response(self, request, response)
|
Set compact P3P policies and save auth token to cookie.
P3P is a W3C standard (see http://www.w3.org/TR/P3P/), and although largely ignored by most
browsers it is considered by IE before accepting third-party cookies (i.e. cookies set by
documents in iframes). If they are not set correctly, IE will not set these cookies.
| 5.24725 | 5.503996 | 0.953353 |
if not LOGBOOK_INSTALLED:
return
# validate log level
logbook.get_level_name(log_level)
if log_level == logger.level:
return
if log_level == logbook.NOTSET:
set_logger(is_enable=False)
else:
set_logger(is_enable=True)
logger.level = log_level
tabledata.set_log_level(log_level)
sqliteschema.set_log_level(log_level)
try:
import pytablereader
pytablereader.set_log_level(log_level)
except ImportError:
pass
|
def set_log_level(log_level)
|
Set the logging level of this module. The
`logbook <https://logbook.readthedocs.io/en/stable/>`__ module is used for logging.
:param int log_level:
One of the log level of
`logbook <https://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
| 3.826138 | 3.419168 | 1.119026 |
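An assumed usage sketch, presuming the function above is exposed at the package level:

import logbook
import simplesqlite  # assumed package exposing set_log_level

simplesqlite.set_log_level(logbook.DEBUG)   # enable verbose logging
simplesqlite.set_log_level(logbook.NOTSET)  # disable logging again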
try:
# from a namedtuple to a dict
values = values._asdict()
except AttributeError:
pass
try:
# from a dictionary to a list
return [cls.__to_sqlite_element(values.get(attr_name)) for attr_name in attr_names]
except AttributeError:
pass
if isinstance(values, (tuple, list)):
return [cls.__to_sqlite_element(value) for value in values]
raise ValueError("cannot convert from {} to list".format(type(values)))
|
def to_record(cls, attr_names, values)
|
Convert values to a record to be inserted into a database.
:param list attr_names:
List of attributes for the converting record.
:param values: Values to be converted.
:type values: |dict|/|namedtuple|/|list|/|tuple|
:raises ValueError: If the ``values`` is invalid.
| 3.341564 | 3.18643 | 1.048686 |
return [cls.to_record(attr_names, record) for record in value_matrix]
|
def to_records(cls, attr_names, value_matrix)
|
Convert a value matrix to records to be inserted into a database.
:param list attr_names:
List of attributes for the converting records.
:param value_matrix: Values to be converted.
:type value_matrix: list of |dict|/|namedtuple|/|list|/|tuple|
.. seealso:: :py:meth:`.to_record`
| 4.416068 | 5.090665 | 0.867484 |
for disabled_path in DISABLED_PATHS:
match = re.search(disabled_path, path[1:])
if match:
return True
return False
|
def is_disabled_path(path)
|
Determine whether or not the path matches one or more paths
in the DISABLED_PATHS setting.
:param path: A string describing the path to be matched.
| 3.285131 | 3.857242 | 0.851679 |
for enabled_path in ENABLED_PATHS:
match = re.search(enabled_path, path[1:])
if match:
return True
return False
|
def is_enabled_path(path)
|
Determine whether or not the path matches one or more paths
in the ENABLED_PATHS setting.
:param path: A string describing the path to be matched.
| 3.258981 | 3.914973 | 0.83244 |
def decorator(function):
@wraps(function)
def wrapper(self):
key = 'fandjango.%(model)s.%(property)s_%(pk)s' % {
'model': self.__class__.__name__,
'pk': self.pk,
'property': function.__name__
}
cached_value = cache.get(key)
delta = timedelta(**kwargs)
if cached_value is None:
value = function(self)
cache.set(key, value, delta.days * 86400 + delta.seconds)
else:
value = cached_value
return value
return wrapper
return decorator
|
def cached_property(**kwargs)
|
Cache the return value of a property.
| 2.500104 | 2.539359 | 0.984541 |
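A hypothetical usage sketch of the decorator above on a Django model; the model, field, and import path are illustrative assumptions:

from django.db import models
from fandjango.utils import cached_property  # assumed import path

class Visitor(models.Model):
    facebook_id = models.BigIntegerField()

    @cached_property(hours=6)  # keyword arguments are passed to datetime.timedelta
    def greeting(self):
        # Placeholder for an expensive computation or Graph API call;
        # the result is cached per instance primary key for six hours.
        return "Hello, visitor %s" % self.facebook_id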
authorization_denied_module_name = AUTHORIZATION_DENIED_VIEW.rsplit('.', 1)[0]
authorization_denied_view_name = AUTHORIZATION_DENIED_VIEW.split('.')[-1]
authorization_denied_module = import_module(authorization_denied_module_name)
authorization_denied_view = getattr(authorization_denied_module, authorization_denied_view_name)
return authorization_denied_view(request)
|
def authorization_denied_view(request)
|
Proxy for the view referenced in ``FANDJANGO_AUTHORIZATION_DENIED_VIEW``.
| 1.876651 | 1.673484 | 1.121404 |
path = request.get_full_path()
if canvas:
if FACEBOOK_APPLICATION_CANVAS_URL:
path = path.replace(urlparse(FACEBOOK_APPLICATION_CANVAS_URL).path, '')
redirect_uri = 'https://%(domain)s/%(namespace)s%(path)s' % {
'domain': FACEBOOK_APPLICATION_DOMAIN,
'namespace': FACEBOOK_APPLICATION_NAMESPACE,
'path': path
}
else:
if FANDJANGO_SITE_URL:
site_url = FANDJANGO_SITE_URL
path = path.replace(urlparse(site_url).path, '')
else:
protocol = "https" if request.is_secure() else "http"
site_url = "%s://%s" % (protocol, request.get_host())
redirect_uri = site_url + path
return redirect_uri
|
def get_post_authorization_redirect_url(request, canvas=True)
|
Determine the URL users should be redirected to upon authorization the application.
If the request is non-canvas, use the user-defined site URL if set, otherwise the site hostname.
| 2.554486 | 2.477765 | 1.030964 |
path = request.get_full_path()
for qs in remove_querystrings:
path = re.sub(r'&?' + qs + '=?(.+)?&?', '', path)
return path
|
def get_full_path(request, remove_querystrings=[])
|
Get the current path, removing the specified querystrings.
| 3.840682 | 3.281652 | 1.17035 |
query = {
'client_id': FACEBOOK_APPLICATION_ID,
'redirect_uri': redirect_uri
}
if permissions:
query['scope'] = ', '.join(permissions)
return render(
request = request,
template_name = 'fandjango/authorize_application.html',
dictionary = {
'url': 'https://www.facebook.com/dialog/oauth?%s' % urlencode(query)
},
status = 401
)
|
def authorize_application(
request,
redirect_uri = 'https://%s/%s' % (FACEBOOK_APPLICATION_DOMAIN, FACEBOOK_APPLICATION_NAMESPACE),
permissions = FACEBOOK_APPLICATION_INITIAL_PERMISSIONS
)
|
Redirect the user to authorize the application.
Redirection is done by rendering a JavaScript snippet that redirects the parent
window to the authorization URI, since Facebook will not allow this inside an iframe.
| 2.278601 | 2.366724 | 0.962766 |
if request.facebook:
user = User.objects.get(
facebook_id = request.facebook.signed_request.user.id
)
user.authorized = False
user.save()
return HttpResponse()
else:
return HttpResponse(status=400)
|
def deauthorize_application(request)
|
When a user deauthorizes an application, Facebook sends an HTTP POST request to the application's
"deauthorization callback" URL. This view picks up on requests of this sort and marks the corresponding
users as unauthorized.
| 3.73701 | 3.455952 | 1.081326 |
validate_table_name(table)
table = Table(table)
if typepy.is_empty_sequence(insert_tuple):
raise ValueError("empty insert list/tuple")
return "INSERT INTO {:s} VALUES ({:s})".format(
table, ",".join(["?" for _i in insert_tuple])
)
|
def make_insert(cls, table, insert_tuple)
|
[Deprecated] Make INSERT query.
:param str table: Table name of executing the query.
:param list/tuple insert_tuple: Insertion data.
:return: Query of SQLite.
:rtype: str
:raises ValueError: If ``insert_tuple`` is empty |list|/|tuple|.
:raises simplesqlite.NameValidationError:
|raises_validate_table_name|
| 4.907981 | 4.279657 | 1.146816 |
validate_table_name(table)
if typepy.is_null_string(set_query):
raise ValueError("SET query is null")
query_list = ["UPDATE {:s}".format(Table(table)), "SET {:s}".format(set_query)]
if where and isinstance(where, (six.text_type, Where, And, Or)):
query_list.append("WHERE {:s}".format(where))
return " ".join(query_list)
|
def make_update(cls, table, set_query, where=None)
|
Make UPDATE query.
:param str table: Table name of executing the query.
:param str set_query: SET part of the UPDATE query.
:param str where:
Add a WHERE clause to execute query,
if the value is not |None|.
:return: Query of SQLite.
:rtype: str
:raises ValueError: If ``set_query`` is empty string.
:raises simplesqlite.NameValidationError:
|raises_validate_table_name|
| 3.999753 | 4.211607 | 0.949697 |
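An illustrative call of the query builder above; the Where import path is an assumption:

from simplesqlite.query import Where  # assumed import path
from simplesqlite.sqlquery import SqlQuery

print(SqlQuery.make_update("sample_table", set_query="value = 1", where=Where("key", "a")))
# roughly: UPDATE sample_table SET value = 1 WHERE key = 'a'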
return "{:s} IN ({:s})".format(
Attr(key), ", ".join([Value(value).to_query() for value in value_list])
)
|
def make_where_in(cls, key, value_list)
|
Make part of WHERE IN query.
:param str key: Attribute name of the key.
:param str value_list:
List of values that the right hand side associated with the key.
:return: Part of WHERE query of SQLite.
:rtype: str
:Examples:
>>> from simplesqlite.sqlquery import SqlQuery
>>> SqlQuery.make_where_in("key", ["hoge", "foo", "bar"])
"key IN ('hoge', 'foo', 'bar')"
| 6.150888 | 5.857911 | 1.050014 |
self.close()
logger.debug("connect to a SQLite database: path='{}', mode={}".format(database_path, mode))
if mode == "r":
self.__verify_db_file_existence(database_path)
elif mode in ["w", "a"]:
self.__validate_db_path(database_path)
else:
raise ValueError("unknown connection mode: " + mode)
if database_path == MEMORY_DB_NAME:
self.__database_path = database_path
else:
self.__database_path = os.path.realpath(database_path)
try:
self.__connection = sqlite3.connect(database_path)
except sqlite3.OperationalError as e:
raise OperationalError(e)
self.__mode = mode
try:
# validate connection after connect
self.fetch_table_names()
except sqlite3.DatabaseError as e:
raise DatabaseError(e)
if mode != "w":
return
for table in self.fetch_table_names():
self.drop_table(table)
|
def connect(self, database_path, mode="a")
|
Connect to a SQLite database.
:param str database_path:
Path to the SQLite database file to be connected.
:param str mode:
``"r"``: Open for read only.
``"w"``: Open for read/write.
Delete existing tables when connecting.
``"a"``: Open for read/write. Append to the existing tables.
:raises ValueError:
If ``database_path`` is invalid or |attr_mode| is invalid.
:raises simplesqlite.DatabaseError:
If the file is encrypted or is not a database.
:raises simplesqlite.OperationalError:
If unable to open the database file.
| 2.869745 | 2.817811 | 1.018431 |
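A minimal usage sketch of the three connection modes via the SimpleSQLite class (the file name is illustrative):

from simplesqlite import SimpleSQLite

con = SimpleSQLite("example.sqlite", "w")    # read/write, drop any existing tables
# con = SimpleSQLite("example.sqlite", "a")  # read/write, append to existing tables
# con = SimpleSQLite("example.sqlite", "r")  # read only; the file must already exist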
import time
self.check_connection()
if typepy.is_null_string(query):
return None
if self.debug_query or self.global_debug_query:
logger.debug(query)
if self.__is_profile:
exec_start_time = time.time()
try:
result = self.connection.execute(six.text_type(query))
except (sqlite3.OperationalError, sqlite3.IntegrityError) as e:
if caller is None:
caller = logging.getLogger().findCaller()
file_path, line_no, func_name = caller[:3]
raise OperationalError(
message="\n".join(
[
"failed to execute query at {:s}({:d}) {:s}".format(
file_path, line_no, func_name
),
" - query: {}".format(MultiByteStrDecoder(query).unicode_str),
" - msg: {}".format(e),
" - db: {}".format(self.database_path),
]
)
)
if self.__is_profile:
self.__dict_query_count[query] = self.__dict_query_count.get(query, 0) + 1
elapse_time = time.time() - exec_start_time
self.__dict_query_totalexectime[query] = (
self.__dict_query_totalexectime.get(query, 0) + elapse_time
)
return result
|
def execute_query(self, query, caller=None)
|
Send arbitrary SQLite query to the database.
:param str query: Query to be executed.
:param tuple caller:
Caller information.
Expects the return value of :py:meth:`logging.Logger.findCaller`.
:return: The result of the query execution.
:rtype: sqlite3.Cursor
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
.. warning::
This method can execute an arbitrary query.
i.e. No access permissions check by |attr_mode|.
| 2.839309 | 2.732491 | 1.039092 |
self.verify_table_existence(table_name)
return self.execute_query(
six.text_type(Select(select, table_name, where, extra)),
logging.getLogger().findCaller(),
)
|
def select(self, select, table_name, where=None, extra=None)
|
Send a SELECT query to the database.
:param str select: Attribute for the ``SELECT`` query.
:param str table_name: |arg_select_table_name|
:param where: |arg_select_where|
:type where: |arg_where_type|
:param str extra: |arg_select_extra|
:return: Result of the query execution.
:rtype: sqlite3.Cursor
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
| 6.124022 | 5.462895 | 1.121021 |
import pandas
if columns is None:
columns = self.fetch_attr_names(table_name)
result = self.select(
select=AttrList(columns), table_name=table_name, where=where, extra=extra
)
if result is None:
return pandas.DataFrame()
return pandas.DataFrame(result.fetchall(), columns=columns)
|
def select_as_dataframe(self, table_name, columns=None, where=None, extra=None)
|
Get data in the database and return fetched data as a
:py:class:`pandas.DataFrame` instance.
:param str table_name: |arg_select_table_name|
:param list columns: |arg_select_as_xx_columns|
:param str where: |arg_select_where|
:param str extra: |arg_select_extra|
:return: Table data as a :py:class:`pandas.DataFrame` instance.
:rtype: pandas.DataFrame
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-select-as-dataframe`
.. note::
``pandas`` package required to execute this method.
| 3.343409 | 3.562196 | 0.938581 |
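A small end-to-end sketch; the table name, column names, and the create_table_from_data_matrix helper are assumptions based on typical simplesqlite usage:

from simplesqlite import SimpleSQLite

con = SimpleSQLite("example.sqlite", "w")
con.create_table_from_data_matrix(
    "sample_table", ["a", "b"], [[1, 1.1], [2, 2.2], [3, 3.3]])
print(con.select_as_dataframe("sample_table"))  # returns the table as a pandas.DataFrame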