from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.Student)
admin.site.register(models.Subject)
admin.site.register(models.Assignment)
admin.site.register(models.Submission)
| python |
# -*- coding: utf-8 -*-
from django.core.management import call_command
from django.db import migrations
def create_cache_table(apps, schema_editor):
"""
创建 cache table
"""
call_command("createcachetable", "account_cache")
class Migration(migrations.Migration):
dependencies = [
("account", "0003_verifyinfo"),
]
operations = [migrations.RunPython(create_cache_table)]
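    # A minimal sketch (not taken from this project's settings) of how the table
    # created above is typically wired up: ``createcachetable`` only builds the
    # table, the database cache backend still has to point at it in settings.py,
    # e.g.:
    #
    #   CACHES = {
    #       "default": {
    #           "BACKEND": "django.core.cache.backends.db.DatabaseCache",
    #           "LOCATION": "account_cache",
    #       }
    #   }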
| python |
from django.contrib.auth import get_user_model
from questionnaire.models import Questionnaire
from functional_tests.base import FunctionalTest
from functional_tests.pages.qcat import HomePage
from functional_tests.pages.questionnaire import QuestionnaireStepPage
from functional_tests.pages.technologies import TechnologiesNewPage, \
Technologies2018NewPage, TechnologiesDetailPage, TechnologiesEditPage, \
TechnologiesStepPage
from functional_tests.pages.wocat import AddDataPage
class QuestionnaireTest(FunctionalTest):
fixtures = [
'global_key_values',
'technologies',
]
def test_questionnaire_is_available(self):
# User logs in and goes to the home page.
home_page = HomePage(self)
home_page.open(login=True)
# User clicks a link to add data in the top menu.
home_page.click_add_slm_data()
# User clicks a link to add a new Technology.
add_page = AddDataPage(self)
add_page.click_add_technology()
# User sees an empty edit page and the categories of the Technology.
edit_page = Technologies2018NewPage(self)
edit_page.close_updated_edition_warning()
progress_indicators = edit_page.get_progress_indicators()
categories = edit_page.CATEGORIES
assert len(progress_indicators) == len(categories)
# All the categories are listed.
for __, category in categories:
edit_page.get_category_by_name(category)
# User edits the first category.
edit_page.click_edit_category(categories[0][0])
# The focal point is available
step_page = QuestionnaireStepPage(self)
step_page.is_focal_point_available()
# User saves the first category.
step_page.submit_step()
# All the categories are still there.
progress_indicators = edit_page.get_progress_indicators()
categories = edit_page.CATEGORIES
assert len(progress_indicators) == len(categories)
for __, category in categories:
edit_page.get_category_by_name(category)
def test_translation(self):
# User logs in and goes to the Edit page.
page = Technologies2018NewPage(self)
page.open(login=True)
page.close_updated_edition_warning()
# User sees the category names in English.
for __, category in page.CATEGORIES:
page.get_category_by_name(category)
# User changes the language.
page.change_language('es')
page.close_updated_edition_warning()
# User sees the category names in Spanish.
for __, category in page.CATEGORIES_TRANSLATED:
page.get_category_by_name(category)
class QuestionnaireFixturesTest(FunctionalTest):
fixtures = [
'global_key_values',
'technologies',
'technologies_questionnaires',
]
def test_show_edition_update_warning(self):
# User logs in and goes to the page to create a new Technology
page = Technologies2018NewPage(self)
page.open(login=True)
# There is a warning about updated editions.
assert page.has_updated_edition_warning()
page.close_updated_edition_warning()
# After creating a draft version, the warning is not there anymore.
page.click_edit_category('tech__1')
step_page = QuestionnaireStepPage(self)
step_page.submit_step()
assert not page.has_updated_edition_warning()
def test_redirect_edit_public_version(self):
# User is the compiler of technology "tech_1"
user = get_user_model().objects.get(pk=101)
identifier = 'tech_1'
title = 'WOCAT Technology 1'
# User logs in and goes to the details of a questionnaire
detail_page = TechnologiesDetailPage(self)
detail_page.route_kwargs = {'identifier': identifier}
detail_page.open(login=True, user=user)
assert detail_page.has_text(title)
# User goes to the edit page of the questionnaire and sees he has been
# redirected to the detail page.
edit_page = TechnologiesEditPage(self)
edit_page.route_kwargs = {'identifier': identifier}
edit_page.open()
assert self.browser.current_url == detail_page.get_url()
# User tries to open the URL of a step of this public questionnaire and
# sees he has been redirected as well.
step_page = TechnologiesStepPage(self)
step_page.route_kwargs = {
'identifier': identifier,
'step': 'tech__1'
}
step_page.open()
assert self.browser.current_url == detail_page.get_url()
# User starts a new questionnaire
new_page = Technologies2018NewPage(self)
new_page.open()
new_page.close_updated_edition_warning()
new_page.click_edit_category('tech__1')
step_page = TechnologiesStepPage(self)
step_page.submit_step()
# For draft versions, the edit URLs can be accessed
draft_identifier = Questionnaire.objects.get(status=1)
edit_page.route_kwargs = {'identifier': draft_identifier}
edit_page.open()
assert self.browser.current_url == edit_page.get_url()
step_page.route_kwargs = {
'identifier': draft_identifier,
'step': 'tech__1'
}
step_page.open()
assert self.browser.current_url == step_page.get_url()
| python |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from models.user import User
from database import session
def create_user(login_session):
    """Create a new user from the login session and return their id."""
    new_user = User(name=login_session["username"],
                    email=login_session["email"],
                    picture=login_session["picture"])
    session.add(new_user)
    session.commit()
    user = session.query(User).filter_by(email=login_session["email"]).one_or_none()
    return user.id
def get_user_info(user_id):
    """Return the user object for the given id."""
    user = session.query(User).filter_by(id=user_id).one_or_none()
    return user
def get_user_id(email):
    """Return the user id for the given email, or None if no such user exists."""
    try:
        user = session.query(User).filter_by(email=email).one_or_none()
        return user.id
    except AttributeError:
        # one_or_none() returned None: no user with this email
        return None
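# A minimal usage sketch (hypothetical data, not from the original project):
# `login_session` stands in for the dict a typical OAuth login flow would fill.
if __name__ == "__main__":
    login_session = {
        "username": "Ada Lovelace",
        "email": "ada@example.com",
        "picture": "https://example.com/ada.png",
    }
    user_id = get_user_id(login_session["email"])
    if user_id is None:
        # first visit: create the row, then read it back
        user_id = create_user(login_session)
    print(get_user_info(user_id).name)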
| python |
from tkinter import *
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from PIL import ImageTk, Image
from PyDictionary import PyDictionary
from googletrans import Translator
root = tk.Tk()
root.title("Yanis's Dictionary")
root.geometry('600x300')
root['bg'] = 'white'
frame = Frame(root,width=200,height=300,borderwidth=1,relief=RIDGE)
frame.grid(sticky="W")
def get_meaning():
    output.delete(1.0, 'end')
    dictionary = PyDictionary()
    get_word = entry.get()
    selected_language = language_box.get()
    if get_word == "":
        messagebox.showerror('Dictionary', 'Please enter a word')
    elif selected_language == 'English-to-English':
        d = dictionary.meaning(get_word)
        output.insert('end', d['Noun'])
    elif selected_language == 'English-to-Arabic':
        translator = Translator()
        # googletrans uses 'ar' as the language code for Arabic
        t = translator.translate(get_word, dest='ar')
        output.insert('end', t.text)
def quit():
root.destroy()
img = ImageTk.PhotoImage(Image.open('dict.png'))
pic = Label(root, image = img)
pic.place(x=40,y=70)
word = Label(root,text="Enter Word",bg="white",font=('verdana',10,'bold'))
word.place(x=250,y=23)
a = tk.StringVar()
language_box = ttk.Combobox(root, width=20, textvariable=a, state='readonly', font=('verdana', 10, 'bold'))
language_box['values'] = (
    'English-to-English',
    'English-to-Arabic',
)
language_box.place(x=380, y=10)
language_box.current(0)
entry = Entry(root,width=50,borderwidth=2,relief=RIDGE)
entry.place(x=250,y=50)
search = Button(root,text="Search",font=('verdana',10,'bold'),cursor="hand2",relief=RIDGE,command=get_meaning)
search.place(x=430,y=80)
quit_button = Button(root, text="Quit", font=('verdana', 10, 'bold'), cursor="hand2", relief=RIDGE, command=quit)
quit_button.place(x=510, y=80)
meaning = Label(root,text="Meaning",bg="white",font=('verdana',15,'bold'))
meaning.place(x=230,y=120)
output = Text(root,height=8,width=40,borderwidth=2,relief=RIDGE)
output.place(x=230,y=160)
root.mainloop() | python |
import socket
import threading
HOST = '127.0.0.1'
PORT = 9999
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
print('Connect Success!....')
def sendingMsg():
    while True:
        data = input('')
        sock.send(data.encode('utf-8'))
    sock.close()
def gettingMsg():
    while True:
        data = sock.recv(1024)
        print('From Server :', repr(data))
    sock.close()
threading.Thread(target=sendingMsg, daemon=True).start()
threading.Thread(target=gettingMsg, daemon=True).start()
# keep the main thread alive while the daemon threads handle I/O
while True:
    pass | python |
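# A minimal companion sketch for the chat client above: an echo-style server
# listening on the same host/port. This is illustrative only; the server that
# was actually used with that client is not part of the original snippet.
import socket
import threading
HOST = '127.0.0.1'
PORT = 9999
def handle_client(conn, addr):
    # echo every message back to the client that sent it
    with conn:
        while True:
            data = conn.recv(1024)
            if not data:
                break
            conn.sendall(data)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((HOST, PORT))
server.listen()
while True:
    conn, addr = server.accept()
    threading.Thread(target=handle_client, args=(conn, addr), daemon=True).start()
| python |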
# Generated by Django 3.1.1 on 2020-10-30 15:53
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grant_applications', '0009_auto_20201030_1209'),
]
operations = [
migrations.AddField(
model_name='grantapplication',
name='export_experience_description',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='grantapplication',
name='export_regions',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('africa', 'Africa'), ('asia', 'Asia'), ('australasia', 'Australasia'), ('europe', 'Europe'), ('middle east', 'Middle East'), ('north america', 'North America'), ('south america', 'South America')], max_length=50), null=True, size=None),
),
migrations.AddField(
model_name='grantapplication',
name='export_strategy',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='grantapplication',
name='has_exported_in_last_12_months',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='grantapplication',
name='in_contact_with_dit_trade_advisor',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='grantapplication',
name='markets_intending_on_exporting_to',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('existing', 'existing markets'), ('new', 'new markets not exported to in the last 12 months')], max_length=10), null=True, size=None),
),
]
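    # A hedged sketch (not part of the original migration) of how the ArrayFields
    # added above can be queried once applied, using Django's postgres lookups;
    # `GrantApplication` is assumed to live in grant_applications.models:
    #
    #   from grant_applications.models import GrantApplication
    #
    #   # applications that selected Europe among their export regions
    #   GrantApplication.objects.filter(export_regions__contains=['europe'])
    #   # applications targeting either of the two market choices
    #   GrantApplication.objects.filter(
    #       markets_intending_on_exporting_to__overlap=['new', 'existing'])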
| python |
# select CALOL1_KEY from CMS_TRG_L1_CONF.L1_TRG_CONF_KEYS where ID='collisions2016_TSC/v206' ;
import re
import os, sys, shutil
import subprocess
import six
"""
A simple helper script that provided with no arguments dumps a list of
top-level keys, and provided with any key from this list as an argument,
dumps a list of sub-keys and saves corresponding configuration to local
files.
"""
# connection string
sqlplusCmd = ['env',
'sqlplus',
'-S',
'cms_trg_r/X3lmdvu4@cms_omds_adg'
]
# NOTE: this check relies on a deterministic str hash; under Python 3 run with
# PYTHONHASHSEED=0 (or adapt the check), since hash randomization is on by default.
if hash( sqlplusCmd[-1] ) != 1687624727082866629:
    print('Do not forget to plug password to this script')
    print('Exiting.')
    exit(0)
myre = re.compile(r'(ID)|(-{80})')
# if no arguments are given, query the top level keys only and exit
if len(sys.argv) == 1:
    sqlplus = subprocess.Popen(sqlplusCmd, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    print('No args specified, querying and printing only top-level keys:')
    for line in re.split('\n', sqlplus.communicate('select unique ID from CMS_TRG_L1_CONF.CALOL2_KEYS;')[0]):
        if myre.search(line) is None:
            print(line)
    print('Pick any of these keys as an argument next time you run this script')
    exit(0)
# if an argument is given query the whole content of the key
key = sys.argv[1]
sqlplus = subprocess.Popen(sqlplusCmd,
                           shell=False,
                           stdout=subprocess.PIPE,
                           stdin=subprocess.PIPE,
                           universal_newlines=True
                           )
queryKey = "select CALOL1_KEY from CMS_TRG_L1_CONF.L1_TRG_CONF_KEYS where ID='{0}'".format(key)
for line in re.split('\n', sqlplus.communicate(queryKey + ';')[0]):
    print(line)
    if re.search('/v', line):
        key = line
print(key)
queryKeys = """
select
HW, ALGO, INFRA
from
CMS_TRG_L1_CONF.CALOL1_KEYS
where
ID = '{0}'
""".format(key)
# write results for specific configs to the following files
batch = {
'HW' : 'hw.xml',
'ALGO' : 'algo.xml',
'INFRA' : 'infra.xml'
}
# do the main job here
for config, fileName in six.iteritems(batch):
    sqlplus = subprocess.Popen(sqlplusCmd, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    with open(fileName, 'w') as f:
        query = """
        select
            CONF.CONF
        from
            CMS_TRG_L1_CONF.CALOL1_CLOBS CONF, ({0}) KEY
        where
            CONF.ID = KEY.{1}
        """.format(queryKeys, config)
        for line in re.split('\n', sqlplus.communicate('\n'.join(['set linesize 200', 'set longchunksize 2000000 long 2000000 pages 0', query + ';']))[0]):
            f.write('\n')
            f.write(line)
sqlplus = subprocess.Popen(sqlplusCmd, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
print('Following keys were found:')
for line in re.split('\n', sqlplus.communicate(queryKeys + ';')[0]):
    print(line)
print('Results are saved in ' + ' '.join(batch.values()) + ' files')
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-11-23 10:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wdapp', '0011_auto_20181123_0955'),
]
operations = [
migrations.RemoveField(
model_name='business',
name='slug',
),
migrations.RemoveField(
model_name='company',
name='slug',
),
migrations.RemoveField(
model_name='trip',
name='slug',
),
]
| python |
# -*- coding: utf-8 -*-
"""
配置日志信息,并添加 request_id
:create: 2018/9/23
:copyright: smileboywtu
"""
import datetime
import logging
import sys
import uuid
from logging.handlers import TimedRotatingFileHandler
from tornado import gen
from tornado.log import access_log
from tornado.stack_context import run_with_stack_context, StackContext
class RequestIDContext:
class Data:
def __init__(self, request_id=0):
self.request_id = request_id
def __eq__(self, other):
return self.request_id == other.request_id
_data = Data()
def __init__(self, request_id):
self.current_data = RequestIDContext.Data(request_id=request_id)
self.old_data = None
def __enter__(self):
if RequestIDContext._data == self.current_data:
return
        self.old_data = RequestIDContext.Data(
            request_id=RequestIDContext._data.request_id,
        )
RequestIDContext._data = self.current_data
def __exit__(self, exc_type, exc_value, traceback):
if self.old_data is not None:
RequestIDContext._data = self.old_data
def with_request_id(func):
@gen.coroutine
def _wrapper(*args, **kwargs):
request_id = uuid.uuid4().hex
yield run_with_stack_context(StackContext(lambda: RequestIDContext(request_id)), lambda: func(*args, **kwargs))
return _wrapper
def log_function(handler):
"""
log function to log access request information
regex parse: (?<remote_ip>[\d.]+) [-\w]+ [-\w]+ \[(?<request_date>[\d\/:\s\+]+)\] \"
(?<http_method>[A-Z]+) (?<http_uri>[\/a-zA-Z\.]+) (?<http_version>[A-Z\/\d\.]+)\"
(?<status_code>[\d]+) (?<length>[\d]+)
(?<request_time>[\d\.]+) (?<request_id>[\d\w]+) [\w\-]+ \[(?<request_body>.+)\] -
:param handler:
:return:
"""
_log_meta = dict(
app_id="app-up",
user="-",
username="-",
response_code="-",
http_uri=handler.request.uri,
http_status=handler.get_status(),
http_method=handler.request.method,
http_version=handler.request.version,
remote_ip=handler.request.remote_ip,
request_time=1000.0 * handler.request.request_time(),
request_id=RequestIDContext._data.request_id,
response_length=handler.request.headers.get("Content-Length", 0),
request_args=handler.request.arguments,
request_date=datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=8))).strftime("%x:%H:%M:%S %z")
)
if handler.get_status() < 400:
log_method = access_log.info
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
log_method("%(remote_ip)s %(user)s %(username)s [%(request_date)s] \"%"
"(http_method)s %(http_uri)s %(http_version)s\" %(http_status)s "
"%(response_length)s %(request_time).2f %(request_id)s %(app_id)s [%(request_args)s] -", _log_meta)
class RequestIDFilter(logging.Filter):
def filter(self, record):
record.request_id = RequestIDContext._data.request_id
return True
def logger_config(name, path, level, log_format, rotate_interval, backup_count,
debug=False):
"""
配置 log handler 对象
:param name: 日志名称
:param path: 日志文件路径
:param level: 日志等级
:param log_format: 日志格式
:param max_bytes: 日志文件最大大小
:param backup_count: 日志文件滚动个数
:return:
"""
logger = logging.getLogger(name)
logger.addFilter(RequestIDFilter())
handler = TimedRotatingFileHandler(
path, when='D', interval=rotate_interval, backupCount=backup_count,
encoding="utf-8") \
if not debug else \
logging.StreamHandler(sys.stdout)
# handler = RotatingFileHandler(path, "a", maxBytes=max_bytes, backupCount=backup_count, encoding="utf-8") \
# if not debug else \
# logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
log_level = getattr(logging, level)
logger.setLevel(log_level)
logger.addHandler(handler)
def configure_tornado_logger(path, interval, backup_count,
level="INFO",
name="tornado.application",
debug=False):
"""
## read doc:
https://docs.python.org/3/library/logging.html#logrecord-attributes
tornado web application log_format:
%(asctime)s %(levelname)s %(request_id)-%(process)d %(filename)s:%(lineno)d -- %(message)s
:param path: log file path
:param level: log level
:param name: log name
:param debug: if debug, show logs on stdout
:return:
"""
if name == "tornado.access":
log_format = "[%(name)s] %(message)s"
elif name == "plugins":
log_format = "[%(name)s] %(asctime)s %(levelname)s -- %(message)s"
else:
log_format = "[%(name)s] %(asctime)s %(levelname)s %(request_id)s %(filename)s:%(lineno)d -- %(message)s"
return logger_config(
name=name,
path=path,
level=level,
log_format=log_format,
# max_bytes=100 * 1024 * 1024,
rotate_interval=interval,
backup_count=backup_count,
debug=debug
)
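# A minimal usage sketch (paths and values are illustrative, not from the
# original project): configure the application logger, then emit a record;
# RequestIDFilter injects `request_id` into every record so the format works.
if __name__ == "__main__":
    configure_tornado_logger(
        path="app.log",
        interval=1,          # rotate daily
        backup_count=7,
        level="INFO",
        name="tornado.application",
        debug=True,          # log to stdout while testing
    )
    logging.getLogger("tornado.application").info("logger configured")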
| python |
# coding: utf-8
# In[87]:
# Text similarity computation based on word segmentation,
# using jieba for Chinese tokenization
import jieba
import jieba.posseg as pseg
from jieba import analyse
import numpy as np
import os
'''
Text similarity computation: implementations of several common algorithms.
'''
class TextSimilarity(object):
    def __init__(self, file_a, file_b):
        '''
        Initialize the class from two text files.
        '''
        str_a = ''
        str_b = ''
        if not os.path.isfile(file_a):
            print(file_a, "is not a file")
            return
        elif not os.path.isfile(file_b):
            print(file_b, "is not a file")
            return
        else:
            with open(file_a, 'r') as f:
                for line in f.readlines():
                    str_a += line.strip()
            with open(file_b, 'r') as f:
                for line in f.readlines():
                    str_b += line.strip()
        self.str_a = str_a
        self.str_b = str_b
    # get LCS (longest common subsequence) via dynamic programming
    def lcs(self, str_a, str_b):
        lensum = float(len(str_a) + len(str_b))
        # build a 2D table dp[len_a+1][len_b+1] initialized with 0
        lengths = [[0 for j in range(len(str_b)+1)] for i in range(len(str_a)+1)]
        # enumerate(a) yields the index i and the element a[i]
        for i, x in enumerate(str_a):
            for j, y in enumerate(str_b):
                if x == y:
                    lengths[i+1][j+1] = lengths[i][j] + 1
                else:
                    lengths[i+1][j+1] = max(lengths[i+1][j], lengths[i][j+1])
        # the table now holds the LCS length; walk it backwards to recover the subsequence
        result = ""
        x, y = len(str_a), len(str_b)
        while x != 0 and y != 0:
            # the last character of this prefix was not used
            if lengths[x][y] == lengths[x-1][y]:
                x -= 1
            elif lengths[x][y] == lengths[x][y-1]:
                y -= 1
            else:  # the current character belongs to the LCS
                assert str_a[x-1] == str_b[y-1]
                result = str_a[x-1] + result  # prepend, since we walk from back to front
                x -= 1
                y -= 1
        longestdist = lengths[len(str_a)][len(str_b)]
        ratio = longestdist / min(len(str_a), len(str_b))
        # return {'longestdistance': longestdist, 'ratio': ratio, 'result': result}
        return ratio
    def minimumEditDistance(self, str_a, str_b):
        '''
        Minimum edit distance with only three operations: replace, insert, delete.
        '''
        lensum = float(len(str_a) + len(str_b))
        if len(str_a) > len(str_b):  # make str_a the shorter string
            str_a, str_b = str_b, str_a
        distances = range(len(str_a) + 1)  # initial distance row
        for index2, char2 in enumerate(str_b):  # str_b is the longer string
            newDistances = [index2+1]  # start a new distance row
            for index1, char1 in enumerate(str_a):
                if char1 == char2:  # equal characters: no edit needed, keep the previous distance
                    newDistances.append(distances[index1])
                else:  # take the cheapest of the three edits
                    newDistances.append(1 + min((distances[index1],    # delete
                                                 distances[index1+1],  # insert
                                                 newDistances[-1])))   # replace
            distances = newDistances  # update the distance row
        mindist = distances[-1]
        ratio = (lensum - mindist)/lensum
        # return {'distance': mindist, 'ratio': ratio}
        return ratio
    def levenshteinDistance(self, str1, str2):
        '''
        Levenshtein edit distance, used to compute text similarity.
        '''
        m = len(str1)
        n = len(str2)
        lensum = float(m + n)
        d = []
        for i in range(m+1):
            d.append([i])
        del d[0][0]
        for j in range(n+1):
            d[0].append(j)
        for j in range(1, n+1):
            for i in range(1, m+1):
                if str1[i-1] == str2[j-1]:
                    d[i].insert(j, d[i-1][j-1])
                else:
                    minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2)
                    d[i].insert(j, minimum)
        ldist = d[-1][-1]
        ratio = (lensum - ldist)/lensum
        # return {'distance': ldist, 'ratio': ratio}
        return ratio
    @classmethod
    def splitWords(cls, str_a):
        '''
        Take a string and return the segmented result, both as a space-joined
        string and as a set of terms.
        '''
        wordsa = pseg.cut(str_a)
        cuta = ""
        seta = set()
        for key in wordsa:
            # print(key.word, key.flag)
            cuta += key.word + " "
            seta.add(key.word)
        return [cuta, seta]
    def JaccardSim(self, str_a, str_b):
        '''
        Jaccard similarity coefficient:
        similarity of sa and sb = len(sa & sb) / len(sa | sb)
        '''
        seta = self.splitWords(str_a)[1]
        setb = self.splitWords(str_b)[1]
        sa_sb = 1.0 * len(seta & setb) / len(seta | setb)
        return sa_sb
    def countIDF(self, text, topK):
        '''
        text: input string. topK: number of TF-IDF keywords whose term
        frequencies are returned as the vector used for similarity.
        '''
        tfidf = analyse.extract_tags
        cipin = {}  # term frequencies of the segmented text
        fenci = jieba.cut(text)
        # count the frequency of every term
        for word in fenci:
            if word not in cipin.keys():
                cipin[word] = 0
            cipin[word] += 1
        # extract the topK keywords with TF-IDF, including each term's weight
        keywords = tfidf(text, topK, withWeight=True)
        ans = []
        # collect the term frequency of each extracted keyword
        for keyword in keywords:
            # print(keyword, " ", cipin[keyword[0]])
            ans.append(cipin[keyword[0]])  # frequency of each topK keyword
        return ans
    @staticmethod
    def cos_sim(a, b):
        a = np.array(a)
        b = np.array(b)
        # cosine similarity of the two vectors
        return np.sum(a*b) / (np.sqrt(np.sum(a ** 2)) * np.sqrt(np.sum(b ** 2)))
    @staticmethod
    def eucl_sim(a, b):
        a = np.array(a)
        b = np.array(b)
        # Euclidean similarity: 1 / (1 + Euclidean distance)
        return 1 / (1 + np.sqrt(np.sum((a - b) ** 2)))
    @staticmethod
    def pers_sim(a, b):
        a = np.array(a)
        b = np.array(b)
        a = a - np.average(a)
        b = b - np.average(b)
        # Pearson similarity: cosine similarity of the centered vectors
        return np.sum(a*b) / (np.sqrt(np.sum(a ** 2)) * np.sqrt(np.sum(b ** 2)))
    def splitWordSimlaryty(self, str_a, str_b, topK=20, sim=None):
        '''
        Similarity based on word segmentation; defaults to cosine similarity
        (cos_sim) over the term frequencies of the topK most frequent terms.
        '''
        if sim is None:
            sim = self.cos_sim
        # term-frequency vectors of the topK keywords
        vec_a = self.countIDF(str_a, topK)
        vec_b = self.countIDF(str_b, topK)
        return sim(vec_a, vec_b)
    @staticmethod
    def string_hash(source):  # simple string hash used for simhash
        if source == "":
            return 0
        else:
            # ord() returns the Unicode code point of a character
            x = ord(source[0]) << 7
            m = 1000003  # a large prime
            mask = 2 ** 128 - 1  # mask value
            for c in source:  # fold each character into the running hash
                x = ((x * m) ^ ord(c)) & mask
            x ^= len(source)
            if x == -1:  # -1 is reserved, map it away
                x = -2
            x = bin(x).replace('0b', '').zfill(64)[-64:]
            # print(source, x)
            return str(x)
    def simhash(self, str_a, str_b):
        '''
        Compute similarity with simhash (left unimplemented here).
        '''
        pass
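# A hedged sketch of how the unimplemented `simhash` above might be completed,
# reusing `splitWords` and `string_hash`; this is one common formulation, not
# the original author's implementation.
def simhash_ratio(text_a, text_b):
    def fingerprint(text):
        bits = [0] * 64
        for word in TextSimilarity.splitWords(text)[1]:
            h = TextSimilarity.string_hash(word)      # 64-character bit string
            for i, bit in enumerate(h):
                bits[i] += 1 if bit == '1' else -1
        return ''.join('1' if b > 0 else '0' for b in bits)
    fp_a, fp_b = fingerprint(text_a), fingerprint(text_b)
    hamming = sum(x != y for x, y in zip(fp_a, fp_b))
    return 1 - hamming / 64.0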
| python |
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State
popover = html.Div(
[
html.P(
["Click on the word ", html.Span("popover", id="popover-target")]
),
dbc.Popover(
[
dbc.PopoverHeader("Popover header"),
dbc.PopoverBody("Popover body"),
],
id="popover",
is_open=False,
target="popover-target",
),
]
)
@app.callback(
Output("popover", "is_open"),
[Input("popover-target", "n_clicks")],
[State("popover", "is_open")],
)
def toggle_popover(n, is_open):
if n:
return not is_open
return is_open
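# The snippet above assumes an existing Dash `app` object (its creation is not
# shown in the original). A minimal wiring sketch, with the caveat that in a
# real script the app would have to be created *before* the @app.callback
# decorator runs:
#
#   import dash
#   import dash_bootstrap_components as dbc
#
#   app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
#   app.layout = popover
#
#   if __name__ == "__main__":
#       app.run_server(debug=True)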
| python |
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file defines an end-to-end test that validates core functionality
# of the bundled CLI tool. This requires a GCP project in which the
# test will create, connect to, and delete Datalab instances.
import argparse
import os
import random
import socket
import subprocess
import sys
import tempfile
import time
import unittest
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import uuid
retry_count = 3
python_executable = sys.executable
connection_msg = (
'The connection to Datalab is now open and will '
'remain until this command is killed.')
readme_url_template = (
'http://localhost:{}/api/contents/datalab/docs/Readme.ipynb')
info_url_template = 'http://localhost:{}/_info'
readme_header = 'Guide to Google Cloud Datalab'
bastion_startup_template = """
# First, install fuser
apt-get update -yq && apt-get install -y psmisc
# Repeatedly try to run the SSH tunnel
while true; do
# Invoke gcloud in a separate process so we can check it
(gcloud compute ssh --zone {} --internal-ip \
--ssh-flag=-4 --ssh-flag=-N --ssh-flag=-L \
--ssh-flag=localhost:8080:localhost:8080 \
datalab@{}) &
gcloud_pid=$!
sleep 30
if [ -z "$(fuser -n tcp -4 8080)" ]; then
# The SSH tunnel never bound to the local port; kill it...
kill -9 "${{gcloud_pid}}"
fi
wait
done
"""
def generate_unique_id():
return uuid.uuid4().hex[0:12]
def call_gcloud(args):
return subprocess.check_output(['gcloud'] + args).decode('utf-8')
def free_port():
auto_socket = socket.socket()
auto_socket.bind(('localhost', 0))
port_number = auto_socket.getsockname()[1]
auto_socket.close()
return port_number
def random_zone():
zones_list = subprocess.check_output([
'gcloud', 'compute', 'zones', 'list',
'--filter=region~us-west', '--format=value(name)']).decode(
'utf-8')
zones = zones_list.split()
return random.choice(zones)
class DatalabInstance(object):
def __init__(self, test_run_id, project, zone, external_ip=True):
self.project = project
self.zone = zone
name_suffix = generate_unique_id()
self.network = "test-network-{0}-{1}".format(
test_run_id, name_suffix)
self.external_ip = external_ip
if self.external_ip:
self.name = "test-instance-{0}-{1}".format(
test_run_id, name_suffix)
else:
self.internal_name = "test-instance-{0}-{1}".format(
test_run_id, name_suffix)
self.name = "bastion-vm-{0}-{1}".format(
test_run_id, name_suffix)
def prepare_network_for_internal_ip(self):
region = call_gcloud(['compute', 'zones', 'describe',
'--format=value(region)', self.zone]).strip()
print('Using the region "{}"...'.format(region))
try:
print('Creating the network "{}"...'.format(self.network))
call_gcloud(['compute', 'networks', 'create', self.network])
self.subnet = call_gcloud([
'compute', 'networks', 'subnets', 'list',
'--filter=network~/{}$ region={}'.format(
self.network, region),
'--format=value(name)']).strip()
print('Updating the subnet "{}"...'.format(self.subnet))
call_gcloud(['compute', 'networks', 'subnets', 'update',
'--region', region, self.subnet,
'--enable-private-ip-google-access'])
except Exception:
delete_network_cmd = ['compute', 'networks', 'delete',
'--project', self.project,
'--quiet', self.network]
print('Deleting the network "{}" with the command "{}"'.format(
self.network, ' '.join(delete_network_cmd)))
call_gcloud(delete_network_cmd)
raise
def __enter__(self):
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project,
'--zone', self.zone,
'--verbosity', 'debug',
'create', '--no-connect',
'--network-name', self.network]
if self.external_ip:
cmd.append(self.name)
else:
cmd.append('--beta-no-external-ip')
cmd.append(self.internal_name)
self.prepare_network_for_internal_ip()
print('Creating the instance "{}" with the command "{}"'.format(
self.name, ' '.join(cmd)))
subprocess.check_output(cmd)
print('Status of the instance: "{}"'.format(self.status()))
if not self.external_ip:
# Create a bastion VM that will forward to the real instance.
bastion_startup = bastion_startup_template.format(
self.zone, self.internal_name)
with tempfile.NamedTemporaryFile(mode='w', delete=False) \
as startup_script_file:
try:
startup_script_file.write(bastion_startup)
startup_script_file.close()
call_gcloud(['compute', 'instances', 'create',
'--zone', self.zone,
'--network', self.network,
'--subnet', self.subnet,
'--scopes=cloud-platform', '--tags=datalab',
'--metadata-from-file',
'startup-script='+startup_script_file.name,
self.name])
finally:
os.remove(startup_script_file.name)
return self
def __exit__(self, *unused_args, **unused_kwargs):
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project,
'--zone', self.zone,
'delete', '--delete-disk']
if self.external_ip:
cmd.append(self.name)
else:
cmd.append(self.internal_name)
call_gcloud(['compute', 'instances', 'delete', '--zone', self.zone,
'--delete-disks=all', '--quiet', self.name])
print('Deleting the instance "{}" with the command "{}"'.format(
self.name, ' '.join(cmd)))
subprocess.check_output(cmd)
firewalls = call_gcloud([
'compute', 'firewall-rules', 'list',
'--filter=network='+self.network,
'--format=value(name)']).strip().split()
for firewall in firewalls:
delete_firewall_cmd = ['compute', 'firewall-rules', 'delete',
'--project', self.project,
'--quiet', firewall]
print('Deleting the firewall "{}" with the command "{}"'.format(
firewall, ' '.join(delete_firewall_cmd)))
call_gcloud(delete_firewall_cmd)
delete_network_cmd = ['compute', 'networks', 'delete',
'--project', self.project,
'--quiet', self.network]
print('Deleting the network "{}" with the command "{}"'.format(
self.network, ' '.join(delete_network_cmd)))
call_gcloud(delete_network_cmd)
def status(self):
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project,
'--zone', self.zone,
'list', '--filter', "(name={})".format(self.name)]
return subprocess.check_output(cmd).decode('utf-8')
class DatalabConnection(object):
def __init__(self, project, zone, instance, stdout, max_attempts=10):
self.project = project
self.zone = zone
self.instance = instance
self.stdout = stdout
self.max_attempts = max_attempts
def __enter__(self):
self.port = free_port()
# Give a moment for the temporarily-acquired port to
# free up before trying to reuse it.
time.sleep(10)
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project, '--zone', self.zone,
'connect', '--no-launch-browser',
'--port={}'.format(self.port),
self.instance]
self.process = subprocess.Popen(cmd, stdout=self.stdout)
attempts = 0
while attempts < self.max_attempts:
attempts += 1
with open(self.stdout.name, "r") as written_stdout:
if connection_msg in written_stdout.read():
self.readme_url = readme_url_template.format(self.port)
self.info_url = info_url_template.format(self.port)
return self
time.sleep(60)
return self
def __exit__(self, *unused_args, **unused_kwargs):
self.process.terminate()
self.process.communicate()
class TestEndToEnd(unittest.TestCase):
def setUp(self):
self.test_run_name = generate_unique_id()
self.project = call_gcloud(
['config', 'get-value', 'core/project']).strip()
self._zone = call_gcloud(
['config', 'get-value', 'compute/zone']).strip()
        print('Testing in the zone "{}" under the project {}'.format(
self.get_zone(), self.project))
def get_zone(self):
if self._zone == '':
return random_zone()
return self._zone
def call_datalab(self, subcommand, args):
cmd = [python_executable, '-u', './tools/cli/datalab.py', '--quiet',
'--project', self.project, subcommand] + args
print('Running datalab command "{}"'.format(' '.join(cmd)))
return subprocess.check_output(cmd).decode('utf-8')
def retry_test(self, test_method):
last_error = None
for _ in range(retry_count):
try:
test_method()
return
except Exception as ex:
last_error = ex
raise last_error
def test_create_delete(self):
self.retry_test(self.run_create_delete_test)
def run_create_delete_test(self):
instance_name = ""
instance_zone = self.get_zone()
with DatalabInstance(self.test_run_name,
self.project,
instance_zone) as instance:
instance_name = instance.name
self.assertIn('RUNNING', instance.status())
instances = self.call_datalab('list', [])
self.assertNotIn(instance_name, instances)
def test_connect(self):
self.retry_test(self.run_connection_test)
def run_connection_test(self):
instance_name = ""
instance_zone = self.get_zone()
with DatalabInstance(self.test_run_name,
self.project,
instance_zone) as instance:
instance_name = instance.name
self.assertIn('RUNNING', instance.status())
self.call_datalab('stop', ['--zone', instance_zone, instance.name])
self.assertIn('TERMINATED', instance.status())
with tempfile.NamedTemporaryFile() as tmp:
with DatalabConnection(self.project, instance_zone,
instance.name, tmp) as conn:
readme = urlopen(conn.readme_url)
readme_contents = readme.read().decode('utf-8')
print('README contents returned: "{}"'.format(
readme_contents))
self.assertIn(readme_header, readme_contents)
instances = self.call_datalab('list', [])
self.assertNotIn(instance_name, instances)
def test_internal_ip(self):
self.retry_test(self.run_internal_ip_test)
def run_internal_ip_test(self):
instance_name = ""
instance_zone = self.get_zone()
with DatalabInstance(self.test_run_name,
self.project,
instance_zone,
external_ip=False) as instance:
instance_name = instance.name
self.assertIn('RUNNING', instance.status())
with tempfile.NamedTemporaryFile() as tmp:
with DatalabConnection(self.project, instance_zone,
instance.name, tmp,
max_attempts=15) as conn:
                    # Private-IP instances cannot clone the sample notebooks,
                    # so we check the /_info endpoint instead.
info = urlopen(conn.info_url)
info_contents = info.read().decode('utf-8')
print('/_info contents returned: "{}"'.format(
info_contents))
self.assertIn('DATALAB_VERSION', info_contents)
instances = self.call_datalab('list', [])
self.assertNotIn(instance_name, instances)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--runs', type=int, default=1, choices=range(1, 100),
metavar='COUNT', dest='runs',
help='Number of times to run the test suite')
args = parser.parse_args()
failed_count, run_count = 0, 0
for _ in range(0, args.runs):
suite = unittest.TestLoader().loadTestsFromTestCase(TestEndToEnd)
result = unittest.TextTestRunner(buffer=True).run(suite)
run_count += 1
if not result.wasSuccessful():
failed_count += 1
print('Ran {} test runs with {} failing'.format(run_count, failed_count))
| python |
##############################
# import Verif #
# var = Verif.class(object) #
# var.def() #
##############################
# this lib is for verification #
# made by khalil preview       #
##############################
import tkinter
from tkinter import *
from tkinter import messagebox
class sign_in(object):
    def __init__(self, un, up, un1, up1):
self.un = un
self.up = up
self.un1 = un1
self.up1 = up1
def sign_in_verif(self):
if self.un1 == self.un and self.up1 == self.up :
result = []
username = str(self.un1)
userpass = str(self.up1)
result.append(username)
result.append(userpass)
f = open(str(username + '.sfr'), 'w')
f.write(str(result))
f.close()
        else:
            messagebox.showinfo("Sign in Failed", "Username or password wrong !!!")
| python |
import attrs
import asyncio
import datetime
import os
import shutil
import pickle
from typing import Any, Optional, List
@attrs.define
class Cache:
name: str
data: Any
expired_after: int = attrs.field(default=10)
expiration: datetime.datetime = attrs.field(init=False)
@expiration.default
def _expiration(self):
return datetime.datetime.utcnow() + datetime.timedelta(
minutes=self.expired_after
)
def ensure_cachedir(cachedir: str):
if not os.path.isdir(cachedir):
os.makedirs(cachedir)
def get_cache_names(cachedir: str) -> List[str]:
ensure_cachedir(cachedir)
result = []
for cdir in os.listdir(cachedir):
if os.path.isfile(os.path.join(cachedir, cdir, "data")):
result.append(cdir)
return result
def has_cache(cachedir: str, name: str) -> bool:
ensure_cachedir(cachedir)
return name in get_cache_names(cachedir)
def store(cachedir: str, cache: Cache):
ensure_cachedir(cachedir)
if cache.name in get_cache_names(cachedir):
raise NameError(f"a cache with the name `{cache.name}` already stored.")
os.makedirs(os.path.join(cachedir, cache.name))
with open(os.path.join(cachedir, cache.name, "data"), "wb") as file:
pickle.dump(cache, file, protocol=pickle.HIGHEST_PROTOCOL)
def get(cachedir: str, name: str) -> Cache:
ensure_cachedir(cachedir)
for cdir in get_cache_names(cachedir):
if cdir == name:
with open(os.path.join(cachedir, cdir, "data"), "rb") as file:
return pickle.load(file)
def remove(cachedir, name: str):
ensure_cachedir(cachedir)
if has_cache(cachedir, name):
shutil.rmtree(os.path.join(cachedir, name))
else:
raise ValueError(f"cache with the name `{name}` not found.")
async def update_cachedir(cachedir: str):
while True:
for cdir in get_cache_names(cachedir):
cache = get(cachedir, cdir)
if cache:
if datetime.datetime.utcnow() >= cache.expiration:
remove(cachedir, cache.name)
await asyncio.sleep(0.1)
class MemCacheManager:
"""memory cache manager"""
def __init__(self):
self.caches: List[Cache] = []
def store(self, cache: Cache):
if cache.name in self.get_cache_names():
raise NameError(f"a cache with the name `{cache.name}` already stored.")
self.caches.append(cache)
def has_cache(self, name: str) -> bool:
return name in self.get_cache_names()
def get_cache_names(self) -> List[str]:
return [cache.name for cache in self.caches]
def get(self, name: str) -> Cache:
for cache in self.caches:
if cache.name == name:
return cache
def remove(self, name: str):
cache = self.get(name)
if cache:
self.caches.remove(cache)
else:
raise ValueError(f"cache with the name `{name}` not found.")
async def update(self):
"""check for expired caches"""
while True:
for index, cache in enumerate(self.caches):
if datetime.datetime.utcnow() >= cache.expiration:
self.caches.remove(cache)
await asyncio.sleep(0.1)
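# A minimal usage sketch of the in-memory manager above (names and values are
# illustrative): store a short-lived cache entry, read it back, and run the
# expiry loop alongside other work.
async def _demo():
    manager = MemCacheManager()
    manager.store(Cache(name="greeting", data="hello", expired_after=1))
    asyncio.create_task(manager.update())        # background expiry check
    print(manager.get("greeting").data)          # -> "hello"
    await asyncio.sleep(0)                       # let the expiry loop start once

if __name__ == "__main__":
    asyncio.run(_demo())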
| python |
#Done by Carlos Amaral on 18/06/2020
"""
Imagine an alien was just shot down in a game. Create a
variable called alien_color and assign it a value of 'green' , 'yellow' , or 'red' .
• Write an if statement to test whether the alien’s color is green. If it is, print
a message that the player just earned 5 points.
• Write one version of this program that passes the if test and another that
fails. (The version that fails will have no output.)
"""
#Alien Colors 1
alien_color = 'green'
if alien_color == 'green':
print("Congratulations. You've just earned 5 points!")
print("\n")
#Fail version
alien_color = 'yellow'
if alien_color == 'green':
print("Congratulations. You've just earned 5 points!") | python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: epl/protobuf/v1/query.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='epl/protobuf/v1/query.proto',
package='epl.protobuf.v1',
syntax='proto3',
serialized_options=b'\n\023com.epl.protobuf.v1B\nQueryProtoP\001Z.github.com/geo-grpc/api/golang/epl/protobuf/v1\242\002\003QPB\252\002\023com.epl.protobuf.v1',
serialized_pb=b'\n\x1b\x65pl/protobuf/v1/query.proto\x12\x0f\x65pl.protobuf.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc0\x01\n\x0b\x46loatFilter\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12\x0f\n\x05value\x18\x01 \x01(\x02H\x00\x12\x0f\n\x05start\x18\x03 \x01(\x02H\x00\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x02\x12\x36\n\x0esort_direction\x18\x05 \x01(\x0e\x32\x1e.epl.protobuf.v1.SortDirection\x12\x0b\n\x03set\x18\x06 \x03(\x02\x42\x06\n\x04\x64\x61ta\"\xc1\x01\n\x0c\x44oubleFilter\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12\x0f\n\x05value\x18\x01 \x01(\x01H\x00\x12\x0f\n\x05start\x18\x03 \x01(\x01H\x00\x12\x0b\n\x03\x65nd\x18\x04 \x01(\x01\x12\x36\n\x0esort_direction\x18\x05 \x01(\x0e\x32\x1e.epl.protobuf.v1.SortDirection\x12\x0b\n\x03set\x18\x06 \x03(\x01\x42\x06\n\x04\x64\x61ta\"\x8b\x02\n\x0fTimestampFilter\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12+\n\x05value\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12+\n\x05start\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\'\n\x03\x65nd\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x36\n\x0esort_direction\x18\x05 \x01(\x0e\x32\x1e.epl.protobuf.v1.SortDirectionB\x06\n\x04\x64\x61ta\"\xc1\x01\n\x0cUInt32Filter\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12\x0f\n\x05value\x18\x01 \x01(\rH\x00\x12\x0f\n\x05start\x18\x03 \x01(\rH\x00\x12\x0b\n\x03\x65nd\x18\x04 \x01(\r\x12\x36\n\x0esort_direction\x18\x05 \x01(\x0e\x32\x1e.epl.protobuf.v1.SortDirection\x12\x0b\n\x03set\x18\x06 \x03(\rB\x06\n\x04\x64\x61ta\"a\n\x0cStringFilter\x12\r\n\x05value\x18\x01 \x01(\t\x12\x35\n\x08rel_type\x18\x02 \x01(\x0e\x32#.epl.protobuf.v1.FilterRelationship\x12\x0b\n\x03set\x18\x06 \x03(\t*2\n\rSortDirection\x12\x0e\n\nNOT_SORTED\x10\x00\x12\x08\n\x04\x44\x45SC\x10\x01\x12\x07\n\x03\x41SC\x10\x02*\x96\x01\n\x12\x46ilterRelationship\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x02\x12\x07\n\x03GTE\x10\x04\x12\x06\n\x02LT\x10\x08\x12\x06\n\x02GT\x10\x10\x12\x0b\n\x07\x42\x45TWEEN\x10 \x12\x0f\n\x0bNOT_BETWEEN\x10@\x12\x08\n\x03NEQ\x10\x80\x01\x12\x07\n\x02IN\x10\x80\x02\x12\x0b\n\x06NOT_IN\x10\x80\x04\x12\t\n\x04LIKE\x10\x80\x08\x12\r\n\x08NOT_LIKE\x10\x80\x10\x42o\n\x13\x63om.epl.protobuf.v1B\nQueryProtoP\x01Z.github.com/geo-grpc/api/golang/epl/protobuf/v1\xa2\x02\x03QPB\xaa\x02\x13\x63om.epl.protobuf.v1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_SORTDIRECTION = _descriptor.EnumDescriptor(
name='SortDirection',
full_name='epl.protobuf.v1.SortDirection',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NOT_SORTED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DESC', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASC', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1037,
serialized_end=1087,
)
_sym_db.RegisterEnumDescriptor(_SORTDIRECTION)
SortDirection = enum_type_wrapper.EnumTypeWrapper(_SORTDIRECTION)
_FILTERRELATIONSHIP = _descriptor.EnumDescriptor(
name='FilterRelationship',
full_name='epl.protobuf.v1.FilterRelationship',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='EQ', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LTE', index=1, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GTE', index=2, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LT', index=3, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GT', index=4, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BETWEEN', index=5, number=32,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_BETWEEN', index=6, number=64,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEQ', index=7, number=128,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IN', index=8, number=256,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_IN', index=9, number=512,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LIKE', index=10, number=1024,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_LIKE', index=11, number=2048,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1090,
serialized_end=1240,
)
_sym_db.RegisterEnumDescriptor(_FILTERRELATIONSHIP)
FilterRelationship = enum_type_wrapper.EnumTypeWrapper(_FILTERRELATIONSHIP)
NOT_SORTED = 0
DESC = 1
ASC = 2
EQ = 0
LTE = 2
GTE = 4
LT = 8
GT = 16
BETWEEN = 32
NOT_BETWEEN = 64
NEQ = 128
IN = 256
NOT_IN = 512
LIKE = 1024
NOT_LIKE = 2048
_FLOATFILTER = _descriptor.Descriptor(
name='FloatFilter',
full_name='epl.protobuf.v1.FloatFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.FloatFilter.rel_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.FloatFilter.value', index=1,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='epl.protobuf.v1.FloatFilter.start', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='epl.protobuf.v1.FloatFilter.end', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_direction', full_name='epl.protobuf.v1.FloatFilter.sort_direction', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='set', full_name='epl.protobuf.v1.FloatFilter.set', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='epl.protobuf.v1.FloatFilter.data',
index=0, containing_type=None, fields=[]),
],
serialized_start=82,
serialized_end=274,
)
_DOUBLEFILTER = _descriptor.Descriptor(
name='DoubleFilter',
full_name='epl.protobuf.v1.DoubleFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.DoubleFilter.rel_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.DoubleFilter.value', index=1,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='epl.protobuf.v1.DoubleFilter.start', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='epl.protobuf.v1.DoubleFilter.end', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_direction', full_name='epl.protobuf.v1.DoubleFilter.sort_direction', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='set', full_name='epl.protobuf.v1.DoubleFilter.set', index=5,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='epl.protobuf.v1.DoubleFilter.data',
index=0, containing_type=None, fields=[]),
],
serialized_start=277,
serialized_end=470,
)
_TIMESTAMPFILTER = _descriptor.Descriptor(
name='TimestampFilter',
full_name='epl.protobuf.v1.TimestampFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.TimestampFilter.rel_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.TimestampFilter.value', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='epl.protobuf.v1.TimestampFilter.start', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='epl.protobuf.v1.TimestampFilter.end', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_direction', full_name='epl.protobuf.v1.TimestampFilter.sort_direction', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='epl.protobuf.v1.TimestampFilter.data',
index=0, containing_type=None, fields=[]),
],
serialized_start=473,
serialized_end=740,
)
_UINT32FILTER = _descriptor.Descriptor(
name='UInt32Filter',
full_name='epl.protobuf.v1.UInt32Filter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.UInt32Filter.rel_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.UInt32Filter.value', index=1,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='epl.protobuf.v1.UInt32Filter.start', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='epl.protobuf.v1.UInt32Filter.end', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sort_direction', full_name='epl.protobuf.v1.UInt32Filter.sort_direction', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='set', full_name='epl.protobuf.v1.UInt32Filter.set', index=5,
number=6, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='data', full_name='epl.protobuf.v1.UInt32Filter.data',
index=0, containing_type=None, fields=[]),
],
serialized_start=743,
serialized_end=936,
)
_STRINGFILTER = _descriptor.Descriptor(
name='StringFilter',
full_name='epl.protobuf.v1.StringFilter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='epl.protobuf.v1.StringFilter.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rel_type', full_name='epl.protobuf.v1.StringFilter.rel_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='set', full_name='epl.protobuf.v1.StringFilter.set', index=2,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=938,
serialized_end=1035,
)
_FLOATFILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
_FLOATFILTER.fields_by_name['sort_direction'].enum_type = _SORTDIRECTION
_FLOATFILTER.oneofs_by_name['data'].fields.append(
_FLOATFILTER.fields_by_name['value'])
_FLOATFILTER.fields_by_name['value'].containing_oneof = _FLOATFILTER.oneofs_by_name['data']
_FLOATFILTER.oneofs_by_name['data'].fields.append(
_FLOATFILTER.fields_by_name['start'])
_FLOATFILTER.fields_by_name['start'].containing_oneof = _FLOATFILTER.oneofs_by_name['data']
_DOUBLEFILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
_DOUBLEFILTER.fields_by_name['sort_direction'].enum_type = _SORTDIRECTION
_DOUBLEFILTER.oneofs_by_name['data'].fields.append(
_DOUBLEFILTER.fields_by_name['value'])
_DOUBLEFILTER.fields_by_name['value'].containing_oneof = _DOUBLEFILTER.oneofs_by_name['data']
_DOUBLEFILTER.oneofs_by_name['data'].fields.append(
_DOUBLEFILTER.fields_by_name['start'])
_DOUBLEFILTER.fields_by_name['start'].containing_oneof = _DOUBLEFILTER.oneofs_by_name['data']
_TIMESTAMPFILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
_TIMESTAMPFILTER.fields_by_name['value'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TIMESTAMPFILTER.fields_by_name['start'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TIMESTAMPFILTER.fields_by_name['end'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TIMESTAMPFILTER.fields_by_name['sort_direction'].enum_type = _SORTDIRECTION
_TIMESTAMPFILTER.oneofs_by_name['data'].fields.append(
_TIMESTAMPFILTER.fields_by_name['value'])
_TIMESTAMPFILTER.fields_by_name['value'].containing_oneof = _TIMESTAMPFILTER.oneofs_by_name['data']
_TIMESTAMPFILTER.oneofs_by_name['data'].fields.append(
_TIMESTAMPFILTER.fields_by_name['start'])
_TIMESTAMPFILTER.fields_by_name['start'].containing_oneof = _TIMESTAMPFILTER.oneofs_by_name['data']
_UINT32FILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
_UINT32FILTER.fields_by_name['sort_direction'].enum_type = _SORTDIRECTION
_UINT32FILTER.oneofs_by_name['data'].fields.append(
_UINT32FILTER.fields_by_name['value'])
_UINT32FILTER.fields_by_name['value'].containing_oneof = _UINT32FILTER.oneofs_by_name['data']
_UINT32FILTER.oneofs_by_name['data'].fields.append(
_UINT32FILTER.fields_by_name['start'])
_UINT32FILTER.fields_by_name['start'].containing_oneof = _UINT32FILTER.oneofs_by_name['data']
_STRINGFILTER.fields_by_name['rel_type'].enum_type = _FILTERRELATIONSHIP
DESCRIPTOR.message_types_by_name['FloatFilter'] = _FLOATFILTER
DESCRIPTOR.message_types_by_name['DoubleFilter'] = _DOUBLEFILTER
DESCRIPTOR.message_types_by_name['TimestampFilter'] = _TIMESTAMPFILTER
DESCRIPTOR.message_types_by_name['UInt32Filter'] = _UINT32FILTER
DESCRIPTOR.message_types_by_name['StringFilter'] = _STRINGFILTER
DESCRIPTOR.enum_types_by_name['SortDirection'] = _SORTDIRECTION
DESCRIPTOR.enum_types_by_name['FilterRelationship'] = _FILTERRELATIONSHIP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FloatFilter = _reflection.GeneratedProtocolMessageType('FloatFilter', (_message.Message,), {
'DESCRIPTOR' : _FLOATFILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.FloatFilter)
})
_sym_db.RegisterMessage(FloatFilter)
DoubleFilter = _reflection.GeneratedProtocolMessageType('DoubleFilter', (_message.Message,), {
'DESCRIPTOR' : _DOUBLEFILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.DoubleFilter)
})
_sym_db.RegisterMessage(DoubleFilter)
TimestampFilter = _reflection.GeneratedProtocolMessageType('TimestampFilter', (_message.Message,), {
'DESCRIPTOR' : _TIMESTAMPFILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.TimestampFilter)
})
_sym_db.RegisterMessage(TimestampFilter)
UInt32Filter = _reflection.GeneratedProtocolMessageType('UInt32Filter', (_message.Message,), {
'DESCRIPTOR' : _UINT32FILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.UInt32Filter)
})
_sym_db.RegisterMessage(UInt32Filter)
StringFilter = _reflection.GeneratedProtocolMessageType('StringFilter', (_message.Message,), {
'DESCRIPTOR' : _STRINGFILTER,
'__module__' : 'epl.protobuf.v1.query_pb2'
# @@protoc_insertion_point(class_scope:epl.protobuf.v1.StringFilter)
})
_sym_db.RegisterMessage(StringFilter)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| python |
from qqai.classes import *
class TextTranslateAILab(QQAIClass):
    """Text translation (AI Lab)"""
api = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_texttrans'
    def make_params(self, text, translate_type=0):
        """Build the parameters for the API request"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'type': translate_type,
'text': text,
}
params['sign'] = self.get_sign(params)
return params
def run(self, text, translate_type=0):
params = self.make_params(text, translate_type)
response = self.call_api(params)
result = json.loads(response.text)
return result
class TextTranslateFanyi(QQAIClass):
    """Text translation (翻译君 / Tencent Translator)"""
api = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_texttranslate'
    def make_params(self, text, source='auto', target='auto'):
        """Build the parameters for the API request"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'text': text,
'source': source,
'target': target,
}
params['sign'] = self.get_sign(params)
return params
def run(self, text, source='auto', target='auto'):
params = self.make_params(text, source, target)
response = self.call_api(params)
result = json.loads(response.text)
return result
class ImageTranslate(QQAIClass):
    """Image translation"""
api = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_imagetranslate'
    def make_params(self, image_param, scene, source='auto', target='auto'):
        """Build the parameters for the API request"""
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'image': self.get_base64(image_param),
'session': int(time.time()),
'scene': scene,
'source': source,
'target': target,
}
params['sign'] = self.get_sign(params)
return params
def run(self, image_param, scene, source='auto', target='auto'):
params = self.make_params(image_param, scene, source, target)
response = self.call_api(params)
result = json.loads(response.text)
return result
class TextDetect(QQAIClass):
    """Language detection"""
api = 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_textdetect'
    def make_params(self, text, candidate_langs=None, force=0):
        """Build the parameters for the API request"""
if candidate_langs is None:
candidate_langs = ['zh', 'en', 'jp', 'kr']
if type(candidate_langs) == str:
candidate_langs_param = candidate_langs
else:
candidate_langs_param = '|'.join(candidate_langs)
params = {'app_id': self.app_id,
'time_stamp': int(time.time()),
'nonce_str': int(time.time()),
'text': text,
'candidate_langs': candidate_langs_param,
'force': force
}
params['sign'] = self.get_sign(params)
return params
def run(self, text, candidate_langs=None, force=0):
params = self.make_params(text, candidate_langs, force)
response = self.call_api(params)
result = json.loads(response.text)
return result | python |
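# --- Usage sketch (not part of the original module) ---
# Assumption: QQAIClass is constructed from the Tencent AI open-platform credentials
# (an app_id and app_key); check qqai.classes for the exact constructor signature.
#
#     translator = TextTranslateFanyi(app_id, app_key)
#     result = translator.run('你好,世界', source='zh', target='en')   # parsed JSON response
#
#     detector = TextDetect(app_id, app_key)
#     print(detector.run('Hello world', candidate_langs=['zh', 'en']))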
"""
Enumeration of the possible outcomes of a rock-paper-scissors (PPT) match.
"""
from enum import Enum
class Condicion(Enum):
"""
    Possible states of the match result.
"""
VICTORIA = 0
DERROTA = 1
EMPATE = 2
| python |
import csv
import random
def load_lorem_sentences():
with open('lorem.txt') as fh:
return [l.strip() for l in fh.readlines()]
def load_dictionary():
with open('dictionary.csv') as csv_file:
return [l for l in csv.DictReader(csv_file, delimiter=',')]
SUFFIXES = ['at', 'it', 'is', 'us', 'et', 'um']
LOREM_SENTENCES = load_lorem_sentences()
EXPRESSIONS = load_dictionary()
def get_expression():
expression = random.choice(EXPRESSIONS)
    stem = expression['stem'] if len(expression['stem']) > 0 else expression['expression']
if len(expression['alternatives']) > 0:
suffix = random.choice(expression['alternatives'].split())
else:
suffix = random.choice(SUFFIXES)
    return stem + suffix
def get_sentence():
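    # Take a random lorem sentence and replace roughly one word in five with a generated expression.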
sentence = random.choice(LOREM_SENTENCES).split()
n = len(sentence) // 5 + 1
expressions = [get_expression() for _ in range(n)]
for i, expr in zip(random.sample(range(len(sentence)), n), expressions):
sentence[i] = expr
return ' '.join(sentence).strip(' .').capitalize() + '.'
if __name__ == '__main__':
print(get_sentence())
| python |
import pickle
import random
import h5py
import numpy as np
import pandas as pd
class Generator():
    """ Data generator for the neural image captioning model (NIC).
The flow method outputs a list of two dictionaries containing
the inputs and outputs to the network.
# Arguments:
data_path = data_path to the preprocessed data computed by the
Preprocessor class.
"""
def __init__(self,data_path='preprocessed_data/',
training_filename=None,
validation_filename=None,
image_features_filename=None,
batch_size=100):
self.data_path = data_path
if training_filename == None:
self.training_filename = data_path + 'training_data.txt'
else:
self.training_filename = self.data_path + training_filename
if validation_filename == None:
self.validation_filename = data_path + 'validation_data.txt'
else:
self.validation_filename = self.data_path + validation_filename
if image_features_filename == None:
self.image_features_filename = (data_path +
'inception_image_name_to_features.h5')
else:
            self.image_features_filename = self.data_path + image_features_filename
self.dictionary = None
self.training_dataset = None
self.validation_dataset = None
self.image_names_to_features = None
data_logs = np.genfromtxt(self.data_path + 'data_parameters.log',
delimiter=' ', dtype='str')
data_logs = dict(zip(data_logs[:, 0], data_logs[:, 1]))
self.MAX_TOKEN_LENGTH = int(data_logs['max_caption_length:']) + 2
self.IMG_FEATS = int(data_logs['IMG_FEATS:'])
self.BOS = str(data_logs['BOS:'])
self.EOS = str(data_logs['EOS:'])
self.PAD = str(data_logs['PAD:'])
self.VOCABULARY_SIZE = None
self.word_to_id = None
self.id_to_word = None
self.BATCH_SIZE = batch_size
self.load_dataset()
self.load_vocabulary()
self.load_image_features()
def load_vocabulary(self):
print('Loading vocabulary...')
word_to_id = pickle.load(open(self.data_path + 'word_to_id.p', 'rb'))
id_to_word = pickle.load(open(self.data_path + 'id_to_word.p', 'rb'))
self.VOCABULARY_SIZE = len(word_to_id)
self.word_to_id = word_to_id
self.id_to_word = id_to_word
def load_image_features(self):
self.image_names_to_features = h5py.File(
self.image_features_filename, 'r')
def load_dataset(self):
print('Loading training dataset...')
train_data = pd.read_table(self.training_filename, delimiter='*')
train_data = np.asarray(train_data,dtype=str)
self.training_dataset = train_data
print('Loading validation dataset...')
validation_dataset = pd.read_table(
self.validation_filename,delimiter='*')
validation_dataset = np.asarray(validation_dataset, dtype=str)
self.validation_dataset = validation_dataset
def return_dataset(self, path=None, dataset_name='all', mode='training'):
print('Loading dataset in memory...')
if path == None:
path = self.data_path
if mode == 'training':
data = pd.read_table(self.training_filename, sep='*')
elif mode == 'test':
data = pd.read_table(path + 'test_data.txt', sep='*')
if dataset_name != 'all':
data = data[data['image_names'].str.contains(dataset_name)]
data = np.asarray(data)
data_size = data.shape[0]
image_names = data[:, 0]
image_features = np.zeros((data_size,self.MAX_TOKEN_LENGTH,
self.IMG_FEATS))
image_captions = np.zeros((data_size,self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
target_captions = np.zeros((data_size,self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
for image_arg, image_name in enumerate(image_names):
caption = data[image_arg,1]
one_hot_caption = self.format_to_one_hot(caption)
image_captions[image_arg, :, :] = one_hot_caption
target_captions[image_arg, :, :] = self.get_one_hot_target(
one_hot_caption)
image_features[image_arg, :, :] = self.get_image_features(
image_name)
return image_features, image_captions, target_captions,image_names
def flow(self, mode):
if mode == 'train':
data = self.training_dataset
#random.shuffle(data) #this is probably correct but untested
if mode == 'validation':
data = self.validation_dataset
image_names = data[:,0].tolist()
empty_batch = self.make_empty_batch()
captions_batch = empty_batch[0]
images_batch = empty_batch[1]
targets_batch = empty_batch[2]
batch_counter = 0
while True:
for data_arg, image_name in enumerate(image_names):
caption = data[data_arg,1]
one_hot_caption = self.format_to_one_hot(caption)
captions_batch[batch_counter, :, :] = one_hot_caption
targets_batch[batch_counter, :, :] = self.get_one_hot_target(
one_hot_caption)
images_batch[batch_counter, :, :] = self.get_image_features(
image_name)
if batch_counter == self.BATCH_SIZE - 1:
yield_dictionary = self.wrap_in_dictionary(captions_batch,
images_batch,
targets_batch)
yield yield_dictionary
empty_batch = self.make_empty_batch()
captions_batch = empty_batch[0]
images_batch = empty_batch[1]
targets_batch = empty_batch[2]
                    batch_counter = 0
                    continue  # start the next batch at index 0 instead of skipping slot 0
                batch_counter = batch_counter + 1
def make_test_input(self,image_name=None):
if image_name == None:
image_name = random.choice(self.training_dataset[:, 0].tolist())
one_hot_caption = np.zeros((1, self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
begin_token_id = self.word_to_id[self.BOS]
one_hot_caption[0, 0, begin_token_id] = 1
image_features = np.zeros((1, self.MAX_TOKEN_LENGTH, self.IMG_FEATS))
image_features[0, :, :] = self.get_image_features(image_name)
return one_hot_caption, image_features, image_name
def make_empty_batch(self):
captions_batch = np.zeros((self.BATCH_SIZE,self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
images_batch = np.zeros((self.BATCH_SIZE, self.MAX_TOKEN_LENGTH,
self.IMG_FEATS))
targets_batch = np.zeros((self.BATCH_SIZE,self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
return captions_batch, images_batch , targets_batch
def format_to_one_hot(self,caption):
tokenized_caption = caption.split()
tokenized_caption = [self.BOS] + tokenized_caption + [self.EOS]
one_hot_caption = np.zeros((self.MAX_TOKEN_LENGTH,
self.VOCABULARY_SIZE))
word_ids = [self.word_to_id[word] for word in tokenized_caption
if word in self.word_to_id]
for sequence_arg, word_id in enumerate(word_ids):
one_hot_caption[sequence_arg,word_id] = 1
return one_hot_caption
def get_image_features(self, image_name):
image_features = self.image_names_to_features[image_name]\
['image_features'][:]
image_input = np.zeros((self.MAX_TOKEN_LENGTH, self.IMG_FEATS))
image_input[0,:] = image_features
return image_input
def get_one_hot_target(self,one_hot_caption):
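        # The target is the input caption shifted one timestep to the left (next-word
        # prediction); the final timestep is left as all zeros.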
one_hot_target = np.zeros_like(one_hot_caption)
one_hot_target[:-1, :] = one_hot_caption[1:, :]
return one_hot_target
def wrap_in_dictionary(self,one_hot_caption,
image_features,
one_hot_target):
return [{'text': one_hot_caption,
'image': image_features},
{'output': one_hot_target}]
| python |
#!/usr/bin/env python
# # -*- coding: utf-8 -*-
"""
@File: routes.py
@Author: Jim.Dai.Cn
@Date: 2020/9/22 上午11:26
@Desc:
"""
from app.company import blueprint
from flask import render_template, jsonify, current_app, request
@blueprint.route('/company', methods=['GET'])
def get_company_list():
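    # NOTE: returns a hard-coded sample payload; the MongoDB-backed variant is /companyDB below.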
clist = [
{"ID":1017,"USER_ID":117,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"江苏乐福德新材料技术有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102005,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206MA1MWACH6R","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"吴迦迦","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":13814244466,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"","REGIST_TIME_ABCE":"","REGIST_CAPITAL_AC":"","WORKERS_NO_AC":"","DEVELOP_NO_A":"","IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":"","INDUSTRY_A":"","NATURE_A":"","PROJ_A":"","IS_GAUGE":"","IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"36:43.7","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":"","TECHNOLOGY_FIELD":"","INVESTMENT_MONEY":"","DEV_MASTER_NUM":"","DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":"","NATIONAL_ECONOMY_INDUSTRY":"","COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":"","COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":"","FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1018,"USER_ID":118,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡市易动智能装备有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102005,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206MA1W9HMH22","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"邱林峰","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":13306199950,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡市惠山区长安街道畅惠路10","REGIST_TIME_ABCE":"","REGIST_CAPITAL_AC":"","WORKERS_NO_AC":"","DEVELOP_NO_A":"","IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":"","INDUSTRY_A":"","NATURE_A":"","PROJ_A":"","IS_GAUGE":"","IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"36:43.7","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":"","TECHNOLOGY_FIELD":"","INVESTMENT_MONEY":"","DEV_MASTER_NUM":"","DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":"","NATIONAL_ECONOMY_INDUSTRY":"","COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":"","COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":"","FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1020,"USER_ID":120,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡达美新材料有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102006,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206MA1M97J91B","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"郑巍","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":13951582299,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"","REGIST_TIME_ABCE":"","REGIST_CAPITAL_AC":"","WORKERS_NO_AC":"","DEVELOP_NO_A":"","IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":"","INDUSTRY_A":"","NATURE_A":"","PROJ_A":"","IS_GAUGE":1,"IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"36:43.7","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":"","TECHNOLOGY_FIELD":"","INVESTMENT_MONEY":"","DEV_MASTER_NUM":"","DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":"","NATIONAL_ECONOMY_INDUSTRY":"","COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":"","COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":"","FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1021,"USER_ID":121,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"江苏韦兰德特种装备科技有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102006,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":913204000000000000,"PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"沈伟栋","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":18020301820,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡市惠山工业转型集聚区北惠路123号","REGIST_TIME_ABCE":"00:00.0","REGIST_CAPITAL_AC":7000,"WORKERS_NO_AC":65,"DEVELOP_NO_A":10,"IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":104,"INDUSTRY_A":41,"NATURE_A":"","PROJ_A":999,"IS_GAUGE":1,"IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"沈其明","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"19:46.8","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":1,"TECHNOLOGY_FIELD":807,"INVESTMENT_MONEY":0,"DEV_MASTER_NUM":0,"DEV_DOCTOR_NUM":0,"INDEPENTDENT_LEGAL_PERSON":1,"NATIONAL_ECONOMY_INDUSTRY":873,"COMPANY_ATTRIBUTE":"其他","COMPANY_SCALE":"中型","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":0,"COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":18020301818,"FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1071,"USER_ID":171,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡正则精准医学检验有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102004,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206MA1MCH2R4R","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"杨丽华","FIXED_TEL_ABCDEF":"0510-85993951","MOVE_TEL_ABCDEF":13915279492,"MAIL_ABCDEF":"[email protected]","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡惠山经济开发区惠山大道1699号八号楼五层","REGIST_TIME_ABCE":"00:00.0","REGIST_CAPITAL_AC":2000,"WORKERS_NO_AC":42,"DEVELOP_NO_A":16,"IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"医学检验;生物技术的研发、技术咨询、技术服务、技术转让;医疗器械的租赁。(依法须经批准的项目,经相关部门批准后方可开展经营活动)。","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":104,"INDUSTRY_A":21,"NATURE_A":"","PROJ_A":999,"IS_GAUGE":0,"IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"盛青松","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"08:05.6","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":1,"TECHNOLOGY_FIELD":201,"INVESTMENT_MONEY":"","DEV_MASTER_NUM":10,"DEV_DOCTOR_NUM":2,"INDEPENTDENT_LEGAL_PERSON":1,"NATIONAL_ECONOMY_INDUSTRY":44,"COMPANY_ATTRIBUTE":"其他","COMPANY_SCALE":"小型","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":0,"COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":13706159105,"FINANCE_CONTACT":"蒋静","FINANCE_TEL":"","FINANCE_MOBEL":"0510-85993951","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":1,"REG_ADDRESS":""},
{"ID":1072,"USER_ID":172,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡申联专用汽车有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102009,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":"91320206132603380D","PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"陆芸","FIXED_TEL_ABCDEF":66681359,"MOVE_TEL_ABCDEF":13812188070,"MAIL_ABCDEF":"[email protected]","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡市惠山区惠际路86号","REGIST_TIME_ABCE":"00:00.0","REGIST_CAPITAL_AC":6640,"WORKERS_NO_AC":142,"DEVELOP_NO_A":24,"IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"汽车零部件及配件的研发、制造,机械零部件加工,汽车及汽车零部件、配件、医疗器械的销售,汽车制造的技术咨询、技术服务,空调修理,自营和代理各类商品及技术的进出口业务(国家限定企业经营或禁止进出口的商品和技术除外)。(依法须经批准的项目,经相关部门批准后方可开展经营活动)","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":104,"INDUSTRY_A":"请选择...","NATURE_A":"","PROJ_A":999,"IS_GAUGE":1,"IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"蓝青松","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"蓝青松","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"38:06.4","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":0,"ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":1,"TECHNOLOGY_FIELD":"请选择...","INVESTMENT_MONEY":"","DEV_MASTER_NUM":5,"DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":1,"NATIONAL_ECONOMY_INDUSTRY":36,"COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"小型","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":0,"COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":18661097799,"FINANCE_CONTACT":"邱文华","FINANCE_TEL":66680152,"FINANCE_MOBEL":13921299955,"FINANCE_EMAIL":"[email protected]","COMPANY_TYPE":2,"IS_TECHNOLOGY":2,"REG_ADDRESS":"无锡市惠山区惠际路86号"},
{"ID":1077,"USER_ID":177,"STATE":50,"THIS_ROLE":"","SYS_ID":4501,"USER_NAME_ABCDEF":"无锡新纺欧迪诺电梯有限公司","USER_TYPE_ABCDEF":11,"AREA_ID_C_ABCDEF":450102,"AREA_ID_B_ABCDEF":4501,"AREA_ID_A_ABCDEF":45,"AREA_ID_ABCDEF":450102009,"PAPER_TYPE_ABCDEF":"","PAPER_NO_ABCDEF":913202000000000000,"PAPER_VALIDITY_ABCDEF":"","BANK_OPEN_ABCDEF":"","BANK_ACCOUNT_ABCDEF":"","OPEN_NAME_ABCDEF":"","OPEN_NO_ABCDEF":"","CONTACTS_ABCDEF":"王丹华","FIXED_TEL_ABCDEF":"","MOVE_TEL_ABCDEF":13861811885,"MAIL_ABCDEF":"","FAX_ABCDEF":"","ADDR_ABCDEF":"无锡惠山开发区堰新路580号","REGIST_TIME_ABCE":"00:00.0","REGIST_CAPITAL_AC":12800,"WORKERS_NO_AC":109,"DEVELOP_NO_A":30,"IP_NO_AC":"","IP_SYS_NO_AC":"","MAIN_PRODUCT_A":"电梯","MAIN_MARKET_A":"","IS_ISO_A":"","COMPANY_TYPE_A":"","INDUSTRY_A":"","NATURE_A":"","PROJ_A":"","IS_GAUGE":"","IS_CONTINUE_HIGH":"","LEGAL_PERSON_C":"","PROVINCES_RECORD_C":"","IS_HQ_C":"","HQ_USER_NAME_C":"","HQ_ADDR_C":"","HQ_ZIP_CODE_C":"","HQ_REGIST_ADDR_C":"","HQ_LEGAL_PERSON_C":"","HQ_NO_C":"","HQ_REGIST_TIME_C":"","HQ_EMPLOYMENT_C":"","HQ_PRACTISING_AGENT_C":"","HQ_CONTACTS_C":"","HQ_TEL_C":"","AGENT_NO_C":"","LAW_NO_C":"","NATIONAL_START_C":"","PROVINCE_START_C":"","REGISTRATION_C":"","REGISTRATION_VALIDITY_C":"","CREATE_TIME":"","REMARK":"","SPARE1":"","SPARE2":"","SPARE3":"","IS_DELETE":"","ISO_CREATE_TIME":"","BUSSINESS_TIME_START":"","BUSSINESS_TIME_END":"","REGISTER_PLACE":"","CHECK_DAY":"","REGISTER_STATUS":"","TECHNOLOGY_FIELD":"","INVESTMENT_MONEY":"","DEV_MASTER_NUM":"","DEV_DOCTOR_NUM":"","INDEPENTDENT_LEGAL_PERSON":"","NATIONAL_ECONOMY_INDUSTRY":"","COMPANY_ATTRIBUTE":"","COMPANY_SCALE":"","COMPANY_PROFILE":"","COMPANY_CREDIT_RATING":"","IS_ON_LISTED":"","COMPANY_LISTING_SECTOR":"","LEGAL_PERSON_TEL":"","FINANCE_CONTACT":"","FINANCE_TEL":"","FINANCE_MOBEL":"","FINANCE_EMAIL":"","COMPANY_TYPE":"","IS_TECHNOLOGY":"","REG_ADDRESS":""}
]
return jsonify(clist)
@blueprint.route('/company', methods=['POST'])
def add_company():
company = {}
if request.method == 'POST':
company["USER_NAME_ABCDEF"] = request.form.get("first-name")
company["middle_name"] = request.form.get("middle-name")
company["last_name"] = request.form.get("last-name")
company["gender"] = request.form.get("gender")
company["birthday"] = request.form.get("birthday")
current_app.mgConnection.db.user_info.insert_one(company)
return jsonify("success")
@blueprint.route('/companyDB', methods=['GET'])
def get_company_list_from_db():
conn = current_app.mgConnection.db.user_info.find({}, {'_id':0})
cList = []
for i in conn:
cList.append(i)
return jsonify(cList)
@blueprint.route('/course', methods=['GET'])
def get_course_from_db():
# conn = current_app.mgConnection.db.user_info.find({"type": "course"}, {'_id': 0})
conn = current_app.mgConnection.db.user_info.find({"type": "course", "chapters.author": "唐国安"}, {'_id':0})
cList = []
for i in conn:
cList.append(i)
return jsonify(cList)
@blueprint.route('/<template>')
def route_template(template):
return render_template(template + '.html')
| python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUIs\LoadDataDialog.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_fromMemoryDialog(object):
def setupUi(self, fromMemoryDialog):
fromMemoryDialog.setObjectName("fromMemoryDialog")
fromMemoryDialog.setWindowModality(QtCore.Qt.WindowModal)
fromMemoryDialog.resize(351, 318)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(fromMemoryDialog.sizePolicy().hasHeightForWidth())
fromMemoryDialog.setSizePolicy(sizePolicy)
fromMemoryDialog.setMinimumSize(QtCore.QSize(0, 0))
fromMemoryDialog.setMaximumSize(QtCore.QSize(16777215, 16777215))
fromMemoryDialog.setSizeGripEnabled(False)
fromMemoryDialog.setModal(True)
self.okBtn = QtWidgets.QPushButton(fromMemoryDialog)
self.okBtn.setGeometry(QtCore.QRect(240, 30, 75, 23))
self.okBtn.setObjectName("okBtn")
self.cancelBtn = QtWidgets.QPushButton(fromMemoryDialog)
self.cancelBtn.setGeometry(QtCore.QRect(240, 70, 75, 23))
self.cancelBtn.setObjectName("cancelBtn")
self.clearBtn = QtWidgets.QPushButton(fromMemoryDialog)
self.clearBtn.setGeometry(QtCore.QRect(240, 110, 75, 23))
self.clearBtn.setObjectName("clearBtn")
self.dataText = QtWidgets.QPlainTextEdit(fromMemoryDialog)
self.dataText.setGeometry(QtCore.QRect(20, 20, 201, 280))
self.dataText.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.dataText.setObjectName("dataText")
self.runnerDataFrame = QtWidgets.QFrame(fromMemoryDialog)
self.runnerDataFrame.setGeometry(QtCore.QRect(10, 10, 221, 301))
self.runnerDataFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.runnerDataFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.runnerDataFrame.setObjectName("runnerDataFrame")
self.runnerNrDataText = QtWidgets.QPlainTextEdit(self.runnerDataFrame)
self.runnerNrDataText.setGeometry(QtCore.QRect(10, 10, 101, 280))
self.runnerNrDataText.setTabChangesFocus(True)
self.runnerNrDataText.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.runnerNrDataText.setObjectName("runnerNrDataText")
self.runnerTimeDataText = QtWidgets.QPlainTextEdit(self.runnerDataFrame)
self.runnerTimeDataText.setGeometry(QtCore.QRect(110, 10, 101, 280))
self.runnerTimeDataText.setTabChangesFocus(True)
self.runnerTimeDataText.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
self.runnerTimeDataText.setObjectName("runnerTimeDataText")
self.inputMethodToggle = QtWidgets.QCheckBox(fromMemoryDialog)
self.inputMethodToggle.setGeometry(QtCore.QRect(240, 150, 101, 17))
self.inputMethodToggle.setObjectName("inputMethodToggle")
self.retranslateUi(fromMemoryDialog)
QtCore.QMetaObject.connectSlotsByName(fromMemoryDialog)
def retranslateUi(self, fromMemoryDialog):
_translate = QtCore.QCoreApplication.translate
fromMemoryDialog.setWindowTitle(_translate("fromMemoryDialog", "Įkelti duomenis"))
self.okBtn.setText(_translate("fromMemoryDialog", "Gerai"))
self.cancelBtn.setText(_translate("fromMemoryDialog", "Atšaukti"))
self.clearBtn.setText(_translate("fromMemoryDialog", "Valyti"))
self.dataText.setPlaceholderText(_translate("fromMemoryDialog", "Dalyvio nr. ir laikai"))
self.runnerNrDataText.setPlaceholderText(_translate("fromMemoryDialog", "Dalyvio nr."))
self.runnerTimeDataText.setPlaceholderText(_translate("fromMemoryDialog", "Laikai"))
self.inputMethodToggle.setText(_translate("fromMemoryDialog", "Bendras įvedimas"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
fromMemoryDialog = QtWidgets.QDialog()
ui = Ui_fromMemoryDialog()
ui.setupUi(fromMemoryDialog)
fromMemoryDialog.show()
sys.exit(app.exec_())
| python |
import random
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Person:
def __init__(self, name, hp, mp, atk, df, magic, items,type):
self.maxhp = hp
self.name = name
self.hp = hp
self.maxmp = mp
self.mp = mp
self.atkl = atk - 10
self.atkh = atk + 10
self.df = df
self.magic = magic
self.items = items
self.type = type
self.action = ["Attack", "Magic", "Items"]
def generate_damage(self):
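        # Damage is drawn uniformly from [atk - 10, atk + 10) (randrange excludes the upper bound).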
return random.randrange(self.atkl,self.atkh)
def update_dmg(self,list):
type = list[0].type
        for i in list[:]:  # iterate over a copy so removing defeated fighters is safe
if i.get_hp() == 0:
list.remove(i)
print(i.name, " defeated")
if(len(list) < 1):
if(type == "e"):
print("You Won")
else:
print("Enemy Won")
return False
return list
def take_damage(self,dmg):
self.hp -= dmg
if self.hp < 0:
self.hp = 0
return self.hp
def get_hp(self):
return self.hp
def get_maxhp(self):
return self.maxhp
def get_mp(self):
return self.mp
def get_maxmp(self):
return self.maxmp
def reduce_mp(self,cost):
self.mp -= cost
def heal(self,dmg):
if self.hp + dmg > self.maxhp:
self.hp = self.maxhp
else:
self.hp += dmg
def choose_enemy_spell(self):
magic_choice = random.randrange(0,len(self.magic))
spell = self.magic[magic_choice]
magic_dmg = self.generate_damage()
pct = (self.hp/self.maxhp)*100
        if self.mp < spell.cost or (spell.type == "White" and pct > 50):
            # re-roll: the spell is unaffordable, or it is a healing spell while HP is still above 50%
            return self.choose_enemy_spell()
        return spell, magic_dmg
def choose_action(self):
print("\n "+self.name+"'s turn")
print(" Actions: ")
i = 1
for item in self.action:
print(" " + str(i)+ ".", item)
i += 1
def choose_magic(self):
print(" Magics: ")
i = 1
for spell in self.magic:
print(" " + str(i)+ ".", spell.name, "(cost:", str(spell.cost) + ")")
i += 1
def choose_item(self):
print(" Items: ")
i = 1
for item in self.items:
print(" " + str(i)+ ".", item["item"].name, ":", item["item"].description, " (x" + str(item["quantity"])+")")
i += 1
def choose_target(self,enemies):
        print(" Enemies: ")
i=1
for enemy in enemies:
print(" " + str(i)+ ".", enemy.name)
i += 1
choice = int(input("Choose Enemy: ")) -1
return choice
def get_enemy_stat(self):
hp_bar = "█"*int((self.hp/self.maxhp)*100 / 2) + " "*(50-len(str("█"*int((self.hp/self.maxhp)*100 / 2))))
hp_string = " "*(11-len(str(self.hp) + "/" + str(self.maxhp))) + str(self.hp) + "/" + str(self.maxhp)
print(" "+ 50*"_")
print(self.name+":"+ (16-len(self.name))*" ", hp_string, "|" + hp_bar + "|")
def get_stat(self):
hp_bar = "█"*int((self.hp/self.maxhp)*100 / 4) + " "*(25-len(str("█"*int((self.hp/self.maxhp)*100 / 4))))
mp_bar = "█"*int((self.mp/self.maxmp)*100 / 10) + " "*(10-len(str("█"*int((self.mp/self.maxmp)*100 / 10))))
hp_string = " "*(11-len(str(self.hp) + "/" + str(self.maxhp))) + str(self.hp) + "/" + str(self.maxhp)
mp_string = " "*(9-len(str(self.mp) + "/" + str(self.maxmp))) + str(self.mp) + "/" + str(self.maxmp)
print(" _________________________ __________")
print(self.name+":"+ (16-len(self.name))*" ", hp_string, "|" + hp_bar + "| ", mp_string, "|" + mp_bar + "|") | python |
import torch
import torch.utils.data as data
import os
import pickle
import numpy as np
from data_utils import Vocabulary
from data_utils import load_data_and_labels_klp, load_data_and_labels_exo
from eunjeon import Mecab
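# Mapping from BIO-style NER tags to indices; collate_fn uses it to build multi-hot
# lexicon feature vectors (one slot per tag, plus '<unk>' and 'O').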
NER_idx_dic = {'<unk>': 0, 'B-PS_PROF': 1, 'B-PS_ENT': 2, 'B-PS_POL': 3, 'B-PS_NAME': 4,
'B-AF_REC': 5, 'B-AF_WARES': 6, 'B-AF_ITEM': 7, 'B-AF_SERVICE': 8, 'B-AF_OTHS': 9,
'B-OG_PRF': 10, 'B-OG_PRNF': 11, 'B-OG_PBF': 12, 'B-OG_PBNF': 13,
'B-LC_CNT': 14, 'B-LC_PLA': 15, 'B-LC_ADD': 16, 'B-LC_OTHS': 17,
'B-CV_TECH': 18, 'B-CV_LAWS': 19, 'B-EV_LT': 20, 'B-EV_ST': 21,
'B-GR_PLOR': 22, 'B-GR_PLCI': 23, 'B-TM_FLUC': 24, 'B-TM_ECOFIN': 25, 'B-TM_FUNC': 26,
'B-TM_CURR': 27, 'B-TM_OTHS': 28, 'B-PD_PD': 29, 'B-TI_TIME': 30,
'B-NUM_PRICE': 31, 'B-NUM_PERC': 32, 'B-NUM_OTHS': 33, 'I-PS_PROF': 34,
'I-PS_ENT': 35, 'I-PS_POL': 36, 'I-PS_NAME': 37, 'I-AF_REC': 38,
'I-AF_WARES': 39, 'I-AF_ITEM': 40, 'I-AF_SERVICE': 41, 'I-AF_OTHS': 42, 'I-OG_PRF': 43,
'I-OG_PRNF': 44, 'I-OG_PBF': 45, 'I-OG_PBNF': 46,
'I-LC_CNT': 47, 'I-LC_PLA': 48, 'I-LC_ADD': 49, 'I-LC_OTHS': 50, 'I-CV_TECH': 51, 'I-CV_LAWS': 52,
'I-EV_LT': 53, 'I-EV_ST': 54,
'I-GR_PLOR': 55, 'I-GR_PLCI': 56, 'I-TM_FLUC': 57, 'I-TM_ECOFIN': 58, 'I-TM_FUNC': 59,
'I-TM_CURR': 60, 'I-TM_OTHS': 61, 'I-PD_PD': 62,
'I-TI_TIME': 63, 'I-NUM_PRICE': 64, 'I-NUM_PERC': 65, 'I-NUM_OTHS': 66, 'O': 67}
class DocumentDataset (data.Dataset):
    """Dataset of tokenized sentences with character, POS and lexicon features and NER labels."""
def __init__(self, vocab, char_vocab, pos_vocab, lex_dict, x_text, x_split, x_pos, labels):
"""
:param vocab:
"""
self.vocab = vocab
self.char_vocab = char_vocab
self.pos_vocab = pos_vocab
self.lex_dict = lex_dict
self.x_text = x_text
self.x_split = x_split
self.x_pos = x_pos
self.labels = labels
def __getitem__(self, index):
"""Returns 'one' data pair """
x_text_item = self.x_text[index]
x_split_item = self.x_split[index]
x_pos_item = self.x_pos[index]
label_item = self.labels[index]
x_text_char_item = []
for x_word in x_text_item:
x_char_item = []
for x_char in x_word:
x_char_item.append(x_char)
x_text_char_item.append(x_char_item)
x_idx_item = prepare_sequence(x_text_item, self.vocab.word2idx)
x_idx_char_item = prepare_char_sequence(x_text_char_item, self.char_vocab.word2idx)
x_pos_item = prepare_sequence(x_pos_item, self.pos_vocab.word2idx)
x_lex_item = prepare_lex_sequence(x_text_item, self.lex_dict)
label = torch.LongTensor(label_item)
# print("label")
# print(label)
# print(type(label))
return x_text_item, x_split_item, x_idx_item, x_idx_char_item, x_pos_item, x_lex_item, label
def __len__(self):
return len(self.x_text)
def prepare_sequence(seq, word_to_idx):
idxs = list()
# idxs.append(word_to_idx['<start>'])
for word in seq:
if word not in word_to_idx:
idxs.append(word_to_idx['<unk>'])
else:
idxs.append(word_to_idx[word])
# print(word_to_idx[word])
# idxs.append(word_to_idx['<eos>'])
return idxs
def prepare_char_sequence(seq, char_to_idx):
char_idxs = list()
# idxs.append(word_to_idx['<start>'])
for word in seq:
idxs = list()
for char in word:
if char not in char_to_idx:
idxs.append(char_to_idx['<unk>'])
else:
idxs.append(char_to_idx[char])
char_idxs.append(idxs)
# print(word_to_idx[word])
# idxs.append(word_to_idx['<eos>'])
return char_idxs
def prepare_lex_sequence(seq, lex_to_ner_list):
lex_idxs = list()
# idxs.append(word_to_idx['<start>'])
for lexicon in seq:
if lexicon not in lex_to_ner_list:
lex_idxs.append([lex_to_ner_list['<unk>']])
else:
lex_idxs.append(lex_to_ner_list[lexicon])
# print(word_to_idx[word])
# idxs.append(word_to_idx['<eos>'])
return lex_idxs
def collate_fn(data):
    """Create mini-batch tensors: sort samples by sentence length (descending) and pad
    the word, character, POS and lexicon features up to the longest sentence in the batch."""
data.sort(key=lambda x: len(x[0]), reverse=True)
x_text_batch, x_split_batch, x_idx_batch, x_idx_char_batch, x_pos_batch, x_lex_batch, labels = zip(*data)
lengths = [len(label) for label in labels]
targets = torch.zeros(len(labels), max(lengths), 8).long()
for i, label in enumerate(labels):
end = lengths[i]
targets[i, :end] = label[:end]
max_word_len = int(np.amax([len(word_tokens) for word_tokens in x_idx_batch])) # ToDo: usually, np.mean can be applied
batch_size = len(x_idx_batch)
batch_words_len = []
batch_words_len = [len(word_tokens) for word_tokens in x_idx_batch]
batch_words_len = np.array(batch_words_len)
# Padding procedure (word)
padded_word_tokens_matrix = np.zeros((batch_size, max_word_len), dtype=np.int64)
for i in range(padded_word_tokens_matrix.shape[0]):
for j in range(padded_word_tokens_matrix.shape[1]):
try:
padded_word_tokens_matrix[i, j] = x_idx_batch[i][j]
except IndexError:
pass
max_char_len = int(np.amax([len(char_tokens) for word_tokens in x_idx_char_batch for char_tokens in word_tokens]))
if max_char_len < 5: # size of maximum filter of CNN
max_char_len = 5
# Padding procedure (char)
padded_char_tokens_matrix = np.zeros((batch_size, max_word_len, max_char_len), dtype=np.int64)
for i in range(padded_char_tokens_matrix.shape[0]):
for j in range(padded_char_tokens_matrix.shape[1]):
            for k in range(padded_char_tokens_matrix.shape[2]):
try:
padded_char_tokens_matrix[i, j, k] = x_idx_char_batch[i][j][k]
except IndexError:
pass
# Padding procedure (pos)
padded_pos_tokens_matrix = np.zeros((batch_size, max_word_len), dtype=np.int64)
for i in range(padded_pos_tokens_matrix.shape[0]):
for j in range(padded_pos_tokens_matrix.shape[1]):
try:
padded_pos_tokens_matrix[i, j] = x_pos_batch[i][j]
except IndexError:
pass
# Padding procedure (lex)
padded_lex_tokens_matrix = np.zeros((batch_size, max_word_len, len(NER_idx_dic)))
    for i in range(padded_lex_tokens_matrix.shape[0]):
        for j in range(padded_lex_tokens_matrix.shape[1]):
            try:
                for x_lex in x_lex_batch[i][j]:
                    k = NER_idx_dic[x_lex]
                    padded_lex_tokens_matrix[i, j, k] = 1
            except IndexError:
                pass
padded_word_tokens_matrix = torch.from_numpy(padded_word_tokens_matrix)
padded_char_tokens_matrix = torch.from_numpy(padded_char_tokens_matrix)
padded_pos_tokens_matrix = torch.from_numpy(padded_pos_tokens_matrix)
padded_lex_tokens_matrix = torch.from_numpy(padded_lex_tokens_matrix).float()
return x_text_batch, x_split_batch, padded_word_tokens_matrix, padded_char_tokens_matrix, padded_pos_tokens_matrix, padded_lex_tokens_matrix, targets, batch_words_len
def get_loader(data_file_dir, vocab, char_vocab, pos_vocab, lex_dict, batch_size, shuffle, num_workers, dataset='klp'):
""""""
if dataset == 'klp':
x_list, x_pos_list, x_split_list, y_list = load_data_and_labels_klp(data_file_dir=data_file_dir)
y_list = np.array(y_list)
elif dataset == 'exo':
x_list, x_pos_list, x_split_list, y_list = load_data_and_labels_exo(data_file_dir='data_in/EXOBRAIN_NE_CORPUS_10000.txt')
y_list = np.array(y_list)
elif dataset == 'both':
x_list, x_pos_list, x_split_list, y_list = load_data_and_labels_klp(data_file_dir=data_file_dir)
x_list_2, x_pos_list_2, x_split_list_2, y_list_2 = load_data_and_labels_exo(data_file_dir='data_in/EXOBRAIN_NE_CORPUS_10000.txt')
x_list = x_list + x_list_2
x_pos_list = x_pos_list + x_pos_list_2
x_split_list = x_split_list + x_split_list_2
y_list = y_list + y_list_2
y_list = np.array(y_list)
print("len(x_list):",len(x_list))
print("len(y_list):",len(y_list))
document = DocumentDataset(vocab=vocab,
char_vocab=char_vocab,
pos_vocab=pos_vocab,
lex_dict=lex_dict,
x_text=x_list,
x_split=x_split_list,
x_pos=x_pos_list,
labels=y_list)
data_loader = torch.utils.data.DataLoader(dataset=document,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn)
return data_loader | python |
"""
Test file to test RetrieveMovie.py
"""
from Product.Database.DatabaseManager.Retrieve.RetrieveMovie import RetrieveMovie
from Product.Database.DBConn import create_session
from Product.Database.DBConn import Movie
def test_retrieve_movie():
"""
Author: John Andree Lidquist
Date: 2017-11-16
Last Updated:
Purpose: Assert that a movie, or all movies, are retrieved correctly
"""
# PRE-CONDITIONS
movie_id = -1
movie_title = "dummy"
movie_year = 1111
# We create a session and add a dummy movie that we can later retrieve
session = create_session()
dummy_movie = Movie(id=movie_id, title=movie_title, year=movie_year)
session.add(dummy_movie)
session.commit() # We need to close the session, else we get an error when trying to delete it
session.close()
# EXPECTED OUTPUT
expected_id = movie_id
expected_title = movie_title
expected_year = movie_year
# OBSERVED OUTPUT
# We call the method to be tested to get 1) The movie we added above, and 2) All the movies
# which is done by not setting the parameter "movie_id"
retrieve_movie = RetrieveMovie()
observed_one_movie = retrieve_movie.retrieve_movie(movie_id=movie_id)
observed_all_movies = retrieve_movie.retrieve_movie()
# After adding the dummy movie we remove them again.
session.delete(observed_one_movie)
session.commit()
session.close()
assert observed_one_movie
assert observed_one_movie.id == expected_id
assert observed_one_movie.title == expected_title
assert observed_one_movie.year == expected_year
assert observed_all_movies
| python |
import torch
import shutil
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def rotation(inputs):
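    # Self-supervised rotation pretext task: each image is rotated by a random multiple
    # of 90 degrees and the rotation class (0-3) is returned as the prediction target.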
batch = inputs.shape[0]
    target = torch.tensor(np.random.permutation([0, 1, 2, 3] * (int(batch / 4) + 1)), device=inputs.device)[:batch]
target = target.long()
image = torch.zeros_like(inputs)
image.copy_(inputs)
for i in range(batch):
image[i, :, :, :] = torch.rot90(inputs[i, :, :, :], target[i], [1, 2])
return image, target
def cosine_annealing(step, total_steps, lr_max, lr_min):
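    # Cosine annealing: lr(step) = lr_min + (lr_max - lr_min) * (1 + cos(pi * step / total_steps)) / 2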
return lr_min + (lr_max - lr_min) * 0.5 * (1 + np.cos(step / total_steps * np.pi))
def adjust_learning_rate(optimizer, epoch, args):
epoch = epoch + 1
if epoch <= 5:
lr = args.lr * epoch / 5
    elif epoch > 180:
        lr = args.lr * 0.0001
    elif epoch > 160:
        lr = args.lr * 0.01
else:
lr = args.lr
for param_group in optimizer.param_groups:
param_group['lr'] = lr
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
def __init__(self, dataset, indices=None, num_samples=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = [0] * len(np.unique(dataset.targets))
for idx in self.indices:
label = self._get_label(dataset, idx)
label_to_count[label] += 1
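        # Class-balanced weighting via the "effective number of samples":
        # per-class weight = (1 - beta) / (1 - beta ** n_c), so rare classes are drawn more often.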
beta = 0.9999
effective_num = 1.0 - np.power(beta, label_to_count)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
# weight for each sample
weights = [per_cls_weights[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
return dataset.targets[idx]
def __iter__(self):
return iter(torch.multinomial(self.weights, self.num_samples, replacement=True).tolist())
def __len__(self):
return self.num_samples
def calc_confusion_mat(val_loader, model, args, save_path):
model.eval()
all_preds = []
all_targets = []
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
output = model(input)
_, pred = torch.max(output, 1)
all_preds.extend(pred.cpu().numpy())
all_targets.extend(target.cpu().numpy())
cf = confusion_matrix(all_targets, all_preds).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
cls_acc = cls_hit / cls_cnt
print('Class Accuracy : ')
print(cls_acc)
classes = [str(x) for x in args.cls_num_list]
plot_confusion_matrix(all_targets, all_preds, classes, normalize=True, title=args.confusion_title)
plt.savefig(os.path.join(save_path, 'confusion_matrix.pdf'))
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
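    # NOTE: the class labels are hard-coded to '0'..'9' below, overriding the `classes` argument.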
classes = [str(i) for i in range(10)]
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes)
# Rotate the tick labels and set their alignment.
# plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# plt.title(title, fontsize=18)
plt.xlabel('Predicted label', fontsize=17)
plt.ylabel('True label', fontsize=17)
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
norm = 1000 if normalize else 1
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j] / norm, fmt),
ha="center", va="center",
color="black") # color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
return ax
def prepare_folders(args):
folders_util = [args.root_log, args.root_model,
os.path.join(args.root_log, args.store_name),
os.path.join(args.root_model, args.store_name)]
for folder in folders_util:
if not os.path.exists(folder):
print(f'Creating folder: {folder}')
os.mkdir(folder)
def save_checkpoint(args, state, is_best):
filename = f'{args.root_model}/{args.store_name}/ckpt.pth.tar'
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, filename.replace('pth.tar', 'best.pth.tar'))
class AverageMeter(object):
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
| python |
# -*- coding: utf-8 -*-
"""These test the utils.py functions."""
from __future__ import unicode_literals
import pytest
from hypothesis import given
from hypothesis.strategies import binary, floats, integers, lists, text
from natsort.compat.py23 import PY_VERSION, py23_str
from natsort.utils import natsort_key
if PY_VERSION >= 3:
long = int
def str_func(x):
if isinstance(x, py23_str):
return x
else:
raise TypeError("Not a str!")
def fail(_):
raise AssertionError("This should never be reached!")
@given(floats(allow_nan=False) | integers())
def test_natsort_key_with_numeric_input_takes_number_path(x):
assert natsort_key(x, None, str_func, fail, lambda y: y) is x
@pytest.mark.skipif(PY_VERSION < 3, reason="only valid on python3")
@given(binary().filter(bool))
def test_natsort_key_with_bytes_input_takes_bytes_path(x):
assert natsort_key(x, None, str_func, lambda y: y, fail) is x
@given(text())
def test_natsort_key_with_text_input_takes_string_path(x):
assert natsort_key(x, None, str_func, fail, fail) is x
@given(lists(elements=text(), min_size=1, max_size=10))
def test_natsort_key_with_nested_input_takes_nested_path(x):
assert natsort_key(x, None, str_func, fail, fail) == tuple(x)
@given(text())
def test_natsort_key_with_key_argument_applies_key_before_processing(x):
assert natsort_key(x, len, str_func, fail, lambda y: y) == len(x)
| python |
from ..abstract import ErdReadOnlyConverter
from ..primitives import *
from gehomesdk.erd.values.fridge import FridgeIceBucketStatus, ErdFullNotFull
class FridgeIceBucketStatusConverter(ErdReadOnlyConverter[FridgeIceBucketStatus]):
def erd_decode(self, value: str) -> FridgeIceBucketStatus:
"""Decode Ice bucket status"""
if not value:
n = 0
else:
n = erd_decode_int(value)
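        # Bit layout of the status value: bit 0 = fridge bucket present, bit 1 = freezer bucket
        # present, bit 2 = fridge bucket full, bit 3 = freezer bucket full.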
is_present_ff = bool(n & 1)
is_present_fz = bool(n & 2)
state_full_ff = ErdFullNotFull.FULL if n & 4 else ErdFullNotFull.NOT_FULL
state_full_fz = ErdFullNotFull.FULL if n & 8 else ErdFullNotFull.NOT_FULL
if not is_present_ff:
state_full_ff = ErdFullNotFull.NA
if not is_present_fz:
state_full_fz = ErdFullNotFull.NA
        if not (is_present_ff or is_present_fz):
# No ice buckets at all
total_status = ErdFullNotFull.NA
elif (state_full_ff == ErdFullNotFull.NOT_FULL) or (state_full_fz == ErdFullNotFull.NOT_FULL):
# At least one bucket is not full
total_status = ErdFullNotFull.NOT_FULL
else:
total_status = ErdFullNotFull.FULL
ice_status = FridgeIceBucketStatus(
state_full_fridge=state_full_ff,
state_full_freezer=state_full_fz,
is_present_fridge=is_present_ff,
is_present_freezer=is_present_fz,
total_status=total_status,
)
return ice_status
| python |
import datetime
import unittest
import unittest.mock
from conflowgen.api.container_flow_generation_manager import ContainerFlowGenerationManager
from conflowgen.application.models.container_flow_generation_properties import ContainerFlowGenerationProperties
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_seeders import mode_of_transport_distribution_seeder
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestContainerFlowGenerationManager(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
sqlite_db = setup_sqlite_in_memory_db()
sqlite_db.create_tables([
ContainerFlowGenerationProperties,
ModeOfTransportDistribution,
Schedule
])
mode_of_transport_distribution_seeder.seed()
self.container_flow_generation_manager = ContainerFlowGenerationManager()
def test_generate_with_overwrite(self):
with unittest.mock.patch.object(
self.container_flow_generation_manager.container_flow_generation_service,
'generate',
return_value=None) as mock_method:
self.container_flow_generation_manager.generate(overwrite=True)
mock_method.assert_called_once()
def test_generate_without_overwrite_and_no_previous_data(self):
with unittest.mock.patch.object(
self.container_flow_generation_manager.container_flow_generation_service,
'generate',
return_value=None) as mock_generate, \
unittest.mock.patch.object(
self.container_flow_generation_manager.container_flow_generation_service,
'container_flow_data_exists',
return_value=False) as mock_check:
self.container_flow_generation_manager.generate(overwrite=False)
mock_check.assert_called_once()
mock_generate.assert_called_once()
def test_generate_without_overwrite_and_some_previous_data(self):
with unittest.mock.patch.object(
self.container_flow_generation_manager.container_flow_generation_service,
'generate',
return_value=None) as mock_generate, \
unittest.mock.patch.object(
self.container_flow_generation_manager.container_flow_generation_service,
'container_flow_data_exists',
return_value=True) as mock_check:
self.container_flow_generation_manager.generate(overwrite=False)
mock_check.assert_called_once()
mock_generate.assert_not_called()
def test_get_properties(self):
class MockedProperties:
name = "my test data"
start_date = datetime.date(2030, 1, 1)
end_date = datetime.date(2030, 12, 31)
transportation_buffer = 0.2
minimum_dwell_time_of_import_containers_in_hours = 3
minimum_dwell_time_of_export_containers_in_hours = 4
minimum_dwell_time_of_transshipment_containers_in_hours = 5
maximum_dwell_time_of_import_containers_in_hours = 40
maximum_dwell_time_of_export_containers_in_hours = 50
maximum_dwell_time_of_transshipment_containers_in_hours = 60
dict_properties = {
'name': "my test data",
'start_date': datetime.date(2030, 1, 1),
'end_date': datetime.date(2030, 12, 31),
'transportation_buffer': 0.2,
'minimum_dwell_time_of_import_containers_in_hours': 3,
'minimum_dwell_time_of_export_containers_in_hours': 4,
'minimum_dwell_time_of_transshipment_containers_in_hours': 5,
'maximum_dwell_time_of_import_containers_in_hours': 40,
'maximum_dwell_time_of_export_containers_in_hours': 50,
'maximum_dwell_time_of_transshipment_containers_in_hours': 60
}
with unittest.mock.patch.object(
self.container_flow_generation_manager.container_flow_generation_properties_repository,
'get_container_flow_generation_properties',
return_value=MockedProperties) as mock_method:
retrieved_properties = self.container_flow_generation_manager.get_properties()
mock_method.assert_called_once()
self.assertDictEqual(dict_properties, retrieved_properties)
def test_set_properties(self):
with unittest.mock.patch.object(
self.container_flow_generation_manager.container_flow_generation_properties_repository,
'set_container_flow_generation_properties',
return_value=None) as mock_method:
self.container_flow_generation_manager.set_properties(
datetime.datetime.now().date(), datetime.datetime.now().date()
)
properties = ContainerFlowGenerationProperties.get()
mock_method.assert_called_once_with(properties)
def test_container_flow_data_exists(self):
with unittest.mock.patch.object(
self.container_flow_generation_manager.container_flow_generation_service,
'container_flow_data_exists',
return_value=True) as mock_method:
response = self.container_flow_generation_manager.container_flow_data_exists()
mock_method.assert_called_once()
self.assertTrue(response)
| python |
from polecat.rest.schema_builder import RestSchemaBuilder
def test_schema_builder():
schema = RestSchemaBuilder().build()
assert len(schema.routes) > 0
| python |
from PIL import Image
import matplotlib.pyplot as plt
# Log images
def log_input_image(x, opts):
return tensor2im(x)
def tensor2im(var):
# var shape: (3, H, W)
var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
var = ((var + 1) / 2)
var[var < 0] = 0
var[var > 1] = 1
var = var * 255
return Image.fromarray(var.astype('uint8'))
def vis_faces(log_hooks):
display_count = len(log_hooks)
fig = plt.figure(figsize=(8, 4 * display_count))
gs = fig.add_gridspec(display_count, 3)
for i in range(display_count):
hooks_dict = log_hooks[i]
fig.add_subplot(gs[i, 0])
if 'diff_input' in hooks_dict:
vis_faces_with_id(hooks_dict, fig, gs, i)
else:
vis_faces_no_id(hooks_dict, fig, gs, i)
plt.tight_layout()
return fig
def vis_faces_with_id(hooks_dict, fig, gs, i):
plt.imshow(hooks_dict['input_face'])
plt.title('Input\nOut Sim={:.2f}'.format(float(hooks_dict['diff_input'])))
fig.add_subplot(gs[i, 1])
plt.imshow(hooks_dict['target_face'])
plt.title('Target\nIn={:.2f}, Out={:.2f}'.format(float(hooks_dict['diff_views']),
float(hooks_dict['diff_target'])))
fig.add_subplot(gs[i, 2])
plt.imshow(hooks_dict['output_face'])
plt.title('Output\n Target Sim={:.2f}'.format(float(hooks_dict['diff_target'])))
def vis_faces_no_id(hooks_dict, fig, gs, i):
plt.imshow(hooks_dict['input_face'], cmap="gray")
plt.title('Input')
fig.add_subplot(gs[i, 1])
plt.imshow(hooks_dict['target_face'])
plt.title('Target')
fig.add_subplot(gs[i, 2])
plt.imshow(hooks_dict['output_face'])
plt.title('Output')
| python |
import csv
from argparse import ArgumentParser
import re
parser = ArgumentParser()
parser.add_argument('--input_file', type=str)
parser.add_argument('--output_csv_file', type=str)
parser.add_argument('--option', default='eval', choices=['eval', 'debug'])
args = parser.parse_args()
lang_regex = re.compile(r'lang=(\w+)')
row_dicts = []
with open(args.input_file, 'r') as f_in:
for line in f_in:
if args.option == 'eval':
fieldnames = ['language', 'em_accuracy', 'bleu_score']
            em_regex = re.compile(r'"em":\s(\d+\.\d+)')
            bleu_regex = re.compile(r'"bleu":\s(\d+\.\d+)')
if ('lang' in line):
language = lang_regex.findall(line)[0]
elif ('em' in line) or ('bleu' in line):
em = em_regex.findall(line)[0]
bleu = bleu_regex.findall(line)[0]
row_dicts.append({'language': language, 'em_accuracy': em, 'bleu_score': bleu})
elif args.option == 'debug':
fieldnames = ['language', 'size', 'em_accuracy', 'em_wo_params', 'syntax']
if ('lang' in line):
language = lang_regex.findall(line)[0]
elif 'eval' in line or 'test' in line:
_, _, size, em, em_wo_params, fm, dm, nfm, syntax = map(lambda part: part.strip(), line.split(','))
row_dicts.append({'language': language, 'size': size, 'em_accuracy': float(em)*100, 'em_wo_params': float(em_wo_params)*100, 'syntax': float(syntax)*100})
with open(args.output_csv_file, 'w') as f_out:
csv_writer = csv.DictWriter(f_out, fieldnames)
csv_writer.writeheader()
csv_writer.writerows(row_dicts)
| python |
import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super(DecoderRNN, self).__init__()
self.embed_size = embed_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.num_layers = num_layers
self.embedding = nn.Embedding(vocab_size, embed_size)
self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
self.fc = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
        # No initial hidden state is passed, so the LSTM defaults to zeros (as per the PyTorch docs).
        # Trim the last caption token so the embedded captions line up with the image feature prepended below.
embed = self.embedding(captions[:,:-1])
        # Prepend the image feature to the caption embeddings along the time dimension
        embedded_input = torch.cat((features.unsqueeze(1), embed), dim=1)  # shape: (batch_size, caption_length, embed_size)
        hidden_op, (h_1, c_1) = self.lstm(embedded_input)  # no initial hidden state is passed, so it defaults to zeros
output = self.fc(hidden_op)
return output
def sample(self, inputs, states=None, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
tokens = []
for i in range(max_len):
hidden_output, states = self.lstm(inputs, states)
outputs = self.fc(hidden_output.squeeze(1))
_, predicted = outputs.max(dim=1) # predicted: (1, 1)
tokens.append(predicted.item())
inputs = self.embedding(predicted) # inputs: (1, embed_size)
inputs = inputs.unsqueeze(1) # inputs: (1, 1, embed_size)
return tokens | python |
import unittest
from unittest.mock import patch
import pytest
import Parser.languageInterface as languageInterface
# class Test_LanguageInterface(unittest.TestCase):
# @patch('Parser.languageInterface.LanguageInterface.getSymbols')
# @patch('Parser.languageInterface.LanguageInterface.printParsedData')
# @patch('Parser.languageInterface.LanguageInterface.uploadToApi')
# def test_parseXMLFileWithoutVerbose(self,
# mock_uploadToApi,
# mock_printParsedData,
# mock_getSymbols):
# '''
# it should call getSymbols and uploadToApi but not printParsedData
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.parseXMLFile('filename')
# mock_getSymbols.assert_called_once()
# mock_printParsedData.assert_not_called()
# mock_uploadToApi.assert_called_once()
# @patch('Parser.languageInterface.useful.verbose', True)
# @patch('Parser.languageInterface.LanguageInterface.getSymbols')
# @patch('Parser.languageInterface.LanguageInterface.printParsedData')
# @patch('Parser.languageInterface.LanguageInterface.uploadToApi')
# def test_parseXMLFileWithVerbose(self,
# mock_uploadToApi,
# mock_printParsedData,
# mock_getSymbols):
# '''
# it should call getSymbols, uploadToApi and printParsedData
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.parseXMLFile('filename')
# mock_getSymbols.assert_called_once()
# mock_printParsedData.assert_called_once()
# mock_uploadToApi.assert_called_once()
# @patch('Parser.languageInterface.LanguageInterface.printParsedData')
# @patch('Parser.languageInterface.LanguageInterface.uploadToApi')
# def test_getSymbolsNotImplemented(self,
# mock_uploadToApi,
# mock_printParsedData):
# '''
# it should raise an exception as getSymbols isn't implemented
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# with pytest.raises(Exception) as e:
# assert interface.parseXMLFile('filename')
# assert str(e.value) == 'Not implemented'
# def test_appendToSymbols(self):
# '''
# should append the symbol to the list
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.appendToSymbols('variable', 'symbol')
# self.assertEqual(interface.symbols[0]['symbol_type'], 'variable')
# self.assertEqual(interface.symbols[0]['symbol_list'][0], 'symbol')
# interface.appendToSymbols('variable', 'symbol2')
# self.assertEqual(interface.symbols[0]['symbol_list'][1], 'symbol2')
# @patch('Parser.languageInterface.printingFunctions.printUnions')
# def test_printParsedData(self,
# mock_printUnions):
# '''
# should call the union printing function
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.symbols = [
# {
# 'symbol_type': 'union',
# 'symbol_list': ['symbol']
# }
# ]
# interface.printParsedData()
# mock_printUnions.assert_called_once()
# @patch('Parser.languageInterface.useful.upload', False)
# @patch('Parser.languageInterface.AIClient')
# @patch('Parser.languageInterface.JSONRequestCrafter')
# def test_uploadToApiNoUpload(self,
# mock_JSONRequestCrafter,
# mock_AIClient):
# '''
# it shouldn't call the JsonRequestCrafter function as upload isn't on
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.symbols = [
# {
# 'symbol_type': 'union',
# 'symbol_list': ['symbol']
# }
# ]
# interface.uploadToApi()
# mock_JSONRequestCrafter.assert_not_called()
# @patch('Parser.languageInterface.useful.upload', True)
# @patch('Parser.languageInterface.AIClient')
# @patch('Parser.languageInterface.JSONRequestCrafter')
# def test_uploadToApiUpload(self,
# mock_JSONRequestCrafter,
# mock_AIClient):
# '''
# it should call the JsonRequestCrafter function
# '''
# interface = languageInterface.LanguageInterface('lang', 'lib')
# interface.symbols = [
# {
# 'symbol_type': 'union',
# 'symbol_list': ['symbol']
# }
# ]
# interface.uploadToApi()
# mock_JSONRequestCrafter.assert_called_once()
| python |
import os
import os.path as op
from sklearn.externals import joblib as jl
from glob import glob
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import f_classif, SelectPercentile
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, f1_score
from skbold.postproc import MvpResults
from skbold.utils import ArrayPermuter
from sklearn.externals.joblib import Parallel, delayed
import numpy as np
def run_subject(sub, N_PERMS):
sub_name = op.basename(op.dirname(sub))
out_dir = op.join('RESULTS', 'TRAIN', 'WITHIN_SUBS', sub_name)
print("Processing sub-%s" % sub_name)
mvp = jl.load(sub)
pipe = Pipeline([
('ufs', SelectPercentile(score_func=f_classif, percentile=100)),
('scaler', StandardScaler()),
('permuter', ArrayPermuter()),
('clf', SVC(kernel='linear'))
])
for i in np.arange(N_PERMS):
mvp_results = MvpResults(mvp=mvp, type_model='classification',
n_iter=10, feature_scoring='fwm',
verbose=False, accuracy=accuracy_score,
f1_score=f1_score)
skf = StratifiedKFold(n_splits=10)
for train_idx, test_idx in skf.split(X=mvp.X, y=mvp.y):
X_train, y_train = mvp.X[train_idx], mvp.y[train_idx]
X_test, y_test = mvp.X[test_idx], mvp.y[test_idx]
pipe.fit(X_train, y_train)
pred = pipe.predict(X_test)
mvp_results.update(pipeline=pipe, test_idx=test_idx, y_pred=pred)
mvp_results.compute_scores(maps_to_tstat=False)
tmp_out_dir = op.join(out_dir, 'perm_%i' % (i + 1))
if not op.isdir(tmp_out_dir):
os.makedirs(tmp_out_dir)
mvp_results.write(out_path=tmp_out_dir)
if __name__ == '__main__':
N_PERMS = 1000
subjects = sorted(glob(op.join('MVP', '???', 'mvp_train_nonzero.jl')))
_ = Parallel(n_jobs=6)(delayed(run_subject)(sub, N_PERMS)
for sub in subjects)
| python |
import os
import copy
from util.queryParser import SimpleQueryParser
def gene_imagenet_synset(output_file):
sid2synset = {}
for line in open('visualness_data/words.txt'):
sid, synset = line.strip().split('\t')
sid2synset[sid] = synset
fout = open(output_file, 'w')
for line in open('visualness_data/imagenet.synsetid.txt'):
sid = line.strip()
fout.write(sid + "\t" + sid2synset[sid].lower().replace('-', ' ') + '\n')
fout.close()
def readImageNetSynset():
len2visualsynset = {}
data_file = 'visualness_data/imagenet.sid.synset.txt'
if not os.path.exists(data_file):
gene_imagenet_synset(data_file)
for line in open(data_file):
sid, synsets_data = line.strip().split("\t")
synsets = map(str.strip, synsets_data.strip().split(','))
for synset in synsets:
words = synset.strip().split()
length = len(words)
len2visualsynset.setdefault(length, []).append(" ".join(words))
# print 'length:', len2visualsynset.keys()
new_len2visualsynset = {}
for key in len2visualsynset:
new_len2visualsynset[key] = set(len2visualsynset[key])
return new_len2visualsynset
class VisualDetector:
def __init__(self):
self.len2visualsynset = readImageNetSynset()
self.qp = SimpleQueryParser()
def predict(self, query):
origin_word_list = self.qp.process_list(query)
original_len = len(origin_word_list)
word_list = copy.deepcopy(origin_word_list)
all_len = len(word_list)
valid_len = len(word_list)
current_group = max(self.len2visualsynset.keys())
match_counter = 0
while current_group > 0:
if valid_len == 0:
break
while current_group > valid_len:
current_group -= 1
match_flag = 0
for i in range(0, all_len + 1 - current_group):
pattern = " ".join(word_list[i:i+current_group])
if "#" in pattern:
continue
else:
if pattern in self.len2visualsynset[current_group]:
word_list = word_list[:i] + ['#%d' % current_group] + word_list[i+current_group:]
all_len = all_len - current_group + 1
valid_len = valid_len - current_group
match_counter += current_group
match_flag = 1
break
if match_flag == 0:
current_group -= 1
index = 0
labeled_query = []
for word in word_list:
if word.startswith("#"):
n_words = int(word[1:])
new_word = "[" + " ".join(origin_word_list[index:index+n_words]) + "]"
labeled_query.append(new_word)
index += n_words
else:
labeled_query.append(word)
index += 1
return 0 if match_counter == 0 else 1.0*match_counter/original_len, " ".join(labeled_query)
if __name__ == "__main__":
vd = VisualDetector()
query_list = ["flowers", "soccer ball", "dogs and cat", "tattoo design", "barack obama family", "hot weather girls", "funny", "saying and quote"]
for query in query_list:
# print query
visualness_score, labeled_query = vd.predict(query)
print query, "->", labeled_query, visualness_score, '\n'
| python |
#!/usr/bin/env python3
"""
Count the number of called variants per sample in a VCF file.
"""
import argparse
import collections
import vcf
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"vcf", help="the vcf file to analyze", type=lambda f: vcf.Reader(filename=f)
)
return parser.parse_args()
def main():
args = parse_args()
call_counts = collections.Counter()
hom_alt_counts = collections.Counter()
het_counts = collections.Counter()
for record in filter(lambda r: not r.is_filtered, args.vcf):
for call in filter(lambda s: not s.is_filtered, record.samples):
call_counts[call.sample] += 1
if call.is_variant:
if call.is_het:
het_counts[call.sample] += 1
else:
hom_alt_counts[call.sample] += 1
print("\t".join(["sample", "call_count", "hom_alt_count", "het_count"]))
for sample in call_counts.keys():
print(
"\t".join(
map(
str,
[
sample,
call_counts[sample],
hom_alt_counts[sample],
het_counts[sample],
],
)
)
)
if __name__ == "__main__":
main()
| python |
from django.contrib import admin
from django.db import models
from tinymce.widgets import TinyMCE
from .models import Aviso
from .models import AvisoViewer
from .forms import AvisoFormAdmin
@admin.register(Aviso)
class AvisoAdmin(admin.ModelAdmin):
fields = ['titulo', 'subtitulo', 'data', 'texto', 'autor', 'editado_por']
list_display = ('titulo', 'subtitulo', 'autor', 'data')
search_fields = ('titulo', 'subtitulo', 'autor', 'texto')
readonly_fields = ['autor', 'editado_por', 'data']
formfield_overrides = {
models.TextField: {'widget': TinyMCE()},
}
form = AvisoFormAdmin
date_hierarchy = 'data'
def save_model(self, request, obj, form, change):
if change:
obj.editado_por = request.user
else:
obj.autor = request.user
obj.save()
@admin.register(AvisoViewer)
class AvisoViewerAdmin(admin.ModelAdmin):
fields = ['aviso', 'residente', 'data_visualizado']
list_display = ('aviso', 'residente', 'data_visualizado')
search_fields = ('aviso', 'residente')
autocomplete_fields = ['residente', 'aviso']
date_hierarchy = 'data_visualizado'
| python |
import json
USERS = "../static/user.json"
def read_JSON(filename):
try:
with open(filename, "r") as file_obj:
return json.load(file_obj)
    except (OSError, json.JSONDecodeError):
        # Missing, unreadable or malformed file: start from an empty dict.
        return dict()
def write_JSON(data, filename):
with open(filename, "w+") as file_obj:
json.dump(data, file_obj)
def append_JSON(filename, new_data):
    # Opening with "w+" truncates the file before json.load can read it, so
    # read the existing data first, merge in the new entries, then write back.
    old_data = read_JSON(filename)
    old_data.update(new_data)
    write_JSON(old_data, filename)
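# Illustrative round-trip (the file name below is made up for this demo only):
if __name__ == "__main__":
    demo_file = "demo_user.json"
    write_JSON({"alice": 1}, demo_file)
    append_JSON(demo_file, {"bob": 2})
    assert read_JSON(demo_file) == {"alice": 1, "bob": 2}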
| python |
from utils.code_runner import execute_code
import math
def sum_divisors(n):
if n == 1:
return 1
sqrt_n = math.ceil(math.sqrt(n))
divisor = 2
total_sum = 1
while divisor < sqrt_n:
if n % divisor == 0:
total_sum += divisor
total_sum += n // divisor
divisor += 1
# Check for root divisor of square number
if sqrt_n ** 2 == n:
total_sum += sqrt_n
return total_sum
def is_abundant(n):
"""
Abundant: if the sum of its proper divisors is higher than n.
Deficient: if the sum of its proper divisors is less than n.
Perfect: If the sum of its proper divisors exactly equals to n.
"""
if n < 12: # Smallest abundant number is 12.
return False
return sum_divisors(n) > n
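# Quick illustrative check of the definitions above: 12 is the smallest abundant
# number because its proper divisors 1 + 2 + 3 + 4 + 6 sum to 16 > 12, while 28
# is perfect (1 + 2 + 4 + 7 + 14 = 28) and therefore not abundant.
assert is_abundant(12)
assert not is_abundant(28)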
def is_sum_of_two_abundant_numbers(n, abundant_numbers):
for abundant in abundant_numbers:
difference = n - abundant
if difference in abundant_numbers:
return True
return False
def problem():
    # Every integer greater than this upper limit can be written
    # as the sum of two abundant numbers.
upper_limit = 28123
total_sum = 0
abundant_numbers = []
# Get the sum of all the positive integers
# that cannot be written as the sum of two abundant numbers.
for i in range(0, upper_limit + 1):
# Add i to abundant list if it is abundant
if is_abundant(i):
abundant_numbers.append(i)
# Check if i can be summed up with two abundant numbers.
if not is_sum_of_two_abundant_numbers(i, abundant_numbers):
total_sum += i
return total_sum
if __name__ == '__main__':
execute_code(problem)
| python |
from datadog import initialize, statsd
import random
import time
options = {
'statsd_host':'127.0.0.1',
'statsd_port':8125
}
initialize(**options)
namespace = "testing7"
# statsd.distribution('example_metric.distribution', random.randint(0, 20), tags=["environment:dev"])
statsd.timing("%s.timing"%namespace, random.randint(1, 20), tags=["environment:dev"])
statsd.distribution("%s.distribution"%namespace, 50 + random.randint(1, 20), tags=["environment:dev"])
# time.sleep(5)
# statsd.timing("%s.timing"%namespace, random.randint(1, 20), tags=["environment:dev"])
# statsd.distribution("%s.distribution"%namespace, 50 + random.randint(1, 20), tags=["environment:dev"])
| python |
import numpy as np
import ad_path
import antenna_diversity as ad
import matplotlib.pyplot as plt
import h5py
import typing as t
import time
import os
ad_path.nop()
bits_per_slot = 440
slots_per_frame = 1
give_up_value = 1e-6
# How many bits to aim for at give_up_value
certainty = 20
# Stop early at this many errors. Make sure to scale it together with
# slots_per_frame, as this count must include several different
# h values.
stop_at_errors = 100000
snr_stop = 50
snr_step = 2.5
branches = 5
crc_fail_penalty = 320 # Payload len
savefile = "diversity_mega.h5"
bit_goal = np.ceil(1/give_up_value) * certainty
max_tries = int(np.ceil(bit_goal / (bits_per_slot * slots_per_frame)))
print(bit_goal, max_tries)
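# Worked numbers for the settings above (illustrative): give_up_value = 1e-6 and
# certainty = 20 give bit_goal = 20_000_000 bits; with 440 bits per slot and one
# slot per frame, max_tries = ceil(20_000_000 / 440) = 45_455 frames per SNR point.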
snr_values = np.arange(-10, snr_stop+snr_step, snr_step)
snr_todo = list(range(len(snr_values)))
snr_channels = []
for snr in snr_values:
snr_channels.append(ad.channel.RayleighAWGNChannel(branches, snr))
gfsk = ad.modulation.GFSK()
encoder = ad.encoding.SymbolEncoder(2)
# Keep track of class instances used at the innermost loop
selector_dictionary = {}
def rest(hat_recv: np.ndarray, symbols: np.ndarray, slot) -> t.Tuple[int, int, bool, int]:
hat_symbols = gfsk.demodulate(hat_recv)
hat_data = encoder.decode_msb(hat_symbols)
unpacked = ad.protocols.dect.Full.from_bytes(hat_data)
err, n = ad.common.count_symbol_errors(symbols, hat_symbols)
crc_fail = unpacked.crc_drops_packet()
if crc_fail:
pbes = crc_fail_penalty
else:
pbes, _ = ad.common.count_bit_errors(slot.b_field, unpacked.b_field)
return err, n, crc_fail, pbes
# Must return (errors, total, crc, pbes)
def selection_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, _) \
-> t.Tuple[int, int, bool, int]:
hat_recv, _ = ad.diversity_technique.selection_from_h(recv, h)
return rest(hat_recv, symbols, slot)
def mrc_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, _) \
-> t.Tuple[int, int, bool, int]:
hat_recv = ad.diversity_technique.combining.mrc(recv, h)
return rest(hat_recv, symbols, slot)
def egc_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, _) \
-> t.Tuple[int, int, bool, int]:
hat_recv = ad.diversity_technique.combining.egc(recv)
return rest(hat_recv, symbols, slot)
def crc_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, state_id)\
-> t.Tuple[int, int, bool, int]:
if state_id not in selector_dictionary:
selector = ad.diversity_technique.CRCSelection(len(recv))
selector_dictionary[state_id] = selector
else:
selector = selector_dictionary[state_id]
hat_recv, _ = selector.select(recv)
err, n, crc_fail, pbes = rest(hat_recv, symbols, slot)
selector.report_crc_status(not crc_fail)
return err, n, crc_fail, pbes
def power_and_crc_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, state_id)\
-> t.Tuple[int, int, bool, int]:
crc_fails = []
# loop over branches
for r in recv:
_, _, crc_fail, _ = rest(r, symbols, slot)
crc_fails.append(crc_fail)
answer, index = ad.diversity_technique.selection.selection_from_power_and_crc(recv, crc_fails)
return rest(answer, symbols, slot)
def renedif_recv_h(recv: np.ndarray, h: np.ndarray, symbols: np.ndarray, slot, state_id)\
-> t.Tuple[int, int, bool, int]:
if state_id not in selector_dictionary:
selector = ad.diversity_technique.ReneDif()
selector_dictionary[state_id] = selector
else:
selector = selector_dictionary[state_id]
hat_recv, _ = selector.select(recv)
return rest(hat_recv, symbols, slot)
# Set the list of algorithms to run here
algorithms = [selection_recv_h, mrc_recv_h, crc_recv_h, egc_recv_h, renedif_recv_h, power_and_crc_recv_h]
algo_names = ["Selection", "MRC", "CRC", "EGC", "rene", "power_and_crc"]
# algorithms = [renedif_recv_h, crc_recv_h]
# algo_names = ["rene", "crc"]
# Results array with structure [snr_index][branch][algorithm] = [errors, total, payload_errors, slots, pbes]
data = np.zeros((len(snr_values), branches, len(algorithms), 5))
if os.path.isfile(savefile):
with h5py.File(savefile, "r") as f:
data = f["data"][:]
print("Loaded existing data from file")
def make_frame_array():
frame_array = []
for i in range(slots_per_frame):
data = ad.protocols.dect.Full.with_random_payload()
frame_array.append(data)
return frame_array
run = 0
start = time.time()
while len(snr_todo) > 0:
frame = make_frame_array()
for slot in frame:
symbols = encoder.encode_msb(slot.to_bytes())
signal = gfsk.modulate(symbols)
        for snr_index in list(snr_todo):  # iterate over a snapshot so finished SNRs can be removed safely
ch = snr_channels[snr_index]
recv, h = ch.run(signal)
done = True
for branch in range(branches):
for ai, algorithm in enumerate(algorithms):
state_id = f"{snr_index}.{branch}.{ai}"
errors, total, _, _, _ = data[snr_index][branch][ai]
prob = errors / total
# print(f"snr_index: {snr_index}, branch: {branch}, snr: {snr_values[snr_index]}, total: {total}, prob: {prob}")
if total > bit_goal or errors > stop_at_errors:
continue
done = False
err, n, crc_fault, pbes = algorithm(recv[:branch+1], h[:branch+1], symbols, slot, state_id)
data[snr_index][branch][ai][0] += err
data[snr_index][branch][ai][1] += n
data[snr_index][branch][ai][2] += int(crc_fault)
data[snr_index][branch][ai][3] += 1
data[snr_index][branch][ai][4] += pbes
ch.frame_sent()
            if done:
                snr_todo.remove(snr_index)
run += 1
if run % 10 == 0:
end = time.time()
duration = (end - start) / 10
print(f"Run: {run}, time: {duration}s, last_snr_goal: {total}/{bit_goal}, snr_todo: ({len(snr_todo)}) {snr_values[snr_todo]}")
start = end
print(data)
with h5py.File("diversity_mega.h5", "w") as f:
f.create_dataset("data", data=data)
for i, algo_name in enumerate(algo_names):
# Draw BER over SNR plots
plt.figure()
for branch in range(branches):
probs = np.empty(len(snr_values))
for snr_i, _ in enumerate(snr_values):
errors, total, _, _, _ = data[snr_i][branch][i]
probs[snr_i] = errors / total
plt.title(algo_name)
plt.plot(snr_values, probs, label=f"N = {branch+1}")
plt.xlabel('SNR [dB]')
plt.ylabel('Bit Error Rate')
plt.yscale("log")
plt.legend()
plt.grid(True)
plt.savefig(f"{algo_name}_snrber.pdf")
# Draw payload_error graph
plt.figure()
for branch in range(branches):
probs = np.empty(len(snr_values))
for snr_i, _ in enumerate(snr_values):
_, _, payload_fail, slots, _ = data[snr_i][branch][i]
probs[snr_i] = payload_fail / slots
plt.plot(snr_values, probs, label=f"N = {branch+1}")
plt.xlabel("SNR [dB]")
plt.ylabel("Ratio of packets CRC errors")
plt.legend()
plt.grid(True)
plt.savefig(f"{algo_name}_payload_error.pdf")
# Draw pbes graph
plt.figure()
for branch in range(branches):
probs = np.empty(len(snr_values))
for snr_i, _ in enumerate(snr_values):
_, _, _, slots, pbes = data[snr_i][branch][i]
probs[snr_i] = pbes / slots
plt.plot(snr_values, probs, label=f"N = {branch+1}")
plt.xlabel("SNR [dB]")
plt.ylabel("Payload Bit Error Score")
plt.legend()
plt.grid(True)
plt.savefig(f"{algo_name}_payload_bit_error_score.pdf")
| python |
import uuid
import factory.fuzzy
from dataworkspace.apps.request_access import models
from dataworkspace.tests.factories import UserFactory
class AccessRequestFactory(factory.django.DjangoModelFactory):
requester = factory.SubFactory(UserFactory)
contact_email = factory.LazyAttribute(lambda _: f"test.user+{uuid.uuid4()}@example.com")
reason_for_access = factory.fuzzy.FuzzyText()
class Meta:
model = models.AccessRequest
| python |
# Joey Alexander
# Built by Gautam Mittal (2017)
# Real-time chord detection and improvisation software that uses Fast Fourier Transforms, DSP, and machine learning
import sys
sys.path.append('util')
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from music21 import *
import os, threading, subprocess, numpy as np, atexit, pyaudio, matplotlib.pyplot as plt, chords, peakutils, player
# Set up chord detection variables
global CURRENT_CHORD, CURRENT_SCALE, detection_started
chordFinder = chords.ChordDetector()
chordQualities = chords.qualities
chordRoots = chords.noteNames
# Set up synthesized instrument
instrument = player.Player()
instrument.setBPM(240)
def run():
global CURRENT_SCALE
while True:
instrument.play(CURRENT_SCALE["scale"])
# Given chord symbol return list of 1, 3, 5, 7 scale degrees ("chord tones")
def chordTones(chordSymbol):
return eval(os.popen('./util/chordScale "'+chordSymbol+'"').read())
# Given a chord, find an appropriate scale to use for improvisation
def improvisationScale(chord, symbol):
# Decide on scale type based on common chord-scale conventions
scaleType = scale.DorianScale()
if chord.quality == 1:
scaleType = scale.MajorScale()
elif chord.quality == 3:
scaleType = scale.MixolydianScale()
tones = map(lambda x: x.replace('b', '-'), chordTones(symbol))
scales = scaleType.derive(tones) # Find the scale based on the given tones
allPitches = scales.getPitches() # Get the assosciated scale degrees
allNoteNames = [i.name for i in allPitches] # Turn them into real note names
return {'name': scales.name, 'scale': allNoteNames}
# Record audio in real-time for chord detection
class MicrophoneRecorder(object):
def __init__(self, rate=2000, chunksize=2**12):
self.rate = rate
self.chunksize = chunksize
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paInt16,
channels=1,
rate=self.rate,
input=True,
frames_per_buffer=self.chunksize,
stream_callback=self.new_frame)
self.lock = threading.Lock()
self.stop = False
self.frames = []
atexit.register(self.close)
def new_frame(self, data, frame_count, time_info, status):
data = np.fromstring(data, 'int16')
with self.lock:
self.frames.append(data)
if self.stop:
return None, pyaudio.paComplete
return None, pyaudio.paContinue
def get_frames(self):
with self.lock:
frames = self.frames
self.frames = []
return frames
def start(self):
self.stream.start_stream()
def close(self):
with self.lock:
self.stop = True
self.stream.close()
self.p.terminate()
class MplFigure(object):
def __init__(self, parent):
self.figure = plt.figure(facecolor='white')
self.canvas = FigureCanvas(self.figure)
class LiveFFTWidget(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.initUI()
self.initData()
self.initMplWidget()
def initUI(self):
vbox = QtGui.QVBoxLayout()
self.main_figure = MplFigure(self)
vbox.addWidget(self.main_figure.canvas)
self.setLayout(vbox)
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('Joey Alexander')
self.show()
timer = QtCore.QTimer()
timer.timeout.connect(self.handleNewData)
timer.start(50)
self.timer = timer
def initData(self):
mic = MicrophoneRecorder()
mic.start()
self.mic = mic
self.freq_vect = np.fft.rfftfreq(mic.chunksize,
1./mic.rate)
self.time_vect = np.arange(mic.chunksize, dtype=np.float32) / mic.rate * 1000
def initMplWidget(self):
self.ax_top = self.main_figure.figure.add_subplot(211)
self.ax_top.set_ylim(-32768, 32768)
self.ax_top.set_xlim(0, self.time_vect.max())
self.ax_top.set_xlabel(u'time (ms)', fontsize=6)
self.ax_bottom = self.main_figure.figure.add_subplot(212)
self.ax_bottom.set_ylim(0, 1)
self.ax_bottom.set_xlim(0, self.freq_vect.max())
self.ax_bottom.set_xlabel(u'frequency (Hz)', fontsize=6)
self.line_top, = self.ax_top.plot(self.time_vect,
np.ones_like(self.time_vect))
self.line_bottom, = self.ax_bottom.plot(self.freq_vect,
np.ones_like(self.freq_vect))
    # handles the asynchronously collected sound chunks
def handleNewData(self):
global detection_started, CURRENT_SCALE, CURRENT_CHORD
frames = self.mic.get_frames()
if len(frames) > 0:
current_frame = frames[-1]
# get 12x1 chroma vector with respective energies for each note
chroma = chords.calculateChromagram(self.freq_vect, np.abs(np.fft.rfft(current_frame)))
chordFinder.detectChord(chroma)
chordString = ""
if chordFinder.intervals > 0:
chordString = str(chordRoots[chordFinder.rootNote]) + str(chordQualities[chordFinder.quality]) + str(chordFinder.intervals)
else:
chordString = str(chordRoots[chordFinder.rootNote]) + str(chordQualities[chordFinder.quality])
CURRENT_SCALE = improvisationScale(chordFinder, chordString)
CURRENT_CHORD = {
'chord': chordString,
'root': chordRoots[chordFinder.rootNote],
'quality': chordQualities[chordFinder.quality],
'interval': chordFinder.intervals
}
print CURRENT_CHORD
if detection_started == False:
detection_started = True
t = threading.Thread(target=run).start()
# plots the time signal
self.line_top.set_data(self.time_vect, current_frame)
fft_frame = np.fft.rfft(current_frame)
fft_frame /= np.abs(fft_frame).max()
self.line_bottom.set_data(self.freq_vect, np.abs(fft_frame))
self.main_figure.canvas.draw()
if __name__ == "__main__":
detection_started = False
app = QtGui.QApplication(sys.argv)
window = LiveFFTWidget()
sys.exit(app.exec_())
| python |
# coding=utf-8
#Author: Chion82<[email protected]>
import requests
import urllib
import re
import sys, os
import HTMLParser
import json
from urlparse import urlparse, parse_qs
reload(sys)
sys.setdefaultencoding('utf8')
class PixivHackLib(object):
def __init__(self):
self.__session_id = ''
self.__session = requests.Session()
self.__session.headers.update({'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.125 Safari/537.36'})
self.__keyword = 'kancolle'
self.__min_ratings = 0
self.__max_pics = 10
self.__pic_downloaded_count = 0
self.__download_manga = True
self.__download_big_images = True
self.__author_ratings = []
if not os.path.exists('pixivimages'):
os.makedirs('pixivimages')
@property
def session_id(self):
return self.__session_id
@session_id.setter
def session_id(self, id_str):
self.__session_id = id_str
def config(self, keyword, min_ratings, max_pics, download_manga, download_big_images):
self.__keyword = keyword
self.__min_ratings = min_ratings
self.__max_pics = max_pics
self.__download_manga = download_manga
self.__download_big_images = download_big_images
def crawl(self):
self.__pic_downloaded_count = 0
self.__author_ratings = []
page = 1
while self.__pic_downloaded_count < self.__max_pics :
try:
search_result = self.__get_search_result(page, None)
if (len(search_result)==0 or page>1000):
print('No more result found. ')
break
for link in search_result:
if (self.__pic_downloaded_count >= self.__max_pics):
break
self.__enter_illustration_page(link, 'pixivimages')
page = page + 1
print('************************Moving to next page************************')
except Exception:
print('Crawl error. Skipping page...')
page = page + 1
continue
print('All Done! Saving author info...')
self.__save_author_ratings()
def crawl_by_author(self, author_list, max_pics_per_author):
for author_id in author_list:
print('***********************Crawling by author*************************')
print('author Pixiv ID : ' + author_id)
self.__pic_downloaded_count = 0
page = 1
if not os.path.exists('pixivimages/' + author_id):
os.makedirs('pixivimages/' + author_id)
while self.__pic_downloaded_count < max_pics_per_author:
try:
search_result = self.__get_search_result(page, author_id)
if (len(search_result) == 0):
print('No more result found.')
break
for link in search_result:
if (self.__pic_downloaded_count >= max_pics_per_author):
break
self.__enter_illustration_page(link, 'pixivimages/' + author_id)
page = page + 1
print('************************Moving to next page***************************')
except Exception:
print('Crawl error. Skipping page...')
page = page + 1
continue
print('***********************Moving to next author**************************')
print('All Done!')
def __get_search_result(self, page, author_id):
try:
if (author_id == None):
search_result = self.__session.get('http://www.pixiv.net/search.php?word=' + urllib.quote(self.__keyword) + '&p=' + str(page), cookies={'PHPSESSID': self.__session_id})
else:
search_result = self.__session.get('http://www.pixiv.net/member_illust.php?id=' + author_id + '&type=all&p=' + str(page), cookies={'PHPSESSID': self.__session_id})
except Exception:
print('Connection failure. Retrying...')
return self.__get_search_result(page, author_id)
result_list = re.findall(r'<a href="(/member_illust\.php\?mode=.*?&illust_id=.*?)">', search_result.text)
return ['http://www.pixiv.net'+self.__html_decode(link) for link in result_list if (not '"' in link)]
def __enter_illustration_page(self, url, directory):
print('********************Entering illustration page*********************')
print('Entering ' + url)
try:
page_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id})
except Exception:
print('Connection failure. Retrying...')
self.__enter_illustration_page(url, directory)
return
re_result_ratings = re.findall(r'<dd class="rated-count">(.*?)</dd>', page_result.text)
ratings = re_result_ratings[0]
pixiv_id = parse_qs(urlparse(url).query)['illust_id'][0]
re_result_author_id = re.findall(r'<a href="/member\.php\?id=(.*?)" class="user-link">', page_result.text)
pixiv_author_id = re_result_author_id[0]
print('pixiv_id=' + pixiv_id)
print('ratings='+ratings)
print('author_id='+pixiv_author_id)
if (int(ratings) < self.__min_ratings):
print('Ratings < ' + str(self.__min_ratings) + ' , Skipping...')
return
self.__increment_author_ratings(pixiv_author_id, int(ratings), pixiv_id)
re_manga_result = re.findall(r'<a href="(member_illust\.php\?mode=manga&illust_id=.*?)"', page_result.text)
re_image_result = re.findall(r'data-src="(.*?)" class="original-image"', page_result.text)
re_big_image_result = re.findall(r'<a href="(member_illust\.php\?mode=big&illust_id=.*?)"', page_result.text)
if (len(re_manga_result) > 0):
if (self.__download_manga == False):
print('Illustration is manga. Skipping...')
return
print('Illustration is manga. Entering manga page.')
self.__enter_manga_page('http://www.pixiv.net/' + self.__html_decode(re_manga_result[0]), pixiv_id, url, directory)
self.__pic_downloaded_count = self.__pic_downloaded_count + 1
elif (len(re_image_result) > 0):
print('Illustration is image. Downloading image...')
self.__pic_downloaded_count = self.__pic_downloaded_count + 1
self.__download_image(self.__html_decode(re_image_result[0]), url, directory)
print('Download completed.')
elif (len(re_big_image_result) > 0):
if (self.__download_big_images == False):
print('Illustration is big-image. Skipping...')
return
print('Illustration mode is big-image. Entering big-image page.')
self.__enter_big_image_page('http://www.pixiv.net/' + self.__html_decode(re_big_image_result[0]), url, directory)
self.__pic_downloaded_count = self.__pic_downloaded_count + 1
else:
print('Illustration mode not supported. Skipping...')
def __enter_big_image_page(self, url, referer, directory):
print('********************Entering big-image page************************')
print('Entering ' + url)
try:
page_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id}, headers={'Referer':referer})
except Exception:
print('Connection failure. Retrying...')
self.__enter_big_image_page(url, referer, directory)
return
re_big_image_url = re.findall(r'<img src="(.*?)"', page_result.text)
print('Downloading big-image.')
self.__download_image(self.__html_decode(re_big_image_url[0]), url, directory)
print('Download completed.')
def __enter_manga_page(self, url, pixiv_id, referer,directory):
print('********************Entering manga page**************************')
print('Entering ' + url)
if not os.path.exists(directory + '/' + pixiv_id):
os.makedirs(directory + '/' + pixiv_id)
try:
page_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id}, headers={'Referer':referer})
except Exception:
print('Connection failure. Retrying...')
self.__enter_manga_page(url, pixiv_id, referer,directory)
return
re_manga_page_result = re.findall(r'<a href="(/member_illust\.php\?mode=manga_big.*?)"', page_result.text)
for link in re_manga_page_result:
self.__enter_manga_big_page('http://www.pixiv.net' + self.__html_decode(link), url, directory + '/' + pixiv_id)
def __enter_manga_big_page(self, url, referer, directory):
print('********************Entering manga-big page***************************')
print('Entering ' + url)
try:
page_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id}, headers={'Referer':referer})
except Exception:
print('Connection failure. Retrying...')
self.__enter_manga_big_page(url, referer, directory)
return
re_image_result = re.findall(r'<img src="(.*?)"', page_result.text)
print('Downloading manga-big image...')
self.__download_image(self.__html_decode(re_image_result[0]), url, directory)
print('Download completed.')
def __increment_author_ratings(self, author_id, increment, pixiv_id):
for author in self.__author_ratings:
if (author['author_id'] == author_id):
if (pixiv_id in author['illust_id']):
return
author['total_ratings'] = author['total_ratings'] + increment
author['illust_id'].append(pixiv_id)
return
self.__author_ratings.append({'author_id':author_id, 'total_ratings':increment, 'illust_id':[pixiv_id]})
def __save_author_ratings(self):
self.__author_ratings = sorted(self.__author_ratings, key=lambda author:author['total_ratings'], reverse=True)
f = open('author_info.json','w+')
f.write(json.dumps(self.__author_ratings))
f.close()
def __html_decode(self, string):
h = HTMLParser.HTMLParser()
return h.unescape(string)
def __download_image(self, url, referer, directory):
try:
download_result = self.__session.get(url, cookies={'PHPSESSID': self.__session_id}, headers={'Referer':referer})
except Exception:
print('Connection failure. Retrying...')
self.__download_image(url, referer, directory)
return
if (download_result.status_code != 200):
print('Download Error')
print(download_result.text)
return
url_parsed_array = url.split('/')
file_name = url_parsed_array[len(url_parsed_array)-1]
with open(directory + '/' + file_name, 'wb+') as f:
for chunk in download_result.iter_content():
f.write(chunk)
f.close()
| python |
# Generated by Django 3.0.2 on 2020-01-20 10:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20200117_1430'),
]
operations = [
migrations.AlterField(
model_name='imagefile',
name='image',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_files', to='api.Image'),
),
]
| python |
#!/usr/bin/env python
# file_modified.py
# takes input file or string and returns file modified date
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os.path, sys
parent_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(parent_dir)
from util.parse_inputs import parse_inputs
import time
# -----------------------------------------------------------------------------
# Variables
# -----------------------------------------------------------------------------
time_format = "%a, %d %b %Y %H:%M:%S"
# -----------------------------------------------------------------------------
# Input should be a list of files or directories
# -----------------------------------------------------------------------------
def file_modified(input_value):
for i in input_value:
if os.path.exists(i):
unix_time = os.path.getmtime(i)
formatted_time = time.strftime(time_format, time.localtime(unix_time))
print(str(i) + '\t' + formatted_time)
else:
print('Unable to find ' + str(i))
if __name__ == "__main__":
input_value = parse_inputs(strip_newline_stdin=True)
if input_value:
file_modified(input_value)
| python |
#!/usr/bin/env python
"""Normalizes ini files."""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R1702
# pylint: disable=R0912
import re
import sys
from collections import defaultdict
class Processor:
"""Process and normalizes an ini file."""
def __init__(self):
self.r: dict[str, dict[str, str]] = defaultdict(dict)
self.heading = re.compile(r"\[(\w+)\]")
self.entry = re.compile(r"(\w+)=(.*)")
self.cur = None
def line(self, line: str):
"""Process a line of an ini file to be normalized."""
if m := self.heading.match(line):
self.cur = m[1]
if m := self.entry.match(line):
if not self.cur:
raise ValueError("Missing section header")
self.r[self.cur][m[1]] = m[2]
def out(self) -> str:
"""Generates normalized ini file."""
sections = []
hdrs = list(self.r.keys())
hdrs.sort()
for hdr in hdrs:
rc = self.r[hdr]
sec = [f"[{hdr}]\n"]
ks = list(rc.keys())
ks.sort()
for k in ks:
sec.append(f"{k}={rc[k]}\n")
sections.append("".join(sec))
return "\n".join(sections)
def main():
"""Main function."""
rep = Processor()
for fname in sys.argv[1:]:
with open(fname, encoding="utf8") as fd:
for line in fd:
rep.line(line)
print(rep.out(), end="")
if __name__ == '__main__':
main()
| python |
import poplib
from email.parser import Parser
email = '[email protected]'
password = 'lrh0000'
pop3_server = 'pop.163.com'
server = poplib.POP3(pop3_server)
print(server.getwelcome().decode('utf8'))
server.user(email)
server.pass_(password)
print('Message: %s. Size: %s' % (server.stat()))
resp, mails, octets = server.list()
# print(mails)
index = len(mails)
resp, lines, octets = server.retr(index)
msg_content = b'\r\n'.join(lines).decode('utf-8')
msg = Parser().parsestr(msg_content)
print(msg)
server.quit()
| python |
def test_dictionary():
"""Dictionary"""
fruits_dictionary = {
'cherry': 'red',
'apple': 'green',
'banana': 'yellow',
}
assert isinstance(fruits_dictionary, dict)
assert fruits_dictionary['apple'] == 'green'
assert fruits_dictionary['banana'] == 'yellow'
assert fruits_dictionary['cherry'] == 'red'
assert 'apple' in fruits_dictionary
assert 'pineapple' not in fruits_dictionary
# Modify
fruits_dictionary['apple'] = 'red'
# Add
fruits_dictionary['pineapple'] = 'yellow'
assert fruits_dictionary['pineapple'] == "yellow"
assert list(fruits_dictionary) == ['cherry', 'apple', 'banana', 'pineapple']
assert sorted(fruits_dictionary) == [
'apple', 'banana', 'cherry', 'pineapple'
]
del fruits_dictionary['pineapple']
assert list(fruits_dictionary) == ['cherry', 'apple', 'banana']
dictionary_via_constructor = dict([('sape', 4139), ('guido', 4127),
('jack', 4098)])
assert dictionary_via_constructor['sape'] == 4139
assert dictionary_via_constructor['guido'] == 4127
assert dictionary_via_constructor['jack'] == 4098
dictionary_via_expression = {x: x**2 for x in (2, 4, 6)}
assert dictionary_via_expression[2] == 4
assert dictionary_via_expression[4] == 16
assert dictionary_via_expression[6] == 36
dictionary_for_string_keys = dict(sape=4139, guido=4127, jack=4098)
assert dictionary_for_string_keys['sape'] == 4139
assert dictionary_for_string_keys['guido'] == 4127
assert dictionary_for_string_keys['jack'] == 4098
| python |
import os, time, logging, configparser, psutil
# Settings
logging.basicConfig(filename='log/app.log', filemode='w',format='[%(levelname)s][%(name)s][%(asctime)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info('Module Loaded')
config = configparser.ConfigParser()
config.read("settings.ini")
filesystem = config['Filesystem']
def _get_name(dirpath):
if str.find(dirpath,'\\') != -1:
if str.find(dirpath,'\\') < len(dirpath)-1:
return dirpath[str.rindex(dirpath,'\\')+1:]
else:
return dirpath
else:
return ''
def _get_parent_path(dirpath):
if str.find(dirpath,'\\') != -1:
if str.find(dirpath,'\\') < len(dirpath)-1:
return dirpath[:str.rindex(dirpath,'\\')]
else:
return dirpath
else:
return ''
def _get_format(filename):
if str.find(filename,'.') != -1:
return filename[str.rindex(filename,'.'):]
else:
return ''
def _get_level(dirpath):
path_list = dirpath.split('\\')
if path_list[1] == '':
level = 1
else:
level = len(path_list)
return level - 1
def _get_measure_index(measure = filesystem['measure']):
    # Normalise the unit string once so 'KB', 'Kb' and 'kb' all behave the same.
    measure = measure.lower()
    if measure in ('b', 'bytes'):
        measure_index = 1
    elif measure == 'kb':
        measure_index = 1000
    elif measure == 'mb':
        measure_index = 1000000
    else:
        measure_index = 1
    return measure_index
def _get_file_size(path, file, measure = filesystem['measure']):
try:
measure_index = _get_measure_index(measure)
filepath = os.path.join(path, file)
return os.path.getsize(filepath) / measure_index
except Exception as err:
logging.error(f'[Path]: {path} [File]: {file} issue ' + str(err))
def _get_time(path, time_type = 'c'):
try:
if time_type == 'c':
return time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime(os.path.getctime(path)))
if time_type == 'm':
return time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime(os.path.getmtime(path)))
except Exception as err:
logging.error(f'[File]: {path} issue ' + str(err))
def get_folder_info(root_disk, dirpath, dirnames, filenames):
file = {}
file['root'] = root_disk
file['name'] = _get_name(dirpath)
file['path'] = dirpath
file['parent'] = _get_parent_path(dirpath)
file['file_type'] = 'folder'
file['format'] = 'folder'
file['level'] = _get_level(dirpath) - 1
file['dirs_count'] = len(dirnames)
file['files_count'] = len(filenames)
file['size'] = 0
file['measure'] = filesystem['measure']
file['created_at'] = _get_time(dirpath, 'c')
file['updated_at'] = _get_time(dirpath, 'm')
return file
def get_file_info(root_disk, dirpath, filename):
file = {}
file['root'] = root_disk
file['name'] = filename
file['path'] = os.path.join(dirpath, filename)
file['parent'] = dirpath
file['file_type'] = 'file'
file['format'] = _get_format(filename)
file['level'] = _get_level(dirpath) - 1
file['dirs_count'] = 0
file['files_count'] = 0
file['size'] = _get_file_size(dirpath, filename)
file['measure'] = filesystem['measure']
file['created_at'] = _get_time(file['path'], 'c')
file['updated_at'] = _get_time(file['path'], 'm')
return file
def get_total_space(root_disk = 'C:\\', measure = filesystem['measure']):
measure_index = _get_measure_index(measure)
total_info = {}
total_info['root'] = root_disk
total_info['total'] = psutil.disk_usage(root_disk).total / measure_index
total_info['used'] = psutil.disk_usage(root_disk).used / measure_index
total_info['free'] = psutil.disk_usage(root_disk).free / measure_index
return total_info | python |
# Copyright 2016 AC Technologies LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import re
import numpy as np
import six
import sys
from tensor2tensor.data_generators.problem import problem_hparams_to_features
import tensorflow as tf
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.framework import graph_util
from tensorflow.python.util import compat
# Dependency imports
from tensor2tensor import models # pylint: disable=unused-import
from g2p_seq2seq import g2p_problem
from g2p_seq2seq import g2p_trainer_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir
from tensor2tensor.utils import decoding
from tensor2tensor.utils import trainer_lib
from tensor2tensor.data_generators import text_encoder
from six.moves import input
from six import text_type
EOS = text_encoder.EOS
class G2PModel(object):
"""Grapheme-to-Phoneme translation model class.
"""
def __init__(self, params, train_path="", dev_path="", test_path="",
cleanup=False, p2g_mode=False):
# Point out the current directory with t2t problem specified for g2p task.
usr_dir.import_usr_dir(os.path.dirname(os.path.abspath(__file__)))
self.params = params
self.test_path = test_path
if not os.path.exists(self.params.model_dir):
os.makedirs(self.params.model_dir)
# Register g2p problem.
self.problem = registry._PROBLEMS[self.params.problem_name](
self.params.model_dir, train_path=train_path, dev_path=dev_path,
test_path=test_path, cleanup=cleanup, p2g_mode=p2g_mode)
self.frozen_graph_filename = os.path.join(self.params.model_dir,
"frozen_model.pb")
self.inputs, self.features, self.input_fn = None, None, None
self.mon_sess, self.estimator_spec, self.g2p_gt_map = None, None, None
self.first_ex = False
if train_path:
self.train_preprocess_file_path, self.dev_preprocess_file_path =\
None, None
self.estimator, self.decode_hp, self.hparams =\
self.__prepare_model(train_mode=True)
self.train_preprocess_file_path, self.dev_preprocess_file_path =\
self.problem.generate_preprocess_data()
elif os.path.exists(self.frozen_graph_filename):
self.estimator, self.decode_hp, self.hparams =\
self.__prepare_model()
self.__load_graph()
self.checkpoint_path = tf.train.latest_checkpoint(self.params.model_dir)
else:
self.estimator, self.decode_hp, self.hparams =\
self.__prepare_model()
def __prepare_model(self, train_mode=False):
"""Prepare utilities for decoding."""
hparams = registry.hparams(self.params.hparams_set)
hparams.problem = self.problem
hparams.problem_hparams = self.problem.get_hparams(hparams)
if self.params.hparams:
tf.logging.info("Overriding hparams in %s with %s",
self.params.hparams_set,
self.params.hparams)
hparams = hparams.parse(self.params.hparams)
trainer_run_config = g2p_trainer_utils.create_run_config(hparams,
self.params)
if train_mode:
exp_fn = g2p_trainer_utils.create_experiment_fn(self.params, self.problem)
self.exp = exp_fn(trainer_run_config, hparams)
decode_hp = decoding.decode_hparams(self.params.decode_hparams)
estimator = trainer_lib.create_estimator(
self.params.model_name,
hparams,
trainer_run_config,
decode_hparams=decode_hp,
use_tpu=False)
return estimator, decode_hp, hparams
def __prepare_interactive_model(self):
"""Create monitored session and generator that reads from the terminal and
yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]
Raises:
ValueError: Could not find a trained model in model_dir.
ValueError: if batch length of predictions are not same.
"""
def input_fn():
"""Input function returning features which is a dictionary of
string feature name to `Tensor` or `SparseTensor`. If it returns a
tuple, first item is extracted as features. Prediction continues until
`input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`)."""
gen_fn = decoding.make_input_fn_from_generator(
self.__interactive_input_fn())
example = gen_fn()
example = decoding._interactive_input_tensor_to_features_dict(
example, self.hparams)
return example
self.res_iter = self.estimator.predict(input_fn)
if os.path.exists(self.frozen_graph_filename):
return
# List of `SessionRunHook` subclass instances. Used for callbacks inside
# the prediction call.
hooks = estimator_lib._check_hooks_type(None)
# Check that model has been trained.
# Path of a specific checkpoint to predict. The latest checkpoint
# in `model_dir` is used
checkpoint_path = estimator_lib.saver.latest_checkpoint(
self.params.model_dir)
if not checkpoint_path:
raise ValueError('Could not find trained model in model_dir: {}.'
.format(self.params.model_dir))
with estimator_lib.ops.Graph().as_default() as graph:
estimator_lib.random_seed.set_random_seed(
self.estimator._config.tf_random_seed)
self.estimator._create_and_assert_global_step(graph)
self.features, input_hooks = self.estimator._get_features_from_input_fn(
input_fn, estimator_lib.model_fn_lib.ModeKeys.PREDICT)
self.estimator_spec = self.estimator._call_model_fn(
self.features, None, estimator_lib.model_fn_lib.ModeKeys.PREDICT,
self.estimator.config)
try:
self.mon_sess = estimator_lib.training.MonitoredSession(
session_creator=estimator_lib.training.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=self.estimator_spec.scaffold,
config=self.estimator._session_config),
hooks=hooks)
except:
# raise StandardError("Invalid model in {}".format(self.params.model_dir))
raise ValueError("Invalid model in {}".format(self.params.model_dir))
def decode_word(self, word):
"""Decode word.
Args:
word: word for decoding.
Returns:
pronunciation: a decoded phonemes sequence for input word.
"""
num_samples = 1
decode_length = 100
vocabulary = self.problem.source_vocab
# This should be longer than the longest input.
const_array_size = 10000
input_ids = vocabulary.encode(word)
input_ids.append(text_encoder.EOS_ID)
self.inputs = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(self.inputs) < const_array_size
self.inputs += [0] * (const_array_size - len(self.inputs))
result = next(self.res_iter)
pronunciations = []
if self.decode_hp.return_beams:
beams = np.split(result["outputs"], self.decode_hp.beam_size, axis=0)
for k, beam in enumerate(beams):
tf.logging.info("BEAM %d:" % k)
beam_string = self.problem.target_vocab.decode(
decoding._save_until_eos(beam, is_image=False))
pronunciations.append(beam_string)
tf.logging.info(beam_string)
else:
if self.decode_hp.identity_output:
tf.logging.info(" ".join(map(str, result["outputs"].flatten())))
else:
res = result["outputs"].flatten()
if text_encoder.EOS_ID in res:
index = list(res).index(text_encoder.EOS_ID)
res = res[0:index]
pronunciations.append(self.problem.target_vocab.decode(res))
return pronunciations
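  # Illustrative packing (made-up ids, not from a real vocabulary): for a word
  # whose encoded ids are [5, 12, 7] plus EOS id 1, with num_samples=1 and
  # decode_length=100, self.inputs starts as [1, 100, 4, 5, 12, 7, 1] and is
  # then zero-padded out to const_array_size entries before input_fn consumes it.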
def __interactive_input_fn(self):
num_samples = self.decode_hp.num_samples if self.decode_hp.num_samples > 0\
else 1
decode_length = self.decode_hp.extra_length
input_type = "text"
p_hparams = self.hparams.problem_hparams
has_input = "inputs" in p_hparams.input_modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
features = {
"inputs": np.array(self.inputs).astype(np.int32),
}
for k, v in six.iteritems(problem_hparams_to_features(p_hparams)):
features[k] = np.array(v).astype(np.int32)
yield features
def __run_op(self, sess, decode_op, feed_input):
"""Run tensorflow operation for decoding."""
results = sess.run(decode_op,
feed_dict={"inp_decode:0" : [feed_input]})
return results
def train(self):
"""Run training."""
print('Training started.')
execute_schedule(self.exp, self.params)
def interactive(self):
"""Interactive decoding."""
self.inputs = []
self.__prepare_interactive_model()
if os.path.exists(self.frozen_graph_filename):
with tf.Session(graph=self.graph) as sess:
saver = tf.train.import_meta_graph(self.checkpoint_path + ".meta",
import_scope=None,
clear_devices=True)
saver.restore(sess, self.checkpoint_path)
inp = tf.placeholder(tf.string, name="inp_decode")[0]
decode_op = tf.py_func(self.decode_word, [inp], tf.string)
while True:
word = get_word()
pronunciations = self.__run_op(sess, decode_op, word)
print (" ".join(pronunciations))
else:
while not self.mon_sess.should_stop():
word = get_word()
pronunciations = self.decode_word(word)
print(" ".join(pronunciations))
        # Make sure the output buffer always flushes at this level.
sys.stdout.flush()
def decode(self, output_file_path):
"""Run decoding mode."""
outfile = None
# Output results to a file if given.
if output_file_path:
tf.logging.info("Writing decodes into %s" % output_file_path)
outfile = tf.gfile.Open(output_file_path, "w")
if os.path.exists(self.frozen_graph_filename):
with tf.Session(graph=self.graph) as sess:
inp = tf.placeholder(tf.string, name="inp_decode")[0]
decode_op = tf.py_func(self.__decode_from_file, [inp],
[tf.string, tf.string])
[inputs, decodes] = self.__run_op(sess, decode_op, self.test_path)
else:
inputs, decodes = self.__decode_from_file(self.test_path)
# Output decoding results
for _input, _decode in zip(inputs, decodes):
_input = compat.as_text(_input)
_decode = compat.as_text(_decode)
if output_file_path:
outfile.write("{} {}\n".format(_input, _decode))
else:
print("Raw prediction: {} {}".format(_input, _decode))
def evaluate(self):
"""Run evaluation mode."""
words, pronunciations = [], []
for case in self.problem.generator(self.test_path,
self.problem.source_vocab,
self.problem.target_vocab):
word = self.problem.source_vocab.decode(case["inputs"]).replace(
EOS, "").strip()
pronunciation = self.problem.target_vocab.decode(case["targets"]).replace(
EOS, "").strip()
words.append(word)
pronunciations.append(pronunciation)
self.g2p_gt_map = create_g2p_gt_map(words, pronunciations)
if os.path.exists(self.frozen_graph_filename):
with tf.Session(graph=self.graph) as sess:
inp = tf.placeholder(tf.string, name="inp_decode")[0]
decode_op = tf.py_func(self.calc_errors, [inp],
[tf.int64, tf.int64, tf.int64, tf.int64])
results = self.__run_op(sess, decode_op, self.test_path)
else:
results = self.calc_errors(self.test_path)
word_correct, word_errors, phone_errors, total_ref_phones = results
wer = 100.0 * word_errors / (word_correct + word_errors)
per = 100.0 * phone_errors / total_ref_phones
print("="*80)
print("Total: {} words, {} phones".\
format(word_correct + word_errors, total_ref_phones))
print("Word errors: {} ({:.2f}%)".format(word_errors, wer))
print("Phone errors: {} ({:.2f}%)".format(phone_errors, per))
print("Total word errors: {}".format(word_errors))
print("Total phone errors: {}".format(phone_errors))
print("="*80)
def freeze(self):
"""Freeze pre-trained model."""
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(self.params.model_dir)
input_checkpoint = checkpoint.model_checkpoint_path
# We specify the full filename of our frozen graph
absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_folder + "/frozen_model.pb"
# Before exporting our graph, we need to specify which output nodes we need.
# This is how TF decides which parts of the graph it has to keep and which
# parts it can dump
# NOTE: this variable is a list, because you can have multiple output nodes
output_node_names = []
hparams = self.params.hparams.split(",")
num_layers = [int(hp.split("=")[1]) for hp in hparams
if hp.startswith("num_hidden_layers")][0]
root_dir = "transformer/parallel_0_4/transformer/transformer/body"
for i in range(num_layers):
output_node_names.append("{}/encoder/layer_{}/self_attention/".format(root_dir, i) +\
"multihead_attention/dot_product_attention/attention_weights")
for i in range(num_layers):
output_node_names.append("{}/decoder/layer_{}/self_attention/".format(root_dir, i) +\
"multihead_attention/dot_product_attention/attention_weights")
output_node_names.append("{}/decoder/layer_{}/encdec_attention/".format(root_dir, i) +\
"multihead_attention/dot_product_attention/attention_weights")
# We clear devices to allow TensorFlow to control on which device it will
# load operations
clear_devices = True
# We import the meta graph and retrieve a Saver
saver = tf.train.import_meta_graph(input_checkpoint + '.meta',
clear_devices=clear_devices)
# We retrieve the protobuf graph definition
graph = tf.get_default_graph()
input_graph_def = graph.as_graph_def()
# We start a session and restore the graph weights
with tf.Session() as sess:
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
input_graph_def, # The graph_def is used to retrieve the nodes
output_node_names, # The output node names are used to select the
# useful nodes
variable_names_blacklist=['global_step'])
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as output_graph_file:
output_graph_file.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
def __load_graph(self):
"""Load freezed graph."""
# We load the protobuf file from the disk and parse it to retrieve the
# unserialized graph_def
with tf.gfile.GFile(self.frozen_graph_filename, "rb") as frozen_graph_file:
graph_def = tf.GraphDef()
graph_def.ParseFromString(frozen_graph_file.read())
# Then, we import the graph_def into a new Graph and return it
with tf.Graph().as_default() as self.graph:
# The name argument will prefix every op/node in your graph.
# Since we load everything into a new graph, this is not needed
tf.import_graph_def(graph_def, name="import")
def __decode_from_file(self, filename):
"""Compute predictions on entries in filename and write them out."""
if not self.decode_hp.batch_size:
self.decode_hp.batch_size = 32
tf.logging.info("decode_hp.batch_size not specified; default=%d" %
self.decode_hp.batch_size)
p_hparams = self.hparams.problem_hparams
inputs_vocab = p_hparams.vocabulary["inputs"]
targets_vocab = p_hparams.vocabulary["targets"]
problem_name = "grapheme_to_phoneme_problem"
tf.logging.info("Performing decoding from a file.")
inputs = _get_inputs(filename)
num_decode_batches = (len(inputs) - 1) // self.decode_hp.batch_size + 1
def input_fn():
"""Function for inputs generator."""
input_gen = _decode_batch_input_fn(
num_decode_batches, inputs, inputs_vocab,
self.decode_hp.batch_size, self.decode_hp.max_input_size)
gen_fn = decoding.make_input_fn_from_generator(input_gen)
example = gen_fn()
return decoding._decode_input_tensor_to_features_dict(example,
self.hparams)
decodes = []
result_iter = self.estimator.predict(input_fn)
try:
for result in result_iter:
if self.decode_hp.return_beams:
decoded_inputs = inputs_vocab.decode(
decoding._save_until_eos(result["inputs"], False))
beam_decodes = []
output_beams = np.split(result["outputs"], self.decode_hp.beam_size,
axis=0)
for k, beam in enumerate(output_beams):
decoded_outputs = targets_vocab.decode(
decoding._save_until_eos(beam, False))
beam_decodes.append(decoded_outputs)
decodes.append(beam_decodes)
else:
decoded_inputs = inputs_vocab.decode(
decoding._save_until_eos(result["inputs"], False))
decoded_outputs = targets_vocab.decode(
decoding._save_until_eos(result["outputs"], False))
decodes.append(decoded_outputs)
except:
# raise StandardError("Invalid model in {}".format(self.params.model_dir))
raise ValueError("Invalid model in {}".format(self.params.model_dir))
return [inputs, decodes]
def calc_errors(self, decode_file_path):
"""Calculate a number of word and phone prediction errors."""
inputs, decodes = self.__decode_from_file(decode_file_path)
word_correct, word_errors, phone_errors = 0, 0, 0
total_ref_phones = 0
word_set = set()
for index, word in enumerate(inputs):
if word in word_set:
continue
word_set.add(word)
# Estimate #phones of the word
ref_phone_count = np.mean([len(ref_str.split(" "))
for ref_str in self.g2p_gt_map[word]])
total_ref_phones += int(ref_phone_count)
if self.decode_hp.return_beams:
beam_correct_found = False
for beam_decode in decodes[index]:
if beam_decode in self.g2p_gt_map[word]:
beam_correct_found = True
break
if beam_correct_found:
word_correct += 1
else:
word_errors += 1
# Estimate phone-level errors
phone_error = phone_errors_for_single_word(decodes[index],
self.g2p_gt_map[word])
phone_errors += phone_error
else:
if decodes[index] in self.g2p_gt_map[word]:
word_correct += 1
else:
word_errors += 1
# Estimate phone-level errors
phone_error = phone_errors_for_single_word([decodes[index]],
self.g2p_gt_map[word])
phone_errors += phone_error
return word_correct, word_errors, phone_errors, total_ref_phones
def phone_errors_for_single_word(predicted_strs, ref_strs):
"""
Given decoded results (depending on beam size) and a list of ref
pronunciations, estimate the phone-level edit distance. Return the min
distance.
"""
phone_error_list = []
for ref_str in ref_strs:
for predicted_str in predicted_strs:
d = phone_edit_distance(predicted_str, ref_str)
phone_error_list.append(d)
return min(phone_error_list)
def phone_edit_distance(predicted_str, ref_str):
"""
Estimate the edit distance between predicted and ref phone sequences.
"""
predicted_list = predicted_str.split(" ")
ref_list = ref_str.split(" ")
m, n = len(predicted_list), len(ref_list)
dp = [[0] * (m+1) for _ in range(n+1)]
dp[0][0] = 0
for i in range(1, m+1):
dp[0][i] = i
for i in range(1, n+1):
dp[i][0] = i
for i in range(1, m+1):
for j in range(1, n+1):
if predicted_list[i-1] == ref_list[j-1]:
dp[j][i] = dp[j-1][i-1]
else:
dp[j][i] = min(dp[j-1][i] + 1, dp[j][i-1] + 1, dp[j-1][i-1] + 1)
return dp[n][m]
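# Editor's note: the two helpers above are a standard Levenshtein distance over
# phone tokens and its minimum over the reference pronunciations. The sketch
# below is not part of the original module; the phone strings are made-up examples.
def _edit_distance_self_check():
    """Tiny sanity check for phone_edit_distance / phone_errors_for_single_word."""
    # One phone deleted -> distance 1.
    assert phone_edit_distance("T EH S", "T EH S T") == 1
    # With several candidate pronunciations, the minimum error over candidates is taken.
    assert phone_errors_for_single_word(["T EH S", "T EH S T"], ["T EH S T"]) == 0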
def get_word():
"""Get next word in the interactive mode."""
word = ""
try:
word = input("> ")
#if not issubclass(type(word), text_type):
# word = text_type(word, encoding="utf-8", errors="replace")
except EOFError:
pass
if not word:
pass
return word
def create_g2p_gt_map(words, pronunciations):
"""Create grapheme-to-phoneme ground true mapping."""
g2p_gt_map = {}
for word, pronunciation in zip(words, pronunciations):
if word in g2p_gt_map:
g2p_gt_map[word].append(pronunciation)
else:
g2p_gt_map[word] = [pronunciation]
return g2p_gt_map
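# Editor's sketch (not part of the original module): a word that appears twice in
# the source file ends up mapped to both of its reference pronunciations, which is
# what calc_errors relies on when scoring beams. The phone strings are illustrative.
def _example_gt_map():
    gt_map = create_g2p_gt_map(["read", "read"], ["R EH D", "R IY D"])
    return gt_map  # {"read": ["R EH D", "R IY D"]}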
def _get_inputs(filename, delimiters="\t "):
"""Returning inputs.
Args:
filename: path to file with inputs, 1 per line.
delimiters: str, delimits records in the file.
Returns:
a list of inputs
"""
tf.logging.info("Getting inputs")
delimiters_regex = re.compile("[" + delimiters + "]+")
inputs = []
with tf.gfile.Open(filename) as input_file:
lines = input_file.readlines()
for line in lines:
if set("[" + delimiters + "]+$").intersection(line):
items = re.split(delimiters_regex, line.strip(), maxsplit=1)
inputs.append(items[0])
else:
inputs.append(line.strip())
return inputs
def _decode_batch_input_fn(num_decode_batches, inputs,
vocabulary, batch_size, max_input_size):
"""Decode batch"""
for batch_idx in range(num_decode_batches):
tf.logging.info("Decoding batch %d out of %d" % (batch_idx, num_decode_batches))
batch_length = 0
batch_inputs = []
for _inputs in inputs[batch_idx * batch_size:(batch_idx + 1) * batch_size]:
input_ids = vocabulary.encode(_inputs)
if max_input_size > 0:
# Subtract 1 for the EOS_ID.
input_ids = input_ids[:max_input_size - 1]
input_ids.append(text_encoder.EOS_ID)
batch_inputs.append(input_ids)
if len(input_ids) > batch_length:
batch_length = len(input_ids)
final_batch_inputs = []
for input_ids in batch_inputs:
assert len(input_ids) <= batch_length
encoded_input = input_ids + [0] * (batch_length - len(input_ids))
final_batch_inputs.append(encoded_input)
yield {
"inputs": np.array(final_batch_inputs).astype(np.int32),
"problem_choice": np.array(0).astype(np.int32),
}
def execute_schedule(exp, params):
if not hasattr(exp, params.schedule):
raise ValueError(
"Experiment has no method %s, from --schedule" % params.schedule)
with profile_context(params):
getattr(exp, params.schedule)()
@contextlib.contextmanager
def profile_context(params):
if params.profile:
with tf.contrib.tfprof.ProfileContext("t2tprof",
trace_steps=range(100),
dump_steps=range(100)) as pctx:
opts = tf.profiler.ProfileOptionBuilder.time_and_memory()
pctx.add_auto_profiling("op", opts, range(100))
yield
else:
yield
| python |
import os
from shutil import copy
def prepare_iso_linux(iso_base_dir, rootfs_dir):
# copy isolinux files to the corresponding folder
isolinux_files = ['isolinux.bin', 'isolinux.cfg', 'ldlinux.c32']
for file in isolinux_files:
full_file = '/etc/omni-imager/isolinux/' + file
copy(full_file, iso_base_dir)
# copy linux kernel to the corresponding folder
kernel_dir = rootfs_dir + '/boot/vmlinuz-*'
cmd = ['cp', kernel_dir, iso_base_dir + '/vmlinuz']
os.system(' '.join(cmd))
def make_iso(iso_base, rootfs_dir):
prepare_iso_linux(iso_base, rootfs_dir)
orig_dir = os.getcwd()
os.chdir(iso_base)
cmd = 'mkisofs -R -l -D -o ../openEuler-test.iso -b isolinux.bin -c boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table ./'
os.system(cmd)
os.chdir(orig_dir)
| python |
# Generated by Django 3.1 on 2021-03-02 21:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('waterspout_api', '0007_auto_20201215_1526'),
]
operations = [
migrations.AddField(
model_name='calibratedparameter',
name='price_yield_correction_factor',
field=models.DecimalField(decimal_places=3, default=1, max_digits=6),
),
migrations.AddField(
model_name='cropmodification',
name='region',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='crop_modifications', to='waterspout_api.crop'),
),
]
| python |
#!/usr/bin/env python3
import sys, utils, random # import the modules we will need
utils.check_version((3,7)) # make sure we are running at least Python 3.7
utils.clear() # clear the screen
print('Greetings!') # print out 'Greetings!'
colors = ['red','orange','yellow','green','blue','violet','purple'] # make a list of colors
play_again = '' # make "play again" empty
best_count = sys.maxsize # start with the largest possible number
while (play_again != 'n' and play_again != 'no'): # start a while loop with two conditions
    match_color = random.choice(colors) # use random.choice to pick a color at random
    count = 0 # count starts at 0
    color = '' # make color empty
    while (color != match_color):
        color = input("\nWhat is my favorite color? ") # \n is a special code that adds a new line
        color = color.lower().strip() # lower-case 'color' and strip surrounding spaces
        count += 1 # increase 'count' by one after each guess
        if (color == match_color): # if color equals match_color, run the following code
            print('Correct!') # when the condition is true, print 'Correct!'
        else: # if false
            print('Sorry, try again. You have guessed {guesses} times.'.format(guesses=count)) # otherwise print this line
    print('\nYou guessed it in {0} tries!'.format(count)) # print the number of tries on a new line
    if (count < best_count): # if the user's tries are fewer than best_count
        print('This was your best guess so far!') # print this line
        best_count = count # let best_count = count
    play_again = input("\nWould you like to play again? ").lower().strip() # ask on a new line, lower-cased and stripped
print('Thanks for playing!') # print out 'Thanks for playing.' | python |
import sys
PY3 = (sys.version_info[0] >= 3)
if PY3:
basestring = unicode = str
else:
unicode = unicode
basestring = basestring
if PY3:
from ._py3compat import execfile
else:
execfile = execfile
| python |
#!/usr/bin/env python
import sys
import subprocess
#----------------------------------------------------------------------
## generic pipe-like cleaning functions
def rpl(x, y=''):
def _func(s):
return s.replace(x, y)
return _func
def pipe(*args):
def _func(txt):
return subprocess.run(list(args), input=txt,
text=True, capture_output=True).stdout
return _func
def read_file(path):
with open(path) as f:
txt = f.read()
return txt
def remove_blanklines(txt):
return '\n'.join([l for l in txt.splitlines() if l])
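# Editor's sketch (not part of the original script): the factories above compose
# like a small pipeline. This example exercises only the pure-Python helpers (no
# external binaries), and the sample text is made up.
def _example_cleanup(txt="int x;\n\nC74_CONST int y;\n"):
    for func in [remove_blanklines, rpl(';'), rpl('C74_CONST', 'const')]:
        txt = func(txt)
    return txt  # "int x\nconst int y"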
#----------------------------------------------------------------------
## main process pipeline
def main(path):
# text processing pipeline
pipeline = [
pipe('/usr/local/bin/stripcmt'), # strip comments
remove_blanklines,
rpl(';'),
rpl('C74_CONST', 'const'),
rpl('(void)', '()'),
]
# read it
with open(path) as f:
txt = f.read()
# process it
for func in pipeline:
txt = func(txt)
return txt
if __name__ == '__main__':
output = main(sys.argv[1])
print(output) # for convenient redirection
| python |
import flask
from flask import request, jsonify
from secrets import secrets
from babel_categories import BabelCategories
from babel_hypernyms import BabelHypernyms
from babel_lemmas_of_senses import BabelLemmasOfSenses
from babel_parser import BabelParser
app = flask.Flask(__name__)
app.config["DEBUG"] = True
@app.route('/', methods=['GET'])
def home():
return "<h1>API para reconocimiento de metáforas</h1><p>Esta es un prototipo de API para reconocimiento de metáforas en castellano.</p>"
def parse_mode(args, key):
if not 'parser' in args: # Default
return BabelParser(secrets['babel_key'])
elif args['parser'] == 'babel_parser':
return BabelParser(secrets['babel_key'])
else:
raise Exception('El parser elegido no existe')
def source_mode(args, key):
if not 'mode' in args:
raise Exception('No se ha elegido un método de comprobación')
elif args['mode'] == 'babel_categories':
return BabelCategories(key)
elif args['mode'] == 'babel_hypernyms':
return BabelHypernyms(key)
elif args['mode'] == 'babel_senses':
return BabelLemmasOfSenses(key)
else:
raise Exception('El método de comprobación elegido no existe')
def choose_parser_key(args):
if 'parser_key' in args:
return args['parser_key']
else:
return secrets['babel_key']
def choose_source_key(args):
if 'mode_key' in args:
return args['mode_key']
else:
return secrets['babel_key']
def get_text(args):
if 'text' in args:
return args['text']
else:
raise Exception('Es necesario proporcionar el texto a analizar')
@app.route('/api/v1/check', methods=['GET'])
def api_v1_check():
#TODO: check whether the Babel API returns nothing
parser_key = choose_parser_key(request.args)
source_key = choose_source_key(request.args)
parser = parse_mode(request.args, parser_key)
source = source_mode(request.args, source_key)
text = get_text(request.args)
word_and_id = None
try:
word_and_id = parser.parse(text)
except:
raise Exception('Hubo un problema analizando sintácticamente el texto')
metaphors_found = None
try:
metaphors_found = source.find_metaphors(word_and_id)
except:
raise Exception('Hubo un problema buscando la metáfora')
return {
'text': text,
'parser': parser.toString(),
'mode': source.toString(),
'relation': metaphors_found['relation'],
'isMetaphor': metaphors_found['isMetaphor'],
'reason': metaphors_found['reason'],
}, 200, {'Access-Control-Allow-Origin': '*'}
if __name__ == '__main__':
app.run()
| python |
__author__ = 'Spasley'
| python |
from rest_framework import exceptions, status
from api.services import translation
class PreconditionFailedException(exceptions.APIException):
status_code = status.HTTP_412_PRECONDITION_FAILED
default_detail = translation.Messages.MSG_PRECONDITION_FAILED
default_code = 'precondition_failed'
| python |
import warnings
import pulumi
class Provider(pulumi.ProviderResource):
"""
The provider type for the kubernetes package.
"""
def __init__(self,
resource_name,
opts=None,
cluster=None,
context=None,
enable_dry_run=None,
kubeconfig=None,
namespace=None,
suppress_deprecation_warnings=None,
render_yaml_to_directory=None,
__name__=None,
__opts__=None):
"""
Create a Provider resource with the given unique name, arguments, and options.
:param str resource_name: The unique name of the resource.
:param pulumi.ResourceOptions opts: An optional bag of options that controls this resource's behavior.
:param pulumi.Input[str] cluster: If present, the name of the kubeconfig cluster to use.
:param pulumi.Input[str] context: If present, the name of the kubeconfig context to use.
:param pulumi.Input[bool] enable_dry_run: BETA FEATURE - If present and set to True, enable server-side diff
calculations. This feature is in developer preview, and is disabled by default.
This config can be specified in the following ways, using this precedence:
1. This `enableDryRun` parameter.
2. The `PULUMI_K8S_ENABLE_DRY_RUN` environment variable.
:param pulumi.Input[str] kubeconfig: The contents of a kubeconfig file.
If this is set, this config will be used instead of $KUBECONFIG.
:param pulumi.Input[str] namespace: If present, the default namespace to use.
This flag is ignored for cluster-scoped resources.
A namespace can be specified in multiple places, and the precedence is as follows:
1. `.metadata.namespace` set on the resource.
2. This `namespace` parameter.
3. `namespace` set for the active context in the kubeconfig.
:param pulumi.Input[bool] suppress_deprecation_warnings: If present and set to True, suppress apiVersion
deprecation warnings from the CLI.
This config can be specified in the following ways, using this precedence:
1. This `suppressDeprecationWarnings` parameter.
2. The `PULUMI_K8S_SUPPRESS_DEPRECATION_WARNINGS` environment variable.
:param pulumi.Input[str] render_yaml_to_directory: BETA FEATURE - If present, render resource manifests to this
directory. In this mode, resources will not be created on a Kubernetes cluster, but
the rendered manifests will be kept in sync with changes to the Pulumi program.
This feature is in developer preview, and is disabled by default. Note that some
computed Outputs such as status fields will not be populated since the resources are
not created on a Kubernetes cluster. These Output values will remain undefined,
and may result in an error if they are referenced by other resources. Also note that
any secret values used in these resources will be rendered in plaintext to the
resulting YAML.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = {
"cluster": cluster,
"context": context,
"enableDryRun": enable_dry_run,
"kubeconfig": kubeconfig,
"namespace": namespace,
"suppressDeprecationWarnings": suppress_deprecation_warnings,
"renderYamlToDirectory": render_yaml_to_directory,
}
super(Provider, self).__init__("kubernetes", resource_name, __props__, opts)
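# Editor's hedged usage sketch (not part of the generated SDK): shows how the
# documented arguments map onto the constructor inside a Pulumi program. The
# kubeconfig path below is an assumption for illustration only.
def _example_provider(kubeconfig_path="./kubeconfig.yaml"):
    with open(kubeconfig_path) as f:
        contents = f.read()
    return Provider("dev-cluster", kubeconfig=contents, namespace="dev")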
| python |
import json
import pulumi
import pulumi_aws as aws
# CONFIG
DB_NAME='dbdemo'
DB_USER='user1'
DB_PASSWORD='p2mk5JK!'
DB_PORT=6610
IAM_ROLE_NAME = 'redshiftrole'
redshift_role = aws.iam.Role(IAM_ROLE_NAME,
assume_role_policy=json.dumps({
"Version": "2012-10-17",
"Statement": [{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Sid": "",
"Principal": {
"Service": "redshift.amazonaws.com",
},
}],
}))
# allow s3 read
aws.iam.RolePolicyAttachment(IAM_ROLE_NAME+'attachment',
role=redshift_role.name,
policy_arn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")
redshift_cluster = aws.redshift.Cluster("default",
cluster_identifier="moshe-cluster",
cluster_type="single-node",
database_name=DB_NAME,
master_password=DB_PASSWORD,
master_username=DB_USER,
node_type="dc1.large",
iam_roles=[redshift_role.arn],
port=DB_PORT,
skip_final_snapshot=True,
)
pulumi.export('arn', redshift_role.arn)
pulumi.export('host', redshift_cluster.dns_name) | python |
import win32api, mmapfile
import winerror
import tempfile, os
from pywin32_testutil import str2bytes
system_info=win32api.GetSystemInfo()
page_size=system_info[1]
alloc_size=system_info[7]
fname=tempfile.mktemp()
mapping_name=os.path.split(fname)[1]
fsize=8*page_size
print fname, fsize, mapping_name
m1=mmapfile.mmapfile(File=fname, Name=mapping_name, MaximumSize=fsize)
m1.seek(100)
m1.write_byte(str2bytes('?'))
m1.seek(-1,1)
assert m1.read_byte()==str2bytes('?')
## A reopened named mapping should have exact same size as original mapping
m2=mmapfile.mmapfile(Name=mapping_name, File=None, MaximumSize=fsize*2)
assert m2.size()==m1.size()
m1.seek(0,0)
m1.write(fsize*str2bytes('s'))
assert m2.read(fsize)==fsize*str2bytes('s')
move_src=100
move_dest=500
move_size=150
m2.seek(move_src,0)
assert m2.tell()==move_src
m2.write(str2bytes('m')*move_size)
m2.move(move_dest, move_src, move_size)
m2.seek(move_dest, 0)
assert m2.read(move_size) == str2bytes('m') * move_size
## m2.write('x'* (fsize+1))
m2.close()
m1.resize(fsize*2)
assert m1.size()==fsize * 2
m1.seek(fsize)
m1.write(str2bytes('w') * fsize)
m1.flush()
m1.close()
os.remove(fname)
## Test a file with size larger than 32 bits
## need 10 GB free on drive where your temp folder lives
fname_large=tempfile.mktemp()
mapping_name='Pywin32_large_mmap'
offsetdata=str2bytes('This is start of offset')
## Deliberately use odd numbers to test rounding logic
fsize = (1024*1024*1024*10) + 333
offset = (1024*1024*32) + 42
view_size = (1024*1024*16) + 111
## round mapping size and view size up to multiple of system page size
if fsize%page_size:
fsize += page_size - (fsize%page_size)
if view_size%page_size:
view_size += page_size - (view_size%page_size)
## round offset down to multiple of allocation granularity
offset -= offset%alloc_size
m1=None
m2=None
try:
try:
m1=mmapfile.mmapfile(fname_large, mapping_name, fsize, 0, offset*2)
except mmapfile.error, exc:
# if we don't have enough disk-space, that's OK.
if exc.winerror!=winerror.ERROR_DISK_FULL:
raise
print "skipping large file test - need", fsize, "available bytes."
else:
m1.seek(offset)
m1.write(offsetdata)
## When reopening an existing mapping without passing a file handle, you have
## to specify a positive size even though it's ignored
m2=mmapfile.mmapfile(File=None, Name=mapping_name, MaximumSize=1,
FileOffset=offset, NumberOfBytesToMap=view_size)
assert m2.read(len(offsetdata))==offsetdata
finally:
if m1 is not None:
m1.close()
if m2 is not None:
m2.close()
if os.path.exists(fname_large):
os.remove(fname_large)
| python |
# Copyright (c) Microsoft Corporation.
# Copyright (c) 2018 Jensen Group
# Licensed under the MIT License.
"""
Module for generating rdkit molobj/smiles/molecular graph from free atoms
Implementation by Jan H. Jensen, based on the paper
Yeonjoon Kim and Woo Youn Kim
"Universal Structure Conversion Method for Organic Molecules: From Atomic Connectivity
to Three-Dimensional Geometry"
Bull. Korean Chem. Soc. 2015, Vol. 36, 1769-1777
DOI: 10.1002/bkcs.10334
"""
from qdk.chemistry._xyz2mol.ac import xyz2AC, AC2mol
from qdk.chemistry._xyz2mol.util import chiral_stereo_check
def xyz2mol(
atoms,
coordinates,
charge=0,
allow_charged_fragments=True,
use_graph=True,
use_huckel=False,
embed_chiral=True
):
"""
Generate a rdkit molobj from atoms, coordinates and a total_charge.
args:
atoms - list of atom types (int)
coordinates - 3xN Cartesian coordinates
charge - total charge of the system (default: 0)
optional:
allow_charged_fragments - alternatively radicals are made
use_graph - use graph (networkx)
use_huckel - Use Huckel method for atom connectivity prediction
embed_chiral - embed chiral information to the molecule
returns:
mols - list of rdkit molobjects
"""
# Get atom connectivity (AC) matrix, list of atomic numbers, molecular charge,
# and mol object with no connectivity information
AC, mol = xyz2AC(atoms, coordinates, charge, use_huckel=use_huckel)
# Convert AC to bond order matrix and add connectivity and charge info to
# mol object
new_mols = AC2mol(mol, AC, atoms, charge,
allow_charged_fragments=allow_charged_fragments,
use_graph=use_graph)
# Check for stereocenters and chiral centers
if embed_chiral:
for new_mol in new_mols:
chiral_stereo_check(new_mol)
return new_mols
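# Editor's hedged sketch (not part of the original module): a minimal call using a
# hard-coded water geometry; the atomic numbers and coordinates are illustrative only.
def _example_water():
    atoms = [8, 1, 1]  # O, H, H
    coordinates = [
        [0.000, 0.000, 0.000],
        [0.758, 0.587, 0.000],
        [-0.758, 0.587, 0.000],
    ]
    return xyz2mol(atoms, coordinates, charge=0)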
| python |
import logging
import os
import socket
from logging import Logger
from typing import Any, Dict, List, Optional, Union
from pathlib import Path
import docker
import dockerpty
from docker import DockerClient
from docker.models.images import Image
from docker.errors import APIError, DockerException
from requests import RequestException
from .utils import BLDRSetupFailed
def _create_docker_client() -> DockerClient:
try:
return docker.from_env(version='auto')
except DockerException as e:
raise BLDRSetupFailed(
'Cannot create Docker client. Is Docker daemon running?\nAdditional info: {}'.format(e)
)
def _check_docker_client(client: DockerClient) -> None:
try:
client.ping()
except (DockerException, RequestException) as e:
raise BLDRSetupFailed(
'Cannot connect to Docker daemon. Is Docker daemon running?\nAdditional info: {}'.format(e)
)
class DockerImageBuilder:
def __init__(self, client: Optional[DockerClient] = None, logger: Logger = logging.getLogger('DockerImageBuilder')) -> None:
self._logger: logging.Logger = logger
if client is None:
client = _create_docker_client()
self._client: DockerClient = client
_check_docker_client(self._client)
def build(self, path: Path, dockerfile: str, tag: str, buildargs: Dict, nocache: bool = False) -> 'DockerImage':
stream = self._client.api.build(
path=str(path),
dockerfile=dockerfile,
tag=tag,
forcerm=True,
nocache=nocache,
buildargs=buildargs,
decode=True,
)
for chunk in stream:
if chunk.get('stream', None) is not None:
self._logger.debug(chunk.get('stream').strip())
elif chunk.get('errorDetail', None) is not None:
raise DockerException(chunk.get('error'))
return DockerImage(client=self._client, image=tag)
class DockerImage:
def __init__(self, image: Union[str, Image], client: Optional[DockerClient] = None, logger: Optional[Logger] = None) -> None:
if client is None:
client = _create_docker_client()
self._client = client
_check_docker_client(self._client)
self._logger = logger
if self._logger is None:
self._logger = logging.getLogger('DockerImage')
self._tag = image
def create_container(self, **kwargs: Any) -> 'DockerContainer':
return DockerContainer(client=self._client, image=self._tag, **kwargs)
class DockerContainer:
def __init__(
self,
image: Union[str, Image],
command: Union[str, List],
environment: Optional[Dict] = None,
user: Optional[str] = None,
volumes: Optional[Dict] = None,
client: Optional[DockerClient] = None,
logger: Logger = logging.getLogger('DockerContainer'),
tmp_on_tmpfs: bool = True,
) -> None:
if client is None:
client = _create_docker_client()
self._client = client
_check_docker_client(self._client)
self._logger = logger
try:
self._client.images.get(image)
except docker.errors.ImageNotFound:
self._client.images.pull(image)
tmpfs = {'/tmp': 'rw,exec'} if tmp_on_tmpfs else {}
self._container = self._client.containers.create(
init=True,
image=image,
command=command,
stdin_open=True,
tty=os.isatty(0),
environment=environment,
network='host',
security_opt=['seccomp=unconfined'],
tmpfs=tmpfs,
user=user,
volumes=volumes,
extra_hosts={socket.gethostname(): "127.0.0.1"},
)
def __enter__(self) -> 'DockerContainer':
self._container.start()
return self
def run_with_pty(self, interactive: bool = False) -> int:
dockerpty.start(self._client.api, self._container.id, interactive=interactive, logs=True)
exit_code = self.get_exit_code()
self._container.remove()
return exit_code
def exec(self, command: Union[str, List]) -> int:
exec_id = self._client.api.exec_create(container=self._container.id, cmd=command)
stream = self._client.api.exec_start(exec_id=exec_id, stream=True)
for chunk in stream:
self._logger.debug(chunk.decode('utf-8', errors='ignore').strip())
return self._client.api.exec_inspect(exec_id=exec_id).get('ExitCode', 0)
def exec_run(self, command: Union[str, List]) -> str:
exitcode, output = self._container.exec_run(command)
if exitcode != 0:
raise ValueError('The following command "{}" exited with code: {}'.format(command, exitcode))
output = output.decode('utf-8', errors='ignore')
return output
def exec_with_pty(self, command: Union[str, List]) -> None:
dockerpty.exec_command(self._client.api, self._container.id, command=command)
def get_exit_code(self) -> int:
return self._client.api.inspect_container(self._container.id)['State'].get('ExitCode', 0)
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
try:
self._container.kill()
except APIError:
pass
finally:
self._container.remove()
| python |
#!/usr/bin/python
# coding=utf-8
import json
import sys
from PIL import Image
from pprint import pprint
import mutual_infor as mi
'''
note: Imager
'''
default_img_path = "img.jpg"
data_dir = "data/map_img/"
class Imager:
def __init__(self, path):
self.path = path
self.entropy = 0.0
self.width = 0
self.height = 0
self.is_process = False
def load(self):
try:
im = Image.open(self.path)
except IOError, e:
print "error msg:", e
return
self.data = im.getdata()
self.width = im.width
self.height = im.height
im.close()
def display(self):
data = {}
data["path"] = self.path
data["entropy"] = self.entropy
data["width"] = self.width
data["height"] = self.height
res = json.dumps(data)
return res
def get_image_info(self):
image_info = {}
if not self.is_process:
self.process()
image_info["width"] = self.width
image_info["height"] = self.height
image_info["entropy"] = self.entropy
return image_info
def process(self):
try:
im = Image.open(self.path).convert("L")
except IOError as e:
print e
else:
self.width = im.width
self.height = im.height
# get entropy
self.data = im.getdata()
mi_base = mi.MIBase()
self.entropy = mi_base.compute_entropy(self.data)
im.close()
def get_graydata(self):
try:
im = Image.open(self.path).convert("L")
except IOError as e:
print e
return
else:
data = im.getdata()
im.close()
return data
if __name__ == '__main__':
if len(sys.argv) == 2:
image = Imager(sys.argv[1])
image.process()
data = image.display()
print data
else:
print "param error"
| python |
from z3 import Int
class Storage(object):
def __init__(self):
self._storage = {}
def __getitem__(self, item):
if item not in self._storage.keys():
# self._storage[item] = Int("s_" + str(item))
self._storage[item] = 0
return self._storage[item]
def __setitem__(self, key, value):
self._storage[key] = value
def __len__(self):
return len(self._storage)
def get_storage(self) -> dict:
return self._storage
| python |
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
import kenlm
from predictor import WordPredictor
from vocabtrie import VocabTrie
import numbers
class TestWordPredictor(unittest.TestCase):
def setUp(self):
self.wordPredictor = WordPredictor('../resources/lm_word_medium.kenlm', '../resources/vocab_100k')
self.language_model = kenlm.LanguageModel('../resources/lm_word_medium.kenlm')
self.vocab_filename = '../resources/vocab_100k'
self.vocab_id = ''
def test_create_new_trie(self):
wp = self.wordPredictor
self.assertIsInstance(wp.create_new_trie(self.vocab_filename), VocabTrie, "OK")
def test_update_char_list_from_string(self):
list = ['a']
str = "bc"
res = ['a', 'b', 'c']
self.assertEqual(self.wordPredictor.update_char_list_from_string(list, str), res, "OK")
def test_create_char_list_from_vocab(self):
char_set = self.wordPredictor.create_char_list_from_vocab(self.vocab_id, self.vocab_filename)
#id, char_set = test_res.popitem()
#self.assertIsInstance(type(id), type(str), "Return type is not same")
self.assertIsInstance(type(char_set), type(set), "Return type is not same")
def test_add_vocab(self, vocab_id = 'vocab_id'):
new_trie = self.wordPredictor.create_new_trie(self.vocab_filename)
self.assertTrue((new_trie!= None))
self.assertFalse((new_trie == None))
def test_get_vocab_trie(self):
flag, vocabTr = self.wordPredictor.get_vocab_trie(self.vocab_id)
self.assertIsInstance(vocabTr, VocabTrie, 'Not OK')
self.assertIsInstance(type(flag), type(bool), "Not OK")
"""
def test_get_punc_token(self):
self.assertEqual(self.wordPredictor.get_punc_token(','), ',comma', 'Punctuation and token are not equal')
"""
def test_get_context_state(self):
sIn, sOut = self.wordPredictor.get_context_state('<s>', self.language_model, self.vocab_id)
self.assertIsInstance(sIn, kenlm.State, 'stateIn is not an instance of kenlm.State')
self.assertIsInstance(sOut, kenlm.State, 'stateOut is not an instance of kenlm.State')
def test_find_most_probable_word(self):
pass
def test_get_words(self):
pass
def test__get_words(self):
suggestion_list = self.wordPredictor._get_words('a', 'the united states of', self.vocab_id, 3,-float('inf'))
self.assertTrue(isinstance(type(suggestion_list), type(str)), "Not a list") #basestring is gone in python 3
def test_print_suggestions(self):
pass
def test_get_most_likely_word(self):
word, log_prob = self.wordPredictor.get_most_probable_word('a', 'the united states of', self.vocab_id)
self.assertEqual(word, 'america', "Not equal")
self.assertTrue(isinstance(log_prob, numbers.Number), "False")
if __name__ == '__main__':
unittest.main()
| python |
import logging
from pdb import Pdb
import sys
import time
from pathlib import Path
from typing import List
from pprint import pformat
import docker
import yaml
logger = logging.getLogger(__name__)
current_dir = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
WAIT_TIME_SECS = 20
RETRY_COUNT = 7
MAX_WAIT_TIME = 240
# https://docs.docker.com/engine/swarm/how-swarm-mode-works/swarm-task-states/
pre_states = ["NEW", "PENDING", "ASSIGNED", "PREPARING", "STARTING"]
failed_states = [
"COMPLETE",
"FAILED",
"SHUTDOWN",
"REJECTED",
"ORPHANED",
"REMOVE",
"CREATED",
]
# UTILS --------------------------------
def get_tasks_summary(tasks):
msg = ""
for t in tasks:
t["Status"].setdefault("Err", "")
msg += "- task ID:{ID}, STATE: {Status[State]}, ERROR: '{Status[Err]}' \n".format(
**t
)
return msg
def get_failed_tasks_logs(service, docker_client):
failed_logs = ""
for t in service.tasks():
if t["Status"]["State"].upper() in failed_states:
cid = t["Status"]["ContainerStatus"]["ContainerID"]
failed_logs += "{2} {0} - {1} BEGIN {2}\n".format(
service.name, t["ID"], "=" * 10
)
if cid:
container = docker_client.containers.get(cid)
failed_logs += container.logs().decode("utf-8")
else:
failed_logs += " log unavailable. container does not exists\n"
failed_logs += "{2} {0} - {1} END {2}\n".format(
service.name, t["ID"], "=" * 10
)
return failed_logs
# --------------------------------------------------------------------------------
def osparc_simcore_root_dir() -> Path:
WILDCARD = "services/web/server"
root_dir = Path(current_dir)
while not any(root_dir.glob(WILDCARD)) and root_dir != Path("/"):
root_dir = root_dir.parent
msg = f"'{root_dir}' does not look like the git root directory of osparc-simcore"
assert root_dir.exists(), msg
assert any(root_dir.glob(WILDCARD)), msg
assert any(root_dir.glob(".git")), msg
return root_dir
def core_docker_compose_file() -> Path:
return osparc_simcore_root_dir() / ".stack-simcore-version.yml"
def core_services() -> List[str]:
with core_docker_compose_file().open() as fp:
dc_specs = yaml.safe_load(fp)
return [x for x in dc_specs["services"].keys()]
def ops_docker_compose_file() -> Path:
return osparc_simcore_root_dir() / ".stack-ops.yml"
def ops_services() -> List[str]:
with ops_docker_compose_file().open() as fp:
dc_specs = yaml.safe_load(fp)
return [x for x in dc_specs["services"].keys()]
def wait_for_services() -> None:
# get all services
services = core_services() + ops_services()
client = docker.from_env()
running_services = [
x for x in client.services.list() if x.name.split("_")[-1] in services
]
# check all services are in
assert len(running_services), "no services started!"
assert len(services) == len(
running_services
), f"Some services are missing or unexpected:\nexpected: {len(services)} {services}\ngot: {len(running_services)} {[service.name for service in running_services]}"
# now check they are in running mode
for service in running_services:
task = None
for n in range(RETRY_COUNT):
# get last updated task
sorted_tasks = sorted(service.tasks(), key=lambda task: task["UpdatedAt"])
task = sorted_tasks[-1]
if task["Status"]["State"].upper() in pre_states:
print(
"Waiting [{}/{}] for {}...\n{}".format(
n, RETRY_COUNT, service.name, get_tasks_summary(service.tasks())
)
)
time.sleep(WAIT_TIME_SECS)
elif task["Status"]["State"].upper() in failed_states:
print(
f"Waiting [{n}/{RETRY_COUNT}] Service {service.name} failed once...\n{get_tasks_summary(service.tasks())}"
)
time.sleep(WAIT_TIME_SECS)
else:
break
assert task
assert (
task["Status"]["State"].upper() == "RUNNING"
), "Expected running, got \n{}\n{}".format(
pformat(task), get_tasks_summary(service.tasks())
)
# get_failed_tasks_logs(service, client))
if __name__ == "__main__":
# get retry parameters
# wait for the services
sys.exit(wait_for_services())
| python |
"""Settings for admin panel related to the authors app."""
| python |
import unittest
from yauber_algo.errors import *
class PercentRankTestCase(unittest.TestCase):
def test_category(self):
import yauber_algo.sanitychecks as sc
from numpy import array, nan, inf
import os
import sys
import pandas as pd
import numpy as np
from yauber_algo.algo import percent_rank
#
# Function settings
#
algo = 'percent_rank'
func = percent_rank
with sc.SanityChecker(algo) as s:
#
# Check regular algorithm logic
#
s.check_regular(
array([nan, nan, nan, nan, nan, .30, .10]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
5
),
suffix='reg'
)
s.check_regular(
array([nan, nan, nan, nan, nan, 1.00, .90]),
func,
(
array([1, 2, 3, 4, 5, 6, 6]),
5
),
suffix='equal_numbers'
)
s.check_regular(
array([nan, nan, nan, nan, nan, .50, .50]),
func,
(
array([1, 1, 1, 1, 1, 1, 1]),
5
),
suffix='all_equal_numbers'
)
s.check_regular(
array([nan, nan, nan, nan, nan, nan, .10]),
func,
(
array([nan, 2, 1, 4, 3, 2, 1]),
5
),
suffix='skip_nan'
)
s.check_regular(
array([nan, nan, nan, nan, nan, nan, nan]),
func,
(
array([nan, 2, nan, 2, 3, 2, 1]),
5
),
suffix='skip_nan_min_count_5'
)
s.check_regular(
array([nan, nan, nan, nan, nan, 2 / 5, 1 / 5]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
4
),
suffix='min_period_eq_5',
exception=YaUberAlgoInternalError
)
s.check_regular(
array([nan, nan, nan, nan, nan, 2 / 5, 1 / 5]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
0
),
suffix='zero_period_err',
exception=YaUberAlgoArgumentError
)
s.check_regular(
array([nan, nan, nan, nan, nan, 2 / 5, 1 / 5]),
func,
(
array([3, 2, 1, 4, 3, 2, 1]),
-1
),
suffix='neg_period_err',
exception=YaUberAlgoArgumentError
)
s.check_naninf(
array([nan, nan, nan, nan, nan, nan, .10, nan, .20]),
func,
(
array([nan, 2, 1, 4, 3, 5, 1, inf, 1]),
6
),
suffix='inf'
)
s.check_naninf(
array([nan, nan, nan, nan, nan, nan, .10, nan, nan]),
func,
(
array([nan, 2, 1, 4, 3, 5, 1, inf, nan]),
6
),
suffix='inf_nan'
)
s.check_naninf(
array([nan, nan, nan, nan, nan, nan, .10, nan, .20]),
func,
(
array([nan, 2, 1, 4, 3, 5, 1, -inf, 1]),
6
),
suffix='neg_inf'
)
s.check_series(
pd.Series(array([nan, nan, nan, nan, nan, .30, .10])),
func,
(
pd.Series(array([3, 2, 1, 4, 3, 2, 1])),
5
),
suffix=''
)
s.check_dtype_float(
array([nan, nan, nan, nan, nan, .30, .10], dtype=np.float),
func,
(
array([3, 2, 1, 4, 3, 2, 1], dtype=np.float),
5
),
suffix=''
)
s.check_dtype_bool(
array([nan, nan, nan, nan, nan, .20, .70], dtype=np.float),
func,
(
array([0, 1, 1, 0, 1, 0, 1], dtype=np.bool),
5
),
suffix=''
)
s.check_dtype_int(
array([nan, nan, nan, nan, nan, .30, .10], dtype=np.float),
func,
(
array([3, 2, 1, 4, 3, 2, 1], dtype=np.int32),
5
),
suffix=''
)
s.check_dtype_object(
func,
(
array([3, 2, 1, 4, 3, 2, 1], dtype=np.object),
5
),
suffix=''
)
s.check_futref(5, 1,
func,
(
np.random.random(100),
5
),
)
s.check_window_consistency(5, 1,
func,
(
np.random.random(100),
5
),
) | python |
import sys
import pandas as pd
from sqlalchemy import create_engine
import pickle
import nltk
nltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])
import re
import numpy as np
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
# custom transformer
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
'''
Modified StartingVerbExtractor class used to improve analysis performance
'''
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, x, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
def load_data(database_filepath):
'''
Load dataset, input set, and labels set from SQLite database.
Arguments:
database_filepath: path to database where dataset is saved to (String)
Returns:
X: feature dataset (Pandas Series)
y: label dataset (Pandas DataFrame)
category_names: list of column names (Pandas Index)
'''
engine = create_engine('sqlite:///' + database_filepath)
df = pd.read_sql_table('df',engine)
# load feature set (X), label set (Y), and column names
X = df['message']
y = df.iloc[:,4:]
category_names = y.columns
return X, y, category_names
def tokenize(text):
'''
Tokenize text to enable NLP.
Arguments:
text: English text to be tokenized for ML (List)
Returns:
clean_tokens: tokenized text for ML (List)
'''
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
detected_urls = re.findall(url_regex, text)
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
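# Editor's sketch (not part of the original script): URLs are swapped for a
# placeholder before tokenizing, then tokens are lemmatized and lower-cased.
# The message text is made up.
def _example_tokenize():
    return tokenize("Need water at http://example.com NOW")
    # -> roughly ['need', 'water', 'at', 'urlplaceholder', 'now']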
def build_model():
'''
Build ML pipeline that includes GridSearch, FeatureUnion, pipeline with CountVectorizer and TfidfTransformer, StartingVerbExtractor, and AdaBoostClassifier for analysis.
Returns:
model: ML pipeline that contains NLP processes and classifier (Scikit Pipeline)
'''
# parameters for grid search to improve pipeline performance
parameters = {
'features__text_pipeline__vect__ngram_range': ((1, 1), (1, 2)),
'features__text_pipeline__vect__max_df': (0.5, 0.75),
'features__text_pipeline__vect__max_features': (None, 5000),
'features__text_pipeline__tfidf__use_idf': (True, False)
}
pipeline = Pipeline([
('features', FeatureUnion([
('text_pipeline', Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer())
])),
('starting_verb', StartingVerbExtractor())
])),
('clf', MultiOutputClassifier(AdaBoostClassifier()))
])
model = GridSearchCV(pipeline, param_grid=parameters)
return model
def evaluate_model(model, X_test, Y_test, category_names):
'''
Evaluate performance of ML pipeline by displaying multiple scores.
Arguments:
model: ML pipeline to be evaluated (Scikit Pipeline)
X_test: test feature dataset (Pandas Series)
Y_test: test label dataset (Pandas Series)
category_names: list of column names (List)
'''
# model predictions
y_pred = model.predict(X_test)
# Overall accuracy of model
accuracy = (y_pred == Y_test).mean()
print("Overall Accuracy:", accuracy.mean())
# scores report
y_pred_df = pd.DataFrame(y_pred, columns=category_names)
for col in category_names:
print('Attribute: {}\n'.format(col))
print(classification_report(Y_test[col], y_pred_df[col]))
def save_model(model, model_filepath):
'''
Save the trained ML pipeline to a pickle file.
Arguments:
model: ML pipeline to be saved (Scikit Pipeline)
model_filepath: name of pickle file the model is saved to (String)
'''
filename = model_filepath
pickle.dump(model, open(filename, 'wb'))
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | python |
_base_ = '../pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py'
voxel_size = [0.16, 0.16, 4]
point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
model = dict(
type='DynamicVoxelNet',
voxel_layer=dict(
max_num_points=-1,
point_cloud_range=point_cloud_range,
voxel_size=voxel_size,
max_voxels=(-1, -1)),
voxel_encoder=dict(
type='DynamicPillarFeatureNet',
in_channels=4,
feat_channels=[64],
with_distance=False,
voxel_size=voxel_size,
point_cloud_range=point_cloud_range))
| python |
import collections
import sys
def main(letters, words):
d = collections.defaultdict(list)
print(d)
print(letters)
print(words)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2:])
| python |
# ### Problem 1
# Ask the user to enter a number.
# Using the provided list of numbers, use a for loop to iterate the array and print out all the values that are smaller than the user input and print out all the values that are larger than the number entered by the user.
# ```
# # Start with this List
# list_of_many_numbers = [12, 24, 1, 34, 10, 2, 7]
# Example Input/Output if the user enters the number 9:
# ```
# The User entered 9
# 1 2 7 are smaller than 9
# 12 24 34 10 are larger than 9
# ```
userinput = int(input(" Enter a number here: "))  # input from the user
list_of_many_numbers = [12, 24, 1, 34, 10, 2, 7]
# KEY: values are not printed grouped as in the example output, but the logic is correct and commented
for each in list_of_many_numbers:
    if each > userinput:
        print(f'{each} is greater than {userinput}')
    elif each == userinput:
        print("This number is present in my array")  # equal case addressed
    else:
        print(f'{each} is smaller than {userinput}')
| python |
import tensorflow as tf
import numpy as np
import json
import argparse
import cv2
import os
import glob
import math
import time
import glob
def infer(frozen_pb_path, output_node_name, img_path, output_path=None):
with tf.gfile.GFile(frozen_pb_path, "rb") as f:
restored_graph_def = tf.GraphDef()
restored_graph_def.ParseFromString(f.read())
tf.import_graph_def(
restored_graph_def,
input_map=None,
return_elements=None,
name=""
)
graph = tf.get_default_graph()
input_image = graph.get_tensor_by_name("image:0")
output_heat = graph.get_tensor_by_name("%s:0" % output_node_name)
res = {}
use_times = []
with tf.Session() as sess:
# if directory, then glob all files
if os.path.isdir(img_path):
img_files = glob.glob(os.path.join(img_path,"*"))
else:
img_files = [img_path]
print(img_path)
print(img_files)
# if file, then do once
for img_path in img_files:
fname = os.path.basename(img_path)
print(img_path)
ori_img = cv2.imread(img_path)
ori_shape = ori_img.shape
shape = input_image.get_shape().as_list()
inp_img = cv2.resize(ori_img, (shape[1], shape[2]))
st = time.time()
heat = sess.run(output_heat, feed_dict={input_image: [inp_img]})
infer_time = 1000 * (time.time() - st)
#print("img_id = %d, cost_time = %.2f ms" % (img_id, infer_time))
use_times.append(infer_time)
grey_heat = 255*np.squeeze(np.amax(heat, axis=3))
grey_heat = cv2.resize(grey_heat, (ori_shape[1], ori_shape[0]), interpolation=cv2.INTER_AREA)
color_heat = np.zeros((ori_shape[0], ori_shape[1], 3), dtype=np.float32)
color_heat[:,:,2] = grey_heat
#cv2.imwrite(output_path, grey_heat)
merged_img = cv2.addWeighted(ori_img.astype(np.float32), 1.0, color_heat, 1.0, 0)
new_fname = "_out.".join(fname.split("."))
out_fpath = os.path.join(output_path, new_fname)
cv2.imwrite(out_fpath, merged_img)
#res[img_id] = np.squeeze(heat)
print("Average inference time = %.2f ms" % np.mean(use_times))
#return res
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--frozen_pb_path", type=str, default="")
parser.add_argument("--img_path", type=str, default="")
parser.add_argument("--output_path", type=str, default="output_images")
parser.add_argument("--output_node_name", type=str, default='Convolutional_Pose_Machine/stage_5_out')
parser.add_argument("--gpus", type=str, default="1")
args = parser.parse_args()
if not os.path.isfile(args.output_path):
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
infer(args.frozen_pb_path, args.output_node_name, args.img_path, args.output_path)
| python |
#!flask/bin/python
# -*- coding: utf-8 -*-
from api import app
from flask import jsonify, make_response
@app.errorhandler(401)
def unauthorized(error=None):
mensagem = {'status': 401, 'mensagem': 'Voce nao tem permissao para acessar essa pagina!'}
resp = jsonify(mensagem)
resp.status_code = 401
# REDIRECT TO LOGIN PAGE
return resp
@app.errorhandler(404)
def not_found(error=None):
mensagem = {"status": 404, "mensagem": 'Nao encontramos o que voce estava procurando. Tente novamente.'}
resp = jsonify(mensagem)
resp.status_code = 404
return resp
@app.errorhandler(405)
def method_not_allowed(error=None):
mensagem = {'status': 405, 'mensagem': 'Metodo nao permitido!'}
resp = jsonify(mensagem)
resp.status_code = 405
return resp
@app.errorhandler(500)
def internal_server_error(error=None):
mensagem = {'status': 500, 'mensagem': 'Ops. Algo deu errado. Tente novamente.'}
resp = jsonify(mensagem)
resp.status_code = 500
return resp | python |
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.stream import HStream
from hwt.hdl.types.struct import HStruct
class USB_VER:
USB1_0 = "1.0"
USB1_1 = "1.1"
USB2_0 = "2.0"
class PID:
"""
USB Protocol layer packet identifier values
:attention: visually written msb-first, transmitted lsb-first
"""
# Address for host-to-device transfer
TOKEN_OUT = 0b0001
# Address for device-to-host transfer
TOKEN_IN = 0b1001
# Start of frame marker (sent each ms)
TOKEN_SOF = 0b0101
# Address for host-to-device control transfer
TOKEN_SETUP = 0b1101
# Even-numbered data packet
DATA_0 = 0b0011
# Odd-numbered data packet
DATA_1 = 0b1011
# Data packet for high-bandwidth isochronous transfer (USB 2.0)
DATA_2 = 0b0111
# Data packet for high-bandwidth isochronous transfer (USB 2.0)
DATA_M = 0b1111
# Data packet accepted
HS_ACK = 0b0010
# Data packet not accepted; please retransmit
HS_NACK = 0b1010
# Transfer impossible; do error recovery
HS_STALL = 0b1110
# Data not ready yet (USB 2.0)
HS_NYET = 0b0110
# Low-bandwidth USB preamble
PREAMBLE = 0b1100
# Split transaction error (USB 2.0)
ERR = 0b1100
# High-bandwidth (USB 2.0) split transaction
SPLIT = 0b1000
# Check if endpoint can accept data (USB 2.0)
PING = 0b0100
addr_t = Bits(7)
endp_t = Bits(4)
crc5_t = Bits(5)
crc16_t = Bits(16)
pid_t = Bits(4)
"""
:attention: every packet starts with sync and ends in EOP,
which is not in data structures below
"""
"""
There are three types of token packets,
* In - Informs the USB device that the host wishes to read information.
* Out - Informs the USB device that the host wishes to send information.
* Setup - Used to begin control transfers.
"""
packet_token_t = HStruct(
(pid_t, "pid"),
(addr_t, "addr"),
(endp_t, "endp"),
(crc5_t, "crc5"),
)
USB_MAX_FRAME_LEN = {
USB_VER.USB1_0: 8,
USB_VER.USB1_1: 1023,
USB_VER.USB2_0: 1024,
}
def get_packet_data_t(usb_ver: USB_VER):
max_frame_len = USB_MAX_FRAME_LEN[usb_ver]
# pid has to be one of DATA_0, DATA_1, DATA_2, DATA_M
return HStruct(
(pid_t, "pid"),
(HStream(Bits(8), frame_len=(1, max_frame_len)), "data"),
(crc16_t, "crc"),
)
"""
There are three types of handshake packets which consist simply of the PID
* ACK - Acknowledgment that the packet has been successfully received.
* NAK - Reports that the device temporarily cannot send or receive data.
Also used during interrupt transactions to inform the host there is no data to send.
* STALL - The device finds its in a state that it requires intervention from the host.
"""
packet_hs_t = HStruct(
(pid_t, "pid"),
)
"""
The SOF packet consisting of an 11-bit frame number is sent by the host
every 1ms ± 500ns on a full speed bus or every 125 µs ± 0.0625 µs on a high speed bus.
"""
frame_number_t = Bits(11)
packet_sof_t = HStruct(
(pid_t, "pid"),
(frame_number_t, "frame_number"),
(crc5_t, "crc5"),
)
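# Editor's hedged sketch (not part of the original module): instantiating the
# frame-length-bounded data packet type for a USB 2.0 bus defined above.
def _example_usb2_data_packet_type():
    return get_packet_data_t(USB_VER.USB2_0)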
| python |
###############################################################################
# Author: Wasi Ahmad
# Project: Match Tensor: a Deep Relevance Model for Search
# Date Created: 7/28/2017
#
# File Description: This script contains code related to the sequence-to-sequence
# network.
###############################################################################
import torch
import torch.nn as nn
from nn_layer import EmbeddingLayer, Encoder, ExactMatchChannel
class MatchTensor(nn.Module):
"""Class that classifies question pair as duplicate or not."""
def __init__(self, dictionary, embedding_index, args):
""""Constructor of the class."""
super(MatchTensor, self).__init__()
self.dictionary = dictionary
self.embedding_index = embedding_index
self.config = args
self.num_directions = 2 if self.config.bidirection else 1
self.embedding = EmbeddingLayer(len(self.dictionary), self.config)
self.linear_projection = nn.Linear(self.config.emsize, self.config.featsize)
self.query_encoder = Encoder(self.config.featsize, self.config.nhid_query, True, self.config)
self.document_encoder = Encoder(self.config.featsize, self.config.nhid_doc, True, self.config)
self.query_projection = nn.Linear(self.config.nhid_query * self.num_directions, self.config.nchannels)
self.document_projection = nn.Linear(self.config.nhid_doc * self.num_directions, self.config.nchannels)
self.exact_match_channel = ExactMatchChannel()
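        # Three parallel 2D convolutions with kernel sizes 3x3, 3x5 and 3x7 over the
        # (nchannels + 1)-channel match tensor (the extra channel is the exact-match
        # channel); their outputs are concatenated and reduced by a 1x1 convolution.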
self.conv1 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 3), padding=1)
self.conv2 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 5), padding=(1, 2))
self.conv3 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 7), padding=(1, 3))
self.relu = nn.ReLU()
self.conv = nn.Conv2d(self.config.nfilters * 3, self.config.match_filter_size, (1, 1))
self.output = nn.Linear(self.config.match_filter_size, 1)
# Initializing the weight parameters for the embedding layer.
self.embedding.init_embedding_weights(self.dictionary, self.embedding_index, self.config.emsize)
def forward(self, batch_queries, query_len, batch_docs, doc_len):
"""
        Forward pass of the match tensor model. Returns a relevance (click-probability) score for each query-document pair.
:param batch_queries: 2d tensor [batch_size x max_query_length]
:param query_len: 1d numpy array [batch_size]
:param batch_docs: 3d tensor [batch_size x num_rel_docs_per_query x max_document_length]
:param doc_len: 2d numpy array [batch_size x num_clicks_per_query]
:return: score representing click probability [batch_size x num_clicks_per_query]
"""
# step1: apply embedding lookup
embedded_queries = self.embedding(batch_queries)
embedded_docs = self.embedding(batch_docs.view(-1, batch_docs.size(-1)))
# step2: apply linear projection on embedded queries and documents
embedded_queries = self.linear_projection(embedded_queries.view(-1, embedded_queries.size(-1)))
embedded_docs = self.linear_projection(embedded_docs.view(-1, embedded_docs.size(-1)))
# step3: transform the tensors so that they can be given as input to RNN
embedded_queries = embedded_queries.view(*batch_queries.size(), self.config.featsize)
embedded_docs = embedded_docs.view(-1, batch_docs.size()[-1], self.config.featsize)
# step4: pass the encoded query and doc through a bi-LSTM
encoded_queries = self.query_encoder(embedded_queries, query_len)
encoded_docs = self.document_encoder(embedded_docs, doc_len.reshape(-1))
# step5: apply linear projection on query hidden states
projected_queries = self.query_projection(encoded_queries.view(-1, encoded_queries.size()[-1])).view(
*batch_queries.size(), -1)
projected_queries = projected_queries.unsqueeze(1).expand(projected_queries.size(0), batch_docs.size(1),
*projected_queries.size()[1:])
projected_queries = projected_queries.contiguous().view(-1, *projected_queries.size()[2:])
projected_docs = self.document_projection(encoded_docs.view(-1, encoded_docs.size()[-1]))
projected_docs = projected_docs.view(-1, batch_docs.size(2), projected_docs.size()[-1])
projected_queries = projected_queries.unsqueeze(2).expand(*projected_queries.size()[:2], batch_docs.size()[-1],
projected_queries.size(2))
projected_docs = projected_docs.unsqueeze(1).expand(projected_docs.size(0), batch_queries.size()[-1],
*projected_docs.size()[1:])
# step6: 2d product between projected query and doc vectors
query_document_product = projected_queries * projected_docs
# step7: append exact match channel
exact_match = self.exact_match_channel(batch_queries, batch_docs).unsqueeze(3)
query_document_product = torch.cat((query_document_product, exact_match), 3)
query_document_product = query_document_product.transpose(2, 3).transpose(1, 2)
# step8: run the convolutional operation, max-pooling and linear projection
convoluted_feat1 = self.conv1(query_document_product)
convoluted_feat2 = self.conv2(query_document_product)
convoluted_feat3 = self.conv3(query_document_product)
convoluted_feat = self.relu(torch.cat((convoluted_feat1, convoluted_feat2, convoluted_feat3), 1))
convoluted_feat = self.conv(convoluted_feat).transpose(1, 2).transpose(2, 3)
max_pooled_feat = torch.max(convoluted_feat, 2)[0].squeeze()
max_pooled_feat = torch.max(max_pooled_feat, 1)[0].squeeze()
return self.output(max_pooled_feat).squeeze().view(*batch_docs.size()[:2])
| python |
"""
Aravind Veerappan
BNFO 601 - Exam 2
Question 2. Protein BLAST
"""
import math
from PAM import PAM
class BLAST(object):
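    """
    A small seed-and-extend protein aligner in the spirit of BLAST: the target is
    indexed by words of length `word_size`, query words whose PAM score against a
    target word exceeds `threshold` seed gapped extensions in both directions, and
    extensions are scored in a sparse dynamic-programming table.
    """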
FORWARD = 1 # These are class variables shared by all instances of the BLAST class
BACKWARD = -1
ROW = (0, 1)
COLUMN = (1, 0)
def __init__(self, query=None, target=None, word_size=3, gap_open=-10, gap_extend=-4, threshold=10, PAM=None):
self.query = query # This is the string corresponding to the query sequence
self.target = target # This is the string corresponding to the target sequence
self.word_size = word_size # Size of the seed word for initiating extensions
self.word_score = None # something different required for PBLAST!
self.gap_open = gap_open
self.gap_extend = gap_extend
self.querylen = len(query)
self.targetlen = len(target)
self.blast_table = {} # Our main dynamic programming table containing scores
self.traceback_table = {} # A corresponding table for recording the tracebacks
self.target_index = {}
self.threshold = threshold # Neighborhood threshold value for scoring
self.PAM = PAM # PAM table
return
def score(self): # This method performs BLAST scoring and returns a string describing the resulting alignment
result_summary = [] # A list, for now, that will store results of the alignments
if not self.target_index: # if this is the first time scoring we should index the target
for i in xrange(len(self.target) - self.word_size + 1):
word = self.target[i: i + self.word_size]
if word in self.target_index:
self.target_index[word].append(i) # A dict of lists is an efficient structure for this index.
# The list items are word coordinates in the target.
else:
self.target_index[word] = [i]
# print self.target_index
## First we must iterate through words in the query:
query_position = 0
while query_position < self.querylen - self.word_size + 1:
# print "Query position is", query_position
query_word = self.query[query_position:query_position + self.word_size]
# lookup scores for each AA pair from PAM table
for target_word in self.target_index.keys():
score = 0
for i in range(len(target_word)):
score += self.PAM[target_word[i], query_word[i]]
# If the calculated score is higher than the neighborhood threshold value then extend the alignment
# and set the starting word score equal to the calculated score
if score > self.threshold:
self.word_score = score
for target_position in self.target_index[target_word]:
print "Searching for seed", query_word, "at target position", target_position
# print "Extending forward"
forward_score, forward_extension_q, forward_extension_t = \
self._extend_alignment(query_position, target_position, self.FORWARD)
# print "Extending backwards"
backward_score, backward_extension_q, backward_extension_t = \
self._extend_alignment(query_position, target_position, self.BACKWARD)
q_result = backward_extension_q[:-1] + query_word + forward_extension_q[1:]
t_result = backward_extension_t[:-1] + query_word + forward_extension_t[1:]
# Note that the last character of a backward extension, and the zeroth character of a forward
# extension overlap with the query word and should therefore be discarded - thus the slice notation.
score = forward_score + backward_score - self.word_score
# We need to make sure that we don't double count the seed score!
# calculate e-value
# e_value = self.querylen * self.targetlen * math.e ** (math.log(1 / 4) * score)
# calculate bit score
# bit_score = (-math.log(1 / 4) * score - math.log(1)) / math.log(2)
query_begin = query_position - len(backward_extension_q) + 2
target_begin = target_position - len(backward_extension_t) + 2
# result_summary.append((e_value, bit_score, score, q_result, t_result, query_begin, target_begin))
result_summary.append((score, q_result, t_result, query_begin, target_begin))
alignment_string = '\nAlignment had a score of ' + str(score) + ' and is:\n\nTarget:\t' + \
str(target_begin) + '\t' + str(t_result) + '\n\t\t\t'
for k in xrange(len(t_result)): # t and q alignments should be the same length!
if t_result[k] == q_result[k]:
alignment_string += '|'
# Only put a bar if the two characters are identical at this position
else:
alignment_string += ' ' # otherwise just insert a space
alignment_string += '\nQuery:\t' + str(query_begin) + '\t' + str(q_result) + '\n'
print alignment_string
# The above statements just concatenate together a multi-line string that will correctly display
# the best alignment when it is subsequently printed.
query_position += 1
return result_summary
def _extend_alignment(self, query_start, target_start, direction):
""" This private method attempts to extend an alignment in the forward and backward direction
depending on the value of the direction flag, which here takes the value 1 (for forward extension) or
        -1 for backward. For clarity these constants are defined by the class variables self.FORWARD and self.BACKWARD
"""
self.high_score = self.word_score
        # the highest score encountered so far is initially just the seed word score
self.high_q_pos = self.high_t_pos = 0
if direction == self.FORWARD: # We start with the 0,0 position representing the last character
query_start += self.word_size - 1 # of the seed word for forward extensions.
target_start += self.word_size - 1 # For backward extensions, leave it as it is (i.e. zeroth character)
self.blast_table = dict()
# The BLAST table is a dict of tuples. Each tuple represents a (query, target) position
# this sparse representation will be much more efficient than using a 2D list
self.blast_table[0, 0] = self.high_score # initialize the top left corner with the word score
self.high_q_pos = 0
self.high_t_pos = 0
self.traceback_table[0, 0] = (1, 1)
# There is no traceback path for the origin, but the program logic elsewhere dictates that we provide one
cur_t_pos = 1 # we are going to score the edges first (top and left), which can *only* ever be gaps back
# to the origin. i.e. the question of matching or not matching is completely irrelevant here.
# We start by scoring the top edge, beginning with position 1..
cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open) # first one always a gap open
while cur_score: # only keep going as long as we have non-zero values
self.blast_table[(0, cur_t_pos)] = cur_score # only record non-zero values
self.traceback_table[(0, cur_t_pos)] = (0, 1) # record a target gap in the traceback table
cur_score = max(0, self.blast_table[(0, cur_t_pos)] + self.gap_extend) # any subsequent are extends
cur_t_pos += 1
cur_t_pos = 0 # Now we do the same thing for the left edge as we just did for the top edge
cur_q_pos = 1
cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open) # first one always a gap open
while cur_score: # only keep going as long as we have non-zero values
self.blast_table[(cur_q_pos, 0)] = cur_score # only record non-zero values
self.traceback_table[(cur_q_pos, 0)] = (1, 0) # record a query gap in the traceback table
cur_score = max(0, self.blast_table[(cur_q_pos, 0)] + self.gap_extend)
            cur_q_pos += 1
# print "blast table 0,0 is", self.blast_table[0, 0], "and high score is", self.high_score
# alright, finished with edges. Note that high scores can NEVER occur in an edge so these were not considered.
# Henceforth, however, we will need to think about this.
cur_t_pos = 0 # Start at the first position
cur_q_pos = 0
# Now we will score the table, proceeding according to the algorithm description: first incrementing along
# the diagonal, then scoring the adjacent row, then the column below
# Unlike Smith Waterman, the matrix is no longer of defined size, so we need to use while loops instead of for
while True: # I think it's cleaner to affirmatively break out of this main loop. Too bad Python has no do-while
cur_t_pos += 1 # Advance along the diagonal by incrementing
cur_q_pos += 1 # Remember, these refer to coordinates in our table, not in the actual target or query
# Probably we need to do some bounds checking here too with respect to absolute position in the query and
# target similar to what is done in the _fill_in_row_or_column method
# print "Beginning row starting at", cur_q_pos, cur_t_pos, "of the blast table"
max_in_row = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start, target_start,
direction, self.ROW)
# print "Max in row was ", max_in_row
# print "Beginning column starting at", cur_q_pos, cur_t_pos, "of the blast table"
max_in_column = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start,
target_start, direction, self.COLUMN)
# print "Max in column was ", max_in_column
if not max(max_in_row, max_in_column):
break # If the maximum value we encounter in both the rows and columns is zero, we are done building
# print "Finished building a matrix"
best_q_alignment = [] # best partial alignment for the query sequence
best_t_alignment = [] # best partial alignment for the target sequence
## Now we can go ahead and produce an output string corresponding to the best alignment
cur_q_pos = self.high_q_pos # our approach is start at the high scoring box, and to trace our way back
cur_t_pos = self.high_t_pos
while cur_q_pos >= 0 and cur_t_pos >= 0 and self.blast_table.setdefault((cur_q_pos, cur_t_pos), 0):
q_offset, t_offset = self.traceback_table[cur_q_pos, cur_t_pos]
# unpack the offset tuples stored in the traceback table
if q_offset:
try:
best_q_alignment.append(self.query[query_start + cur_q_pos * direction])
except IndexError:
print "YO!", query_start, cur_q_pos, direction, query_start + cur_q_pos * direction
print "Best_q_alignment", best_q_alignment
quit()
else:
best_q_alignment.append('-') # if the value is a zero, we are gapping!
if t_offset:
best_t_alignment.append(self.target[target_start + cur_t_pos * direction])
else:
best_t_alignment.append('-') # if the value is a zero, we are gapping, now the other way
cur_q_pos -= q_offset # Note that we are subtracting positively valued offsets.
cur_t_pos -= t_offset # This design choice makes later printing a traceback table a lot prettier.
# Alternatively, we could have built our alignments by adding things at the beginning using statements like
# best_t_alignment.insert(0,'-') etc. But in Python inserting items at the beginning of a list is much slower
# than appending at the end. We are better off appending at the end, then reversing the whole mess when done.
# print "Returning information about a partial alignment", self.high_score, best_q_alignment, best_t_alignment
# flip 'em both once we are done, since we built them "end-to-beginning". Note that we don't need to flip
# sequences corresponding to backwards extensions!
if direction == self.FORWARD:
best_q_alignment.reverse()
best_t_alignment.reverse()
return self.high_score, ''.join(best_q_alignment), ''.join(best_t_alignment)
def _fill_in_row_or_column(self, cur_q_pos, cur_t_pos, query_start, target_start, direction, row_or_column):
"""This private method will fill in a row or column, depending on the tuple passed in the row_or_column argument
Each row or column is filled in until a zero-valued result is obtained.
"""
# print "filling in a row or column"
max_in_current_row_or_column = 0
q_add, t_add = row_or_column
# These variables will control whether we fill in a row or a column. If the argument row_or_column = (0,1)
# we will end filling in a row. If the argument is assigned (1,0) we will fill a column
while True:
query_position = query_start + cur_q_pos * direction # remember, direction here is either -1 or 1
target_position = target_start + cur_t_pos * direction # so is a positive or negative offset multiplier
# query and target position variables here refer to the actual (absolute) position within the query
# and target sequences respectively
if (query_position < 0) or (target_position < 0):
# print "Ran out of query or target sequence while attempting backwards extension"
break # we can go no further
if (query_position >= self.querylen) or (target_position >= self.targetlen):
# print "Ran out of q or t while attempting forwards extension", query_position, target_position
break # again, we can go no further
q_char = self.query[query_position]
t_char = self.target[target_position]
# print "comparing", q_char, query_position, "to", t_char, target_position
# use PAM table to find the increment
increment = self.PAM[(q_char, t_char)]
match_score = self.blast_table[(cur_q_pos - 1, cur_t_pos - 1)] + increment
# improvement for later - decide whether to apply gap opening or gap extension penalties
# for the moment just set gap increment to the gap_open value
increment = self.gap_open
# scores associated with gapping in either the target or query
target_gap_score = self.blast_table.setdefault((cur_q_pos, cur_t_pos - 1), 0) + increment
query_gap_score = self.blast_table.setdefault((cur_q_pos - 1, cur_t_pos), 0) + increment
best_score = max(
(0, (0, 0)), # a 0 score will never have a traceback
(match_score, (1, 1)), # A match corresponds to a -1,-1 traceback
(target_gap_score, (0, 1)), # A target gap corresponds to a 0, -1 traceback
(query_gap_score, (1, 0)) # A query gap corresponds to a -1, 0 traceback
)
if not best_score[0]:
break
self.blast_table[cur_q_pos, cur_t_pos] = best_score[0]
# The first element in the tuple is the actual score to be recorded
# print "Recording", best_score[0], "at position", cur_q_pos, cur_t_pos
self.traceback_table[cur_q_pos, cur_t_pos] = best_score[1]
# The traceback offsets associated with the score are in a tuple as described earlier
if best_score[0] >= self.high_score:
# This represents the "high road" approach. "low road" would simply be >
self.high_score = best_score[0] # record the new high score
self.high_q_pos = cur_q_pos # also record the i and j positions associated with that score
self.high_t_pos = cur_t_pos
if best_score[0] > max_in_current_row_or_column:
max_in_current_row_or_column = best_score[0]
# The maximum in a particular row or column is different from the overall high score! We actually
# only care if this value is non-zero, as this will tell us that another iteration along the diagonal is
# required.
cur_t_pos += t_add # We end up adding either a zero or a one to these depending on
cur_q_pos += q_add # whether we are filling in a row or a column, setting us up for the next iteration
return max_in_current_row_or_column
def __str__(self):
""" This is a "special method attribute" overwriting the __str__ method defined in object.
__str__ controls what the string representation of objects of the BLAST class will look like.
It is invoked by print statements, which will print the return value. The bad news is that the routine here
was more-or-less just lifted from the old Smith Waterman program. However, BLAST uses a fundamentally
different sort of data structure for representing the blast and traceback tables.
Can you fix this method so that it does something useful?
"""
lineout = 'Scoring table:\n\t' + '\t'.join(self.target) + '\n'
# The above is just a fancy looking way to break the target string into tab-delimited individual characters
for i in xrange(self.querylen):
lineout += self.query[i] + "\t"
for j in xrange(self.targetlen):
lineout += str(self.blast_table[i, j]) + "\t"
lineout += '\n'
lineout += '\n\nTraceback table:\n\t' + '\t'.join(self.target) + '\n'
for i in xrange(self.querylen):
lineout += self.query[i] + "\t"
for j in xrange(self.targetlen):
lineout += ''.join([str(k) for k in self.traceback_table[i, j]]) + "\t"
# just prettying up the traceback tuples
lineout += '\n'
return lineout
# MAIN PROGRAM
numbat = 'LVSMLESYVAAPDLILLDIMMPGMDGLELGGMDGGKPILT'
quoll = 'DDMEVIGTAYNPDVLVLDIIMPHLDGLAVAAMEAGRPLIS'
# calculate PAM120 matrix
A = PAM(N=120)
PAM1 = A.Build_PAMN()
B = BLAST(numbat, quoll, PAM=PAM1)
print B.score()
| python |
import sublime, sublimeplugin
import os
class NewPluginCommand(sublimeplugin.WindowCommand):
def run(self, window, args):
view = window.newFile()
path = sublime.packagesPath() + u"/user"
try:
os.chdir(path)
except Exception:
pass
view.options().set("syntax", "Packages/Python/Python.tmLanguage")
template = """import sublime, sublimeplugin
# This simple plugin will add 'Hello, World!' to the end of the buffer when run.
# To run it, save it within the User/ directory, then open the console (Ctrl+~),
# and type: view.runCommand('sample')
#
# See http://www.sublimetext.com/docs/plugin-basics for more information
class SampleCommand(sublimeplugin.TextCommand):
def run(self, view, args):
view.insert(view.size(), "Hello, World!\\n")
"""
view.insert(0, template)
| python |
#!/usr/bin/env python3
import math
def main():
limit = 999
print(sumOfMultiples(3, limit) + sumOfMultiples(5, limit) - sumOfMultiples(15, limit))
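# Closed form: the multiples of n up to `limit` sum to n * k * (k + 1) / 2 with
# k = floor(limit / n); counting multiples of 3 and 5 and subtracting multiples
# of 15 applies inclusion-exclusion so shared multiples are not double-counted.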
def sumOfMultiples(n, limit):
    k = math.floor(limit / n)
    return n * k * (k + 1) // 2
if __name__ == "__main__": main() | python |
from .orders import Order
from .customers import Customer
from .products import Product
from .line_items import LineItem
from .lot_code import LotCode
from .warehouse import Warehouse
from .location import Location
from .inventories import Inventory
from .inventory_adjustments import InventoryAdjustment
from .inventory_adjustment_logs import InventoryAdjustmentLog
from .receipt import Receipt
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from flask import Flask
from flask import abort
from flask import make_response
from flask import render_template
from flask import request
import sleekxmpp
app = Flask(__name__)
app.config.from_envvar("XMPP_CHAT_BADGE_CONFIG")
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
else:
raw_input = input
class MUCBot(sleekxmpp.ClientXMPP):
""" """
def __init__(self, jid, password, nick):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.nick = nick
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.get_roster()
self.send_presence()
def get_number_of_occupants(self, room):
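        """
        Query the MUC room via service discovery (XEP-0030) and return the value
        of the 'muc#roominfo_occupants' field from the extended info form, or
        None if the query fails or the field is absent.
        """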
querying_jid = '{}/{}'.format(room, self.nick)
try:
result = self.plugin['xep_0030'].get_info(
jid=room,
node=None,
cached=True,
ifrom=querying_jid,
block=True,
timeout=10
)
except sleekxmpp.exceptions.IqError:
return None
fields = result.xml.find(
'{http://jabber.org/protocol/disco#info}query').find(
'{jabber:x:data}x').findall(
'{jabber:x:data}field')
for field in fields:
if field.get('var') == 'muc#roominfo_occupants':
return field.find('{jabber:x:data}value').text
return None
def initBOT(jid, password, nick):
# Set up the MUCBot and register plugins. Note that while plugins may
# have interdependencies, the order in which you register them does
# not matter.
xmpp_client = MUCBot(jid, password, nick)
xmpp_client.register_plugin('xep_0030') # Service Discovery
return xmpp_client
bot = initBOT(app.config['JID'], app.config['PASSWORD'], app.config['NICK'])
bot.connect()
bot.process(block=False)
@app.route("/badge.svg")
def hello():
room = request.args.get('room')
if room is None:
return abort(400)
number = bot.get_number_of_occupants(room)
svg = render_template('badge.svg', number=number)
response = make_response(svg)
response.content_type = 'image/svg+xml'
response.cache_control.max_age = 60
return response
| python |
import chess
from datetime import datetime
from tqdm import tqdm
from os import getcwd
from utils.trans_table_utils import *
from utils.history_utils import *
from utils.heuristics import combined
from agents.alpha_beta_agent import AlphaBetaAgent
from agents.alpha_beta_agent_trans import AlphaBetaAgentTrans
from agents.combined_agent import CombinedAgent
from agents.history_agent import OrderedAgent
from agents.minimax_agent import MiniMaxAgent
from agents.pv_agent import PVAgent
from agents.random_agent import RandAgent
from agents.random_agent_trans import RandAgentTrans
from agents.history_agent_trans import OrderedAgentTrans
class ChessGame:
def __init__(self, white_agent_name, white_agent, black_agent_name, black_agent):
self.white_agent_name = white_agent_name
self.black_agent_name = black_agent_name
self.white_agent = white_agent
self.black_agent = black_agent
self.white_agent_depth = white_agent.maximum_depth if hasattr(white_agent, 'maximum_depth') else 0
self.black_agent_depth = black_agent.maximum_depth if hasattr(black_agent, 'maximum_depth') else 0
self.white_agent_num_moves = 0
self.black_agent_num_moves = 0
self.white_agent_decision_time = 0
self.black_agent_decision_time = 0
self.white_agent_result = 0
self.black_agent_result = 0
self.board = chess.Board()
def play_game(self, display=False):
        while not (self.board.is_game_over() or self.board.is_seventyfive_moves() or self.board.is_fivefold_repetition()):
self.play_round(display=display)
result = self.board.result()
if result == '0-1':
self.white_agent_result = -1
self.black_agent_result = 1
elif result == '1-0':
self.white_agent_result = 1
self.black_agent_result = -1
return {
'white_agent_name': self.white_agent_name,
'black_agent_name': self.black_agent_name,
'white_agent_depth': str(self.white_agent_depth),
'black_agent_depth': str(self.black_agent_depth),
'white_agent_num_moves': str(self.white_agent_num_moves),
'black_agent_num_moves': str(self.black_agent_num_moves),
'white_agent_decision_time': str(self.white_agent_decision_time),
'black_agent_decision_time': str(self.black_agent_decision_time),
'white_agent_result': str(self.white_agent_result),
'black_agent_result': str(self.black_agent_result)
}
def play_round(self, display=False):
start = datetime.utcnow()
self.play_move(self.white_agent)
self.white_agent_decision_time += (datetime.utcnow() - start).total_seconds()
self.white_agent_num_moves += 1
if display:
print(self.board.unicode(borders=True))
start = datetime.utcnow()
self.play_move(self.black_agent)
self.black_agent_decision_time += (datetime.utcnow() - start).total_seconds()
self.black_agent_num_moves += 1
if display:
print(self.board.unicode(borders=True))
def play_move(self, agent):
chosen_move = agent.get_move(self.board.copy())
if chosen_move is not None:
self.board.push_uci(chosen_move.uci())
def generate_data(white_agent_name, white_agent, black_agent_name, black_agent, path, num_runs=100, display=False):
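    # Plays num_runs games and appends two tab-separated rows per game (one per
    # color) to the file at `path`; transposition and history tables are persisted
    # after every game.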
with open(path, 'w') as f:
f.write('game_number\tagent_type\tagent_color\tagent_depth\tagent_num_moves\tagent_decision_time\tgame_result\n')
for g_n in tqdm(range(num_runs)):
            g = ChessGame(white_agent_name, white_agent, black_agent_name, black_agent).play_game(display=display)
f.write(str(g_n) + '\t' + g['white_agent_name'] + '\t' + 'white' + '\t' + g['white_agent_depth'] + '\t' + g['white_agent_num_moves'] + '\t' + g['white_agent_decision_time'] + '\t' + g['white_agent_result'] + '\n')
f.write(str(g_n) + '\t' + g['black_agent_name'] + '\t' + 'black' + '\t' + g['black_agent_depth'] + '\t' + g['black_agent_num_moves'] + '\t' + g['black_agent_decision_time'] + '\t' + g['black_agent_result'] + '\n')
# TODO: This is stupid hard-coded. Remove this you dummies. Love you
write_trans_table(black_agent.trans_table, getcwd() + '/data/history_agent/trans_table.pickle')
write_history_table(black_agent)
def main():
# Base
# generate_data('random', RandAgent(chess.WHITE), 'random', RandAgent(chess.BLACK), getcwd()[:-5] + 'data/RvR.csv')
# generate_data('random', RandAgent(chess.WHITE), 'alphabeta2', AlphaBetaAgent(chess.BLACK, combined, 2), getcwd()[:-5] + 'data/RvA2.csv')
# generate_data('minimax2', MiniMaxAgent(chess.WHITE, combined, 2), 'alphabeta2', AlphaBetaAgent(chess.BLACK, combined, 2), getcwd()[:-5] + 'data/M2vA2.csv')
# generate_data('alphabeta2', AlphaBetaAgent(chess.WHITE, combined, 2), 'alphabeta2', AlphaBetaAgent(chess.BLACK, combined, 2), getcwd()[:-5] + 'data/A2vA2.csv')
# Transposition Tables
# generate_data('alphabeta2', AlphaBetaAgent(chess.WHITE, combined, 2), 'alphabeta2_trans', AlphaBetaAgentTrans(chess.BLACK, combined, 2), getcwd()[:-5] + 'data/A2vA2T_1.csv', 300)
# History tables
# generate_data('history2', OrderedAgent(chess.WHITE, combined, 2), 'history2', OrderedAgent(chess.BLACK, combined, 2, True), getcwd()[:-5] + 'data/H2vH2.csv')
# generate_data('pv2', PVAgent(chess.WHITE, combined, 2), 'pv2', PVAgent(chess.BLACK, combined, 2), getcwd()[:-5] + 'data/P2vP2.csv')
# generate_data('combined2', CombinedAgent(chess.WHITE, combined, 2), 'combined2', CombinedAgent(chess.BLACK, combined, 2), getcwd()[:-5] + 'data/C2vC2.csv')
# Depth
# generate_data('alphabeta1', AlphaBetaAgent(chess.WHITE, combined, 1), 'alphabeta2', AlphaBetaAgent(chess.BLACK, combined, 2), getcwd()[:-5] + 'data/A1vA2.csv')
# generate_data('alphabeta1', AlphaBetaAgent(chess.WHITE, combined, 1), 'alphabeta3', AlphaBetaAgent(chess.BLACK, combined, 3), getcwd()[:-5] + 'data/A1vA3.csv')
# generate_data('alphabeta2', AlphaBetaAgent(chess.WHITE, combined, 2), 'alphabeta3', AlphaBetaAgent(chess.BLACK, combined, 3), getcwd()[:-5] + 'data/A2vA3.csv')
# generate_data('random', RandAgent(chess.WHITE), 'random_trans', RandAgentTrans(chess.BLACK), getcwd()[:-5] + 'data/RvRT_1', 300)
# generate_data('random', RandAgent(chess.WHITE), 'random_trans', RandAgentTrans(chess.BLACK), getcwd()[:-5] + 'data/RvRT_2', 300)
# generate_data('random', RandAgent(chess.WHITE), 'random_trans', RandAgentTrans(chess.BLACK), getcwd()[:-5] + 'data/RvRT_3', 300)
# generate_data('random', RandAgent(chess.WHITE), 'random_trans', RandAgentTrans(chess.BLACK), getcwd()[:-5] + 'data/RvRT_4', 300)
# generate_data('random', RandAgent(chess.WHITE), 'random_trans', RandAgentTrans(chess.BLACK), getcwd()[:-5] + 'data/RvRT_5', 300)
# generate_data('random', RandAgent(chess.WHITE), 'random_trans', RandAgentTrans(chess.BLACK), getcwd()[:-5] + 'data/RvRT_6', 300)
# generate_data('random', RandAgent(chess.WHITE), 'random_trans', RandAgentTrans(chess.BLACK), getcwd()[:-5] + 'data/RvRT_7', 300)
#
# generate_data('greedy', AlphaBetaAgent(chess.WHITE, combined, 1), 'greedy_trans', AlphaBetaAgentTrans(chess.BLACK, combined, 1), getcwd()[:-5] + 'data/AvAT_1', 300)
# generate_data('greedy', AlphaBetaAgent(chess.WHITE, combined, 1), 'greedy_trans', AlphaBetaAgentTrans(chess.BLACK, combined, 1), getcwd()[:-5] + 'data/AvAT_2', 300)
# generate_data('greedy', AlphaBetaAgent(chess.WHITE, combined, 1), 'greedy_trans', AlphaBetaAgentTrans(chess.BLACK, combined, 1), getcwd()[:-5] + 'data/AvAT_3', 300)
# generate_data('greedy', AlphaBetaAgent(chess.WHITE, combined, 1), 'greedy_trans', AlphaBetaAgentTrans(chess.BLACK, combined, 1), getcwd()[:-5] + 'data/AvAT_4', 300)
# generate_data('greedy', AlphaBetaAgent(chess.WHITE, combined, 1), 'greedy_trans', AlphaBetaAgentTrans(chess.BLACK, combined, 1), getcwd()[:-5] + 'data/AvAT_5', 300)
# generate_data('greedy', AlphaBetaAgent(chess.WHITE, combined, 1), 'greedy_trans', AlphaBetaAgentTrans(chess.BLACK, combined, 1), getcwd()[:-5] + 'data/AvAT_6', 300)
# generate_data('greedy', AlphaBetaAgent(chess.WHITE, combined, 1), 'greedy_trans', AlphaBetaAgentTrans(chess.BLACK, combined, 1), getcwd()[:-5] + 'data/AvAT_7', 300)
agent1, agent2 = [OrderedAgent(chess.WHITE, combined, 2), OrderedAgentTrans(chess.BLACK, combined, 3)]
generate_data('ordered_history2', agent1, 'ordered_history2_trans', agent2, getcwd()[:-5] + 'data/H2vHT2.csv', 1, display=True)
write_trans_table(agent2.trans_table, getcwd()[:-5] + 'data/history_agent/trans_table.pickle')
write_history_table(agent2)
if __name__ == '__main__':
main()
| python |
#!/usr/bin/env python
"""
The galvo voltage control UI
Aditya Venkatramani 04/21 --> Adapted from zStage.py
"""
import os
from PyQt5 import QtCore, QtGui, QtWidgets
import storm_control.sc_library.parameters as params
import storm_control.hal4000.halLib.halDialog as halDialog
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.hal4000.halLib.halModule as halModule
import storm_control.hal4000.qtdesigner.galvo1D_ui as galvoUi
class GalvoView(halDialog.HalDialog):
"""
Manages the galvo1D GUI.
"""
def __init__(self, configuration = None, **kwds):
super().__init__(**kwds)
self.parameters = params.StormXMLObject()
self.galvo_fn = None
# Load UI
self.ui = galvoUi.Ui_Dialog()
self.ui.setupUi(self)
icon_path = os.path.join(os.path.dirname(__file__),"../icons/")
self.ui.upLButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "2uparrow-128.png")))
self.ui.upLButton.clicked.connect(self.handleUpLButton)
self.ui.upSButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "1uparrow-128.png")))
self.ui.upSButton.clicked.connect(self.handleUpSButton)
self.ui.downSButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "1downarrow-128.png")))
self.ui.downSButton.clicked.connect(self.handleDownSButton)
self.ui.downLButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "2downarrow-128.png")))
self.ui.downLButton.clicked.connect(self.handleDownLButton)
self.ui.zeroButton.clicked.connect(self.handleZeroButton)
self.ui.goButton.clicked.connect(self.handleGoButton)
# Set to minimum size & fix.
self.adjustSize()
self.setFixedSize(self.width(), self.height())
# Add parameters.
self.parameters.add(params.ParameterRangeFloat(description ="Galvo large step size",
name = "volt_large_step",
value = configuration.get("large_step"),
min_value = 0.0,
max_value = 1000.0))
self.parameters.add(params.ParameterRangeFloat(description ="Galvo small step size",
name = "volt_small_step",
value = configuration.get("small_step"),
min_value = 0.0,
max_value = 1000.0))
#self.setEnabled(False)
def getParameters(self):
return self.parameters
def handleDownLButton(self, boolean):
self.galvo_fn.goRelative(-1.0*self.parameters.get("volt_large_step"))
def handleDownSButton(self, boolean):
self.galvo_fn.goRelative(-1.0*self.parameters.get("volt_small_step"))
def handleGoButton(self, boolean):
self.galvo_fn.goAbsolute(self.ui.goSpinBox.value())
def handleUpLButton(self, boolean):
self.galvo_fn.goRelative(self.parameters.get("volt_large_step"))
def handleUpSButton(self, boolean):
self.galvo_fn.goRelative(self.parameters.get("volt_small_step"))
def handleZeroButton(self, boolean):
self.galvo_fn.zero()
def handleGalvoVoltage(self, volt):
self.ui.galvoVoltLabel.setText("{0:.2f}".format(volt))
def newParameters(self, parameters):
self.parameters.setv("volt_large_step", parameters.get("volt_large_step"))
self.parameters.setv("volt_small_step", parameters.get("volt_small_step"))
def setFunctionality(self, galvo_fn):
self.galvo_fn = galvo_fn
self.galvo_fn.galvoVoltage.connect(self.handleGalvoVoltage)
self.galvo_fn.zero()
class Galvo(halModule.HalModule):
def __init__(self, module_params = None, qt_settings = None, **kwds):
super().__init__(**kwds)
self.configuration = module_params.get("configuration")
self.view = GalvoView(module_name = self.module_name,
configuration = module_params.get("configuration"))
self.view.halDialogInit(qt_settings,
module_params.get("setup_name") + " galvo")
def cleanUp(self, qt_settings):
self.view.cleanUp(qt_settings)
def handleResponse(self, message, response):
if message.isType("get functionality"):
self.view.setFunctionality(response.getData()["functionality"])
def processMessage(self, message):
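        """
        Dispatch HAL messages: on 'configure1', register the dialog in the menu,
        request the configured galvo functionality and publish the initial
        parameters; 'new parameters', 'show' and 'start' update settings and
        display the dialog.
        """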
if message.isType("configure1"):
self.sendMessage(halMessage.HalMessage(m_type = "add to menu",
data = {"item name" : "Galvo",
"item data" : "galvoview"}))
self.sendMessage(halMessage.HalMessage(m_type = "get functionality",
data = {"name" : self.configuration.get("galvo_fn")}))
self.sendMessage(halMessage.HalMessage(m_type = "initial parameters",
data = {"parameters" : self.view.getParameters()}))
elif message.isType("new parameters"):
p = message.getData()["parameters"]
message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
data = {"old parameters" : self.view.getParameters().copy()}))
self.view.newParameters(p.get(self.module_name))
message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
data = {"new parameters" : self.view.getParameters()}))
elif message.isType("show"):
if (message.getData()["show"] == "galvoview"):
self.view.show()
elif message.isType("start"):
if message.getData()["show_gui"]:
self.view.showIfVisible()
| python |
from os import system, name
system('cls' if name == 'nt' else 'clear')
dsc = ('''DESAFIO 019:
Um professor quer sortear um dos seus quatro alunos para apagar
o quadro. Faça um programa que ajude ele, lendo o nome deles e
escrevendo o nome escolhido.
''')
from random import choice
alunos = []
alunos.append(input('Digite o nome do primeiro aluno: '))
alunos.append(input('Digite o nome do segundo aluno: '))
alunos.append(input('Digite o nome do terceiro aluno: '))
alunos.append(input('Digite o nome do quarto aluno: '))
print('O aluno escolhido é {}'.format(choice(alunos)))
| python |
#!/usr/bin/env python
import pytest
import sklearn.datasets as datasets
import sklearn.neural_network as nn
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestNeuralNetwork(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.neural_network.BernoulliRBM, nn.BernoulliRBM)
self.assertIs(df.neural_network.MLPClassifier, nn.MLPClassifier)
self.assertIs(df.neural_network.MLPRegressor, nn.MLPRegressor)
@pytest.mark.parametrize("algo", ['BernoulliRBM'])
def test_RBM(self, algo):
digits = datasets.load_digits()
df = pdml.ModelFrame(digits)
mod1 = getattr(df.neural_network, algo)(random_state=self.random_state)
mod2 = getattr(nn, algo)(random_state=self.random_state)
df.fit(mod1)
mod2.fit(digits.data, digits.target)
result = df.transform(mod1)
expected = mod2.transform(digits.data)
self.assertIsInstance(result, pdml.ModelFrame)
self.assert_numpy_array_almost_equal(result.data.values, expected)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
'''
Find the water-land threshold in an image (for example the band 7 of
a WorldView multispectral image) by computing a kernel-density
estimate using Gaussian kernels. A good threshold is usually the
first minimum of this estimate.
This tool needs python 3, numpy, scipy, matplotlib, and osgeo.
'''
import sys, time, math, argparse
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
from osgeo import gdal
from scipy.signal import argrelextrema
# Try to use sklearn as well, gives very similar results in very similar time.
# Install this with:
# conda install -c conda-forge scikit-learn
use_sklearn = False # off by default
if use_sklearn:
from sklearn.neighbors import KernelDensity
usage = "python bathy_threshold_calc.py --image <image> --num-samples <num>."
parser = argparse.ArgumentParser(usage=usage,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--image', dest = 'image', default = "",
help='The single-channel image to use to find the water-land threshold.')
parser.add_argument("--num-samples", dest="num_samples", type=int, default = 1000000,
help="The number of samples to pick from the image (more samples " +
"will result in more accuracy but will be slower).")
parser.add_argument("--no-plot", action="store_true", default=False,
dest="no_plot", help="Do not show the plot.")
(options, args) = parser.parse_known_args(sys.argv)
if options.image == "":
parser.print_help()
sys.exit(1)
print("Image file is " + options.image)
print("Number of samples is " + str(options.num_samples))
# Try to read the file using GDAL
try:
ds = gdal.Open(options.image, gdal.GA_ReadOnly)
if ds is None:
print("Could not read the file: " + options.image)
sys.exit(1)
if ds.RasterCount != 1:
print("Expecting one band in " + options.image + ", but got instead: " +
str(ds.RasterCount) + ".")
sys.exit(1)
rb = ds.GetRasterBand(1)
image = rb.ReadAsArray()
except Exception as err:
print("Could not read the file: " + options.image)
print("It must exist and be a single-band TIF file.")
sys.exit(1)
num_rows = image.shape[0]
num_cols = image.shape[1]
if num_rows <= 0 or num_cols <= 0:
print("Expecting an image with positive dimensions")
sys.exit(1)
num_vals = num_rows * num_cols
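# Choose the subsampling ratio so that (num_rows / ratio) * (num_cols / ratio) is
# roughly num_samples, i.e. ratio = sqrt(num_vals / num_samples).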
samp_ratio = math.sqrt( float(num_vals) / float(options.num_samples) )
num_sub_rows = round(num_rows / samp_ratio)
if num_sub_rows < 1:
num_sub_rows = 1
if num_sub_rows > num_rows:
num_sub_rows = num_rows
num_sub_cols = round(num_cols / samp_ratio)
if num_sub_cols < 1:
num_sub_cols = 1
if num_sub_cols > num_cols:
num_sub_cols = num_cols
print("Number of image rows and columns: " + str(num_rows) + ", " + str(num_cols))
print("Picking a uniform sample of dimensions " + str(num_sub_rows) + ", " + str(num_sub_cols))
print("Please be patient. It make take several minutes to find the answer.")
# Subsample uniformly the image
sub_rows = np.round(np.array(range(num_sub_rows)) * float(num_rows - 1)/float(num_sub_rows - 1))
sub_cols = np.round(np.array(range(num_sub_cols)) * float(num_cols - 1)/float(num_sub_cols - 1))
sub_rows = sub_rows.astype(int)
sub_cols = sub_cols.astype(int)
sub_image = image[sub_rows, :][:, sub_cols]
# Make it into an array
data = sub_image.reshape(-1)
xvals = np.linspace(data.min(), data.max(), 1000)
beg = time.time()
kde = st.gaussian_kde(data)
yvals = kde(xvals)
min_pos = argrelextrema(yvals, np.less); min_vals = xvals[min_pos]
end = time.time()
# Note that it is not universal for it to be first minimum. Sometimes
# the second minimum is better!
print("Positions of the minima: ", min_vals)
print("Suggested threshold is the position of the first minimum: ", min_vals[0])
print("Please verify with the graph. There is a chance subsequent minima may work better.")
print("Elapsed time in seconds:", round(10.0*(end - beg))/10.0)
# sklearn, with similar results
if use_sklearn:
beg2 = time.time()
kernel = 'gaussian'
kde2 = KernelDensity(kernel = kernel, bandwidth = 10).fit(data[:, np.newaxis])
log_dens = kde2.score_samples(xvals[:, np.newaxis])
yvals2 = np.exp(log_dens).reshape(-1)
min_pos2 = argrelextrema(yvals2, np.less); min_vals2 = xvals[min_pos2]
end2 = time.time()
print("Elapsed time for sklearn kernel estimation in seconds:", round(10.0*(end2 - beg2))/10.0)
print("Suggested threshold is the position of the first minimum2: ", min_vals2[0])
print("Positions of the minima2: ", min_vals2)
# Plot the kernel-density estimate and highlight the minima
if not options.no_plot:
plt.figure(1)
plt.hist(data, bins=100, density=True, label="Data histogram")
plt.plot(xvals, yvals, label="KDE", c="red")
plt.vlines(min_vals, ymin=0, ymax=yvals.max(),colors='g', ls="--", label="Minima", alpha=0.7)
if use_sklearn:
plt.plot(xvals, yvals2, color = 'green', lw = 2,
linestyle='-', label="kernel = '{0}'".format(kernel))
plt.legend()
plt.show()
| python |
""" Tests for the main server file. """
from unittest import TestCase
from unittest.mock import patch
from app import views
class ViewsTestCase(TestCase):
""" Our main server testcase. """
def test_ping(self):
self.assertEqual(views.ping(None, None), 'pong')
@patch('app.views.notify_recipient')
@patch('app.views.is_valid_pull_request')
def test_valid_pull_request(self, validator, notifier):
""" Should notify upon a valid pull request. """
validator.return_value = True
notifier.return_value = True
result = views.pull_request({}, None)
self.assertEqual(result, 'Recipient Notified')
@patch('app.views.is_valid_pull_request')
def test_invalid_pull_request(self, validator):
""" Should ignore an invalid pull request. """
validator.return_value = False
result = views.pull_request({}, None)
self.assertRegex(result, 'ignored')
| python |
#
__doc__ = """
Schema for test/simulator configuration file.
TODO:
- Somehow, validation of test config doesn't work correctly. Only type conversion works.
"""
from configobj import ConfigObj, flatten_errors
from validate import Validator, ValidateError, VdtTypeError, VdtValueError
import os
from StringIO import StringIO
import mproboenv
from environ import EnvFileLoc, EnvTestcfgSection, EnvTestcfgOption, EnvTestcfgPort, EnvSimcfg, EnvPortName
from dave.common.misc import get_abspath, from_engr, force_list, str2num
from dave.common.davelogger import DaVELogger
import dave.mprobo.mchkmsg as mcode
class SchemaConfig(object):
def __init__(self, configobj, configspecfile, config_type, logger_id='logger_id'):
self._logger = DaVELogger.get_logger('%s.%s.%s' % (logger_id, __name__, self.__class__.__name__)) # logger
self.cfgtype = config_type
configspec = ConfigObj(infile=configspecfile, interpolation=False, list_values=False)
vfile = StringIO()
configobj.write(vfile)
self.config = ConfigObj(vfile.getvalue().splitlines(), configspec=configspec)
vfile.close()
def _validate(self, custom_check = {}):
self.vtor = Validator(custom_check)
results = self.config.validate(self.vtor) # this will always not be True
return flatten_errors(self.config, results)
def _output_vdterror(self, error_key):
for (section_list, key, _) in self.vdt_errors:
if key is None:
pass
#print 'The following sections "%s" is(are) missing in the %s configuration' % ('.'.join(section_list), self.cfgtype)
else:
msg = mcode.ERR_011 % (key, ','.join(section_list))
if key in error_key:
raise ValidateError(msg)
else:
print '[Warning]' + msg
def get_cfg(self):
''' get validated ConfigObj '''
return self.config
class SchemaSimulatorConfig(SchemaConfig):
def __init__(self, configobj, is_goldenonly=False, logger_id='logger_id'):
self._tenvf = EnvFileLoc()
self._tenvsc = EnvSimcfg()
self._schema_filename = mproboenv.get_simcfg()
SchemaConfig.__init__(self, configobj, self._schema_filename, 'simulator', logger_id)
self.vdt_errors = self._validate()
self._run_custom_check(is_goldenonly)
def raise_vdterror(self):
self._output_vdterror([self._tenvsc.model, self._tenvsc.simulator])
def _run_custom_check(self, is_goldenonly):
        models = [self._tenvsc.golden] + ([] if is_goldenonly else [self._tenvsc.revised])
for x in models:
self.config[x] = self._chk_circuit_subsection(self.config[x])
self.config[x] = self._chk_ams_control(self.config[x])
self.config[x] = self._chk_hdl_files(self.config[x])
def _chk_ams_control(self, section):
if section[self._tenvsc.ams_control_file] == '':
del section[self._tenvsc.ams_control_file]
return section
assert section[self._tenvsc.model] == self._tenvsc.model_ams, '"%s" is valid only for model="%s"' % (self._tenvsc.ams_control_file, self._tenvsc.model_ams)
v = section[self._tenvsc.ams_control_file]
assert type(v)==str, mcode.ERR_012 % (v, self._tenvsc.ams_control_file)
fname = get_abspath(v, do_assert=False, logger=self._logger)
#assert os.path.isfile(fname), mcode.ERR_013 % v
section[self._tenvsc.ams_control_file]=fname
return section
def _chk_circuit_subsection(self, section):
''' circuit subsection is not validated with schema.
Rather, it is separately validated because it depends on 'model' '''
if section[self._tenvsc.circuit] == {}:
del section[self._tenvsc.circuit]
return section
assert section[self._tenvsc.model] == self._tenvsc.model_ams, mcode.ERR_014 % self._tenvsc.model_ams
for k,v in section[self._tenvsc.circuit].items():
assert type(v)==str, mcode.ERR_015 % (v,k)
fname = get_abspath(v, do_assert=False, logger=self._logger)
#assert os.path.isfile(fname), mcode.ERR_016 % v
section[self._tenvsc.circuit][k]=fname
return section
def _chk_hdl_files(self, section):
''' check hdl files exist and update path'''
if section[self._tenvsc.hdl_files] == ['']:
section[self._tenvsc.hdl_files] = []
for idx, f in enumerate(section[self._tenvsc.hdl_files]):
assert type(f)==str, mcode.ERR_017 % self._tenvsc.hdl_files
fname = get_abspath(f, do_assert=False, logger=self._logger)
#assert os.path.isfile(fname), mcode.ERR_018 % f
section[self._tenvsc.hdl_files][idx] = fname
return section
#--------------------------------------------------------------
def _chk_engrtime(value):
''' Check if value is time in engr notation like 11ns, 5fs, etc. '''
time_suffix = 's'
if not isinstance(value,str) or value[-1] != time_suffix or from_engr(value[:-1]) == None:
raise VdtTypeError(value)
return value
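# Example inputs (per the docstrings): values such as '11ns' or '5fs' pass
# _chk_engrtime; _chk_verilogtime additionally requires a mantissa of 1 followed
# only by zeros (1fs, 10fs, 100fs, ...).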
def _chk_verilogtime(value):
    ''' Check if value is Verilog timescale format like 1fs, 10fs, 100fs, ... '''
    _chk_engrtime(value)
    digits = ''.join(ch for ch in value if ch.isdigit())
    if digits and digits[0] == '1' and all(ch == '0' for ch in digits[1:]):
        return value
    else:
        raise VdtValueError(value)
class SchemaTestConfig(SchemaConfig):
#(_pkg_module_root_dir,dummy_filename) = os.path.split(os.path.abspath(__file__))
def __init__(self, configobj, logger_id='logger_id'):
self._tenvf = EnvFileLoc()
self._tenvs = EnvTestcfgSection()
self._tenvr = EnvTestcfgOption()
self._tenvtp = EnvTestcfgPort()
self._tenvp = EnvPortName()
self._schema_filename = mproboenv.get_testcfg()
SchemaConfig.__init__(self, configobj, self._schema_filename, 'test', logger_id)
self.vdt_errors = self._validate({
'time_engr' : _chk_engrtime,
'time_verilg' : _chk_verilogtime
})
self._run_custom_check()
def raise_vdterror(self):
self._output_vdterror([])
def _run_custom_check(self):
for t in self.config.keys():
self.config[t][self._tenvs.option] = self._chk_regress(self.config[t][self._tenvs.option])
self.config[t][self._tenvs.port] = self._chk_port(self.config[t][self._tenvs.port])
def _chk_regress(self, section):
        ''' do_not_regress subsection under the regression section
it takes/returns the whole regress section
'''
if self._tenvr.regression_do_not_regress not in section.keys():
return section
section[self._tenvr.regression_do_not_regress] = dict([(k,force_list(v)) for k,v in section[self._tenvr.regression_do_not_regress].items()])
return section
def _chk_port(self, section):
''' prohibited, default_value '''
for k,v in section.items():
section[k][self._tenvtp.default_value] = self._chk_port_default(section[k])
#TODO: validate prohibited
#try:
# section[k][self._tenvtp.prohibited] = self._chk_port_prohibited(section[k])
#except:
# pass
return section
def _chk_port_default(self, port):
ana_port = [self._tenvp.AnalogInput, self._tenvp.AnalogOutput]
dtype = float if port[self._tenvtp.port_type] in ana_port else int
return str2num(port[self._tenvtp.default_value], dtype)
| python |
# -*- coding: utf-8 -*-
"""
Progress component
"""
from bowtie._component import Component
class Progress(Component):
"""This component is used by all visual components and
is not meant to be used alone.
By default, it is not visible.
It is an opt-in feature and you can happily use Bowtie
without using the progress indicators at all.
It is useful for indicating progress to the user for long-running processes.
It can be accessed through the ``.progress`` accessor.
Examples
--------
>>> plotly = Plotly()
>>> def callback(x):
>>> plotly.progress.do_visible(True)
>>> plotly.progress.do_percent(0)
>>> compute1()
>>> plotly.progress.do_inc(50)
>>> compute2()
>>> plotly.progress.do_visible(False)
"""
_TEMPLATE = 'progress.jsx'
_COMPONENT = 'CProgress'
_PACKAGE = None
_TAG = ('<CProgress '
'socket={{socket}} '
'uuid={{{uuid}}} '
'>')
def _instantiate(self):
return self._TAG.format(
uuid="'{}'".format(self._uuid)
)
# pylint: disable=no-self-use
def do_percent(self, percent):
"""Set the percentage of the progress.
Parameters
----------
percent : number
Sets the progress to this percentage.
Returns
-------
None
"""
return percent
def do_inc(self, inc):
"""Increments the progress indicator.
Parameters
----------
inc : number
Value to increment the progress.
Returns
-------
None
"""
return inc
def do_visible(self, visible):
"""Hides and shows the progress indicator.
Parameters
----------
visible : bool
If ``True`` shows the progress indicator
otherwise it is hidden.
Returns
-------
None
"""
return visible
def do_active(self):
"""Hides and shows the progress indicator.
Returns
-------
None
"""
pass
def do_success(self):
"""Hides and shows the progress indicator.
Returns
-------
None
"""
pass
def do_error(self):
"""Hides and shows the progress indicator.
Returns
-------
None
"""
pass
| python |
from rest_framework.test import APITestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from requests.auth import HTTPBasicAuth
from django.conf import settings
class JWTViewsTestCase(APITestCase):
def test_fails_when_logged_out(self):
self.client.logout()
response = self.client.post(reverse('auth-api-token-session'), {})
self.assertEqual(response.status_code, 403)
self.assertEqual(response.data["detail"], "Authentication credentials were not provided.")
def test_fails_with_non_session_authentication(self):
# Will try HTTP Basic Authentication, make sure that's elected in the settings
self.assertIn('rest_framework.authentication.BasicAuthentication', settings.REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'])
user = get_user_model().objects.create(username='user', password='pass')
self.client.auth = HTTPBasicAuth('user', 'pass')
response = self.client.post(reverse('auth-api-token-session'), {})
self.assertEqual(response.status_code, 403)
self.assertEqual(response.data["detail"], "Authentication credentials were not provided.")
def test_succeeds_with_session_authentication(self):
get_user_model().objects.create_user(username='user', password='pass')
self.client.login(username='user', password='pass')
response = self.client.post(reverse('auth-api-token-session'), {})
self.assertEqual(response.status_code, 200)
self.assertIn('token', response.data)
| python |
from guhs.guhs_configuration import GuhsConfiguration
def from_guhs_configuration(configuration: GuhsConfiguration):
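    """
    Serialize a GuhsConfiguration into a plain dict: a list of
    {'order_id', 'name'} entries for the targets, the boot selection timeout,
    and the default target's order_id as a string.
    """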
return {
'targets': [
{'order_id': t.order_id, 'name': t.name}
for t in configuration.targets
],
'boot_selection_timeout': configuration.boot_selection_timeout,
'default_target': str(configuration.default_target.order_id)
}
| python |
from .base import print_done, finalize, SRC_PATH, CONFIG_PATH
import invoke
@invoke.task
def isort(context, src_path=SRC_PATH):
print('Running isort...')
context.run('isort {src_path} -m VERTICAL_HANGING_INDENT --tc'.format(src_path=src_path))
print_done(indent=4)
@invoke.task
def yapf(context, src_path=SRC_PATH, config_path=CONFIG_PATH):
print('Running yapf...')
config_file = config_path / '.style.yapf'
context.run('yapf --style="{config_file}" {src_path} -r -i'.format(src_path=src_path, config_file=config_file))
print_done(indent=4)
@invoke.task
def unify(context, src_path=SRC_PATH):
print('Running unify...')
context.run('unify {src_path} -r -i --quote "\""'.format(src_path=src_path))
print_done(indent=4)
@invoke.task(name='format', default=True, post=[isort, yapf, unify, ])
def format_task(_):
print("Running formatters...")
formatter = invoke.Collection('format')
formatter.add_task(isort, 'isort')
formatter.add_task(yapf, 'yapf')
formatter.add_task(unify, 'unify')
formatter.add_task(format_task, 'all')
| python |
# Write a program that asks how many km a rented car was driven and for how
# many days it was rented. Compute the amount to pay, given that the car
# costs R$60 per day plus R$0.15 per km driven.
km = float(input("Quantos km percorreu?: "))
dia = int(input("Quantos dias ele foi alugado?: "))
print("O valor a ser pago é: R${:.2f}".format(km * 0.15 + dia * 60)) | python |
import os
# import pprint
import re
from datetime import datetime
from pathlib import Path
from nornir_napalm.plugins.tasks import napalm_get
from nornir_utils.plugins.functions import print_result
from nornir_utils.plugins.tasks.files import write_file
# from nornir_netmiko.tasks import netmiko_send_command, netmiko_send_config
from helpers import Helpers
from app.utils import write_cfg_on_db, get_last_config_for_device
from path_helper import search_configs_path
from differ import diff_get_change_state
from config import *
# nr_driver = Helpers()
drivers = Helpers(username=username, password=password)
search_configs_path = search_configs_path()
configs_folder_path = f"{Path(__file__).parent.parent}/configs"
# Timestamp used in the names of backup folders and files
timestamp = datetime.now()
# This function removes the "ntp clock-period" line on Cisco IOS switches,
# because that value changes constantly and would otherwise produce a diff on every backup
def clear_clock_period(config: str) -> str:
    # Pattern matching the ntp clock-period line
    pattern = r"ntp\sclock-period\s[0-9]{1,30}\n"
    # Return the modified config, or the original text if the command is not present
    return re.sub(pattern, "", str(config))
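# Illustrative example (hypothetical config snippet):
#   clear_clock_period("ntp server 10.0.0.1\nntp clock-period 17180101\nhostname sw1\n")
#   -> "ntp server 10.0.0.1\nhostname sw1\n"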
# Back up configs to files on disk
def backup_config(task, path):
    """
    Nornir task: back up the running configuration of a network device to a .cfg file.
    A new file is written only when the configuration differs from the last saved one.
    """
# Get ip address in task
ipaddress = task.host.hostname
# Get Last config dict
last_config = search_configs_path.get_lats_config_for_device(ipaddress=ipaddress)
# Start task and get config on device
device_config = task.run(task=napalm_get, getters=["config"])
device_config = device_config.result["config"]["running"]
if task.host.platform == "ios" and fix_clock_period is True:
device_config = clear_clock_period(device_config)
# Open last config
if last_config is not None:
last_config = open(last_config["config_path"])
# Get candidate config from nornir tasks
candidate_config = device_config
        # Compare the candidate config with the last saved one (False means they differ)
        result = diff_get_change_state(
            config1=candidate_config, config2=last_config.read()
        )
        # Close the last config file
        last_config.close()
    else:
        result = False
    # If the configs differ (or there is no previous config), save a new backup
    if result is False:
# Create directory for configs
if not os.path.exists(
f"{path}/{timestamp.date()}_{timestamp.hour}-{timestamp.minute}"
):
os.mkdir(f"{path}/{timestamp.date()}_{timestamp.hour}-{timestamp.minute}")
        # Start the task that writes the cfg file
task.run(
task=write_file,
content=device_config,
filename=f"{path}/{timestamp.date()}_{timestamp.hour}-{timestamp.minute}/{task.host.hostname}.cfg",
)
# Back up configs to the database
def backup_config_on_db(task):
    """
    Nornir task: back up the running configuration of a network device to the database.
    The configuration is stored only when it differs from the last saved one.
    """
# Get ip address in task
ipaddress = task.host.hostname
# Get the latest configuration file from the database,
# needed to compare configurations
last_config = get_last_config_for_device(ipaddress=ipaddress)
# Run the task to get the configuration from the device
device_config = task.run(task=napalm_get, getters=["config"])
device_config = device_config.result["config"]["running"]
if task.host.platform == "ios" and fix_clock_period is True:
device_config = clear_clock_period(device_config)
# Open last config
if last_config is not None:
last_config = last_config["last_config"]
# Get candidate config from nornir tasks
candidate_config = device_config
        # Compare the candidate config with the last saved one (False means they differ)
        result = diff_get_change_state(config1=candidate_config, config2=last_config)
    else:
        result = False
    # If the configs differ (or there is no previous config), save a new backup
    if result is False:
write_cfg_on_db(ipaddress=str(ipaddress), config=str(device_config))
def main():
"""
Main
"""
# Start process
with drivers.nornir_driver() as nr_driver:
result = nr_driver.run(
name="Backup configurations", path=configs_folder_path, task=backup_config
)
# Print task result
print_result(result, vars=["stdout"])
        # if you get an error, uncomment the next line to see the full result
# print_result(result)
def main2():
"""
Main
"""
# Start process
with drivers.nornir_driver() as nr_driver:
result = nr_driver.run(name="Backup configurations", task=backup_config_on_db)
# Print task result
print_result(result, vars=["stdout"])
        # if you get an error, uncomment the next line to see the full result
# print_result(result)
if __name__ == "__main__":
main2()
| python |
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import state_changes
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
class StateTestChange(state_changes._StateChangeState):
a = 1
b = 2
c = 3
class StateMachineTest(fixtures.TestBase):
def test_single_change(self):
"""test single method that declares and invokes a state change"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
self._state = StateTestChange.b
m = Machine()
eq_(m._state, _NO_CHANGE)
m.move_to_b()
eq_(m._state, StateTestChange.b)
def test_single_incorrect_change(self):
"""test single method that declares a state change but changes to the
wrong state."""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
self._state = StateTestChange.c
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Method 'move_to_b\(\)' "
r"caused an unexpected state change to <StateTestChange.c: 3>",
):
m.move_to_b()
def test_single_failed_to_change(self):
"""test single method that declares a state change but didn't do
the change."""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
pass
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Method 'move_to_b\(\)' failed to change state "
"to <StateTestChange.b: 2> as "
"expected",
):
m.move_to_b()
def test_change_from_sub_method_with_declaration(self):
"""test successful state change by one method calling another that
does the change.
"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def _inner_move_to_b(self):
self._state = StateTestChange.b
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
with self._expect_state(StateTestChange.b):
self._inner_move_to_b()
m = Machine()
eq_(m._state, _NO_CHANGE)
m.move_to_b()
eq_(m._state, StateTestChange.b)
def test_method_and_sub_method_no_change(self):
"""test methods that declare the state should not change"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a,), _NO_CHANGE
)
def _inner_do_nothing(self):
pass
@state_changes._StateChange.declare_states(
(StateTestChange.a,), _NO_CHANGE
)
def do_nothing(self):
self._inner_do_nothing()
m = Machine()
eq_(m._state, _NO_CHANGE)
m._state = StateTestChange.a
m.do_nothing()
eq_(m._state, StateTestChange.a)
def test_method_w_no_change_illegal_inner_change(self):
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.c
)
def _inner_move_to_c(self):
self._state = StateTestChange.c
@state_changes._StateChange.declare_states(
(StateTestChange.a,), _NO_CHANGE
)
def do_nothing(self):
self._inner_move_to_c()
m = Machine()
eq_(m._state, _NO_CHANGE)
m._state = StateTestChange.a
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Method '_inner_move_to_c\(\)' can't be called here; "
r"method 'do_nothing\(\)' is already in progress and this "
r"would cause an unexpected state change to "
"<StateTestChange.c: 3>",
):
m.do_nothing()
eq_(m._state, StateTestChange.a)
def test_change_from_method_sub_w_no_change(self):
"""test methods that declare the state should not change"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a,), _NO_CHANGE
)
def _inner_do_nothing(self):
pass
@state_changes._StateChange.declare_states(
(StateTestChange.a,), StateTestChange.b
)
def move_to_b(self):
self._inner_do_nothing()
self._state = StateTestChange.b
m = Machine()
eq_(m._state, _NO_CHANGE)
m._state = StateTestChange.a
m.move_to_b()
eq_(m._state, StateTestChange.b)
def test_invalid_change_from_declared_sub_method_with_declaration(self):
"""A method uses _expect_state() to call a sub-method, which must
declare that state as its destination if no exceptions are raised.
"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
# method declares StateTestChange.c so can't be called under
# expect_state(StateTestChange.b)
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.c
)
def _inner_move_to_c(self):
self._state = StateTestChange.c
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
with self._expect_state(StateTestChange.b):
self._inner_move_to_c()
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Cant run operation '_inner_move_to_c\(\)' here; will move "
r"to state <StateTestChange.c: 3> where we are "
"expecting <StateTestChange.b: 2>",
):
m.move_to_b()
def test_invalid_change_from_invalid_sub_method_with_declaration(self):
"""A method uses _expect_state() to call a sub-method, which must
declare that state as its destination if no exceptions are raised.
Test an error is raised if the sub-method doesn't change to the
correct state.
"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
# method declares StateTestChange.b, but is doing the wrong
# change, so should fail under expect_state(StateTestChange.b)
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def _inner_move_to_c(self):
self._state = StateTestChange.c
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
with self._expect_state(StateTestChange.b):
self._inner_move_to_c()
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"While method 'move_to_b\(\)' was running, method "
r"'_inner_move_to_c\(\)' caused an unexpected state change "
"to <StateTestChange.c: 3>",
):
m.move_to_b()
def test_invalid_prereq_state(self):
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
self._state = StateTestChange.b
@state_changes._StateChange.declare_states(
(StateTestChange.c,), "d"
)
def move_to_d(self):
self._state = "d"
m = Machine()
eq_(m._state, _NO_CHANGE)
m.move_to_b()
eq_(m._state, StateTestChange.b)
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Can't run operation 'move_to_d\(\)' when "
"Session is in state <StateTestChange.b: 2>",
):
m.move_to_d()
def test_declare_only(self):
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
state_changes._StateChangeStates.ANY, StateTestChange.b
)
def _inner_move_to_b(self):
self._state = StateTestChange.b
def move_to_b(self):
with self._expect_state(StateTestChange.b):
self._move_to_b()
m = Machine()
eq_(m._state, _NO_CHANGE)
with expect_raises_message(
AssertionError,
"Unexpected call to _expect_state outside of "
"state-changing method",
):
m.move_to_b()
def test_sibling_calls_maintain_correct_state(self):
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
state_changes._StateChangeStates.ANY, StateTestChange.c
)
def move_to_c(self):
self._state = StateTestChange.c
@state_changes._StateChange.declare_states(
state_changes._StateChangeStates.ANY, _NO_CHANGE
)
def do_nothing(self):
pass
m = Machine()
m.do_nothing()
eq_(m._state, _NO_CHANGE)
m.move_to_c()
eq_(m._state, StateTestChange.c)
def test_change_from_sub_method_requires_declaration(self):
"""A method can't call another state-changing method without using
_expect_state() to allow the state change to occur.
"""
_NO_CHANGE = state_changes._StateChangeStates.NO_CHANGE
class Machine(state_changes._StateChange):
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def _inner_move_to_b(self):
self._state = StateTestChange.b
@state_changes._StateChange.declare_states(
(StateTestChange.a, _NO_CHANGE), StateTestChange.b
)
def move_to_b(self):
self._inner_move_to_b()
m = Machine()
with expect_raises_message(
sa_exc.IllegalStateChangeError,
r"Method '_inner_move_to_b\(\)' can't be called here; "
r"method 'move_to_b\(\)' is already in progress and this would "
r"cause an unexpected state change to <StateTestChange.b: 2>",
):
m.move_to_b()
| python |
import operator
import rules
from rules.predicates import is_authenticated
from marketplace.domain import marketplace
rules.add_perm('user.is_same_user', operator.eq)
rules.add_perm('user.is_authenticated', is_authenticated)
rules.add_rule('user.is_site_staff', marketplace.user.is_site_staff)
rules.add_rule('volunteer.new_user_review', marketplace.user.is_site_staff)
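# Hedged usage sketch: django-rules exposes has_perm() / test_rule() for checking
# what was registered above; the user objects below are placeholders:
#
# rules.has_perm('user.is_same_user', request.user, profile_owner)   # operator.eq on the two users
# rules.has_perm('user.is_authenticated', request.user)
# rules.test_rule('user.is_site_staff', request.user)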
| python |