seq_id
stringlengths 7
11
| text
stringlengths 156
1.7M
| repo_name
stringlengths 7
125
| sub_path
stringlengths 4
132
| file_name
stringlengths 4
77
| file_ext
stringclasses 6
values | file_size_in_byte
int64 156
1.7M
| program_lang
stringclasses 1
value | lang
stringclasses 38
values | doc_type
stringclasses 1
value | stars
int64 0
24.2k
⌀ | dataset
stringclasses 1
value | pt
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|---|---|---|---|
35069305556
|
from enum import unique
from flask_sqlalchemy import SQLAlchemy
from .utils import utcnow
db = SQLAlchemy()
class Home(db.Model):
    """Landing-page content entry: a title, body text, image and label."""
    __tablename__ = "home"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120), unique=False, nullable=False)
    content = db.Column(db.String(250), unique=True, nullable=False)
    image_url = db.Column(db.String(250), unique=True, nullable=True)
    label = db.Column(db.String(120), unique=False, nullable=False)
    created = db.Column(db.DateTime, default=utcnow)
    updated = db.Column(db.DateTime, default=utcnow)

    def __repr__(self):
        # BUG FIX: the original formatted self.home_content, an attribute that
        # does not exist on this model, so repr() raised AttributeError.
        return '<Home %r>' % self.title

    def serialize(self):
        """Return a JSON-serializable dict of this row's public fields."""
        return {
            "id": self.id,
            "title": self.title,
            "content": self.content,
            "image_url": self.image_url,
            "label": self.label,
            "created": self.created,
            "updated": self.updated
        }
class ContactForm(db.Model):
    """One submitted contact-form message."""
    __tablename__ = "contact_form"
    id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(120), unique=False, nullable=False)
    last_name = db.Column(db.String(120), unique=False, nullable=False)
    title = db.Column(db.String(120), unique=False, nullable=False)
    email = db.Column(db.String(250), unique=False, nullable=False)
    message = db.Column(db.Text, unique=False, nullable=False)
    created = db.Column(db.DateTime, default=utcnow)

    def __repr__(self):
        # BUG FIX: the original formatted self.contact_form, an attribute that
        # does not exist on this model, so repr() raised AttributeError.
        return '<ContactForm %r>' % self.email

    def serialize(self):
        """Return a JSON-serializable dict of this submission."""
        return {
            "id": self.id,
            "first_name": self.first_name,
            "last_name": self.last_name,
            "title": self.title,
            "email": self.email,
            "message": self.message,
            "created": self.created,
        }
|
jgustavoj/midwestern-project
|
src/api/models.py
|
models.py
|
py
| 1,995 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72492706428
|
from xdist.scheduler import LoadScheduling
class XDistScheduling(LoadScheduling):
    """pytest-xdist scheduler that replays a fixed test-to-gateway assignment.

    test_order maps each test nodeid to the gateway id (e.g. 'gw0') that
    should run it; schedule() sends every worker exactly its recorded share
    instead of load-balancing.
    """

    def __init__(self, config, log, test_order):
        super().__init__(config, log)
        # Mapping: test nodeid -> gateway id the test must run on.
        self.test_order = test_order

    def schedule(self):
        """Distribute all collected tests according to self.test_order.

        Mirrors LoadScheduling.schedule() up to the point where pending items
        are created, then assigns tests by recorded gateway instead of
        chunking, and immediately shuts the nodes down after queueing.
        """
        assert self.collection_is_completed
        # Initial distribution already happened, reschedule on all nodes
        if self.collection is not None:
            for node in self.nodes:
                self.check_schedule(node)
            return
        # XXX allow nodes to have different collections
        if not self._check_nodes_have_same_collection():
            self.log("**Different tests collected, aborting run**")
            return
        # Collections are identical, create the index of pending items.
        self.collection = list(self.node2collection.values())[0]
        self.pending[:] = range(len(self.collection))
        if not self.collection:
            return
        if self.maxschedchunk is None:
            self.maxschedchunk = len(self.collection)
        # Route each test to the node whose gateway id matches the recorded
        # assignment. NOTE(review): an unknown gateway id raises IndexError
        # and a test absent from the collection raises ValueError — confirm
        # callers guarantee test_order matches the current collection.
        for (test, gw) in self.test_order.items():
            node = [x for x in self.nodes if x.gateway.id == gw][0]
            test_id = self.collection.index(test)
            self.node2pending[node].append(test_id)
        # Send each node its full work list at once, then mark nothing pending
        # and ask the nodes to shut down after finishing their queue.
        for node in self.nodes:
            node.send_runtest_some(self.node2pending[node])
        self.pending = []
        for node in self.nodes:
            node.shutdown()
|
JaurbanRH/pytest-persistence
|
pytest_persistence/XDistScheduling.py
|
XDistScheduling.py
|
py
| 1,428 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22927248113
|
import rclpy
import math
from rclpy.node import Node
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from rclpy.qos import qos_profile_sensor_data
class MinimalSubscriber(Node):
    """ROS2 "follow me" node.

    Subscribes to the 'scan' LaserScan topic and publishes Twist commands on
    'cmd_vel' that drive the robot toward the barycentre of the scan points
    detected in the front sector (within 30 degrees of straight ahead) and
    inside the [r_min, r_max] distance band.
    """

    def __init__(self):
        super().__init__('minimal_subscriber')
        # Last commanded linear and angular velocities.
        self.lin_vel = 0
        self.rot_vel = 0
        self.get_logger().info ("debut init")
        self.publisher= self.create_publisher(Twist,'cmd_vel' , 10)
        self.subscription = self.create_subscription(
            LaserScan,
            'scan',
            self.listener_callback,
            qos_profile_sensor_data)
        self.subscription  # prevent unused variable warning
        self.get_logger().info ("debut init")

    def listener_callback(self, msg):
        """Process one LaserScan and publish a proportional velocity command.

        The target is the barycentre of the accepted points; the command is
        proportional (gain 2) to the barycentre's offset from the middle of
        the [r_min, r_max] band (forward) and from the scan axis (rotation).
        """
        # self.get_logger().info ('I heard: "%s"' % msg)
        self.get_logger().info ('Distances : ' "%s" %msg.ranges)
        #self.get_logger().info ('Range max : ' "%s" %msg.ranges)
        self.get_logger().info ('angle min : ' "%s" %msg.angle_min)
        self.get_logger().info ('angle max : ' "%s" %msg.angle_max)
        self.get_logger().info ('temps entre 2 scans : ' "%s" %msg.scan_time)
        self.get_logger().info ('time_increment : ' "%s" %msg.time_increment)
        sum_x =0
        sum_y =0
        i = -1          # beam index (incremented at the top of the loop)
        tour =0         # number of points accepted into the barycentre
        r_min = 0.3     # follow band, metres
        r_max =1
        x_centre = 0
        msg_twist =Twist()
        for r in msg.ranges :
            i=i+1
            # Beam angle reconstructed from the scan metadata.
            theta = msg.angle_min +msg.angle_increment *i
            thetadeg = math.degrees(theta)
            print('Theta ={}, r ={}, thetadeg = {}'.format(theta,r,thetadeg))
            # Keep points within 30 degrees of straight ahead
            # (assumes angle 0 is the robot's forward axis — TODO confirm)...
            if 330<thetadeg or thetadeg<30 :
                # ...and inside the follow band.
                if r>r_min and r<r_max :
                    x= r * math.cos(theta)
                    y= r * math.sin(theta)
                    sum_x = sum_x +x
                    sum_y = sum_y +y
                    tour +=1
        if tour == 0 :
            # Nothing to follow: stop the robot.
            msg_twist.linear.x = 0.0
            msg_twist.angular.z = 0.0
        else :
            bari_x = sum_x / tour
            bari_y = sum_y /tour
            print('bari_x ={}, bari_y ={}'.format(bari_x, bari_y))
            print('time ={}'.format(msg.scan_time))
            # Proportional control toward the middle of the follow band.
            x_centre = (r_max + r_min)/2
            delta_x = bari_x -x_centre
            delta_y = bari_y
            self.lin_vel = delta_x *2
            self.rot_vel = delta_y *2
            msg_twist.linear.x = self.lin_vel
            msg_twist.angular.z = self.rot_vel
            print('delta_x ={}, delta_y ={}'.format(delta_x,delta_y))
            print('lin_vel ={}, rot_vel ={}'.format(self.lin_vel, self.rot_vel))
        self.publisher.publish(msg_twist)
        #twist.linear.x = 0.0; twist.linear.y = 0.0; twist.linear.z = 0.0
        #twist.angular.x = 0.0; twist.angular.y = 0.0; twist.angular.z = 0.0
        #pub.publish(twist)
        #Follow me
        # If abs(bari_x) or bari_y > 0.5 m then
        # move the robot by r toward theta_moy
def main(args=None):
    """Initialise rclpy, spin the follower node, then shut down cleanly."""
    rclpy.init(args=args)
    node = MinimalSubscriber()
    rclpy.spin(node)
    # Destroying the node explicitly is optional — the garbage collector
    # would otherwise do it when the node object is collected.
    node.destroy_node()
    rclpy.shutdown()
# Standard entry-point guard: start the node only when run as a script.
if __name__ == '__main__':
    main()
|
Anth0o0/02-Robotics_team2
|
subscriber_member_function.py
|
subscriber_member_function.py
|
py
| 3,418 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75385541946
|
# Facebook Hacker Cup 2021 round 1, "Weak Typing". Per test case: read a
# length and a string of F/O/X keystrokes, then print the total number of
# hand switches summed over all substrings, modulo 1_000_000_007.
for i in range(int(input())):
    a = int(input())  # declared string length; read but unused (the string follows)
    i_last = -1       # index of the most recent non-'F' character (-1 = none yet)
    last = "F"        # that character itself ('F' stands for "none seen")
    dp = [0]          # dp[j+1]: presumably the switch total over all substrings ending at j — verify vs. editorial
    s = input()
    for j, c in enumerate(s):
        # A new forced switch appears only when c is O/X and differs from the
        # previous O/X; every substring covering both positions inherits it.
        if c != "F" and s[i_last] != "F" and c != last:
            dp.append((dp[i_last + 1] + i_last + 1))
        else:
            dp.append(dp[-1])
        if c != "F":
            i_last = j
            last = c
    print(f"Case #{i+1}: {sum(dp) % 1_000_000_007}")
|
fortierq/competitions
|
fb_hacker_cup/2021/round1/2/weak_typing.py
|
weak_typing.py
|
py
| 412 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37663232255
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 13 02:55:11 2021
@author: Anato
"""
from pathlib import Path
# Resolve the directory layout relative to this file; the crawler keeps its
# state files under <project root>/info/.
source_path = Path(__file__).resolve()
source_dir = source_path.parent
main_dir = str(source_dir.parent)
info_dir = '{}/info/'.format(main_dir)
def open_info(file_name, mode):
    """Open <info_dir>/<file_name>.txt in the given mode and return the file."""
    path = '{}{}.txt'.format(info_dir, file_name)
    return open(path, mode)
import scrapy
from urllib.parse import urljoin
class MySpider(scrapy.Spider):
    """Scrapy spider that scans Codeforces contest standings.

    Reads info/to_check.txt (handles of interest) and info/s_url.txt (the
    contest URL), walks every standings page, and appends each watched handle
    found in the standings to info/result.txt.
    """
    name = "cfspider"
    allowed_domains = ["codeforces.com"]
    visited_urls = []  # standings pages already queued, to avoid re-crawling
    d = {}             # handle -> 1 lookup table of the handles to check

    def start_requests(self):
        """Load the handle list, truncate the result file, queue page 1."""
        with open_info('to_check', 'r') as f:
            self.d = dict.fromkeys([el for el in f.read().split()], 1)
        # Truncate any result file left over from a previous run.
        fs = open_info('result', 'w')
        fs.close()
        url = ""
        with open_info('s_url', 'r') as f:
            url = f.read() + '/standings/page/1'
        #self.logger.info(url)
        yield scrapy.Request(url = url, callback = self.parse)

    def parse(self, response):
        """Record matching handles on this page and follow the other pages."""
        # The second <td> of each participant row holds the handle link text.
        a = response.xpath('//tr[@participantid]/td[2]/a/text()').extract()
        #with open('debug.txt', 'a') as f:
        #    for el in a:
        #        f.write(el + '\n')
        with open_info('result', 'a') as f:
            for el in a:
                if el in self.d:
                    f.write(el + '\n')
        # Queue every other standings page exactly once.
        next_pages = response.xpath('//a[contains(@href,"standings/page")]/@href').extract()
        for next_page in next_pages:
            url = urljoin(response.url + '/', next_page)
            if url not in self.visited_urls:
                self.visited_urls.append(url)
                yield response.follow(url, callback = self.parse)
|
Anatoly7/codeforces-spider
|
tutorial/spiders/codeforces_spider.py
|
codeforces_spider.py
|
py
| 1,714 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25692788695
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: LAGOU Spider
@author: Troy
@email: [email protected]
"""
# Project outline:
# p1: crawl the search index pages for the given keyword/city/job and collect the job-post URLs
# p2: fetch each URL and parse out the data
# p3: store the records in MongoDB
# Tech stack: requests urllib json re pq pymongo
import requests
from requests.exceptions import ConnectionError
from pyquery import PyQuery as pq
import urllib
import json
import pymongo
import numpy as np
import time
from config import *
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]
proxy = None
def started_search_url(start_url, page):
    """POST one page of the Lagou job-search API and return its result list.

    start_url - base API endpoint; the query parameters are appended to it.
    page      - 1-based result page number, sent as form field 'pn'.
    Returns the list at content.positionResult.result, or None on any HTTP
    or network failure. NOTE(review): a 200 response whose JSON lacks those
    keys raises KeyError here — confirm the API contract.
    """
    headers = {
        'Accept' : 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding' : 'gzip, deflate, br',
        'Accept-Language' : 'zh-CN,zh;q=0.9',
        'Cache-Control' : 'no-cache',
        'Connection' : 'keep-alive',
        'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
        'Cookie' : COOKIE,
        'Host' : 'www.lagou.com',
        'Origin' : 'https://www.lagou.com',
        'Pragma' : 'no-cache',
        'Referer' : REFERER,
        'User-Agent' : 'Mozilla/5.0 Chrome/58.0.3029.81 Safari/537.36',
    }
    query_parameters = {
        'city' : CITY,
        'needAddtionalResult' : 'false',
        'isSchoolJob' : '0'
    }
    form_data = {
        'first' : 'false',
        'pn' : page,      # page number
        'kd' : KEYWORD    # search keyword from config
    }
    url = start_url + urllib.parse.urlencode(query_parameters)
    try:
        # allow_redirects=False: Lagou redirects throttled clients; anything
        # other than 200 is treated as a failure instead of being followed.
        res = requests.post(url, headers=headers, data=form_data, allow_redirects=False)
        if res.status_code == 200:
            print('get succeed 200, page:', page)
            res.encoding = res.apparent_encoding
            res = json.loads(res.text)
            return res['content']['positionResult']['result']
        else:
            print('get failed, status code:', res.status_code)
            return None
    except ConnectionError as e:
        print('requests error:', e.args)
        return None
def get_base_data(data):
    """Flatten one job-position record and attach the scraped description.

    data - one element of the result list returned by started_search_url().
    Returns a dict ready for MongoDB storage, or None when the record is
    malformed (not a dict, or missing an expected field).
    """
    try:
        companyId = data['companyId']
        companyFullName = data['companyFullName']
        companyShortName = data['companyShortName']
        companySize = data['companySize']
        positionAdvantage = data['positionAdvantage']
        city = data['city']
        latitude = data['latitude']
        longitude = data['longitude']
        stationname = data['stationname']
        subwayline = data['subwayline']
        financeStage = data['financeStage']
        positionName = data['positionName']
        firstType = data['firstType']
        secondType = data['secondType']
        workYear = data['workYear']
        education = data['education']
        district = data['district']
        salary = data['salary']
        positionLables = data['positionLables']
        positionId = data['positionId']
        # The listing only carries summary fields; fetch the detail page for
        # the full description text.
        html = request_index_search(positionId)
        position_description = parse_url_detail(html)
        result = {
            'companyId' : companyId,
            'companyFullName' : companyFullName,
            'companyShortName' : companyShortName,
            # BUG FIX: companySize and city were extracted above but
            # accidentally left out of the stored record.
            'companySize' : companySize,
            'city' : city,
            'positionAdvantage' : positionAdvantage,
            'latitude' : latitude,
            'longitude' : longitude,
            'stationname' : stationname,
            'subwayline' : subwayline,
            'financeStage' : financeStage,
            'positionName' : positionName,
            'firstType' : firstType,
            'secondType' : secondType,
            'workyear' : workYear,
            'education' : education,
            'district' : district,
            'salary' : salary,
            'positionLables' : positionLables,
            'positionId' : positionId,
            'position_description' : position_description
        }
        return result
    except (TypeError, KeyError):
        # TypeError: data is not a mapping. KeyError: an expected field is
        # missing (the original caught only TypeError, so missing keys crashed
        # the whole crawl).
        print('data get error')
        return None
def get_proxy():
    """Fetch one proxy address from the proxy pool; None on any failure."""
    try:
        resp = requests.get(PROXIES_URL)
        return resp.text if resp.status_code == 200 else None
    except ConnectionError:
        return None
def request_index_search(positionId):
    """Fetch the HTML of one job-detail page, rotating proxies on a 302.

    positionId - numeric job id used to build the detail-page URL.
    Returns the decoded page text, or None on failure. Side effect: refreshes
    the module-level `proxy` whenever Lagou answers 302 (its anti-crawler
    redirect) and retries recursively through the new proxy.
    NOTE(review): the recursion is unbounded if every proxy keeps getting 302,
    and non-200/302 status codes fall through returning None implicitly.
    """
    global proxy
    url = 'https://www.lagou.com/jobs/{}.html'.format(positionId)
    headers = {
        'Accept' : 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding' : 'gzip, deflate, br',
        'Accept-Language' : 'zh-CN,zh;q=0.9',
        'Cache-Control' : 'no-cache',
        'Connection' : 'keep-alive',
        'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
        'Cookie' : COOKIE,
        'Host' : 'www.lagou.com',
        'Pragma' : 'no-cache',
        'User-Agent' : 'Mozilla/5.0 Chrome/58.0.3029.81 Safari/537.36'
    }
    try:
        # Route through the current proxy when one is set.
        if proxy:
            proxies = {
                'https' : 'https://' + proxy
            }
            res = requests.get(url, headers=headers, proxies=proxies, allow_redirects=False)
        else:
            res = requests.get(url, headers=headers, allow_redirects=False)
        print('Res.status_code:', res.status_code)
        if res.status_code == 200:
            print('get detail url succeed', url)
            res.encoding = res.apparent_encoding
            return res.text
        if res.status_code == 302:
            # 302 means the anti-crawler kicked in: get a fresh proxy, retry.
            print('chunkError', res.status_code, url)
            proxy = get_proxy()
            if proxy:
                return request_index_search(positionId)
            else:
                print('proxy is fail')
                return None
    except ConnectionError as e:
        print('get url error:', e.args, url)
        return None
def parse_url_detail(html):
    """Extract the job-description text from a job-detail page's HTML."""
    return pq(html)('#job_detail > dd.job_bt > div').text()
def save_to_mongoDB(result):
    """Upsert one position record into MongoDB, keyed by positionId.

    result - dict produced by get_base_data(); must contain 'positionId'.
    """
    # Collection.update() was deprecated and later removed from pymongo;
    # update_one(..., upsert=True) is the supported equivalent of the old
    # positional upsert=True form.
    if db[MONGO_TABLE].update_one({'positionId' : result['positionId']}, {'$set' : result}, upsert=True):
        print('save to mongoDB Succeed', result)
    else:
        print('save to mongoDB Failed', result)
def main(pn):
    """Crawl one search-result page and store every record it yields.

    pn - 1-based page number of the search-result listing.
    """
    # BUG FIX: np.random.randint(0.1, 1) truncates its float bounds to
    # randint(0, 1), which always returns 0 - the throttle never slept.
    # uniform(0.1, 1) gives the intended random sub-second delay.
    time.sleep(np.random.uniform(0.1, 1))
    datas = started_search_url(start_url=START_URL, page=pn)
    print(datas)
    # started_search_url returns None on failure; iterating it crashed.
    if not datas:
        return
    for data in datas:
        result = get_base_data(data)
        # get_base_data returns None for malformed records; skip those.
        if result is not None:
            save_to_mongoDB(result)
# Crawl search-result pages 1..19 when executed as a script.
if __name__ == '__main__':
    for pn in range(1, 20):
        main(pn)
|
Troysps/spider
|
lagou/spider.py
|
spider.py
|
py
| 6,376 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24796364963
|
from __future__ import division
import os
import re
import sys
import struct
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
def load(fname):
    """Load a PFM image file.

    fname - path to a PFM file ('PF' header = 3-channel color, 'Pf' = gray).
    Returns (data, scale): data is a float32 array of shape (H, W, 3) or
    (H, W) with rows flipped to top-down order, scale is the absolute value
    of the header scale field.
    Raises Exception on a malformed header.
    """
    # BUG FIX: PFM is a binary format - the original opened the file in text
    # mode, which corrupts/round-trips the float payload on Python 3, and it
    # never closed the file. Open in 'rb' inside a context manager instead.
    with open(fname, 'rb') as file:
        header = file.readline().rstrip()
        if header == b'PF':
            color = True
        elif header == b'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(file.readline().rstrip())
        if scale < 0:  # negative scale signals little-endian data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(file, endian + 'f')
    # PFM stores rows bottom-up; flip to the conventional top-down order.
    shape = (height, width, 3) if color else (height, width)
    return np.flipud(np.reshape(data, shape)).astype(np.float32), scale
def save(fname, image, scale=1):
    """Write a float32 image to a PFM file.

    fname - destination path.
    image - float32 array: (H, W, 3) for color, (H, W) or (H, W, 1) for gray.
    scale - PFM scale field; written negative for little-endian data.
    Raises Exception for a non-float32 dtype or an unsupported shape.
    """
    # BUG FIX: the original opened the file in text mode and never closed it,
    # corrupting the binary payload on Python 3 and risking unflushed data.
    with open(fname, 'wb') as file:
        color = None
        if image.dtype.name != 'float32':
            raise Exception('Image dtype must be float32.')
        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale
            color = False
        else:
            raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
        file.write(b'PF\n' if color else b'Pf\n')
        file.write(('%d %d\n' % (image.shape[1], image.shape[0])).encode('ascii'))
        # A negative scale marks little-endian sample data per the PFM spec.
        endian = image.dtype.byteorder
        if endian == '<' or endian == '=' and sys.byteorder == 'little':
            scale = -scale
        file.write(('%f\n' % scale).encode('ascii'))
        # PFM rows are stored bottom-up.
        np.flipud(image).tofile(file)
def show(img):
    """Display *img* as a grayscale matplotlib figure (blocks until closed)."""
    plt.imshow(img.astype(np.float32), cmap='gray')
    plt.show()
|
kbatsos/CBMV
|
pylibs/pfmutil.py
|
pfmutil.py
|
py
| 1,781 |
python
|
en
|
code
| 52 |
github-code
|
6
|
33213038710
|
# Loop practice: the while statement.
# while <condition>:
#     body executed while the condition is true
# Program that sums the odd and the even numbers from 1 to 100.
odd = even = 0
i = 1
while i <= 100:
    if i % 2:
        odd += i
    else:
        even += i
    i += 1
print('1~100까지 홀수의 합 : ', odd)
print('1~100까지 짝수의 합 : ', even)
|
lilacman888/pythonExam
|
src/2020_06_02/while03.py
|
while03.py
|
py
| 357 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
74604827388
|
import sys

# Baekjoon 2869: a snail climbs `a` metres per day and slides back `b` per
# night; print the number of days needed to reach height `v` (no slide on
# the final day).
a, b, v = map(int, sys.stdin.readline().rstrip().split())
# After the last day's climb of `a`, the remaining (v - a) must be covered
# at a net (a - b) per full day/night cycle, rounded up; +1 for the last day.
# (The original also computed `res` and `tempV` here and immediately
# overwrote them - dead stores removed.)
res = (v - a) // (a - b)
if (v - a) % (a - b) > 0:
    res += 2
else:
    res += 1
print(res)
|
LimTurtle/BasicAlgorithm
|
Baekjoon/2869.py
|
2869.py
|
py
| 190 |
python
|
es
|
code
| 0 |
github-code
|
6
|
29852193126
|
def menu(water, milk, beans, cup, money):
    """Print the machine's current stock and cash balance."""
    print("The Coffee machine has :")
    supplies = ((water, "water"), (milk, "milk"), (beans, "coffee beans"),
                (cup, "disposable cups"), (money, "money"))
    for amount, label in supplies:
        print(str(amount) + " of " + label)
def fill(water, water_add, milk, milk_add, beans, beans_add, cup, cup_add):
    """Return the (water, milk, beans, cup) stock levels after a refill."""
    return (int(water) + int(water_add),
            int(milk) + int(milk_add),
            int(beans) + int(beans_add),
            int(cup) + int(cup_add))
def take(money):
    """Hand over the cash box contents (prints the amount given)."""
    print(f"I gave you {money}")
def buy(water, water_used, milk, milk_used, beans, beans_used, cup, cup_used, money, money_take):
    """Return (water, milk, beans, cup, money) after selling one drink.

    Consumables are decreased by the *_used amounts; money grows by
    money_take (the drink's price).
    """
    return (int(water) - int(water_used),
            int(milk) - int(milk_used),
            int(beans) - int(beans_used),
            int(cup) - int(cup_used),
            int(money) + int(money_take))
# Interactive coffee-machine driver loop; y stays 1 for the whole session.
y = 1
# Initial stock: ml of water and milk, grams of beans, cup count, cash.
water = 400
milk = 540
beans = 120
cup = 9
money = 550
while y == 1:
    menu(water,milk,beans,cup,money)
    command = str(input("write action (buy, fill, take):\n> "))
    if "buy" in command:
        kind_of_coffee = str(input("What do you want to buy ? 1 - espresso , 2 - latte , 3 - capppuccino :\n> "))
        # Recipes (water ml, milk ml, beans g, cups, price): espresso
        # 250/0/16/1/$4, latte 350/75/20/1/$7, cappuccino 200/100/12/1/$6.
        # NOTE(review): stock is never checked first, so quantities can go
        # negative after a sale.
        if "1" in kind_of_coffee:
            x = (buy(water,250,milk,0,beans,16,cup,1,money,4))
            menu((x[0]),(x[1]),(x[2]),(x[3]),(x[4]))
            water = (x[0])
            milk = (x[1])
            beans = (x[2])
            cup = (x[3])
            money = (x[4])
        elif "2" in kind_of_coffee:
            x = (buy(water,350,milk,75,beans,20,cup,1,money,7))
            menu((x[0]),(x[1]),(x[2]),(x[3]),(x[4]))
            water = (x[0])
            milk = (x[1])
            beans = (x[2])
            cup = (x[3])
            money = (x[4])
        elif "3" in kind_of_coffee:
            x = (buy(water,200,milk,100,beans,12,cup,1,money,6))
            menu((x[0]),(x[1]),(x[2]),(x[3]),(x[4]))
            water = (x[0])
            milk = (x[1])
            beans = (x[2])
            cup = (x[3])
            money = (x[4])
    elif "fill" in command:
        water_add = int(input("Write how many ml of water do you want add\n> "))
        milk_add = int(input("Write how many ml of milk do you want add\n> "))
        beans_add = int(input("Write how many grams of coffee beans do you want add\n> "))
        cup_add = int(input("Write how many disposable cups of coffee do you want add\n> "))
        x = fill(water, water_add, milk, milk_add, beans, beans_add, cup, cup_add)
        menu((x[0]),(x[1]),(x[2]),(x[3]),money)
        water = (x[0])
        milk = (x[1])
        beans = (x[2])
        cup = (x[3])
        money = (money)
    elif "take" in command:
        # Empty the cash box.
        take(money)
        money = int(0)
        menu(water,milk,beans,cup,money)
    # After each action, ask whether to keep running.
    end = str(input("1 = exit / 2 = continue "))
    if "1" in end:
        break
    else:
        pass
|
karnthiLOL/myFirstPythonHW
|
Other/Coffee Cal the great Origins.py
|
Coffee Cal the great Origins.py
|
py
| 3,000 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12918395650
|
#!/usr/bin/env python3
""" Import build-in and custom modules to check system utilization and connection"""
import shutil
import psutil
import network
site_name = "http://www.google.com"
def check_disk_usage(disk):
    """Return True when more than 20% of the given disk/path is free."""
    usage = shutil.disk_usage(disk)
    percent_free = usage.free / usage.total * 100
    return percent_free > 20
def check_cpu_usage():
    """Return True when CPU utilisation (sampled over 1 second) is below 75%."""
    return psutil.cpu_percent(1) < 75
# If there's not enough disk, or not enough CPU, print an error.
# Output information about connection.
# (`network` is a project-local module; both of its checks must pass.)
if not check_disk_usage('/') or not check_cpu_usage():
    print("ERROR!")
elif network.check_localhost() and network.check_connectivity(site_name):
    print("Everything ok")
else:
    print("Network checks failed")
|
TyapinIA/Coursera_Google_IT_Automation_with_Python
|
psutil_shutil/health_check.py
|
health_check.py
|
py
| 803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4669072111
|
from Bio.Seq import Seq
def get_pattern_count(text, pattern):
    """Count occurrences of pattern in text, counting overlapping matches.

    Equivalent to Bio.Seq.Seq(text).count_overlap(pattern), but implemented
    with plain str.find so the hot loop does not construct a Seq object per
    call and the module no longer needs Biopython at runtime here.
    """
    count = 0
    pos = text.find(pattern)
    while pos != -1:
        count += 1
        # Advance by one (not by len(pattern)) so overlapping hits count.
        pos = text.find(pattern, pos + 1)
    return count
# rosalind_ba1e.txt: line 1 = genome, line 2 = "k l t" where k is the k-mer
# length, l the sliding-window length, and t the minimum occurrence count.
with open('rosalind_ba1e.txt') as file:
    genome = file.readline().rstrip()
    k, l, t = map(lambda x: int(x), file.readline().rstrip().split(' '))
genome_len = len(genome)
clump = []
# Slide an l-length window across the genome; within each window, count each
# k-mer and keep those occurring at least t times (clump-forming patterns).
# NOTE(review): each window is recounted from scratch, so this is roughly
# O(n * l^2) - correct, but slow on large Rosalind inputs.
for i in range(genome_len - l + 1):
    current_genome = genome[i:i+l]
    current_genome_len = len(current_genome)
    for j in range(current_genome_len - k + 1):
        pattern = current_genome[j:j+k]
        pattern_count = get_pattern_count(current_genome, pattern)
        if pattern_count >= t and pattern not in clump:
            clump.append(pattern)
            print(pattern)
# Emit the patterns both to stdout and to output.txt, space-separated.
output = ' '.join(clump)
print(output)
with open('output.txt', 'w') as file:
    file.write(output)
|
Partha-Sarker/Rosalind-Problems
|
Lab Assignment - 1/chapter 1/ba1e Find Patterns Forming Clumps in a String.py
|
ba1e Find Patterns Forming Clumps in a String.py
|
py
| 802 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12987469517
|
#A python program to find anagrams of a given word.
import sys
allLengthAnagrams = []
def findAnagrams(aWord) :
    """Return every permutation of aWord (input length must be >= 2).

    Side effect: while recursing, every permutation of every proper sub-word
    is appended to the module-level list allLengthAnagrams (with duplicates
    when letters repeat).
    NOTE(review): a 1-character word skips both the base case and the loop's
    useful work and returns [] - callers must pass at least 2 characters.
    """
    anagrams = []
    # Base case: the two orderings of a two-letter word.
    if( len(aWord) == 2 ) :
        anagrams.append( aWord[0] + aWord[1] )
        anagrams.append( aWord[1] + aWord[0] )
        return anagrams
    # Recursive case: fix each letter in turn and permute the remainder.
    for i in range(len(aWord)) :
        aLetter = aWord[i]
        restOfLetters = ""
        for j in range(len(aWord)) :
            if( i != j ) :
                restOfLetters = restOfLetters + aWord[j]
        anagramRest = findAnagrams(restOfLetters)
        for restWord in anagramRest :
            # Record the sub-word permutation globally, then extend it.
            allLengthAnagrams.append(restWord)
            anagrams.append(aLetter + restWord)
    return anagrams
# The word to anagram is the first command-line argument.
if( len(sys.argv) < 2 ) :
    print("How about a parameter there champ?")
    sys.exit(1)
aWord = sys.argv[1]
wordAnagrams = findAnagrams(aWord)
#print(wordAnagrams)
#print(allLengthAnagrams)
#print("There were", len(wordAnagrams), "found")
# Collect every permutation of every length. BUG FIX: findAnagrams only
# records sub-word permutations in allLengthAnagrams, so the full-length
# permutations (wordAnagrams) were never searched for - same-length English
# anagrams of the input word were missing from the output.
allLengthAnagrams = set(allLengthAnagrams) | set(wordAnagrams)
allEnglishWords = []
# allwords.txt: comma-separated English word list in the working directory.
with open("allwords.txt") as fIn:
    allEnglishWords = fIn.read().split(",")
# Keep only the permutations that are real English words.
englishAnagrams = []
for englishWord in allEnglishWords :
    if englishWord in allLengthAnagrams :
        englishAnagrams.append(englishWord)
print("Here is all english anagrams")
print(englishAnagrams)
|
CodeSpaceIndica/Python
|
ep6_anagrams/Anagrams.py
|
Anagrams.py
|
py
| 1,286 |
python
|
en
|
code
| 4 |
github-code
|
6
|
33062616710
|
import pandas as pd
def agg_count(row):
    """Fold the merge-suffixed Count columns of a row into 'Count'.

    Sums Count, Count_x and Count_y (missing ones count as 0), stores the
    total back into row.Count and returns the row.
    """
    total = (getattr(row, 'Count', 0)
             + getattr(row, 'Count_x', 0)
             + getattr(row, 'Count_y', 0))
    row.Count = total
    return row
def top3(list_year, PATH):
    """Top three names by combined count across the given years.

    Reads PATH + 'yob<year>.txt' (Name,Gender,Count rows, no header) for
    every year, inner-merges the frames on (Name, Gender), ranks by a
    'total' column and returns the three top names as an array.

    NOTE(review): pd.merge is an inner join, so only names present in EVERY
    requested year survive. Also, agg_count assigns via `row.Count`, which
    does not create a new column when 'Count' is absent after the first
    merge (pandas attribute assignment), and sum(axis=1) adds every numeric
    column including the _x/_y merge suffixes - the 'total' ranking should
    be verified before this is reused.
    """
    cols = ['Name', 'Gender', 'Count']
    names_all = None
    for year in list_year:
        names_year = pd.read_csv(PATH + 'yob{}.txt'.format(year), names=cols)
        if names_all is None:
            # First year seeds the accumulator frame.
            names_all = names_year
        else:
            names_all = pd.merge(names_all, names_year, on=['Name', 'Gender']).apply(agg_count, axis=1)
    names_all['total'] = names_all.sum(axis=1)
    #print(names_all)
    result = names_all.sort_values(by='total', ascending=False).head(3)
    return result['Name'].values
def dynamics(list_year, PATH):
    """Total birth counts per gender for each requested year.

    Reads PATH + 'yob<year>.txt' (Name,Gender,Count rows, no header) for
    every year in list_year and returns {'F': [...], 'M': [...]} with one
    total per year, in the order the years were given.
    """
    columns = ['Name', 'Gender', 'Count']
    female_totals = []
    male_totals = []
    for year in list_year:
        frame = pd.read_csv(PATH + 'yob{}.txt'.format(year), names=columns)
        female_totals.append(frame.loc[frame.Gender == 'F', 'Count'].sum())
        male_totals.append(frame.loc[frame.Gender == 'M', 'Count'].sum())
    return {'F': female_totals, 'M': male_totals}
def main():
    """Demo driver: print the top-3 names and the per-gender dynamics.

    NOTE(review): PATH is a hard-coded absolute Windows path - it must be
    adjusted before running on another machine.
    """
    PATH = r'C:\Users\swetlanka\Documents\GitHub\py3\4-1\names\\'
    list_year = [1900,1950,2000]
    print('count_top3({}) =='.format(list_year), top3(list_year, PATH))
    print('count_dynamics({}) =='.format(list_year), dynamics(list_year, PATH))


# Runs at import time.
main()
|
swetlanka/py3
|
4-1/test.py
|
test.py
|
py
| 1,504 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3977389831
|
import psycopg2
import csv
from db.create_connection import create_connection as create_connection
def import_menu_from_csv():
    """Insert every row of menu.csv (name, type, price) into the menu table.

    Commits once after all inserts; the cursor and connection are always
    released, even when an insert fails.
    """
    conn = create_connection()
    cursor = conn.cursor()
    try:
        with open("menu.csv", mode="r", encoding="utf-8") as csv_file:
            csv_reader = csv.DictReader(csv_file)
            for row in csv_reader:
                cursor.execute("""
                    INSERT INTO menu (name, type, price)
                    VALUES (%s, %s, %s)
                """, (row["name"], row["type"], row["price"]))
        conn.commit()
    finally:
        # BUG FIX: the original closed the connection first and then touched
        # the cursor of an already-closed connection, and released nothing
        # when an insert raised. Close the cursor before its connection.
        cursor.close()
        conn.close()
def menu_cleaning():
    """Delete every row from the menu table.

    Returns True when at least one row was deleted, False otherwise.
    Raises psycopg2.Error (after rolling back) on a database failure.
    """
    # BUG FIX: if create_connection() raised, the original's finally/except
    # blocks referenced the unbound names `conn`/`cursor` (NameError masking
    # the real error), and it closed the connection before its cursor.
    conn = None
    cursor = None
    try:
        conn = create_connection()
        cursor = conn.cursor()
        cursor.execute("DELETE FROM menu;")
        conn.commit()
        deleted_rows = cursor.rowcount
        return deleted_rows > 0
    except psycopg2.Error as e:
        if conn is not None:
            conn.rollback()
        raise e
    finally:
        # Close the cursor before the connection that owns it.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
|
Tolik1923/restaurantordertaker
|
Back-end/db/exsport_menu.py
|
exsport_menu.py
|
py
| 1,024 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30950783677
|
import numpy as np
import sys
import matplotlib.pyplot as plt
sys.path.append('../../analysis_scripts')
from dumpfile import DumpFile
from pickle_dump import save_obj, load_obj
from spatialcorrelations import calculate_items
if __name__ == "__main__":
    # Density label rho comes from the command line; fps lists the active
    # force values whose pickled correlation data to plot (all but 0 are
    # currently disabled).
    rho = sys.argv[1]
    fps = np.array([0])#,1,5,10,20,40,60,80,100])
    load_prefix = '../raw_data_processing/pickled_data/'
    for fp in fps:
        # Pickled dict written by the raw-data-processing step.
        dc_name = load_prefix + f'ret_o_{fp}_{rho}'
        ret_o = load_obj(dc_name)
        gmatrix = ret_o['sum_g']    # accumulated correlation samples
        Nsamples = ret_o['g_cnt']   # number of samples accumulated
        # Average over samples; presumably column 0 holds r values and
        # column 1 the pair-correlation g(r) - TODO confirm with the
        # spatialcorrelations module.
        rs = gmatrix[:,0]/Nsamples
        gs = gmatrix[:,1]/Nsamples
        plt.plot(rs,gs)
    plt.show()
|
samueljmcameron/ABPs_coarse_graining
|
experiments/2020_03_19/correlations/plot_correlations.py
|
plot_correlations.py
|
py
| 686 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34196938558
|
#!/user/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import h5py
# An HDF5 file is a container for two kinds of objects: datasets (array-like collections of data) and groups (folder-like containers that can hold datasets and other groups).
from lr_utils import load_dataset
# Load the cat/non-cat picture dataset (lr_utils is a course-provided helper).
train_set_x_orig , train_set_y , test_set_x_orig , test_set_y , classes = load_dataset()
# index = 30
# print(train_set_x_orig[index])
# plt.imshow(train_set_x_orig[index])
# Print the current training label value.
# np.squeeze removes singleton dimensions: train_set_y[:,index] is [1] before
# squeezing and the scalar 1 after np.squeeze(train_set_y[:,index]).
#print("【使用np.squeeze:" + str(np.squeeze(train_set_y[:,index])) + ",不使用np.squeeze: " + str(train_set_y[:,index]) + "】")
# Only the squeezed scalar can be used to look up the class name.
# print("train_set_y=" +str(train_set_y[:,index]))
# print(classes[np.squeeze(train_set_y[:,index])])
# plt.show()
# image.shape[0], image.shape[1], image.shape[2] are the image height, width
# and channel count; image.shape is the full dimension tuple.
m_train=train_set_y.shape[1]
m_test=test_set_y.shape[1]
num_px=train_set_x_orig.shape[1]
print("训练集的数量:m_train="+str(m_train))
print("测试集的数量:m_test="+str(m_test))
print("每张图片的高和宽:num_px="+str(num_px))
print("每张图片的大小:("+str(num_px)+","+str(num_px)+",3)")
print("训练集图片的维度:"+str(train_set_x_orig.shape))
print("训练集标签的维度:"+str(train_set_y.shape))
print("测试集图片的维度:"+str(test_set_x_orig.shape))
print("测试集标签的维度:"+str(test_set_y.shape))
# X_flatten = X.reshape(X.shape[0], -1).T   (X.T is the transpose of X)
# Flatten and transpose the training set. The -1 means "unspecified": numpy
# computes that axis itself, e.g. reshape(5, -1) yields 5 rows and however
# many columns the data requires.
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
# Flatten and transpose the test set the same way.
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
#
# print ("训练集降维最后的维度: " + str(train_set_x_flatten.shape))
# print ("训练集_标签的维数 : " + str(train_set_y.shape))
# print ("测试集降维之后的维度: " + str(test_set_x_flatten.shape))
# print ("测试集_标签的维数 : " + str(test_set_y.shape))
# Normalise pixel values from [0, 255] to [0, 1].
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
def sigmoid(z):
    """Element-wise logistic function 1 / (1 + e^-z).

    z may be a scalar or a numpy array of any size; the result has the
    same shape as z.
    """
    return 1 / (1 + np.exp(-z))
def initialize_with_zero(dim):
    """Return (w, b) initialised to zeros.

    dim - desired length of the weight vector.
    w is a (dim, 1) array of zeros and b the scalar 0.
    """
    w = np.zeros((dim, 1))
    b = 0
    # Sanity checks kept from the original: assert aborts on a violated
    # invariant; isinstance checks the type like type() would.
    assert w.shape == (dim, 1)
    assert isinstance(b, (float, int))
    return (w, b)
def propagate(w, b, X, Y):
    """Forward and backward pass for logistic regression.

    Args:
        w: weights, array of shape (num_features, 1).
        b: bias, a scalar.
        X: data matrix of shape (num_features, num_examples).
        Y: 0/1 label row vector of shape (1, num_examples).

    Returns:
        (grads, cost): grads is {"dw": ..., "db": ...} holding the gradients
        of the cost w.r.t. w and b; cost is the scalar negative
        log-likelihood.
    """
    num_examples = X.shape[1]

    # Forward pass - activations and cost (the sigmoid helper is inlined).
    A = 1 / (1 + np.exp(-(np.dot(w.T, X) + b)))
    cost = (-1 / num_examples) * (np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)))

    # Backward pass - gradients with respect to w and b.
    dw = (1 / num_examples) * (np.dot(X, (A - Y).T))
    db = (1 / num_examples) * (np.sum(A - Y))

    # Sanity checks carried over from the original implementation.
    assert dw.shape == w.shape
    assert db.dtype == float
    cost = np.squeeze(cost)
    assert cost.shape == ()

    return ({"dw": dw, "db": db}, cost)
# #测试一下propagate
# print("====================测试propagate====================")
# #初始化一些参数
# w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1,2], [3,4]]), np.array([[1, 0]])
# grads, cost = propagate(w, b, X, Y)
# print ("dw = " + str(grads["dw"]))
# print ("db = " + str(grads["db"]))
# print ("cost = " + str(cost))
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """Run plain gradient descent on the logistic-regression cost.

    Args:
        w: weights, array of shape (num_features, 1).
        b: bias, a scalar.
        X: data matrix of shape (num_features, num_examples).
        Y: 0/1 label row vector of shape (1, num_examples).
        num_iterations: number of gradient-descent steps.
        learning_rate: step size of the update rule.
        print_cost: when True, print the cost every 100 steps.

    Returns:
        (params, grads, costs): params holds the final w and b, grads the
        gradients from the last step, costs the cost sampled every 100 steps.
    """
    costs = []
    for step in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        # Gradient-descent update of both parameters.
        w = w - learning_rate * grads["dw"]
        b = b - learning_rate * grads["db"]
        # Record (and optionally print) the cost every 100 steps.
        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("迭代的次数: %i , 误差值: %f" % (step, cost))
    params = {"w": w, "b": b}
    return (params, grads, costs)
# #测试optimize
# print("====================测试optimize====================")
# w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1,2], [3,4]]), np.array([[1, 0]])
# params , grads , costs = optimize(w , b , X , Y , num_iterations=100 , learning_rate = 0.009 , print_cost = False)
# print ("w = " + str(params["w"]))
# print ("b = " + str(params["b"]))
# print ("dw = " + str(grads["dw"]))
# print ("db = " + str(grads["db"]))
def predict(w, b, X):
    """Predict a binary label (0 or 1) for every column of X.

    Args:
        w: weights, array of shape (num_features, 1).
        b: bias, a scalar.
        X: data matrix of shape (num_features, num_examples).

    Returns:
        Y_prediction: (1, num_examples) array of 0.0/1.0 predictions.
    """
    num_examples = X.shape[1]
    w = w.reshape(X.shape[0], 1)
    # Probability of the positive class (the logistic helper is inlined).
    A = 1 / (1 + np.exp(-(np.dot(w.T, X) + b)))
    # Threshold each probability at 0.5 to obtain the hard prediction.
    Y_prediction = (A > 0.5).astype(float).reshape(1, num_examples)
    assert Y_prediction.shape == (1, num_examples)
    return Y_prediction
#
# #测试predict
# print("------测试predict------")
# w,b,X,Y = np.array([[1],[2]]),2,np.array([[1,2],[3,4]]),np.array([[1,0]])
# print("predictions = " + str(predict(w,b,X)))
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """Build the full logistic-regression model from the helpers above.

    Args:
        X_train: numpy array of shape (num_px * num_px * 3, m_train), training set.
        Y_train: numpy array of shape (1, m_train), training labels.
        X_test:  numpy array of shape (num_px * num_px * 3, m_test), test set.
        Y_test:  numpy array of shape (1, m_test), test labels.
        num_iterations: number of gradient-descent iterations.
        learning_rate: step size passed to optimize().
        print_cost: when True, print the cost every 100 iterations.

    Returns:
        d: dict holding the learned parameters, both prediction arrays,
        the sampled cost history and the hyper-parameters used.
    """
    w, b = initialize_with_zero(X_train.shape[0])
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    # Retrieve the learned parameters w and b from the "parameters" dict.
    w, b = parameters["w"], parameters["b"]
    # Predict on the test and training examples.
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    # Print post-training accuracy: mean absolute prediction error, inverted
    # into a percentage (np.abs is the absolute value, np.mean the average).
    print("训练集准确性:", format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100), "%")
    print("测试集准确性:", format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100), "%")
    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations}
    return d
print("------测试model------")
# Train on the real dataset loaded above.
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=2000, learning_rate=0.005, print_cost=True)
# Plot the learning curve.
costs = np.squeeze(d['costs'])
"""
squeeze()函数的用法:
在机器学习和深度学习中,通常算法的结果是可以表示向量的数组(即包含两对或以上的方括号形式[[]]),
如果直接利用这个数组进行画图可能显示界面为空(见后面的示例)。我们可以利用squeeze()函数将表示向量
的数组转换为秩为1的数组,这样利用matplotlib库函数画图时,就可以正常的显示结果了。
"""
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations(per hundreds)')
plt.title("Learning rate = " + str(d["learning_rate"]))
plt.show()
# Compare several learning rates on shorter training runs.
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
    print("learning rate is:" + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=1500, learning_rate=i,
                           print_cost=False)
    print('\n' + "--------------" + '\n')
# Overlay the three cost curves on one figure.
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
# loc: legend position in the figure; shadow: draw a shadow behind it.
legend = plt.legend(loc='upper center', shadow=True)
# Set the legend's background colour.
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
|
CheQiXiao/cfair
|
fc_net.py
|
fc_net.py
|
py
| 11,968 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
4374705477
|
"""
Author: Walfred Cutaran
Problem: Given a time in 12-hour AM/PM format, convert it to military (24-hour) time.
Note: - 12:00:00AM on a 12-hour clock is 00:00:00 on a 24-hour clock.
- 12:00:00PM on a 12-hour clock is 12:00:00 on a 24-hour clock.
"""
#
# Complete the 'timeConversion' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING s as parameter.
#
def timeConversion(s):
    """Convert "hh:mm:ssAM"/"hh:mm:ssPM" to 24-hour "hh:mm:ss".

    12 AM maps to hour 00; 12 PM stays 12; other PM hours gain 12.
    """
    period = s[8:]  # trailing "AM" or "PM"
    hour, minute, second = s[:8].split(':')
    if period == "AM":
        hour = "00" if hour == "12" else hour
    else:
        hour = hour if hour == "12" else str(int(hour) + 12)
    return ':'.join([hour, minute, second])
|
walfredcutaran/Hackerrank-Solutions
|
Solutions/Time Conversion.py
|
Time Conversion.py
|
py
| 951 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9031625453
|
import random
print("Input 2 numbers and guess the random number the computer chose")
def guess_game():
    """Play one round of guess-the-number, then offer to play again.

    Fixes over the original:
    - invalid input is retried with a loop instead of recursion; previously,
      after the recursive retry returned, execution fell through to code that
      used the still-undefined first_num/second_num (NameError);
    - the debug print that revealed the secret number is removed;
    - only ValueError is caught, so e.g. Ctrl-D no longer loops forever.
    """
    # Read lower limit, higher limit and number of tries until they make sense.
    while True:
        try:
            first_num = int(input("Input the lower limit number: "))
            second_num = int(input("Input the higher limit number: "))
            number_of_chance = int(input("Input the number of tries: "))
        except ValueError:
            print("Input only numbers!")
            continue
        if first_num > second_num:
            print("Lower limit number must be lower than Higher limit number")
        elif number_of_chance == 0:
            print("Number of chance must be more than 0")
        else:
            break
    print(f"The computer is going to randomly select a number between {first_num} and {second_num}")
    print(f"You have {number_of_chance} tries")
    global guess_num
    guess_num = random.randint(first_num, second_num)
    for x in range(1, number_of_chance + 1):
        try:
            user_guess = int(input(f"Guess {x} : "))
        except ValueError:
            print("Input only numbers!")
            continue
        if user_guess == guess_num:
            print(f"Congrats! The number is {guess_num}")
            break
        elif user_guess < guess_num:
            print("The number is higher")
        else:
            print("The number is lower")
    print("Game has ended")
    # Whether the user wants to continue.
    user_play = input("Type 'C' to continue or any other keys to quit: ").capitalize()
    if user_play == 'C':
        guess_game()
    else:
        quit()
# Start the game
guess_game()
|
KWenYuan/randompython
|
numberguess.py
|
numberguess.py
|
py
| 1,744 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3755198481
|
import os
import csv
# Create file path variable.
budget_csv = os.path.join("Resources", "budget_data.csv")

# Lists filled while iterating through the CSV file.
date = []
profitLoss = []
monthlyChange = []

# Read Date and Profit/Loss columns, skipping the header row.
with open(budget_csv, encoding='utf8', newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    csv_header = next(csvreader)
    for row in csvreader:
        date.append(row[0])
        profitLoss.append(row[1])

# Profit/Loss values arrive as strings; convert once.
ProfitLossInt = [int(i) for i in profitLoss]
Total = sum(ProfitLossInt)

# Month-over-month deltas (one fewer entry than months). The original's
# extra `if i+1 <= len(...)` guard was always true and has been dropped.
for i in range(len(ProfitLossInt) - 1):
    monthlyChange.append(ProfitLossInt[i + 1] - ProfitLossInt[i])

def average(mylist):
    """Arithmetic mean of a non-empty list of numbers."""
    return sum(mylist) / len(mylist)

monthlyChangeAvg = round(average(monthlyChange), 2)
monthlyChangeMax = max(monthlyChange)
monthlyChangeMin = min(monthlyChange)
# +1 offset: the change at index i happened in month i+1.
MaxDate = date[monthlyChange.index(monthlyChangeMax) + 1]
MinDate = date[monthlyChange.index(monthlyChangeMin) + 1]

# Build the report once so console and file output cannot drift apart.
summary_lines = [
    "Financial Analysis",
    "-------------------------------",
    "Total Months: " + str(len(date)),
    f"Total: ${Total}",
    f"Average Change: ${monthlyChangeAvg}",
    f"Greatest Increase in Profits: {MaxDate} (${monthlyChangeMax})",
    f"Greatest Decrease in Profits: {MinDate} (${monthlyChangeMin})",
]
print("\n".join(summary_lines))
# `with` guarantees the file is closed even on error (original leaked the
# handle if a write raised).
with open("Financial_Analysis.txt", "w") as output:
    output.write("\n".join(summary_lines) + "\n")
os.startfile("Financial_Analysis.txt")  # NOTE(review): Windows-only API
|
Mattyapolis/Data_Analytics__Homework_2019
|
03-Python/PyBank/main.py
|
main.py
|
py
| 2,483 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19715632836
|
# [https://www.yahoo.com/somerandomstring, http://www.yahoo.com/some/random/string, http://www.google.com]
import re
def condense(arr):
    """Count occurrences of each "www.<name>.<tld>" domain in a list of URLs.

    Returns a list of "domain count" strings in first-seen order.

    Fixes over the original: the regex escapes its dots (a bare '.' matched
    any character), and URLs are scanned individually instead of being joined
    into one string with no separator, which could fuse adjacent tokens into
    false matches.
    """
    domain_re = re.compile(r'www\.\w+\.\w+')
    counts = {}
    for url in arr:
        for dom in domain_re.findall(url):
            counts[dom] = counts.get(dom, 0) + 1
    return [dom + ' ' + str(cnt) for dom, cnt in counts.items()]
# Demo run with the sample URLs from the header comment.
print(condense(["https://www.yahoo.com/somerandomstring", "http://www.yahoo.com/some/random/string", "http://www.google.com"]))
|
Nohclu/General
|
OathQuizQuestion/domains.py
|
domains.py
|
py
| 559 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3661651084
|
def sequential_search(data, key):
    """Linearly scan *data* and report whether *key* occurs in it."""
    return any(element == key for element in data)
# Demo: look for a key in a small list and report the outcome.
my_list = [3, 6, 2, 9, 4, 7]
key = 6
if sequential_search(my_list, key):
    print("elemen ditemukan.")
else:
    print("elemen tidak ditemukan")
|
debbypermatar/SequencialSearch_BinarySearch
|
sequential_search.py
|
sequential_search.py
|
py
| 297 |
python
|
en
|
code
| 1 |
github-code
|
6
|
28187322584
|
import sys
# Fast reader bound over the builtin input() (competitive-programming idiom).
input = sys.stdin.readline

input_num = int(input())
# ops[i] = minimum number of operations (divide by 3, divide by 2, subtract 1)
# needed to reduce i down to 1.
ops = [0 for _ in range(input_num+1)]
for i in range(2, input_num+1):
    # Base transition: one subtract-1 step from i-1's optimum.
    ops[i] = ops[i-1]+1
    if i % 3 == 0:
        ops[i] = min(ops[i], ops[i//3] + 1)
    if i % 2 == 0:
        ops[i] = min(ops[i], ops[i//2] + 1)
print(ops[input_num])
|
Bandi120424/Algorithm_Python
|
백준/Silver/1463. 1로 만들기/1로 만들기.py
|
1로 만들기.py
|
py
| 322 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44701138323
|
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
def plot(samples):
    """Render up to 16 samples on a tight 4x4 grid and return the figure.

    Samples with 3 channels are shown in colour; otherwise greyscale.
    """
    side = samples.shape[1]
    channels = samples.shape[3]
    fig = plt.figure(figsize=(4, 4))
    grid = gridspec.GridSpec(4, 4)
    grid.update(wspace=0.05, hspace=0.05)
    for idx, sample in enumerate(samples):
        axis = plt.subplot(grid[idx])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        if channels == 3:
            plt.imshow(sample.reshape(side, side, channels))
        else:
            plt.imshow(sample.reshape(side, side), cmap='Greys_r')
    return fig
def generateSamples(out_dir, z_dim=100):
    """Restore each generator checkpoint under out_dir/model/ and save a
    4x4 grid of generated samples to out_dir/generated/<checkpoint>.png.

    NOTE(review): uses TF1 graph/session APIs (tf.Session, import_meta_graph);
    will not run under TF2 without compat shims.
    """
    # fileNames=[]
    if not os.path.exists(out_dir+'/generated/'):
        os.makedirs(out_dir+'/generated/')
    for root, dirs, files in os.walk(out_dir+"/model/"):
        for filename in sorted(files):
            if os.path.splitext(filename)[1].lower() =='.meta':
                model=root+os.path.splitext(filename)[0]
                imageName=os.path.splitext(filename)[0]
                print(model)
                # fileNames.append(root+os.path.splitext(filename)[0])
                # Start from a clean graph before importing this checkpoint.
                tf.reset_default_graph()
                with tf.Session() as sess:
                    # z = tf.placeholder(tf.float32, shape=[None, z_dim])
                    # saver = tf.train.Saver()
                    saver=tf.train.import_meta_graph(model+'.meta')
                    saver.restore(sess, model)
                    graph=tf.get_default_graph()
                    # Look up the latent input and generator output tensors by name.
                    tName1=graph.get_operation_by_name('z').name+':0'
                    z=graph.get_tensor_by_name(tName1)
                    tName2=graph.get_operation_by_name('generator/final_gen').name+':0'
                    gen=graph.get_tensor_by_name(tName2)
                    # Fixed seed: every checkpoint is sampled with the same latents.
                    np.random.seed(42)
                    batch_z = np.random.normal(-1.0, 1.0, size=[16, z_dim]).astype(np.float32)
                    samples = sess.run(gen, feed_dict={z: batch_z})
                    fig = plot(samples)
                    plt.savefig(out_dir+'/generated/{}.png'
                                .format(imageName), bbox_inches='tight')
                    plt.show()
                    plt.close()
|
adityagarg/improvedWGANs
|
utils.py
|
utils.py
|
py
| 2,488 |
python
|
en
|
code
| 0 |
github-code
|
6
|
69900225789
|
import enum
from PySide2 import QtCore
from PySide2.QtCore import QPoint
from PySide2.QtGui import QColor, QFont, QFontDatabase
from PySide2.QtWidgets import QGraphicsSceneMouseEvent, QGraphicsItem
class NodeState(enum.Enum):
    """Display state of a graph node; Node.paint() maps each state to a colour."""
    normal = 0     # light green
    used = 1       # yellow
    highlight = 2  # cyan
class Node(QGraphicsItem):
    """Square, labelled graph node whose fill colour reflects its NodeState."""
    # Custom item type id so scenes can distinguish Node items.
    Type = QGraphicsItem.UserType + 1

    def __init__(self, graphWidget, name: str, group_name: str, size=22):
        # NOTE(review): graphWidget is accepted but never stored or used here.
        QGraphicsItem.__init__(self)
        self.state = NodeState.normal
        self.size = size  # side length of the square, in pixels
        # Monospace keeps labels of different lengths visually aligned.
        self.fixedFont = QFont("Monospace")
        self.fixedFont.setStyleHint(QFont.TypeWriter)
        self.group_name = group_name
        self.name = name
        self.tag = group_name + " " + self.name
        self.color = QColor('light green')
        # self.setFlag(QGraphicsItem.ItemIsMovable)
        # self.setFlag(QGraphicsItem.ItemIsSelectable)
        # Cache rendering at device resolution for cheaper repaints.
        self.setCacheMode(self.DeviceCoordinateCache)
        self.setZValue(-1)

    def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
        # Debug-only handler: logs the current state on click.
        # self.state=NodeState.highlight
        print(self.state)
        # self.color = QColor('cyan')
        # self.adjust()
        # self.update()

    #
    # def adjust(self):
    #     if self.state != NodeState.highlight:
    #         print("sd")
    #     if self.state == NodeState.normal:
    #         self.color = QColor('light green')
    #     elif self.state == NodeState.used:
    #         self.color = QColor('yellow')
    #     elif self.state == NodeState.highlight:
    #         self.color = QColor('cyan')
    #     print(("Ss"))

    def type(self):
        # Qt convention: report the custom item type id.
        return Node.Type

    def boundingRect(self):
        # Square centred on the item's local origin.
        return QtCore.QRectF((self.size // 2) * -1, (self.size // 2) * -1, self.size, self.size)

    def paint(self, painter, option, widget):
        # Re-derive the fill colour from the current state on every repaint.
        if self.state == NodeState.normal:
            self.color = QColor('light green')
        elif self.state == NodeState.used:
            self.color = QColor('yellow')
        elif self.state == NodeState.highlight:
            self.color = QColor('cyan')
        painter.setPen(QColor("black"))
        painter.setBrush(self.color)
        painter.drawRect((self.size // 2) * -1, (self.size // 2) * -1, self.size, self.size)
        painter.setPen(QColor("black"))
        painter.setFont(self.fixedFont)
        # Offsets roughly centre the label for 1-, 2- and 3+-character names.
        if len(self.name) >= 3:
            textpoint = QPoint(-11, 3)
        elif len(self.name) >= 2:
            textpoint = QPoint(-7, 3)
        else:
            textpoint = QPoint(-4, 3)
        painter.drawText(textpoint, self.name)
|
JIuH4/KB_V2
|
ui_elements/graph_items/node.py
|
node.py
|
py
| 2,560 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21325562870
|
import pytest
from pysyncgateway import Database, Query
@pytest.fixture
def database(admin_client):
    """
    Returns:
        Database: 'db' database written to Sync Gateway.
    """
    db = Database(admin_client, 'db')
    db.create()
    return db
@pytest.fixture
def query(database):
    """
    Returns:
        Query: 'all_lists' query, not written to Sync Gateway.
    """
    unsaved_query = Query(database, 'all_lists')
    return unsaved_query
@pytest.fixture
def slow_view(database):
    """
    A view that returns all documents, but slowly. This uses a horrible
    sleep-like function that locks up Walrus for 1.5s per document. Fixture
    populates the database with a document to ensure that calling the
    view takes at least 1 second in total.

    NOTE: On Circle, it looks like processing the view might be done in
    parallel because it is able to return a view containing 2 documents in just
    over the time in the delay function.

    Returns:
        Query: Called 'slow_lists', written to Sync Gateway, with a single view
        called 'all' that takes 1.5 second per document in the database.
    """
    # Seed one document so the view has something to (slowly) emit.
    database.get_document('a').create_update()
    query = Query(database, 'slow_lists')
    # The JS map function busy-waits 1500 ms per document before emitting it.
    query.data = {
        'views': {
            'all': {
                'map':
                    """
                    function(doc, meta) {
                        function pausecomp(millis){
                            var date = new Date();
                            var curDate = null;
                            do { curDate = new Date(); }
                            while(curDate-date < millis);
                        }
                        pausecomp(1500);
                        emit(meta.id,doc);
                    }
                    """,
            },
        },
    }
    query.create_update()
    return query
@pytest.fixture
def food_query(database):
    """
    Populates the database with some foods and builds a query, all written to
    Sync Gateway. View does not need hotting up because docs are in place when
    it is created.

    Returns:
        Query: With 'all' view populated where key will search for the foods
        where the first letter of the name of the food matches.
    """
    # Mix of food and non-food docs: the view must filter out the latter.
    for name, data in [
        ('lightbulb', {
            'type': 'fixture',
            'name': 'Lightbulb',
        }),
        ('apple', {
            'type': 'food',
            'name': 'apple',
        }),
        ('banana', {
            'type': 'food',
            'name': 'banana',
        }),
        ('apricot', {
            'type': 'food',
            'name': 'apricot',
        }),
        ('walrus', {
            'type': 'animal',
            'name': 'I AM THE WALRUS',
        }),
        ('almond', {
            'type': 'food',
            'name': 'almond',
        }),
        ('pumpkin', {
            'type': 'food',
            'name': 'pumpkin',
        }),
    ]:
        doc = database.get_document(name)
        doc.data = data
        doc.create_update()
    query = Query(database, 'food_index')
    # The JS map emits (first letter of name -> doc) for docs of type "food".
    query.data = {
        'views': {
            'all': {
                'map':
                    """
                    function(doc, meta) {
                        if(doc.type == "food" && doc.name) {
                            emit(doc.name[0], doc)
                        }
                    }
                    """,
            },
        },
    }
    query.create_update()
    return query
|
constructpm/pysyncgateway
|
tests/query/conftest.py
|
conftest.py
|
py
| 3,136 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3037885810
|
def solution(n):
    """Fill a size-n 'triangle snail' with 1..n(n+1)/2 and flatten it.

    Numbers run down the left edge, right along the bottom row, then up the
    diagonal, spiralling inward; the result is read row by row, skipping
    the unused upper-right cells.
    """
    grid = [[0] * n for _ in range(n)]
    row, col = -1, 0
    value = 1
    for turn in range(n):
        direction = turn % 3
        for _ in range(n - turn):
            if direction == 0:
                row += 1          # move down
            elif direction == 1:
                col += 1          # move right
            else:
                row -= 1          # move up-left along the diagonal
                col -= 1
            grid[row][col] = value
            value += 1
    return [cell for line in grid for cell in line if cell != 0]
if __name__ == '__main__':
    # Demo run for a triangle of size 4.
    answer = solution(4)
    print('정답 : ', answer)
|
sunyeongchoi/sydsyd_challenge
|
argorithm/trianglesnail.py
|
trianglesnail.py
|
py
| 606 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17034031092
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.types import Header, Menu, Panel, PropertyGroup
from fd_datablocks import enums, const
import os
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
BoolVectorProperty,
PointerProperty,
CollectionProperty,
EnumProperty)
def find_node(material, nodetype):
    """Return the first node of *nodetype* in *material*'s node tree, else None."""
    if not (material and material.node_tree):
        return None
    for candidate in material.node_tree.nodes:
        if getattr(candidate, "type", None) == nodetype:
            return candidate
    return None
def find_node_input(node, name):
    """Return the input socket of *node* whose name equals *name*, else None."""
    matches = [sock for sock in node.inputs if sock.name == name]
    return matches[0] if matches else None
def panel_node_draw(layout, id_data, output_type, input_name):
    """Draw the node view for *id_data*'s output node.

    Returns False (and draws the enable-nodes operator) when *id_data*
    does not use nodes; otherwise True.
    """
    if not id_data.use_nodes:
        layout.operator("cycles.use_shading_nodes", icon='NODETREE')
        return False
    node = find_node(id_data, output_type)
    if node:
        socket = find_node_input(node, input_name)
        layout.template_node_view(id_data.node_tree, node, socket)
    else:
        layout.label(text="No output node")
    return True
class PANEL_scenes(Panel):
    """3D View tool-shelf panel: scene selection, camera, and unit settings."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Scenes"
    bl_context = "objectmode"
    bl_label = " "
    bl_options = {'HIDE_HEADER'}
    #bl_idname = "mvProject.part_properties"

    def draw_header(self,context):
        # Custom header: icon plus caption instead of the default label.
        layout = self.layout
        row = layout.row(align=True)
        row.label("Scenes: ",icon='SCENE_DATA')

    @classmethod
    def poll(cls, context):
        # Panel is always available in object mode.
        return True

    def draw(self, context):
        unit = context.scene.unit_settings
        scene = context.scene
        layout = self.layout
        space = context.space_data
        col = layout.column(align=True)
        # Scene selector with create/delete operators.
        box = col.box()
        row = box.row(align=True)
        row.template_ID(context.screen, "scene", new="fd_scene.create_scene", unlink="scene.delete")
        # Active camera plus measurement/angle unit settings.
        box = col.box()
        row = box.row()
        row.prop(scene, "camera",text="Active Camera")
        row = box.row()
        row.label("Main Units:")
        row.row().prop(unit, "system", expand=True)
        row = box.row()
        row.label("Angle Units:")
        row.row().prop(unit, "system_rotation", expand=True)
        # Grid spacing only applies when no unit system is selected.
        if space.type == 'VIEW_3D' and scene.unit_settings.system == 'NONE':
            row = box.row()
            row.label("Grid Spacing:")
            row.row().prop(space, "grid_scale", expand=True)
        # Scene-level prompt page (Fluid Designer data).
        box = col.box()
        scene.mv.PromptPage.draw_prompt_page(box,scene)
class PANEL_worlds(Panel):
    """3D View tool-shelf panel for selecting the world and editing its shader."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Worlds"
    bl_context = "objectmode"
    bl_label = " "
    bl_options = {'HIDE_HEADER'}
    #bl_idname = "mvProject.part_properties"

    def draw_header(self,context):
        layout = self.layout
        row = layout.row(align=True)
        row.label("World Management: ",icon=const.icon_world)

    @classmethod
    def poll(cls, context):
        # Panel is always available in object mode.
        return True

    def draw(self, context):
        scene = context.scene  # NOTE(review): currently unused in this method
        world = context.scene.world
        layout = self.layout
        col = layout.column(align=True)
        # World selector with a "new" operator.
        box = col.box()
        row = box.row(align=True)
        row.template_ID(context.scene, "world", new="world.new")
        # Node surface input when the world uses nodes; otherwise the flat
        # horizon colour.
        box = col.box()
        if not panel_node_draw(box, world, 'OUTPUT_WORLD', 'Surface'):
            box.prop(world, "horizon_color", text="Color")
        # World-level prompt page (Fluid Designer data).
        box = col.box()
        world.mv.PromptPage.draw_prompt_page(box,world)
class PANEL_materials(Panel):
    """3D View tool-shelf panel for assigning, listing, and editing materials."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Materials"
    bl_context = "objectmode"
    bl_label = " "
    bl_options = {'HIDE_HEADER'}

    def draw_header(self,context):
        layout = self.layout
        row = layout.row(align=True)
        row.label("Material Management: ",icon=const.icon_material)

    @classmethod
    def poll(cls, context):
        # Panel is always available in object mode.
        return True

    def draw(self, context):
        layout = self.layout
        box = layout.box()
        row = box.row()
        # Bulk material operators: assign from pointers, clear unused, clear all.
        row.operator("fd_material.apply_materials_from_pointers",text="Assign Materials",icon=const.icon_material)
        row.operator("fd_material.clear_unused_materials_from_file",text="Clear Unused",icon='ZOOMOUT')
        row.operator("fd_material.clear_all_materials_from_file",text="Clear All",icon='PANEL_CLOSE')
        box.template_list("MATERIAL_UL_matslots", "", bpy.data, "materials", context.scene.mv, "active_material_index", rows=5)
        # Properties of the currently selected material, if any exist.
        if len(bpy.data.materials) > 0:
            box = layout.box()
            material = bpy.data.materials[context.scene.mv.active_material_index]
            material.mv.draw_properties(box,material)
class PANEL_libraries(Panel):
    """3D View tool-shelf panel for the library path and specification groups."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Libraries"
    bl_context = "objectmode"
    bl_label = " "
    bl_options = {'HIDE_HEADER'}

    def draw_header(self,context):
        layout = self.layout
        row = layout.row(align=True)
        row.label("Library Management: ",icon=const.icon_library)

    @classmethod
    def poll(cls, context):
        # Panel is always available in object mode.
        return True

    def draw(self, context):
        dm = context.scene.mv.dm
        layout = self.layout
        col = layout.column(align=True)
        box = col.box()
        # A valid library path gets a check icon; a missing one an error icon.
        if os.path.exists(dm.Libraries.path):
            Libraries = context.scene.mv.dm.Libraries
            row = box.row(align=True)
            row.prop(dm.Libraries,"path",text="",icon='FILE_TICK')
            # box = col.box()
            # row = box.row(align=True)
            # Libraries.draw_active_pointer_library_menus(row)
        else:
            row = box.row(align=True)
            row.prop(dm.Libraries,"path",text="",icon='ERROR')
        dm.Specgroups.draw_spec_groups(box)
#------REGISTER
# Panels registered/unregistered together by register()/unregister() below.
classes = [
    PANEL_scenes,
    PANEL_worlds,
    PANEL_materials,
    PANEL_libraries
]
def register():
    """Register every panel class in *classes* with Blender."""
    for panel_cls in classes:
        bpy.utils.register_class(panel_cls)
def unregister():
    """Unregister every panel class in *classes* from Blender."""
    for panel_cls in classes:
        bpy.utils.unregister_class(panel_cls)
if __name__ == "__main__":
    # Support running the module directly (e.g. from Blender's text editor).
    register()
|
satishgoda/fluid-designer-scripts
|
scripts/startup/fluid_ui/space_fluid_view3d_tools.py
|
space_fluid_view3d_tools.py
|
py
| 7,224 |
python
|
en
|
code
| 1 |
github-code
|
6
|
13231283002
|
"""
@Author : Hirsi
@ Time : 2020/7/3
"""
"""
思路(线程池)
1.定义变量,保存源文件夹,目标文件夹所在的路径
2.在目标路径创建新的文件夹
3.获取源文件夹中所有的文件(列表)
4.便利列表,得到所有的文件名
5.定义函数,进行文件拷贝
文件拷贝函数 参数(源文件夹路径,目标文件夹路径,文件名)
1.拼接源文件和目标文件的具体路径
2.打开源文件,创建目标文件
3.读取源文件的内容,写入到目标文件中(while)
"""
import os
import multiprocessing
import time
# 5.定义函数,进行文件拷贝
def copy_work(source_dir, dest_dir, file_name):
    """Copy one file from source_dir to dest_dir in 1 KiB chunks.

    Prints the handling process name; sleeps 0.5 s after each chunk so the
    multi-process demo's parallelism is observable.
    """
    print(multiprocessing.current_process().name)
    # Build the concrete source and destination paths.
    src_path = source_dir + '/' + file_name
    dst_path = dest_dir + '/' + file_name
    with open(src_path, 'rb') as src_file:
        with open(dst_path, 'wb') as dst_file:
            while True:
                chunk = src_file.read(1024)
                if not chunk:
                    break
                dst_file.write(chunk)
                time.sleep(0.5)
if __name__ == '__main__':
    # Source and destination folder paths.
    source_dir='./test'
    dest_dir='/home/hirsi/桌面/test'
    # Create the destination folder; tolerate it already existing.
    try:
        os.mkdir(dest_dir)
    except:
        print('文件已存在!')
    # List every file in the source folder.
    file_list = os.listdir(source_dir)
    # Pool of 3 worker processes for the copies.
    pool = multiprocessing.Pool(3)
    # Dispatch one copy task per file.
    for file_name in file_list:
        # Single-process version:
        # copy_work(source_dir,dest_dir,file_name)
        pool.apply_async(copy_work,(source_dir,dest_dir,file_name))
    # Stop accepting new tasks...
    pool.close()
    # ...and wait for all workers to finish before exiting.
    pool.join()
    print('复制完成!')
|
gitHirsi/PythonNotes02
|
day07-多任务-进程/10-文件夹拷贝器_多进程版.py
|
10-文件夹拷贝器_多进程版.py
|
py
| 2,139 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
24143273312
|
from selenium.webdriver import Chrome,ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import xlsxwriter
# Keep the browser window open after the script finishes ("detach").
opts = ChromeOptions()
opts.add_experimental_option("detach", True)
driver = Chrome(chrome_options=opts)
driver.get("https://google.com")
driver.maximize_window()
# Type the query into Google's search box and submit it.
searchBox = driver.find_element(By.CLASS_NAME,"gLFyf")
searchBox.send_keys("IBTECH")
searchBox.send_keys(Keys.ENTER)
def purifyExtensions(i, j):
    """Split newLinks[i][j] on its top-level domain and append the parts to newRefs.

    The TLDs are tried in a fixed order with '.com' as the fallback, exactly
    matching the original elif chain, which is collapsed here into one
    data-driven loop (the duplicated branches were the only difference).

    NOTE(review): reads/writes the module-level newLinks/newRefs lists,
    mirroring the original design; call sites rely on that side effect.
    """
    link = newLinks[i][j]
    for tld in ('.net', '.io', '.gov', '.org', '.dev'):
        if tld in link:
            newRefs.append(link.split(tld))
            return
    newRefs.append(link.split('.com'))
newLinks = []  # each entry: a search-result link text split on 'https://'
newRefs = []   # each entry: a link split on its TLD (see purifyExtensions)

# Walk three result pages, collecting 9 results from each.
for i in range(3):
    links = driver.find_elements(By.CLASS_NAME,"yuRUbf")
    driver.implicitly_wait(3)
    for k in range(1):
        for j in range(9):
            newLinks.append(links[j].text.split('https://'))
    if i == 0:
        for j in range(1):
            for k in range(9):
                purifyExtensions(k,1)
                #newRefs.append(newLinks[k][1].split('.com'))
        # Advance to the next results page.
        driver.find_element(By.XPATH, '//*[@id="pnnext"]/span[2]').click()
    elif i == 1:
        for j in range(1):
            for k in range(9):
                # NOTE(review): index 4 apparently lacks the 'https://' prefix,
                # so element 0 of the split is used — confirm against live data.
                if k ==4:
                    purifyExtensions(k+9,0)
                    #newRefs.append(newLinks[k + 9][0].split('.com'))
                else:
                    purifyExtensions(k + 9, 1)
                    #newRefs.append(newLinks[k + 9][1].split('.com'))
        driver.find_element(By.XPATH, '//*[@id="pnnext"]/span[2]').click()
    elif i == 2:
        for j in range(1):
            for k in range(9):
                purifyExtensions(k+18,1)
                #newRefs.append(newLinks[k+18][1].split('.com'))
driver.close()
# Export each collected link and its reconstructed https://...com reference
# into two columns of an Excel workbook.
workbook = xlsxwriter.Workbook('import_file.xlsx')
worksheet = workbook.add_worksheet()
worksheet.set_column('A:A', len(newLinks))
worksheet.set_column('B:B', len(newLinks))
# NOTE(review): '{n:.2f}' yields cell refs like "A1.00"; xlsxwriter appears to
# parse only the leading "A1", so this works by accident — confirm, and
# consider plain '{n}' row numbers instead.
text1 = 'A{n:.2f}'
text2 = 'B{n:.2f}'
for j in range(len(newLinks)):
    for k in range(1):
        stringLink = newLinks[j][k]
        stringRef = newRefs[j][k]
        worksheet.write(text1.format(n = j+1), stringLink)
        worksheet.write(text2.format(n = j+1), 'https://' + stringRef + '.com')
workbook.close()
|
keremguzel/selenium-excel-import
|
main.py
|
main.py
|
py
| 2,595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39654504754
|
import os
from .register import register_cityscapes_segmentation, register_voc_context_segmentation_dataset
from .seg_builtin_meta import get_segmentation_builtin_meta_data
# dataset -> {split name: (relative image dir, relative ground-truth dir)}
_CITYSCAPES_SPLITS = {}
_CITYSCAPES_SPLITS['cityscapes'] = {
    'cityscapes_train': ('cityscapes/leftImg8bit/train', 'cityscapes/gtFine/train'),
    'cityscapes_val': ('cityscapes/leftImg8bit/val', 'cityscapes/gtFine/val'),
    'cityscapes_test': ('cityscapes/leftImg8bit/test', 'cityscapes/gtFine/test')
}
def register_cityscapes(root):
    """Register every Cityscapes segmentation split found under *root*."""
    for dataset_name, splits_per_dataset in _CITYSCAPES_SPLITS.items():
        for split_key, (image_dir, label_dir) in splits_per_dataset.items():
            # Fresh metadata per split, then register it with the registry.
            metadata = get_segmentation_builtin_meta_data(dataset_name)
            register_cityscapes_segmentation(
                split_key,
                metadata,
                os.path.join(root, image_dir),
                os.path.join(root, label_dir),
            )
# dataset -> {split name: (relative image dir, relative label dir)}
_RAW_VOC_CONTEXT_SPLITS = {}
_RAW_VOC_CONTEXT_SPLITS['voc_context'] = {
    "voc_context_seg_train": ("Context/train", "Context/train_labels"),
    "voc_context_seg_val": ("Context/val", "Context/val_labels"),
}
def register_voc_context(root):
    """Register every Pascal VOC Context segmentation split under *root*."""
    for dataset_name, splits_per_dataset in _RAW_VOC_CONTEXT_SPLITS.items():
        for split_key, (image_dir, label_dir) in splits_per_dataset.items():
            # Fresh metadata per split, then register it with the registry.
            meta = get_segmentation_builtin_meta_data(dataset_name)
            register_voc_context_segmentation_dataset(
                split_key,
                meta,
                os.path.join(root, image_dir),
                os.path.join(root, label_dir),
            )
|
lqxisok/llSeg
|
datasets/segmentation/seg_builtin.py
|
seg_builtin.py
|
py
| 1,519 |
python
|
en
|
code
| 2 |
github-code
|
6
|
24826000762
|
import numpy as np
import pandas as pd
def main():
    """Demo of pandas Series/DataFrame construction, CSV export, and merging."""
    frame_a = pd.DataFrame()
    print(frame_a)
    # 1000 consecutive unix timestamps serve as the index.
    index_a = np.arange(1628610738, 1628611738)
    ones_col = np.ones(1000)
    frame_a['S1'] = pd.Series(ones_col, index_a)
    print(frame_a)
    # Second column of uniform random floats; persist the frame to CSV.
    uniform_col = np.random.uniform(10, 20, 1000)
    frame_a['S2'] = pd.Series(uniform_col, index_a)
    frame_a.to_csv('df1.csv', index_label='timestamp')
    print(frame_a)
    # A second frame covering the following 100 timestamps.
    index_b = np.arange(1628611738, 1628611838)
    frame_b = pd.DataFrame({
        'S1': pd.Series(np.ones(100) * 2, index_b),
        'S2': pd.Series(np.array(np.random.poisson(10, 100), dtype=float), index_b),
    })
    print(frame_b)
    # Outer merge keeps rows from both frames.
    merged = frame_a.merge(frame_b, 'outer')
    print(merged)
    # The first frame rebuilt in a single constructor call.
    rebuilt = pd.DataFrame({'S1': ones_col, 'S2': uniform_col}, index_a)
    print(rebuilt)
if __name__ == '__main__':
    # Run the DataFrame demo when executed as a script.
    main()
|
dantesdoesthings/danteswebsite
|
sandbox/dataframe_testing.py
|
dataframe_testing.py
|
py
| 785 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5272336888
|
import gradio as gr
import pytesseract
from langchain import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from pdf2image import convert_from_path
# Question prompt template (the assistant is asked to answer in Japanese).
template = """
あなたは親切なアシスタントです。下記の質問に日本語で回答してください。
質問:{question}
回答:
"""
prompt = PromptTemplate(
    input_variables=["question"],
    template=template,
)
def pdf_to_text_ocr(pdf_file):
    """OCR every page of the PDF at *pdf_file* and return the concatenated text.

    Pages are rasterised with pdf2image and read with Tesseract using the
    Japanese+English language pack.
    """
    pages = convert_from_path(pdf_file)
    return "".join(pytesseract.image_to_string(page, lang="jpn+eng") for page in pages)
def process_input(pdf_file, input_text):
    """Answer *input_text* using the uploaded PDF as the retrieval corpus."""
    # OCR the uploaded PDF into raw text.
    document_text = pdf_to_text_ocr(pdf_file.name)
    # Chunk the text so each piece fits the embedding model.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    documents = splitter.create_documents([document_text])
    # Embed the chunks into a Chroma vector store.
    vectordb = Chroma.from_documents(documents, OpenAIEmbeddings())
    # Build a retrieval-augmented QA chain on top of GPT-3.5.
    qa = RetrievalQA.from_chain_type(
        llm=ChatOpenAI(model_name="gpt-3.5-turbo"),
        chain_type="stuff",
        retriever=vectordb.as_retriever(),
    )
    # Fill the Japanese prompt template with the question and run the chain.
    return qa.run(prompt.format(question=input_text))
# Build the Gradio UI components: PDF upload, answer area, question box.
pdf_upload = gr.inputs.File(type="file", label="PDFファイルをアップロード")
textarea = gr.inputs.Textbox(lines=15, placeholder="GPTの応答がここに表示されます...", label="GPT")
input_box = gr.inputs.Textbox(lines=1, placeholder="ここに質問を入力してください", label="")
# Wire the components to process_input and launch the app.
iface = gr.Interface(
    fn=process_input,
    inputs=[pdf_upload, input_box],
    outputs=textarea,
    layout="vertical",
    css=".gr-input {width: 80%;}",
    allow_flagging='never'
)
iface.launch()
|
motomk/pdf_gpt
|
main.py
|
main.py
|
py
| 2,156 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
34373278865
|
import os
from unittest import TestCase
import jinja2
from apply.issue.issure_js_auto_code.db_util import res_to_dict
from config.db_conf import localhost_oa_engine
from util.str_util import to_lower_camel, to_snake, to_upper_camel
class Form:
    """Helpers that read MySQL schema metadata and render Jinja2 templates."""

    @staticmethod
    def get_tables(db):
        """Return all table names in schema *db* as a list of row dicts."""
        sql = "select TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA = %(db)s"
        res = localhost_oa_engine.execute(sql, {"db": db})
        return res_to_dict(res)

    @staticmethod
    def get_table_info(table_name, db=None):
        """Return column metadata (name, type, comment) for one table.

        Column names are converted to lowerCamelCase for template use.
        """
        # Table name, table comment.
        # Column name, column type, column comment, enum values.
        # JSON data -- class data.
        sql = """SELECT COL.COLUMN_NAME, COL.COLUMN_TYPE, COL.COLUMN_COMMENT, COL.DATA_TYPE
                 FROM INFORMATION_SCHEMA.COLUMNS COL
                 Where COL.table_schema = %(db)s AND COL.TABLE_NAME = %(table_name)s"""
        args = {"db": db, "table_name": table_name}
        res = localhost_oa_engine.execute(sql, args)
        data = res_to_dict(res)
        for item in data:
            item["COLUMN_NAME"] = to_lower_camel(item["COLUMN_NAME"])
        return data

    @staticmethod
    def to_file(data_dic, path, resource_dir, template_file):
        """Render *template_file* (looked up in *resource_dir*) with *data_dic*
        and write the result to *path* as UTF-8."""
        template_loader = jinja2.FileSystemLoader(searchpath=resource_dir)
        template_env = jinja2.Environment(loader=template_loader)
        template = template_env.get_template(template_file)
        output_text = template.render(data_dic)
        with open(path, "w", encoding="utf-8") as f:
            f.write(output_text)
class TestAutoCode(TestCase):
    """Drives Form to generate Vue components and JS config from the 'oa' schema."""

    def test_run(self):
        # Render a single table's columns into one Vue component.
        table = "organization"
        rows = Form.get_table_info(table, "oa")
        data = {
            "list": rows
        }
        Form.to_file(data, f"{table}.vue", os.path.dirname(__file__), "templates/vue.template")

    def test_run_vue_js(self):
        # Generate one Vue component per table in the schema.
        tables = Form.get_tables("oa")
        for table in tables:
            table_name = table["TABLE_NAME"]
            table_upper_caml = to_upper_camel(table_name)
            rows = Form.get_table_info(table_name, "oa")
            data = {
                "list": rows,
                "tableUpperCaml": table_upper_caml,
                "tableConst": to_snake(table_name).upper(),
            }
            Form.to_file(data, f"tmp/{table_upper_caml}.vue", os.path.dirname(__file__), "templates/vue.template")

    def test_run_index_js(self):
        # Generate the routes file covering every table's component.
        tables = Form.get_tables("oa")
        table_infos = []
        for table in tables:
            table_name = table["TABLE_NAME"]
            table_upper_caml = to_upper_camel(table_name)
            table_infos.append({
                "tableUpperCaml": table_upper_caml,
                "tableLowerCaml": to_lower_camel(table_name),
                "tableConst": to_snake(table_name).upper(),
            })
        data = {
            "list": table_infos
        }
        Form.to_file(data, "tmp/routes.js", os.path.dirname(__file__), "templates/routes.template")

    def test_run_config_js(self):
        # Generate the config file with one constant entry per table.
        tables = Form.get_tables("oa")
        table_infos = []
        for table in tables:
            table_name = table["TABLE_NAME"]
            table_infos.append({
                "tableLowerCaml": to_lower_camel(table_name),
                "tableConst": to_snake(table_name).upper(),
            })
        data = {
            "list": table_infos
        }
        Form.to_file(data, "tmp/config.js", os.path.dirname(__file__), "templates/config.template")
|
QQ1134614268/PythonTemplate
|
src/apply/issue/issure_js_auto_code/js_auto_code_v0.py
|
js_auto_code_v0.py
|
py
| 3,508 |
python
|
en
|
code
| 2 |
github-code
|
6
|
7657570760
|
#!/usr/bin/python3
"""
The function "add_integer" adds two integers
"""
def add_integer(a, b=98):
    """Return the integer sum of a and b.

    Args:
        a (int/float): first addend (required).
        b (int/float): second addend, defaults to 98.

    Raises:
        TypeError: if a or b is neither an int nor a float.
        OverflowError: if a or b is an infinite float.

    Floats are truncated toward zero before adding.
    """
    if a is None and b is None:
        # Fixed: the two adjacent string literals previously concatenated
        # without a space, producing "requiredpositional".
        raise TypeError("add_integer() missing 1 required "
                        "positional argument: 'a'")
    if not isinstance(a, (int, float)):
        raise TypeError("a must be an integer")
    if not isinstance(b, (int, float)):
        raise TypeError("b must be an integer")
    # x + 1 == x only holds for infinite floats, so this guards overflow.
    if a + 1 == a:
        raise OverflowError("a too large")
    if b + 1 == b:
        raise OverflowError("b too large")
    return int(a) + int(b)
|
frace-engineering/alx-higher_level_programming
|
0x07-python-test_driven_development/0-add_integer.py
|
0-add_integer.py
|
py
| 738 |
python
|
en
|
code
| 0 |
github-code
|
6
|
58242642
|
try:
from zohocrmsdk.src.com.zoho.crm.api.exception import SDKException
from zohocrmsdk.src.com.zoho.crm.api.util import Constants
except Exception:
from ..exception import SDKException
from ..util import Constants
class Backup(object):
    def __init__(self):
        """Creates an instance of Backup"""
        self.__rrule = None           # recurrence rule string for scheduled backups
        self.__id = None              # backup record id
        self.__start_date = None      # datetime the backup starts
        self.__scheduled_date = None  # datetime the backup is scheduled to run
        self.__status = None          # current status string
        self.__requester = None       # Requester instance that asked for the backup
        # Tracks which keys were set, so only modified fields go to the API.
        self.__key_modified = dict()

    def get_rrule(self):
        """
        The method to get the rrule
        Returns:
            string: A string representing the rrule
        """
        return self.__rrule

    def set_rrule(self, rrule):
        """
        The method to set the value to rrule
        Parameters:
            rrule (string) : A string representing the rrule
        """
        # Validate the type before assigning; None is always accepted.
        if rrule is not None and not isinstance(rrule, str):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: rrule EXPECTED TYPE: str', None, None)
        self.__rrule = rrule
        self.__key_modified['rrule'] = 1

    def get_id(self):
        """
        The method to get the id
        Returns:
            int: An int representing the id
        """
        return self.__id

    def set_id(self, id):
        """
        The method to set the value to id
        Parameters:
            id (int) : An int representing the id
        """
        # Validate the type before assigning; None is always accepted.
        if id is not None and not isinstance(id, int):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: id EXPECTED TYPE: int', None, None)
        self.__id = id
        self.__key_modified['id'] = 1

    def get_start_date(self):
        """
        The method to get the start_date
        Returns:
            datetime: An instance of datetime
        """
        return self.__start_date

    def set_start_date(self, start_date):
        """
        The method to set the value to start_date
        Parameters:
            start_date (datetime) : An instance of datetime
        """
        # Imported locally to keep the module import surface minimal.
        from datetime import datetime
        if start_date is not None and not isinstance(start_date, datetime):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: start_date EXPECTED TYPE: datetime', None, None)
        self.__start_date = start_date
        self.__key_modified['start_date'] = 1

    def get_scheduled_date(self):
        """
        The method to get the scheduled_date
        Returns:
            datetime: An instance of datetime
        """
        return self.__scheduled_date

    def set_scheduled_date(self, scheduled_date):
        """
        The method to set the value to scheduled_date
        Parameters:
            scheduled_date (datetime) : An instance of datetime
        """
        # Imported locally to keep the module import surface minimal.
        from datetime import datetime
        if scheduled_date is not None and not isinstance(scheduled_date, datetime):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: scheduled_date EXPECTED TYPE: datetime', None, None)
        self.__scheduled_date = scheduled_date
        self.__key_modified['scheduled_date'] = 1

    def get_status(self):
        """
        The method to get the status
        Returns:
            string: A string representing the status
        """
        return self.__status

    def set_status(self, status):
        """
        The method to set the value to status
        Parameters:
            status (string) : A string representing the status
        """
        # Validate the type before assigning; None is always accepted.
        if status is not None and not isinstance(status, str):
            raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: status EXPECTED TYPE: str', None, None)
        self.__status = status
        self.__key_modified['status'] = 1

    def get_requester(self):
        """
        The method to get the requester
        Returns:
            Requester: An instance of Requester
        """
        return self.__requester
def set_requester(self, requester):
"""
The method to set the value to requester
Parameters:
requester (Requester) : An instance of Requester
"""
try:
from zohocrmsdk.src.com.zoho.crm.api.backup.requester import Requester
except Exception:
from .requester import Requester
if requester is not None and not isinstance(requester, Requester):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: requester EXPECTED TYPE: Requester', None, None)
self.__requester = requester
self.__key_modified['requester'] = 1
def is_key_modified(self, key):
"""
The method to check if the user has modified the given key
Parameters:
key (string) : A string representing the key
Returns:
int: An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if key in self.__key_modified:
return self.__key_modified.get(key)
return None
def set_key_modified(self, key, modification):
"""
The method to mark the given key as modified
Parameters:
key (string) : A string representing the key
modification (int) : An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if modification is not None and not isinstance(modification, int):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None)
self.__key_modified[key] = modification
|
zoho/zohocrm-python-sdk-5.0
|
zohocrmsdk/src/com/zoho/crm/api/backup/backup.py
|
backup.py
|
py
| 4,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32585270834
|
import cv2
import numpy as np
from .base import BaseTask
class BlurAndPHash(BaseTask):
    """Frame filter: drops blurry frames (low Laplacian variance) and
    near-duplicate frames (small perceptual-hash distance)."""

    def __init__(self):
        super().__init__(taskID=4, taskName='BlurAndPHash')
        # Frames whose Laplacian variance is at or below this are too blurry.
        self.thresholdLaplacian = 120
        # Hamming-distance thresholds vs. the last *kept* frame and the
        # immediately previous frame, respectively.
        self.thresholdDiffStop = 120
        self.thresholdDiffPre = 25
        self.hashLen = 32          # hash side length -> hashLen*hashLen bits
        self.preStopPHash = None   # hash of the last kept ("stop") frame
        self.prePHash = None       # hash of the previous frame seen
        self.n = 0                 # consecutive candidate-frame counter

    def exec(self, inputData):
        """Return (frame, isLastFrame) to keep the frame, or None to drop it.

        NOTE(review): the return type is mixed — a 2-tuple when the frame is
        kept (or on the last frame) and a bare None otherwise; callers must
        handle both.
        """
        frame, isLastFrame = inputData
        if isLastFrame:
            return None, isLastFrame
        currPHash = self.getPHash(frame)
        if currPHash is None:
            # Frame too blurry to hash.
            return None
        if self.preStopPHash is None:
            # First usable frame: keep it and seed both reference hashes.
            self.preStopPHash = currPHash
            self.prePHash = currPHash
            return frame, isLastFrame
        diffStop = self.hamDistance(self.preStopPHash, currPHash)
        diffPre = self.hamDistance(self.prePHash, currPHash)
        self.prePHash = currPHash
        if diffStop >= self.thresholdDiffStop \
                or diffPre <= self.thresholdDiffPre:
            # Either wildly different from the last kept frame or nearly
            # identical to the previous frame: drop it.
            return None
        self.n += 1
        if self.n <= 3:
            # Require several consecutive "new-looking" frames before keeping.
            return None
        self.n = 0
        self.preStopPHash = currPHash
        return frame, isLastFrame

    def getPHash(self, img):
        """Compute a DCT-based perceptual hash of img, or None if blurry."""
        pHash = None
        laplacian = cv2.Laplacian(img, cv2.CV_64F).var()
        if laplacian <= self.thresholdLaplacian:
            return pHash
        # BUG FIX: the interpolation flag was previously passed positionally,
        # where cv2.resize expects the `dst` output array; it must be given
        # as the `interpolation` keyword.
        imgGray = cv2.resize(
            cv2.cvtColor(img, cv2.COLOR_RGB2GRAY),
            (self.hashLen, self.hashLen),
            interpolation=cv2.INTER_AREA)
        height, width = imgGray.shape[:2]
        matrixOriginal = np.zeros(
            (height, width),
            np.float32)
        matrixOriginal[:height, :width] = imgGray
        matrix = cv2.dct(cv2.dct(matrixOriginal))
        matrix.resize(self.hashLen, self.hashLen)
        matrixFlatten = matrix.flatten()
        # Threshold each DCT coefficient against the mean (despite the usual
        # "median" naming in pHash write-ups, this implementation uses the mean).
        meanValue = sum(matrixFlatten) * 1. / len(matrixFlatten)
        pHash = 0
        for coefficient in matrixFlatten:
            pHash <<= 1
            if coefficient >= meanValue:
                pHash += 1
        return pHash

    @staticmethod
    def hamDistance(x, y):
        """Hamming distance between two equal-width integer bit hashes."""
        tmp = x ^ y
        distance = 0
        while tmp > 0:
            distance += tmp & 1
            tmp >>= 1
        return distance
|
Cloudslab/FogBus2
|
containers/taskExecutor/sources/utils/taskExecutor/tasks/blurAndPHash.py
|
blurAndPHash.py
|
py
| 2,292 |
python
|
en
|
code
| 17 |
github-code
|
6
|
3831338977
|
# encoding=utf-8
import logging
import logging.config
import os
import sys
import time
import traceback
import datetime
def init_log(name='root'):
    """Configure logging from `logger.conf` next to this file and return a logger.

    The template's `$path` placeholder is replaced with a dated log file under
    `zz_logs/` at the project root; the substituted copy is written to
    `logger.conf_bak` and loaded with logging.config.fileConfig.

    Parameters:
        name (str): logger name to return after configuration.

    Raises:
        Exception: if `logger.conf` does not exist.
    """
    path = os.path.dirname(__file__)
    # BUG FIX: `path + os.sep + file` produced an absolute '/logger.conf'
    # when dirname(__file__) was '' (script run from its own directory).
    config_file = os.path.join(path, 'logger.conf')
    log_dir = os.path.join(os.path.abspath(__file__ + ('/..' * 3)), 'zz_logs')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    log_path = os.path.join(log_dir, str(datetime.datetime.now().date()) + '.log')
    if not os.path.isfile(config_file):
        raise Exception("Config file {} not found".format(config_file))
    datalines = []
    with open(config_file, 'r') as src:
        for data in src.readlines():
            if '$path' in data:
                data = data.replace('$path', log_path)
            datalines.append(data)
    # `with` guarantees the handle is closed even if writelines fails
    # (the original leaked the handle on error).
    with open(config_file + '_bak', 'w') as dst:
        dst.writelines(datalines)
    # The '_bak' copy is intentionally left on disk (see the commented-out
    # os.remove in the original).
    logging.config.fileConfig(config_file + '_bak')
    return logging.getLogger(name)
# decorator print log
def addlog(name=''):
    """Decorator factory: log start/completion and timing of the wrapped call.

    name: optional label for the log lines; when empty, a
    '<relative-module>.<function>' label is derived from the caller's file.

    NOTE(review): `begin` is captured when the decorator is applied (usually
    import time), so "Total Time" measures time since decoration, not since
    the first call — confirm this is intended.
    """
    begin = time.time()
    def _addlog(func):
        def wapper(*args, **kwargs):
            data = None
            begin1 = time.time()  # per-call start time
            try:
                # Derive the caller's module path relative to the project root.
                s = traceback.extract_stack()
                file = s[-2][0]
                __project_name = os.path.abspath(__file__ + ('/..' * 3))
                file_name = file[file.find(__project_name) + len(__project_name) + 1:file.rfind(r'.')]
                func_descrip = (file_name + '.' + func.__name__) if name == '' else name
                log.info('Start Execute:%s ...' % func_descrip)
                data = func(*args, **kwargs)
                inner_secs = time.time() - begin1
                log.info('Complete:%s , Time Consume: %s, Total Time: %s ' % (func_descrip,
                         time_str(inner_secs), time_str(time.time() - begin)))
            except Exception as e:
                # traceback.print_exc()
                log.exception('Failure Calling Time Consume:%s, Total Time:%s, Err Message:%s', time_str(time.time() - begin1),
                              time_str(time.time() - begin), e)
                # traceback.print_exc(file=open(log_file, 'a'))
                # NOTE(review): exits with status 0 even though the call
                # failed, so callers cannot detect the failure — confirm intent.
                sys.exit(0)
            return data
        return wapper
    return _addlog
def time_str(second):
    """Format a duration in seconds as '<s>.ss sec' or '<m>.mm min'."""
    if second < 60:
        return '%.2f sec' % second
    return '%.2f min' % (second / 60.0)
# Module-level logger, configured as soon as this module is imported.
log = init_log()
# example
@addlog()
def log_test1():
    # Simulates one second of work; label derived from module/function name.
    time.sleep(1)
@addlog(name='test2')
def log_test2():
    # Demonstrates nested decorated calls plus a deliberate failure path.
    time.sleep(1)
    log_test1()
    time.sleep(2)
    raise ValueError('A very specific bad thing happened.')
if __name__ == "__main__":
    # Manual smoke checks of the configured logger at each level.
    col = 'aaaa'
    missing_rate = 0.26587
    log.info('%s has missing rate as %f' % (col, missing_rate))
    # log_test2()
    #
    # __project_name = os.path.abspath(__file__ + ('/..' * 3))
    # print(__project_name)
    log.debug('debug')
    log.info('test - debug')
    log.warning('warining')
|
charliedream1/ai_quant_trade
|
tools/log/log_util.py
|
log_util.py
|
py
| 3,053 |
python
|
en
|
code
| 710 |
github-code
|
6
|
37446574229
|
from metux.util.task import Task, TaskFail
from metux.util.git import GitRepo
"""Task: clone an git repo w/ initial checkout"""
class GitCloneTask(Task):

    """Clone a git repo and perform (or force) the initial checkout."""

    def do_run(self):
        spec = self.param['spec']
        repo = GitRepo(spec['path'])
        repo.initialize()
        # Register every configured remote before touching the work tree.
        for name in spec['remotes']:
            repo.set_remote(name, spec['remotes'][name]['url'])
        if repo.is_checked_out():
            if spec.get('init-force'):
                self.log_info("forcing re-init to "+spec['init-ref'])
                repo.remote_update_all()
                repo.force_checkout(spec['init-ref'], spec['init-branch'])
            if spec.get('remote-update'):
                repo.remote_update_all()
        else:
            if spec.get('init-ref') is None:
                raise TaskFail(self, 'cant checkout "'+spec['path']+'": autobuild-ref not defined')
            self.log_info("running initial checkout of "+spec['init-ref'])
            if not repo.checkout(spec['init-ref'], spec['init-branch']):
                raise TaskFail(self, 'cant checkout "'+spec['path']+'": git checkout failed')
        if spec.get('init-submodules', False):
            self.log_info("initializing submodules")
            repo.submodule_init()
        return True
|
LibreZimbra/librezimbra
|
deb_autopkg/util/tasks_git.py
|
tasks_git.py
|
py
| 1,389 |
python
|
en
|
code
| 4 |
github-code
|
6
|
51383141
|
from typing import *
import random
class Solution:
    """Find the element repeated N times in a 2N array via randomized quickselect."""

    def partition(self, nums, left, right):
        """Lomuto partition around a random pivot.

        Also sets self.found_n/self.n as soon as a duplicate of the pivot
        value is observed during the scan.
        """
        choice = random.randint(left, right)
        nums[choice], nums[right] = nums[right], nums[choice]
        boundary = left
        for cursor in range(left, right):
            if nums[cursor] <= nums[right]:
                nums[boundary], nums[cursor] = nums[cursor], nums[boundary]
                boundary += 1
            if nums[cursor] == nums[right]:
                self.found_n = True
                self.n = nums[right]
        nums[boundary], nums[right] = nums[right], nums[boundary]
        return boundary

    def qselect(self, nums, left, right, k):
        """Quickselect for rank k; short-circuits with the repeated value
        once it has been discovered."""
        if left == right:
            return left
        split = self.partition(nums, left, right)
        if self.found_n:
            return self.n
        if split == k:
            return split
        if split > k:
            return self.qselect(nums, left, split - 1, k)
        return self.qselect(nums, split + 1, right, k)

    def repeatedNTimes(self, nums: List[int]) -> int:
        """Return the value that appears len(nums)//2 times."""
        self.found_n = False
        self.n = None
        ans = self.qselect(nums, 0, len(nums) - 1, len(nums) // 2)
        if self.found_n:
            return self.n
        # The repeated value sits adjacent to the median position.
        if ans + 1 < len(nums) and nums[ans] == nums[ans + 1]:
            return nums[ans + 1]
        return nums[ans - 1]
if __name__ == "__main__":
    # Smoke tests: each input has length 2N with one value repeated N times.
    s = Solution()
    assert s.repeatedNTimes([9,5,3,3]) == 3
    assert s.repeatedNTimes([4,1,7,0,0,9,0,0]) == 0
    assert s.repeatedNTimes([5,1,5,2,5,3,5,4]) == 5
|
code-cp/leetcode
|
solutions/961/main2.py
|
main2.py
|
py
| 1,550 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7437025622
|
"""Module containing class `UntagClipsCommand`."""
import logging
import random
import time
from django.db import transaction
from vesper.command.clip_set_command import ClipSetCommand
from vesper.django.app.models import Job, Tag, TagEdit, TagInfo
import vesper.command.command_utils as command_utils
import vesper.django.app.model_utils as model_utils
import vesper.util.archive_lock as archive_lock
import vesper.util.text_utils as text_utils
import vesper.util.time_utils as time_utils
# NOTE(review): getLogger() with no name returns the *root* logger; the
# usual convention is logging.getLogger(__name__) — confirm before changing.
_logger = logging.getLogger()
class TagClipsCommand(ClipSetCommand):
    """Vesper command that applies a tag to a set of clips, optionally
    limited to a random sample of a specified size."""

    extension_name = 'tag_clips'

    def __init__(self, args):
        super().__init__(args, True)
        get_opt = command_utils.get_optional_arg
        # Optional cap on how many (randomly chosen) clips to tag.
        self._clip_count = get_opt('clip_count', args)

    def execute(self, job_info):
        """Run the command for the given job; returns True on completion."""
        self._job_info = job_info
        clip_indices = self._get_tag_clip_indices()
        self._tag_clips(clip_indices)
        return True

    def _get_tag_clip_indices(self):
        """Return a frozenset of clip indices to tag, or None to tag all."""
        if self._clip_count is None:
            # tag all clips
            return None
        clip_count = self._count_clips()
        if clip_count <= self._clip_count:
            # tag all clips
            return None
        # If we get here, a clip count is specified and it is less than
        # the number of untagged clips.
        _logger.info('Getting indices of clips to tag...')
        indices = random.sample(range(clip_count), self._clip_count)
        return frozenset(indices)

    def _count_clips(self):
        """Count matching clips that do not already carry the tag."""
        value_tuples = self._create_clip_query_values_iterator()
        count = 0
        for station, mic_output, date, detector in value_tuples:
            clips = model_utils.get_clips(
                station=station,
                mic_output=mic_output,
                date=date,
                detector=detector,
                annotation_name=self._annotation_name,
                annotation_value=self._annotation_value,
                tag_name=self._tag_name,
                tag_excluded=True,
                order=False)
            count += clips.count()
        return count

    def _tag_clips(self, clip_indices):
        """Tag the selected clips batch by batch, logging per-batch and
        total counts plus the overall tagging rate.

        clip_indices: frozenset of global clip indices to tag, or None
        to tag every matching clip.
        """
        start_time = time.time()
        value_tuples = self._create_clip_query_values_iterator()
        clip_index = 0
        total_clip_count = 0
        total_tagged_count = 0
        for station, mic_output, date, detector in value_tuples:
            # Get clip for this station, mic_output, date, and detector.
            clips = model_utils.get_clips(
                station=station,
                mic_output=mic_output,
                date=date,
                detector=detector,
                annotation_name=self._annotation_name,
                annotation_value=self._annotation_value,
                tag_name=self._tag_name,
                tag_excluded=True,
                order=False)
            # Get list of clip IDs.
            clip_ids = clips.values_list('pk', flat=True)
            # Get IDs of clips to tag.
            tag_clip_ids = \
                self._get_tag_clip_ids(clip_ids, clip_index, clip_indices)
            clip_count = len(clip_ids)
            tagged_count = len(tag_clip_ids)
            clip_index += clip_count
            # Tag clips.
            try:
                self._tag_clip_batch(tag_clip_ids)
            except Exception as e:
                batch_text = \
                    _get_batch_text(station, mic_output, date, detector)
                command_utils.log_and_reraise_fatal_exception(
                    e, f'Tagging of clips for {batch_text}')
            # Log clip counts.
            if tagged_count == clip_count:
                prefix = 'Tagged'
            else:
                untagged_count = clip_count - tagged_count
                prefix = (
                    f'Tagged {tagged_count} and left untagged '
                    f'{untagged_count} of')
            count_text = text_utils.create_count_text(clip_count, 'clip')
            batch_text = _get_batch_text(station, mic_output, date, detector)
            _logger.info(f'{prefix} {count_text} for {batch_text}.')
            total_clip_count += clip_count
            total_tagged_count += tagged_count
        # Log total clip counts and tagging rate.
        if total_tagged_count == total_clip_count:
            prefix = 'Tagged'
        else:
            total_untagged_count = total_clip_count - total_tagged_count
            prefix = (
                f'Tagged {total_tagged_count} and left untagged '
                f'{total_untagged_count} of')
        count_text = text_utils.create_count_text(total_clip_count, 'clip')
        elapsed_time = time.time() - start_time
        timing_text = command_utils.get_timing_text(
            elapsed_time, total_clip_count, 'clips')
        _logger.info(f'{prefix} a total of {count_text}{timing_text}.')

    def _get_tag_clip_ids(self, clip_ids, start_clip_index, clip_indices):
        """Select the subset of clip_ids whose global index (starting at
        start_clip_index) is in clip_indices; all of them when
        clip_indices is None."""
        if clip_indices is None:
            # tagging all clips
            return clip_ids
        else:
            # not tagging all clips
            clip_index = start_clip_index
            tag_clip_ids = []
            for clip_id in clip_ids:
                if clip_index in clip_indices:
                    tag_clip_ids.append(clip_id)
                clip_index += 1
            return tag_clip_ids

    def _tag_clip_batch(self, clip_ids):
        """Create Tag and TagEdit rows for clip_ids inside one archive lock
        and one database transaction, chunked to respect query limits."""
        with archive_lock.atomic():
            with transaction.atomic():
                # See note in untag_clips_command.py about maximum
                # chunk size. I'm not certain we have to do the same
                # thing here, but it seems likely that we do, for a
                # similar reason.
                max_chunk_size = 900
                tag_info = TagInfo.objects.get(name=self._tag_name)
                action = TagEdit.ACTION_SET
                creation_time = time_utils.get_utc_now()
                creating_job = Job.objects.get(id=self._job_info.job_id)
                for i in range(0, len(clip_ids), max_chunk_size):
                    chunk = clip_ids[i:i + max_chunk_size]
                    # Create tags.
                    Tag.objects.bulk_create([
                        Tag(
                            clip_id=clip_id,
                            info=tag_info,
                            creation_time=creation_time,
                            creating_user=None,
                            creating_job=creating_job,
                            creating_processor=None)
                        for clip_id in chunk])
                    # Create tag edits.
                    TagEdit.objects.bulk_create([
                        TagEdit(
                            clip_id=clip_id,
                            info=tag_info,
                            action=action,
                            creation_time=creation_time,
                            creating_user=None,
                            creating_job=creating_job,
                            creating_processor=None)
                        for clip_id in chunk])
def _get_batch_text(station, mic_output, date, detector):
    """Describe one (station, mic output, date, detector) clip batch for logs."""
    station_part = f'station "{station.name}", '
    mic_part = f'mic output "{mic_output.name}", '
    tail = f'date {date}, and detector "{detector.name}"'
    return station_part + mic_part + tail
|
HaroldMills/Vesper
|
vesper/command/tag_clips_command.py
|
tag_clips_command.py
|
py
| 7,835 |
python
|
en
|
code
| 47 |
github-code
|
6
|
29685980647
|
import re
import pep8
import six
"""
Guidelines for writing new hacking checks
- Use only for Octavia specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range O3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the O3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to
octavia/tests/unit/test_hacking.py
"""
# Matches LOG.<level>( followed directly by a bare string literal,
# i.e. an untranslated log message.
log_translation = re.compile(
    r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
# Author annotations: comment style and sphinx moduleauthor directives.
# (Raw strings fix the invalid '\s'/'\.' escape-sequence warnings.)
author_tag_re = (re.compile(r"^\s*#\s*@?(a|A)uthor"),
                 re.compile(r"^\.\.\s+moduleauthor::"))
_all_hints = set(['_', '_LI', '_LE', '_LW', '_LC'])
_all_log_levels = {
    # NOTE(yamamoto): Following nova which uses _() for audit.
    'audit': '_',
    'error': '_LE',
    'info': '_LI',
    'warn': '_LW',
    'warning': '_LW',
    'critical': '_LC',
    'exception': '_LE',
}
log_translation_hints = []
# For each log level, build a regex matching LOG.<level>( with either a bare
# string or a translation marker other than the expected hint.
# (.items() works on both py2 and py3; the six.iteritems call was unneeded.)
for level, hint in _all_log_levels.items():
    r = "(.)*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % {
        'level': level,
        'wrong_hints': '|'.join(_all_hints - set([hint])),
    }
    log_translation_hints.append(re.compile(r))
assert_trueinst_re = re.compile(
    r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
    "(\w|\.|\'|\"|\[|\])+\)\)")
assert_equal_in_end_with_true_or_false_re = re.compile(
    r"assertEqual\((\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
assert_equal_in_start_with_true_or_false_re = re.compile(
    r"assertEqual\((True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
assert_equal_with_true_re = re.compile(
    r"assertEqual\(True,")
assert_equal_with_false_re = re.compile(
    r"assertEqual\(False,")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
assert_equal_end_with_none_re = re.compile(r"(.)*assertEqual\(.+, None\)")
assert_equal_start_with_none_re = re.compile(r".*assertEqual\(None, .+\)")
assert_not_equal_end_with_none_re = re.compile(
    r"(.)*assertNotEqual\(.+, None\)")
assert_not_equal_start_with_none_re = re.compile(
    r"(.)*assertNotEqual\(None, .+\)")
assert_no_xrange_re = re.compile(
    r"\s*xrange\s*\(")
def _directory_to_check_translation(filename):
    # Placeholder: every directory is currently subject to translation checks.
    # TODO(review): scope by directory if some trees should be exempt.
    return True
def assert_true_instance(logical_line):
    """Flag assertTrue(isinstance(a, b)) sentences.

    O316
    """
    matched = assert_trueinst_re.match(logical_line)
    if matched:
        yield (0, "O316: assertTrue(isinstance(a, b)) sentences not allowed")
def assert_equal_or_not_none(logical_line):
    """Flag assertEqual/assertNotEqual comparisons against None.

    O318
    """
    patterns = (
        assert_equal_start_with_none_re,
        assert_equal_end_with_none_re,
        assert_not_equal_start_with_none_re,
        assert_not_equal_end_with_none_re,
    )
    if any(pattern.match(logical_line) for pattern in patterns):
        yield (0, "O318: assertEqual/assertNotEqual(A, None) or "
                  "assertEqual/assertNotEqual(None, A) sentences not allowed")
def no_translate_debug_logs(logical_line, filename):
    """Flag translated debug-level logs, i.e. LOG.debug(_(...)).

    As per our translation policy,
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    we shouldn't translate debug level logs.
    * This check assumes that 'LOG' is a logger.
    O319
    """
    if not _directory_to_check_translation(filename):
        return
    if logical_line.startswith("LOG.debug(_("):
        yield(0, "O319 Don't translate debug level logs")
def validate_log_translations(logical_line, physical_line, filename):
    """Require translations and translation hints on log messages (O320)."""
    # Translations are not required in the test directory, and noqa'd
    # physical lines are exempt.
    if "octavia/tests" in filename or pep8.noqa(physical_line):
        return
    if log_translation.match(logical_line):
        yield (0, "O320: Log messages require translations!")
    if _directory_to_check_translation(filename):
        for hint_re in log_translation_hints:
            if hint_re.match(logical_line):
                yield (0, "O320: Log messages require translation hints!")
def use_jsonutils(logical_line, filename):
    """Require oslo jsonutils instead of the stdlib json module (O321)."""
    msg = "O321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
    # Some files in the tree are not meant to be run from inside Octavia
    # itself, so we should not complain about them not using jsonutils.
    json_check_skipped_patterns = []
    if any(pattern in filename for pattern in json_check_skipped_patterns):
        return
    if "json." not in logical_line:
        return
    for fun in ('dumps', 'dump', 'loads', 'load'):
        pos = logical_line.find('json.%s(' % fun)
        if pos != -1:
            yield (pos, msg % {'fun': fun})
def no_author_tags(physical_line):
    """Forbid author annotations in source files (O322)."""
    for regex in author_tag_re:
        if not regex.match(physical_line):
            continue
        lowered = physical_line.lower()
        pos = lowered.find('moduleauthor')
        if pos < 0:
            pos = lowered.find('author')
        return pos, "O322: Don't use author tags"
def assert_equal_true_or_false(logical_line):
    """Flag assertEqual(True, A) / assertEqual(False, A) sentences.

    O323
    """
    found = (assert_equal_with_true_re.search(logical_line)
             or assert_equal_with_false_re.search(logical_line))
    if found:
        yield (0, "O323: assertEqual(True, A) or assertEqual(False, A) "
               "sentences not allowed")
def no_mutable_default_args(logical_line):
    """Flag mutable ({} or []) default arguments (O324)."""
    if mutable_default_args.match(logical_line):
        yield (0, "O324: Method's default argument shouldn't be mutable!")
def assert_equal_in(logical_line):
    """Flag assertEqual used for membership checks.

    Covers assertEqual(A in B, True/False) and the reversed argument order.
    O338
    """
    found = (assert_equal_in_start_with_true_or_false_re.search(logical_line)
             or assert_equal_in_end_with_true_or_false_re.search(logical_line))
    if found:
        yield (0, "O338: Use assertIn/NotIn(A, B) rather than "
               "assertEqual(A in B, True/False) when checking collection "
               "contents.")
def no_log_warn(logical_line):
    """Forbid the deprecated 'LOG.warn(' spelling.

    O339
    """
    if not logical_line.startswith('LOG.warn('):
        return
    yield(0, "O339:Use LOG.warning() rather than LOG.warn()")
def no_xrange(logical_line):
    """Forbid 'xrange()'.

    O340
    """
    matched = assert_no_xrange_re.match(logical_line)
    if matched is not None:
        yield (0, "O340: Do not use xrange().")
def factory(register):
    """Hacking extension entry point: register all Octavia style checks."""
    register(assert_true_instance)
    register(assert_equal_or_not_none)
    register(no_translate_debug_logs)
    register(validate_log_translations)
    register(use_jsonutils)
    register(no_author_tags)
    register(assert_equal_true_or_false)
    register(no_mutable_default_args)
    register(assert_equal_in)
    register(no_log_warn)
    register(no_xrange)
|
BeaconFramework/Distributor
|
octavia/hacking/checks.py
|
checks.py
|
py
| 7,161 |
python
|
en
|
code
| 1 |
github-code
|
6
|
45413329386
|
import os
import pathlib
import pandas as pd
import keyring
import dropbox
from dropbox.exceptions import AuthError
# Directory
BASE_DIR = os.path.dirname(os.path.abspath(__file__))  # folder containing this script
# Dropbox locations: web-UI home URL, registered app name, and app folder path.
dropbox_home = "https://www.dropbox.com/home/"
dropbox_app = "MAD_WahooToGarmin"
dropbox_app_dir = "/Apps/WahooFitness/"
# Access token is read from the OS keychain via keyring at import time.
DROPBOX_ACCESS_TOKEN = keyring.get_password("dropbox", dropbox_app)
# https://practicaldatascience.co.uk/data-science/how-to-use-the-dropbox-api-with-python
def dropbox_connect():
    """Create a connection to Dropbox.

    Returns:
        dropbox.Dropbox: an authenticated client.

    Raises:
        AuthError: if the access token is rejected.
    """
    try:
        dbx = dropbox.Dropbox(DROPBOX_ACCESS_TOKEN)
    except AuthError as e:
        print("Error connecting to Dropbox with access token: " + str(e))
        # BUG FIX: the original fell through to `return dbx` with `dbx`
        # unbound, masking the auth failure with a NameError.
        raise
    return dbx
def dropbox_list_files(path):
    """Return a Pandas dataframe of files in a given Dropbox folder path in
    the Apps directory, newest (by server_modified) first.

    Returns None if the listing fails.
    """
    dbx = dropbox_connect()
    try:
        files = dbx.files_list_folder(path).entries
        files_list = []
        for file in files:
            # Skip folders and deleted entries; keep regular files only.
            if isinstance(file, dropbox.files.FileMetadata):
                metadata = {
                    "filename": file.name,
                    "path_display": file.path_display,
                    "client_modified": pd.Timestamp(file.client_modified).isoformat(),
                    "server_modified": pd.Timestamp(file.server_modified).isoformat(),
                }
                files_list.append(metadata)
        df = pd.DataFrame.from_records(files_list)
        if df.empty:
            # BUG FIX: a folder with no regular files yields a column-less
            # frame, and sorting it raised KeyError('server_modified').
            return df
        return df.sort_values(by="server_modified", ascending=False)
    except Exception as e:
        print("Error getting list of files from Dropbox: " + str(e))
def dropbox_download_file(dropbox_file_path, local_file_path):
    """Fetch a file from Dropbox and save it to the local machine."""
    try:
        client = dropbox_connect()
        with open(local_file_path, "wb") as target:
            file_metadata, response = client.files_download(path=dropbox_file_path)
            target.write(response.content)
    except Exception as e:
        print("Error downloading file from Dropbox: " + str(e))
def dropbox_upload_file(local_path, local_file, dropbox_file_path):
    """Upload a local file into the Dropbox app directory.

    Args:
        local_path (str): The path to the local file.
        local_file (str): The name of the local file.
        dropbox_file_path (str): The path to the file in the Dropbox app directory.

    Example:
        dropbox_upload_file('.', 'test.csv', '/stuff/test.csv')

    Returns:
        meta: The Dropbox file metadata, or None if the upload failed.
    """
    try:
        client = dropbox_connect()
        source = pathlib.Path(local_path) / local_file
        with source.open("rb") as handle:
            meta = client.files_upload(
                handle.read(), dropbox_file_path,
                mode=dropbox.files.WriteMode("overwrite"))
        return meta
    except Exception as e:
        print("Error uploading file to Dropbox: " + str(e))
if __name__ == "__main__":
print("here")
|
michaeladavis10/WahooToGarmin
|
dropbox_utils.py
|
dropbox_utils.py
|
py
| 2,994 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42399945606
|
"""empty message
Revision ID: a5cfe890710d
Revises: 7352c721e0a4
Create Date: 2023-05-28 16:47:42.177222
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a5cfe890710d'        # this migration's id
down_revision = '7352c721e0a4'   # parent migration
branch_labels = None
depends_on = None
def upgrade():
    """Drop the per-size image URL columns from the images table."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('images', schema=None) as batch_op:
        batch_op.drop_column('url_small')
        batch_op.drop_column('url_full')
        batch_op.drop_column('url_regular')
    # ### end Alembic commands ###
def downgrade():
    """Restore the per-size image URL columns (data is not recovered)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('images', schema=None) as batch_op:
        batch_op.add_column(sa.Column('url_regular', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
        batch_op.add_column(sa.Column('url_full', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
        batch_op.add_column(sa.Column('url_small', sa.VARCHAR(length=500), autoincrement=False, nullable=True))
    # ### end Alembic commands ###
|
RBird111/capstone-yelp-clone
|
migrations/versions/20230528_164742_.py
|
20230528_164742_.py
|
py
| 1,132 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39688530564
|
# Time: O(n)
# Space: O(n)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Count downward tree paths summing to a target, via running prefix sums."""

    def pathSum(self, root: TreeNode, sum: int) -> int:
        """Return how many downward paths in the tree add up to `sum`."""
        prefix_counts = collections.defaultdict(int)
        prefix_counts[0] = 1  # empty prefix, so root-anchored paths count
        return self.dfs(root, prefix_counts, sum, 0)

    def dfs(self, root, run_map, sum, run_sum):
        """Walk the subtree, tracking the cumulative sum from the tree root."""
        if root is None:
            return 0
        run_sum += root.val
        # A previously seen prefix equal to run_sum - sum closes a valid path.
        count = run_map.get(run_sum - sum, 0)
        run_map[run_sum] += 1
        below = self.dfs(root.left, run_map, sum, run_sum)
        below += self.dfs(root.right, run_map, sum, run_sum)
        run_map[run_sum] -= 1  # backtrack before leaving this root-to-node path
        return count + below
|
cmattey/leetcode_problems
|
Python/lc_437_path_sum_iii.py
|
lc_437_path_sum_iii.py
|
py
| 960 |
python
|
en
|
code
| 4 |
github-code
|
6
|
32166211761
|
import requests
from bs4 import BeautifulSoup
import json
def get_description(url):
    """Scrape an ad page and append its features/description as JSON.

    Feature-name spans are paired with feature-value spans; trailing names
    without a matching value are collected under 'Extra_features'. The
    result is appended to the file 'description' keyed by the page's h1.
    """
    response = requests.get(url)
    if response is None:
        return
    soup = BeautifulSoup(response.text, 'html.parser')
    description = {}
    keys = []
    values = []
    for item in soup.find_all("span", class_="adPage__content__features__key"):
        if item is not None:
            keys.append(item.text)
    for item in soup.find_all("span", class_="adPage__content__features__value"):
        if item is not None:
            values.append(item.text)
    # Pair names with values; min() guards against more values than names.
    for index in range(min(len(keys), len(values))):
        description[keys[index]] = values[index]
    # BUG FIX: the original appended the integer loop indices (and started
    # at len(l2)+1, skipping one entry) instead of the leftover feature names.
    description['Extra_features'] = keys[len(values):]
    if soup.find('h1') is not None:
        car_model = soup.find('h1').text
        des = soup.find('div', class_=("adPage__content__description grid_18"))
        if des is not None:
            description["description"] = des.text
        print(description)
        desc = {car_model: description}
        # NOTE(review): appending JSON objects produces a file that is not
        # itself valid JSON; consider JSON Lines (one object per line).
        file_name = "description"
        with open(file_name, "a") as json_file:
            json.dump(desc, json_file)
|
Drkiller325/PR_Lab2
|
homework.py
|
homework.py
|
py
| 1,157 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30534890966
|
import re
import sys
import subprocess
class INA3221:
    """Register map and read helper for the TI INA3221 triple power monitor."""

    I2C_ADDR = 0x40
    MANUFACTURER_ID_VALUE = 0x5449  # Texas Instruments
    DIE_ID_VALUE = 0x3220  # INA 3221

    class Reg:
        """Register addresses (channel registers step by ShiftChannel)."""
        MANUFACTURER_ID = 0xFE
        DIE_ID = 0xFF
        CH1_SHUNT = 0x1
        CH1_BUS = 0x2
        ShiftChannel = 0x2

    @staticmethod
    def read(bus, register):
        """Read a 16-bit register; SMBus hands the word over byte-swapped
        relative to the chip's big-endian layout, so swap it back."""
        raw = bus.read_word_data(INA3221.I2C_ADDR, register) & 0xFFFF
        return ((raw & 0xFF) << 8) | (raw >> 8)
def pretty_floats(float_arr):
    """Format each float as a zero-padded, width-5, two-decimal string."""
    return [format(value, '05.2f') for value in float_arr]
def get_power_data(pci_id):
    """Read the INA3221 on an NVIDIA GPU over I2C and print voltage/current/
    power per monitoring channel, plus totals for two shunt configurations.

    pci_id: PCI bus id of the GPU (e.g. "3:00"), used to find the
    driver-exposed i2c adapter. Requires root, python-smbus and i2cdetect.
    Returns None on any detection failure.
    """
    import smbus
    # Ensure the i2c character devices exist before scanning adapters.
    subprocess.run(["modprobe", "i2c-dev"])
    i2c_devs = subprocess.run(["i2cdetect", "-l"], capture_output=True, text=True).stdout.splitlines()
    i2c_dev_selected = [i.split("\t")[0] for i in i2c_devs if "NVIDIA i2c adapter 2 at "+pci_id in i]
    if len(i2c_dev_selected) != 1:
        print("Failed finding i2c adapter for NVIDIA gpu at " + pci_id)
        return None
    i2c_dev = i2c_dev_selected[0]
    print("Using ", i2c_dev)
    i2c_num = int(i2c_dev.split('-')[-1])
    bus = smbus.SMBus(i2c_num)
    # Sanity-check the chip identity registers before trusting readings.
    if (
        INA3221.read(bus, INA3221.Reg.DIE_ID) != INA3221.DIE_ID_VALUE
        or INA3221.read(bus, INA3221.Reg.MANUFACTURER_ID) != INA3221.MANUFACTURER_ID_VALUE
    ):
        print("Unknown I2C device")
        return None
    # Candidate shunt-resistor layouts: index selects r2 (0) or r5 (1) per channel.
    shunts = {
        "If all shunts r5":[1,1,1],
        "If shunts r5, r5, r2":[1,1,0]
    }
    powers = []
    for channel in range(3):
        shunt_millivolts_raw = INA3221.read(bus, INA3221.Reg.CH1_SHUNT + channel * INA3221.Reg.ShiftChannel)
        if shunt_millivolts_raw >= 0x8000: shunt_millivolts_raw -= 0x10000 #handle negative value during lite load
        shunt_millivolts = shunt_millivolts_raw * 5 / 1000.
        in_volts = INA3221.read(bus, INA3221.Reg.CH1_BUS + channel * INA3221.Reg.ShiftChannel)/1000.
        # Current candidates assuming a 2 mOhm vs 5 mOhm shunt resistor.
        r2_r5_amps = [shunt_millivolts/2., shunt_millivolts/5.]
        r2_r5_powers = [in_volts * amps for amps in r2_r5_amps]
        powers.append(r2_r5_powers)
        print(
            *pretty_floats([in_volts]), " Volt (normal is 12.000) [r2, r5]",
            *pretty_floats([shunt_millivolts]), "millivolts on shunt ",
            pretty_floats(r2_r5_amps), "amps ",
            pretty_floats(r2_r5_powers), "Watts")
    # Report totals for each candidate shunt layout.
    for sk, sv in shunts.items():
        watts = [powers[i][v] for i, v in enumerate(sv)]
        print(sk, pretty_floats(watts), " Total watts:", sum(watts))
    bus.close()
# Entry point: require exactly one argument (the GPU's PCI id).
if len(sys.argv) != 2:
    print(
        """This program gets power info from nvidia GPUs using I2C busses provided by nvidia driver.
Direct I2C is risky and if something goes wrong
IT MAY BRICK YOUR DISPLAY OR GPU.
Use at your own risk!
Usage (as root): """+sys.argv[0]+""" PCI_ID
Where PCI_ID is id of nvidia gpu with ina3221 onboard chip, like 3:00
""")
    sys.exit(1)
try:
    get_power_data(sys.argv[1])
except:
    # Print the hint, then re-raise so the full traceback is still shown.
    # (Message typos fixed: "occured" -> "occurred", "runing" -> "running".)
    print("Error occurred, are you running as root and have python-smbus and i2cdetect installed?\n\n")
    raise
"""Example output for EVGA 980TI under load
Using  i2c-3
12.09  Volt (normal is 12.000) [r2, r5] 15.76 millivolts on shunt  ['07.88', '03.15'] amps  ['95.25', '38.10'] Watts
12.17  Volt (normal is 12.000) [r2, r5] 21.08 millivolts on shunt  ['10.54', '04.22'] amps  ['128.25', '51.30'] Watts
12.13  Volt (normal is 12.000) [r2, r5] 38.44 millivolts on shunt  ['19.22', '07.69'] amps  ['233.10', '93.24'] Watts
If all shunts r5 [38.101376, 51.30028799999999, 93.240064]  Total watts: 182.641728
"""
|
galkinvv/galkinvv.github.io
|
nvidia-sensors.py
|
nvidia-sensors.py
|
py
| 3,340 |
python
|
en
|
code
| 29 |
github-code
|
6
|
14471351413
|
'''
Given a list accounts, each element accounts[i] is a list of strings, where the first element accounts[i][0] is a name, and the rest of the elements are emails representing emails of the account.
Now, we would like to merge these accounts. Two accounts definitely belong to the same person if there is some email that is common to both accounts. Note that even if two accounts have the same name, they may belong to different people as people could have the same name. A person can have any number of accounts initially, but all of their accounts definitely have the same name.
After merging the accounts, return the accounts in the following format: the first element of each account is the name, and the rest of the elements are emails in sorted order. The accounts themselves can be returned in any order.
Example 1:
Input:
accounts = [["John", "[email protected]", "[email protected]"], ["John", "[email protected]"], ["John", "[email protected]", "[email protected]"], ["Mary", "[email protected]"]]
Output: [["John", '[email protected]', '[email protected]', '[email protected]'], ["John", "[email protected]"], ["Mary", "[email protected]"]]
Explanation:
The first and third John's are the same person as they have the common email "[email protected]".
The second John and Mary are different people as none of their email addresses are used by other accounts.
We could return these lists in any order, for example the answer [['Mary', '[email protected]'], ['John', '[email protected]'],
['John', '[email protected]', '[email protected]', '[email protected]']] would still be accepted.
Note:
The length of accounts will be in the range [1, 1000].
The length of accounts[i] will be in the range [1, 10].
The length of accounts[i][j] will be in the range [1, 30].
'''
from collections import defaultdict
class Solution:
    def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
        """Merge accounts that share at least one email address.

        Links every email of an account to the account's first email in an
        undirected graph, then gathers connected components with an
        iterative depth-first search.  Each merged account is returned as
        [name, *sorted emails]; the order of the accounts is unspecified.
        """
        owner = {}                    # email -> account holder's name
        adjacency = defaultdict(set)  # email -> directly linked emails
        for record in accounts:
            holder = record[0]
            anchor = record[1]
            owner[anchor] = holder
            for address in record[1:]:
                adjacency[anchor].add(address)
                adjacency[address].add(anchor)
                owner[address] = holder
        visited = set()
        merged = []
        for start in adjacency:
            if start in visited:
                continue
            visited.add(start)
            pending = [start]
            group = []
            while pending:
                current = pending.pop()
                group.append(current)
                for neighbor in adjacency[current]:
                    if neighbor not in visited:
                        visited.add(neighbor)
                        pending.append(neighbor)
            merged.append([owner[start]] + sorted(group))
        return merged
|
loganyu/leetcode
|
problems/721_accounts_merge.py
|
721_accounts_merge.py
|
py
| 2,893 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30804267516
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
if __name__ == '__main__':
    # Read the CSV data into a pandas DataFrame (keep fund codes as strings).
    fund = pd.read_csv('./csv/001112.csv', dtype={'fcode': str})
    # Parse the date strings into datetime objects.
    fund['fdate'] = pd.to_datetime(fund['fdate'])
    # Use the date column as the index and sort by it.
    # NOTE(review): ascending=False sorts newest-first, but the original
    # (Chinese) comment said "ascending" — confirm the intended order.
    fund = fund.set_index('fdate').sort_index(ascending=False)
    # x is the index of the 2017 rows, y is the 2017 NAV column.
    x = fund.loc['2017'].index
    y = fund.loc['2017']['NAV']
    # Convert x and y into matrices for sklearn:
    # timestamps as int64 nanoseconds
    x = x.astype(np.int64)
    # reshape into a 2-D column vector (many rows, one column)
    x = x.values.reshape(-1, 1)
    y = y.values.reshape(-1, 1)
    # Fit the linear regression on the data.
    lr = LinearRegression()
    lr.fit(x, y)
    # Build timestamps for the dates whose NAV we want to predict.
    test_x = pd.to_datetime(np.array(['2017-9-30', '2017-10-1'])).astype(np.int64).values.reshape(-1, 1)
    # Predicted NAV values, e.g. [[1.41483561]
    # [1.41626252]]
    new_y = lr.predict(test_x)
    # Plot the fitted line: predicting y from the training x gives the
    # straight regression line.
    x_date = fund.loc['2017'].index
    # scatter plot of the actual NAV trend
    plt.scatter(x_date, fund.loc['2017']['NAV'])
    plt.plot(x_date, lr.predict(x), 'r')
    plt.show()
    print(new_y)
|
bobchi/learn_py
|
23.py
|
23.py
|
py
| 1,416 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25805435139
|
import random
from models.storage import *
##########################
# Monte-Carlo Prediction #
##########################
# Source : Reinforcement Learning : an introduction, Sutton & Barto, p.92
class MCPrediction:
    """Monte-Carlo prediction of state values V(s) by averaging returns.

    Source: Reinforcement Learning: an introduction, Sutton & Barto, p.92.
    """
    def __init__(self, game, gamma=0.8, default_q=999):
        # BUG FIX: V was a plain dict, but every use below goes through
        # .Get()/.Set() (the StoreValue interface) and default_q was never
        # honoured.  Store V in a StoreValue seeded with default_q.
        self.V = StoreValue(default_q)
        self.Returns = StoreAverage()  # running average of returns per state
        self.gamma = gamma  # discount factor
        self.game = game
        self.name = "Monte-Carlo prediction"
    def argmax(self, s):
        """Return the action whose successor state has the highest V."""
        actions = self.game.possible_actions(s)
        if len(actions) == 0: return None
        random.shuffle(actions)  # break ties randomly
        scores = []
        for a in actions:
            sp = self.game.step(s, a)
            scores.append((self.V.Get(sp), a))
        return max(scores)[1]
    def make_game(self, initial_state, smart=False):
        """Play one episode; return the visited states and their rewards.

        With smart=False the policy is uniformly random, otherwise greedy
        with respect to the current value estimates.
        """
        s = initial_state
        states = [s]
        results = [self.game.reward(s)]
        while not self.game.is_terminal(s):
            # BUG FIX: the smart branch called the non-existent
            # self.choice(); use the greedy policy from argmax() instead.
            if not smart: a = self.game.random_action(s)
            else: a = self.argmax(s)
            s = self.game.step(s, a)
            states.append(s)
            results.append(self.game.reward(s))
        return states, results
    def train(self, n=1000, smart=False):
        """Run n episodes and update V with averaged discounted returns."""
        initial_state = self.game.initial_state()
        for i in range(n):
            G = 0
            S, R = self.make_game(initial_state, smart)
            # walk the episode backwards, accumulating the discounted return
            S, R = reversed(S), reversed(R)
            for s, r in zip(S, R):
                G = self.gamma * G + r
                self.Returns.Add(s, G)
                self.V.Set(s, self.Returns.Average(s))
################################
# Monte-Carlo Exploring Starts #
################################
# Source : Reinforcement Learning : an introduction, Sutton & Barto, p.99
class MCES:
    """Monte-Carlo Exploring Starts control; the greedy policy lives in P.

    Source: Reinforcement Learning: an introduction, Sutton & Barto, p.99.
    """
    def __init__(self, game, gamma=0.8, defaultq=0):
        self.P = {}  # learned greedy policy: state -> action
        self.Returns = StoreAverage()  # running average of returns per (a, s)
        self.Q = StoreValue(defaultq)  # action-value estimates keyed by (a, s)
        self.gamma = gamma  # discount factor
        self.game = game
        self.name = "Monte-Carlo Exploring Starts"
    def argmax(self, s):
        """Greedy action w.r.t. Q; ties broken randomly by the shuffle."""
        actions = self.game.possible_actions(s)
        if len(actions) == 0: return None
        random.shuffle(actions)
        scores = []
        for a in actions:
            scores.append((self.Q.Get((a, s)), a))
        return max(scores, key=lambda x: x[0])[1]
    def make_game(self, initial_state, epsilon=0.5):
        """Play one epsilon-greedy episode.

        Returns (states, rewards, actions).  A trailing None action is
        appended so the three lists are the same length for the reversed
        zip in train().
        """
        s = initial_state
        states = [s]
        rewards = [0]
        actions = []
        while not self.game.is_terminal(s):
            rand = random.uniform(0, 1)
            if rand < epsilon: a = self.game.random_action(s)
            else: a = self.argmax(s)
            actions.append(a)
            s = self.game.step(s, a)
            states.append(s)
            rewards.append(self.game.reward(s))
        actions.append(None)
        return states, rewards, actions
    def train(self, n=1000, epsilon=0.5):
        """Run n episodes; average discounted returns into Q, greedify P."""
        initial_state = self.game.initial_state()
        for i in range(n):
            G = 0
            S, R, A = self.make_game(initial_state, epsilon)
            # walk the episode backwards, accumulating the discounted return
            S, R, A = reversed(S), reversed(R), reversed(A)
            for s, r, a in zip(S, R, A):
                G = self.gamma * G + r
                self.Returns.Add((a,s), G)
                self.Q.Set((a, s), self.Returns.Average((a,s)))
                self.P[s] = self.argmax(s)
##########################
# Q-Learning #
##########################
# Source : Reinforcement Learning : an introduction, Sutton & Barto, p.131
class QLearning:
    """Tabular Q-Learning (off-policy TD control).

    Source: Reinforcement Learning: an introduction, Sutton & Barto, p.131.
    """
    def __init__(self, game, alpha=0.3, gamma=0.8, defaultq=0):
        self.Q = StoreValue(defaultq)  # action values keyed by (action, state)
        self.alpha = alpha  # learning rate
        self.gamma = gamma  # discount factor
        self.game = game
        self.name = "Q-Learning"
    def best_Q(self, s):
        """Max Q over all actions in s, including the None 'no action'."""
        scores = []
        A = self.game.possible_actions(s)
        random.shuffle(A)
        A.append(None)
        for a in A:
            scores.append(self.Q.Get((a, s)))
        return max(scores)
    def argmax(self, s):
        """Greedy action in state s; None when no action is possible.

        NOTE(review): the tie-break key uses -x[1], which assumes actions
        are numeric (smaller action wins ties) — confirm against the games
        this is used with.
        """
        actions = self.game.possible_actions(s)
        if len(actions) == 0: return None
        random.shuffle(actions)
        scores = []
        for a in actions:
            scores.append((self.Q.Get((a, s)), a))
        return max(scores, key=lambda x: (x[0], -x[1]))[1]
    def make_game(self, initial_state, epsilon=0.5):
        """Play one epsilon-greedy episode, updating Q online each step.

        Returns `actions`, which is never appended to — always empty.
        """
        s = initial_state
        actions = []
        while not self.game.is_terminal(s):
            rand = random.uniform(0, 1)
            if rand < epsilon: a = self.game.random_action(s)
            else: a = self.argmax(s)
            sp = self.game.step(s, a)
            r = self.game.reward(s)
            # standard Q-learning update: Q += alpha * (TD target - Q)
            self.Q.Set((a, s), self.Q.Get((a, s)) + self.alpha * (r + self.gamma * self.best_Q(sp) - self.Q.Get((a, s))))
            s = sp
        r = self.game.reward(s)
        # terminal state: credit its reward to the (None, s) entry
        self.Q.Set((None, s), self.Q.Get((None, s)) + self.alpha * r)
        return actions
    def train(self, n=1000, epsilon=0.5):
        """Run n training episodes from the game's initial state."""
        initial_state = self.game.initial_state()
        for i in range(n):
            s = initial_state
            A = self.make_game(initial_state, epsilon)
##############################
# Bandit Problem #
##############################
# Source : Reinforcement Learning : an introduction, Sutton & Barto, p.32
class Bandit:
    """Sample-average action-value method (bandit-style per-state updates).

    Source: Reinforcement Learning: an introduction, Sutton & Barto, p.32.
    """
    def __init__(self, game, alpha=0.3, gamma=0.8):
        self.Q = StoreValue(9999)  # optimistic initial values push exploration
        self.N = StoreValue()  # visit counts per (action, state)
        self.alpha = alpha  # unused by the sample-average update below
        self.gamma = gamma  # unused by the sample-average update below
        self.game = game
        self.name = "Bandit problem"
    def argmax(self, s):
        """Greedy action w.r.t. Q; ties broken randomly by the shuffle."""
        actions = self.game.possible_actions(s)
        if len(actions) == 0: return None
        random.shuffle(actions)
        scores = []
        for a in actions:
            scores.append((self.Q.Get((a, s)), a))
        return max(scores, key=lambda x: (x[0]))[1]
    def make_game(self, initial_state, epsilon=0.5):
        """Play one epsilon-greedy episode with incremental-mean Q updates.

        Returns `actions`, which is never appended to — always empty.
        """
        s = initial_state
        actions = []
        while not self.game.is_terminal(s):
            rand = random.uniform(0, 1)
            if rand < epsilon: a = self.game.random_action(s)
            else: a = self.argmax(s)
            sp = self.game.step(s, a)
            r = self.game.reward(s)
            self.N.Set((a, s), self.N.Get((a, s))+1)
            Qa = self.Q.Get((a, s))
            # incremental mean: Q += (r - Q) / N
            self.Q.Set((a, s), Qa + (r-Qa) / self.N.Get((a, s)))
            s = sp
        r = self.game.reward(s)
        # terminal state: update the (None, s) entry the same way
        Qa = self.Q.Get((None, s))
        self.N.Set((None, s), self.N.Get((None, s))+1)
        self.Q.Set((None, s), Qa + (r-Qa) / self.N.Get((None, s)))
        return actions
    def train(self, n=1000, epsilon=0.5):
        """Run n training episodes from the game's initial state."""
        initial_state = self.game.initial_state()
        for i in range(n):
            s = initial_state
            A = self.make_game(initial_state, epsilon)
##############################
# n-step SARSA #
##############################
# Source : Reinforcement Learning : an introduction, Sutton & Barto, p.147
import numpy as np
class NStepSarsa:
    """n-step SARSA (on-policy TD control with n-step returns).

    Source: Reinforcement Learning: an introduction, Sutton & Barto, p.147.
    """
    def __init__(self, game, n=5, alpha=0.5, gamma=0.8):
        self.Q = StoreValue(9999)  # optimistic initial action values
        self.alpha = alpha  # learning rate
        self.gamma = gamma  # discount factor
        self.game = game
        self.n = n  # number of steps bridged by the n-step return
        self.name = "n-Step SARSA"
    def argmax(self, s):
        """Greedy action w.r.t. Q; ties broken randomly by the shuffle."""
        actions = self.game.possible_actions(s)
        if len(actions) == 0: return None
        random.shuffle(actions)
        scores = []
        for a in actions:
            scores.append((self.Q.Get((a, s)), a))
        return max(scores, key=lambda x: (x[0]))[1]
    def choose_action(self, s, epsilon):
        """Epsilon-greedy action selection."""
        rand = random.uniform(0, 1)
        if rand < epsilon: a = self.game.random_action(s)
        else: a = self.argmax(s)
        return a
    def make_game(self, initial_state, epsilon=0.5):
        """Play one episode, updating Q with n-step returns as it unrolls.

        Follows the textbook pseudo-code: T is the (initially unknown)
        terminal time step, tau is the time index whose action-value
        estimate is updated at the current step.
        """
        t = 0
        T = 999999999  # stands in for infinity until the terminal state is seen
        s = initial_state
        a = self.choose_action(s, epsilon)
        actions = [a]
        states = [s]
        rewards = [0]
        while True:
            if t < T:
                s = self.game.step(s, a)
                r = self.game.reward(s)
                states.append(s)
                rewards.append(r)
                if self.game.is_terminal(s): T = t+1
                else:
                    a = self.choose_action(s, epsilon)
                    actions.append(a)
            tau = t-self.n+1  # time whose estimate becomes updatable now
            if tau >= 0:
                # n-step return: discounted rewards, plus a bootstrap term
                # from Q when the horizon has not reached the terminal step
                G = 0
                for i in range(tau+1, min(tau+self.n+1, T+1)):
                    G += np.power(self.gamma, i-tau-1) * rewards[i]
                if tau + self.n < T:
                    S,A = states[tau+self.n], actions[tau+self.n]
                    G += np.power(self.gamma, self.n) * self.Q.Get((A,S))
                S,A = states[tau], actions[tau]
                QSA = self.Q.Get((A,S))
                self.Q.Set((A,S), QSA + self.alpha * (G - QSA))
            if tau == T - 1: break
            t += 1
        return actions
    def train(self, n=1000, epsilon=0.5):
        """Run n training episodes from the game's initial state."""
        initial_state = self.game.initial_state()
        for i in range(n):
            s = initial_state
            A = self.make_game(initial_state, epsilon)
|
AMasquelier/Reinforcement-Learning
|
models/solo.py
|
solo.py
|
py
| 9,650 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40033463881
|
import json
from pathlib import Path
import numpy as np
import torch
import torch.utils.data
from PIL import Image
from panopticapi.utils import rgb2id
from utils.utils import masks_to_boxes
from dataset.utils import make_coco_transforms
# Mapping from Cityscapes city name to a small integer code, used to pack an
# image id string ("<city>_<seq>_<frame>") into a single integer.
city2int = {
    "aachen": 0,
    "bremen": 1,
    "darmstadt": 2,
    "erfurt": 3,
    "hanover": 4,
    "krefeld": 5,
    "strasbourg": 6,
    "tubingen": 7,
    "weimar": 8,
    "bochum": 9,
    "cologne": 10,
    "dusseldorf": 11,
    "hamburg": 12,
    "jena": 13,
    "monchengladbach": 14,
    "stuttgart": 15,
    "ulm": 16,
    "zurich": 17,
    "frankfurt": 18,
    "lindau": 19,
    "munster": 20,
    "berlin": 21,
    "bielefeld": 22,
    "bonn": 23,
    "leverkusen": 24,
    "mainz": 25,
    "munich": 26,
}
# Inverse mapping: integer code back to city name.
int2city = {v: k for k, v in city2int.items()}
# Field widths of the packed id: both numeric fields get six decimal digits,
# the city code occupies the digits above them.
_FRAME_BASE = 10 ** 6
_CITY_BASE = 10 ** 12
def imgid2int(id):
    """Pack a Cityscapes image id string into a single integer.

    BUG FIX: the original used the float literals 1e6/1e12, routing the id
    through float arithmetic; pure integer arithmetic is exact at any size.
    """
    city, f, s = id.split("_")
    return int(s) + int(f) * _FRAME_BASE + city2int[city] * _CITY_BASE
def int2imgid(num):
    """Unpack an integer produced by imgid2int back into the id string."""
    cityn = num // _CITY_BASE
    f = (num - cityn * _CITY_BASE) // _FRAME_BASE
    s = num % _FRAME_BASE
    return int2city[cityn] + "_" + str(f).zfill(6) + "_" + str(s).zfill(6)
class CityscapesPanoptic:
    """Cityscapes panoptic-segmentation dataset stored in COCO-panoptic format."""
    def __init__(
        self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True
    ):
        """Load the COCO-style annotation json and remember the data folders."""
        with open(ann_file, "r") as f:
            self.coco = json.load(f)
        # sort 'images' field so that they are aligned with 'annotations'
        # i.e., in alphabetical order
        self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"])
        self.img_folder = img_folder
        self.ann_folder = ann_folder
        self.ann_file = ann_file
        self.transforms = transforms
        self.return_masks = return_masks
    def __getitem__(self, idx):
        """Return (image, target) for sample *idx*.

        The target dict carries the packed integer image id, per-segment
        binary masks (optional), labels, boxes derived from the masks, the
        image size, and iscrowd/area when segment info is present.
        """
        ann_info = (
            self.coco["annotations"][idx]
            if "annotations" in self.coco
            else self.coco["images"][idx]
        )
        # image ids look like "<city>_<seq>_<frame>"; images live per-city
        city = ann_info["image_id"].split("_")[0]
        img_path = (
            Path(self.img_folder) / city / (ann_info["image_id"] + "_leftImg8bit.png")
        )
        ann_path = Path(self.ann_folder) / ann_info["file_name"]
        img = Image.open(img_path).convert("RGB")
        w, h = img.size
        if "segments_info" in ann_info:
            # the panoptic PNG encodes segment ids in its RGB channels;
            # decode it and split into one boolean mask per segment
            masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
            masks = rgb2id(masks)
            ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
            masks = masks == ids[:, None, None]
            masks = torch.as_tensor(masks, dtype=torch.uint8)
            labels = torch.tensor(
                [ann["category_id"] for ann in ann_info["segments_info"]],
                dtype=torch.int64,
            )
        target = {}
        target["image_id"] = torch.tensor(
            [
                imgid2int(
                    ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]
                )
            ]
        )
        if self.return_masks:
            target["masks"] = masks
        target["labels"] = labels
        target["boxes"] = masks_to_boxes(masks)
        target["size"] = torch.as_tensor([int(h), int(w)])
        target["orig_size"] = torch.as_tensor([int(h), int(w)])
        if "segments_info" in ann_info:
            for name in ["iscrowd", "area"]:
                target[name] = torch.tensor(
                    [ann[name] for ann in ann_info["segments_info"]]
                )
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target
    def __len__(self):
        """Number of images listed in the annotation file."""
        return len(self.coco["images"])
    def get_height_and_width(self, idx):
        """Return (height, width) for sample *idx* from the metadata only."""
        img_info = self.coco["images"][idx]
        height = img_info["height"]
        width = img_info["width"]
        return height, width
def build_cityscapes_panoptic(image_set, args):
    """Construct the CityscapesPanoptic dataset for image_set ('train'/'val').

    NOTE(review): img_folder_root / ann_folder_root are derived from args and
    asserted to exist, but they are otherwise unused — the hard-coded Google
    Drive paths below are what the dataset actually reads.  Confirm whether
    the args-provided roots should take precedence.
    """
    img_folder_root = Path(args.coco_path)
    ann_folder_root = Path(args.coco_panoptic_path)
    assert img_folder_root.exists(), f"provided path {img_folder_root} does not exist"
    assert ann_folder_root.exists(), f"provided path {ann_folder_root} does not exist"
    ann_file = {
        "train": "/content/drive/MyDrive/cityscapes/gtFine/cityscapes_panoptic_train.json",
        "val": "/content/drive/MyDrive/cityscapes/gtFine/cityscapes_panoptic_val.json",
    }
    img_folder_path = {
        "train": "/content/drive/MyDrive/cityscapes/leftImg8bit/train",
        "val": "/content/drive/MyDrive/cityscapes/leftImg8bit/val",
    }
    ann_folder = {
        "train": "/content/drive/MyDrive/cityscapes/gtFine/cityscapes_panoptic_train",
        "val": "/content/drive/MyDrive/cityscapes/gtFine/cityscapes_panoptic_val",
    }
    dataset = CityscapesPanoptic(
        img_folder_path[image_set],
        ann_folder[image_set],
        ann_file[image_set],
        transforms=make_coco_transforms(image_set),
        return_masks=args.masks,
    )
    return dataset
def build_dataset(image_set, args):
    """Dispatch dataset construction based on args.dataset_file."""
    if args.dataset_file != "coco_panoptic":
        raise ValueError(f"dataset {args.dataset_file} not supported")
    # panopticapi is only imported through this path, so plain-COCO setups
    # never need it installed
    return build_cityscapes_panoptic(image_set, args)
|
adilsammar/detr-fine
|
archived/dataset/cts_dataset.py
|
cts_dataset.py
|
py
| 5,196 |
python
|
en
|
code
| 4 |
github-code
|
6
|
38368937564
|
import string, itertools

ascii_lowercases = list(string.ascii_lowercase)
MAX_WORD_LENGTH = 5

# Generate every lowercase word of length 1..MAX_WORD_LENGTH and append it
# to the shared wordlist file.
for i in range(1, MAX_WORD_LENGTH + 1):
    # PERF FIX: the original re-opened the file once per generated word
    # (26**i opens per length); open it once per word length and stream
    # the words out instead.  Output content and order are unchanged.
    with open("../wordlist.txt", "a") as file:
        for combination in itertools.product(ascii_lowercases, repeat=i):
            file.write("".join(combination) + "\n")
    print("finished!", i)
|
1LCB/hash-cracker
|
complement/wordlist generator.py
|
wordlist generator.py
|
py
| 407 |
python
|
en
|
code
| 2 |
github-code
|
6
|
17466316782
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: cifar-convnet.py
# Author: Yuxin Wu <[email protected]>
import tensorflow as tf
import argparse
import numpy as np
import os
from tensorpack import *
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.summary import *
from tensorpack.utils.gpu import get_nr_gpu
"""
A small convnet model for Cifar10 or Cifar100 dataset.
Cifar10:
91% accuracy after 50k step.
19.3 step/s on Tesla M40
Not a good model for Cifar100, just for demonstration.
"""
class Model(ModelDesc):
    """Small convnet for Cifar10/Cifar100 built with tensorpack's ModelDesc."""
    def __init__(self, cifar_classnum):
        super(Model, self).__init__()
        self.cifar_classnum = cifar_classnum  # 10 or 100 output classes
    def _get_input_vars(self):
        # 30x30x3 crops (matching the RandomCrop/CenterCrop augmentors) + labels
        return [InputVar(tf.float32, [None, 30, 30, 3], 'input'),
                InputVar(tf.int32, [None], 'label')
                ]
    def _build_graph(self, input_vars):
        """Define the conv tower, the total cost (cross-entropy + L2 weight
        decay on the fc layers) and the training-error summary."""
        image, label = input_vars
        is_training = get_current_tower_context().is_training
        # dropout is only active during training
        keep_prob = tf.constant(0.5 if is_training else 1.0)
        if is_training:
            tf.image_summary("train_image", image, 10)
        image = image / 4.0     # just to make range smaller
        with argscope(Conv2D, nl=BNReLU, use_bias=False, kernel_shape=3):
            logits = LinearWrap(image) \
                .Conv2D('conv1.1', out_channel=64) \
                .Conv2D('conv1.2', out_channel=64) \
                .MaxPooling('pool1', 3, stride=2, padding='SAME') \
                .Conv2D('conv2.1', out_channel=128) \
                .Conv2D('conv2.2', out_channel=128) \
                .MaxPooling('pool2', 3, stride=2, padding='SAME') \
                .Conv2D('conv3.1', out_channel=128, padding='VALID') \
                .Conv2D('conv3.2', out_channel=128, padding='VALID') \
                .FullyConnected('fc0', 1024 + 512, nl=tf.nn.relu) \
                .tf.nn.dropout(keep_prob) \
                .FullyConnected('fc1', 512, nl=tf.nn.relu) \
                .FullyConnected('linear', out_dim=self.cifar_classnum, nl=tf.identity)()
        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')
        wrong = symbf.prediction_incorrect(logits, label)
        # monitor training error
        add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
        # weight decay on all W of fc layers
        wd_cost = tf.mul(0.0004,
                         regularize_cost('fc.*/W', tf.nn.l2_loss),
                         name='regularize_loss')
        add_moving_summary(cost, wd_cost)
        add_param_summary([('.*/W', ['histogram'])])   # monitor W
        self.cost = tf.add_n([cost, wd_cost], name='cost')
def get_data(train_or_test, cifar_classnum):
    """Return the augmented, batched dataflow for 'train' or 'test'."""
    isTrain = train_or_test == 'train'
    if cifar_classnum == 10:
        ds = dataset.Cifar10(train_or_test)
    else:
        ds = dataset.Cifar100(train_or_test)
    if isTrain:
        # training: random crop + flip + photometric jitter + elastic-style
        # deformation, then per-image mean/variance normalization
        augmentors = [
            imgaug.RandomCrop((30, 30)),
            imgaug.Flip(horiz=True),
            imgaug.Brightness(63),
            imgaug.Contrast((0.2,1.8)),
            imgaug.GaussianDeform(
                [(0.2, 0.2), (0.2, 0.8), (0.8,0.8), (0.8,0.2)],
                (30,30), 0.2, 3),
            imgaug.MeanVarianceNormalize(all_channel=True)
        ]
    else:
        # test: deterministic center crop + the same normalization
        augmentors = [
            imgaug.CenterCrop((30, 30)),
            imgaug.MeanVarianceNormalize(all_channel=True)
        ]
    ds = AugmentImageComponent(ds, augmentors)
    # keep the incomplete last batch only at test time
    ds = BatchData(ds, 128, remainder=not isTrain)
    if isTrain:
        ds = PrefetchData(ds, 3, 2)
    return ds
def get_config(cifar_classnum):
    """Assemble the tensorpack TrainConfig: data, optimizer, callbacks,
    learning-rate schedule and epoch bookkeeping."""
    logger.auto_set_dir()
    # prepare dataset
    dataset_train = get_data('train', cifar_classnum)
    step_per_epoch = dataset_train.size()
    dataset_test = get_data('test', cifar_classnum)
    sess_config = get_default_sess_config(0.5)
    lr = symbf.get_scalar_var('learning_rate', 1e-2, summary=True)
    def lr_func(lr):
        # decay driven by validation error; stop training below 3e-5
        if lr < 3e-5:
            raise StopTraining()
        return lr * 0.31
    return TrainConfig(
        dataset=dataset_train,
        optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),
        callbacks=Callbacks([
            StatPrinter(), ModelSaver(),
            InferenceRunner(dataset_test, ClassificationError()),
            # shrink the lr whenever val_error stops improving
            StatMonitorParamSetter('learning_rate', 'val_error', lr_func,
                                   threshold=0.001, last_k=10),
        ]),
        session_config=sess_config,
        model=Model(cifar_classnum),
        step_per_epoch=step_per_epoch,
        max_epoch=150,
    )
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100',
                        type=int, default=10)
    args = parser.parse_args()
    # restrict TensorFlow to the requested GPUs (default: GPU 0)
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    with tf.Graph().as_default():
        config = get_config(args.classnum)
        if args.load:
            # resume from a saved checkpoint
            config.session_init = SaverRestore(args.load)
        if args.gpu:
            config.nr_tower = len(args.gpu.split(','))
        # single GPU -> queue-based trainer; otherwise synchronous multi-GPU
        nr_gpu = get_nr_gpu()
        if nr_gpu == 1:
            QueueInputTrainer(config).train()
        else:
            SyncMultiGPUTrainer(config).train()
|
jxwufan/NLOR_A3C
|
tensorpack/examples/cifar-convnet.py
|
cifar-convnet.py
|
py
| 5,549 |
python
|
en
|
code
| 16 |
github-code
|
6
|
6363984731
|
# This program reads data from a CD catalog data set
# and builds arrays for the catalog items
# Each array contains the title, artist, country, price, and year
# At the bottom, the total number of items and average price is
# displayed
# References: datacamp.com, stackoverflow.com
# References: datacamp.com, stackoverflow.com
# References: educba.com, guru99.com, nanonets.com
# References: realpython.com, edureka.co, stackabuse.com
# References: superfastpython.com, https://developer.rhino3d.com/
import sys
# Need a function to read the XML file and return arrays
# of all the titles, artists, countries, prices, and years
def read_file(xml_file):
    """Parse the CD catalog file and return five parallel lists:
    (titles, artists, countries, prices, years), one entry per <CD> block.

    Exits the program when the file contains no <CD> entries at all.
    """
    def extract(fragment, tag):
        # mirror the original slicing: text between <TAG> and </TAG>
        opening = "<" + tag + ">"
        closing = "</" + tag + ">"
        begin = fragment.find(opening)
        return fragment[begin + len(opening):fragment.find(closing)]

    with open(xml_file, 'r') as handle:
        remaining = handle.read()

    titles, artists, countries, prices, years = [], [], [], [], []
    while True:
        cd_start = remaining.find("<CD>")
        cd_end = remaining.find("</CD>")
        if cd_start == -1 or cd_end == -1:
            break
        record = remaining[cd_start:cd_end]
        titles.append(extract(record, "TITLE"))
        artists.append(extract(record, "ARTIST"))
        countries.append(extract(record, "COUNTRY"))
        prices.append(extract(record, "PRICE"))
        years.append(extract(record, "YEAR"))
        # drop the consumed <CD>...</CD> block and continue
        remaining = remaining[cd_end + len("</CD>"):]

    # check for empty file
    if len(titles) == 0:
        print("Error: Missing or bad data.")
        sys.exit()
    return titles, artists, countries, prices, years
# Find the average price
def calculate_average(xml_file):
    """Return the mean price across all items in the catalog file."""
    titles, _, _, prices, _ = read_file(xml_file)
    # titles and prices always have the same length (built in lockstep)
    return sum(float(price) for price in prices) / len(titles)
# Calculate total number of items
def calculate_total(xml_file):
    """Return the number of items in the catalog file.

    BUG FIX: read_file returns a 5-tuple of lists; the original assigned
    the whole tuple to `title_array` and took its len(), which is always 5
    regardless of the data.  Unpack the tuple and count the titles.
    """
    title_array, _, _, _, _ = read_file(xml_file)
    return len(title_array)
# Display arrays
def display_result(xml_file):
    """Print one 'title - artist - country - price - year' line per item."""
    titles, artists, countries, prices, years = read_file(xml_file)
    if not titles:
        print("Error: Missing or bad data")
        return
    for title, artist, country, price, year in zip(
            titles, artists, countries, prices, years):
        print(f"{title} - {artist} - {country} - {price} - {year}")
def display_result2(xml_file):
    """Print the item count and the average price (rounded to 2 places)."""
    titles, _, _, prices, _ = read_file(xml_file)
    if not titles:
        print("Error: Missing or bad data")
        return
    count = len(titles)
    average = sum(float(price) for price in prices) / count
    print("Number of items: " + str(count))
    print("Average price: " + str(round(average, 2)))
# Main function and call
def main():
    """Entry point: print the catalog listing, then the summary statistics."""
    xml_file = "cd_catalog.xml"
    display_result(xml_file)
    display_result2(xml_file)
main()
|
Prowler01/CIS106_ThomasLam
|
Final Project/Final Project.py
|
Final Project.py
|
py
| 3,748 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20894068105
|
import cv2
import math
import monta
import numpy as np
import matcompat
from scipy import signal
import matplotlib.pyplot as plt
# Gabor filter-bank parameters: wavelength, 8 orientations, 4 aspect ratios
lammbda=6
pi = math.pi
theta = np.arange(0, (np.pi-np.pi/8)+(np.pi/8), np.pi/8)
psi = 0
gamma = np.linspace(.4,1,4)
gamma = np.arange(.4, 1.2, .2)  # overrides the linspace above with the same range
b = 4
sigma = (1/pi)*math.sqrt((math.log(2)/2))*((2**b+1)/(2**b-1))*lammbda
l = int(12/2)  # kernel half-size -> 13x13 kernels
gt = 0
# load the image, normalise to roughly [-1, 1], and build a 4-channel
# opponent-colour representation (R-G, Y-B, intensity, max-min spread)
imagen0 = np.float32(cv2.imread('images/NegroyYo4.jpg'))
imagen0 = cv2.cvtColor(imagen0,cv2.COLOR_BGR2RGB)
imagen0 = cv2.resize(imagen0, (320, 240))
imagen1 = (imagen0-128)/127
imagen = np.zeros((240,320,4))
imagen[:,:,0]=(imagen1[:,:,0]-imagen1[:,:,1])/2
imagen[:,:,1]=(imagen1[:,:,0]+imagen1[:,:,1]-2*imagen1[:,:,2])/4
imagen[:,:,2]=(imagen1[:,:,0]+imagen1[:,:,1]+imagen1[:,:,2])/3
s = matcompat.size(imagen0)
for i in np.arange(1., (s[0])+1):
    for j in np.arange(1., (s[1])+1):
        imagen[int(i)-1,int(j)-1,3] = ((imagen1[int(i)-1,int(j)-1,:]).max()-(imagen1[int(i)-1,int(j)-1,:]).min())/2
contador = 0
g = np.zeros((13,13))
imagenSalida=np.zeros((240,320,4,32))
# build each (theta, gamma) Gabor kernel and convolve all four channels
for i in range(len(theta)):
    for f in range(len(gamma)):
        for j in range(-l,l+1):
            for k in range(-l,l+1):
                x = j*math.cos(theta[i])+k*math.sin(theta[i])
                y = k*math.cos(theta[i])-j*math.sin(theta[i])
                g[j+l,k+l]=math.exp(-(x**2 + (gamma[f]**2)*(y**2))/(2*sigma**2))*math.cos((2*pi*x/lammbda)+psi)
        imagenSalida[:,:,0,contador] = signal.convolve2d(imagen[:,:,0], g, boundary='symm', mode='same')
        imagenSalida[:,:,1,contador] = signal.convolve2d(imagen[:,:,1], g, boundary='symm', mode='same')
        imagenSalida[:,:,2,contador] = signal.convolve2d(imagen[:,:,2], g, boundary='symm', mode='same')
        imagenSalida[:,:,3,contador] = signal.convolve2d(imagen[:,:,3], g, boundary='symm', mode='same')
        contador = contador + 1
s = matcompat.size(imagenSalida)
FM = np.zeros(s)
area = []
# BUG FIX: this loop was written as `for i in range(s[3])` while the body
# indexed every array with the stale variable `k` (left at 6 by the kernel
# loops above), so only one filter slice was ever thresholded and the inner
# loops shadowed the outer index.  Iterate over the filter index `k`.
for k in range(s[3]):
    alpha = .6
    m1 = alpha*imagenSalida[:,:,0,k].max().max()
    m2 = alpha*imagenSalida[:,:,1,k].max().max()
    m3 = alpha*imagenSalida[:,:,2,k].max().max()
    m4 = alpha*imagenSalida[:,:,3,k].max().max()
    for i in range(s[0]):
        for j in range(s[1]):
            if imagenSalida[i,j,0,k]>m1:
                FM[i,j,0,k] = 1
            if imagenSalida[i,j,1,k]>m2:
                FM[i,j,1,k]=1
            if imagenSalida[i,j,2,k]>m3:
                FM[i,j,2,k]=1
            if imagenSalida[i,j,3,k]>m4:
                FM[i,j,3,k]=1
    [area,num] = monta.monta(FM[:,:,0,k])
cv2.imshow("input", imagen)
cv2.waitKey(0)
cv2.destroyAllWindows()
#theta = [round(float(i)/10000000,4) for i in range(0,int((pi-pi/8)*10000000),int((pi/8)*10000000))]
#gamma = [float(i)/10 for i in range(4,11,2)]
|
ErickJuarez/AtencionSelectiva
|
Python/main.py
|
main.py
|
py
| 2,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36030730386
|
"""Timezones lookup."""
import concurrent.futures
import os
import shutil
import subprocess
import sys
import time
import traceback
from datetime import datetime
from multiprocessing import cpu_count
from pathlib import Path
import pytz
import requests
import tzlocal
from fuzzywuzzy import process
import pycountry
import albert as v0
# plugin metadata consumed by Albert
__title__ = "Timezones lookup"
__version__ = "0.4.0"
__triggers__ = "tz "
__authors__ = "Nikos Koukis"
__homepage__ = (
    "https://github.com/bergercookie/awesome-albert-plugins/blob/master/plugins/timezones"
)
__py_deps__ = ["pycountry", "fuzzywuzzy",
               "tzlocal", "requests", "traceback", "pytz"]
icon_path = str(Path(__file__).parent / "timezones")
cache_path = Path(v0.cacheLocation()) / "timezones"
config_path = Path(v0.configLocation()) / "timezones"
data_path = Path(v0.dataLocation()) / "timezones"
country_logos_path = data_path / "logos"
dev_mode = False
# country code -> cities
code_to_cities = dict({k: v for k, v in pytz.country_timezones.items()})
codes = list(code_to_cities.keys())
# city (tz database name) -> country code
city_to_code = {vi: k for k, v in pytz.country_timezones.items() for vi in v}
cities = list(city_to_code.keys())
country_to_code = {
    c.name: c.alpha_2 for c in pycountry.countries if c.alpha_2 in codes}
# NOTE: each value is a one-element list wrapping the city list, which is
# why handleQuery unpacks it with extend(*country_to_cities[m])
country_to_cities = {
    country: [code_to_cities[code]] for country, code in country_to_code.items()
}
countries = list(country_to_code.keys())
local_tz_str = tzlocal.get_localzone().zone
def download_logo_for_code(code: str) -> bytes:
    """
    Fetch the flag image for the given country code and return its bytes.

    On a failed request only a warning is printed; the (possibly empty)
    response body is returned regardless.

    NOTE(review): requests has no adapter for the file:// scheme and will
    raise InvalidSchema on the URL below; the original countryflags.io URL
    is kept in the comment — confirm which source is intended.
    """
    # ret = requests.get(f"https://www.countryflags.io/{code}/flat/64.png")
    ret = requests.get(f"file:///64/{code}.png")
    if not ret.ok:
        print(f"[E] Couldn't download logo for code {code}")
    return ret.content
def get_logo_path_for_code(code: str) -> Path:
    """Location of the cached flag image for the given country code."""
    filename = f"{code}.png"
    return country_logos_path / filename
def save_logo_for_code(code: str, data: bytes):
    """Write the downloaded logo bytes to the cache location for *code*."""
    target = get_logo_path_for_code(code)
    with open(target, "wb") as fh:
        fh.write(data)
def download_and_save_logo_for_code(code):
    """Fetch the logo for *code* and persist it in the local cache."""
    payload = download_logo_for_code(code)
    save_logo_for_code(code, payload)
def download_all_logos():
    """Download every country flag concurrently (one task per country code).

    Failures are reported per-code and do not abort the other downloads.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=cpu_count()) as pool:
        pending = {
            pool.submit(download_and_save_logo_for_code, c): c for c in codes
        }
        for finished in concurrent.futures.as_completed(pending):
            code = pending[finished]
            try:
                finished.result()
            except Exception as exc:
                print(
                    f"[W] Fetching logo for {code} generated an exception: {exc}")
# plugin main functions -----------------------------------------------------------------------
def initialize():
    """Called when the extension is loaded (ticked in the settings) - blocking."""
    # make sure every plugin directory exists before anything touches them
    for directory in (cache_path, config_path, data_path):
        directory.mkdir(parents=False, exist_ok=True)
    country_logos_path.mkdir(exist_ok=True)
    # fetch all the flag images once, on first startup only
    if not list(country_logos_path.iterdir()):
        print("Downloading country logos")
        started = time.time()
        download_all_logos()
        print(f"Downloaded country logos - Took {time.time() - started} seconds")
def finalize():
    """Called when the extension is unloaded; nothing needs cleaning up."""
    pass
def get_uniq_elements(seq):
    """Return the elements of *seq* with duplicates removed, order preserved.

    .. ref:: https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-whilst-preserving-order
    """
    observed = set()
    unique = []
    for item in seq:
        if item not in observed:
            observed.add(item)
            unique.append(item)
    return unique
def handleQuery(query) -> list:
    """Hook that is called by albert with *every new keypress*.""" # noqa
    results = []
    if query.isTriggered:
        try:
            query.disableSort()
            results_setup = setup(query)
            if results_setup:
                return results_setup
            query_str = query.string.strip()
            # fuzzy-match the query against both city (timezone) names and
            # country names, keeping the 8 best hits
            matched = [
                elem[0] for elem in process.extract(query_str, [*cities, *countries], limit=8)
            ]
            matched2 = []
            # replace country names with its cities
            for m in matched:
                if m in countries:
                    matched2.extend(*country_to_cities[m])
                else:
                    matched2.append(m)
            matched2 = get_uniq_elements(matched2)
            # add own timezone:
            if local_tz_str in matched2:
                matched2.remove(local_tz_str)
                matched2.insert(0, local_tz_str)
            results.extend([get_as_item(m) for m in matched2])
        except Exception:  # user to report error
            if dev_mode:  # let exceptions fly!
                print(traceback.format_exc())
                raise
            # production mode: surface a single item that copies the
            # traceback so the user can report it
            results.insert(
                0,
                v0.Item(
                    id=__title__,
                    icon=icon_path,
                    text="Something went wrong! Press [ENTER] to copy error and report it",
                    actions=[
                        v0.ClipAction(
                            f"Copy error - report it to {__homepage__[8:]}",
                            f"{traceback.format_exc()}",
                        )
                    ],
                ),
            )
    return results
# supplementary functions ---------------------------------------------------------------------
def get_as_item(city: str):
    """Return an item - ready to be appended to the items list and be rendered by Albert.

    Shows the current wall-clock time in *city* and links to the matching
    zeitverschiebung.net page.
    """
    code = city_to_code[city]
    icon = str(get_logo_path_for_code(code))
    # convert "now" (UTC) into the target timezone
    utc_dt = pytz.utc.localize(datetime.utcnow())
    dst_tz = pytz.timezone(city)
    dst_dt = utc_dt.astimezone(dst_tz)
    text = f"{str(dst_dt)}"
    subtext = f"[{code}] | {city}"
    return v0.Item(
        id=__title__,
        icon=icon,
        text=text,
        subtext=subtext,
        completion=city,
        actions=[
            v0.UrlAction(
                "Open in zeitverschiebung.net",
                f'https://www.zeitverschiebung.net/en/timezone/{city.replace("/", "--").lower()}',
            ),
        ],
    )
def sanitize_string(s: str) -> str:
    """HTML-escape '<' so the renderer does not treat it as markup.

    BUG FIX: the original replaced "<" with itself — a no-op, almost
    certainly a mangled "&lt;".  Escape it properly.
    """
    return s.replace("<", "&lt;")
def get_as_subtext_field(field, field_title=None) -> str:
    """Format *field* for a subtext line, optionally prefixed by *field_title*.

    Falsy fields yield the empty string so callers can concatenate freely.
    """
    if not field:
        return ""
    formatted = f"{field} | "
    if field_title:
        formatted = f"{field_title}: " + formatted
    return formatted
def save_data(data: str, data_name: str):
    """Save a piece of data in the configuration directory."""
    target = config_path / data_name
    target.write_text(data)
def load_data(data_name) -> str:
    """Load a piece of data from the configuration directory.

    Returns the first whitespace-delimited token of the file's first line.
    """
    with open(config_path / data_name, "r") as fh:
        first_line = fh.readline()
    return first_line.strip().split()[0]
def setup(query):
"""Setup is successful if an empty list is returned.
Use this function if you need the user to provide you data
"""
results = []
return results
|
ppablocruzcobas/Dotfiles
|
albert/timezones/__init__.py
|
__init__.py
|
py
| 7,290 |
python
|
en
|
code
| 2 |
github-code
|
6
|
36653559534
|
#!/usr/bin/python3
# Codeforces - Round 640
# Problem A - Sum of Round Numbers
def read_int():
    """Read one line from stdin and return it as an integer."""
    return int(input())
def read_ints():
    """Read one line of single-space-separated integers from stdin."""
    return [int(token) for token in input().split(" ")]
#---
def solve(n):
    """Decompose n into 'round numbers': one d*10**k term per nonzero digit.

    Returns the terms as strings, most significant digit first.
    """
    digits = str(n)
    width = len(digits)
    return [
        digit + '0' * (width - position - 1)
        for position, digit in enumerate(digits)
        if digit != '0'
    ]
# Main
t = read_int()  # number of test cases
for case in range(t):
    n = read_int()
    sol = solve(n)
    # output: the number of round-number terms, then the terms themselves
    print (len(sol))
    print (" ".join(sol))
|
thaReal/MasterChef
|
codeforces/round_640/round.py
|
round.py
|
py
| 495 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7985866436
|
import numpy as np
import cv2
import time
def my_padding(src, filter):
    """Pad *src* by half the filter size on each side, replicating the edges.

    *filter* may be a kernel array (its shape is used) or an explicit
    (height, width) tuple.  Returns an array of shape
    (h + 2*(fh//2), w + 2*(fw//2)).
    """
    h, w = src.shape
    if isinstance(filter, tuple):
        fh, fw = filter
    else:
        fh, fw = filter.shape
    ph, pw = fh // 2, fw // 2
    padded = np.zeros((h + 2 * ph, w + 2 * pw))
    padded[ph:ph + h, pw:pw + w] = src
    # repetition padding: replicate the top and bottom source rows
    padded[:ph, pw:pw + w] = src[0, :]
    padded[ph + h:, pw:pw + w] = src[h - 1, :]
    # replicate the left/right columns of the padded image (fills corners too)
    padded[:, :pw] = padded[:, pw:pw + 1]
    padded[:, pw + w:] = padded[:, pw + w - 1:pw + w]
    return padded
def my_filtering(src, filter):
    """Correlate *src* with *filter*, using edge-replication padding so the
    output has the same shape as the input."""
    h, w = src.shape
    fh, fw = filter.shape
    # pad via the hand-written my_padding helper
    padded = my_padding(src, filter)
    dst = np.zeros((h, w))
    for r in range(h):
        for c in range(w):
            window = padded[r:r + fh, c:c + fw]
            dst[r, c] = np.sum(window * filter)
    return dst
def get_my_sobel():
    """Return the separable 3x3 Sobel kernels for the x- and y-derivatives."""
    sobel_x = np.array([[1], [2], [1]]) @ np.array([[-1, 0, 1]])
    sobel_y = np.array([[-1], [0], [1]]) @ np.array([[1, 2, 1]])
    return sobel_x, sobel_y
def calc_derivatives(src):
    """Return the first derivatives (Ix, Iy) of *src* via Sobel filtering."""
    kernel_x, kernel_y = get_my_sobel()
    return my_filtering(src, kernel_x), my_filtering(src, kernel_y)
def find_local_maxima(src, ksize):
    """Non-maximum suppression: keep pixels equal to the max of their window.

    Windows whose maximum is zero are skipped (no response there).
    """
    h, w = src.shape
    padded = np.zeros((h + ksize, w + ksize))
    padded[ksize // 2:h + ksize // 2, ksize // 2:w + ksize // 2] = src
    dst = np.zeros((h, w))
    for r in range(h):
        for c in range(w):
            window_max = np.max(padded[r:r + ksize, c:c + ksize])
            if window_max != 0 and src[r, c] == window_max:
                dst[r, c] = src[r, c]
    return dst
def get_integral_image(src):
    """Return the border-clamped 3x3 box sum of *src*, via an integral image.

    NOTE(review): despite the name, the returned array is not the integral
    image itself — it is a 3x3 neighbourhood sum derived from one.
    """
    assert len(src.shape) == 2
    h, w = src.shape
    # Pass 1: classic integral image (cumulative row sums + previous row).
    integral = np.zeros(src.shape)
    for r in range(h):
        running = 0
        for c in range(w):
            running += src[r][c]
            integral[r][c] = running
            if r > 0:
                integral[r][c] += integral[r - 1][c]
    # Pass 2: 3x3 box sum (clamped at the borders) from four corner lookups.
    box = np.zeros(src.shape)
    for r in range(h):
        for c in range(w):
            top, bottom = max(0, r - 1), min(h - 1, r + 1)
            left, right = max(0, c - 1), min(w - 1, c + 1)
            total = integral[bottom][right]
            if top > 0:
                total -= integral[top - 1][right]
            if left > 0:
                total -= integral[bottom][left - 1]
            if left > 0 and top > 0:
                total += integral[top - 1][left - 1]
            box[r][c] = total
    return box
def calc_M_harris(IxIx, IxIy, IyIy, fsize = 5):
    """Build the per-pixel 2x2 Harris structure tensor M.

    Each entry of M[row, col] is the sum of the corresponding derivative
    product over an fsize x fsize window (replicate-padded at the borders).

    Fix: the O(fsize^2) Python inner loops are replaced with vectorized
    np.sum window sums (the version the original had commented out);
    results are identical up to float summation order. Dead commented-out
    code removed.
    """
    assert IxIx.shape == IxIy.shape and IxIx.shape == IyIy.shape
    h, w = IxIx.shape
    M = np.zeros((h, w, 2, 2))
    IxIx_pad = my_padding(IxIx, (fsize, fsize))
    IxIy_pad = my_padding(IxIy, (fsize, fsize))
    IyIy_pad = my_padding(IyIy, (fsize, fsize))
    for row in range(h):
        for col in range(w):
            sum_xx = np.sum(IxIx_pad[row:row + fsize, col:col + fsize])
            sum_xy = np.sum(IxIy_pad[row:row + fsize, col:col + fsize])
            sum_yy = np.sum(IyIy_pad[row:row + fsize, col:col + fsize])
            M[row, col, 0, 0] = sum_xx
            M[row, col, 0, 1] = sum_xy
            M[row, col, 1, 0] = sum_xy
            M[row, col, 1, 1] = sum_yy
    return M
def harris_detector(src, k = 0.04, threshold_rate = 0.01, fsize=5):
    """Harris corner detector: returns a copy of *src* with corners painted red.

    src: BGR image (h, w, 3). k: Harris sensitivity constant.
    threshold_rate: responses below threshold_rate * max(R) are discarded.
    fsize: window size for the structure tensor.
    """
    harris_img = src.copy()
    h, w, c = src.shape
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) / 255.
    # calculate Ix, Iy
    Ix, Iy = calc_derivatives(gray)
    # Square of derivatives
    IxIx = Ix**2
    IyIy = Iy**2
    IxIy = Ix * Iy
    start = time.perf_counter()  # start timing
    M_harris = calc_M_harris(IxIx, IxIy, IyIy, fsize)
    end = time.perf_counter()  # stop timing
    print('M_harris time : ', end-start)
    R = np.zeros((h, w))
    for row in range(h):
        for col in range(w):
            # Harris & Stephens (1988) response: det(M) - k * trace(M)^2
            det_M = M_harris[row, col, 0, 0] * M_harris[row, col, 1, 1] - (M_harris[row, col, 0, 1] * M_harris[row, col, 1, 0])
            trace_M = M_harris[row, col, 0, 0] + M_harris[row, col, 1, 1]
            R[row, col] = det_M - k*trace_M*trace_M
    # thresholding, then non-maximum suppression in a 21x21 window
    R[R < threshold_rate * np.max(R)] = 0
    R = find_local_maxima(R, 21)
    R = cv2.dilate(R, None)  # thicken surviving corner points for display
    harris_img[R != 0]=[0, 0, 255]  # paint corners red (BGR order)
    return harris_img
def harris_detector_integral(src, k = 0.04, threshold_rate = 0.01, fsize=5):
    """Harris corner variant that pre-box-filters derivative products.

    Same pipeline as harris_detector(), but the derivative products are
    first passed through get_integral_image() (a 3x3 box sum).
    NOTE(review): those box-summed images are then window-summed again by
    calc_M_harris — effectively smoothing twice; confirm this is intended.
    """
    harris_img = src.copy()
    h, w, c = src.shape
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) / 255.
    # calculate Ix, Iy
    Ix, Iy = calc_derivatives(gray)
    # Square of derivatives
    IxIx = Ix**2
    IyIy = Iy**2
    IxIy = Ix * Iy
    start = time.perf_counter()  # start timing
    IxIx_integral = get_integral_image(IxIx)
    IxIy_integral = get_integral_image(IxIy)
    IyIy_integral = get_integral_image(IyIy)
    end = time.perf_counter()  # stop timing
    print('make integral image time : ', end-start)
    start = time.perf_counter()  # start timing
    M_integral = calc_M_harris(IxIx_integral, IxIy_integral, IyIy_integral, fsize)
    end = time.perf_counter()  # stop timing
    print('M_harris integral time : ', end-start)
    R = np.zeros((h, w))
    for row in range(h):
        for col in range(w):
            # Harris & Stephens (1988) response: det(M) - k * trace(M)^2
            det_M = M_integral[row, col, 0, 0] * M_integral[row, col, 1, 1] - (M_integral[row, col, 0, 1] * M_integral[row, col, 1, 0])
            trace_M = M_integral[row, col, 0, 0] + M_integral[row, col, 1, 1]
            R[row, col] = det_M - k * trace_M * trace_M
    # thresholding, then non-maximum suppression in a 21x21 window
    R[R < threshold_rate * np.max(R)] = 0
    R = find_local_maxima(R, 21)
    R = cv2.dilate(R, None)  # thicken surviving corner points for display
    harris_img[R != 0]=[0, 0, 255]  # paint corners red (BGR order)
    return harris_img
def main():
    """Load the test image, run both Harris variants, and display the results."""
    src = cv2.imread('zebra.png') # shape : (552, 435, 3)
    print('start!')
    cv2.imshow('original ', src)
    harris_img = harris_detector(src)
    cv2.imshow('harris_img ' + '201402414', harris_img)
    harris_integral_img = harris_detector_integral(src)
    cv2.imshow('harris_integral_img ' + '201402414' , harris_integral_img)
    cv2.waitKey()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main()
|
201402414/CG
|
[CG]201402414_장수훈_5주차_과제/[CG]201402414_장수훈_5주차_과제/integral_image_report.py
|
integral_image_report.py
|
py
| 8,317 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37055851732
|
from unittest.runner import TextTestRunner
import urllib.request
import unittest
from typing import TypeVar, Callable, List
T = TypeVar('T')
S = TypeVar('S')
#################################################################################
# EXERCISE 1
#################################################################################
def mysort(lst: List[T], compare: Callable[[T, T], int]) -> List[T]:
    """Bubble-sort *lst* using *compare* and return it.

    *compare* takes two elements and returns -1 if the left is smaller,
    1 if it is larger, and 0 if they are equal.
    NOTE: the input list itself is mutated (no copy is made).
    """
    dirty = True
    while dirty:
        dirty = False
        for idx in range(len(lst) - 1):
            if compare(lst[idx], lst[idx + 1]) == 1:
                lst[idx], lst[idx + 1] = lst[idx + 1], lst[idx]
                dirty = True
    return lst
def mybinsearch(lst: List[T], elem: S, compare: Callable[[T, S], int]) -> int:
    """Binary-search sorted *lst* for *elem* using *compare*.

    Returns the position of the first (leftmost) match for elem in lst,
    or -1 if elem is absent. compare(left, right) returns -1/0/1.

    Fix: the original returned an arbitrary match when elem occurred more
    than once; this version keeps searching left after a hit to honour
    the documented "leftmost" contract.
    """
    lo, hi = 0, len(lst) - 1
    found = -1
    while lo <= hi:
        mid = (lo + hi) // 2
        outcome = compare(lst[mid], elem)
        if outcome == 0:
            found = mid       # record the match, keep looking further left
            hi = mid - 1
        elif outcome == 1:
            hi = mid - 1
        else:
            lo = mid + 1
    return found
class Student():
    """Custom class to test generic sorting and searching.

    NOTE(review): equality compares names only (GPA is ignored), and no
    __hash__ is defined alongside __eq__, so instances are unhashable.
    """
    def __init__(self, name: str, gpa: float):
        self.name = name
        self.gpa = gpa
    def __eq__(self, other):
        return self.name == other.name
# 30 Points (total)
def test1():
    """Tests for generic sorting and binary search."""
    print(80 * "#" + "\nTests for generic sorting and binary search.")
    for sub in (test1_1, test1_2, test1_3, test1_4, test1_5):
        sub()
# 6 Points
def test1_1():
    """Sort ints."""
    print("\t-sort ints")
    checker = unittest.TestCase()
    intcmp = lambda x,y: 0 if x == y else (-1 if x < y else 1)
    checker.assertEqual(mysort([ 4, 3, 7, 10, 9, 2 ], intcmp), [2, 3, 4, 7, 9, 10])
# 6 Points
def test1_2():
    """Sort strings based on their last character."""
    print("\t-sort strings on their last character")
    checker = unittest.TestCase()
    suffixcmp = lambda x,y: 0 if x[-1] == y[-1] else (-1 if x[-1] < y[-1] else 1)
    checker.assertEqual(mysort([ 'abcd', 'aacz', 'zasa' ], suffixcmp), [ 'zasa', 'abcd', 'aacz' ])
# 6 Points
def test1_3():
    """Sort students based on their GPA."""
    print("\t-sort students on their GPA.")
    checker = unittest.TestCase()
    gpacmp = lambda x,y: 0 if x.gpa == y.gpa else (-1 if x.gpa < y.gpa else 1)
    roster = [ Student('Josh', 3.0), Student('Angela', 2.5), Student('Vinesh', 3.8), Student('Jia', 3.5) ]
    expected = [ Student('Angela', 2.5), Student('Josh', 3.0), Student('Jia', 3.5), Student('Vinesh', 3.8) ]
    checker.assertEqual(mysort(roster, gpacmp), expected)
# 6 Points
def test1_4():
    """Binary search for ints."""
    print("\t-binsearch ints")
    checker = unittest.TestCase()
    intcmp = lambda x,y: 0 if x == y else (-1 if x < y else 1)
    ordered = mysort([ 4, 3, 7, 10, 9, 2 ], intcmp)
    checker.assertEqual(mybinsearch(ordered, 3, intcmp), 1)
    checker.assertEqual(mybinsearch(ordered, 10, intcmp), 5)
    checker.assertEqual(mybinsearch(ordered, 11, intcmp), -1)
# 6 Points
def test1_5():
    """Binary search for students by gpa."""
    print("\t-binsearch students")
    checker = unittest.TestCase()
    stcmp = lambda x,y: 0 if x.gpa == y.gpa else (-1 if x.gpa < y.gpa else 1)
    stbincmp = lambda x,y: 0 if x.gpa == y else (-1 if x.gpa < y else 1)
    roster = mysort([ Student('Josh', 3.0), Student('Angela', 2.5), Student('Vinesh', 3.8), Student('Jia', 3.5) ], stcmp)
    checker.assertEqual(mybinsearch(roster, 3.5, stbincmp), 2)
    checker.assertEqual(mybinsearch(roster, 3.7, stbincmp), -1)
#################################################################################
# EXERCISE 2
#################################################################################
class PrefixSearcher():
    def __init__(self, document, k):
        """
        Index every window of *document* of length up to *k*.

        Fixes: the original loop stopped at len(document) - 1, so the
        final character was never indexed and could not be found; it also
        sorted the windows by length via mysort, which search() never
        relied on, so the sort is dropped.
        """
        self.k = k
        self.strings = [document[start:start + k] for start in range(len(document))]
    def search(self, q):
        """
        Return True if the document contains search string q (of length
        up to k).

        Fix: raises ValueError when q is longer than k, as the original
        docstring promised (such a q can never match a k-length window).
        """
        if len(q) > self.k:
            raise ValueError("search string longer than index window k")
        return any(q in window for window in self.strings)
# 30 Points
def test2():
    """Substring-search tests for PrefixSearcher."""
    print("#" * 80 + "\nSearch for substrings up to length n")
    for sub in (test2_1, test2_2):
        sub()
# 15Points
def test2_1():
    """PrefixSearcher on a tiny document, k = 1 and k = 2."""
    print("\t-search in hello world")
    checker = unittest.TestCase()
    searcher = PrefixSearcher("Hello World!", 1)
    checker.assertTrue(searcher.search("l"))
    checker.assertTrue(searcher.search("e"))
    checker.assertFalse(searcher.search("h"))
    checker.assertFalse(searcher.search("Z"))
    checker.assertFalse(searcher.search("Y"))
    searcher = PrefixSearcher("Hello World!", 2)
    checker.assertTrue(searcher.search("l"))
    checker.assertTrue(searcher.search("ll"))
    checker.assertFalse(searcher.search("lW"))
# 20 Points
def test2_2():
    """PrefixSearcher on the opening of Moby Dick (requires network access)."""
    print("\t-search in Moby Dick")
    checker = unittest.TestCase()
    md_text = urllib.request.urlopen('https://www.gutenberg.org/files/2701/2701-0.txt').read().decode()
    searcher = PrefixSearcher(md_text[0:1000], 4)
    checker.assertTrue(searcher.search("Moby"))
    checker.assertTrue(searcher.search("Dick"))
#################################################################################
# EXERCISE 3
#################################################################################
class SuffixArray():
    def __init__(self, document: str):
        """
        Creates a suffix array for document (a string): every suffix,
        lexicographically sorted.

        Fix: uses the built-in sorted() instead of the O(n^2) bubble
        mysort; the resulting order is identical.
        """
        self.sa = sorted(document[i:] for i in range(len(document)))
    def positions(self, searchstr: str):
        """
        Return the indexes *within the suffix array* whose suffixes start
        with searchstr (a contiguous run, since the array is sorted).
        """
        return [idx for idx, suffix in enumerate(self.sa)
                if suffix.startswith(searchstr)]
    def contains(self, searchstr: str):
        """
        Return True if searchstr occurs in the document, else False.

        Fix: the original fell off the end of the loop and returned None
        on a miss; this returns an explicit bool.
        """
        return any(searchstr in suffix for suffix in self.sa)
# 40 Points
def test3():
    """Test suffix arrays."""
    print(80 * "#" + "\nTest suffix arrays.")
    for sub in (test3_1, test3_2):
        sub()
# 20 Points
def test3_1():
    """SuffixArray membership on a tiny document."""
    print("\t-suffixarray on Hello World!")
    checker = unittest.TestCase()
    sa = SuffixArray("Hello World!")
    checker.assertTrue(sa.contains("l"))
    checker.assertTrue(sa.contains("e"))
    checker.assertFalse(sa.contains("h"))
    checker.assertFalse(sa.contains("Z"))
    checker.assertFalse(sa.contains("Y"))
    checker.assertTrue(sa.contains("ello Wo"))
# 20 Points
def test3_2():
    """SuffixArray on the opening of Moby Dick (requires network access)."""
    print("\t-suffixarray on Moby Dick!")
    checker = unittest.TestCase()
    md_text = urllib.request.urlopen('https://www.gutenberg.org/files/2701/2701-0.txt').read().decode()
    sa = SuffixArray(md_text[0:1000])
    checker.assertTrue(sa.contains("Moby-Dick"))
    checker.assertTrue(sa.contains("Herman Melville"))
    checker.assertEqual(set(sa.positions("Moby-Dick")), {355, 356})
#################################################################################
# TEST CASES
#################################################################################
def main():
    """Run all three exercise suites (tests 2_2 and 3_2 require network access)."""
    test1()
    test2()
    test3()
if __name__ == '__main__':
    main()
|
saronson/cs331-s21-jmallett2
|
lab03/lab03.py
|
lab03.py
|
py
| 8,672 |
python
|
en
|
code
| 2 |
github-code
|
6
|
8670124375
|
# Train and persist a spam/ham SMS classifier (CountVectorizer + decision tree).
import pandas as pd
import pickle
# NOTE(review): absolute Windows path — this only runs on the author's machine.
df=pd.read_csv(r'C:/Users/SAIDHANUSH/spam-ham.csv')
# Encode the labels numerically: spam -> 0, ham -> 1.
df['Category'].replace('spam',0,inplace=True)
df['Category'].replace('ham',1,inplace=True)
x=df['Message']
y=df['Category']
from sklearn.feature_extraction.text import CountVectorizer
cv=CountVectorizer()
x=cv.fit_transform(x)
# Persist the fitted vectorizer so inference can reuse the same vocabulary.
pickle.dump(cv,open('transform.pkl','wb'))
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,random_state=4,test_size=0.2)
from sklearn.tree import DecisionTreeClassifier
clf=DecisionTreeClassifier()
clf.fit(x_train,y_train)
pr=clf.predict(x_test)
from sklearn.metrics import accuracy_score
# Report hold-out accuracy before saving the model.
print(accuracy_score(y_test,pr))
# Persist the trained model for the web app.
pickle.dump(clf,open('nlp_model1.pkl','wb'))
|
dhanush77777/spam-messages-classification-app
|
nlp_model.py
|
nlp_model.py
|
py
| 770 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15917643475
|
# Django
from django.shortcuts import render, redirect, get_object_or_404
from django.shortcuts import render, get_object_or_404
from django.contrib import messages  # needed by import_excel()
from django.http import JsonResponse
from django.http import HttpResponse
# libraries for Import Export
from import_export.formats import base_formats
from import_export.resources import modelresource_factory
from import_export import resources
from import_export.resources import ModelResource
from tablib import Dataset
from reportlab.pdfgen import canvas
# Local app
from .models import *
from .forms import *
from .models import Product
from .forms import ProductUpdateForm
from .models import Category
from .resources import ProductResources
#===============================================================code for category========================================
#====== list and add category======================
def category_list(request):
    """List all categories and handle the inline "add category" form on POST."""
    categories = Category.objects.all()
    if request.method == "POST":
        formbb = CategoryForm(request.POST or None)
        if formbb.is_valid():
            formbb.save()
            return redirect('category_list')
    else:
        formbb = CategoryForm()
    context = {
        "queryset": categories,
        "formbb": formbb,
    }
    return render(request, "category_list.html", context)
#============= delete category===================
def delete_categorys(request, pk):
    """Delete the category with primary key *pk* (POST only), then return to the list."""
    category = get_object_or_404(Category, pk=pk)
    if request.method == 'POST':
        category.delete()
    return redirect('category_list')
#===========================================================the for Products page==========================================
#=========== list and Add product ===========================
def product_list(request):
    """List all products (ordered by name) and handle the "add product" form.

    Fix: the original ran Product.objects.all() twice, discarding the
    first, unordered queryset; only the ordered query is kept.
    """
    if request.method == 'POST':
        formcc = ProductForm(request.POST)
        if formcc.is_valid():
            formcc.save()
            return redirect('product_list')
    else:
        formcc = ProductForm()
    queryset = Product.objects.all().order_by('product_name')
    context = {
        "queryset": queryset,
        "formcc": formcc,
    }
    return render(request, "product_list.html", context)
#====================================== Add product to main Store=======================================
def receive_products(request, code):
    """Record stock received from the factory into the main store.

    Looks up the product by its unique *code*; on a valid submission the
    received amount is rolled into the main-store quantity and the
    all-time received total.
    """
    queryset = Product.objects.get(code=code)
    formjj= ProductAmendForm(request.POST or None, instance=queryset)
    if formjj.is_valid():
        instance= formjj.save(commit=False)
        instance.shop_send_quantity = 0 # set the value of the "issue to shop" =0
        instance.factory_quantity += instance.receive_main_quantity # add the received quantity from factory to the quantity in the store
        instance.first_add_main_quantity = instance.receive_main_quantity+instance.first_add_main_quantity # running total of everything ever received into the main store
        instance.save()
        return redirect ("product_list")
    context = {
        "instance":queryset,
        "formjj":formjj,
    }
    return render(request, 'receive_products.html',context)
#===================== issue products from main store to shop store================================
def issue_products(request, code):
    """Move stock from the main (factory) store to the shop store for one product."""
    queryset = Product.objects.get(code=code)
    formuu= ProductIssueForm(request.POST or None, instance=queryset)
    if formuu.is_valid():
        instance= formuu.save(commit=False)
        instance.receive_main_quantity=0 # clear the "received from factory" field
        instance.factory_quantity -= instance.shop_send_quantity # deduct the issued amount from the main store
        instance.shop_receive_quantity = instance.shop_receive_quantity+ instance.shop_send_quantity # running total received by the shop
        instance.shop_remain_quantity +=instance.shop_send_quantity # current shop stock
        instance.save()
        return redirect ("product_list")
    context = {
        "instance":queryset,
        "formuu":formuu,
    }
    return render(request, 'issue_products.html',context)
#====================== delete products from the list====================
def delete_product(request, code):
    """Delete the product identified by *code* (POST only), then return to the list."""
    product = get_object_or_404(Product, code=code)
    if request.method == 'POST':
        product.delete()
    return redirect('product_list')
#======================= Update the Products =================================
def update_products(request, code):
    """Edit an existing product identified by *code*."""
    product = Product.objects.get(code=code)
    if request.method == 'POST':
        formvv = ProductUpdateForm(request.POST, instance=product)
        if formvv.is_valid():
            formvv.save()
            return redirect('product_list')
    else:
        formvv = ProductUpdateForm(instance=product)
    return render(request, 'update_products.html', {'formvv': formvv})
#=================================================================for the shop store =============================================
def shop_sell(request):
    """Render the static shop-sell page."""
    return render(request, 'shop_sell.html')
def product_shop_list(request):
    """List all products for the shop store, ordered by name.

    Fix: the original executed Product.objects.all() twice and threw the
    first (unordered) result away.
    """
    queryset = Product.objects.all().order_by('product_name')
    context = {
        "queryset": queryset,
    }
    return render(request, "product_shop_list.html", context)
#==================== code for the import and Export=========================================
class ProductResource(ModelResource):
    """django-import-export resource mapping for the Product model."""
    class Meta:
        model = Product
def export_pdf(request):
    """Stream every product as a PDF attachment, one product per page."""
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="products.pdf"'
    pdf = canvas.Canvas(response)
    for item in Product.objects.all():
        pdf.drawString(100, 700, f'Name: {item.product_name}')
        pdf.drawString(100, 680, f'Description: {item.description}')
        pdf.showPage()  # finish this product's page
    pdf.save()
    return response
def export_excel(request):
    """Download every product as an .xls spreadsheet."""
    exported = ProductResource().export()
    response = HttpResponse(exported.xls, content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename="product.xls"'
    return response
def import_excel(request):
    """Import products from an uploaded .xls file (field name 'myfile').

    Fixes: the original referenced the undeclared name ``messages``
    (NameError at runtime — now satisfied by the django.contrib.messages
    import), and bound the loaded dataset to an unused variable while
    passing the same object under another name.
    """
    if request.method == 'POST':
        new_data = request.FILES['myfile']
        if not new_data.name.endswith('xls'):
            messages.info(request, 'Wrong format')
            return render(request, 'import_data.html')
        dataset = Dataset().load(new_data.read(), 'xls')
        result = ProductResource().import_data(dataset, dry_run=True)  # Check if the data is valid
        if not result.has_errors():
            ProductResource().import_data(dataset, dry_run=False)  # Import the actual data
            messages.success(request, 'Data imported successfully')
    return render(request, 'import_data.html')
def export_import(request):
    """Render the product list page (entry point for the import/export UI)."""
    return render(request, 'product_list.html')
|
elumes446/Store-Management-System
|
Store Managment System/main/views.py
|
views.py
|
py
| 7,245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71830613309
|
def for_P():
    """Print a 7-row star pattern shaped like the letter P (for-loop version).

    A star cell emits "* " (star + space); an empty cell emits a single space.
    """
    for row in range(7):
        cells = []
        for col in range(6):
            lit = col == 0 or (row % 3 == 0 and col < 5 and row < 6) or (col == 5 and row % 3 != 0 and row < 3)
            cells.append("* " if lit else " ")
        print("".join(cells))
def while_P():
    """Print the same letter-P star pattern as for_P() (originally while loops)."""
    for i in range(7):
        j = 0
        while j < 6:
            if j == 0 or (i % 3 == 0 and j < 5 and i < 6) or (j == 5 and i % 3 != 0 and i < 3):
                print("*", end=" ")
            else:
                print(end=" ")
            j += 1
        print()
|
Ashokkommi0001/patterns
|
Alp/cap_alp/P.py
|
P.py
|
py
| 560 |
python
|
en
|
code
| 2 |
github-code
|
6
|
23682024390
|
import datetime
def rest_sec_of_day():
    """
    :return: number of whole seconds left in the current (naive local) day
    """
    start_of_day = datetime.datetime.strptime(str(datetime.date.today()), "%Y-%m-%d")
    next_midnight = start_of_day + datetime.timedelta(days=1)
    # timedelta is always < 1 day here, so .seconds is the full remainder
    return (next_midnight - datetime.datetime.now()).seconds
|
peacefulyin/gh
|
BackEnd/util/common.py
|
common.py
|
py
| 339 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40411312041
|
#!/usr/bin/env python3
"""
Name: bgp_neighbor_prefix_received.py
Description: NXAPI: display bgp neighbor summary info
"""
our_version = 109
script_name = "bgp_neighbor_prefix_received"
# standard libraries
import argparse
from concurrent.futures import ThreadPoolExecutor
# local libraries
from nxapi_netbox.args.args_cookie import ArgsCookie
from nxapi_netbox.args.args_nxapi_tools import ArgsNxapiTools
from nxapi_netbox.general.log import get_logger
from nxapi_netbox.netbox.netbox_session import netbox, get_device_mgmt_ip
from nxapi_netbox.vault.vault import get_vault
from nxapi_netbox.nxapi.nxapi_bgp_unicast_summary import (
NxapiBgpUnicastSummaryIpv4,
NxapiBgpUnicastSummaryIpv6,
)
def get_parser():
    """Build the CLI parser and return the *parsed* arguments.

    NOTE(review): despite the name, this returns parser.parse_args(),
    not the parser object itself.
    """
    help_afi = "address family to query. one of ipv4 or ipv6."
    help_nonzero = (
        "if specified, only display neighbors with non-zero prefixes received"
    )
    ex_prefix = "Example: "
    ex_afi = "{} --afi ipv6".format(ex_prefix)
    ex_nonzero = "{} --nonzero".format(ex_prefix)
    parser = argparse.ArgumentParser(
        description="DESCRIPTION: display bgp unicast summary info via NXAPI",
        parents=[ArgsCookie, ArgsNxapiTools],
    )
    default = parser.add_argument_group(title="DEFAULT SCRIPT ARGS")
    mandatory = parser.add_argument_group(title="MANDATORY SCRIPT ARGS")
    default.add_argument(
        "--afi",
        dest="afi",
        required=False,
        choices=["ipv4", "ipv6"],
        default="ipv4",
        help="{} {}".format(help_afi, ex_afi),
    )
    default.add_argument(
        "--nonzero",
        dest="nonzero",
        required=False,
        default=False,
        action="store_true",
        help="{} {}".format(help_nonzero, ex_nonzero),
    )
    parser.add_argument(
        "--version", action="version", version="{} v{}".format("%(prog)s", our_version)
    )
    return parser.parse_args()
def get_device_list():
    """Split the --devices argument into a list; exit(1) with a hint on failure.

    Fix: the bare ``except`` is narrowed to AttributeError (cfg.devices
    missing or not a string), so unrelated errors are no longer hidden.
    """
    try:
        return cfg.devices.split(",")
    except AttributeError:
        log.error(
            "exiting. Cannot parse --devices {}. Example usage: --devices leaf_1,spine_2,leaf_2".format(
                cfg.devices
            )
        )
        exit(1)
def print_header():
    """Print the column header row using the module-level format string ``fmt``."""
    print(fmt.format("ip", "hostname", "neighbor", "prefix_rx"))
def print_output(futures):
    """Print each worker future's collected lines, in submission order.

    Futures whose result is None are skipped.
    Fix: ``is None`` identity check replaces ``== None``.
    """
    for future in futures:
        output = future.result()
        if output is None:
            continue
        for line in output:
            print(line)
def collect_prefix_rx(ip, bgp):
    """Format one output line per BGP neighbor of *bgp*.

    Assigning ``bgp.neighbor`` selects which neighbor the subsequent
    property reads refer to. Honors --nonzero by skipping neighbors with
    zero prefixes received. Returns the formatted lines plus a trailing
    blank line.

    Fix: the bare ``except`` around int() is narrowed to
    (ValueError, TypeError), the errors int() actually raises.
    """
    lines = list()
    for neighbor in bgp.neighbor_info:
        bgp.neighbor = neighbor
        try:
            prefixreceived = int(bgp.prefixreceived)
        except (ValueError, TypeError):
            log.warning(
                "collect_prefix_rx. {} skipping neighbor {}. cannot convert bgp.prefixreceived {} to int()".format(
                    bgp.hostname, bgp.neighbor, bgp.prefixreceived
                )
            )
            continue
        if prefixreceived == 0 and cfg.nonzero == True:
            continue
        lines.append(fmt.format(ip, bgp.hostname, bgp.neighbor, bgp.prefixreceived))
    lines.append("")
    return lines
def get_instance(ip, vault):
    """Return the NxapiBgpUnicastSummary* instance matching cfg.afi.

    Exits with an error for any afi other than ipv4/ipv6.
    """
    if cfg.afi == "ipv4":
        summary_cls = NxapiBgpUnicastSummaryIpv4
    elif cfg.afi == "ipv6":
        summary_cls = NxapiBgpUnicastSummaryIpv6
    else:
        log.error("exiting. Unknown afi {}".format(cfg.afi))
        exit(1)
    return summary_cls(vault.nxos_username, vault.nxos_password, ip, log)
def worker(device, vault):
    """Per-device thread body: resolve mgmt IP, query BGP state, format lines."""
    mgmt_ip = get_device_mgmt_ip(nb, device)
    bgp = get_instance(mgmt_ip, vault)
    bgp.nxapi_init(cfg)
    bgp.vrf = cfg.vrf
    bgp.refresh()
    return collect_prefix_rx(mgmt_ip, bgp)
def get_fmt():
    """Return the row format string; ipv6 gets a wider neighbor column."""
    if cfg.afi == "ipv4":
        return "{:<15} {:<18} {:<15} {:>9}"
    return "{:<15} {:<18} {:<40} {:>9}"
# Script entry: parse CLI args, connect to netbox, then fan out one worker
# thread per device and print results in submission order.
cfg = get_parser()
log = get_logger(script_name, cfg.loglevel, "DEBUG")
vault = get_vault(cfg.vault)
vault.fetch_data()
nb = netbox(vault)
devices = get_device_list()
fmt = get_fmt()
print_header()
executor = ThreadPoolExecutor(max_workers=len(devices))
futures = list()
for device in devices:
    args = [device, vault]
    futures.append(executor.submit(worker, *args))
print_output(futures)
|
allenrobel/nxapi-netbox
|
scripts/bgp_neighbor_prefix_received.py
|
bgp_neighbor_prefix_received.py
|
py
| 4,416 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32990866297
|
import tensorflow as tf
def cast_float(x, y):
    """Cast the image tensor to float32; labels pass through unchanged."""
    return tf.cast(x, tf.float32), y
def normalize(x, y):
    """Reshape images to NHWC (28x28x1) and scale pixels into [0, 1]."""
    reshaped = tf.reshape(x, (-1, 28, 28, 1))
    return reshaped / 255.0, y
def augment(x, y):
    """Randomly flip images left-right and up-down; labels unchanged."""
    flipped = tf.image.random_flip_left_right(x)
    return tf.image.random_flip_up_down(flipped), y
def to_one_hot(x, y):
    """One-hot encode labels into 10 classes; images pass through."""
    return x, tf.one_hot(y, 10)
# Build the MNIST input pipelines.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Training set: batched, shuffled, augmented, one-hot encoded, repeated forever.
mnist_train = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32).shuffle(10000)
mnist_train = mnist_train.map(cast_float)
mnist_train = mnist_train.map(normalize)
mnist_train = mnist_train.map(augment)
mnist_train = mnist_train.map(to_one_hot)
mnist_train = mnist_train.repeat()
# Validation set: no augmentation.
# NOTE(review): built from the *test* split, not a held-out slice of train.
mnist_validation = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(1000).shuffle(10000)
mnist_validation = mnist_validation.map(cast_float)
mnist_validation = mnist_validation.map(normalize)
mnist_validation = mnist_validation.map(to_one_hot)
mnist_validation = mnist_validation.repeat()
# Test set: deterministic (no shuffle, no repeat, no augmentation).
mnist_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(1000)
mnist_test = mnist_test.map(cast_float)
mnist_test = mnist_test.map(normalize)
mnist_test = mnist_test.map(to_one_hot)
|
christophstach/ai-robotics
|
src/excercise_1/src/dataset.py
|
dataset.py
|
py
| 1,250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26189029070
|
import datetime
import table
import restaurant
class Restaurant:
    """Restaurant with a fixed set of eight numbered tables."""

    def __init__(self):
        self.name = "Restaurant Dingo"
        self.tables = [table.Table(number) for number in range(8)]

    def get_tables(self):
        """Return the list of Table objects."""
        return self.tables

    def print_tables(self):
        """Print one line per table (always eight lines)."""
        for number in range(8):
            print("Table " + str(number))
def loop_opening_hours(action):
    """Invoke *action* once per opening hour: 12:00 through 19:00 today.

    Each call receives the current datetime with hour/minute replaced
    (seconds and microseconds are kept as-is).

    Fix: removed a dead pre-loop assignment of the 12:00 datetime that
    was immediately overwritten on the first iteration.
    """
    now = datetime.datetime.now()
    for hour in range(12, 20):
        action(now.replace(hour=hour, minute=0))
|
jemmajh/Reservation_system_Y2
|
restaurant.py
|
restaurant.py
|
py
| 560 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73526892347
|
#!/usr/bin/python3
import os
import shlex

filename = os.path.basename(__file__)
# "If Lu Xun also traded stocks" — text to synthesize.
textStr = '我家门前有两棵树,一棵是绿枣树,另一棵也是绿枣树。'
voiceArr = ['YunjianNeural']
for v in voiceArr:
    # NOTE(review): "(unknown)" in the output path looks like a scrubbed
    # placeholder — probably meant {filename}; confirm before shipping.
    out_path = f'./audios/(unknown).{v}.mp3'
    # Fix: quote interpolated values so the shell cannot split the text or
    # choke on the parentheses in the path (the original embedded them raw).
    shell = f'edge-tts --text {shlex.quote(textStr)} --voice zh-CN-{v} --write-media {shlex.quote(out_path)}'
    os.system(shell)
|
zhouhuafei/edge-tts-case
|
scripts/20230421.1.py
|
20230421.1.py
|
py
| 348 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42928428434
|
class Solution:
    def maxOnesRow(self, arr, n, m):
        """Return the index of the row of *arr* (n x m) with the most 1s, or -1.

        Staircase scan from the top-right corner: the column pointer only
        ever moves left, so the grid is covered in O(n + m) comparisons.
        NOTE(review): assumes 1s are right-aligned within each row.
        """
        best_row = -1
        col = m - 1
        for row in range(n):
            while col >= 0 and arr[row][col] == 1:
                best_row = row
                col -= 1
        return best_row
# Quick manual check: row 2 (all ones) should be reported.
obj = Solution()
arr, n, m = [[0, 1, 1, 1], [0, 0, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0]], 4, 4
ans = obj.maxOnesRow(arr, n, m)
print(ans)
|
shwetakumari14/Leetcode-Solutions
|
Array/GFG/Row with max 1s.py
|
Row with max 1s.py
|
py
| 371 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41058762766
|
class Poly:
    """Sparse polynomial: self.terms maps power -> coefficient (non-zero only).

    NOTE(review): __add__/__radd__/__mul__/__rmul__ are unimplemented stubs
    that return None, and several methods have quirks the driver tests
    appear to depend on (flagged inline below).
    """
    def __init__(self,*terms):
        # __str__ uses the name self.terms for the dictionary of terms,
        # so build that dict here from the (coefficient, power) tuples.
        self.terms = {}
        for i in terms:
            assert type(i[0]) in (int,float)
            # NOTE(review): "assert expr, msg" — the trailing ``float`` here
            # is the assert *message*, not a second allowed type; as written,
            # powers must be int.
            assert type(i[1]) == int, float
            assert i[1] >= 0
            assert i[1] not in self.terms.keys()
            if i[0] != 0:
                self.terms.update({i[1]:i[0]})
    def __str__(self):
        # Provided renderer: highest power first, e.g. "3x^2 - 2x + 4";
        # assumes self.terms maps powers to coefficients.
        def term(c,p,var):
            return (str(c) if p == 0 or c != 1 else '') +\
                   ('' if p == 0 else var+('^'+str(p) if p != 1 else ''))
        if len(self.terms) == 0:
            return '0'
        else:
            return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')
    def __repr__(self):
        # Renders as Poly((coeff,power)(coeff,power)...) — note: no commas
        # between the tuples.
        new_str = ''
        for i in self.terms.items():
            new_str += str('(' +str(i[1]) +','+ str(i[0]) + ')')
        return 'Poly({})'.format(new_str)
    def __len__(self):
        # Degree of the polynomial (largest power), not the term count;
        # 0 for an empty polynomial.
        new_list = []
        for i in self.terms.keys():
            new_list.append(i)
        if len(self.terms.keys()) > 0:
            return max(new_list)
        else:
            return 0
    def __call__(self,arg):
        # Evaluate at arg: sum of coefficient * arg**power.
        count = 0
        for i in self.terms.items():
            count += (arg**i[0])*i[1]
        return count
    def __iter__(self):
        # Yield (coefficient, power) pairs, highest power first.
        for i in sorted(self.terms.items(), reverse = True):
            yield (i[1],i[0])
    def __getitem__(self,index):
        # Coefficient for a power. Raises TypeError for non-int or negative
        # indexes; KeyError (from the dict) for an absent power.
        if type(index) != int:
            raise TypeError
        elif index < 0 :
            raise TypeError
        return self.terms[index]
    def __setitem__(self,index,value):
        # NOTE(review): requires index > 0, so the constant term (power 0)
        # cannot be set here — inconsistent with __delitem__'s index >= 0.
        if type(index) == int and index > 0:
            self.terms.update({index:value})
            # Drop at most one zero-coefficient term after the update.
            for i in self.terms.keys():
                if self.terms[i] == 0:
                    del self.terms[i]
                    break
        else:
            raise TypeError
    def __delitem__(self,index):
        # Remove the term for a power if present; TypeError on bad index.
        if type(index) == int and index >= 0:
            if index in self.terms.keys():
                del self.terms[index]
        else:
            raise TypeError
    def _add_term(self,c,p):
        # Add coefficient c for power p (merging with an existing term),
        # then drop at most one zero-coefficient term.
        if type(c) in (int,float) and type(p) == int:
            if p not in self.terms.keys() and c != 0:
                self.terms.update({p:c})
            elif p in self.terms.keys() and c != 0:
                self.terms.update({p:c + self.terms[p]})
            for i in self.terms.keys():
                if self.terms[i] == 0:
                    del self.terms[i]
                    break
    def __add__(self,right):
        # TODO: unimplemented — returns None.
        pass
    def __radd__(self,left):
        # TODO: unimplemented — returns None.
        pass
    def __mul__(self,right):
        # TODO: unimplemented — returns None.
        pass
    def __rmul__(self,left):
        # TODO: unimplemented — returns None.
        pass
    def __eq__(self,right):
        # Pairwise comparison of dict items for Poly-vs-Poly.
        # NOTE(review): for an int right, returns True only when the
        # polynomial has no x^1 term and the int appears among the
        # coefficients; falls through (None) in some branches.
        new_list = []
        if type(right) not in (int,str):
            for i,e in zip(self.terms.items(), right.terms.items()):
                if i[0] == e[0] and i[1] == e[1]:
                    new_list.append(True)
                else:
                    new_list.append(False)
            return all(new_list)
        elif type(right) == int:
            if 1 not in self.terms.keys():
                if right in self.terms.values():
                    return True
                else:
                    return False
        else:
            raise TypeError
if __name__ == '__main__':
    # Some simple tests; you can comment them out and/or add your own before
    # the driver is called.
    # NOTE(review): p+p, p+2, p*p, p*2 all print None because the
    # corresponding operators are unimplemented stubs.
    print('Start simple tests')
    p = Poly((3,2),(-2,1), (4,0))
    print(' For Polynomial: 3x^2 - 2x + 4')
    print(' str(p):',p)
    print(' repr(p):',repr(p))
    print(' len(p):',len(p))
    print(' p(2):',p(2))
    print(' list collecting iterator results:',[t for t in p])
    print(' p+p:',p+p)
    print(' p+2:',p+2)
    print(' p*p:',p*p)
    print(' p*2:',p*2)
    print('End simple tests\n')
    import driver
    #driver.default_show_exception=True
    #driver.default_show_exception_message=True
    #driver.default_show_traceback=True
    driver.driver()
|
solomc1/python
|
ics 33/solutions/ile2 solutions/Lab 6/VegaHector/poly.py
|
poly.py
|
py
| 5,121 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41211987806
|
import matplotlib.pyplot as plt
import librosa
import librosa.display
import os
import torch
from torch.distributions.beta import Beta
import numpy as np
from pytorch_lightning.callbacks import Callback
import torch.nn as nn
from einops import rearrange
from tqdm import tqdm
from helpers import nessi
# Directory where generated figures are written; created eagerly at import time.
image_folder = "images"
os.makedirs(image_folder, exist_ok=True)
class MyStaticPostQuantizationCallback(Callback):
    """Lightning callback that applies static post-training quantization to
    ``pl_module.net`` (fuse -> prepare -> calibrate -> convert) right before
    the test loop starts."""

    def __init__(self, get_calibration_loader, calibration_batches=100):
        # get_calibration_loader: zero-argument callable returning the
        # dataloader used to calibrate activation ranges.
        self.calibration_loader = get_calibration_loader()
        self.calibration_batches = calibration_batches

    def quantize_model(self, pl_module):
        """Quantize pl_module.net in place and return a dict of size stats
        (MACs/params before and after fusing, and the quantized model size
        in bytes)."""
        print("*********** Before Quantization: ***********")
        if hasattr(pl_module, 'mel'):
            pl_module.mel.cpu()

        # get the shape of spectrograms
        # NOTE(review): the slice keeps the first tenth of the waveform —
        # presumably one second of a 10-second clip; confirm against the dataset.
        sample = next(iter(self.calibration_loader))[0][0].unsqueeze(0)
        sample = sample[:, :, :sample.size(2) // 10]
        shape = pl_module.mel_forward(sample).size()

        # get original macs and params
        macc_orig, n_params_orig = nessi.get_model_size(pl_module.net, input_size=(1, shape[1], shape[2], shape[3]))
        print("macc_orig: ", macc_orig)
        print("n_params_orig: ", n_params_orig)
        # print size of model before quantization
        print_size_of_model(pl_module.net)
        # Fuse conv/bn/relu modules so they quantize as single units.
        pl_module.net.fuse_model()

        # get macs and params after fusing model
        macc, n_params = nessi.get_model_size(
            pl_module.net, input_size=(1, shape[1], shape[2], shape[3]))
        print("macc after fuse : ", macc)
        print("n_params after fuse: ", n_params)

        pl_module.net.qconfig = torch.quantization.get_default_qconfig('fbgemm')
        torch.quantization.prepare(pl_module.net, inplace=True)
        pl_module.net.cpu()
        if hasattr(pl_module, 'mel'):
            pl_module.mel.cpu()
        # Calibration pass: run representative batches through the prepared
        # net so the observers collect activation statistics.
        for i, batch in enumerate(tqdm(self.calibration_loader, total=self.calibration_batches)):
            x, files, y, device_indices, cities, indices = batch
            # split to 1-second pieces
            x = rearrange(x, 'b c (slices t) -> (b slices) c t', slices=10)
            x = x.cpu()
            if hasattr(pl_module, 'mel'):
                x = pl_module.mel_forward(x)

            with torch.no_grad():
                pl_module.net(x)
            # stop after a certain number of calibration samples
            # NOTE(review): enumerate starts at 0, so this processes
            # calibration_batches + 1 batches — confirm whether the
            # off-by-one is intended.
            if i == self.calibration_batches:
                break
        torch.quantization.convert(pl_module.net, inplace=True)
        print("*********** After Quantization: ***********")
        return dict(macc_orig=macc_orig, n_params_orig=n_params_orig,
                    macc_fuse=macc, n_params_fuse=n_params, model_size_bytes=print_size_of_model(pl_module.net))

    def on_test_start(self, trainer, pl_module):
        # Lightning hook: quantize once, just before testing begins.
        self.quantize_model(pl_module)
def mixstyle(x, p=0.5, alpha=0.1, eps=1e-6):
    """Frequency-wise MixStyle augmentation.

    With probability p, normalizes x by its per-sample frequency statistics
    (mean/std over dims 1 and 3) and re-scales it with statistics mixed
    between the sample and a randomly permuted sample from the batch.
    Otherwise returns x unchanged.
    """
    # Apply the augmentation only with probability p.
    if np.random.rand() > p:
        return x
    n = x.size(0)
    # changed from dim=[2,3] to dim=[1,3]: frequency-wise instead of
    # channel-wise statistics.
    mu = x.mean(dim=[1, 3], keepdim=True)
    sig = (x.var(dim=[1, 3], keepdim=True) + eps).sqrt()
    mu, sig = mu.detach(), sig.detach()  # block gradients through the stats
    normed = (x - mu) / sig
    # Instance-wise convex mixing weights and a random pairing permutation.
    lam = Beta(alpha, alpha).sample((n, 1, 1, 1)).to(x.device)
    pair = torch.randperm(n).to(x.device)
    mu2, sig2 = mu[pair], sig[pair]
    mixed_mu = mu * lam + mu2 * (1 - lam)
    mixed_sig = sig * lam + sig2 * (1 - lam)
    # Denormalize with the mixed statistics.
    return normed * mixed_sig + mixed_mu
def print_size_of_model(model):
    """Print and return the size (in bytes) of the model's serialized
    state_dict.

    Fixed: the original wrote a hard-coded "temp.p" into the current working
    directory, which races between concurrent runs and leaks the file if
    getsize raises; a tempfile plus try/finally avoids both.
    """
    import tempfile
    fd, path = tempfile.mkstemp(suffix=".p")
    os.close(fd)  # torch.save reopens the path itself
    try:
        torch.save(model.state_dict(), path)
        model_size_bytes = os.path.getsize(path)
    finally:
        os.remove(path)
    print('Size (MB):', model_size_bytes / 1e6)
    return model_size_bytes
def mixup(size, alpha):
    """Draw mixup pairing indices and mixing weights.

    Returns a random permutation of range(size) and a float tensor of
    per-sample weights lam in [0.5, 1], where lam = max(b, 1-b) for
    b ~ Beta(alpha, alpha).
    """
    pairing = torch.randperm(size)
    draws = np.random.beta(alpha, alpha, size).astype(np.float32)
    # Taking max(b, 1-b) keeps the dominant weight on the original sample.
    dominant = np.maximum(draws, 1.0 - draws)
    # Typical usage by the caller:
    #   data = data * lam + data[pairing] * (1 - lam)
    #   targets = targets * lam + targets[pairing] * (1 - lam)
    return pairing, torch.FloatTensor(dominant)
def spawn_get(seedseq, n_entropy, dtype):
    """Spawn one child of a numpy SeedSequence and return its entropy.

    :param seedseq: np.random.SeedSequence to spawn from.
    :param n_entropy: number of 32-bit words of state to generate.
    :param dtype: np.ndarray to get the raw uint32 words, or int to get
        them packed little-endian-word-first into a single Python int.
    :raises ValueError: for any other dtype.

    Fixed: the original computed ``2 ** (32 * shift) * s`` with ``s`` still
    a np.uint32; for shift >= 1 the Python int no longer fits the numpy
    dtype, which raises OverflowError under NumPy 2.x promotion rules (and
    can silently wrap on older versions). Converting to a Python int first
    and shifting avoids any fixed-width arithmetic.
    """
    child = seedseq.spawn(1)[0]
    state = child.generate_state(n_entropy, dtype=np.uint32)
    if dtype == np.ndarray:
        return state
    elif dtype == int:
        state_as_int = 0
        for shift, s in enumerate(state):
            state_as_int += int(s) << (32 * shift)
        return state_as_int
    else:
        raise ValueError(f'not a valid dtype "{dtype}"')
|
CPJKU/cpjku_dcase22
|
helpers/utils.py
|
utils.py
|
py
| 4,903 |
python
|
en
|
code
| 18 |
github-code
|
6
|
70103613629
|
#!/usr/bin/env python3
"""
Example for Implied Volatility using the NAG Library for Python
Finds implied volatilities of the Black Scholes equation using specfun.opt_imp_vol
Data needs to be downloaded from:
http://www.cboe.com/delayedquote/QuoteTableDownload.aspx
Make sure to download data during CBOE Trading Hours.
Updated for NAG Library for Python Mark 27.1
"""
# pylint: disable=invalid-name,too-many-branches,too-many-locals,too-many-statements
try:
import sys
import pandas
import numpy as np
import matplotlib.pylab as plt
import warnings
from naginterfaces.library import specfun, fit
from naginterfaces.base import utils
from matplotlib import cm
except ImportError as e:
print(
"Could not import the following module. "
"Do you have a working installation of the NAG Library for Python?"
)
print(e)
sys.exit(1)
__author__ = "Edvin Hopkins, John Morrissey and Brian Spector"
__copyright__ = "Copyright 2021, The Numerical Algorithms Group Inc"
__email__ = "[email protected]"
# Set to hold expiration dates
dates = []

# Cumulative day-of-year at the end of each month (non-leap year).
# Fixed: 'Feb' was 57; the end of February is day 59 (31 + 28), which is
# what makes Mar = 90 (31 + 28 + 31) and all later entries consistent.
cumulative_month = {'Jan': 31, 'Feb': 59, 'Mar': 90,
                    'Apr': 120, 'May': 151, 'Jun': 181,
                    'Jul': 212, 'Aug': 243, 'Sep': 273,
                    'Oct': 304, 'Nov': 334, 'Dec': 365}
def main(): # pylint: disable=missing-function-docstring
    # Reads a CBOE quote file, computes implied volatilities for calls and
    # puts with NAG's specfun.opt_imp_vol, plots per-expiry smile curves,
    # then fits and plots a Chebyshev volatility surface for the puts.
    try:
        if len(sys.argv)>1:
            QuoteData = sys.argv[1]
        else:
            QuoteData = 'QuoteData.dat'
        qd = open(QuoteData, 'r')
        qd_head = []
        qd_head.append(qd.readline())
        qd_head.append(qd.readline())
        qd.close()
    except: # pylint: disable=bare-except
        sys.stderr.write("Usage: implied_volatility.py QuoteData.dat\n")
        sys.stderr.write("Couldn't read QuoteData\n")
        sys.exit(1)
    print("Implied Volatility for %s %s" % (qd_head[0].strip(), qd_head[1]))
    # Parse the header information in QuotaData
    first = qd_head[0].split(',')
    second = qd_head[1].split()
    qd_date = qd_head[1].split(',')[0]
    company = first[0]
    underlyingprice = float(first[1])
    month, day = second[:2]
    # NOTE(review): '- 30' offsets the cumulative-month day count into a
    # day-count baseline used for time-to-expiry — confirm the intent.
    today = cumulative_month[month] + int(day) - 30
    current_year = int(second[2])
    def getExpiration(x):
        # Parse "<yy> <Mon> <strike> ..." from the Calls column; remember the
        # expiry label and return its day offset from the current year start.
        monthday = x.split()
        adate = monthday[0] + ' ' + monthday[1]
        if adate not in dates:
            dates.append(adate)
        return (int(monthday[0]) - (current_year % 2000)) * 365 + cumulative_month[monthday[1]]
    def getStrike(x):
        # Third whitespace-separated field of the Calls column is the strike.
        monthday = x.split()
        return float(monthday[2])
    data = pandas.io.parsers.read_csv(QuoteData, sep=',', header=2, na_values=' ')
    # Need to fill the NA values in dataframe
    data = data.fillna(0.0)
    # Let's look at data where there was a recent sale
    data = data[(data['Last Sale'] > 0) | (data['Last Sale.1'] > 0)]
    # Get the Options Expiration Date
    exp = data.Calls.apply(getExpiration)
    exp.name = 'Expiration'
    # Get the Strike Prices
    strike = data.Calls.apply(getStrike)
    strike.name = 'Strike'
    data = data.join(exp).join(strike)
    print("Number of data points found: {}\n".format(len(data.index)))
    print('Calculating Implied Vol of Calls...')
    # Zero risk-free rate; mid price of bid/ask as the option premium.
    r = np.zeros(len(data.index))
    t = (data.Expiration - today)/365.0
    s0 = np.full(len(data.index),underlyingprice)
    pCall= (data.Bid + data.Ask) / 2
    # A lot of the data is incomplete or extreme so we tell the NAG routine
    # not to worry about warning us about data points it can't work with
    warnings.simplefilter('ignore',utils.NagAlgorithmicWarning)
    sigmaCall = specfun.opt_imp_vol('C',pCall,data.Strike, s0,t,r,mode = 1).sigma
    impvolcall = pandas.Series(sigmaCall,index=data.index, name='impvolCall')
    data = data.join(impvolcall)
    print('Calculating Implied Vol of Puts...')
    pPut= (data['Bid.1'] + data['Ask.1']) / 2
    sigmaPut = specfun.opt_imp_vol('P',pPut,data.Strike, s0,t,r,mode = 1).sigma
    impvolput = pandas.Series(sigmaPut,index=data.index, name='impvolPut')
    data = data.join(impvolput)
    fig = plt.figure(1)
    fig.subplots_adjust(hspace=.4, wspace=.3)
    # Plot the Volatility Curves
    # Encode graph layout: 3 rows, 3 columns, 1 is first graph.
    num = 331
    max_xticks = 4
    for date in dates:
        # add each subplot to the figure
        plot_year, plot_month = date.split()
        plot_date = (int(plot_year) - (current_year % 2000)) * 365 + cumulative_month[plot_month]
        # Keep only rows for this expiry with usable implied vols and a sale.
        plot_call = data[(data.impvolCall > .01) &
                         (data.Expiration == plot_date) &
                         (data['Last Sale'] > 0)]
        plot_put = data[(data.impvolPut > .01) &
                        (data.Expiration == plot_date) &
                        (data['Last Sale.1'] > 0)]
        myfig = fig.add_subplot(num)
        xloc = plt.MaxNLocator(max_xticks)
        myfig.xaxis.set_major_locator(xloc)
        myfig.set_title('Expiry: %s 20%s' % (plot_month, plot_year))
        myfig.plot(plot_call.Strike, plot_call.impvolCall, 'pr', label='call',markersize=0.5)
        myfig.plot(plot_put.Strike, plot_put.impvolPut, 'p', label='put',markersize=0.5)
        myfig.legend(loc=1, numpoints=1, prop={'size': 10})
        myfig.set_ylim([0,1])
        myfig.set_xlabel('Strike Price')
        myfig.set_ylabel('Implied Volatility')
        num += 1
    plt.suptitle('Implied Volatility for %s Current Price: %s Date: %s' %
                 (company, underlyingprice, qd_date))
    print("\nPlotting Volatility Curves/Surface")
    # The code below will plot the Volatility Surface
    # It uses fit.dim2_cheb_lines to fit with a polynomial and
    # fit.dim2_cheb_eval to evaluate at intermediate points
    m = np.empty(len(dates), dtype=np.int32)
    y = np.empty(len(dates), dtype=np.double)
    xmin = np.empty(len(dates), dtype=np.double)
    xmax = np.empty(len(dates), dtype=np.double)
    data = data.sort_values(by=['Strike'])  # Need to sort for NAG Algorithm
    k = 3   # this is the degree of polynomial for x-axis (Strike Price)
    l = 3   # this is the degree of polynomial for y-axis (Expiration Date)
    i = 0
    # Collect, per expiry, the strikes and put implied vols to be fitted.
    for date in dates:
        plot_year, plot_month = date.split()
        plot_date = (int(plot_year) - (current_year % 2000)) * 365 + cumulative_month[plot_month]
        call_data = data[(data.Expiration == plot_date) &
                         (data.impvolPut > .01) &
                         (data.impvolPut < 1) &
                         (data['Last Sale.1'] > 0)]
        exp_sizes = call_data.Expiration.size
        if exp_sizes > 0:
            m[i] = exp_sizes
            if i == 0:
                x = np.array(call_data.Strike)
                call = np.array(call_data.impvolPut)
                xmin[0] = x.min()
                xmax[0] = x.max()
            else:
                x2 = np.array(call_data.Strike)
                x = np.append(x,x2)
                call2 = np.array(call_data.impvolPut)
                call = np.append(call,call2)
                xmin[i] = x2.min()
                xmax[i] = x2.max()
            y[i] = plot_date-today
            i+=1
    nux = np.zeros(1,dtype=np.double)
    nuy = np.zeros(1,dtype=np.double)
    # Every expiry must have contributed data, else the fit arrays are short.
    if len(dates) != i:
        print(
            "Error with data: the CBOE may not be open for trading "
            "or one expiration date has null data"
        )
        return 0
    weight = np.ones(call.size, dtype=np.double)
    #Call the NAG Chebyshev fitting function
    output_coef = fit.dim2_cheb_lines(m,k,l,x,y,call,weight,(k + 1) * (l + 1),xmin,xmax,nux,nuy)
    # Now that we have fit the function,
    # we use fit.dim2_cheb_eval to evaluate at different strikes/expirations
    nStrikes = 100 # number of Strikes to evaluate
    spacing = 20 # number of Expirations to evaluate
    for i in range(spacing):
        mfirst = 1
        xmin = data.Strike.min()
        xmax = data.Strike.max()
        x = np.linspace(xmin, xmax, nStrikes)
        ymin = data.Expiration.min() - today
        ymax = data.Expiration.max() - today
        y = (ymin) + i * np.floor((ymax - ymin) / spacing)
        fx=np.empty(nStrikes)
        fx=fit.dim2_cheb_eval(mfirst,k,l,x,xmin,xmax,y,ymin,ymax,output_coef)
        # Accumulate the surface points; first iteration initializes the axes.
        if 'xaxis' in locals():
            xaxis = np.append(xaxis, x)
            temp = np.empty(len(x))
            temp.fill(y)
            yaxis = np.append(yaxis, temp)
            for j in range(len(x)):
                zaxis.append(fx[j])
        else:
            xaxis = x
            yaxis = np.empty(len(x), dtype=np.double)
            yaxis.fill(y)
            zaxis = []
            for j in range(len(x)):
                zaxis.append(fx[j])
    fig = plt.figure(2)
    ax = fig.add_subplot(111, projection='3d')
    # A try-except block for Matplotlib
    try:
        ax.plot_trisurf(xaxis, yaxis, zaxis, cmap=cm.jet)
    except AttributeError:
        print ("Your version of Matplotlib does not support plot_trisurf")
        print ("...plotting wireframe instead")
        ax.plot(xaxis, yaxis, zaxis)
    ax.set_xlabel('Strike Price')
    ax.set_ylabel('Days to Expiration')
    ax.set_zlabel('Implied Volatility for Put Options')
    plt.suptitle('Implied Volatility Surface for %s Current Price: %s Date: %s' %
                 (company, underlyingprice, qd_date))
    plt.show()
if __name__ == "__main__":
    # Script entry point.
    main()
|
cthadeufaria/passport
|
investing/impliedVolatility.py
|
impliedVolatility.py
|
py
| 9,398 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20775589752
|
import numpy as np
class StaticFns:
    """Static termination and reward functions for the Walker2d task."""

    @staticmethod
    def termination_fn(obs, act, next_obs):
        """Return a (batch, 1) boolean array: True where the episode ends.

        The walker stays alive while its height is in (0.8, 2.0) and its
        torso angle is in (-1.0, 1.0), both read from next_obs.
        """
        assert len(obs.shape) == len(next_obs.shape) == len(act.shape) == 2

        torso_height = next_obs[:, 0]
        torso_angle = next_obs[:, 1]
        alive = ((torso_height > 0.8)
                 * (torso_height < 2.0)
                 * (torso_angle > -1.0)
                 * (torso_angle < 1.0))
        return (~alive)[:, None]

    @staticmethod
    def reward_fn(obs, act, next_obs):
        """Return the negated Walker2d reward for a batch of transitions.

        Reward = forward velocity (obs[:, 8]) - 0.1*||act||^2
                 - 3*(height - 1.3)^2 + 1 alive bonus.
        NOTE(review): the sign is flipped on return — presumably the caller
        treats this as a cost to minimize; confirm against its usage.
        """
        ctrl_penalty = -0.1 * np.sum(np.square(act), axis=1)
        forward_reward = obs[:, 8]
        height_penalty = -3.0 * np.square(obs[:, 0] - 1.3)
        total = forward_reward + ctrl_penalty + height_penalty + 1.0
        return -total
|
duxin0618/CDA-MBPO
|
static/walker2d.py
|
walker2d.py
|
py
| 756 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34268489867
|
'''Проверка гипотезы Сиракуз'''
# Гипотеза Сиракуз гласит, что любое натуральное число сводится к единице при следующих
# действиях над ним: а) если число четное, то разделить его пополам, б) если число нечетное,
# то умножить его на 3, прибавить 1 и результат разделить на 2. Над вновь полученным
# числом вновь повторить действия a) или б) в зависимости от четности числа. Рано или
# поздно число станет равным 1.
def syracuse(n: int):
    """Print the Syracuse trajectory of n down to 1.

    The starting value is printed on its own line; every subsequent value
    (n/2 when even, (3n+1)/2 when odd) is printed space-separated.
    """
    print(n)
    while n != 1:
        n = n // 2 if n % 2 == 0 else (3 * n + 1) // 2
        print(n, end=' ')
syracuse(56)
|
ziGFriedman/My_programs
|
Testing_the_Syracuse_hypothesis.py
|
Testing_the_Syracuse_hypothesis.py
|
py
| 920 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
34228406110
|
from pymongo.collection import Collection
from bson.objectid import ObjectId
def insert_object(obj: dict, collection: Collection):
    """Insert an object into the collection and return its inserted id.

    The object's 'fields' dict is stored as a list of (key, value) pairs.
    Fixed: the original rewrote obj['fields'] in place, silently mutating
    the caller's dict (and insert_one then also injected '_id' into it);
    a shallow copy keeps the caller's object untouched.
    """
    document = {**obj, 'fields': list(obj['fields'].items())}
    return collection.insert_one(document).inserted_id
def delete_object(object_id: str, collection: Collection):
    """Delete the object with the given id from the collection."""
    query = {"_id": ObjectId(object_id)}
    collection.delete_one(query)
def get_object(object_id: str, collection: Collection):
    """Fetch one object by id; returns None when no document matches.

    The stored 'fields' pair-list is converted back into a dict.
    """
    found = collection.find_one({"_id": ObjectId(object_id)})
    if found is None:
        return None
    found['fields'] = dict(found['fields'])
    return found
def get_objects(
    page_size: int,
    page_number: int,
    collection: Collection
) -> list[dict]:
    """
    Fetch one page of objects from the collection.

    :param page_size: number of documents per page
    :param page_number: 1-based page index
    :param collection: MongoDB collection to query
    :return: list of documents with 'fields' converted back to a dict
    """
    skip_count = (page_number - 1) * page_size
    cursor = collection.find({}).limit(page_size).skip(skip_count)
    page = []
    for document in cursor:
        document['fields'] = dict(document['fields'])
        page.append(document)
    return page
|
AKovalyuk/test-task
|
app/db/crud.py
|
crud.py
|
py
| 1,341 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
35717342742
|
import torch
import torch.nn as nn
from utils.resnet_infomin import model_dict
import torch.nn.functional as F
from collections import OrderedDict
class RGBSingleHead(nn.Module):
    """RGB encoder followed by a single linear or MLP projection head."""

    def __init__(self, name='resnet50', head='linear', feat_dim=128):
        super(RGBSingleHead, self).__init__()

        name, width = self._parse_width(name)
        dim_in = int(2048 * width)
        self.width = width

        self.encoder = model_dict[name](width=width)

        if head == 'linear':
            layers = [nn.Linear(dim_in, feat_dim), Normalize(2)]
        elif head == 'mlp':
            layers = [
                nn.Linear(dim_in, dim_in),
                nn.ReLU(inplace=True),
                nn.Linear(dim_in, feat_dim),
                Normalize(2),
            ]
        else:
            raise NotImplementedError(
                'head not supported: {}'.format(head))
        self.head = nn.Sequential(*layers)

    @staticmethod
    def _parse_width(name):
        """Split a trailing width suffix ('x2'/'x4') off the arch name."""
        if name.endswith('x4'):
            return name[:-2], 4
        if name.endswith('x2'):
            return name[:-2], 2
        return name, 1

    def forward(self, x, mode=0):
        # mode 0: normal encoder, 1: momentum encoder, 2: testing mode
        # (testing returns the raw encoder features without projection).
        feat = self.encoder(x)
        return self.head(feat) if mode in (0, 1) else feat
class RGBMultiHeads(RGBSingleHead):
    """RGB encoder with the standard projection head plus a jigsaw head."""

    def __init__(self, name='resnet50', head='linear', feat_dim=128):
        super(RGBMultiHeads, self).__init__(name, head, feat_dim)

        self.head_jig = JigsawHead(dim_in=int(2048*self.width),
                                   dim_out=feat_dim,
                                   head=head)

    def forward(self, x, x_jig=None, mode=0):
        # mode 0: normal encoder (returns contrastive + jigsaw features),
        # mode 1: momentum encoder, mode 2: testing (raw encoder features).
        if mode == 0:
            contrastive = self.head(self.encoder(x))
            jigsaw = self.head_jig(self.encoder(x_jig))
            return contrastive, jigsaw
        if mode == 1:
            return self.head(self.encoder(x))
        return self.encoder(x)
class CMCSingleHead(nn.Module):
    """CMC model with a single linear/mlp projection head"""
    def __init__(self, name='resnet50', head='linear', feat_dim=128):
        super(CMCSingleHead, self).__init__()

        name, width = self._parse_width(name)
        dim_in = int(2048 * width)
        self.width = width

        # Two parallel encoders: one takes the first input channel, the
        # other the remaining two (see the torch.split in forward).
        self.encoder1 = model_dict[name](width=width, in_channel=1)
        self.encoder2 = model_dict[name](width=width, in_channel=2)

        if head == 'linear':
            self.head1 = nn.Sequential(
                nn.Linear(dim_in, feat_dim),
                Normalize(2)
            )
            self.head2 = nn.Sequential(
                nn.Linear(dim_in, feat_dim),
                Normalize(2)
            )
        elif head == 'mlp':
            self.head1 = nn.Sequential(
                nn.Linear(dim_in, dim_in),
                nn.ReLU(inplace=True),
                nn.Linear(dim_in, feat_dim),
                Normalize(2)
            )
            self.head2 = nn.Sequential(
                nn.Linear(dim_in, dim_in),
                nn.ReLU(inplace=True),
                nn.Linear(dim_in, feat_dim),
                Normalize(2)
            )
        else:
            raise NotImplementedError(
                'head not supported: {}'.format(head))

    @staticmethod
    def _parse_width(name):
        # NOTE(review): widths are half of RGBSingleHead's (4/2/1 becomes
        # 2/1/0.5) — presumably because two encoders run in parallel here;
        # confirm against the original PyContrast code.
        if name.endswith('x4'):
            return name[:-2], 2
        elif name.endswith('x2'):
            return name[:-2], 1
        else:
            return name, 0.5

    def forward(self, x, mode=0):
        # mode --
        # 0: normal encoder,
        # 1: momentum encoder,
        # 2: testing mode
        # Split channels 1:2 between the two encoders.
        x1, x2 = torch.split(x, [1, 2], dim=1)
        feat1 = self.encoder1(x1)
        feat2 = self.encoder2(x2)
        if mode == 0 or mode == 1:
            feat1 = self.head1(feat1)
            feat2 = self.head2(feat2)
        return torch.cat((feat1, feat2), dim=1)
class CMCMultiHeads(CMCSingleHead):
    """CMC model with Multiple linear/mlp projection heads"""
    def __init__(self, name='resnet50', head='linear', feat_dim=128):
        super(CMCMultiHeads, self).__init__(name, head, feat_dim)

        # One extra jigsaw projection head per modality branch.
        self.head1_jig = JigsawHead(dim_in=int(2048*self.width),
                                    dim_out=feat_dim,
                                    head=head)
        self.head2_jig = JigsawHead(dim_in=int(2048*self.width),
                                    dim_out=feat_dim,
                                    head=head)

    def forward(self, x, x_jig=None, mode=0):
        # mode --
        # 0: normal encoder,
        # 1: momentum encoder,
        # 2: testing mode
        x1, x2 = torch.split(x, [1, 2], dim=1)
        feat1 = self.encoder1(x1)
        feat2 = self.encoder2(x2)

        if mode == 0:
            # Training: also run the jigsaw view through both encoders and
            # their dedicated jigsaw heads.
            x1_jig, x2_jig = torch.split(x_jig, [1, 2], dim=1)
            feat1_jig = self.encoder1(x1_jig)
            feat2_jig = self.encoder2(x2_jig)

            feat1, feat2 = self.head1(feat1), self.head2(feat2)
            feat1_jig = self.head1_jig(feat1_jig)
            feat2_jig = self.head2_jig(feat2_jig)
            feat = torch.cat((feat1, feat2), dim=1)
            feat_jig = torch.cat((feat1_jig, feat2_jig), dim=1)
            return feat, feat_jig
        elif mode == 1:
            feat1, feat2 = self.head1(feat1), self.head2(feat2)
            return torch.cat((feat1, feat2), dim=1)
        else:
            return torch.cat((feat1, feat2), dim=1)
class Normalize(nn.Module):
    """L_p-normalize inputs along the feature dimension (dim=1)."""

    def __init__(self, p=2):
        super(Normalize, self).__init__()
        self.p = p  # order of the norm (2 = unit Euclidean length)

    def forward(self, x):
        normalized = F.normalize(x, p=self.p, dim=1)
        return normalized
class JigsawHead(nn.Module):
    """Jigsaw head: per-patch projection, within-image shuffle, then a
    linear layer over the concatenated k patch features, L2-normalized."""

    def __init__(self, dim_in, dim_out, k=9, head='linear'):
        super(JigsawHead, self).__init__()

        if head == 'linear':
            self.fc1 = nn.Linear(dim_in, dim_out)
        elif head == 'mlp':
            self.fc1 = nn.Sequential(
                nn.Linear(dim_in, dim_in),
                nn.ReLU(inplace=True),
                nn.Linear(dim_in, dim_out),
            )
        else:
            raise NotImplementedError('JigSaw head not supported: {}'.format(head))
        self.fc2 = nn.Linear(dim_out * k, dim_out)
        self.l2norm = Normalize(2)
        self.k = k  # number of jigsaw patches per image

    def forward(self, x):
        bsz = x.shape[0]
        projected = self.fc1(x)
        # ==== shuffle ====
        # this step can be moved to data processing step
        order = self.get_shuffle_ids(bsz)
        projected = projected[order]
        # ==== shuffle ====
        n_img = int(bsz / self.k)
        grouped = projected.view(n_img, -1)
        out = self.fc2(grouped)
        return self.l2norm(out)

    def get_shuffle_ids(self, bsz):
        """Indices that permute each consecutive group of k rows within
        itself; rows never cross group boundaries."""
        n_img = int(bsz / self.k)
        within = torch.cat([torch.randperm(self.k) for _ in range(n_img)], dim=0)
        group_base = torch.div(torch.arange(bsz), self.k).long() * self.k
        return within + group_base
#default settings taken from https://github.com/HobbitLong/PyContrast/tree/master/pycontrast
# Global configuration consumed by build_model() and load_encoder_weights().
OPT = {'method': 'InfoMin',
       'modal': 'RGB',        # 'RGB' (unimodal) vs CMC-style multimodal
       'jigsaw': True,        # True -> use the *MultiHeads model variants
       'mem': 'moco',         # 'moco' -> also build a momentum (EMA) encoder
       'arch': 'resnet50',
       'feat_dim': 128,
       'head': 'mlp',
       'ckpt': '/experimentos/pesos/infomin/InfoMin_800.pth', #custom path
       'aug_linear': 'NULL',
       'n_class': 1000,
       'aug': 'D'}

# Maps '<modal><branch>' keys (e.g. 'RGBMul') to model classes.
NAME_TO_FUNC = {
    'RGBSin': RGBSingleHead,
    'RGBMul': RGBMultiHeads,
    'CMCSin': CMCSingleHead,
    'CMCMul': CMCMultiHeads,
}
def load_encoder_weights(model):
    """load pre-trained weights for encoder

    Args:
        model: the encoder model; its encoder submodule(s) are updated
            in place from the checkpoint at OPT['ckpt'].

    Returns:
        (model, msg): the model and a load-status message.
    """
    msg = "Empty Message"
    if OPT['ckpt']:
        ckpt = torch.load(OPT['ckpt'], map_location='cpu')
        state_dict = ckpt['model']
        if OPT['modal'] == 'RGB':
            # Unimodal (RGB) case: strip DataParallel/encoder prefixes and
            # keep only encoder weights.
            encoder_state_dict = OrderedDict()
            for k, v in state_dict.items():
                k = k.replace('module.', '')
                if 'encoder' in k:
                    k = k.replace('encoder.', '')
                    encoder_state_dict[k] = v
            msg = model.encoder.load_state_dict(encoder_state_dict)
        else:
            # Multimodal (CMC) case: split the checkpoint between the two
            # encoders.
            encoder1_state_dict = OrderedDict()
            encoder2_state_dict = OrderedDict()
            for k, v in state_dict.items():
                k = k.replace('module.', '')
                if 'encoder1' in k:
                    k = k.replace('encoder1.', '')
                    encoder1_state_dict[k] = v
                if 'encoder2' in k:
                    k = k.replace('encoder2.', '')
                    encoder2_state_dict[k] = v
            # Fixed: load_state_dict returns an _IncompatibleKeys namedtuple,
            # not a str, so the old `msg += " " + ...` raised TypeError.
            msg1 = model.encoder1.load_state_dict(encoder1_state_dict)
            msg2 = model.encoder2.load_state_dict(encoder2_state_dict)
            msg = '{} {}'.format(msg1, msg2)
        print('Pre-trained weights loaded!', msg)
    else:
        print('==============================')
        print('warning: no pre-trained model!')
        print('==============================')
        msg = "warning: no pre-trained model!"
    return model, msg
def build_model():
    """Build the main model from OPT, plus an identically-configured
    momentum (EMA) copy when the memory mechanism is moco."""
    # specify modal key
    branch = 'Mul' if OPT['jigsaw'] else 'Sin'
    factory = NAME_TO_FUNC[OPT['modal'] + branch]

    model = factory(OPT['arch'], OPT['head'], OPT['feat_dim'])
    if OPT['mem'] == 'moco':
        model_ema = factory(OPT['arch'], OPT['head'], OPT['feat_dim'])
    else:
        model_ema = None
    return model, model_ema
if __name__ == '__main__':
    # Smoke test: build the model and load the pre-trained encoder weights.
    model, _ = build_model()
    model, msg = load_encoder_weights(model)
    print(msg)
|
VirtualSpaceman/ssl-skin-lesions
|
utils/build_backbone_infomin.py
|
build_backbone_infomin.py
|
py
| 10,323 |
python
|
en
|
code
| 7 |
github-code
|
6
|
437075340
|
#[3.2, [[1, 0, 1, 0, 2], [330, 0, 220, 180, 20], [190, 120, 190, 120, 290], [[230, 800, 530, 800, 430], [350, 480, 350, 50, 680]]], [[2, 1, 1], [0, 1, 0], [2, 1, 0], [0, 1, 1], [3, 0, 0]], [False, False, False, False, False]]
# consecutive classes longest consecutive class time time after counter days without classes (true for no class)
#teacher rating time between classes time before counter size slots of classes
def calculateFinal(ratingsObject, weight, minMax):
    """Compute a weighted desirability score for one candidate schedule.

    :param ratingsObject: [teacher_rating,
                           [consecutive, time_between, class_lengths,
                            [time_before, time_after]],
                           <unused>,
                           days_without_classes (list of 5 bools)]
    :param weight: 13 importance/direction settings (see the index map below).
    :param minMax: five (min, max) bounds used to normalize each metric, in
                   the order: between, before, after, class length, consecutive.
    :returns: the weighted score, or -1 when the longest class exceeds the
              cap in weight[8] (0 disables the cap).

    weight index map:
      0 teacher rating | 1/2 class start (+dir) | 3 days off
      4/5 class length (+dir) | 6/7 time between (+dir) | 8 max class time
      9/10 class end (+dir) | 11/12 consecutive classes (+dir)

    Refactored: the five hand-expanded normalize-and-flip stanzas now share
    one helper; the arithmetic (including flipping a zero-range score to 10)
    is unchanged.
    """
    # Hard filter: reject schedules whose longest class exceeds the cap.
    if max(ratingsObject[1][2]) > weight[8] and weight[8] != 0:
        return -1

    def _scaled(values, bounds, invert):
        # Map sum(values) into [0, 10] within bounds; 10 means "at the
        # minimum". A zero-width range scores 0 (or 10 when inverted, to
        # match the original flip-after-default behavior).
        span = float(bounds[1]) - float(bounds[0])
        score = 0
        if span != 0:
            score = (span - (sum(values) - float(bounds[0]))) / span * 10
        return 10 - score if invert else score

    time_between = _scaled(ratingsObject[1][1], minMax[0], weight[7] == 1)
    time_before = _scaled(ratingsObject[1][3][0], minMax[1], weight[2] == 1)
    time_after = _scaled(ratingsObject[1][3][1], minMax[2], weight[10] == -1)
    class_length = _scaled(ratingsObject[1][2], minMax[3], weight[5] == -1)
    consecutive = _scaled(ratingsObject[1][0], minMax[4], weight[12] == 1)

    # Each free day contributes 6 points (30 spread over a 5-day week).
    days_off = ratingsObject[3].count(True) / float(5) * 30

    calculated = 0
    calculated += weight[3] * days_off
    calculated += time_between * weight[6]
    calculated += time_before * weight[1]
    calculated += time_after * weight[9]
    calculated += consecutive * weight[11]
    calculated += weight[0] * ratingsObject[0] * 2  # teacher rating
    calculated += class_length * weight[4]
    return calculated
|
iam4722202468/GuelphScheduler
|
python/finalRating.py
|
finalRating.py
|
py
| 3,051 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39441402911
|
from mlearn import base
from functools import reduce
from datetime import datetime
from mlearn.data.dataset import GeneralDataset
from mlearn.data.batching import Batch, BatchExtractor
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def process_and_batch(dataset: GeneralDataset, data: base.DataType, batch_size: int, onehot: bool = True,
                      shuffle: bool = False, **kwargs):
    """
    Encode the labels of a dataset, batch it, and optionally shuffle.

    :dataset (GeneralDataset): Dataset object used for label processing.
    :data (base.DataType): Data to process and batch.
    :batch_size (int): Number of items per batch.
    :onehot (bool, default = True): One-hot encode labels in the extractor.
    :shuffle (bool, default = False): Shuffle the resulting batches.
    :returns: Batched data.
    """
    # Encode labels before batching.
    dataset.process_labels(data)

    batcher = Batch(batch_size, data)
    batcher.create_batches()
    extracted = BatchExtractor('label', batcher, dataset, onehot)

    if shuffle:
        extracted.shuffle()

    return extracted
def get_deep_dict_value(source: dict, keys: str, default = None):
    """
    Look up a value in a deeply nested dict.

    :source (dict): Dictionary to read from.
    :keys (str): Key path separated by '|', e.g. 'outer|middle|inner'.
    :default: Value returned when any step of the path is missing.
    """
    current = source
    for key in keys.split("|"):
        # Mirror the reduce-based lookup: descend while we still have a
        # dict, otherwise fall back to the default for the remaining steps.
        current = current.get(key, default) if isinstance(current, dict) else default
    return current
def select_vectorizer(vectorizer: str = 'dict') -> base.VectType:
    """
    Identify the requested vectorizer and return a configured instance.

    :vectorizer, default = 'dict': One of 'dict', 'tfidf', or 'count'
        (matched as a substring, case-insensitively).
    :return v: Vectorizer instance with 'name' and 'fitted' attributes set.
    :raises ValueError: When the name matches no known vectorizer.
    """
    vect = vectorizer.lower()

    if 'dict' in vect:
        v = DictVectorizer()
        setattr(v, 'name', 'DictVectorizer')
    elif 'tfidf' in vect:
        v = TfidfVectorizer()
        setattr(v, 'name', 'TFIDF-Vectorizer')
    elif 'count' in vect:
        v = CountVectorizer()
        setattr(v, 'name', 'CountVectorizer')
    else:
        # Fixed: an unknown name previously left `v` unbound, so the
        # setattr below raised a confusing NameError.
        raise ValueError('Unknown vectorizer: {}'.format(vectorizer))

    setattr(v, 'fitted', False)

    return v
def _get_datestr():
return datetime.now().strftime('%Y.%m.%d.%H.%M.%S')
def hyperparam_space(search_space: base.List[dict], hyper_parameters: base.List[base.Tuple]
                     ) -> base.List[dict]:
    """
    Expand a search space with every combination of hyper-parameter values.

    :search_space (base.List[dict]): Partial combinations built so far
        (start with [{}] or a list of seed dicts).
    :hyper_parameters (base.List[base.Tuple]): (name, values) pairs; each
        pair multiplies the space by len(values).
    :returns search_space (base.List[dict]): All parameter combinations.
    """
    for param_name, param_space in hyper_parameters:
        # Cross every existing combination with every value of this parameter.
        search_space = [
            {**combination, param_name: value}
            for combination in search_space
            for value in param_space
        ]
    return search_space
|
zeeraktalat/mlearn
|
mlearn/utils/pipeline.py
|
pipeline.py
|
py
| 2,898 |
python
|
en
|
code
| 2 |
github-code
|
6
|
74377247228
|
'''
@Author: Never
@Date: 2020-06-13 11:02:05
@Description:
@LastEditTime: 2020-07-14 15:20:19
@LastEditors: Never
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/4/27 19:47
# @Author : Shark
# @Site :
# @File : lepin1.py
# @Software: PyCharm
import csv
import requests
import json
import random
import time
# Timestamp the run so the total duration can be reported at the end.
start =time.time()
print('程序开始时间:%s'%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(start))))
WinStatuslist=[]  # WinStatus flag collected for every successful order
i=0  # number of CSV rows (users) processed
# Each CSV row holds a member id and an address id.
with open('C:\\Users\\lhx\\Desktop\\user1.csv','rt') as myfile:
    lines=csv.reader(myfile)
    for memberid,addid in lines:
        # Write the current member id to the shared file — presumably read
        # by the shop H5 front-end to impersonate this user; confirm.
        with open(r'\\192.168.0.200\shop.h5\MemberId.json', 'w') as m:
            m.write(memberid)
        t=0
        # Place 20 activity orders for this user.
        while t<20:
            date={
                "productid":216869,
                "lpTimes":1,
                "addressid":addid,
                "isPay":'true',
                "useBalance":10.9,
            }
            url = "http://192.168.0.200:818/order/ActivityOrderConfirm"
            response = requests.post(url,data=date)
            text=response.text
            jsonobj=json.loads(text)
            if jsonobj['success']==200:
                totext=jsonobj['data']['OrderIdList']
                # Immediately trigger the lottery draw for the new order.
                url="http://192.168.0.200:818/HappyOrder/ForthWithOrder"
                data={"happyOrderId":totext,
                      "chooseNumber":random.randint(0,9)}
                response=requests.post(url,data=data)
                text=response.text
                jsonobj=json.loads(text)
                totext=jsonobj['data']['WinStatus']
                WinStatuslist.append(totext)
            else:
                # Order creation failed: dump the payload and skip this user.
                print(jsonobj)
                break
            t+=1
            # time.sleep(1)
        i+=1
        print(i)
# Tally wins (WinStatus == 1) and report the observed hit rate.
m=0
print(WinStatuslist)
for j in WinStatuslist:
    if j==1:
        m+=1
i=20*i  # total orders attempted (20 per user)
print("订单数:%s"%i)
print("中奖次数:%s"%m)
s=m/i*100
print('中奖概率:{:.2f}%'.format(s))
end =time.time()
print('程序结束时间:%s'%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(end))))
print("循环运行时间:%.2f秒"%(end-start))
|
gitxzq/py
|
lepin1.py
|
lepin1.py
|
py
| 2,131 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27986132061
|
# dataset settings
# mmdetection-style config for the DIOR dataset in Pascal VOC layout.
dataset_type = 'DIORVOCDataset'
data_root = 'data/DIOR_VOC/'
# ImageNet channel mean/std, RGB order.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(800, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    # Training set is repeated 3x per epoch via RepeatDataset.
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=[
                data_root + 'ImageSets/Main/trainval.txt'
            ],
            img_prefix=[data_root],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'ImageSets/Main/test.txt',
        img_prefix=data_root,
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'ImageSets/Main/test.txt',
        img_prefix=data_root,
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
# The 20 DIOR object categories.
cat_name_list = ['ship', 'overpass', 'tenniscourt', 'stadium', 'vehicle',
                 'airplane', 'storagetank', 'dam', 'golffield', 'trainstation',
                 'Expressway-Service-area', 'groundtrackfield', 'Expressway-toll-station',
                 'windmill', 'airport', 'harbor', 'baseballfield',
                 'basketballcourt', 'bridge', 'chimney']
num_classes = len(cat_name_list)  # 20
max_bbox_per_img = 600
|
cenchaojun/mmd_rs
|
DOTA_configs/_base_/datasets/DIOR_VOC.py
|
DIOR_VOC.py
|
py
| 2,196 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19274830613
|
#!/usr/bin/env python
'''
Created on Jun 28, 2016
@author: isvoboda
'''
from __future__ import print_function
import sys
import multiprocessing
import logging
import yaml
import argparse
from collections import OrderedDict
import cnn_image_processing as ci
import signal
# Exit cleanly on Ctrl-C instead of printing a KeyboardInterrupt traceback.
signal.signal(signal.SIGINT, lambda x, y: sys.exit(1))
LOGGER = logging.getLogger("cnn_image_processing")
def parse_phase(conf):
    """
    Build the provider/sampler pipeline for one net phase (Train or Test).

    Returns a dict with keys 'pque' and 'sque' (multiprocessing queues, sized
    from 'provider_queue_size'/'sample_queue_size' with defaults 5/512) and
    'provider'/'sampler' (None when the phase config omits that section).
    """
    creator = ci.Creator
    modules = {
        'pque': multiprocessing.Queue(conf.get('provider_queue_size', 5)),
        'sque': multiprocessing.Queue(conf.get('sample_queue_size', 512)),
        'provider': None,
        'sampler': None,
    }
    if 'Provider' in conf:
        provider = creator.create_provider(conf['Provider'])
        provider.out_queue = modules['pque']
        modules['provider'] = provider
    if 'Sampler' in conf:
        sampler = creator.create_sampler(conf['Sampler'])
        # Sampler consumes provider output and emits training samples.
        sampler.in_queue = modules['pque']
        sampler.out_queue = modules['sque']
        modules['sampler'] = sampler
    return modules
def parse_config(conf=None):
    """
    Parse the train_cnn application configuration.

    Builds the Train phase, the optional Test phases (falling back to the
    Train Provider/Sampler configuration when a Test phase omits its own),
    and the Trainer, then wires their queues together.

    :param conf: parsed YAML configuration dict
    :returns: dict with keys 'Train', 'Trainer' and optionally 'Test'
    """
    creator = ci.Creator
    app = {}
    app['Train'] = parse_phase(conf['Train'])
    app['Train']['provider'].out_queue = app['Train']['pque']
    app['Train']['sampler'].in_queue = app['Train']['pque']
    app['Train']['sampler'].out_queue = app['Train']['sque']
    in_ques = []
    if 'Test' in conf:
        test_nets = OrderedDict()
        # list(...) so this also works on Python 3, where dict.keys() is a
        # non-indexable view (the original `keys()[0]` is Python-2-only).
        test_net_list = [list(test_net.keys())[0] for test_net in conf['Test']]
        test_net_list.sort()
        for i_key, net_key in enumerate(test_net_list):
            test_nets[net_key] = parse_phase(conf['Test'][i_key][net_key])
            if test_nets[net_key]['provider'] is None:
                # Fall back to the Train provider configuration.
                tprovider = creator.create_provider(conf['Train']['Provider'])
                tprovider.out_queue = test_nets[net_key]['pque']
                test_nets[net_key]['provider'] = tprovider
            if test_nets[net_key]['sampler'] is None:
                # BUG FIX: the original indexed a bare list literal
                # (['Train']['Sampler']) which raises TypeError; the Train
                # sampler configuration was clearly intended.
                tsampler = creator.create_sampler(conf['Train']['Sampler'])
                tsampler.in_queue = test_nets[net_key]['pque']
                tsampler.out_queue = test_nets[net_key]['sque']
                test_nets[net_key]['sampler'] = tsampler
            in_ques.append(test_nets[net_key]['sque'])
        app['Test'] = test_nets
    app['Trainer'] = creator.create_trainer(conf['Trainer'])
    app['Trainer'].train_in_queue = app['Train']['sque']
    app['Trainer'].test_in_queue = in_ques
    return app
def main():
    '''
    Entry point.

    Parses command-line arguments, loads the YAML configuration, builds the
    provider/sampler/trainer pipeline via parse_config() and runs training
    until the Trainer process finishes.
    '''
    parser = argparse.ArgumentParser(description="Train the cnn")
    parser.add_argument("-c", "--conf-file", action='store', type=str,
                        choices=None, required=True, help="Configuration file",
                        metavar=None, dest='conf_file')
    parser.add_argument("-s", "--solver-file", action='store', type=str,
                        choices=None, required=True, help="Solver file",
                        metavar=None, dest='solver_file')
    parser.add_argument("-v", "--verbose", action="store_true", required=False,
                        help="Set the verbose mode.", dest='verbose')
    parser.add_argument("-tr", "--train-list", action='store', type=str,
                        help="Training file list", required=True,
                        dest='train_list')
    parser.add_argument("-te", "--test-lists", action='store',
                        nargs='*', type=str, default=None,
                        required=False, dest='test_lists',
                        help="Training file lists")
    args = parser.parse_args()
    # Print the arguments.  items() works on both Python 2 and 3; the
    # original iteritems() raises AttributeError under Python 3.
    for key, val in vars(args).items():
        print("{}: {}".format(key, val))
    # Initialize logging
    if args.verbose:
        LOGGER.setLevel(logging.DEBUG)
    else:
        LOGGER.setLevel(logging.INFO)
    logging.basicConfig()
    config_file = args.conf_file
    solver_file = args.solver_file
    train_list = args.train_list
    test_lists = args.test_lists
    # Open, parse and print the configuration file
    with open(config_file) as cf_file:
        conf = yaml.safe_load(cf_file)
        print(yaml.dump(conf))
    app = parse_config(conf)
    # Start the train pipeline processes.
    app['Train']['provider'].file_list = train_list
    app['Train']['provider'].start()
    app['Train']['sampler'].start()
    # One file list is required per configured Test net.
    if test_lists is not None:
        assert len(test_lists) == len(app['Test'])
        for i_test, test_k in enumerate(app['Test']):
            app['Test'][test_k]['provider'].file_list = test_lists[i_test]
            app['Test'][test_k]['provider'].start()
            app['Test'][test_k]['sampler'].start()
    app['Trainer'].solver_file = solver_file
    app['Trainer'].start()
    app['Trainer'].join()
if __name__ == "__main__":
    main()
|
DCGM/cnn-image-processing
|
bin/train_cnn.py
|
train_cnn.py
|
py
| 5,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8773605987
|
import streamlit as st
from utils import get_modelpaths
from Scripts.video_processor import webcam_input
def main():
    """Streamlit entry point: choose an AnimeGANv2 model and run webcam stylization."""
    available_models = [
        "AnimeGANv2_Hayao",
        "AnimeGANv2_Shinka",
        "AnimeGANv2_Paprika",
    ]
    st.title("Real-time Anime to Anime Converter")
    chosen_model = st.selectbox("Select model name", available_models)
    webcam_input(get_modelpaths(chosen_model))
if __name__ == "__main__":
    main()
|
avhishekpandey/RealTime_video-to-anime
|
app.py
|
app.py
|
py
| 427 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19203747427
|
try:
import usocket as socket
except:
import socket
import re
from tof_i2c import TOF10120
tof=TOF10120()
import Site
import network
from machine import Pin
from neopixel import NeoPixel
import time
import math
from Site import WebPage
from Distance import DistanceMethods
from Sides import SidesMethods
from BrakeDrive import BrakeDriveMethods
DS = DistanceMethods()  # distance/"closing" LED animations
SM = SidesMethods()     # side/brake/drive LED animations
# Main request loop: accept one HTTP connection, look for the scenario name
# in the request line, run the matching LED animation, then serve the page.
# NOTE(review): the listening socket `s` is never created in this file —
# presumably set up elsewhere (e.g. in Site); confirm before running.
while True:
    conn, addr = s.accept()
    print('Got a connection from %s' % str(addr))
    request = conn.recv(1024)
    request = str(request)
    print('Content = %s' % str(request))
    # str.find() == 6 means the query directly follows "GET /" in the
    # request line, i.e. that particular button was pressed.
    LeftScenario1 = request.find('/?LeftScenario1')
    LeftScenario2 = request.find('/?LeftScenario2')
    LeftScenario3 = request.find('/?LeftScenario3')
    LeftScenario4 = request.find('/?LeftScenario4')
    LeftScenario5 = request.find('/?LeftScenario5')
    RightScenario1 = request.find('/?RightScenario1')
    RightScenario2 = request.find('/?RightScenario2')
    RightScenario3 = request.find('/?RightScenario3')
    RightScenario4 = request.find('/?RightScenario4')
    RightScenario5 = request.find('/?RightScenario5')
    ClosingScenario1 = request.find('/?ClosingScenario1')
    ClosingScenario2 = request.find('/?ClosingScenario2')
    ClosingScenario3 = request.find('/?ClosingScenario3')
    ClosingScenario4 = request.find('/?ClosingScenario4')
    BrakeScenario1 = request.find('/?BrakeScenario1')
    BrakeScenario2 = request.find('/?BrakeScenario2')
    BrakeScenario3 = request.find('/?BrakeScenario3')
    DriveScenario1 = request.find('/?DriveScenario1')
    DriveScenario2 = request.find('/?DriveScenario2')
    DriveScenario3 = request.find('/?DriveScenario3')
    OffSide = request.find('/?NeoOffSides')
    OffColl = request.find('/?NeoOffColl')
    ToggleBool = 1
    # NOTE(review): toggleon is never set to 6, so the TOF branch below
    # appears to be dead code — confirm whether a '/?...' handler is missing.
    toggleon = 0
    if LeftScenario1 == 6:
        SM.SideAnimationFullLEFT()
    if LeftScenario2 == 6:
        SM.SideAnimationPartLEFT()
    if LeftScenario3 == 6:
        SM.SideFullRotationLEFT()
    if LeftScenario4 == 6:
        SM.SideFadeLEFT()
    if LeftScenario5 == 6:
        SM.SideBlinkerLEFT()
    if RightScenario1 == 6:
        SM.SideAnimationFullRIGHT()
    if RightScenario2 == 6:
        SM.SideAnimationPartRIGHT()
    if RightScenario3 == 6:
        SM.SideFullRotationRIGHT()
    if RightScenario4 == 6:
        SM.SideFadeRIGHT()
    if RightScenario5 == 6:
        SM.SideBlinkerRIGHT()
    if BrakeScenario1 == 6:
        SM.BrakePulse()
    if BrakeScenario2 == 6:
        SM.BrakeBlink()
    if BrakeScenario3 == 6:
        SM.BrakeFade()
    if DriveScenario1 == 6:
        SM.DrivePulse()
    if DriveScenario2 == 6:
        SM.DriveBlink()
    if DriveScenario3 == 6:
        SM.DriveFade()
    if ClosingScenario1 == 6:
        # Sweep the simulated closing distance from 2000 down to 0 in steps
        # of 100, pulsing every 100 ms.
        x = 2000
        while x >= 0:
            DS.ClosingPulse(14, x, 1)
            time.sleep_ms(100)
            x = x - 100
    if ClosingScenario2 == 6:
        DS.ClosingStoplichtBlink()
    if ClosingScenario3 == 6:
        DS.ClosingRed()
    if ClosingScenario4 == 6:
        DS.ClosingSwipe()
    if OffSide == 6:
        SM.off()
        print('off sides')
    if OffColl == 6:
        DS.off()
        print('off Distance')
    if toggleon == 6:
        ToggleBool = 0
        print('TOF On')
        counter = 0
    # Live time-of-flight mode: display the measured distance for about
    # 10 seconds (100 iterations x 100 ms) before returning to the server.
    while ToggleBool == 0:
        time.sleep_ms(100)
        distance = tof.get_distance_filtered()
        print(distance)
        # NOTE(review): maxLeds is not defined in this file — confirm source.
        DS.Closing(10, distance, maxLeds)
        counter = counter + 1
        print('distance ' + str(distance))
        print('tijd ' + str(counter))
        if counter >= 100:
            ToggleBool = 1
            # NOTE(review): LC is not defined in this file — confirm object.
            LC.off()
    response = WebPage()
    conn.send(response)
    conn.close()
|
TomD14/HoReCaRobot
|
Horizontal Led strip User Test/main.py
|
main.py
|
py
| 3,920 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21672470765
|
#!/usr/bin/python
#coding:utf-8
"""
Author: Andy Tian
Contact: [email protected]
Software: PyCharm
Filename: get_heatMap_html.py
Time: 2019/2/21 10:51
"""
import requests
import re
def get_html():
    '''
    Fetch the source of the Baidu heat-map demo page.

    :returns: the page HTML with all newlines and tabs stripped
    '''
    url = "http://lbsyun.baidu.com/jsdemo/demo/c1_15.htm"
    header = {
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6788.400 QQBrowser/10.3.2864.400",
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding":"gzip, deflate",
        "Accept-Language":"zh-CN,zh;q=0.9"
    }
    # Use the public Response.content instead of the private _content
    # attribute, which is an implementation detail of requests.
    htmlstr = requests.get(url, headers=header).content.decode()
    htmlstr_formated = htmlstr.replace("\n", '').replace("\t", '')
    return htmlstr_formated
def modify_html(htmlstr):
    '''
    Adapt the Baidu heat-map demo page to this project:
      1) enlarge the map container (height 500px -> 80%),
      2) shrink the heat radius (20 -> 10) and raise the max count to 120000,
      3) replace the demo data points with our scraped lng/lat data.
    NOTE(review): the docstring of the original also mentions replacing the
    'ak' API key and map center point, but no such replacement is performed
    below — confirm whether that is still required.

    :param htmlstr: demo page HTML (whitespace-stripped, see get_html)
    :returns: the modified HTML
    '''
    # Read the scraped data; 'with' closes the handle deterministically
    # (the original leaked the open file object).
    with open("G:\Python\Project\Spider\scrapyProject\lianjia\lon_lat.json") as data:
        datastr = data.read()
    htmlstr = htmlstr.replace("height:500px","height:80%").replace('{"radius":20}','{"radius":10}').replace("max:100","max:120000")
    # Collect every demo data point and replace the whole run with our data.
    be_replaced_data = ",\n".join(re.findall(r'{"lng":.*"count":\d*}', htmlstr))
    htmlstr_modified = htmlstr.replace(be_replaced_data, datastr)
    return htmlstr_modified
def rewrite_html(html_str):
    '''
    Write the HTML document to heat.html (UTF-8).

    :param html_str: HTML source to persist.  Renamed from `str`, which
        shadowed the builtin; existing positional callers are unaffected.
    '''
    with open("heat.html", "w", encoding="utf-8") as f:
        f.write(html_str)
if __name__ == "__main__":
htmlstr = get_html()
htmlstr_modified = modify_html(htmlstr)
write_html(htmlstr_modified)
|
tianzheyiran/HeatMap
|
get_heatMap_html.py
|
get_heatMap_html.py
|
py
| 2,229 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74693961467
|
import torch
import time
import torch.nn.functional as F
def train(model, device, train_loader, optimizer, epoch):  # 训练模型
    """
    Run one training epoch.

    Each batch provides three input tensors and a label tensor; the three
    inputs are passed to the model as a list and cross-entropy is optimized
    on the first element of the model output.  Loss is logged every 100
    batches.  (Removed the unused `best_acc` and `start_time` locals.)

    :param model: torch.nn.Module whose forward accepts [x1, x2, x3] and
        returns a tuple/list whose first element is the class logits
    :param device: device the batch tensors are moved to
    :param train_loader: iterable of (x1, x2, x3, y) batches; needs a
        .dataset attribute only for the periodic log line
    :param optimizer: optimizer over model.parameters()
    :param epoch: epoch number, used only in the log output
    """
    model.train()
    for batch_idx, (x1, x2, x3, y) in enumerate(train_loader):
        x1, x2, x3, y = x1.to(device), x2.to(device), x3.to(device), y.to(device)
        out = model([x1, x2, x3])  # forward pass
        y_pred = out[0]
        model.zero_grad()  # clear accumulated gradients
        loss = F.cross_entropy(y_pred, y.squeeze())
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 100 == 0:  # periodic loss logging
            # .item() extracts the Python scalar so logging holds no graph.
            print('Train Epoch: {} [{}/{} ({:.2f}%)]\t\tLoss: {:.6f}'.format(epoch, (batch_idx+1) * len(x1),
                                                                             len(train_loader.dataset),
                                                                             100. * (batch_idx+1) / len(train_loader),
                                                                             loss.item()))
def test(model, device, test_loader):
    """
    Evaluate the model on the test set.

    Computes the per-batch-averaged cross-entropy loss and overall accuracy
    over test_loader and prints a summary line.

    :returns: accuracy in [0, 1] (correct predictions / len(test_loader.dataset))
    """
    model.eval()
    test_loss = 0.0
    acc = 0
    for batch_idx, (x1, x2, x3, y) in enumerate(test_loader):
        x1, x2, x3, y = x1.to(device), x2.to(device), x3.to(device), y.to(device)
        with torch.no_grad():  # no gradients needed during evaluation
            out = model([x1, x2, x3])
            y_ = out[0]
        test_loss += F.cross_entropy(y_, y.squeeze())
        # .max() returns (values, indices); indices along the last dim is the
        # predicted class.
        pred = y_.max(-1, keepdim=True)[1]
        acc += pred.eq(y.view_as(pred)).sum().item()  # .item() -> Python int
    # Average over the number of batches (not samples).
    test_loss /= len(test_loader)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        test_loss, acc, len(test_loader.dataset),
        100. * acc / len(test_loader.dataset)))
    return acc / len(test_loader.dataset)
def test_lem(model, device, test_loader):
    """
    Evaluate the model and collect embeddings.

    Like test(), but additionally gathers the per-batch input embeddings (V),
    the label-embedding matrix (C) and the predicted labels; the model forward
    must return (logits, V, C).

    :returns: (accuracy, input_embeddings, label_embeddings, labels)
    """
    model.eval()
    input_embeddings, label_embeddings, labels = [], [], []
    test_loss = 0.0
    acc = 0
    for batch_idx, (x1, x2, x3, y) in enumerate(test_loader):
        x1, x2, x3, y = x1.to(device), x2.to(device), x3.to(device), y.to(device)
        with torch.no_grad():
            out = model([x1, x2, x3])
            y_, V, C = out[0], out[1], out[2]
            input_embeddings.append(V.cpu())
            # NOTE(review): overwritten every batch, so only the last batch's
            # C is returned — confirm that is intended.
            label_embeddings = C.cpu()
        test_loss += F.cross_entropy(y_, y.squeeze())
        # argmax over the class dimension -> predicted labels
        pred = y_.max(-1, keepdim=True)[1]
        labels.append(pred)
        acc += pred.eq(y.view_as(pred)).sum().item()  # .item() -> Python int
    test_loss /= len(test_loader)
    return acc / len(test_loader.dataset), input_embeddings, label_embeddings, labels
|
Huasheng-hou/r2-nlp
|
src/utils.py
|
utils.py
|
py
| 2,866 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27259799820
|
"""We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""257. Binary Tree Paths
"""
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, val):
        """Create a leaf node holding *val*; children start as None."""
        self.val, self.left, self.right = val, None, None
class Solution:
    """Collect all root-to-leaf paths of a binary tree (LeetCode 257)."""

    def dfs(self, root, curr_path):
        """Walk the subtree at *root*, appending each finished path to self.paths."""
        if not root:
            return None
        curr_path += str(root.val)
        if not root.left and not root.right:
            # Leaf reached: the accumulated path is complete.
            self.paths.append(curr_path)
            return None
        for child in (root.left, root.right):
            self.dfs(child, curr_path + "->")

    def binaryTreePaths(self, root):
        """Return every root-to-leaf path formatted as 'a->b->c' strings."""
        if not root:
            return []
        self.paths = []
        self.dfs(root, "")
        return self.paths
|
asperaa/back_to_grind
|
Trees/binary_tree_paths.py
|
binary_tree_paths.py
|
py
| 777 |
python
|
en
|
code
| 1 |
github-code
|
6
|
35347629144
|
import json
# Load the existing student records.
with open('mahasiswa.json', 'r') as file:
    a = json.load(file)
b = dict()
# Ask how many new students to enter (prompts are in Indonesian).
# NOTE(review): the prompt typo "Jumkah" and the key "Prestasti" are runtime
# strings/schema — fixing them would change program output and stored data.
c = int(input("Masukkan Jumkah Mahasiswa baru : "))
for i in range(c):
    nm = input("Masukkan nama anda: ")
    hb = []  # list of hobbies for this student
    untuk_hobi = int(input("Masukkan jumlah hobi: "))
    for j in range(untuk_hobi):
        hb1 = input("Masukkan hobi ke-{} : ".format(j+1))
        hb.append(hb1)
    per = input("Masukkan prestasi anda: ")
    print("====Data Berhasil ditambahkan===")
    print()
    # Keyed by student name; a duplicate name overwrites the earlier entry.
    b[nm] = [{"Biodata": {"Hobi": hb, "Prestasti" : per}}]
    a.update(b)
# Persist the merged records back to disk.
with open('mahasiswa.json', 'w') as file:
    json.dump(a, file)
|
TIRSA30/strukdat_04_71210700
|
ug4.py
|
ug4.py
|
py
| 705 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28924320598
|
import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import random
from sqlalchemy import func
from models import setup_db, Question, Category
QUESTIONS_PER_PAGE = 10
# Create APP and settings cors headers
def create_app(test_config=None):
    """
    Application factory for the Trivia API.

    Wires up SQLAlchemy (setup_db), permissive CORS for /api/*, all API
    routes and the JSON error handlers, then returns the configured app.

    :param test_config: accepted for the factory convention but currently
        unused in this implementation.
    """
    app = Flask(__name__)
    setup_db(app)
    cors = CORS(app, resources={r"/api/*": {"origins": "*"}})

    @app.after_request
    def after_request(response):
        # Allow any origin to send auth headers and use the REST verbs below.
        response.headers.add('Access-Control-Allow-Headers',
                             'Content-Type,Authorization,true')
        response.headers.add('Access-Control-Allow-Methods',
                             'GET,PATCH,POST,DELETE,OPTIONS')
        return response

    # Paginate method
    def paginate_questions(request, questions):
        # Slice the formatted question list to the requested 1-based ?page=
        # query argument, QUESTIONS_PER_PAGE items per page.
        page = request.args.get('page', 1, type=int)
        start = (page - 1) * QUESTIONS_PER_PAGE
        end = start + QUESTIONS_PER_PAGE
        questions = [question.format() for question in questions]
        paginated_questions = questions[start:end]
        return paginated_questions

    # Questions API with pagination
    @app.route('/api/questions', methods=['GET'])
    def get_questions_with_pagination():
        # error_code doubles as the abort status: 422 unless an empty result
        # was detected (404).  NOTE(review): the bare except hides unexpected
        # errors behind the same status code.
        error_code = 422
        try:
            categories = Category.query.all()
            questions = Question.query.all()
            formatted_questions = paginate_questions(request, questions)
            formatted_categories = [category.format()
                                    for category in categories]
            if len(formatted_categories) == 0 or len(formatted_questions) == 0:
                error_code = 404
                abort(error_code)
            # Distinct categories present on the current page, in page order.
            current_categories = []
            for question in formatted_questions:
                category = question['category']
                if not (category in current_categories):
                    current_categories.append(category)
            return jsonify({
                'success': True,
                'questions': formatted_questions,
                'total_questions': len(questions),
                'current_category': current_categories,
                'categories': formatted_categories
            })
        except:
            abort(error_code)

    # Categories API
    @app.route('/api/categories', methods=['GET'])
    def get_categories():
        try:
            categories = Category.query.all()
            formatted_categories = [category.format()
                                    for category in categories]
            if len(formatted_categories) == 0:
                abort(404)
            return jsonify({
                'success': True,
                'categories': formatted_categories,
                'total_categories': len(formatted_categories)
            })
        except:
            # NOTE(review): this also converts the 404 above into a 422.
            abort(422)

    # Delete Question API
    @app.route('/api/questions/<int:question_id>', methods=['DELETE'])
    def delete_question(question_id):
        question = Question.query.filter_by(id=question_id).first()
        if question is None:
            abort(404)
        try:
            question.delete()
            return jsonify({
                'success': True,
                'question': question_id
            })
        except:
            abort(405)

    # Create Question API
    @app.route('/api/questions/create', methods=['POST'])
    def new_question():
        try:
            body = request.get_json()
            new_question = body.get('question', None)
            new_answer = body.get('answer', None)
            new_category = body.get('category', None)
            new_difficulty = body.get('difficulty', None)
            question = Question(
                question=new_question,
                answer=new_answer,
                category=new_category,
                difficulty=new_difficulty)
            question.insert()
            return jsonify({
                'success': True,
                'created': question.id
            })
        except:
            abort(422)

    # Get Questions by Category API
    @app.route(
        '/api/category/<int:question_category>/questions',
        methods=['GET']
    )
    def get_questions_by_categories(question_category):
        error_code = 422
        try:
            questions = Question.query.filter(
                question_category == Question.category).all()
            formatted_questions = paginate_questions(request, questions)
            if len(formatted_questions) == 0:
                error_code = 404
                abort(error_code)
            current_categories = []
            for question in formatted_questions:
                category = question['category']
                if not (category in current_categories):
                    current_categories.append(category)
            return jsonify({
                'success': True,
                'questions': formatted_questions,
                'total_questions': len(formatted_questions),
                'current_categories': current_categories,
            })
        except:
            abort(error_code)

    # Get Question by Search Term API
    @app.route('/api/questions/search', methods=['POST'])
    def search_questions():
        body = request.get_json()
        search_term = body.get('searchTerm', None)
        # Case-insensitive substring match; spaces are escaped for ilike.
        search = "%{}%".format(search_term.replace(" ", "\ "))
        data = Question.query.filter(Question.question.ilike(search)).all()
        formatted_questions = [question.format() for question in data]
        if len(formatted_questions) == 0:
            abort(404)
        try:
            current_categories = []
            for question in formatted_questions:
                category = question['category']
                if not (category in current_categories):
                    current_categories.append(category)
            return jsonify({
                'success': True,
                'questions': formatted_questions,
                'totalQuestions': len(formatted_questions),
                'current_categories': current_categories,
                'search': search_term
            })
        except:
            abort(422)

    # Get Question to Play Quiz API
    @app.route('/api/quizzes', methods=['POST'])
    def post_quiz_questions():
        code = 422
        try:
            request_quiz = request.get_json()
            previous_questions = request_quiz.get('previous_questions')
            quiz_category = request_quiz.get('quiz_category')
            # Exclude already-played questions; 0 means "all categories".
            question = Question.query
            question = question.filter(~Question.id.in_(previous_questions))
            if quiz_category != 0:
                question = question.filter(Question.category == quiz_category)
            questions_random = question.order_by(func.random()).first()
            if not questions_random:
                # No questions left for this quiz round.
                return(jsonify({
                    'success': True,
                    'previous_question': len(previous_questions)
                }))
            return jsonify({
                'success': True,
                'question': questions_random.format(),
                'previous_question': previous_questions
            })
        except:
            abort(code)

    @app.errorhandler(404)
    def not_found(error):
        return jsonify({
            "success": False,
            "error": 404,
            "message": "resource not found"
        }), 404

    @app.errorhandler(422)
    def unprocessable(error):
        return jsonify({
            "success": False,
            "error": 422,
            "message": "unprocessable"
        }), 422

    @app.errorhandler(400)
    def bad_request(error):
        return jsonify({
            "success": False,
            "error": 400,
            "message": "bad request"
        }), 400

    @app.errorhandler(405)
    def method_not_allowed(error):
        return jsonify({
            "success": False,
            "error": 405,
            "message": "Method Not Allowed"
        }), 405

    return app
|
steffaru/udacity-trivia-api-project
|
starter/backend/flaskr/__init__.py
|
__init__.py
|
py
| 8,097 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20914243110
|
"""added columns to Places
Revision ID: cba44d27f422
Revises: 061ea741f852
Create Date: 2023-06-28 15:56:11.475592
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cba44d27f422'
down_revision = '061ea741f852'
branch_labels = None
depends_on = None
def upgrade():
    """Add website/photo/price_level/user_ratings_total/rating columns to places."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('places', schema=None) as batch_op:
        batch_op.add_column(sa.Column('website', sa.String(), nullable=True))
        batch_op.add_column(sa.Column('photo', sa.String(), nullable=True))
        batch_op.add_column(sa.Column('price_level', sa.Integer(), nullable=True))
        batch_op.add_column(sa.Column('user_ratings_total', sa.Integer(), nullable=True))
        batch_op.add_column(sa.Column('rating', sa.Float(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the columns added by upgrade(), in reverse order."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('places', schema=None) as batch_op:
        batch_op.drop_column('rating')
        batch_op.drop_column('user_ratings_total')
        batch_op.drop_column('price_level')
        batch_op.drop_column('photo')
        batch_op.drop_column('website')
    # ### end Alembic commands ###
|
choihalim/halfway
|
server/migrations/versions/cba44d27f422_added_columns_to_places.py
|
cba44d27f422_added_columns_to_places.py
|
py
| 1,294 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16816563467
|
import json
import requests
from django.http import JsonResponse
from django.shortcuts import render
import numpy as np
# Create your views here.
from django.template.defaultfilters import upper
from django.template.loader import render_to_string
from apps.utils.cases import get_scenario_on_day
from apps.utils.date_adjustment import date_adjustment
def home(request):
    """
    Country list page with live filtering.

    Fetches per-country COVID stats; the ?q= parameter filters by a
    case-insensitive substring of the country name.  AJAX requests get the
    rendered partial back as JSON, normal requests get the full page.
    """
    countries = requests.get('https://corona.lmao.ninja/countries').json()
    url_parameter = request.GET.get("q")
    # PEP 8: compare against None with `is not`, not `!=`.
    if url_parameter is not None:
        countries = [ct for ct in countries if upper(url_parameter) in upper(ct['country'])]
    if request.is_ajax():
        html = render_to_string(
            template_name="countries-results-partial.html",
            context={"dados": countries}
        )
        data_dict = {"html_from_view": html}
        return JsonResponse(data=data_dict, safe=False)
    return render(request, 'home.html', {'dados': countries})
def historico(request):
    """
    Country list page with an AJAX endpoint for per-country history.

    Non-AJAX: renders historic.html with the country list.  AJAX (with a
    ?sortBy=<country> parameter): fetches that country's case/death timeline,
    renders a partial template and returns it as JSON together with a
    Highcharts column-chart configuration of deaths per day.
    """
    countries = requests.get('https://corona.lmao.ninja/countries').json()
    if request.is_ajax():
        context = {}
        selected_country = request.GET.get('sortBy')
        historic = requests.get(f'https://corona.lmao.ninja/v2/historical/{selected_country}').json()
        # timeline['cases'] maps date string -> cumulative count.
        context['dates'] = historic['timeline']['cases']
        context['cases'] = list(context['dates'].values())
        context['casesOnDay'] = get_scenario_on_day(context['cases'])
        context['deaths'] = list(historic['timeline']['deaths'].values())
        context['deathsOnDay'] = get_scenario_on_day(context['deaths'])
        context['historic'] = historic
        context['adjusted_dates'] = [date_adjustment(date) for date in historic['timeline']['cases'].keys()]
        html = render_to_string(
            template_name="countries-historical-partial.html", context=context
        )
        data_dict = {"html_from_view": html}
        # One chart point per day: label = adjusted date, y = deaths that day.
        valores = [{'name': context['adjusted_dates'][i], 'y': context['deathsOnDay'][i]}
                   for i in range(len(context['dates']))]
        chart = {
            'chart': {'type': 'column'},
            'title': {'text': 'Impacto de Mortes por Corona'},
            'series': [{
                'name': 'Número de vítimas',
                'data': valores
            }],
            'xAxis': {
                'categories': context['adjusted_dates']
            }
        }
        data_dict['html_to_chart'] = chart
        return JsonResponse(data=data_dict, safe=False)
    return render(request, 'historic.html', {'countries': countries})
|
Akijunior/corona-relatorio
|
src/apps/core/views.py
|
views.py
|
py
| 2,580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42951183100
|
class Address:
    """Address component of a US reverse-geocode lookup result."""

    def __init__(self, obj):
        """
        See "https://smartystreets.com/docs/cloud/us-reverse-geo-api#address"
        """
        # Copy each documented field from the API payload; absent keys
        # become None.
        for field in ('street', 'city', 'state_abbreviation', 'zipcode', 'source'):
            setattr(self, field, obj.get(field))
|
smartystreets/smartystreets-python-sdk
|
smartystreets_python_sdk/us_reverse_geo/address.py
|
address.py
|
py
| 398 |
python
|
en
|
code
| 25 |
github-code
|
6
|
36014041676
|
import torch.nn as nn
import tqdm
import torch
class ANN(nn.Module):
    """Small fully connected net: input -> 128 -> 8 -> 4, ReLU after every layer."""

    def __init__(self, input=4):
        """Build the three linear layers; *input* is the input feature size."""
        super().__init__()
        # Attribute names (liner1/relu/liner2/liner3) are kept so existing
        # state_dicts still load unchanged.
        self.liner1 = nn.Linear(input, 128)
        self.relu = nn.ReLU()
        self.liner2 = nn.Linear(128, 8)
        self.liner3 = nn.Linear(8, 4)

    def forward(self, x):
        """Apply each linear layer followed by ReLU; output is non-negative."""
        hidden = x
        for layer in (self.liner1, self.liner2, self.liner3):
            hidden = self.relu(layer(hidden))
        return hidden
|
infinity-linh/Bot_Inf
|
scripts/model_ANN.py
|
model_ANN.py
|
py
| 539 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74525063866
|
import argparse
from datetime import datetime
import os
import sys
import time
import random
from Classifier_3d_v1 import Classifier
import tensorflow as tf
from util import Visualizer
import numpy as np
from dataset_classifier import LungDataset
import torch
from ops import load,save,pixelwise_cross_entropy
import torchnet as tnt
from torch.utils.data import DataLoader
#restore_from='./models'
restore_from = None   # checkpoint directory to resume from (None = fresh run)
models_path = './models'   # where checkpoints are written
logs = './logs'            # TensorBoard summary directory
# NOTE(review): dataset paths are machine-specific absolute paths.
luna = "/home/x/dcsb/data/TianChi/"
luna_data = "/home/x/data/datasets/tianchi/train/"
batch_size = 1
max_run = 1000       # number of training epochs
epoch_print = 100    # NOTE(review): appears unused in this file — confirm
iters = 0            # global step counter, incremented inside main()
vis = Visualizer()   # visdom wrapper used for live loss/accuracy plots
def main():
    """
    Train the 3D nodule classifier (TensorFlow 1.x graph mode).

    Builds the graph (Classifier on 48x48x48 single-channel patches, softmax
    cross-entropy, gradient-clipped momentum SGD), streams batches from
    LungDataset through a PyTorch DataLoader, plots progress to visdom, and
    checkpoints every 2 epochs.
    """
    vis.vis.texts = ''
    dice_loss_meter = tnt.meter.AverageValueMeter()
    # Placeholders: NDHWC image batch and one-hot 2-class labels.
    image_batch = tf.placeholder(tf.float32, shape=[None, 48, 48, 48, 1])
    label_batch = tf.placeholder(tf.float32, shape=[None, 2])
    net = Classifier({'data': image_batch}, batch_size=batch_size)
    prob = net.layers['result']
    logits = net.layers['logits']
    dataset = LungDataset("/home/x/dcsb/data/TianChi", augument=True)
    all_trainable = tf.trainable_variables()
    restore_var = tf.global_variables()
    cross_loss = tf.losses.softmax_cross_entropy(label_batch, logits)
    global iters
    cross_loss_sum = tf.summary.scalar("crossloss", cross_loss)
    # accuracy=tf.metrics.accuracy(label_batch,prob)
    optimiser = tf.train.MomentumOptimizer(0.01, 0.99)
    gradients = tf.gradients(cross_loss, all_trainable)
    # Global-norm gradient clipping at 1.0 for stability.
    clipped_gradients, norm = tf.clip_by_global_norm(gradients, 1.)
    train_op = optimiser.apply_gradients(zip(clipped_gradients, all_trainable))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # grow GPU memory on demand
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
    all_sum = tf.summary.merge([cross_loss_sum])
    summary_writer = tf.summary.FileWriter(logs, graph=tf.get_default_graph())
    saver = tf.train.Saver(var_list=restore_var, max_to_keep=40)
    # Load variables if the checkpoint is provided.
    if restore_from is not None:
        loader = tf.train.Saver(var_list=restore_var)
        load(loader, sess, restore_from, "classifier_v2")
    for i in range(max_run):
        dice_loss_meter.reset()
        start_time = time.time()
        # Seed labels/pred with one dummy row so np.concatenate works below.
        # NOTE(review): the dummy row is never removed, slightly skewing the
        # running accuracy plot — confirm whether intended.
        labels = np.array([1, 0])
        labels = labels[np.newaxis, :]
        pred = np.array([1, 0])
        pred = pred[np.newaxis, :]
        train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True, drop_last=True)
        for batch_idx, (img_, label_, _) in enumerate(train_loader):
            iters += 1
            img = img_.numpy()
            label = label_.numpy()
            labels = np.concatenate([labels, label], axis=0)
            # NCDHW (PyTorch) -> NDHWC (TF graph) layout.
            img = img.transpose([0, 2, 3, 4, 1])
            feed_dict = {image_batch: img, label_batch: label}
            _, cross_loss_, probs, summary = sess.run([train_op, cross_loss, prob, all_sum], feed_dict=feed_dict)
            summary_writer.add_summary(summary, iters)
            pred = np.concatenate([pred, probs], axis=0)
            # print "prob+:",probs[:,0]
            # Running accuracy over everything seen this epoch.
            vis.plot('accuracy', np.mean(np.argmax(labels, axis=1) == np.argmax(pred, axis=1)))
            dice_loss_meter.add(cross_loss_)
            if batch_idx > 10:
                try:
                    vis.plot('cross_loss', dice_loss_meter.value()[0])
                except:
                    # Best-effort plotting; ignore visdom hiccups.
                    pass
            # Show the central slice of the first volume in the batch.
            vis.img('input', img_[0, 0, 24, :, :].cpu().float())
            if iters % 50 == 0:
                pred_ = np.argmax(pred, axis=1)
                label_ = np.argmax(labels, axis=1)
                acc = np.mean(label_ == pred_)
                # Re-evaluates the loss on the same feed (extra forward pass).
                cross = cross_loss.eval(feed_dict, session=sess)
                print("Epoch: [%2d] [%4d] ,time: %4.4f,cross_loss:%.8f,accuracy:%.8f"% \
                      (i, batch_idx, time.time() - start_time, cross, acc))
        if i % 2 == 0:
            save(saver, sess, models_path, iters, "classifier_v2", train_tag="nodule_predict")
main()
|
jimmyyfeng/Tianchi-1
|
Tianchi_tensorflow/train_classifier.py
|
train_classifier.py
|
py
| 3,980 |
python
|
en
|
code
| 5 |
github-code
|
6
|
24890875535
|
#!/bin/env python
# -*- coding: UTF-8 -*-
import wx
import os
import sys
import shutil
import re
import math
from bqList import MyBibleList
from exhtml import exHtmlWindow
class MyApp(wx.App):
    """wx application object: resolves the install path and shows the main frame."""

    # Filesystem directory of the application (set in OnInit).
    path = None

    def __init__(self, *args, **kwds):
        wx.App.__init__(self, *args, **kwds)

    def OnInit(self):
        """Create and show the main frame; returning True lets wx continue."""
        self.path = os.path.realpath(os.path.dirname(sys.argv[0]))
        #self.path = '/home/noah/Files/Soft-Win/BibleQuote'
        self.SetAppName('BQTlite')
        self.SetClassName('BQT reader lite')
        frame = MyFrame("BQT reader lite", (150, 72), (667, 740))
        frame.Show()
        self.SetTopWindow(frame)
        return True
class MyFrame(wx.Frame):
    """Main BQT reader window: toolbar, HTML Bible view and search panel."""
    path = ''              # application directory
    strongs = False        # NOTE(review): seemingly superseded by strongsOn below — confirm
    page = None            # exHtmlWindow showing the current text
    findPanel = None       # hidden panel holding the search field/button
    sizer = None           # top-level FlexGridSizer
    bibles = None          # MyBibleList of installed modules
    activeModule = None    # currently opened Bible module
    compareModule = None   # module used for parallel comparison, if any
    buttonz = {}           # named toolbar buttons, keyed by buttonData() name
    searchField = None     # wx.TextCtrl inside findPanel
    strongsOn = False      # whether Strong numbers are displayed
    currentBook = -1       # index into activeModule.FullName
    currentChapter = -1    # 1-based chapter number
    fullScreen = False     # toggled by ToggleFullScreen
    def buttonData(self):
        """Toolbar spec: (label, handler, key in self.buttonz, width, tooltip, bitmap)."""
        return (("RST", self.OnModule, 'module', 130, 'Module', ''),
            ("Genesis", self.OnBook, 'book', 200, 'Book', ''),
            ("<", self.PrevChapter, None, 20, 'Previous chapter', ''),
            ("1", self.OnChapter, 'chapter', 40, 'Chapter', ''),
            (">", self.NextChapter, None, 20, 'Next chapter', ''),
            ("#", self.ToggleStrongs, 'strongs', 30, 'Toggle Strong numbers', ''),
            ("H", self.OnHistory, 'history', 30, 'History', 'HistoryButton.bmp'),
            ("S", self.OnFind, 'find', 30, 'Search', 'SearchButton.bmp'),
            ("-Compare-", self.OnCompare, 'compare', 130, 'Compare translation with...', ''),
            ('F', self.ToggleFullScreen, None, 30, 'Toggle fullscreen', 'FullScreen.bmp'))
    def createButtonBar(self, panel, yPos = 0):
        """Create the toolbar buttons left-to-right; return the tallest button height."""
        xPos = 0
        height = 0
        for eachLabel, eachHandler, eachName, eachWidth, eachHint, eachPic in self.buttonData():
            pos = (xPos, yPos)
            button = self.buildOneButton(panel, eachLabel, eachHandler, pos, eachHint, eachPic, height)
            if(eachName):
                # Keep named buttons addressable for later relabelling
                # (see arrangeControls).
                self.buttonz[eachName] = button
            if(eachWidth):
                button.SetSize((eachWidth, -1))
            xPos += button.GetSize().width
            if(button.GetSize().height>height):
                height=button.GetSize().height
        return height
    def buildOneButton(self, parent, label, handler, position=(0,0), hint='', img='', height=0):
        """Create one toolbar button: a bitmap button if the icon file exists, else text."""
        # Icons are searched first in GLYPHS/, then in help/buttons/.
        if(img and os.path.exists(self.path + '/GLYPHS/' + img)):
            image1 = wx.Image(self.path + '/GLYPHS/' + img,\
                wx.BITMAP_TYPE_ANY).ConvertToBitmap()
            button = wx.BitmapButton(parent, id=-1, bitmap=image1,
                pos=position, size = (height, height))
        elif(img and os.path.exists(self.path + '/help/buttons/' + img)):
            image1 = wx.Image(self.path + '/help/buttons/' + img,\
                wx.BITMAP_TYPE_ANY).ConvertToBitmap()
            button = wx.BitmapButton(parent, id=-1, bitmap=image1,
                pos=position, size = (height, height))
        else:
            button = wx.Button(parent, -1, label, position)
        self.Bind(wx.EVT_BUTTON, handler, button)
        if(hint):
            button.SetToolTip(wx.ToolTip(hint))
        return button
    def createTabs(self):
        """Build a demo wx.Notebook with two pages.

        NOTE(review): the only call site in __init__ is commented out, so
        this looks like unused scaffolding — confirm before relying on it.
        """
        # create notebook
        notebook = wx.Notebook( self, -1, (0,40), (500,500))
        # create pages
        ctrl = wx.Panel( notebook, -1 )
        # add pages
        notebook.AddPage( wx.TextCtrl( notebook, -1 ), "Page 1", False, -1 )
        notebook.AddPage( ctrl, "Page 2 Will be Selected", True, -1 )
        self.page = wx.html.HtmlWindow(ctrl, -1, (0,0), (200, 200))
        return notebook
    def __init__(self, title, pos, size):
        """Build the main window: toolbar, HTML view, search panel, status bar."""
        self.path = os.path.realpath(os.path.dirname(sys.argv[0]))
        #self.path = '/home/noah/Files/Soft-Win/BibleQuote'
        self.bibles = MyBibleList()
        wx.Frame.__init__(self, None, -1, title, pos, size)
        #self.createMenuBar()
        self.panel = wx.Panel(self, -1)
        self.panel.SetBackgroundColour("Yellow")
        height = self.createButtonBar(self.panel)
        #notebook = self.createTabs()
        self.page = exHtmlWindow(self, -1, (0,0), (100,100))
        self.page.SetLinkClicked(self.OnLinkClicked)
        # Search bar, hidden until search mode (see arrangeControls).
        self.findPanel = wx.Panel(self, -1)
        self.searchField = wx.TextCtrl(self.findPanel, -1, '', (0,0))
        self.findButton = wx.Button(self.findPanel, -1, 'Find', (100,0))
        self.Bind(wx.EVT_BUTTON, self.OnSearchStart, self.findButton)
        self.findPanel.Hide()
        self.CreateStatusBar()
        self.SetStatusText("Ready")
        self.strongs = False
        self.bibles.loadList(self.path)
        self.__do_layout()
        # Restore the last reading position, or ask for a module on first run.
        if(len(self.bibles.history)>0):
            history0 = self.bibles.history[0]
            self.bibleGo(history0['command'][3:])
        else:
            self.OnModule(None)
        self.Bind (wx.EVT_CLOSE, self.OnClose)
        favicon = wx.Icon(self.path + '/favicon.ico', wx.BITMAP_TYPE_ICO, 16, 16)
        self.SetIcon(favicon)
        # NOTE(review): GTK-specific imports performed at runtime inside
        # __init__ — presumably for GTK thread support on Linux; confirm.
        import gobject
        gobject.threads_init()
        import pygtk
        pygtk.require('2.0')
        import gtk, gtk.gdk
        self.taskBarIcon = favicon
    def __do_layout(self):
        """Arrange button bar, find panel and HTML page in a growable grid."""
        # Three stacked rows: toolbar, (optional) find panel, reading page.
        self.sizer = wx.FlexGridSizer(3, 1, 0, 0)
        self.sizer.Add(self.panel, 1, flag = wx.EXPAND)
        self.sizer.Add(self.findPanel, 2, flag = wx.EXPAND)
        self.sizer.Add(self.page, 3, flag = wx.EXPAND)
        # Only the page row (index 2) absorbs extra vertical space.
        self.sizer.AddGrowableRow(2)
        self.sizer.AddGrowableCol(0)
        self.SetSizer(self.sizer)
        # Find panel: the text field stretches, the button keeps its size.
        searchSizer = wx.FlexGridSizer(1, 2, 0, 0)
        searchSizer.Add(self.searchField, 1, flag = wx.EXPAND)
        searchSizer.Add(self.findButton, 2, flag = wx.EXPAND)
        searchSizer.AddGrowableCol(0)
        self.findPanel.SetSizer(searchSizer)
        self.Layout()
    def arrangeControls(self):
        """Sync toolbar labels, strong-number state, find-panel visibility and
        the status bar with the current page mode and selected modules."""
        # The find panel is visible only while showing search results.
        if(self.page.getMode()=='search'):
            self.findPanel.Show()
        else:
            self.findPanel.Hide()
        # Strong numbers require a Bible module that actually carries them.
        if(not self.activeModule or not self.activeModule.Bible \
            or not self.activeModule.StrongNumbers):
            self.strongsOn = False
        if(self.strongsOn):
            self.buttonz['strongs'].SetForegroundColour('Green')
        else:
            self.buttonz['strongs'].SetForegroundColour('Black')
        if(self.activeModule):
            self.buttonz['module'].SetLabel(self.activeModule.BibleShortName)
            self.buttonz['book'].SetLabel(self.activeModule.FullName[self.currentBook])
            self.buttonz['chapter'].SetLabel(str(self.currentChapter))
            statusText = self.activeModule.BibleName
        else:
            # No module selected yet: blank out the navigation buttons.
            self.buttonz['module'].SetLabel('')
            self.buttonz['book'].SetLabel('')
            self.buttonz['chapter'].SetLabel('')
            statusText = 'Select a module'
        if(self.compareModule):
            statusText = statusText + ' | ' + self.compareModule.BibleName
            self.buttonz['compare'].SetLabel(self.compareModule.BibleShortName)
        else:
            self.buttonz['compare'].SetLabel('-Compare-')
        statusText = statusText + ' | ' + 'Mode: ' +self.page.getMode()
        if(self.page.ctrlDown):
            statusText = statusText + ' [Ctrl]'
        self.SetStatusText(statusText)
        self.Layout()
def OnCopy(self, event):
self.page.OnCopy(event)
event.Skip()
def OnOptions(self, event): pass
def OnQuit(self, event):
self.Close()
def OnClose(self, event):
try:
self.bibles.saveHistory()
except:
pass
self.Destroy()
def OnAbout(self, event):
wx.MessageBox("BQT reader light (very light)\nWritten by Noah for the sake of learning Python.",
"BQT reader light", wx.OK | wx.ICON_INFORMATION, self)
    def OnLinkClicked(self, link):
        """Dispatch internal "scheme:value" links emitted by the HTML page.

        Schemes: module, book, chapter, strong, go, searchpage, compare.
        """
        tmpRe = re.search('^([^:]+):(.*)$', link.GetHref())
        if(tmpRe):
            if(tmpRe.groups()[0]=='module'):
                path = tmpRe.groups()[1]
                if(self.activeModule and self.activeModule.path == path):
                    # Re-selecting the current module: just re-render.
                    self.ShowChapter(self.currentChapter)
                else:
                    oldModule = self.activeModule
                    self.activeModule = self.bibles.getModule(path)
                    self.activeModule.loadModule()
                    if(oldModule and oldModule.Bible and self.activeModule.Bible):
                        # Bible-to-Bible switch: try to stay on the same book
                        # via its absolute (canonical) index.
                        #wx.MessageBox('[0]', "Module", wx.ICON_ERROR | wx.OK)
                        newBookInd = self.activeModule.getOrderNumber(oldModule.getAbsoluteIndex(self.currentBook))
                        #wx.MessageBox('[1]', "Module", wx.ICON_ERROR | wx.OK)
                        if(newBookInd>=0):
                            #wx.MessageBox('[2] book:'+str(newBookInd), "Module", wx.ICON_ERROR | wx.OK)
                            if(self.activeModule.loadBook(newBookInd)):
                                self.currentBook = newBookInd
                                self.ShowChapter(self.currentChapter)
                            #wx.MessageBox('[3]', "Module", wx.ICON_ERROR | wx.OK)
                        else:
                            pass
                            #wx.MessageBox('Could not find the book', "Module", wx.ICON_ERROR | wx.OK)
                    else:
                        # Not a Bible on both sides: fall back to book chooser.
                        self.ChooseBook(path)
            elif(tmpRe.groups()[0]=='book'):
                book = int(tmpRe.groups()[1])
                self.activeModule.loadBook(book)
                self.buttonz['book'].SetLabel(self.activeModule.FullName[book])
                self.ChooseChapter(book)
            elif(tmpRe.groups()[0]=='chapter'):
                chapter = int(tmpRe.groups()[1])
                self.ShowChapter(chapter)
                self.buttonz['chapter'].SetLabel(str(chapter))
            elif(tmpRe.groups()[0]=='strong'):
                number = tmpRe.groups()[1]
                self.ShowStrong(number)
            elif(tmpRe.groups()[0]=='go'):
                self.bibleGo(tmpRe.groups()[1])
            elif(tmpRe.groups()[0]=='searchpage'):
                page = int(tmpRe.groups()[1])
                self.ShowSearchPage(page)
            elif(tmpRe.groups()[0]=='compare'):
                path = tmpRe.groups()[1]
                # Picking the active module as compare target clears compare.
                if(self.activeModule.path == path):
                    path = ''
                self.OnCompareChoise(path)
        else:
            self.page.OutputHTML('Unknown command:', link.GetHref(), 'error')
        self.arrangeControls()
def OnModule(self, event):
title = 'Choose a module:'
return self.ShowModuleList(title, 'module', True, False)
    def ShowModuleList(self, title, mode, showOthers, showNothing):
        """List installed modules grouped by kind as "<mode>:path" links.

        Args:
            title: heading shown above the list.
            mode: link scheme for the entries ('module' or 'compare').
            showOthers: include non-Bible, non-commentary books.
            showNothing: prepend an "Unselect" link (used for compare mode).
        """
        # Pressing the same toolbar button again toggles back to the text.
        if(self.page.getMode()==mode):
            self.ShowChapter(self.currentChapter)
            return
        self.page.saveScrollPos()
        modList = self.bibles.getBibleList()
        content = ''
        if(showNothing):
            content = content + '<a href="' + mode + ':">Unselect</a>'
        if(len(modList)):
            content = content + '<h2>Bibles:</h2>' + self.ProcessList(modList, mode)
        modList = self.bibles.getCommentaryList()
        if(len(modList)):
            content = content + '<h2>Commentaries:</h2>' + self.ProcessList(modList, mode)
        if(showOthers):
            modList = self.bibles.getOtherList()
            if(len(modList)):
                content = content + '<h2>Other books:</h2>' + self.ProcessList(modList, mode)
        if(content == ''):
            title = 'Could not find modules'
        self.page.OutputHTML(title, content, mode)
        self.arrangeControls()
def ProcessList(self, modList, mode):
content = '<ul>'
for mod in modList:
label = mod.BibleName
link = mode+ ':' + mod.path
content = content + '<li> <a href="' + link + '">' + label + '</a>'
content = content + '</ul>'
return content
    def ChooseBook(self, path):
        """Show the book chooser for the active module as a three-column list.

        Single-book modules skip straight to the chapter chooser.
        """
        if(not self.activeModule): return
        self.page.setPath(path)
        if(self.activeModule.BookQty>1):
            content = '<table><tr><td valign=top><ul>'
            # Books per column: split the list into three roughly equal parts.
            cnt = int((len(self.activeModule.FullName)-1)/3)+1
            for i in range(len(self.activeModule.FullName)):
                content = content + '<li><a href="book:' + str(i) + '">' + \
                    self.activeModule.FullName[i] + '</a>'
                if(i+1==cnt or i+1==cnt+cnt):
                    # Column break after each third of the books.
                    content = content + '</ul></td><td valign=top><ul>'
            content = content + '</ul></td></tr></table>'
            self.page.OutputHTML('', content, 'book')
            self.arrangeControls()
        else:
            self.activeModule.loadBook(0)
            self.currentBook = 0
            self.ChooseChapter(0)
        return
    def ChooseChapter(self, book):
        """Show large numbered links for each chapter of *book*.

        Books with a single chapter are rendered immediately.
        """
        if(not self.activeModule): return
        self.currentBook = int(book)
        content = ''
        chRange = self.activeModule.getChapterRange(book)
        if(len(chRange)>1):
            for i in chRange:
                content = content + '<a href="chapter:' + str(i) + \
                    '"><font size="7"> ' + str(i) + \
                    ' </font></a> '
            self.page.OutputHTML('', content, 'chapter')
            self.arrangeControls()
        else:
            self.ShowChapter(chRange[0])
        return
def transformContent(self, text, strongPrfx, module):
if(module.StrongNumbers):
if(self.strongsOn):
text = re.sub(' ([0-9]{1,5})', ' <a href="strong:' + strongPrfx +\
'\\1"><small>\\1</small></a>', text)
else:
text = re.sub(' [0-9]{1,5}', '', text)
#text = text.replace('<','<br>[[').replace('>',']]<br>')
text = re.sub('<p( [^>]*)?>', '', text)
text = text.replace('</p>','<br>')
return text
    def ShowChapter(self, chapter):
        """Render *chapter* of the active module — optionally side by side
        with the compare module — then record the passage in history."""
        if(not self.activeModule): return
        self.currentChapter = int(chapter)
        content = self.activeModule.getChapter(chapter)
        # Strong-number prefix: '0' marks Old Testament (Hebrew) entries.
        prfx = ''
        if(self.activeModule.isOT(self.currentBook)): prfx = '0'
        content = self.transformContent(content, prfx, self.activeModule)
        absInd = self.activeModule.getAbsoluteIndex(self.currentBook)
        newBookInd = self.activeModule.getOrderNumber(absInd)
        title = ''
        if(self.compareModule):
            # Locate the same book in the compare module by absolute index.
            self.compareModule.loadModule()
            newBookInd = self.compareModule.getOrderNumber(absInd)
            if(newBookInd>=0 and self.compareModule.loadBook(newBookInd)):
                content2 = self.compareModule.getChapter(chapter)
                if(content2):
                    content2 = self.transformContent(content2, prfx, self.compareModule)
                    # Column widths proportional to each translation's length.
                    prc = int(len(content)*100./(len(content)+len(content2)))
                    content = '<table><tr><td width='+str(prc)+'% valign=top>' + content + '</td>' +\
                        '<td width='+str(100-prc)+'% valign=top>' + content2 + '</td></tr></table>'
        self.page.OutputHTML('', content, 'text')
        self.page.restoreScrollPos()
        # History commands store a 1-based book number; modules with a
        # chapter 0 store the chapter shifted by one.
        chzero = 0
        if(self.activeModule.ChapterZero): chzero = 1
        command = os.path.basename(self.activeModule.path).lower()\
            + ' ' + str(self.currentBook + 1)\
            + ' ' + str(self.currentChapter + chzero)
        title = self.activeModule.BibleShortName\
            + ' ' + self.activeModule.ShortName[self.currentBook][0]\
            + ' ' + str(self.currentChapter)
        self.bibles.pushHistory(command, title)
        self.arrangeControls()
        self.page.SetFocus()
def OnBook(self, event):
if(not self.activeModule): return
if(self.page.getMode()=='book'):
self.ShowChapter(self.currentChapter)
return
self.ChooseBook(self.activeModule.path)
def OnChapter(self, event):
if(not self.activeModule): return
if(self.page.getMode()=='chapter'):
self.ShowChapter(self.currentChapter)
return
self.page.setMode('chapter')
self.ChooseChapter(self.currentBook)
def PrevChapter(self, event):
if(not self.activeModule): return
if(self.page.getMode()!='text'): return
ch = self.activeModule.getPrevChapter(self.currentBook, self.currentChapter)
if(ch):
self.activeModule.loadBook(ch[0])
self.currentBook = ch[0]
self.ShowChapter(ch[1])
self.page.clearScrollPos()
self.arrangeControls()
def NextChapter(self, event):
if(not self.activeModule): return
if(self.page.getMode()!='text'): return
ch = self.activeModule.getNextChapter(self.currentBook, self.currentChapter)
if(ch):
self.activeModule.loadBook(ch[0])
self.currentBook = ch[0]
self.ShowChapter(ch[1])
self.page.clearScrollPos()
self.arrangeControls()
def ToggleStrongs(self, event):
if(not self.activeModule): return
if(1 or self.page.getMode()!='strong'):
#if(not self.page.getMode() in ('text','strong')): return
if(self.strongsOn):
self.strongsOn = False
else:
self.strongsOn = True
self.ShowChapter(self.currentChapter)
self.arrangeControls()
def ShowStrong(self, number):
isHeb = False
if(number[0]=='0'):
isHeb = True
number = number[1:]
if(number[0]=='0'):
number = number[1:]
number = int(number)
res = self.bibles.getStrongText(number, isHeb)
title = res[0]
content = res[1]
self.page.OutputHTML(title, content, 'strong')
self.arrangeControls()
def OnHistory(self, event):
if(self.page.getMode()=='history'):
self.ShowChapter(self.currentChapter)
return
content = ''
for item in self.bibles.history:
content = content + '<a href="go:' + item['command'][3:] +'">' + item['title'] + '</a><br>'
title = 'History:'
self.page.OutputHTML(title, content, 'history')
self.arrangeControls()
def OnFind(self, event):
if(not self.activeModule): return
self.page.setMode('search')
if(self.findPanel.IsShown()):
self.ShowChapter(self.currentChapter)
else:
self.ShowSearchPage(1)
self.arrangeControls()
def OnSearchStart(self, event):
if(not self.activeModule or self.searchField.GetValue()==''): return
self.activeModule.search(self.searchField.GetValue(), [])
self.ShowSearchPage(1)
def ShowSearchPage(self, page):
pageSize = 20
searchCount = self.activeModule.searchCount()
found = self.activeModule.getSearchPage(page, pageSize)
title = str(searchCount) + ' results'
content = ''
for i in range(len(found)):
content = content + '<hr><a href="go:- ' + \
str(found[i][0]) + ' ' + \
str(found[i][1]) + ' ' + \
str(found[i][2]) + '">' + \
found[i][3] + '</a> ' + \
self.transformContent(found[i][4], '', self.activeModule)
pageCount = int(searchCount / pageSize) + 1
content = content + '<hr>'
for i in range(1, pageCount+1):
if(i == page):
content = content + ' <FONT size="+2">' + str(i) + '</FONT> '
else:
content = content + ' <a href="searchpage:' + str(i) + '"><FONT size="+2">' + str(i) + '</FONT></a> '
self.page.OutputHTML(title, content, 'search')
    def bibleGo(self, command):
        """Open a passage given as "<module|-> <book-1based> <chapter>".

        A '-' module keeps the currently active module (used by search and
        history links). Shows an error box if the module cannot be opened.
        """
        where = command.split(' ')
        if(not self.activeModule or where[0]!='-'):
            newModule = self.bibles.getModuleByShortPath(where[0])
            if(not newModule):
                wx.MessageBox("Could not open the module: \n" + command, "Error", wx.ICON_ERROR | wx.OK)
                return
            self.activeModule = newModule
            self.activeModule.loadModule()
            self.buttonz['module'].SetLabel(self.activeModule.BibleShortName)
        # Stored book numbers are 1-based.
        currentBook = int(where[1])-1
        self.activeModule.loadBook(currentBook)
        self.currentBook = currentBook
        self.buttonz['book'].SetLabel(self.activeModule.FullName[currentBook])
        #wx.MessageBox(where[2], "Chapter", wx.ICON_ERROR | wx.OK)
        # Modules numbering chapters from zero store chapters shifted by one.
        currentChapter = int(where[2])
        if(self.activeModule.ChapterZero):
            currentChapter = currentChapter - 1
        self.currentChapter = currentChapter
        self.buttonz['chapter'].SetLabel(str(currentChapter))
        self.ShowChapter(currentChapter)
        self.arrangeControls()
def OnCompare(self, event):
title = 'Choose a module to compare:'
return self.ShowModuleList(title, 'compare', False, True)
def OnCompareChoise(self, path):
if(path):
self.compareModule = self.bibles.getModule(path)
else:
self.compareModule = None
self.ShowChapter(self.currentChapter)
def ToggleFullScreen(self, event):
if self.fullScreen:
self.fullScreen = False
else:
self.fullScreen = True
self.ShowFullScreen(self.fullScreen, style=wx.FULLSCREEN_ALL)
self.page.SetFocus()
if __name__ == '__main__':
    # MyApp is defined earlier in this file (outside this excerpt).
    app = MyApp(False)
    app.MainLoop()
|
noah-ubf/BQTLite
|
pybq.py
|
pybq.py
|
py
| 19,493 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26293955207
|
import os
import shutil

# Copy every .c file found below this script's directory into that directory
# and rename the copy from '.c' to '.txt'.

# path that this script is in
path = os.path.dirname(os.path.realpath(__file__))
# r = root, d = directories, f = files
for r, d, f in os.walk(path):
    for file in f:
        # endswith() replaces the old `'.c' in file` test, which wrongly
        # matched names like 'a.cpp', 'b.csv' or 'x.c.bak'.
        if file.endswith('.c'):
            # joins the root and file to make file path
            file_path = os.path.join(r, file)
            # copies the file to path
            shutil.copy(file_path, path)
            # makes new file path where copied file is
            file_path = os.path.join(path, file)
            # renames copied file from '.c' to '.txt'
            new_file_path = file_path[:-2] + '.txt'
            os.rename(file_path, new_file_path)
|
rijaasif/2SH4
|
FILE_SCRIPTS/GET_TXT.py
|
GET_TXT.py
|
py
| 803 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2778228066
|
import types
from imp import reload
def print_status(module):
    """Announce which module is about to be reloaded."""
    print(f'reloading {module.__name__}')
def try_reload(module):
    """Reload *module*, reporting (not raising) any failure."""
    try:
        reload(module)
    except Exception as e:
        # {e!r} is the idiomatic f-string spelling of e.__repr__().
        print(f'FAILED {e!r} : {module}')
def transitive_reload(module, visited):
    """Reload *module* and, recursively, every module reachable from it.

    *visited* maps already-processed modules to True so shared or circular
    imports are only reloaded once.
    """
    if module in visited:
        return
    print_status(module)
    try_reload(module)
    visited[module] = True
    for attr_value in module.__dict__.values():
        if type(attr_value) == types.ModuleType:
            transitive_reload(attr_value, visited)
def reload_all(*args):
    """Transitively reload every module passed in; non-modules are ignored."""
    visited = {}
    for candidate in args:
        if type(candidate) == types.ModuleType:
            transitive_reload(candidate, visited)
if __name__ == '__main__':
    def tester(reloader, modname):
        """Self-test: import *modname* (or argv[1] if given) and reload it."""
        import importlib, sys
        if len(sys.argv) > 1:
            modname = sys.argv[1]
        module = importlib.import_module(modname)
        reloader(module)
    tester(reload_all, 'reloadall')
|
Quessou/quessoutils
|
qssmodules/reloadall.py
|
reloadall.py
|
py
| 967 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14159066621
|
import tkinter as tk
from tkinter import ttk
import pyautogui
import pygetwindow
# The app was developed by Tom Girshovksi.
class CenterWindowGUI:
    """Tk GUI that centers or rescales any currently open top-level window.

    Open windows are enumerated via pyautogui/pygetwindow; the user selects
    one in a listbox and either centers it on screen or resizes it.
    """

    def __init__(self, master):
        self.master = master
        master.title("Center Window")
        # Create the frame
        self.frame = ttk.Frame(master, padding=20)
        self.frame.pack()
        # Configure columns to have equal weight
        self.frame.columnconfigure(0, weight=1)
        self.frame.columnconfigure(1, weight=1)
        self.frame.columnconfigure(2, weight=1)
        # Create the label
        self.label = ttk.Label(self.frame, text="Choose a window to center:")
        self.label.grid(row=0, column=0, columnspan=3, pady=10)
        # Create the listbox to display the windows
        self.listbox = tk.Listbox(self.frame, width=50, height=10, selectmode=tk.SINGLE)
        self.listbox.grid(row=1, column=0, columnspan=3, padx=10, pady=10)
        self.update_windows()
        # Center Button
        self.center_button = ttk.Button(self.frame, text="Center Window", command=self.center_window)
        self.center_button.grid(row=2, column=0, pady=10)
        # Scale Function Button
        self.scale_button = ttk.Button(self.frame, text="Scale Window", command=self.scale_window)
        self.scale_button.grid(row=2, column=1, pady=10)
        # Update List Button
        self.update_button = ttk.Button(self.frame, text="Update List", command=self.update_windows)
        self.update_button.grid(row=2, column=2, pady=10)

    def _selected_window(self):
        """Return the window selected in the listbox, or None if none is."""
        selection = self.listbox.curselection()
        return self.windows[selection[0]] if selection else None

    def center_window(self):
        """Move the selected window to the center of the screen."""
        window = self._selected_window()
        if window is None:
            # Nothing selected: the old code raised IndexError here.
            return
        # Screen and window sizes determine the centered top-left corner.
        screen_width, screen_height = pyautogui.size()
        window_width, window_height = window.size
        new_left = (screen_width - window_width) // 2
        new_top = (screen_height - window_height) // 2
        window.moveTo(new_left, new_top)

    def update_windows(self):
        """Refresh the listbox with the titles of all open windows."""
        self.listbox.delete(0, tk.END)
        self.windows = pyautogui.getAllWindows()
        for window in self.windows:
            self.listbox.insert(tk.END, window.title)

    def scale_window(self):
        """Resize the selected window; a full-screen window is centered instead."""
        window = self._selected_window()
        if window is None:
            # Nothing selected: the old code raised IndexError here.
            return
        screen_width, screen_height = pyautogui.size()
        window_width, window_height = window.size
        if window_width == screen_width and window_height == screen_height:
            # If the window is already full screen, center it instead
            self.center_window()
        else:
            # Resize to a bit over half the screen in each dimension.
            window.resizeTo(screen_width // 2 + 500, screen_height // 2 + 300)
# Create the fixed-size root window, attach the GUI and enter the event loop.
root = tk.Tk()
root.resizable(False, False)
# Set the style of the GUI
style = ttk.Style(root)
gui = CenterWindowGUI(root)
root.mainloop()
|
R1veltm/WindowCenterizer
|
main.py
|
main.py
|
py
| 3,398 |
python
|
en
|
code
| 2 |
github-code
|
6
|
43447079150
|
import sys, re
from argparse import ArgumentParser  # command-line argument parsing

# Classify an input nucleotide sequence as DNA or RNA and optionally search
# it for a motif.
parser = ArgumentParser(description = 'Classify a sequence as DNA or RNA')
parser.add_argument("-s", "--seq", type = str, required = True, help = "Input sequence")
parser.add_argument("-m", "--motif", type = str, required = False, help = "Motif")

# Print the help message and exit when no arguments were supplied at all.
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)

args = parser.parse_args()
args.seq = args.seq.upper()  # normalise to upper case

# A sequence containing both U and T is flagged and processing stops here.
if 'U' in args.seq and 'T' in args.seq:
    print ('The sequence have a mutagenic bases')
    sys.exit ()
if re.search('^[ACGTU]+$', args.seq):  # only valid nucleotide letters?
    if re.search('T', args.seq):
        print ('The sequence is DNA')  # T present -> DNA
    elif re.search('U', args.seq):
        print ('The sequence is RNA')  # U present -> RNA
    else:
        # Only A/C/G present: ambiguous between DNA and RNA.
        print ('The sequence can be DNA or RNA')
else:
    # Contains characters outside A/C/G/T/U.
    print ('The sequence is not DNA')
if args.motif:
    args.motif = args.motif.upper()  # match the upper-cased sequence
    print(f'Motif search enabled: looking for motif in sequence')
    if re.search(args.motif, args.seq):
        print("FOUND")
    else:
        print("NOT FOUND")
|
stepsnap/git_HandsOn
|
seqClass.py
|
seqClass.py
|
py
| 1,881 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10996457940
|
import time
import pyrealsense2 as rs
import numpy as np
import cv2
import os
import open3d as o3d
intrinsics = np.array([
[605.7855224609375, 0., 324.2651672363281, 0.0],
[0., 605.4981689453125, 238.91090393066406, 0.0],
[0., 0., 1., 0.0],
[0., 0., 0., 1.],])
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def launch_realsense(pixel_width, pixel_high, fps, found_rgb=False):
    """Capture one aligned RGB-D frame pair from a RealSense camera.

    Streams color and depth at the requested resolution/fps, waits for the
    first valid aligned pair (after a 2 s warm-up), saves the RGB PNG, depth
    PNG/colormap and raw depth .npy under a timestamped out_data folder, and
    returns (depth_npy_path, rgb_png_path). Exits the process when no RGB
    camera is attached.
    """
    pipeline = rs.pipeline()
    # Create a config and configure the pipeline to stream
    config = rs.config()
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("Can't launch rgb camera")
        exit(0)
    config.enable_stream(rs.stream.depth, pixel_width, pixel_high, rs.format.z16, fps)
    config.enable_stream(rs.stream.color, pixel_width, pixel_high, rs.format.bgr8, fps)
    # Align depth frames onto the color stream so pixels correspond 1:1.
    align_to = rs.stream.color
    alignedFs = rs.align(align_to)
    # Start streaming
    pipeline.start(config)
    # Create folders by date
    save_path = os.path.join(os.getcwd(), "out_data",
                             time.strftime("%Y_%m_%d_%H_%M_%S",
                                           time.localtime()))
    os.makedirs(save_path)
    os.makedirs(os.path.join(save_path, "rgb"))
    os.makedirs(os.path.join(save_path, "depth"))
    os.makedirs(os.path.join(save_path, "depth_colormap"))
    # cv2.namedWindow("camera in real time", cv2.WINDOW_AUTOSIZE)
    # saved_color_image = None
    # saved_depth_mapped_image = None
    try:
        flag = 0
        while True:
            # Skip the first iteration after a 2 s warm-up so exposure settles.
            if flag == 0:
                time.sleep(2)
                flag = 1
                continue
            # Wait for a coherent pair of frames: rgb and depth
            frames = pipeline.wait_for_frames()
            align_frames = alignedFs.process(frames)
            depth_frame = align_frames.get_depth_frame()
            color_frame = align_frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            # Log the actual color intrinsics reported by the camera.
            color_profile = color_frame.get_profile()
            cvsprofile = rs.video_stream_profile(color_profile)
            color_intrin = cvsprofile.get_intrinsics()
            color_intrin_part = [color_intrin.ppx, color_intrin.ppy,
                                 color_intrin.fx, color_intrin.fy]
            print('**color_intrin_part**:',color_intrin_part)
            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.1), cv2.COLORMAP_JET)
            # depth_colormap_dim = depth_colormap.shape
            # color_colormap_dim = color_image.shape
            #
            # if depth_colormap_dim != color_colormap_dim:
            #     resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),
            #                                      interpolation=cv2.INTER_AREA)
            #     images = np.hstack((resized_color_image, depth_colormap))
            # else:
            #     images = np.hstack((color_image, depth_colormap))
            # # Show images
            # cv2.imshow("camera in real time", images)
            # key = cv2.waitKey(1)
            # Save the image
            # if key & 0xFF == ord('s'):
            # Count existing saves so file names keep increasing.
            saved_count = 0
            for filename in os.listdir(os.path.join((save_path), "rgb")):
                if filename.endswith('.png'):
                    saved_count += 1
            print('save data:',saved_count)
            saved_color_image = color_image
            saved_depth_image = depth_image
            saved_depth_mapped_image = depth_colormap
            # save rgb png
            cv2.imwrite(os.path.join((save_path), "rgb",
                                     "rgb_{}.png".format(saved_count)),saved_color_image)
            # save depth_colormap png
            cv2.imwrite(os.path.join((save_path), "depth_colormap",
                                     "depth_colormap_{}.png".format(saved_count)),
                        saved_depth_mapped_image)
            # save depth png
            cv2.imwrite(os.path.join((save_path), "depth",
                                     "depth_{}.png".format(saved_count)),
                        saved_depth_image)
            # save depth npy
            np.save(os.path.join((save_path), "depth",
                                 "depth_{}.npy".format(saved_count)), saved_depth_image)
            depth_path = os.path.join((save_path), "depth", "depth_{}.npy".format(saved_count))
            color_path = os.path.join((save_path), "rgb", "rgb_{}.png".format(saved_count))
            # Return after the first successfully captured and saved frame.
            return depth_path, color_path
    finally:
        # Stop streaming
        pipeline.stop()
def loadRGB(color_file):
    """Read an image file and return it as an RGB (not OpenCV's BGR) array."""
    bgr_image = cv2.imread(color_file)
    return cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
def loadDepth(depth_file):
    """Read a depth image preserving its original bit depth (16-bit PNG)."""
    return cv2.imread(depth_file, cv2.IMREAD_UNCHANGED)
def save_points(depth_path, color_path):
    """Convert a saved RGB-D pair into a colored point cloud and store it.

    Back-projects pixels with the hard-coded `intrinsics`, keeps points
    closer than 1 m, shows the cloud in a (blocking) Open3D window, and
    writes points.npy / colors.npy next to the rgb/depth folders.

    Returns:
        (points_file, colors_file): paths of the saved arrays.
    """
    colors = loadRGB(color_path).astype(np.float32) / 255.0
    depths = np.load(depth_path)  # loadDepth(depth_path)
    # convert RGB-D to point cloud
    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    cx, cy = intrinsics[0, 2], intrinsics[1, 2]
    # depth factor: raw depth is millimetres, points are metres
    s = 1000.0
    xmap, ymap = np.arange(colors.shape[1]), np.arange(colors.shape[0])
    xmap, ymap = np.meshgrid(xmap, ymap)
    # Pinhole back-projection of every pixel.
    points_z = depths / s
    points_x = (xmap - cx) / fx * points_z
    points_y = (ymap - cy) / fy * points_z
    points = np.stack([points_x, points_y, points_z], axis=-1)
    points = points.reshape((-1, 3))
    colors = colors.reshape((-1, 3))
    # Drop everything 1 m or farther from the camera.
    mask = np.where(points[:, 2] < 1)
    points = points[mask]
    colors = colors[mask]
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(points)
    cloud.colors = o3d.utility.Vector3dVector(colors)
    coord = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1, origin=[0, 0, 0])
    # Blocking preview window; processing continues once it is closed.
    o3d.visualization.draw_geometries([cloud, coord])
    base_dir = os.path.dirname(os.path.dirname(color_path))
    points_file = os.path.join(base_dir, 'points.npy')
    colors_file = os.path.join(base_dir, 'colors.npy')
    np.save(points_file, points)
    np.save(colors_file, colors)
    return points_file, colors_file
if __name__ == '__main__':
    # Capture one RGB-D frame, then convert and preview it as a point cloud.
    depth_path, color_path = launch_realsense(pixel_width=640, pixel_high=480, fps=30)
    save_points(depth_path, color_path)
|
midea-ai/CMG-Net
|
utils/get_points.py
|
get_points.py
|
py
| 6,789 |
python
|
en
|
code
| 3 |
github-code
|
6
|
20182818588
|
# File: utils.py
# Name: Sergio Ley Languren
"""Utility for wordle program"""
from WordleDictionary import FIVE_LETTER_WORDS
from WordleGraphics import CORRECT_COLOR, PRESENT_COLOR, MISSING_COLOR, UNKNOWN_COLOR, N_COLS, N_ROWS, WordleGWindow
from random import choice
from typing import Type, Union, Optional
from copy import deepcopy
from tempfile import NamedTemporaryFile
from os import getcwd, unlink
__all__ = [
"choose_word",
"validate_responce",
"ScoreFileParser"
]
# CONSTANT
MINUS_COL = 5
t = None
# Functions
def choose_word() -> str:
    """Chooses the answer from a list of words of five characters"""
    return choice(FIVE_LETTER_WORDS)
# -----------------------------------------------
def _set_key_color_or_not(gw, key_colored, k, c, override_check=False):
if not key_colored or override_check:
gw.set_key_color(k.capitalize(), c)
def _add_color(gw, column, keycolored, character, color, ac, oc: Optional[bool] = None):
    """Color one grid square (and maybe its keyboard key).

    Returns *ac* with a single occurrence of *character* removed, so later
    tiles of the same letter are scored against the remaining answer letters.
    """
    gw.set_square_color(gw.get_current_row(), column, color)
    if oc:
        _set_key_color_or_not(gw, keycolored, character, color, oc)
    else:
        _set_key_color_or_not(gw, keycolored, character, color)
    return ac.replace(character, "", 1)
def add_tempfile() -> NamedTemporaryFile:
    """Create (once) and return the shared score temp file.

    delete=False keeps the file on disk until ScoreFileParser.close()
    unlinks it explicitly.
    """
    global t
    if not t:
        t = NamedTemporaryFile("w+", encoding="utf-8", prefix="wordle_", dir=getcwd(), delete=False)
    return t
def validate_responce(gw: Type[WordleGWindow], res: str, a: str) -> Union[bool, bool, NamedTemporaryFile]:
    """Validates user response
    :param gw: Main Wordle window class
    :param res: User responce
    :param a: answer to the wordle
    Returns:
        validity | word-validation | score tempfile
    """
    global MINUS_COL
    # Copy of the answer from which matched letters are consumed, so repeated
    # letters in the guess are colored correctly.
    a_copy = deepcopy(a)
    correct_counter = 0
    temp = add_tempfile()
    # checks if word is not in the word list
    if res not in FIVE_LETTER_WORDS:
        gw.show_message(f"{res} is not a word!!!")
        return False, True, temp
    for c in a:
        # Column of the current guess letter: MINUS_COL walks 5..1 so the
        # column index walks 0..4 across the row.
        col = N_COLS - MINUS_COL
        ch = gw.get_square_letter(gw.get_current_row(), col).lower()
        key_colored = gw.get_key_color(c.capitalize()) != UNKNOWN_COLOR
        if ch == c:
            # Exact position match: green; force-recolor the keyboard key.
            a_copy = _add_color(gw, col, key_colored, ch, CORRECT_COLOR, a_copy, True)
            correct_counter += 1
        elif ch in a_copy:
            # Letter present elsewhere (and not yet consumed): yellow.
            a_copy = _add_color(gw, col, key_colored, ch, PRESENT_COLOR, a_copy)
        else:
            a_copy = _add_color(gw, col, key_colored, ch, MISSING_COLOR, a_copy)
        MINUS_COL -= 1
    # Append "row|correct-count" to the score file for ScoreFileParser.
    line = f"{gw.get_current_row()}|{correct_counter}\n"
    temp.write(line)
    temp.flush()
    # Reset the module-level column counter for the next guess.
    MINUS_COL = 5
    if correct_counter == 5:
        return True, False, temp
    return False, False, temp
class ScoreFileParser:
    """Parses the score file and renders per-row scores onto the wordle grid."""

    # Whether the letter grid has been wiped and replaced by the score view.
    cleared = False

    def __init__(self, gw: Type[WordleGWindow], tmp: Type[NamedTemporaryFile]):
        self.gw = gw
        self.tmpfile = tmp

    def parse(self):
        """Render every "row|correct" line of the score file onto the grid."""
        self.tmpfile.seek(0)
        lines = self.tmpfile.readlines()
        if not self.cleared:
            # Wipe the grid exactly once. The old code recursed into parse()
            # here and then fell through, rendering every line (and showing
            # the final message) twice.
            self.clear_grid()
        for l in lines:
            row = l.split("|")[0]
            correct_points = l.split("|")[1].replace("\n", "")
            # Column 0: 1-based row number; column 4: correct-letter count.
            self.gw.set_square_letter(int(row), 0, str(int(row) + 1))
            self.gw.set_square_letter(int(row), 4, correct_points)
            self.gw.set_square_color(int(row), 0, PRESENT_COLOR)
            if int(correct_points) == 5:
                self.gw.set_square_color(int(row), 4, CORRECT_COLOR)
            else:
                self.gw.set_square_color(int(row), 4, MISSING_COLOR)
        self.gw.show_message("rows points", "limegreen")

    def clear_grid(self):
        """Blank every square of the wordle grid."""
        for i in range(N_ROWS):
            self.gw.set_current_row(i)
            for j in range(N_COLS):
                self.gw.set_square_letter(i, j, "")
        self.cleared = True

    def close(self):
        """Close and delete the score file."""
        self.tmpfile.close()
        path = self.tmpfile.name
        print(path)
        unlink(path)
|
SLey3/Project-1
|
utils.py
|
utils.py
|
py
| 4,316 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43011396057
|
"""Calculate various statistics for the CEA playerbase, and stores in a spreadsheet.
Attributes:
counts (Counter): counting number of games
EXTRA_GAMES_FILE (str): File to be used if we need to input extra games
K (int): K-value used for elo ratings.
"""
import csv
import json
import os
import re
import string
import sys
import traceback
from datetime import datetime
from datetime import timedelta
from collections import Counter, deque
import mpyq
import sc2reader
import trueskill
import glicko2
import cea_team_name_parser
import xlsxwriter
import pandas as pd
from sc2reader.engine.plugins import APMTracker, SelectionTracker # unused
from consts import SEASONS, STARTING_DATE, WEEKS
from setup_replays import find_team, replay_directory, teams_file
from zeroNumber import zeroNumber
from elo import EloRating
sc2reader.engine.register_plugin(APMTracker())
UNKNOWN_TEAM = "TEAM_NOT_KNOWN"
EXTRA_GAMES_FILE = "extra_games.csv"
# K is used for elo.
K=80
counts = Counter()
class PlayerObject:
    """Per-player record: identity, game results, team history, and several
    rating systems (Elo, Glicko-2, long-term Glicko-2, TrueSkill)."""

    def __init__(self, name, season, team):
        self.name = name
        self.aliases = set()
        self.wins = 0
        # Elo rating, starting at 1000 (K is the module-level update factor).
        self.rating = 1000
        self.glicko = glicko2.Player()
        # long term glicko rating
        self.glicko_longterm = glicko2.Player()
        self.trueskill = trueskill.Rating()
        self.peak_rating = 1000
        self.games = []
        # season -> team name; season 0 is the most recent season.
        self.teams = {season : team}
        # maxsize marks "not yet computed" (see zeroNumber import).
        self.zeroNumber = sys.maxsize

    # Derived stats. NOTE: mmr raises ValueError for a player with no games.
    losses = property(fget=lambda self: len(self.games) - self.wins)
    mmr = property(fget=lambda self: max(game.mmr for game in self.games))

    def setRating(self, rating):
        # Track peak Elo alongside the current value.
        self.rating = rating
        if rating > self.peak_rating:
            self.peak_rating = rating

    def isActive(self):
        # Active means the player has a team in the current season (key 0).
        return 0 in self.teams

    def addTeam(self, season, team):
        self.teams[season] = team

    @property
    def race(self):
        # Most frequently played race across all recorded games.
        race_counter = Counter([game.race for game in self.games])
        return race_counter.most_common(1)[0][0]

    @property
    def opponents_beaten(self):
        return [game.opponent for game in self.games if game.win]

    @property
    def opponents_lost_to(self):
        return [game.opponent for game in self.games if not game.win]

    @property
    def mostRecentTeam(self):
        # Seasons count down to 0 (most recent), so the smallest key wins.
        return self.teams[sorted(list(self.teams.keys()))[0]]

    def addGames(self, game):
        self.games.append(game)
        if game.win:
            self.wins += 1
class GameObject:
    """ Struct containing information about a game, given 1 player.
    Attributes:
        duration (int): Length of the game in seconds
        opponent (str): Name of the opponent
        race (str): Selected race
    """
    def __init__(self, opponent, race, opponent_race, map_name, mmr, win, duration,
                 season, glicko_longterm, opp_glicko_longterm):
        self.opponent = opponent
        self.race = race
        self.opponent_race = opponent_race
        self.mmr = mmr
        self.win = win
        self.map = map_name
        # Snapshot both players' long-term Glicko state at game time so
        # season-by-season ratings can be recomputed later
        # (see update_glicko_longterm).
        self.glicko_rating = glicko_longterm.getRating()
        self.glicko_rd = glicko_longterm.getRd()
        self.opp_glicko_rating = opp_glicko_longterm.getRating()
        self.opp_glicko_rd = opp_glicko_longterm.getRd()
        # self.apm = apm
        self.duration = duration
        self.season = season
# Add in extra games
# Games is 2d array: each one has [date, player1, player2, win]
def input_extra_elo(players, games, current_date, season):
    """Add in extra games.
    Args:
        players (Array[PlayerObject]): array of the 2 players
        games (str[n,4]): Each column is [date, player1, player2, win].
            Each row is a game.
        current_date (datetime): current date. don't process games after date.
        season (int): season. 0 is most recent
    """
    # Consume queued games (assumed in date order) up to current_date.
    while games and games[0][0] and current_date > datetime.strptime(games[0][0], "%m/%d/%Y"):
        # ISSUE: doesn't resolve aliases, doesn't work if player has not already been processed.
        player_names = [games[0][1].lower(), games[0][2].lower()]
        for index, player in enumerate(player_names):
            # add them in if not in there
            if player not in players:
                # NOTE(review): `teams` is not a parameter of this function —
                # it relies on a module-level variable; confirm it is defined
                # before this is called.
                players[player] = PlayerObject(player,
                                               season, find_team(teams, player))
        for index, player in enumerate(player_names):
            # Extra games carry no replay metadata, so race/map/mmr are blank.
            gameObject = GameObject(opponent=player_names[1-index], race="", opponent_race="", map_name="", mmr=0,
                                    win=games[0][3].lower() == player,
                                    duration=0, season=season,
                                    glicko_longterm = players[player].glicko_longterm,
                                    opp_glicko_longterm = players[player_names[1 - index]].glicko_longterm)
            players[player].addGames(gameObject)
        winner = games[0][3].lower() == player_names[0]
        update_rating(players[player_names[0]], players[player_names[1]], winner)
        games.popleft()
def update_rating(player1, player2, win):
    """Fold one game result into both players' rating systems.

    Args:
        player1 (PlayerObject): first player.
        player2 (PlayerObject): second player.
        win (bool): True when player1 won.
    """
    # Elo: EloRating returns both new ratings at once.
    player1.rating, player2.rating = EloRating(player1.rating, player2.rating, K, win)
    # Glicko-2: player1 is updated against player2's pre-game numbers;
    # player2 then sees player1's already-updated numbers (order preserved
    # from the original implementation).
    player1.glicko.update_player([player2.glicko.getRating()], [player2.glicko.getRd()], [win])
    player2.glicko.update_player([player1.glicko.getRating()], [player1.glicko.getRd()], [not win])
    # Trueskill: rate_1vs1 takes (winner, loser) and returns them updated.
    if win:
        player1.trueskill, player2.trueskill = trueskill.rate_1vs1(player1.trueskill, player2.trueskill)
    else:
        player2.trueskill, player1.trueskill = trueskill.rate_1vs1(player2.trueskill, player1.trueskill)
def update_glicko_longterm(players):
    """Recompute every player's long-term Glicko-2 rating, season by season.

    Seasons are processed oldest-to-newest; a season with no games counts
    as a rating period the player sat out (their RD grows).

    Args:
        players (dict[str, PlayerObject]): lowercase name -> PlayerObject.
    """
    for season in reversed(range(len(SEASONS))):  # oldest season first
        for player in players.values():
            # Gather this player's games for the season in one pass.
            season_games = [g for g in player.games if g.season == season]
            if not season_games:
                player.glicko_longterm.did_not_compete()
                continue
            player.glicko_longterm.update_player(
                [g.opp_glicko_rating for g in season_games],
                [g.opp_glicko_rd for g in season_games],
                [g.win for g in season_games])
def load_value(replay_filename, value):
    """Read a per-player metadata field from a replay's embedded JSON.

    Args:
        replay_filename (str): path to the .SC2Replay file (an MPQ archive).
        value (str): key to read for each player (e.g. 'MMR').

    Returns:
        list: the key's value for player 0 and player 1; 0 where the key
        is absent from that player's metadata.
    """
    archive = mpyq.MPQArchive(replay_filename)
    metadata = json.loads(
        archive.read_file("replay.gamemetadata.json").decode("utf-8"))
    return [metadata['Players'][i].get(value, 0) for i in (0, 1)]
def calculate_elo(directory, players, teams, aliases, season, games):
    """Scan every replay in *directory* and fold results into *players*.

    Replays are processed in chronological order; manually-recorded games
    from *games* are interleaved by date.

    Args:
        directory (str): folder containing .SC2Replay files.
        players (dict[str, PlayerObject]): mutated in place.
        teams (dict): roster lookup consumed by find_team.
        aliases (dict[str, str]): lowercase alias -> canonical name.
        season (int): season index (0 is the most recent).
        games (collections.deque): extra games, consumed as their dates pass.
    """
    matcher = re.compile(r'\.SC2Replay$', re.IGNORECASE)

    def replay_date(replay):
        # load_level=2 parses just enough of the replay to get its timestamp.
        return sc2reader.load_replay(os.path.join(directory, replay), load_level=2).date

    replays = [f for f in os.listdir(directory) if matcher.search(f)]
    replays.sort(key=replay_date)
    print("Found %d replays to scan" % len(replays))
    for replay in replays:
        try:
            replay_filename = os.path.join(directory, replay)
            replay_file = sc2reader.load_replay(replay_filename, load_level=2)
            player_list = replay_file.players
            # Only 1v1 games are rated. The old code indexed player_list[1]
            # before this check, so a 1-player replay crashed into the
            # except handler; skip it cleanly instead (still consuming any
            # queued extra games dated before this replay).
            if len(player_list) != 2:
                input_extra_elo(players, games, replay_file.date, season)
                print(replay)
                continue
            player_names = [player_list[0].name,
                            player_list[1].name]
            player_mmrs = load_value(replay_filename, 'MMR')
            # Fold in manually-recorded games that predate this replay.
            input_extra_elo(players, games, replay_file.date, season)
            # Resolve aliases for players who play under several accounts
            # (alias values are lowered too, matching the old behavior).
            for i in range(len(player_names)):
                lowered = player_names[i].lower()
                player_names[i] = aliases.get(lowered, lowered).lower()
            # No recorded winner (e.g. observer-saved replay): skip it.
            if replay_file.winner is None:
                print(replay)
                continue
            # Register new players; refresh team membership for known ones.
            for index, player in enumerate(player_list):
                player_name = player_names[index]
                if player_name not in players:
                    players[player_name] = PlayerObject(player.name,
                                                        season, find_team(teams, player.name))
                else:
                    players[player_name].addTeam(season, find_team(teams, player.name))
            # Second pass: record the game from each player's point of view.
            for index, player in enumerate(player_list):
                player_name = player_names[index]
                gameObject = GameObject(opponent=player_names[1 - index],
                                        race=player.pick_race,
                                        opponent_race=player_list[1 - index].pick_race,
                                        map_name=replay_file.map_name,
                                        mmr=player_mmrs[index],
                                        win=replay_file.winner.players[0] == player,
                                        duration=replay_file.real_length,
                                        season=season,
                                        glicko_longterm=players[player_name].glicko_longterm,
                                        opp_glicko_longterm=players[player_names[1 - index]].glicko_longterm)
                players[player_name].addGames(gameObject)
            winner = replay_file.winner.players[0] == player_list[0]
            update_rating(players[player_names[0]], players[player_names[1]], winner)
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; one broken replay should not kill the whole scan,
        # but Ctrl-C must still work.
        except Exception:
            print("Error processing replay: %s" % replay)
            traceback.print_exc()
def writeProfile(value, workbook, player_dictionary):
    """Write one player's profile worksheet: teams, game log, and records.

    Args:
        value (PlayerObject): the player to write.
        workbook (xlsxwriter.Workbook): open workbook to add the sheet to.
        player_dictionary (dict): lowercase name -> PlayerObject, used to
            look up opponents' display names and teams.

    Returns:
        str: name of the worksheet created (used for internal hyperlinks).
    """
    # Worksheet names must be unique. The old fallback of "<name> 1" could
    # itself collide (and xlsxwriter raises on duplicates); keep bumping a
    # counter until the name is free.
    sheet_name = value.name
    suffix = 1
    while sheet_name in workbook.sheetnames:
        sheet_name = "%s %d" % (value.name, suffix)
        suffix += 1
    playerWorksheet = workbook.add_worksheet(sheet_name)
    main_sheet = "Main"
    playerWorksheet.write_url(0, 0, f"internal:'{main_sheet}'!A1", string='Back to Main Sheet')
    # --- Header row and column widths ---
    playerWorksheet.write(0, 1, 'Player Name')
    playerWorksheet.write(1, 1, value.name)
    playerWorksheet.set_column(1, 1, max(len('Player Name'), len(value.name)) + 1)
    playerWorksheet.write(0, 2, 'Teams')
    playerWorksheet.set_column(2, 2, 20)
    playerWorksheet.set_column(3, 4, 12)
    playerWorksheet.write(0, 4, 'Games')
    playerWorksheet.write(0, 5, 'Opponent Team')
    playerWorksheet.set_column(5, 5, 15)
    playerWorksheet.write(0, 6, 'Opponent')
    playerWorksheet.set_column(6, 6, 15)
    playerWorksheet.write(0, 7, 'Player Race')
    playerWorksheet.set_column(7, 7, 8)
    playerWorksheet.write(0, 8, 'Opponent Race')
    playerWorksheet.set_column(8, 8, 8)
    playerWorksheet.write(0, 9, 'Match Result')
    playerWorksheet.set_column(9, 9, 6)
    playerWorksheet.write(0, 10, 'Map')
    playerWorksheet.set_column(10, 10, 20)
    playerWorksheet.write(0, 12, 'Records')
    playerWorksheet.set_column(11, 11, 25)
    # --- One row per (season, team) membership, columns 2-3 ---
    index = 1
    for season, team in value.teams.items():
        playerWorksheet.write(index, 2, team)
        playerWorksheet.write(index, 3, SEASONS[season])
        index += 1
    # --- Game log (columns 4-10) plus per-race win/loss tallies ---
    indexGame = 1
    raceWinCounter = Counter()
    raceLossCounter = Counter()
    for game in value.games:
        win = "Win" if game.win else "Loss"
        if game.opponent_race:
            if game.win:
                raceWinCounter[game.opponent_race] += 1
            else:
                raceLossCounter[game.opponent_race] += 1
        startIndex = 4
        playerWorksheet.write(indexGame, startIndex, SEASONS[game.season])
        # Opponent's team for the game's season, when known.
        if game.season in player_dictionary[game.opponent].teams:
            oppTeam = player_dictionary[game.opponent].teams[game.season]
        else:
            oppTeam = "UNKNOWN_TEAM"  # fixed typo (was "UNKOWN_TEAM")
        playerWorksheet.write(indexGame, startIndex + 1, oppTeam)
        playerWorksheet.write(indexGame, startIndex + 2, player_dictionary[game.opponent].name)
        playerWorksheet.write(indexGame, startIndex + 3, game.race)
        playerWorksheet.write(indexGame, startIndex + 4, game.opponent_race)
        playerWorksheet.write(indexGame, startIndex + 5, win)
        playerWorksheet.write(indexGame, startIndex + 6, game.map)
        indexGame += 1
    # --- Head-to-head records: only repeat matchups (2+ games) ---
    opponentsBeaten = Counter(value.opponents_beaten)
    opponentsLostTo = Counter(value.opponents_lost_to)
    # Set unconditionally: the old code assigned startIndex inside the loop
    # below, so when no head-to-head row was written the race records were
    # emitted at the stale game-log column (4) instead of column 11.
    startIndex = 11
    indexRecord = 1
    for opponent in set(value.opponents_beaten + value.opponents_lost_to):
        # Counter returns 0 for missing keys, so the sum is the total games.
        count = opponentsBeaten[opponent] + opponentsLostTo[opponent]
        if count >= 2:
            playerWorksheet.write(indexRecord, startIndex, player_dictionary[opponent].name)
            playerWorksheet.write(indexRecord, startIndex + 1,
                                  "{0}:{1}".format(opponentsBeaten[opponent], opponentsLostTo[opponent]))
            indexRecord += 1
    # Blank row, then the overall record versus each race.
    indexRecord += 1
    for race in ['Terran', 'Zerg', 'Protoss', 'Random']:
        playerWorksheet.write(indexRecord, startIndex, "vs " + race)
        playerWorksheet.write(indexRecord, startIndex + 1,
                              "{0}:{1}".format(raceWinCounter[race], raceLossCounter[race]))
        indexRecord += 1
    return sheet_name
def write_profiles(player_dictionary):
    """Write every player's profile sheet into a standalone workbook.

    Args:
        player_dictionary (dict[str, PlayerObject]): lowercase name ->
            PlayerObject.
    """
    workbook = xlsxwriter.Workbook('cea_season_stats.xlsx')
    # The old version kept an `index` counter and the dict keys, but used
    # neither; iterate the values directly.
    for value in player_dictionary.values():
        writeProfile(value, workbook, player_dictionary)
    workbook.close()
def make_csv(player_dictionary):
    """Write the main summary spreadsheet plus per-player profile sheets.

    Produces cea_season_stats.xlsx with one "Main" row per player: team,
    name (hyperlinked to a profile sheet for recent players), win/loss
    record, Elo, Trueskill, peak MMR, "<top player> number", activity,
    race, and the opponents beaten / lost to sorted by opponent Elo.

    Args:
        player_dictionary (dict[str, PlayerObject]): lowercase name ->
            PlayerObject.
    """
    # "<X> Number": degrees of separation (by wins) from the top player X.
    maxPlayer = zeroNumber(player_dictionary)
    headers_arr = ["Team Name", "Name", "Wins", "Losses", "Elo (avg=1000)",
                   "Trueskill Rating (avg=25)", "Peak MMR", maxPlayer + " Number",
                   "Active", "Race", "Players Defeated", "Players Lost To"]
    workbook = xlsxwriter.Workbook('cea_season_stats.xlsx')
    worksheet1 = workbook.add_worksheet("Main")
    worksheet1.write_row(0, 0, headers_arr)
    worksheet1.freeze_panes(1, 0)

    # Hoisted out of the player loop: the old code re-defined this closure
    # on every iteration, and copied the list before sorted() copied it again.
    def opponent_names(opponents, descending):
        # Sort by the opponents' current Elo, then map nicknames to names.
        ordered = sorted(opponents,
                         key=lambda nick: player_dictionary[nick].rating,
                         reverse=descending)
        return [player_dictionary[nick].name for nick in ordered]

    index = 0
    for value in player_dictionary.values():
        new_entry = [
            value.mostRecentTeam,
            value.name,
            int(value.wins),
            int(value.losses),
            int(value.rating),
            # Trueskill is reported with its uncertainty.
            "{:.2f} ± {:.1f}".format(value.trueskill.mu, value.trueskill.sigma),
            int(value.mmr),
            # sys.maxsize means "unreachable" -> leave the cell blank.
            int(value.zeroNumber) if value.zeroNumber < sys.maxsize else '',
            "Yes" if value.isActive() else "No",
            value.race,
            " ; ".join(opponent_names(value.opponents_beaten, True)),
            " ; ".join(opponent_names(value.opponents_lost_to, False)),
        ]
        worksheet1.write_row(index + 1, 0, new_entry)
        # Only build a linked profile sheet for current players, or last
        # season's players with at least 5 games.
        if 0 in value.teams or (1 in value.teams and len(value.games) >= 5):
            playerSheet = writeProfile(value, workbook, player_dictionary)
            worksheet1.write_url(index + 1, 1, f"internal:'{playerSheet}'!A1", string=value.name)
        index += 1
    # Color-scale the Elo column for quick scanning.
    worksheet1.conditional_format('E2:E500', {'type': '3_color_scale'})
    print("Done creating CSV")
    workbook.close()
if __name__ == "__main__":
    # name (lowercase) -> PlayerObject, accumulated across all seasons.
    players = {}
    # Manually-recorded games (no replay file available), as a date-sorted deque.
    extra_games = cea_team_name_parser.init_extra_games(EXTRA_GAMES_FILE)
    # Instantiate Trueskill; SC2 games cannot end in a draw.
    trueskill.setup(draw_probability=0)
    # Iterate seasons descending from oldest to newest so ratings carry forward.
    for season in reversed(range(len(SEASONS))):
        #for season in [3]:
        teams, aliases = cea_team_name_parser.init_dictionary(teams_file(season))
        calculate_elo(replay_directory(season), players,
                      teams, aliases, season, extra_games)
    # Input extra elo for the newest season: drain any games still queued.
    input_extra_elo(players, extra_games, datetime.today(), 0)
    make_csv(players)
    #write_profiles(players)
|
carsonhu/cea-elo
|
calculate_elo.py
|
calculate_elo.py
|
py
| 16,986 |
python
|
en
|
code
| 3 |
github-code
|
6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.