seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
---|---|---|---|---|---|---|---|---|---|---|---|---|
46525020186
|
import pygame
from robot import Robot
from manual_robot import ManualRobot
from automated_robot import AutomatedRobot
from automated_robots.robots_concursantes import *
from automated_robots.robots_zimatek import *
from robot_hub import RobotHub
from coin import Coin
import numpy as np
import os
class Combat:
"""
A class to represent a robot combat environment.
...
Attributes
----------
dims : list
        width and height of the screen, dims = [width, height]
robots : pygame.sprite.Group
a sprite group containing the robot objects
robot_list : list
a list containing the robot objects
left_robot : Robot
the Robot that starts in the left-hand side
right_robot : Robot
the Robot that starts in the right-hand side
robot_hubs : pygame.sprite.Group
a sprite group containing the RobotHub objects
coin_per_second : float
estimated coin per second
Methods
-------
fix_bugs:
fixes bugs in the position of the robot sprites
run:
runs the robot combat
"""
def __init__(self, left_robot: Robot, right_robot: Robot, coin_per_second: float):
self.dims = (1050, 750)
self.robots = pygame.sprite.Group()
self.robots.add(left_robot)
self.robots.add(right_robot)
self.robot_list = [left_robot, right_robot]
self.left_robot = left_robot
self.right_robot = right_robot
self.robot_hubs = pygame.sprite.Group()
self.left_robot_hub = RobotHub(self.left_robot, RobotHub.DownLeft)
self.right_robot_hub = RobotHub(self.right_robot, RobotHub.DownRight)
self.robot_hubs.add(self.left_robot_hub)
self.robot_hubs.add(self.right_robot_hub)
self.coin_per_second = coin_per_second
self.font = None
self.font2 = None
def fix_bugs(self):
"""
fixes bugs in the position of the robot sprites
:return:
"""
if self.right_robot.living and self.left_robot.living:
collide = self.left_robot.rect.colliderect(self.right_robot.rect)
if collide:
if self.left_robot.rect.x <= self.right_robot.rect.x:
self.left_robot.move(dx=-self.left_robot.rect.width, combat=self)
self.right_robot.move(dx=self.right_robot.rect.width, combat=self)
else:
self.left_robot.move(dx=self.left_robot.rect.width, combat=self)
self.right_robot.move(dx=-self.right_robot.rect.width, combat=self)
def run(self):
"""
runs the robot combat
:return:
"""
pygame.init()
pygame.font.init()
self.font = pygame.font.Font("Resources/Pokemon_Classic.ttf", 16)
self.font2 = pygame.font.Font("Resources/Pokemon_Classic.ttf", 28)
background_image = pygame.image.load("Resources/background_mountains.jpg")
background_image = pygame.transform.rotozoom(background_image, 0, 2.5)
os.environ['SDL_VIDEO_CENTERED'] = '0'
screen = pygame.display.set_mode(self.dims)
pygame.display.set_caption("Robot Combat: {:s} vs {:s}".format(str(type(self.left_robot)).split(".")[-1][:-2],
str(type(self.right_robot)).split(".")[-1][:-2]))
for robot in self.robots:
robot.set_up()
for hub in self.robot_hubs:
hub.set_up()
# PRE LOOP
sprites_all = pygame.sprite.Group()
projectiles = pygame.sprite.Group()
coins = pygame.sprite.Group()
stop = False
pause = False
winner = None
sprites_all.add(self.robots)
sprites_all.add(projectiles)
sprites_all.add(coins)
clock = pygame.time.Clock()
time = 1
count_down = 60*3
totalcoins = 0
# -------- Principal Loop of the Program -----------
while not stop:
for event in pygame.event.get():
if event.type == pygame.QUIT:
stop = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
pause = not pause
winner = None
if event.key == pygame.K_1:
pause = True
winner = 1
if event.key == pygame.K_0:
pause = True
winner = 0
if isinstance(self.left_robot, ManualRobot) and self.left_robot.living:
projectile = self.left_robot.decide(event, self.left_robot_hub)
if projectile is not None:
sprites_all.add(projectile)
projectiles.add(projectile)
elif isinstance(self.right_robot, ManualRobot) and self.right_robot.living:
projectile = self.right_robot.decide(event, self.right_robot_hub)
if projectile is not None:
sprites_all.add(projectile)
projectiles.add(projectile)
# --- The Logic
if not pause and (time > count_down):
np.random.shuffle(self.robot_list)
for robot in self.robot_list:
if robot == self.left_robot:
other = self.right_robot
else:
other = self.left_robot
if isinstance(robot, AutomatedRobot) and robot.living:
projectile = robot.decide(other_robot_properties=other.get_properties(),
coins=coins,
projectiles=projectiles)
if projectile is not None:
sprites_all.add(projectile)
projectiles.add(projectile)
for robot in self.robot_list:
if robot.living:
robot_damaged = pygame.sprite.spritecollide(robot, projectiles, True)
coins_captured = pygame.sprite.spritecollide(robot, coins, True)
for projectile_hit in robot_damaged:
robot.suffer(projectile_hit.damage)
for coin in coins_captured:
robot.claim_coin(coin)
robot.update(combat=self)
for projectile in projectiles:
projectile.draw(screen)
projectile.update(combat=self)
coins.update()
self.fix_bugs()
if np.random.random() < self.coin_per_second / 60:
totalcoins += 2
pos1 = 50 + np.random.random(2) * (np.array(self.dims)-100) * np.array([0.5, 1])
pos2 = np.array(self.dims) * np.array([1, 0]) + pos1 * np.array([-1, 1])
coin_left = Coin(pos1)
coin_right = Coin(pos2)
coins.add(coin_left)
coins.add(coin_right)
sprites_all.add(coin_left)
sprites_all.add(coin_right)
# --- The image
screen.fill((255, 255, 255))
screen.blit(background_image, (0, 0))
sprites_all.draw(screen)
for projectile in projectiles:
projectile.draw(screen)
for hub in self.robot_hubs:
hub.draw(screen)
time_text = self.font.render("{:02d}:{:02d}".format(int((time / 60) // 60), int((time / 60) % 60)), False,
(0, 0, 0))
screen.blit(time_text, (self.dims[0] - 5 - time_text.get_width(), 5))
coin_text = self.font.render("# {:d}/{:d}".format(len(coins), int(totalcoins)), False,
(0, 0, 0))
screen.blit(coin_text, (self.dims[0] - 5 - coin_text.get_width(), 5 + coin_text.get_height()))
# Only for zimabot
if (isinstance(self.left_robot, Zimabot) or isinstance(self.right_robot, Zimabot)):
if isinstance(self.left_robot, Zimabot):
zimabot = self.left_robot
other = self.right_robot
else:
zimabot = self.right_robot
other = self.left_robot
if time < count_down or (zimabot.living and not other.living):
belt_image = pygame.image.load("Resources/cinturon.png").convert_alpha()
belt_image = pygame.transform.scale(belt_image, (2*zimabot.width, 2*zimabot.height))
screen.blit(belt_image, zimabot.pos - np.array([zimabot.width//2, int(zimabot.height*1.5)]))
# ----
if self.left_robot.living and not self.right_robot.living:
winner = 1
pause = True
if not self.left_robot.living and self.right_robot.living:
winner = 0
pause = True
if pause:
if winner == 1:
pause_text = self.font2.render("The winner is {:s}".format(str(type(self.left_robot)).split(".")[-1][:-2]), False, (0, 0, 0))
center = (self.dims[0] // 2, self.dims[1] // 2)
text_rect = pause_text.get_rect(center=center)
screen.blit(pause_text, text_rect)
elif winner == 0:
pause_text = self.font2.render("The winner is {:s}".format(str(type(self.right_robot)).split(".")[-1][:-2]), False, (0, 0, 0))
center = (self.dims[0] // 2, self.dims[1] // 2)
text_rect = pause_text.get_rect(center=center)
screen.blit(pause_text, text_rect)
else:
pause_text = self.font.render("Paused", False, (0, 0, 0))
center = (self.dims[0] // 2, self.dims[1] // 2)
text_rect = pause_text.get_rect(center=center)
screen.blit(pause_text, text_rect)
else:
time += 1
pygame.display.flip()
clock.tick(60)
pygame.quit()
if __name__ == '__main__':
attributes = {
"health": 500,
"armor": 90,
"health_regen": 19,
"damage": 65,
"self_speed": 3,
"projectile_initial_speed": 4,
"projectile_per_second": 0.6,
"g_health": 80,
"g_armor": 8,
"g_health_regen": 2,
"g_damage": 12,
"g_projectile_per_second": 0.05,
"max_self_speed": 5,
"max_projectile_initial_speed": 10,
"experience_for_level_up": 7,
"g_experience_for_level_up": 3
}
cps = 2
bots = pygame.sprite.Group()
bot1 = stalin_t_pose(x=150, y=325, **attributes)
bot2 = Zimabot(x=1050-150-4*32, y=325, turn_left=True,
projectile_color=(38, 162, 149), image_path="Resources/simple_robot_green.png",
**attributes)
mg = Combat(bot1, bot2, coin_per_second=cps)
mg.run()
| zimatek/RobotCombat | combat.py | combat.py | py | 11,303 | python | en | code | 0 | github-code | 6 |
42452241894
|
#!/usr/bin/python3
if __name__ == "__main__":
import sys
from calculator_1 import add, sub, mul, div
args = sys.argv[1:]
no_of_args = len(args)
if no_of_args != 3:
print("Usage: ./100-my_calculator.py <a> <operator> <b>")
sys.exit(1)
op = args[1]
operators = {"+": add, "-": sub, "*": mul, "/": div}
if op not in operators:
print("Unknown operator. Available operators: +, -, * and /")
sys.exit(1)
a = int(args[0])
b = int(args[2])
res = operators[op](a, b)
print(f"{a} {op} {b} = {res}")
| timmySpark/alx-higher_level_programming | 0x02-python-import_modules/100-my_calculator.py | 100-my_calculator.py | py | 575 | python | en | code | 0 | github-code | 6 |
32544533358
|
import json
import glob
from flask import Flask, send_file
import os
from flask_cors import CORS
app = Flask(__name__)
cors = CORS(app)
@app.route('/')
def DownloadMergedJson() -> dict:
    result = {}
    logs = {}
    node_ids = []
for f in glob.glob(os.path.join("..", "history_*.json")):
print(str(f))
node_ids.append(str(f).split('.')[2].split('_')[1])
result["all_nodes"] = node_ids
for f in glob.glob(os.path.join("..", "history_*.json")):
node_id = str(f).split('.')[2].split('_')[1]
with open(f, "rb") as infile:
result[node_id] = json.load(infile)
return result
app.run()
| SiyiGuo/COMP90020 | pythonproxy/getNodeData.py | getNodeData.py | py | 648 | python | en | code | 0 | github-code | 6 |
25144954100
|
import requests
import collections
import csv
from bs4 import BeautifulSoup
from bs4.element import Tag
class ParseAnimals:
def __init__(self) -> None:
self.animals_names = {}
def parse(self) -> None:
"""
        Loop until the category letter reaches 'Я'.
        Saves each animal's data on the page in a dict: key - letter, value - list of all animals on the page
"""
url = 'https://ru.wikipedia.org/wiki/Категория:Животные_по_алфавиту'
letter = ''
while letter != 'Я':
data = self._get_page(url)
self._parse_animal_on_page(data=data)
url = self._check_end_page(data=data)
letter = collections.deque(self.animals_names, maxlen=1)[0][0]
print(letter)
self._get_csv()
def _get_page(self, url: str) -> Tag:
"""
Make a request on the page and gets all page data
"""
request = requests.get(url)
soup = BeautifulSoup(request.text, 'lxml')
return soup.find('div', id='mw-pages')
def _parse_animal_on_page(self, data: Tag) -> None:
"""
Saves all animals on the page in a dict with key = category (letter)
"""
for el in data.find_all('div', class_='mw-category-group'):
category = el.h3.text
animal_names = [[i.text, f"https://ru.wikipedia.org{i.a['href']}"] for i in el.find_all('li')]
if not self.animals_names.get(category):
self.animals_names[category] = []
self.animals_names[category] = self.animals_names[category] + animal_names
def _check_end_page(self, data: Tag) -> str:
"""
Return an url to the next page
"""
hrf = data.find_all('a')[-1]
return f"https://ru.wikipedia.org{hrf['href']}"
def _get_csv(self) -> None:
"""
Saves data (dict) into csv file
"""
with open('animals_names_count.csv', 'w') as f:
writer = csv.writer(f)
writer.writerows([[f'{k}, {len(v)}'] for k, v in self.animals_names.items()])
if __name__ == '__main__':
parse = ParseAnimals()
parse.parse()
| enamsaraev/tetrika-test | task2/solution.py | solution.py | py | 2,239 | python | en | code | 0 | github-code | 6 |
17650565567
|
import nltk
from newspaper import Article
# nltk.download('punkt') is a Python command that is used to download the "punkt" dataset or resource from the Natural Language Toolkit (NLTK) library.
# NLTK is a popular library in Python for working with human language data, including tasks like tokenization, parsing, and text classification.
# The "punkt" dataset in NLTK contains pre-trained models and data necessary for tokenization, which is the process of breaking down a text into individual words or tokens.
# These pre-trained models can be used to tokenize text in various languages, making it easier to work with natural language data in your Python projects.
nltk.download('punkt')
url='https://indianexpress.com/article/technology/tech-news-technology/apple-event-2-things-wowed-us-8938618/'
article = Article(url)
article.download()
article.parse()
article.nlp()
print(f'Title: {article.title}')
print(f'Authors: {article.authors}')
print(f'Publish Date: {article.publish_date}')
print(f'Summary: {article.summary}')
| AnukulSri/summarize-news-article | news.py | news.py | py | 1,033 | python | en | code | 0 | github-code | 6 |
17669758792
|
"""2020_02_18
Revision ID: 000001
Revises:
Create Date: 2020-02-18 03:57:38.958091
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "000001"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"users",
sa.Column("added_on", sa.DateTime(), nullable=False),
sa.Column("modified_on", sa.DateTime(), nullable=False),
sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column("enabled", sa.Boolean(), nullable=False),
sa.Column("last_auth_time", sa.DateTime(), nullable=True),
sa.Column("username", sa.String(length=32), nullable=False),
sa.Column("password_hash", sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint("id", name=op.f("pk_users")),
)
op.create_index(op.f("ix_users_username"), "users", ["username"], unique=True)
op.create_table(
"user_accesses",
sa.Column("added_on", sa.DateTime(), nullable=False),
sa.Column("modified_on", sa.DateTime(), nullable=False),
sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False),
sa.Column("enabled", sa.Boolean(), nullable=False),
sa.Column("ip_address", sa.String(length=15), nullable=False),
sa.Column("external_app_id", sa.String(length=15), nullable=False),
sa.Column("users_id", sa.BigInteger(), nullable=False),
sa.ForeignKeyConstraint(
["users_id"],
["users.id"],
name=op.f("fk_user_accesses_users_id_users"),
onupdate="CASCADE",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_user_accesses")),
sa.UniqueConstraint(
"users_id",
"ip_address",
"external_app_id",
name=op.f("uq_user_accesses_users_id"),
),
)
op.create_index(
op.f("ix_user_accesses_external_app_id"),
"user_accesses",
["external_app_id"],
unique=True,
)
op.create_index(
op.f("ix_user_accesses_ip_address"),
"user_accesses",
["ip_address"],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_user_accesses_ip_address"), table_name="user_accesses")
op.drop_index(op.f("ix_user_accesses_external_app_id"), table_name="user_accesses")
op.drop_table("user_accesses")
op.drop_index(op.f("ix_users_username"), table_name="users")
op.drop_table("users")
# ### end Alembic commands ###
| ichux/elog | migrations/versions/000001_2020_02_18.py | 000001_2020_02_18.py | py | 2,743 | python | en | code | 2 | github-code | 6 |
73264679548
|
from pyglet.text import Label
from audio import explosion
from fonts.fonts import press_start_2p
from interfaces.interface import Interface
from system import system
import menus.menu
import menus.game_over_menu
class GameOverInterface(Interface):
game_over_label: Label = None
game_over_menu: menus.menu.Menu = None
def __init__(self):
self.game_over_label = Label('GAME OVER', font_name=press_start_2p, font_size=48)
self.game_over_label.anchor_x = 'center'
self.game_over_label.anchor_y = 'center'
self.game_over_menu = menus.game_over_menu.GameOverMenu()
self.resize()
window = system.get_window()
window.on_key_press = self.game_over_menu.on_key_press
self.game_over_menu.focused = True
explosion.play()
def on_draw(self):
self.game_over_label.draw()
self.game_over_menu.draw()
def resize(self):
window = system.get_window()
self.game_over_menu.move(window.width / 2, 100)
self.game_over_label.x = window.width / 2
self.game_over_label.y = window.height / 2
| KimPalao/Headshot | interfaces/game_over_interface.py | game_over_interface.py | py | 1,110 | python | en | code | 0 | github-code | 6 |
24363848040
|
# This script should be executed inside a NetAddiction Odoo 9 shell.
import json
import os
def remove_duplicate_attributes(product):
seen_ids = set()
duplicate_list = []
for attr in product.attribute_value_ids:
if attr.attribute_id.id not in seen_ids:
seen_ids.add(attr.attribute_id.id)
else:
duplicate_list.append(attr)
if duplicate_list:
product.write({"attribute_value_ids": [(3, attr.id) for attr in duplicate_list]})
return duplicate_list
duplicates = []
products = self.env["product.product"].search([])
for count, product in enumerate(products):
duplicate = remove_duplicate_attributes(product)
if duplicate:
print(duplicate)
duplicates.append(
{
"product_id": product.id,
"duplicates": [{"name": a.name, "type": a.attribute_id.display_name} for a in duplicate],
}
)
if not count % 100:
self._cr.commit()
self._cr.commit()
if duplicates:
with open("~/duplicates_found.json", "w") as fp:
json.dump(duplicates, fp, sort_keys=True, indent=4, separators=(",", ": "))
| suningwz/netaddiction_addons | scripts/remove_duplicates_attribute.py | remove_duplicates_attribute.py | py | 1,150 | python | en | code | 0 | github-code | 6 |
34879700956
|
import re
from requests import get
import sys
from sys import argv as cla
from readabilipy import simple_json_from_html_string
from ebooklib import epub
def valid_url(url):
regex = re.compile(
r'^(?:http)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return re.match(regex, str(url)) is not None
def slugify(s):
s = s.lower().strip()
s = ''.join(char for char in s if ord(char) < 128) #remove non-ascii characters
s = re.sub(r'[^\w\s-]', '', s)
s = re.sub(r'[\s_-]+', '-', s)
s = re.sub(r'^-+|-+$', '', s)
return s
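# Quick sanity check for the helpers above (illustrative, not part of the original script):
#   valid_url("https://example.com/article")  ->  True
#   slugify("Hello, World!")                  ->  "hello-world"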
def main():
    if len(cla) < 2 or not cla[1]:
raise Exception("Invalid argument..")
if len(cla) != 2:
raise Exception("This script expects just one parameter.. Did you comma separate the URL's")
links = str(cla[1]).split(',')
for l in links:
if not valid_url(l):
raise Exception(str("This is not a valid url: "+l))
book = epub.EpubBook()
book.set_language('en')
chapters = ['nav']
epub_title = ""
epub_author = ""
toc = []
if len(links) > 1:
print("You're trying to download {0} links. Please provide title and author.".format(len(links)))
epub_title = input("ePub title: ")
epub_author = input("ePub author: ")
for idx, link in enumerate(links):
try:
request = get(link)
if bool(request.text) == False:
if input('Do you want to skip this URL and continue? [y/n]') == 'y':
continue
else:
print('Script stopped')
sys.exit(0)
else:
print('Extracting content from page..')
page_content = simple_json_from_html_string(request.text, use_readability=False)
chapter_content = page_content['plain_content']
if not epub_title:
epub_title = page_content['title']
if not epub_author:
epub_author = page_content['byline'] if page_content['byline'] else "Various authors"
print('Adding content to ePub..')
chapter = epub.EpubHtml(title=page_content['title'], file_name=str('chapter{}.xhtml'.format(idx+1)), lang='en')
chapter.content = u'{}'.format(chapter_content)
book.add_item(chapter)
chapters.append(chapter)
pass
except Exception as e:
raise e
print("Finishing epub..")
slug = slugify(epub_title)
book.set_identifier(slug)
book.set_title(epub_title)
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
book.spine = chapters
if epub_author:
book.add_author(epub_author)
else:
book.add_author("Unknown Author")
try:
epub.write_epub('{}.epub'.format(slug), book, {})
print("Done! Saved to {}.epub".format(slug))
except Exception as e:
raise e
if __name__ == "__main__":
main()
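# Example invocation (hypothetical URLs; comma-separated, as the script expects):
#   python web2epub.py "https://example.com/post-1,https://example.com/post-2"
# With more than one link the script prompts for an ePub title and author before downloading.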
| eklop/web2epub | web2epub.py | web2epub.py | py | 2,797 | python | en | code | 0 | github-code | 6 |
2600836089
|
# Import libraries
from sklearn.linear_model import LinearRegression
import numpy as np
import csv
from vnstock import *
data=[]
cp=listing_companies()
check='ngân hàng thương mại cổ phần'
nh=[]
for n in range(len(cp)):
if check in cp.loc[n][2].lower():
nh.append(cp.loc[n][0])
print(len(nh))
for ticket in nh:
linkfile='./nganhang/'+ticket+'.csv'
with open(linkfile) as file:
fp=csv.reader(file)
header=next(fp)
for row in fp:
data.append(row)
# Create the sample data
K=[]
h=[]
for i in range(len(data)):
K.append([float(data[i][1]),float(data[i][2])])
h.append(float(data[i][4]))
# Create a linear regression model
model = LinearRegression()
# Train the model on the data
model.fit(K, h)
# Print the model coefficients
print('Coefficients:', model.coef_)
# Predict a new value
x_new = np.array([[48850.0,48222.0]])
y_new = model.predict(x_new)
print('Predicted value:', y_new)
| vanvy102/code | Code-test/linear.py | linear.py | py | 1,070 | python | vi | code | 0 | github-code | 6 |
22779752732
|
def create_triangle(size):
triangle = ""
# first part
for r in range(1, size + 1):
for c in range(1, r + 1):
triangle += f"{c} "
triangle += "\n"
# second part
for r in range(size - 1, -1, -1):
for c in range(1, r + 1):
triangle += f"{c} "
triangle += "\n"
return triangle
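# For reference (added comment): create_triangle(3) returns the string
#   1
#   1 2
#   1 2 3
#   1 2
#   1
# followed by a trailing blank line from the final r == 0 iteration.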
def mathematical_operations(*args):
first, operator, second = args
operations = {
"/": lambda x, y: x / y,
"*": lambda x, y: x * y,
"-": lambda x, y: x - y,
"^": lambda x, y: x ** y,
}
return f"{operations[operator](float(first), float(second)):.2f}"
def fibonacci_sequence_module(*args):
# fibonacci logic
def fibonacci_sequence(n):
sequence = [0, 1]
for i in range(n - 2):
sequence.append(sequence[-1] + sequence[-2])
return sequence
def locate_number(n, seq):
try:
return f"The number - {n} is at index {seq.index(n)}"
except ValueError:
return f"The number {n} is not in the sequence"
action = args[0]
if action == "Create":
return fibonacci_sequence
elif action == "Locate":
return locate_number
| DanieII/SoftUni-Advanced-2023-01 | advanced/modules/module.py | module.py | py | 1,228 | python | en | code | 0 | github-code | 6 |
30410142101
|
import CryptoCurrency
import sqlite3 as sql
import requests
from datetime import datetime
import time
def get_crypto():
"""Récupères la liste des cryptomonnaies tradable sur le marché futures de Bybit
(!! 120 requests per second for 5 consecutive seconds maximum)
Returns:
list:liste des cryptomonnaies
"""
url = "https://api-testnet.bybit.com/v5/market/instruments-info?category=linear"
payload = {}
headers = {}
response = requests.request(
"GET", url, headers=headers, data=payload).json()
baseCoins = []
for crypto in response['result']['list']:
if crypto['baseCoin'][:5] == '10000' and crypto['quoteCoin'] == 'USDT' and crypto['baseCoin'] not in baseCoins:
# baseCoins += [crypto['baseCoin'][5:]]
            # not handled for the moment
pass
elif crypto['baseCoin'][:4] == '1000' and crypto['quoteCoin'] == 'USDT' and crypto['baseCoin'] not in baseCoins:
# baseCoins += [crypto['baseCoin'][4:]]
            # not handled for the moment
pass
elif crypto['quoteCoin'] == 'USDT' and crypto['baseCoin'] not in baseCoins and crypto['baseCoin'] != 'LUNA2' and crypto['baseCoin'] != 'PEOPLE':
            # LUNA2 and PEOPLE exceptions remain to be handled
baseCoins += [crypto['baseCoin']]
return baseCoins
def get_price_history(interval, crypto):
"""renvoie un dicitonnaire qui permet de connaître le prix de la cryptomonnaie depuis l'apparition de son contrat futures sur l'échange de cryptomonnaie.
Args:
interval (string): interval de temps entre deux données (Kline interval. 1,3,5,15,30,60,120,240,360,720,D,M,W)
crypto (CryptoCurrency): la crypto dont on veut le prix
"""
listeDictionnaires = []
listeDictionnaires.append(crypto.get_price(interval, 1500000000000,
int(datetime.now().timestamp())*1000))
lastTimestamps = list(listeDictionnaires[0].keys())
lastTimestamps.sort()
if len(lastTimestamps) < 200:
return listeDictionnaires
# intervalInTimestamp = int(lastTimestamps[2])-int(lastTimestamps[1])
    # up to this point we have retrieved the last 200 timestamps
compteur = 1
while len(lastTimestamps) == 200:
listeDictionnaires.append(crypto.get_price(
interval, 1500000000000, int(lastTimestamps[0])))
        # must not exceed 120 requests per 5 seconds
if compteur % 119 == 0:
time.sleep(5)
lastTimestamps = (list(listeDictionnaires[compteur].keys()))
lastTimestamps.sort()
compteur += 1
print(listeDictionnaires)
return listeDictionnaires
if __name__ == "__main__":
    # normal operation
# print(get_crypto())
cryptos = get_crypto()
conn = sql.connect("cryptoDatabase.db")
curs = conn.cursor()
curs.execute("DROP TABLE IF EXISTS Crypto")
curs.execute(
"CREATE TABLE Crypto (nom VARCHAR, symbol VARCHAR PRIMARY KEY, whitepaperlink VARCHAR)")
curs.execute("DROP TABLE IF EXISTS Prix")
curs.execute(
"CREATE TABLE Prix (symbol VARCHAR, date VARCHAR, open FLOAT, high FLOAT, low FLOAT, close FLOAT,PRIMARY KEY (symbol, date),FOREIGN KEY (symbol) REFERENCES Crypto(symbol))")
cryptoCurrencies = []
for crypto in cryptos:
cryptoCurrencies += [CryptoCurrency.Cryptocurrency(crypto)]
for crypto in cryptoCurrencies:
infos = crypto.get_name_and_whitepaperlink()
        # the interval chosen here is weekly; for more precision, a smaller interval can be used
price_history = get_price_history(
"W", crypto)
curs.execute("INSERT INTO Crypto(nom,symbol,whitepaperlink) VALUES (?,?,?)",
(infos["name"], crypto.symbol, infos["whitepaperLink"]))
conn.commit()
for prices in price_history:
timestamps = list(prices.keys())
for date in timestamps:
curs.execute("INSERT INTO Prix(symbol,date,open,high,low,close) VALUES (?,?,?,?,?,?)",
(crypto.symbol, datetime.fromtimestamp(int(date)/1000), prices[date]["open"], prices[date]["high"], prices[date]["low"], prices[date]["close"]))
conn.commit()
conn.commit()
conn.close()
# test
# nft = CryptoCurrency.Cryptocurrency('EOS')
# print(get_price_history("D", nft))
# bitcoin = CryptoCurrency.Cryptocurrency("BTC")
# get_price_history("D", bitcoin)
# infos = bitcoin.get_name_and_whitepaperlink()
# conn = sql.connect("cryptoDatabase.db")
# curs = conn.cursor()
# curs.execute("DROP TABLE IF EXISTS Crypto")
# curs.execute(
# "CREATE TABLE Crypto (nom VARCHAR PRIMARY KEY, symbole VARCHAR, whitepaperlink VARCHAR)")
# curs.execute(
# "INSERT INTO Crypto(nom,symbole,whitepaperlink) VALUES (?,?,?)", (infos["name"], bitcoin.symbol, infos["whitepaperLink"]))
# conn.commit()
# conn.close()
| ArthurOnWeb/l-historique-du-prix-d-une-cryptomonnaie | Main.py | Main.py | py | 5,015 | python | en | code | 0 | github-code | 6 |
35445440233
|
from dexy.common import OrderedDict
import dexy.database
import dexy.doc
import dexy.exceptions
import dexy.parser
import dexy.reporter
import dexy.task
import inspect
import json
import logging
import logging.handlers
import os
import shutil
class Wrapper(object):
"""
Class that assists in interacting with Dexy, including running Dexy.
"""
DEFAULT_ARTIFACTS_DIR = 'artifacts'
DEFAULT_CONFIG_FILE = 'dexy.conf' # Specification of dexy-wide config options.
DEFAULT_DANGER = False
DEFAULT_DB_ALIAS = 'sqlite3'
DEFAULT_DB_FILE = 'dexy.sqlite3'
DEFAULT_DISABLE_TESTS = False
DEFAULT_DONT_USE_CACHE = False
DEFAULT_DRYRUN = False
DEFAULT_EXCLUDE = ''
DEFAULT_GLOBALS = ''
DEFAULT_HASHFUNCTION = 'md5'
DEFAULT_IGNORE_NONZERO_EXIT = False
DEFAULT_LOG_DIR = 'logs'
DEFAULT_LOG_FILE = 'dexy.log'
DEFAULT_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
DEFAULT_LOG_LEVEL = 'DEBUG'
DEFAULT_RECURSE = True
DEFAULT_REPORTS = 'output'
DEFAULT_SILENT = False
LOG_LEVELS = {
'DEBUG' : logging.DEBUG,
'INFO' : logging.INFO,
'WARN' : logging.WARN
}
RENAME_PARAMS = {
'artifactsdir' : 'artifacts_dir',
'conf' : 'config_file',
'dbalias' : 'db_alias',
'dbfile' : 'db_file',
'disabletests' : 'disable_tests',
'dryrun' : 'dry_run',
'ignore' : 'ignore_nonzero_exit',
'logfile' : 'log_file',
'logformat' : 'log_format',
'loglevel' : 'log_level',
'logsdir' : 'log_dir',
'nocache' : 'dont_use_cache'
}
SKIP_KEYS = ['h', 'help', 'version']
def __init__(self, *args, **kwargs):
self.initialize_attribute_defaults()
self.check_config_file_location(kwargs)
self.load_config_file()
self.update_attributes_from_config(kwargs)
self.args = args
self.docs_to_run = []
self.tasks = OrderedDict()
self.pre_attrs = {}
self.state = None
def initialize_attribute_defaults(self):
self.artifacts_dir = self.DEFAULT_ARTIFACTS_DIR
self.config_file = self.DEFAULT_CONFIG_FILE
self.danger = self.DEFAULT_DANGER
self.db_alias = self.DEFAULT_DB_ALIAS
self.db_file = self.DEFAULT_DB_FILE
self.disable_tests = self.DEFAULT_DISABLE_TESTS
self.dont_use_cache = self.DEFAULT_DONT_USE_CACHE
self.dry_run = self.DEFAULT_DRYRUN
self.exclude = self.DEFAULT_EXCLUDE
self.globals = self.DEFAULT_GLOBALS
self.hashfunction = self.DEFAULT_HASHFUNCTION
self.ignore_nonzero_exit = self.DEFAULT_IGNORE_NONZERO_EXIT
self.log_dir = self.DEFAULT_LOG_DIR
self.log_file = self.DEFAULT_LOG_FILE
self.log_format = self.DEFAULT_LOG_FORMAT
self.log_level = self.DEFAULT_LOG_LEVEL
self.recurse = self.DEFAULT_RECURSE
self.reports = self.DEFAULT_REPORTS
self.silent = self.DEFAULT_SILENT
def check_config_file_location(self, kwargs):
self.update_attributes_from_config(kwargs)
def update_attributes_from_config(self, config):
for key, value in config.iteritems():
if not key in self.SKIP_KEYS:
corrected_key = self.RENAME_PARAMS.get(key, key)
if not hasattr(self, corrected_key):
raise Exception("no default for %s" % corrected_key)
setattr(self, corrected_key, value)
def load_config_file(self):
"""
Look for a config file in current working dir and loads it.
"""
if os.path.exists(self.config_file):
with open(self.config_file) as f:
try:
conf = json.load(f)
except ValueError as e:
msg = inspect.cleandoc("""Was unable to parse the json in your config file '%s'.
Here is information from the json parser:""" % self.config_file)
msg += "\n"
msg += str(e)
raise dexy.exceptions.UserFeedback(msg)
self.update_attributes_from_config(conf)
@classmethod
def default_config(klass):
conf = klass().__dict__.copy()
# Remove any attributes that aren't config options
del conf['args']
del conf['docs_to_run']
del conf['tasks']
for cl_key, internal_key in klass.RENAME_PARAMS.iteritems():
conf[cl_key] = conf[internal_key]
del conf[internal_key]
return conf
def db_path(self):
return os.path.join(self.artifacts_dir, self.db_file)
def log_path(self):
return os.path.join(self.log_dir, self.log_file)
def run(self):
self.setup_run()
self.log.debug("batch id is %s" % self.batch_id)
self.state = 'populating'
for doc in self.docs_to_run:
for task in doc:
task()
self.state = 'settingup'
for doc in self.docs_to_run:
for task in doc:
task()
self.state = 'running'
for doc in self.docs_to_run:
for task in doc:
task()
self.state = 'complete'
self.save_db()
self.setup_graph()
def setup_run(self):
self.check_dexy_dirs()
self.setup_log()
self.setup_db()
self.batch_id = self.db.next_batch_id()
if not self.docs_to_run:
self.setup_docs()
def setup_read(self, batch_id=None):
self.check_dexy_dirs()
self.setup_log()
self.setup_db()
if batch_id:
self.batch_id = batch_id
else:
self.batch_id = self.db.max_batch_id()
def check_dexy_dirs(self):
if not (os.path.exists(self.artifacts_dir) and os.path.exists(self.log_dir)):
raise dexy.exceptions.UserFeedback("You need to run 'dexy setup' in this directory first.")
def setup_dexy_dirs(self):
if not os.path.exists(self.artifacts_dir):
os.mkdir(self.artifacts_dir)
if not os.path.exists(self.log_dir):
os.mkdir(self.log_dir)
def remove_dexy_dirs(self):
shutil.rmtree(self.artifacts_dir)
shutil.rmtree(self.log_dir)
# TODO remove reports dirs
def setup_log(self):
try:
loglevel = self.LOG_LEVELS[self.log_level.upper()]
except KeyError:
msg = "'%s' is not a valid log level, check python logging module docs."
raise dexy.exceptions.UserFeedback(msg % self.log_level)
self.log = logging.getLogger('dexy')
self.log.setLevel(loglevel)
handler = logging.handlers.RotatingFileHandler(
self.log_path(),
encoding="utf-8")
formatter = logging.Formatter(self.log_format)
handler.setFormatter(formatter)
self.log.addHandler(handler)
def setup_db(self):
db_class = dexy.database.Database.aliases[self.db_alias]
self.db = db_class(self)
def setup_docs(self):
for arg in self.args:
self.log.debug("Processing arg %s" % arg)
doc = self.create_doc_from_arg(arg)
if not doc:
raise Exception("no doc created for %s" % arg)
doc.wrapper = self
self.docs_to_run.append(doc)
def create_doc_from_arg(self, arg, *children, **kwargs):
if isinstance(arg, dexy.task.Task):
return arg
elif isinstance(arg, list):
if not isinstance(arg[0], basestring):
msg = "First arg in %s should be a string" % arg
raise dexy.exceptions.UserFeedback(msg)
if not isinstance(arg[1], dict):
msg = "Second arg in %s should be a dict" % arg
raise dexy.exceptions.UserFeedback(msg)
if kwargs:
raise Exception("Shouldn't have kwargs if arg is a list")
if children:
raise Exception("Shouldn't have children if arg is a list")
alias, pattern = dexy.parser.AbstractSyntaxTree.qualify_key(arg[0])
return dexy.task.Task.create(alias, pattern, **arg[1])
elif isinstance(arg, basestring):
alias, pattern = dexy.parser.AbstractSyntaxTree.qualify_key(arg[0])
return dexy.task.Task.create(alias, pattern, *children, **kwargs)
else:
raise Exception("unknown arg type %s for arg %s" % (arg.__class__.__name__, arg))
def save_db(self):
self.db.save()
## DOCUMENTED above here..
def run_docs(self, *docs):
"""
Convenience method for testing to add docs and then run them.
"""
self.setup_dexy_dirs()
self.docs_to_run = docs
self.run()
def register(self, task):
"""
Register a task with the wrapper
"""
self.tasks[task.key_with_class()] = task
def registered_docs(self):
return [d for d in self.tasks.values() if isinstance(d, dexy.doc.Doc)]
def registered_doc_names(self):
return [d.name for d in self.registered_docs()]
def reports_dirs(self):
return [c.REPORTS_DIR for c in dexy.reporter.Reporter.plugins]
def report(self, *reporters):
"""
Runs reporters. Either runs reporters which have been passed in or, if
none, then runs all available reporters which have ALLREPORTS set to
true.
"""
if not reporters:
reporters = [c() for c in dexy.reporter.Reporter.plugins if c.ALLREPORTS]
for reporter in reporters:
self.log.debug("Running reporter %s" % reporter.ALIASES[0])
reporter.run(self)
def get_child_hashes_in_previous_batch(self, parent_hashstring):
return self.db.get_child_hashes_in_previous_batch(self.batch_id, parent_hashstring)
def load_doc_config(self):
"""
Look for document config files in current working dir and load them.
"""
parser_aliases = dexy.parser.Parser.aliases
for k in parser_aliases.keys():
if os.path.exists(k):
self.log.debug("found doc config file '%s'" % k)
parser = parser_aliases[k](self)
with open(k, "r") as f:
self.doc_config = f.read()
parser.parse(self.doc_config)
break
def setup_config(self):
self.setup_dexy_dirs()
self.setup_log()
self.load_doc_config()
def cleanup_partial_run(self):
if hasattr(self, 'db'):
# TODO remove any entries which don't have
self.db.save()
def setup_graph(self):
"""
Creates a dot representation of the tree.
"""
graph = ["digraph G {"]
for task in self.tasks.values():
if hasattr(task, 'artifacts'):
task_label = task.key_with_class().replace("|", "\|")
label = """ "%s" [shape=record, label="%s\\n\\n""" % (task.key_with_class(), task_label)
for child in task.artifacts:
label += "%s\l" % child.key_with_class().replace("|", "\|")
label += "\"];"
graph.append(label)
for child in task.children:
if not child in task.artifacts:
graph.append(""" "%s" -> "%s";""" % (task.key_with_class(), child.key_with_class()))
elif "Artifact" in task.__class__.__name__:
pass
else:
graph.append(""" "%s" [shape=record];""" % task.key_with_class())
for child in task.children:
graph.append(""" "%s" -> "%s";""" % (task.key_with_class(), child.key_with_class()))
graph.append("}")
self.graph = "\n".join(graph)
| gotosprey/dexy | dexy/wrapper.py | wrapper.py | py | 11,970 | python | en | code | null | github-code | 6 |
14572312600
|
############## THESE SPLINES ARE USING CATMULL SPLINES ##############
# https://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline
#
# FOLLOWING javidx9's SPLINE VIDEOS:
# https://www.youtube.com/watch?v=9_aJGUTePYo&t=898s&ab_channel=javidx9
from typing import List
import pygame, math
from code_modules.spline.spline_point_2D import Spline_Point2D
### THE FONT IS USED TO SHOW fOffset AND fMarker ###
###############
class Spline:
def __init__(self):
self.points = []
self.activePoint = 0
self.isLooped = False
self.RIGHT = False
self.LEFT = False
self.UP = False
self.DOWN = False
self.totalLineLength = 0
############# DEBUG FONT #############
### THE FONT IS USED TO SHOW fOffset AND fMarker ###
self.font = pygame.font.SysFont(None, 20)
def update(self):
if self.RIGHT:
self.points[self.activePoint].x += 5
if self.LEFT:
self.points[self.activePoint].x -= 5
if self.UP:
self.points[self.activePoint].y -= 5
if self.DOWN:
self.points[self.activePoint].y += 5
### CALCULATE TOTAL LENGTH ###
self.totalLineLength = self.__getTotalLength()
def draw(self, canvas):
##### DRAW SPLINE POINTS #####
### LOOPED ###
if self.isLooped:
for t in range(0, len(self.points)*100, 1):
pos = self.getSplinePoint(t / 100)
pygame.draw.circle(canvas, (255,255,255), (pos.x, pos.y), 2)
### NOT LOOPED ###
else:
for t in range(0, (len(self.points)*100) - 300 , 1):
pos = self.getSplinePoint(t / 100)
pygame.draw.circle(canvas, (255,255,255), (pos.x, pos.y), 2)
##### DRAW CONTROL POINTS + TEXT #####
for i in range(len(self.points)):
### DRAW DISTANCE ###
tempImg = self.font.render(str(self.points[i].length), True, (200,200,200))
canvas.blit(tempImg, (self.points[i].x + 20, self.points[i].y))
##########################
##### CONTROL POINTS #####
if i == self.activePoint:
pygame.draw.circle(canvas, (255,255,0), (self.points[i].x, self.points[i].y), 5)
else:
pygame.draw.circle(canvas, (255,0,0), (self.points[i].x, self.points[i].y), 5)
tempImg = self.font.render(str(i), True, (255,255,255))
canvas.blit(tempImg, (self.points[i].x, self.points[i].y))
def getSplinePoint(self, t):
if not self.isLooped:
p1 = int(t) + 1
p2 = p1 + 1
p3 = p2 + 1
p0 = p1 - 1
else:
p1 = int(t)
p2 = (p1 + 1) % len(self.points)
p3 = (p2 + 1) % len(self.points)
if p1 >= 1:
p0 = p1 - 1
else:
p0 = len(self.points) - 1
t = t - int(t)
tSquare = t * t
tCube = tSquare * t
q1 = -tCube + 2 * tSquare - t
q2 = 3 * tCube - 5 * tSquare + 2
q3 = -3 * tCube + 4 * tSquare + t
q4 = tCube - tSquare
tx = 0.5 * (self.points[p0].x * q1 +
self.points[p1].x * q2 +
self.points[p2].x * q3 +
self.points[p3].x * q4)
ty = 0.5 * (self.points[p0].y * q1 +
self.points[p1].y * q2 +
self.points[p2].y * q3 +
self.points[p3].y * q4)
return Spline_Point2D(tx, ty)
def getSplineGradient(self, t):
if not self.isLooped:
p1 = int(t) + 1
p2 = p1 + 1
p3 = p2 + 1
p0 = p1 - 1
else:
p1 = int(t)
p2 = (p1 + 1) % len(self.points)
p3 = (p2 + 1) % len(self.points)
if p1 >= 1:
p0 = p1 - 1
else:
p0 = len(self.points) - 1
t = t - int(t)
tSquare = t * t
tCube = tSquare * t
q1 = -3*tSquare + 4*t - 1
q2 = 9*tSquare - 10*t
q3 = -9*tSquare + 8*t + 1
q4 = 3*tSquare - 2*t
tx = 0.5 * (self.points[p0].x * q1 +
self.points[p1].x * q2 +
self.points[p2].x * q3 +
self.points[p3].x * q4)
ty = 0.5 * (self.points[p0].y * q1 +
self.points[p1].y * q2 +
self.points[p2].y * q3 +
self.points[p3].y * q4)
return Spline_Point2D(tx, ty)
def __getTotalLength(self):
### CALCULATE TOTAL LENGTH ###
total = 0
if self.isLooped:
for i in range(len(self.points)):
self.points[i].length = self.__calculateSegmentLength(i)
total += self.points[i].length
else:
for i in range(len(self.points)-3):
self.points[i].length = self.__calculateSegmentLength(i)
total += self.points[i].length
return total
def __calculateSegmentLength(self, node):
fLength = 0
fStepSize = 3
old_point = self.getSplinePoint(node)
for t in range(0, 100, fStepSize):
new_point = self.getSplinePoint(node + t/100)
fLength += math.sqrt((new_point.x - old_point.x) * (new_point.x - old_point.x)
+ (new_point.y - old_point.y)*(new_point.y - old_point.y))
old_point = new_point
        ### You need to recalculate the segment lengths if the spline changes,
        # which means it's very inefficient to use splines dynamically. Preferably,
        # use them statically.
return fLength
def getNormalizedOffset(self, p):
# Which node is the base?
i = 0
while p > self.points[i].length:
p -= self.points[i].length
i += 1
# The fractional is the offset
return i + (p / self.points[i].length)
| EliasFredriksson/Tower_Defence_Reworked | code_modules/spline/spline.py | spline.py | py | 6,006 | python | en | code | 0 | github-code | 6 |
42679468935
|
'''
A classic problem. Since Roman numerals are, in principle, written from largest to smallest,
finding a numeral that is larger than the one before it means "large minus small" (e.g., IV = 4).
So a single left-to-right pass is enough: compare the current numeral with the previous one, and
whenever a smaller numeral sits to the left of a larger one, remember to subtract twice the previous value
(not that easy to explain in words; see the code for details).
'''
class Solution:
def eval_roman(self, symbol):
answer = 0
if('I' == symbol):
answer = 1
elif('V' == symbol):
answer = 5
elif('X' == symbol):
answer = 10
elif('L' == symbol):
answer = 50
elif ('C' == symbol):
answer = 100
elif ('D' == symbol):
answer = 500
elif ('M' == symbol):
answer = 1000
return answer
def romanToInt(self, s):
answer, previous_val, current_val = 0, 0, 0
for i in s:
current_val = self.eval_roman(i)
answer = answer + current_val
if(current_val > previous_val):
answer = answer - 2 * previous_val
previous_val = current_val
return answer
# For local testing
# sol = Solution()
# print(sol.romanToInt("MCMXCIV"))
# print(sol.romanToInt("XII"))
# print(sol.romanToInt("IV"))
# print(sol.romanToInt("IX"))
| shawn2000100/LeetCode_Easy_Code | 13. Roman to Integer.py | 13. Roman to Integer.py | py | 1,379 | python | en | code | 1 | github-code | 6 |
70439517629
|
import pyautogui
import cv2 as cv
import numpy as np
import keyboard
import time
from math import sqrt
from PIL import ImageGrab
import win32api, win32con
# https://stackoverflow.com/questions/5906693/how-to-reduce-the-number-of-colors-in-an-image-with-opencv
def kmeans_color_quantization(image, clusters=8, rounds=1):
h, w = image.shape[:2]
samples = np.zeros([h*w,3], dtype=np.float32)
count = 0
for x in range(h):
for y in range(w):
samples[count] = image[x][y]
count += 1
compactness, labels, centers = cv.kmeans(samples,
clusters,
None,
(cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
rounds,
cv.KMEANS_RANDOM_CENTERS)
centers = np.uint8(centers)
res = centers[labels.flatten()]
return res.reshape((image.shape))
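# Illustrative usage of the helper above (the file name is hypothetical):
#   img = cv.imread("screenshot.png")
#   quantized = kmeans_color_quantization(img, clusters=16)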
class GarticBot:
def __init__(self, DEBUG=False):
self.debug = DEBUG
BOARD_ORIGIN = (692, 170)
BOARD_RESOLUTION = (962, 530)
PENCIL = (-150, 25)
PENCIL_SLIDER = (-147, 772)
PENCIL_SLIDER_MIN_RANGE = (790, 665)
PALLETE = (-100, 570)
# DRAWING_RESOLUTION = (120, 66)
# DRAWING_RESOLUTION = (150, 82)
DRAWING_RESOLUTION = (200, 110)
COLOR_VARIANCE = 128
WHITE_THRESHOLD = 55
CLICK_DELAY = 1e-10
CLICK_DELAY_INTERVAL = 5
pyautogui.PAUSE = CLICK_DELAY
def _getRelativePos(self, pos):
return (pos[0] + self.BOARD_ORIGIN[0], pos[1] + self.BOARD_ORIGIN[1])
def _downScale(self, image):
f1 = self.DRAWING_RESOLUTION[0] / image.shape[1]
f2 = self.DRAWING_RESOLUTION[1] / image.shape[0]
dim = (int(image.shape[1] * min(f1, f2)), int(image.shape[0] * min(f1, f2)))
resized = cv.resize(image, dim)
downscaled = kmeans_color_quantization(resized, clusters=self.COLOR_VARIANCE, rounds=1)
if self.debug:
cv.imshow("IMAGE", cv.resize(image, (600, int(image.shape[0]*600/image.shape[1])), interpolation=cv.INTER_AREA))
cv.waitKey(600)
cv.imshow("IMAGE", cv.resize(resized, (600, int(resized.shape[0]*600/resized.shape[1])), interpolation=cv.INTER_AREA))
cv.waitKey(600)
cv.imshow("IMAGE", cv.resize(downscaled, (600, int(downscaled.shape[0]*600/downscaled.shape[1])), interpolation=cv.INTER_AREA))
cv.waitKey(600)
cv.destroyAllWindows()
return downscaled
def _getColorClusters(self, image):
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
clusters = {}
for j in range(len(image)):
for i in range(len(image[0])):
color = f"{image[j][i][0]},{image[j][i][1]},{image[j][i][2]}"
if color in clusters:
clusters[color].append((i, j))
else:
clusters.update({color: [(i, j)]})
return clusters
def _equipPencil(self):
pyautogui.click(self._getRelativePos(self.PENCIL))
def _setColor(self, color):
pyautogui.click(self._getRelativePos(self.PALLETE))
time.sleep(0.1)
color = color.split(",")
keyboard.send('tab')
time.sleep(0.01)
keyboard.send('tab')
time.sleep(0.01)
keyboard.send('tab')
time.sleep(0.01)
keyboard.write(color[0])
time.sleep(0.01)
keyboard.send('tab')
time.sleep(0.01)
keyboard.write(color[1])
time.sleep(0.01)
keyboard.send('tab')
time.sleep(0.01)
keyboard.write(color[2])
time.sleep(0.01)
keyboard.send('enter')
time.sleep(0.1)
def _getClickPosition(self, pos):
upscale_factor_x = self.BOARD_RESOLUTION[0] / self.DRAWING_RESOLUTION[0]
upscale_factor_y = self.BOARD_RESOLUTION[1] / self.DRAWING_RESOLUTION[1]
pos = (int(pos[0]*upscale_factor_x), int(pos[1]*upscale_factor_y))
return pos
def _setPencilThickness(self, thickness):
pyautogui.moveTo(self._getRelativePos(self.PENCIL_SLIDER))
def draw(self, image):
print("DOWNSCALING")
downscaled = self._downScale(image)
clusters = self._getColorClusters(downscaled)
while True:
if keyboard.is_pressed('alt+s'):
print("STOPPING")
return
if keyboard.is_pressed('alt+q'):
quit()
if keyboard.is_pressed('alt+d'):
break
time.sleep(0.2)
print("DRAWING")
self._equipPencil()
for color in clusters:
channels = color.split(",")
dist = sqrt(pow(int(channels[0])-255, 2) + pow(int(channels[1])-255, 2) + pow(int(channels[2])-255, 2))
if dist < self.WHITE_THRESHOLD:
continue
print(f'Color: {color}')
self._setColor(color)
for i, pixel in enumerate(clusters[color]):
pos = self._getClickPosition(pixel)
pos = self._getRelativePos(pos)
win32api.mouse_event(win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE, int(pos[0]/win32api.GetSystemMetrics(0)*65535), int(pos[1]/win32api.GetSystemMetrics(1)*65535) ,0 ,0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
if i%self.CLICK_DELAY_INTERVAL==0: time.sleep(self.CLICK_DELAY)
if keyboard.is_pressed('alt+s'):
print("STOPED")
return
print("DONE")
def run(self):
while True:
if keyboard.is_pressed('alt+q'):
break
if keyboard.is_pressed('alt+c'):
image = np.array(ImageGrab.grabclipboard())[:,:,:3]
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
self.draw(image)
bot = GarticBot(DEBUG=True)
bot.run()
| JirkaKlimes/gartic.io_bot | main.py | main.py | py | 5,990 | python | en | code | 0 | github-code | 6 |
27066265033
|
#! /usr/bin/python
def solutionOneChecker(combinedLetters: str, testWord: str, dictWordScores: dict) -> bool:
"""
Check if a word uses all letters of the box
True = Valid Solution
"""
fullScore = len(combinedLetters)
#fullScore = 7
if dictWordScores[testWord] > fullScore:
return True
return False
def topScoringWords(combinedLetters: str, dictWordScores: dict) -> bool:
"""
Prints the top scoring words above or equal to 8 points
"""
fullScore = len(combinedLetters)
for iScore in range(fullScore, 7, -1):
for iWord in dictWordScores:
if dictWordScores[iWord] == iScore:
print(iWord, iScore)
return True
def run():
return None
if __name__ == "__main__":
run()
| tmangan/PonderThis | 2022_December/Solution_Checker.py | Solution_Checker.py | py | 782 | python | en | code | 0 | github-code | 6 |
25528561437
|
import tensorflow as tf
# Define a "Computation Graph"
a = tf.constant(1)  # Define a constant Tensor
b = tf.constant(1)
c = a + b  # Equivalent to c = tf.add(a, b); c is a new Tensor created by the add Operation on Tensors a and b
sess = tf.Session()  # Initialize a Session
c_ = sess.run(c)  # Session's run() performs the actual computation of the nodes (Tensors) in the Computation Graph
print(c_)
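# Note (added): this snippet targets the TensorFlow 1.x graph/session API.
# Under TensorFlow 2.x, roughly equivalent behaviour can be had with:
#   import tensorflow.compat.v1 as tf
#   tf.disable_eager_execution()
# before building the graph above.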
| snowkylin/TensorFlow-cn | source/_static/code/en/basic/graph/1plus1.py | 1plus1.py | py | 407 | python | en | code | 854 | github-code | 6 |
225940000
|
from sqlalchemy import Column, Integer, String, ForeignKey
from app.routers.db import Base
class Task(Base):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True, index=True)
title = Column(String)
body = Column(String)
| gitdarsh/todo | todo/app/models/model.py | model.py | py | 248 | python | en | code | 0 | github-code | 6 |
31132813401
|
from abc import ABC
from collections import OrderedDict, defaultdict
import torch
import torch.nn.functional as F
from torch import flatten
from torch.nn import Module, Conv2d, Dropout, Linear, BatchNorm2d, ReLU, Sequential, MaxPool2d
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LRScheduler
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
class AbstractModule(Module, ABC): # TODO check that it's abstract
def __init__(self):
super().__init__()
self._optim = None
self._criterion = None
self._scheduler = None
self._pruner = None
def optimizer(self, optim: callable(Optimizer), **kwargs):
self._optim = optim(self.parameters(), **kwargs)
return self
def scheduler(self, scheduler: callable(LRScheduler), **kwargs):
self._scheduler = scheduler(self._optim, **kwargs)
return self
def criterion(self, criterion: Module):
self._criterion = criterion
return self
def fit(self,
dataloader: DataLoader,
epochs: int,
callbacks=None
) -> None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = self.to(device).train()
for epoch in range(1, epochs + 1):
loader_bar = tqdm(dataloader, desc='train', leave=False)
for inputs, targets in loader_bar:
inputs = inputs.to(device)
targets = targets.to(device)
# Reset the gradients (from the last iteration)
self._optim.zero_grad()
outputs = model(inputs)
loss = self._criterion(outputs, targets)
loss.backward()
self._optim.step()
if callbacks is not None:
for callback in callbacks:
callback()
loader_bar.set_description(f"Epoch [{epoch}/{epochs}]")
if self._scheduler is not None:
self._scheduler.step()
@torch.inference_mode()
def evaluate(self,
dataloader: DataLoader,
verbose=True,
) -> float:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = self.to(device).eval()
num_samples = 0
num_correct = 0
for inputs, targets in tqdm(dataloader, desc="eval", leave=False, disable=not verbose):
inputs = inputs.to(device)
targets = targets.to(device)
outputs = model(inputs)
outputs = outputs.argmax(dim=1)
# Update metrics
num_samples += targets.size(0)
num_correct += (outputs == targets).sum()
return (num_correct / num_samples * 100).item()
class BaseLineNet(AbstractModule):
def __init__(self):
super().__init__()
self.conv1 = Conv2d(1, 32, 3, 1) # 1 x 32 x 3 x 3 = 288 parameters
self.conv2 = Conv2d(32, 64, 3, 1) # 32 x 64 x 3 x 3=18,432 parameters
self.dropout1 = Dropout(0.25)
self.dropout2 = Dropout(0.5)
self.fc1 = Linear(9216, 128) # 9216 x 128 = 1,179,648 parameters
self.fc2 = Linear(128, 10) # 128 x 10 = 1,280 parameters
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
class VGG(AbstractModule):
ARCH = [64, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']
def __init__(self) -> None:
super().__init__()
layers = []
counts = defaultdict(int)
def add(name: str, layer: Module) -> None:
layers.append((f"{name}{counts[name]}", layer))
counts[name] += 1
in_channels = 3
for x in self.ARCH:
if x != 'M':
# conv-bn-relu
add("conv", Conv2d(in_channels, x, 3, padding=1, bias=False))
add("bn", BatchNorm2d(x))
add("relu", ReLU(True))
in_channels = x
else:
add("pool", MaxPool2d(2))
self.backbone = Sequential(OrderedDict(layers))
self.classifier = Linear(512, 10)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# backbone: [N, 3, 32, 32] => [N, 512, 2, 2]
x = self.backbone(x)
# avgpool: [N, 512, 2, 2] => [N, 512]
x = x.mean([2, 3])
# classifier: [N, 512] => [N, 10]
x = self.classifier(x)
return x
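# Illustrative training sketch (added; train_loader / test_loader are assumed
# MNIST-style DataLoaders and are not defined in this file):
#   model = (BaseLineNet()
#            .optimizer(torch.optim.Adam, lr=1e-3)
#            .criterion(torch.nn.NLLLoss()))
#   model.fit(train_loader, epochs=3)
#   print(f"accuracy: {model.evaluate(test_loader):.2f}%")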
| bnwiran/tinyml-benchmark | models/models.py | models.py | py | 4,749 | python | en | code | 0 | github-code | 6 |
70396769467
|
""" JAX functions to Calculate moving average.
Author: Toshinori Kitamura
Affiliation: NAIST & OSX
"""
from __future__ import annotations
import jax
from chex import Array
from jax import lax
@jax.jit
def calc_ma(lr: float, idx1: Array, idx2: Array, tb: Array, tb_targ: Array) -> Array:
"""Calculate moving average.
The semantics of calc_ma are given by:
def calc_ma(lr, idx1, idx2, tb, tb_targ):
for s, a, targ in zip(idx1, idx2, tb_targ):
tb[s, a] = (1 - lr) * tb[s, a] + lr * targ
return tb
Args:
lr (float): Learning rate
idx1 (Array): (?, ) or (?, 1) array
idx2 (Array): (?, ) or (?, 1) array
tb (Array): (?, ?) initial array
tb_targ (Array): (?, ) or (?, 1) target array
Returns:
tb (Array): (?, ) array
"""
assert len(tb.shape) == 2 # dSxdA
    idx1 = idx1.squeeze(axis=1) if len(idx1.shape) == 2 else idx1
    idx2 = idx2.squeeze(axis=1) if len(idx2.shape) == 2 else idx2
    tb_targ = tb_targ.squeeze(axis=1) if len(tb_targ.shape) == 2 else tb_targ
def body_fn(i, tb):
i1, i2, t = idx1[i], idx2[i], tb_targ[i]
targ = (1 - lr) * tb[i1, i2] + lr * t
return tb.at[i1, i2].set(targ)
tb = lax.fori_loop(0, len(idx1), body_fn, tb)
return tb
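# Illustrative usage sketch (added; not part of the original ShinRL module).
if __name__ == "__main__":
    import jax.numpy as jnp

    tb = jnp.zeros((3, 2))
    idx1, idx2 = jnp.array([0, 1, 2]), jnp.array([1, 0, 1])
    targ = jnp.array([1.0, 2.0, 3.0])
    # With lr=0.5 and an all-zero table, each touched entry becomes 0.5 * target.
    print(calc_ma(0.5, idx1, idx2, tb, targ))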
| omron-sinicx/ShinRL | shinrl/_calc/moving_average.py | moving_average.py | py | 1,289 | python | en | code | 42 | github-code | 6 |
17469039054
|
from bs4 import BeautifulSoup
import requests
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
fx=open('WEB.txt','r',encoding="utf-8") ## put the input file's name in FILENAME
line=fx.readline()
l=open('email_mailto.txt','a',encoding='utf-8')
def web_imrove(url):
print(url)
try:
source = requests.get(url)
except Exception :
l.write('\n')
return 0
plain_text = source.text
soup = BeautifulSoup(plain_text, 'html.parser')
emails = [a["href"] for a in soup.select('a[href^=mailto:]')]
popo = str(emails)
toto = popo.replace('mailto:', '')
hoho = toto.replace('[', '')
gogo = hoho.replace(']', '')
mm = gogo.replace("'", "")
    if len(mm) != 0:
l.write(mm)
l.write('\n')
print(mm)
return 1
else:
l.write('\n')
return 0
#print(mm)
while line:
    if line == '\n':
l.write('\n')
line=fx.readline()
continue
p = line.strip()
if( web_imrove('http://'+p) ):
print('first')
elif( web_imrove('http://' + p+'/contact-us') ):
print('second')
else:
web_imrove('http://' + p+'/contactus')
line = fx.readline()
| akkiei/Web_Scrapper | Mail_to.py | Mail_to.py | py | 1,282 | python | en | code | 0 | github-code | 6 |
17086061072
|
from datetime import datetime
#convert date from YYYY-MM-DD-T to Date, Month, Year (in words)
def date_convert(date):
date=str(date)
data=date.split('-') #year/month/day+time all separated by dash
daydate=data[-1].split() #data[-1] is day+time, separated by a space
day=daydate[0] #discard time, keep day
    day=day if day[0]!='0' else day[1] #otherwise single-digit days retain leading zero
year=str(data[0]) #data is list containing the year and the month
month=str(data[1])
#map month numbers to their names
months={'01':'January',
'02':'February',
'03':'March',
'04':'April',
'05':'May',
'06':'June',
'07':'July',
'08':'August',
'09':'September',
'10':'October',
'11':'November',
'12':'December'}
#adds appropriate suffix to day
if day[-1]=='1' and int(day)%100!=11: #checks if date ends with 1 and isn't 11
suffix='st'
    elif day[-1]=='2' and int(day)%100!=12: #checks if date ends with 2 and isn't 12
suffix='nd'
elif day[-1]=='3':
suffix='rd'
else:
suffix='th' #including special cases 11 and 12 which were previously excluded
return day+suffix+' '+months[month]+', '+year #returns string with date in appropriate format
#test case
#date=datetime.now()
#print date_convert(date)
| veliakiner/SmogonQDB | date_convert.py | date_convert.py | py | 1,428 | python | en | code | 0 | github-code | 6 |
19980146036
|
from pytube import YouTube
from PySimpleGUI import PySimpleGUI as sg
sg.theme("reddit")
layout = [
[sg.Text("URL"), sg.Input(key="url")],
[sg.Button("Fazer o Download")]
]
janela = sg.Window("Video Downloader", layout)
while True:
eventos, valores = janela.read()
if eventos == sg.WINDOW_CLOSED:
break
if eventos == "Fazer o Download":
link = valores["url"]
yt = YouTube(link)
stream = yt.streams.get_highest_resolution()
stream.download()
| jopsfernandes/video_downloader | youtube.py | youtube.py | py | 525 | python | en | code | 0 | github-code | 6 |
38791493575
|
from flask import Flask
from flask_restful import Resource, Api
import __init__ as main  # the route handlers below refer to this module as "main"
app=Flask(__name__)
api=Api(app)
class Quote(Resource):
@app.route('/wifi/<int:id>')
def get(id):
x=main.main_(id)
if x==-1:
return 'Not found', 404
else:
return x, 200
@app.route('/trace')
def trace():
x=main.output()
return x, 200
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=8080, debug=True)
|
Kaedone/WI-FI_checker
|
api.py
|
api.py
|
py
| 510 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11956903610
|
"""
a pure python implementation of the heap sort algorithm
"""
def m_heap_sort(arr):
"""heap sort
:type arr: array
:rtype: array
>>> m_heap_sort([3, 2, 1, 4, 5])
[1, 2, 3, 4, 5]
>>> m_heap_sort([])
[]
>>> m_heap_sort([1])
[1]
"""
n = len(arr)
for i in range(n//2 - 1, -1, -1):
heapify(arr, n, i)
for i in range(n-1, -1, -1):
arr[i], arr[0] = arr[0], arr[i]
heapify(arr, i, 0)
return arr
def heapify(arr, n, i):
"""get max heap
:type arr: array
:type n: int, length
:type i: index, subtree root
:rtype: None
"""
largest = i
left = 2*i + 1
right = 2*i + 2
if left < n and arr[left] > arr[largest]:
largest = left
if right < n and arr[right] > arr[largest]:
largest = right
if largest != i:
arr[largest], arr[i] = arr[i], arr[largest]
heapify(arr, n, largest)
|
wancong/leetcode
|
sort/m_heap_sort.py
|
m_heap_sort.py
|
py
| 1,074 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10304764794
|
# Calculate how long it takes to save enough money to make a down payment on a house
def app():
# INPUTS
annual_salary = float(input("Enter your annual salary: "))
portion_saved = float(input("Enter the percent of your salary to save, as a decimal: "))
total_cost = int(input("Enter the cost of your dream home: "))
portion_down_payment = 0.25
current_savings = 0
r = 0.04
# bonus = current_savings*r/12
monthly_salary = (annual_salary/12)
goal = (total_cost * portion_down_payment)
months = 0
while (current_savings <= goal):
current_savings += current_savings*r/12
current_savings += (monthly_salary * portion_saved)
months += 1
print(months)
if __name__ == "__main__":
app()
|
lsunl/cs60001-python
|
ps1a.py
|
ps1a.py
|
py
| 770 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19772192847
|
# -*- coding: utf-8 -*-
#-----------
#@utool.indent_func('[harn]')
@profile
def test_configurations(ibs, acfgstr_name_list, test_cfg_name_list):
r"""
Test harness driver function
CommandLine:
python -m ibeis.expt.harness --exec-test_configurations --verbtd
python -m ibeis.expt.harness --exec-test_configurations --verbtd --draw-rank-cdf --show
Example:
>>> # SLOW_DOCTEST
>>> from ibeis.expt.harness import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb('PZ_MTEST')
>>> acfgstr_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list, default=['candidacy:qsize=20,dper_name=1,dsize=10', 'candidacy:qsize=20,dper_name=10,dsize=100'])
>>> test_cfg_name_list = ut.get_argval('-t', type_=list, default=['custom', 'custom:fg_on=False'])
>>> test_configurations(ibs, acfgstr_name_list, test_cfg_name_list)
>>> ut.show_if_requested()
"""
testres_list = run_test_configurations2(ibs, acfgstr_name_list, test_cfg_name_list)
for testres in testres_list:
if testres is None:
return
else:
experiment_printres.print_results(ibs, testres)
experiment_drawing.draw_results(ibs, testres)
return testres_list
#def get_cmdline_testres():
# ibs, qaids, daids = main_helpers.testdata_expanded_aids(verbose=False)
# test_cfg_name_list = ut.get_argval('-t', type_=list, default=['custom', 'custom:fg_on=False'])
# testres = run_test_configurations(ibs, qaids, daids, test_cfg_name_list)
# return ibs, testres
|
smenon8/ibeis
|
_broken/old_test_harness.py
|
old_test_harness.py
|
py
| 1,586 |
python
|
en
|
code
| null |
github-code
|
6
|
33983748034
|
from django.shortcuts import render, redirect
from django.views.generic import ListView, \
CreateView, DetailView, UpdateView, DeleteView
from .models import Post, Review
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from accounts.models import UserFollows
from .forms import PostForm, ReviewForm
from django.template.defaulttags import register
from django.contrib import messages
@register.filter
# helps us loop over the review's rating and
# add stars for the range of the int variable; rating
def get_range(value):
return range(value)
def flow(request):
following = UserFollows.objects.filter(following=request.user)
follower = UserFollows.objects.filter(follower=request.user)
posts = []
reviews = []
for post in Post.objects.all().order_by('-date_posted'):
posts.append(post)
for review in Review.objects.all().order_by('-date_posted'):
reviews.append(review)
posts_reviews = []
for post in posts:
if post.author == request.user:
posts_reviews.append(post)
print(post)
for contact in follower:
if post.author == contact.following:
posts_reviews.append(post)
for review in reviews:
if review.author == request.user:
posts_reviews.append(review)
for contact in follower:
if review.author == contact.following:
posts_reviews.append(review)
if review.ticket.author == request.user:
posts_reviews.append(review)
posts_reviews = list(set(posts_reviews))
posts_reviews.sort(key=lambda x: x.date_posted, reverse=True)
for p in posts_reviews:
print(p.type)
context = {
'follower': follower,
'following': following,
'post_review': posts_reviews
}
return render(request, 'flow.html', context)
class ReviewCreateView(LoginRequiredMixin, CreateView):
model = Review
fields = ['ticket', 'headline', 'rating', 'content', ]
def form_valid(self, form):
form.instance.author = self.request.user
try:
return super().form_valid(form)
        except ValueError:
            messages.add_message(self.request, messages.INFO, 'Hello world.')
            return self.form_invalid(form)
class ReviewDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Review
success_url = '/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'content', 'header_image']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
fields = ['title', 'content', 'header_image']
def form_valid(self, form):
form.instance.author = self.request.user
self.object = form.save()
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
success_url = '/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
class PostListView(ListView):
model = Post
context_object_name = 'posts'
ordering = ['-date_posted']
class ReviewListView(ListView):
model = Review
context_object_name = 'reviews'
ordering = ['-date_posted']
class PostDetailView(DetailView):
model = Post
class ReviewDetailView(DetailView):
model = Review
class ReviewUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Review
fields = ['headline', 'body', 'rating']
def form_valid(self, form):
form.instance.author = self.request.user
self.object = form.save()
return super().form_valid(form)
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
else:
return False
def review_create_view(request):
form2 = PostForm(request.POST, request.FILES or None)
form = ReviewForm(request.POST or None)
context = {
"form2": form2,
"form": form,
}
if all([form2.is_valid(), form.is_valid()]):
current_user = request.user
parent = form2.save(commit=False)
parent.author_id = current_user.id
parent.reviewed = 'true'
parent.save()
child = form.save(commit=False)
child.author_id = current_user.id
child.ticket = parent
child.save()
print("form", form.cleaned_data)
print("form2", form2.cleaned_data)
context['message'] = 'data saved'
return redirect('flow')
# return render(request, 'reviews/review_create.html', context)
else:
return render(request, 'reviews/review_create.html', context)
def review_of_ticket(request, pk):
instance = Post.objects.get(id=pk)
form = ReviewForm(request.POST or None)
review_form_ticket = instance
context = {
"form": form,
"ticket": review_form_ticket,
}
if form.is_valid():
current_user = request.user
child = form.save(commit=False)
child.author_id = current_user.id
child.ticket = instance
instance.reviewed = 'true'
child.save()
instance.save()
form.save()
return redirect('flow')
else:
return render(request, "website/review_form.html", context,)
def view_tickets_reviews(request):
object1 = Post.objects.filter(author=request.user).order_by('-date_posted')
object2 = Review.objects.filter(
author=request.user).order_by('-date_posted')
context = {
'object1': object1,
'object2': object2,
}
return render(request, "website/review_post_detail.html", context)
|
maximesoydas/maxweb
|
website/views.py
|
views.py
|
py
| 6,223 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40132948104
|
import argparse
from math import sqrt
import Image
import ImageDraw
def color_map(v):
assert 0 <= v <= 255
if v == 0: return (0, 0, 0)
if v == 255: return (255, 255, 255)
if v < 4 * 8:
# 0 .. 31
return (0, 255 - (31 * 4) + v * 4, 0)
if v < 16 * 8:
# 32 .. 127
# 0 .. 95
return (128 + (v - 32) * 127 / 95, 0, 0)
return (0, v, v)
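# Summary of the mapping implemented above: 0 -> black, 255 -> white, low bytes
# (1..31) -> shades of green, the printable-ASCII range (32..127) -> shades of red,
# and the remaining high bytes (128..254) -> cyan.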
def convert():
if args.test:
data = map(chr, range(256))
else:
data = file(args.in_file).read()
size = len(data)
w = 1
while size / w > w * 8:
w *= 2
h = size / w
if size % w != 0: h += 1
image = Image.new('RGB', (w, h))
d = ImageDraw.Draw(image)
for i, c in enumerate(data):
d.point((i % w, i / w), color_map(ord(c)))
image.save(args.out_file, 'PNG')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Show binary in color pattern')
parser.add_argument('--test', action='store_true')
parser.add_argument('in_file', action='store')
parser.add_argument('--out_file', action='store', default='out.png')
args = parser.parse_args()
convert()
|
nishio/binary_color
|
binary_color.py
|
binary_color.py
|
py
| 1,161 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39784068604
|
import filecmp, os, sys
sys.path.append('c:\\dev\\pytWinc\\superpy')
sys.path.append('c:\\dev\\pytWinc\\superpy\\utils_superpy')
from utils.utils import calculate_inventory, get_path_to_directory_of_file
directory_of_testcase = "fn_calculate_inventory"
path_to_directory_of_testcase = get_path_to_directory_of_file(directory_of_testcase)
# input test files:
path_to_input_file_sold_test_01 = os.path.join(path_to_directory_of_testcase, "test_input", 'input_file_sold_for_testcase_01.csv')
path_to_input_file_cost_test_01 = os.path.join(path_to_directory_of_testcase, "test_input", 'input_file_cost_for_testcase_01.csv')
path_to_input_file_sold_test_02 = os.path.join(path_to_directory_of_testcase, "test_input", 'input_file_sold_for_testcase_02.csv')
path_to_input_file_cost_test_02 = os.path.join(path_to_directory_of_testcase, "test_input", 'input_file_cost_for_testcase_02.csv')
'''
about the data structure of the expected test result:
a list of lists is a common and convenient (but not the only) way to create tables in Python.
This also applies to Rich.
So expected test results take the shape of a list of lists. This has worked
while testing fn calculate_expired_products_on_day.
'''
def test_01_calculate_inventory_happy_flow():
filecmp.clear_cache()
date_on_which_to_calculate_inventory = '2024-05-21'
expected_test_result = [['b_3', 'candle', '3.1', '2024-01-11', 'does not expire'], ['b_6', 'book', '0.5', '2024-01-15', 'does not expire'], ['b_39', 'skeelers', '1.1', '2024-04-20', 'does not expire'], ['b_45', 'shoes', '1.4', '2024-04-30', 'does not expire'], ['b_48', 'fish', '2.5', '2024-05-08', '2024-05-23'], ['b_51', 'kiwi', '0.5', '2024-05-15', '2024-05-30'], ['b_54', 'onion', '1.1', '2024-05-21', '2024-06-05']]
actual_result = calculate_inventory(date_on_which_to_calculate_inventory, path_to_input_file_sold_test_01,
path_to_input_file_cost_test_01)
assert actual_result == expected_test_result
def test_02_calculate_inventory_happy_flow():
filecmp.clear_cache()
date_on_which_to_calculate_inventory = '2023-11-15'
expected_test_result = [['b_6', 'garbage_bag', '5.2', '2023-10-17', 'does not expire'], ['b_26', 'tomato', '2.5', '2023-10-31', '2023-11-15'], ['b_28', 'lettuce', '0.5', '2023-11-01', '2023-11-16'], ['b_30', 'lettuce', '4.0', '2023-11-02', '2023-11-17'], ['b_32', 'tomato', '5.2', '2023-11-03', '2023-11-18'], ['b_34', 'lightbulb', '4.0', '2023-11-06', 'does not expire'], ['b_36', 'tomato', '4.0', '2023-11-07', '2023-11-22'], ['b_38', 'rice', '0.5', '2023-11-08', '2023-11-23'], ['b_40', 'cheese', '1.4', '2023-11-09', '2023-11-24'], ['b_42', 'book', '5.2', '2023-11-11', 'does not expire'],
['b_44', 'oats', '0.5', '2023-11-14', '2023-11-29']]
actual_result = calculate_inventory(date_on_which_to_calculate_inventory, path_to_input_file_sold_test_02,
path_to_input_file_cost_test_02)
assert actual_result == expected_test_result
|
davidjfk/David_Sneek_Superpy
|
test_utils/fn_calculate_inventory/test_calculate_inventory.py
|
test_calculate_inventory.py
|
py
| 2,936 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35430795829
|
class odds_compare:
def __init__(self, api_array):
self.api_array = api_array
# calculate odds comparisons for all available matches
def calculate_comparisons(self):
result = []
for i in self.api_array:
result_dict = {
"home": "",
"away": "",
"home_odds": [],
"away_odds": []
}
result_dict['home'] = i['home_team']
result_dict['away'] = [x for x in i['teams'] if x != i['home_team']][0]
home_odds = []
away_odds = []
# return all odds
for book in i['sites']:
if result_dict['home'] == i['teams'][0]:
home_odds.append((book['site_nice'], book['odds']['h2h'][0]))
away_odds.append((book['site_nice'], book['odds']['h2h'][1]))
else:
home_odds.append((book['site_nice'], book['odds']['h2h'][1]))
away_odds.append((book['site_nice'], book['odds']['h2h'][0]))
result_dict['home_odds'] = sorted(home_odds, key=lambda tup: tup[1], reverse=True)
result_dict['away_odds'] = sorted(away_odds, key=lambda tup: tup[1], reverse=True)
result.append(result_dict)
return result
# calculate odds comparisons for a specific team
def calculate_comparisons_team(self, team):
# print(self.api_array)
result = {
"home": "",
"away": "",
"home_odds": [],
"away_odds": []
}
# look for particular match
for i in self.api_array:
if team in i['teams']:
home_odds = []
away_odds = []
# return all odds
for book in i['sites']:
# print(book['site_nice']+ " - " +str(book['odds']['h2h']))
home_odds.append((book['site_nice'], book['odds']['h2h'][0]))
away_odds.append((book['site_nice'], book['odds']['h2h'][1]))
result['home_odds'] = sorted(home_odds, key=lambda tup: tup[1], reverse=True)
result['away_odds'] = sorted(away_odds, key=lambda tup: tup[1], reverse=True)
result['home'] = i['home_team']
result['away'] = [x for x in i['teams'] if x != i['home_team']][0]
break # only want first game with selected team from API
return result
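# Illustrative shape of the api_array expected by the methods above; the field names
# ('home_team', 'teams', 'sites', 'site_nice', 'odds', 'h2h') come from the code,
# while the concrete values are made up:
# api_array = [{
#     'home_team': 'Team A',
#     'teams': ['Team A', 'Team B'],
#     'sites': [{'site_nice': 'Bookmaker X', 'odds': {'h2h': [1.9, 2.1]}}],
# }]
# odds_compare(api_array).calculate_comparisons_team('Team A')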
|
abhid94/Compare_Odds_Bot
|
odds_compare.py
|
odds_compare.py
|
py
| 2,507 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43626494196
|
# VIIRS packge
from __future__ import division, print_function
import datetime
import numpy as np
from osgeo import gdal
from scipy import ndimage
import core
import env
bumper = env.environment()
class viirs(core.raster):
def __init__(self):
core.raster.__init__(self,'viirs')
return
def read(self,infile):
out = self._copy()
tree = '//HDFEOS/GRIDS/VNP_Grid_{}_2D/Data_Fields/'
field = 'SurfReflect_{0}{1}_1'
base = 'HDF5:"{0}":{1}{2}'
m = [i for i in range(12) if i not in [0,6,9]]
i = [i for i in range(1,4)]
bands = [m,i]
res = ['1km','500m']
mode = ['M','I']
band = gdal.Open(base.format(infile,tree.format('1km'),field.format('QF',1)))
out.metadata = band.GetMetadata()
cloudQA = self._extractBits(band.ReadAsArray(),2,3)
hiresCloudQA = ndimage.zoom(cloudQA,2,order=0)
band = None
band = gdal.Open(base.format(infile,tree.format('1km'),field.format('QF',2)))
shadowQA = self._extractBits(band.ReadAsArray(),3,3)
hiresShadowQA = ndimage.zoom(shadowQA,2,order=0)
# qa = (cloudQA>0)&(shadowQA<1)
mask = ~(hiresCloudQA>0)&(hiresShadowQA<1)
east,west = float(out.metadata['EastBoundingCoord']), float(out.metadata['WestBoundingCoord'])
north,south = float(out.metadata['NorthBoundingCoord']), float(out.metadata['SouthBoundingCoord'])
out.extent = [west,south,east,north]
databands = {'mask':mask}
bandNames = ['mask']
for i in range(2):
for j in range(len(bands[i])):
subdataset = base.format(infile,tree.format(res[i]),field.format(mode[i],bands[i][j]))
band = gdal.Open(subdataset)
if i == 0:
data = ndimage.zoom(band.ReadAsArray(),2,order=0)
else:
data = band.ReadAsArray()
data = np.ma.masked_where(data<0,data)
data = np.ma.masked_where(data>10000,data)
bName = '{0}{1}'.format(mode[i],bands[i][j])
databands[bName] = data.astype(np.int16)
bandNames.append(bName)
band = None
data = None
out.bands = databands
out.bandNames = bandNames
out.updateMask()
coords = {}
out.nativeCRS = {'init':'epsg:6974'}
out.proj = '+proj=sinu +R=6371007.181 +nadgrids=@null +wktext'
coords['lon'],coords['lat'] = self._geoGrid(out.extent,out.bands['I1'].shape,out.proj,wgsBounds=False)
out.coords = coords
out.gt = None
date = '{0}{1}{2}'.format(out.metadata['RangeBeginningDate'],out.metadata['RangeBeginningTime'],' UTC')
out.coords['date'] = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S.%f %Z')
return out
|
Servir-Mekong/bump
|
bump/viirs.py
|
viirs.py
|
py
| 2,977 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25497427443
|
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class TinyImageNet:
def __init__(self, root, train=True, transform=None, target_transform=None, test_transform=None, target_test_transform=None):
self.transform = transform
self.target_transform = target_transform
self.target_test_transform = target_test_transform
self.test_transform = test_transform
self.TrainData = []
self.TrainLabels = []
self.TestData = []
self.TestLabels = []
if train:
path = root + '/TinyImageNet/train/'
else:
path = root + '/TinyImageNet/val/'
self.data = np.load(path + 'data.npy')
self.targets = np.load(path + 'targets.npy')
def concatenate(self, datas, labels):
con_data = datas[0]
con_label = labels[0]
for i in range(1, len(datas)):
con_data = np.concatenate((con_data, datas[i]), axis=0)
con_label = np.concatenate((con_label, labels[i]), axis=0)
con_label = np.array(con_label, dtype=np.int64)
return con_data, con_label
def getTestData(self, classes):
datas, labels = [], []
for label in range(classes[0], classes[1]):
data = self.data[np.array(self.targets) == label]
datas.append(data)
labels.append(np.full((data.shape[0]), label))
datas, labels = self.concatenate(datas, labels)
        self.TestData = datas if len(self.TestData) == 0 else np.concatenate((self.TestData, datas), axis=0)
        self.TestLabels = labels if len(self.TestLabels) == 0 else np.concatenate((self.TestLabels, labels), axis=0)
print("the size of test set is %s" % (str(self.TestData.shape)))
print("the size of test label is %s" % str(self.TestLabels.shape))
def getTestData_up2now(self, classes):
datas, labels = [], []
for label in range(classes[0], classes[1]):
data = self.data[np.array(self.targets) == label]
datas.append(data)
labels.append(np.full((data.shape[0]), label))
datas, labels = self.concatenate(datas, labels)
self.TestData = datas
self.TestLabels = labels
print("the size of test set is %s" % (str(datas.shape)))
print("the size of test label is %s" % str(labels.shape))
def getTrainData(self, classes):
datas, labels = [], []
for label in range(classes[0], classes[1]):
data = self.data[np.array(self.targets) == label]
datas.append(data)
labels.append(np.full((data.shape[0]), label))
self.TrainData, self.TrainLabels = self.concatenate(datas, labels)
print("the size of train set is %s" % (str(self.TrainData.shape)))
print("the size of train label is %s" % str(self.TrainLabels.shape))
def getTrainItem(self, index):
img, target = Image.fromarray(self.TrainData[index]), self.TrainLabels[index]
if self.transform:
img = self.transform(img)
if self.target_transform:
target = self.target_transform(target)
return index, img, target
def getTestItem(self, index):
img, target = Image.fromarray(self.TestData[index]), self.TestLabels[index]
if self.test_transform:
img = self.test_transform(img)
if self.target_test_transform:
target = self.target_test_transform(target)
return index, img, target
    def __getitem__(self, index):
        if len(self.TrainData) != 0:
            return self.getTrainItem(index)
        elif len(self.TestData) != 0:
            return self.getTestItem(index)
    def __len__(self):
        if len(self.TrainData) != 0:
            return len(self.TrainData)
        elif len(self.TestData) != 0:
            return len(self.TestData)
def get_image_class(self, label):
return self.data[np.array(self.targets) == label]
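# Illustrative usage sketch (assumes root + '/TinyImageNet/train/data.npy' and
# 'targets.npy' exist on disk, as __init__ above expects):
# ds = TinyImageNet('./data', train=True)
# ds.getTrainData([0, 10])          # classes is a half-open [start, end) pair
# idx, img, label = ds[0]           # __getitem__ returns (index, image, target)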
|
ruixiang-wang/Incremental-Learning-Research
|
PRE-master/TinyImageNet.py
|
TinyImageNet.py
|
py
| 4,064 |
python
|
en
|
code
| 4 |
github-code
|
6
|
40565109992
|
# 1 - Write a program that asks the user for a word and then prints each
# letter of the word on a separate line
def programa():
usuario = list(input('Ingresa una palabra: '))
contador = 0
while len(usuario) > contador:
print(usuario[contador])
contador += 1
programa()
|
maximiliano1997/informatorio-2023
|
Week-3/Ejercicios Estructuras de Datos Loops/ejercicio1.py
|
ejercicio1.py
|
py
| 313 |
python
|
es
|
code
| 0 |
github-code
|
6
|
38365303311
|
from datetime import datetime, timedelta
import logging
import os
import json
import pandas as pd
import requests
try:
from .exceptions import ApexApiException
except:
from exceptions import ApexApiException
class Apex_API:
def __init__(self, api_key: str):
self.api_key = api_key
logging.basicConfig(
level=logging.INFO,
format="[%(levelname)s] %(asctime)s %(message)s",
datefmt="%Y-%m-%d %I:%M:%S %p", # this defines the date format for the (asctime) part above
handlers=[logging.StreamHandler()],
            # only a StreamHandler is configured here, so logs are printed to the terminal
)
logging.getLogger("requests").setLevel(
logging.WARNING
        )  # get rid of https debug garbage
def ___iter__(self):
logging.info("what")
def __str__(self):
return "Apex API Client Object"
def __repr__(self):
return "Apex API"
def get_apex_player_stats(self, player: str) -> pd.DataFrame:
try:
data = requests.get(
f"https://api.mozambiquehe.re/bridge?version=5&platform=PC&player={player}&auth={self.api_key}"
)
logging.info(
f"Grabbing data for Player {player}, Status Code was {data.status_code}"
)
df = data.json()
return df
except BaseException as e:
logging.error(e)
raise ApexApiException
def get_apex_map_rotation(self) -> pd.DataFrame:
try:
data = requests.get(
f"https://api.mozambiquehe.re/maprotation?version=2&auth={self.api_key}"
)
logging.info(
f"Grabbing data for current Map Rotation, Status Code was {data.status_code}"
)
df = data.json()
df_current = pd.DataFrame([df["battle_royale"]["current"]])
df_current["type"] = "current"
df_next = pd.DataFrame([df["battle_royale"]["next"]])
df_next["remainingSecs"] = 0
df_next["remainingMins"] = 0
df_next["remainingTimer"] = "00:00:00"
df_next["type"] = "next"
df_combo = pd.concat([df_current, df_next])
logging.info(f"Grabbing {len(df_combo)} Records for Apex Map Rotation")
return df_combo
except BaseException as e:
logging.error(e)
raise ApexApiException
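# Illustrative usage sketch (the API key and player name are placeholders):
# client = Apex_API(api_key="YOUR_API_KEY")
# stats = client.get_apex_player_stats("SomePlayer")
# rotation = client.get_apex_map_rotation()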
|
jyablonski/apex_api_scraper
|
src/utils.py
|
utils.py
|
py
| 2,484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71634094907
|
################### PRACTICAL_ EMAIL SLICING ##########################
name = input('please enter your name: ')
email = input('please enter your email: ')
name = name.strip().capitalize()
username = email[:email.index('@')]
username = username.strip().capitalize()
website = email[email.index('@') + 1:]
print(f'Hello {name} we are happy to meet you, ')
print(f'your username is {username} and your website is {website}')
|
AhmadFouda/Python-proplem-solving
|
email_slicing.py
|
email_slicing.py
|
py
| 425 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40129830394
|
"""
Get Distances of Shortest Path (Dijkstra)
edges: dict<from:int, dict<to:int, cost:number>>
"""
from heapq import heappush, heappop
def one_to_one(
start, goal, num_vertexes, edges,
INF=9223372036854775807, UNREACHABLE=-1):
distances = [INF] * num_vertexes
distances[start] = 0
queue = [(0, start)]
while queue:
d, frm = heappop(queue)
if distances[frm] < d:
# already know shorter path
continue
if frm == goal:
return d
for to in edges[frm]:
new_cost = distances[frm] + edges[frm][to]
if distances[to] > new_cost:
# found shorter path
distances[to] = new_cost
heappush(queue, (distances[to], to))
return UNREACHABLE
def one_to_all(
start, num_vertexes, edges,
INF=9223372036854775807):
distances = [INF] * num_vertexes
distances[start] = 0
queue = [(0, start)]
while queue:
d, frm = heappop(queue)
if distances[frm] < d:
# already know shorter path
continue
for to in edges[frm]:
new_cost = distances[frm] + edges[frm][to]
if distances[to] > new_cost:
# found shorter path
distances[to] = new_cost
heappush(queue, (distances[to], to))
return distances
def one_to_all_bfs(start, num_vertexes, edges, INF=9223372036854775807):
"""
when all cost is 1, BFS is faster (ABC170E)
"""
distances = [INF] * num_vertexes
distances[start] = 0
to_visit = [start]
while to_visit:
next_visit = []
for frm in to_visit:
for to in edges[frm]:
new_cost = distances[frm] + 1
if new_cost < distances[to]:
distances[to] = new_cost
next_visit.append(to)
to_visit = next_visit
return distances
# --- end of library ---
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(N, M, edges):
INF = 9223372036854775807
ret = INF
for start in range(N):
distances = one_to_all(start, N, edges)
debug(distances, msg=":distances")
ret = min(ret, max(distances))
return ret
def main():
# verified https://atcoder.jp/contests/abc012/tasks/abc012_4
N, M = map(int, input().split())
from collections import defaultdict
edges = defaultdict(dict)
for _i in range(M):
A, B, T = map(int, input().split())
edges[A - 1][B - 1] = T
edges[B - 1][A - 1] = T
print(solve(N, M, edges))
# tests
T1 = """
3 2
1 2 10
2 3 10
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
10
"""
T2 = """
5 5
1 2 12
2 3 14
3 4 7
4 5 9
5 1 18
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
26
"""
T3 = """
4 6
1 2 1
2 3 1
3 4 1
4 1 1
1 3 1
4 2 1
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
1
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
# end of snippets/main.py
|
nishio/atcoder
|
libs/dijkstra.py
|
dijkstra.py
|
py
| 3,668 |
python
|
en
|
code
| 1 |
github-code
|
6
|
75189168508
|
import ALU, I_MEM, CLK, PC, REG_BANK, D_MEM, threading
#OpCode = 0000 0000
# Im[-1] instr
class CONTROL_UNIT(threading.Thread):
def __init__(self, LongRegFtoD, OpCode):
self.OpCode = OpCode
self.MyLongRegFtoD = LongRegFtoD
self.ALUControl = OpCode[4:8]
        #indicates whether Dir_B is used directly or Reg[Dir_B] is used
#if(type(OpCode) == str):
self.Imm = int(OpCode[3:4],2)
        #for the WB mux
self.MemOrRegW = int(OpCode[2:3],2)
        #MemW/WE enables writing to D_Mem
self.MemW = int(OpCode[1:2],2)
        #RegW/WE enables writing to REG_BANK
self.RegW = int(OpCode[0:1],2)
threading.Thread.__init__(self, target = self.setSignals, args = ())
        #careful with the clock
def setSignals(self):
while True:
            self.Imm = int(self.MyLongRegFtoD.OpCode[3:4],2)
            self.MemW = int(self.MyLongRegFtoD.OpCode[1:2],2)
            self.MemOrRegW = int(self.MyLongRegFtoD.OpCode[2:3],2)
            self.RegW = int(self.MyLongRegFtoD.OpCode[0:1],2)
self.ALUControl = self.MyLongRegFtoD.OpCode[4:8]
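# Illustrative decoding of the 8-bit OpCode string, following the slices used in
# __init__ above (assumed bit layout): bit 0 = RegW, bit 1 = MemW, bit 2 = MemOrRegW,
# bit 3 = Imm, bits 4-7 = ALUControl.
# e.g. OpCode = "10110010" -> RegW=1, MemW=0, MemOrRegW=1, Imm=1, ALUControl="0010"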
|
ger534/Proyecto2Arqui2
|
procesador/CONTROL_UNIT.py
|
CONTROL_UNIT.py
|
py
| 1,126 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72579615228
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 17 11:47:02 2019
@author: hwan - Took out relevant code from dolfin's plotting.py _plot_matplotlib code
         - To enter dolfin's own plotting code, use dl.plot(some_dolfin_object), where some_dolfin_object is a 3D object, and an error will be thrown
"""
import matplotlib.pyplot as plt
import dolfin.cpp as cpp
import pdb #Equivalent of keyboard in MATLAB, just add "pdb.set_trace()"
def plot_3D(obj, title, angle_1, angle_2):
# Importing this toolkit has side effects enabling 3d support
from mpl_toolkits.mplot3d import Axes3D # noqa
# Enabling the 3d toolbox requires some additional arguments
plt.title(title)
ax = plt.gca(projection='3d')
ax.set_aspect('auto')
ax.view_init(angle_1, angle_2)
# For dolfin.function.Function, extract cpp_object
if hasattr(obj, "cpp_object"):
obj = obj.cpp_object()
if isinstance(obj, cpp.function.Function):
return my_mplot_function(ax, obj,)
elif isinstance(obj, cpp.mesh.Mesh):
return my_mplot_mesh(ax, obj)
def my_mplot_mesh(ax, mesh):
tdim = mesh.topology().dim()
gdim = mesh.geometry().dim()
if gdim == 3 and tdim == 3:
bmesh = cpp.mesh.BoundaryMesh(mesh, "exterior", order=False)
my_mplot_mesh(ax, bmesh)
elif gdim == 3 and tdim == 2:
xy = mesh.coordinates()
return ax.plot_trisurf(*[xy[:, i] for i in range(gdim)],
triangles=mesh.cells())
def my_mplot_function(ax, f):
mesh = f.function_space().mesh()
gdim = mesh.geometry().dim()
C = f.compute_vertex_values(mesh)
X = [mesh.coordinates()[:, i] for i in range(gdim)]
return ax.scatter(*X, c=C)
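# Illustrative call (assumes `f` is a dolfin Function defined on a 3D mesh):
# plot_3D(f, title="Parameter field", angle_1=30, angle_2=45)
# plt.show()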
|
cotran2/Thermal_Fin_Heat_Simulator
|
Utilities/plot_3D.py
|
plot_3D.py
|
py
| 1,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19554797850
|
import sys
sys.setrecursionlimit(2500)
def dfs(graph, depth, node):
parent = graph[node] - 1
if -2 == parent or depth[node] + 1 <= depth[parent]:
return
depth[parent] = depth[node] + 1
dfs(graph, depth, parent)
def solution(n, managers):
depth = [1 for _ in range(n)]
for i in range(n):
dfs(managers, depth, i)
answer = max(depth)
return answer
if __name__ == "__main__":
n = int(input())
managers = [int(input()) for _ in range(n)]
print(solution(n, managers))
|
jiyoulee/problem-solving-v1
|
graph/115A.py
|
115A.py
|
py
| 554 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73785815229
|
"""empty message
Revision ID: 391b24b33343
Revises: e4338c095afb
Create Date: 2021-06-24 16:47:10.434392
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '391b24b33343'
down_revision = 'e4338c095afb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('post_id', sa.Integer(), nullable=False),
sa.Column('body', sa.String(length=1000), nullable=False),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('post_reactions', sa.Column('reaction', sa.Boolean(), nullable=True))
op.drop_constraint('post_reactions_post_id_fkey', 'post_reactions', type_='foreignkey')
op.create_foreign_key(None, 'post_reactions', 'posts', ['post_id'], ['id'], ondelete='CASCADE')
op.drop_column('post_reactions', '_reaction')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('post_reactions', sa.Column('_reaction', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'post_reactions', type_='foreignkey')
op.create_foreign_key('post_reactions_post_id_fkey', 'post_reactions', 'posts', ['post_id'], ['id'])
op.drop_column('post_reactions', 'reaction')
op.drop_table('comments')
# ### end Alembic commands ###
|
composerben/flask-group-project
|
migrations/versions/20210624_164710_fix_migration.py
|
20210624_164710_fix_migration.py
|
py
| 1,640 |
python
|
en
|
code
| 13 |
github-code
|
6
|
29827630738
|
#This file will only be needed to run
import pandas as pd
import numpy as numpy
from datetime import date
import datetime
import os
class box:
def __init__(self):
self.task_done = ""
self.no_of_day = (datetime.date.today() - date(1997, 8, 21)).days
self.dest = ""
self.wake_up = "" #should change in future
self.sleep = ""
self.social_media_time = 0
self.self_time = ""
self.breakfast = False
self.food_type = False
self.GRE_quant = False
self.GRE_quant_count = 0
self.GRE_verbal = False
self.GRE_verbal_count = 0
self.ML = False
self.articles_read = 0
self.words_learned = 0
self.anger = False
self.exercise = False
self.sad_day = False
self.happy_day = False
self.got_love = False
self.pain = False
def log(self):
print("Enter your daily achievement: ")
self.task_done = str(input())
print("Did you go anywhere? (Leave blank if nowhere) :")
self.dest = str(input())
print("What time did you wake up? : ")
self.wake_up = str(input())
print("What time did you go to sleep? : ")
self.sleep = str(input())
print("How many hours on social media did you spend?")
self.social_media_time = float(input())
print("How many hours for self time did you take out?")
self.self_time = float(input())
#Health
print("Did you have breakfast? :")
self.breakfast = self._conv_bool(input())
print("Did I eat sufficiently? :")
self.food_type = self._conv_bool(input())
#Studies
print("Did you study Machine Learning? :")
self.ML = self._conv_bool(input())
#GREStudies
print("Did you study GRE_quant today? :")
self.GRE_quant = self._conv_bool(input())
self.GRE_quant_count = self._get_GRE(self.GRE_quant)
print("Did you study GRE verbal today? :")
self.GRE_verbal = self._conv_bool(input())
self.GRE_verbal_count = self._get_GRE(self.GRE_verbal)
print("How many articles did you read today? :")
self.articles_read = int(input())
print("How many words did you learn today? :")
self.words_learned = int(input())
#Day Review
print("Did you feel anger today? :")
self.anger = self._conv_bool(input())
print("Did you feel sad today? :")
self.sad_day = self._conv_bool(input())
print("Were you happy today? :")
self.happy_day = self._conv_bool(input())
print("Did someone love you today? :")
self.got_love = self._conv_bool(input())
print("Did you exercise today? :")
self.exercise = self._conv_bool(input())
print("Was your body in pain? :")
self.pain = self._conv_bool(input())
def _get_GRE(self, ip):
if self._conv_bool(ip):
print("How many questions did you solve?")
return int(input())
else:
return 0
def _conv_bool(self, x):
if x == 'Y' or x == 'y':
return True
else :
return False
if __name__ == '__main__':
import os
if not os.path.exists('./logs.csv'):
df = pd.DataFrame(data = None, columns =
['no_of_day', 'task_done', 'destination',
'wake_up_time', 'sleep_time', 'social_media_time',
'self_time', 'breakfast', 'food_type',
'GRE_quant', 'GRE_quant_count',
'GRE_verbal', 'GRE_verbal_count',
'Machine_Learning', 'articles_read', 'words_learned',
'anger', 'exercise', 'sad_day',
'happy_day', 'got_love', 'pain'])
print('File doesnt exist')
print(df.head())
else:
df = pd.read_csv('./logs.csv')
print('File exists')
        print(df.head())
b = box()
b.log()
df_2 = pd.DataFrame(data = [[b.no_of_day, b.task_done, b.dest,
b.wake_up, b.sleep, b.social_media_time,
b.self_time, b.breakfast, b.food_type,
b.GRE_quant , b.GRE_quant_count,
b.GRE_verbal, b.GRE_verbal_count,
b.ML, b.articles_read, b.words_learned,
b.anger, b.exercise, b.sad_day,
b.happy_day, b.got_love, b.pain]],
columns = [
'no_of_day', 'task_done', 'destination',
'wake_up_time', 'sleep_time', 'social_media_time',
'self_time', 'breakfast', 'food_type',
'GRE_quant', 'GRE_quant_count',
'GRE_verbal', 'GRE_verbal_count',
'Machine_Learning', 'articles_read', 'words_learned',
'anger', 'exercise', 'sad_day', 'happy_day',
'got_love', 'pain'])
    result = pd.concat([df, df_2])
result.to_csv('./logs.csv', index = False)
result.head()
print(os.getcwd())
|
Geeks-Sid/habit_organizer
|
main.py
|
main.py
|
py
| 4,237 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40370617674
|
'''
Read a two-digit integer and determine whether its two digits are the same.
'''
def numero(n):
if int(n) >= 10 and int(n) <= 99:
if int(n[0]) == int(n[1]):
return True
else:
return False
Dosdigitos = input("Ingrese un número: ")
respuesta = numero(Dosdigitos)
if respuesta == True:
print("El número tiene dos digitos iguales")
else:
print("El número no tiene los dos digitos iguales")
|
Natacha7/Python
|
Ejercicios_unidad2.py/DosDigitos_iguales.py
|
DosDigitos_iguales.py
|
py
| 451 |
python
|
es
|
code
| 0 |
github-code
|
6
|
2580004662
|
from odoo import models, fields
class AccountTaxWithholdingRule(models.Model):
_name = "account.tax.withholding.rule"
_description = "account.tax.withholding.rule"
_order = "sequence"
sequence = fields.Integer(
default=10,
)
# name = fields.Char(
# required=True,
# )
domain = fields.Char(
required=True,
default="[]",
help='Write a domain over account voucher module'
)
tax_withholding_id = fields.Many2one(
'account.tax',
'Tax Withholding',
required=True,
ondelete='cascade',
)
percentage = fields.Float(
'Percentage',
digits=(16, 4),
help="Enter % ratio between 0-1."
)
fix_amount = fields.Float(
'Amount',
digits='Account',
help="Fixed Amount after percentaje"
)
|
ingadhoc/account-payment
|
account_withholding_automatic/models/account_tax_withholding_rule.py
|
account_tax_withholding_rule.py
|
py
| 854 |
python
|
en
|
code
| 42 |
github-code
|
6
|
73750770109
|
from operator import index
from meal import Meal
import json
import sqlite3
class Meal_Data:
"""Data layer to be used in conjunction with the Meal class"""
def __init__(self, filename = "foodinfo.json"):
"""Initializes Meal_Data"""
self.filename = filename
def meal_add(self, meal:Meal):
"""Stores an instance of the Meal class inside foodinfo.json"""
dupe_check = self.meal_find(meal.name)
if dupe_check == None:
meals = self.meal_get()
meals.append(meal)
self.meal_save(meals)
else:
error_message = f"A meal by the name '{meal.name.title()}' already exists."
print(error_message)
return
def meal_save(self, meals:list) -> None:
"""Saves a list of meals to the JSON"""
jsonmeals = []
        # -- The following for loop converts the Meal objects in the meals list into dictionaries (jsonmeals) --
for mealobj in meals:
jsonmeal = mealobj.as_dict()
jsonmeals.append(jsonmeal)
# -- Following two lines converts the list of dictionaries made above into JSON format and saves to foodinfo.json --
# TODO: Handle Missing File
f = open(self.filename, 'w')
f.flush()
json.dump(jsonmeals, f, indent=2)
f.close()
# -- Next two lines print out to string the list of Meals in JSON format --
# jsondump = json.dumps(jsonmeals, indent=2)
# print(jsondump)
return
# -- TODO : make a function to delete a Meal object that is stored inside foodinfo.json --
def meal_del(self, name:str):
"""Removes an instance of the Meal class inside foodinfo.json"""
meals = self.meal_get()
# Loop over all meals and remove
for meal in meals:
if meal.name == name:
index = meals.index(meal)
del meals[index]
else:
pass
# END FOR
self.meal_save(meals)
def meal_get(self) -> list[Meal]:
"""Returns a list of meals"""
try:
f = open(self.filename)
# TODO : If the foodinfo.json is not found it should make a .json file by that name --
except FileNotFoundError:
error_message = f"\nFile {self.filename} was not found.\n"
print(error_message)
return []
# Explicit flush to ensure we have the latest version of the file on disk
f.flush()
try:
jsondata = json.load(f)
# -- When the following error occurs, the list of meals is simply left as an empty list --
except json.JSONDecodeError:
            # create an empty jsondata list for the following loop
jsondata = []
# Close file handle
f.close()
        # -- The following for loop takes the JSON objects found in foodinfo.json and turns them into Python objects --
# -- and then appends those objects into the meals list
meals = []
for item in jsondata:
meal = Meal(item['name'],item['protein'],item['cost'],item['difficulty'])
meals.append(meal)
return meals
def meal_find(self, name:str) -> Meal:
"""Returns a specific meal object when searching for a meal by name"""
meals = self.meal_get()
# -- The following for loop cycles through the meals list looking for a matching meal name
# -- If the meal name inquired is not found - the loop will return None
for obj in meals:
if obj.name == name:
return obj
return None
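# Illustrative usage sketch (assumes the Meal constructor takes (name, protein, cost,
# difficulty), which is how meal_get() above rebuilds objects from JSON):
# store = Meal_Data()
# store.meal_add(Meal("tacos", 20, 8.5, "easy"))
# print([m.name for m in store.meal_get()])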
|
zaepho/DinnerDecider
|
mealdata.py
|
mealdata.py
|
py
| 3,719 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22807758362
|
import time
import random
import threading
"""
A semaphore is a synchronization object that keeps a count between 0 and a specified maximum value.
Each time a thread completes a wait on the semaphore object, the count is decremented by one;
each time a thread releases the semaphore object, the count is incremented by one.
When the count is 0, a thread waiting on the semaphore cannot proceed until the semaphore becomes signaled.
A count greater than 0 means the semaphore is signaled; a count equal to 0 means it is nonsignaled.
Semaphores are suited to controlling a shared resource that supports only a limited number of users, without resorting to busy waiting.
"""
semaphore = threading.Semaphore(value=0)  # the default value is 1; a value below 0 raises ValueError
def consumer():
print("consumer is waiting.")
semaphore.acquire() # 获取信号量
print("Consumer notify: consumered item number %s" %item)
def producer():
global item
time.sleep(3)
item = random.randint(0, 1000)
print("Producer notify: producer item number %s" %item)
    # Release the semaphore, incrementing the internal counter by 1; a thread waiting for the value to become greater than 0 is woken up.
semaphore.release()
if __name__ == "__main__":
for _ in range(5):
th1 = threading.Thread(target=producer)
th2 = threading.Thread(target=consumer)
th1.start()
th2.start()
th1.join()
th2.join()
print("program terminated.")
|
sola1121/practice_code
|
python3/python并行编程手册/ch02/P42_使用信号量实现线程同步_生产者-消费者模型.py
|
P42_使用信号量实现线程同步_生产者-消费者模型.py
|
py
| 1,558 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
12056898935
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Code for this script is originally at:
https://github.com/dfm/george/blob/master/docs/_code/model.py
"""
from __future__ import division, print_function
import emcee
import triangle
import numpy as np
import cPickle
import matplotlib.pyplot as pl
import george
from george import kernels
def model(params, t):
amp, loc, sig2 = params
return amp * np.exp(-0.5 * (t - loc) ** 2 / sig2)
def lnprior_base(p):
""" notice how the p are inferred in the original scale """
amp, loc, sig2 = p
if not -10 < amp < 10:
return -np.inf
if not -5 < loc < 5:
return -np.inf
if not 0 < sig2 < 3.0:
return -np.inf
return 0.0
def fit_ind(initial, data, nwalkers=32):
ndim = len(initial)
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim)
for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_ind, args=data)
print("Running burn-in")
p0, _, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print("Running production")
p0, _, _ = sampler.run_mcmc(p0, 1000)
return sampler
def lnlike_gp(p, t, y, yerr):
""" notice how a and tau needs to be exponentiated
meaning that a and tau are supplied in the log scale
"""
a, tau = np.exp(p[:2])
gp = george.GP(a * kernels.Matern32Kernel(tau))
gp.compute(t, yerr)
return gp.lnlikelihood(y - model(p[2:], t))
def lnprior_gp(p):
"""more obvious that p is initiated in the log scale """
lna, lntau = p[:2]
if not -5 < lna < 5:
return -np.inf
if not -5 < lntau < 5:
return -np.inf
return lnprior_base(p[2:])
def lnprob_gp(p, t, y, yerr):
lp = lnprior_gp(p)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike_gp(p, t, y, yerr)
def fit_gp(initial, data, nwalkers=32):
ndim = len(initial)
# start chains at slightly different places in parameter space
p0 = [np.array(initial) + 1e-8 * np.random.randn(ndim)
for i in xrange(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob_gp, args=data)
print("Running burn-in")
p0, lnp, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print("Running second burn-in")
p = p0[np.argmax(lnp)]
p0 = [p + 1e-8 * np.random.randn(ndim) for i in xrange(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, 500)
sampler.reset()
print("Running production")
p0, _, _ = sampler.run_mcmc(p0, 1000)
return sampler
def generate_data(params, N, rng=(-5, 5)):
gp = george.GP(params[0] * kernels.ExpSquaredKernel(params[1]))
# initialize t for drawing the data points
t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))
## modify the following
y = gp.sample(t)
y += model(params[2:], t)
yerr = 0.05 + 0.05 * np.random.rand(N)
y += yerr * np.random.randn(N)
# y = model(params[2:], t)
# yerr = gp.sample(t)
# 0.05 + 0.05 * np.random.rand(N)
#y += yerr * np.random.randn(N)
return t, y, yerr
if __name__ == "__main__":
np.random.seed(1234)
#truth = [0.1, 1.0, 0, 0.1, 0.4]
truth = [0.1, 3.3, -1.0, 0.1, 0.4]
t, y, yerr = generate_data(truth, 50)
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
pl.ylabel(r"$y$")
pl.xlabel(r"$t$")
pl.xlim(-5, 5)
pl.title("simulated data")
pl.savefig("data.png", dpi=150)
## Fit assuming independent.
# print("Fitting independent")
# data = (t, y, 1.0 / yerr ** 2)
# truth_ind = [0.0, 0.0] + truth
# sampler = fit_ind(truth_ind, data)
## Plot the samples in data space.
# print("Making plots")
# samples = sampler.flatchain
# x = np.linspace(-5, 5, 500)
# for s in samples[np.random.randint(len(samples), size=24)]:
# pl.plot(x, model(s[2:], x)+s[0]*x+s[1], color="#4682b4", alpha=0.3)
# pl.title("results assuming uncorrelated noise")
# pl.savefig("ind-results.png", dpi=150)
## Make the corner plot.
# fig = triangle.corner(samples[:, 2:], truths=truth, labels=labels)
# fig = triangle.corner(samples[:, :], truths=truth, labels=labels)
# fig.savefig("ind-corner.png", dpi=150)
# Fit assuming GP.
print("Fitting GP")
data = (t, y, yerr)
# truth is originally set to be [0.0, 0.0] by dfm, in log scale
truth_gp = truth + 1e-8 * np.random.randn(len(truth)) # [0.0, 0.0] + truth[2:]
sampler = fit_gp(truth_gp, data)
# Plot the samples in data space.
print("Making plots")
samples = sampler.flatchain
x = np.linspace(-5, 5, 500)
pl.figure()
pl.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
for s in samples[np.random.randint(len(samples), size=24)]:
# sampled parameters have to be exponentiated
gp = george.GP(np.exp(s[0]) * kernels.Matern32Kernel(np.exp(s[1])))
        gp.compute(t, yerr)
m = gp.sample_conditional(y - model(s[2:], t), x) + model(s[2:], x)
pl.plot(x, m, color="#4682b4", alpha=0.3)
pl.ylabel(r"$y$")
pl.xlabel(r"$t$")
pl.xlim(-5, 5)
pl.title("results with Gaussian process noise model")
pl.savefig("gp-results.png", dpi=150)
# Make the corner plot.
labels = [r"$\ln a^2$", r"$\ln \tau$", r"$\alpha$", r"$\ell$", r"$\sigma^2$"]
#fig = triangle.corner(samples[:, 2:], truths=truth, labels=labels)
# follow the original script to plot the hp in log space
truth[0] = np.log(truth[0])
truth[1] = np.log(truth[1])
cPickle.dump(truth, open("truth.pkl", "w"))
cPickle.dump(samples, open("samples.pkl", "w"))
# only plot the hyperparameters
fig = triangle.corner(samples, truths=truth, labels=labels, size=30)
fig.savefig("gp-corner.png", dpi=150)
|
karenyyng/shear_gp
|
george_examples/model.py
|
model.py
|
py
| 5,729 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27330667755
|
import requests
import time
from bs4 import BeautifulSoup
import urllib.request
import re
import json
start_time = time.time()
link_3 = []
link_4 = []
link_5 = []
link_6 = []
links = []
g = ""
b = ""
d = ""
y = ""
ya = ""
ask = ""
domain = ""
emails = []
new_emails = []
mails = []
def crawl(request_url):
try:
response = requests.get(request_url)
new_emails = re.findall(r"[a-z0-9\.\-+_]+@" + domain, response.text)
if new_emails:
emails.append(new_emails)
except:
pass
return emails
def get_links(url):
link_result = []
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
html_page = response.read()
soup = BeautifulSoup(html_page, "lxml")
for link in soup.findAll('a'):
d = link.get('href')
link_result.append(d)
return link_result
if __name__ == '__main__':
domain = input("enter the domain:")
url_d = 'https://duckduckgo.com/?q=email+"%40"+++'+domain+'+++""&ia=web&count=50&first=51'
link_3 = get_links(url_d)
url_y = 'https://in.search.yahoo.com/search?p=%5B%40"%20+%20'+domain+'%20+%20"%5D&pz=100'
link_4 = get_links(url_y)
url_ya = 'https://yandex.com/search/?text="%40"%20%20%20'+domain+'%20%20%20""&lr=20983'
link_5 = get_links(url_ya)
url_ask = "https://www.ask.com/web?q=email+"+domain+"&o=0&qo=homepageSearchBox"
link_6 = get_links(url_ask)
links = link_3 + link_4 + link_5 + link_6
nodup_link = list(set(links))
filtered_links = [i for i in nodup_link if re.search("http", i)]
final_links = list(set(filtered_links))
mails = [crawl(f) for f in final_links]
final_emails = []
for flat_lists in mails:
for flat_list in flat_lists:
item_list = list(set(flat_list))
for item in item_list:
if item not in final_emails:
final_emails.append(item)
print(final_emails)
data = {}
data.update({
'domain': domain,
'mails': final_emails
})
print(data)
with open('data.json', 'w') as outfile:
json.dump(data, outfile)
# print("--- %s seconds ---" % (time.time() - start_time))
|
realchief/EmailScraping-BeautifulSoup
|
filter_crwl_dft_srchegn_updated.py
|
filter_crwl_dft_srchegn_updated.py
|
py
| 2,298 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39792208434
|
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import utils
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives import hashes, cmac
from cryptography.exceptions import InvalidSignature
from cryptography.exceptions import InvalidTag
import os
class Security:
def __init__(self,path,BackupPath):
"""
        Initialize the security module, loading the private and public key from the path passed as argument if present,
        otherwise generating and saving them
:type path: String
:param path: The path of the pem file in which the private key must be written
:type backupPath: String
:param backupPath: The path of the pem file in which the private key must be written
"""
try:
with open(path,"rb") as pem:
try:
self.privateKey = serialization.load_pem_private_key(pem.read(),password=b'ServerMPSprivatekey',backend=default_backend())
self.publicKey = self.privateKey.public_key()
except ValueError:
try:
with open(BackupPath,"rb") as backup:
backup_key = serialization.load_pem_private_key(backup.read(),password=b'ServerMPSprivatekey',backend=default_backend())
with open(path,"wb") as pem_write:
self.privateKey = backup_key
self.publicKey = self.privateKey.public_key()
serializedPrivateKey = backup_key.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.PKCS8,encryption_algorithm=serialization.BestAvailableEncryption(b'ServerMPSprivatekey'))
pem_write.write(serializedPrivateKey)
except FileNotFoundError:
self.generate_key(path,BackupPath)
except FileNotFoundError:
try:
with open(BackupPath,"rb") as backup,open (path,"wb") as pem:
try:
backup_key = serialization.load_pem_private_key(backup.read(),password=b'ServerMPSprivatekey',backend=default_backend())
SerializedPrivateKey = backup_key.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.PKCS8,encryption_algorithm=serialization.BestAvailableEncryption(b'ServerMPSprivatekey'))
self.privateKey = backup_key
self.publicKey = self.privateKey.public_key()
pem.write(SerializedPrivateKey)
except ValueError:
self.generate_key(path,BackupPath)
except FileNotFoundError:
with open(path,"wb") as pem, open(BackupPath,"wb") as backup:
self.generate_key(path,BackupPath)
def generate_key(self,path,backupPath):
"""
Generate and write the private key
:type path: String
:param path: The path of the pem file in which the private key must be written
:type backupPath: String
:param backupPath: The path of the pem file in which the private key must be written
"""
with open(path,"wb") as pem, open(backupPath,"wb") as backup:
self.privateKey = rsa.generate_private_key(public_exponent=65537,\
key_size=8196,\
backend=default_backend())
self.publicKey = self.privateKey.public_key()
serializedPrivateKey = self.privateKey.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.BestAvailableEncryption(b'ServerMPSprivatekey'))
pem.write(serializedPrivateKey)
backup.write(serializedPrivateKey)
def RSAEncryptText(self,text):
"""
Encrypt the text using RSA with the public key of the handled client
:type text: Bytes
:param text: The plain text that must be encrypted
:rtype: Bytes
:return: The cipher text relative to the plain text passed as argument
"""
cipherText = self.ClientPublicKey.encrypt(text,
padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return cipherText
def RSADecryptText(self,cipherText):
"""
Decrypt the message using your own private key
:type cipherText: Bytes
:param cipherText: The cipher text that must be decrypted
:rtype: Bytes
        :return: The plain text obtained by decrypting the cipher text passed as argument
"""
plaintext = self.privateKey.decrypt(cipherText,
padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
return plaintext
def splitMessage(self,data,len):
"""
        Split the message in two parts, useful when you need to compare a message with a digest or a signature
:type data: Bytes
:param data: The Data that must be divided in two parts
:type len: Int
:param len: The point in which the list must be divided
:rtype: <Bytes,Bytes>
        :return: The tuple of lists obtained by dividing the original data in two parts
"""
return [data[0:len*(-1)],data[len*(-1):]]
def generateDigest(self,data):
"""
Generate the digest of the message (in bytes) using SHA-256
:type data: Bytes
:param data: The data of which we want generate the digest
:rtype: Bytes
:return: The digest of the data passed as argument
"""
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(data)
return digest.finalize()
def getSignature(self,data):
"""
Generate a signature by the private key
:type data: Bytes
:param data: The data we want to sign
:rtype: Bytes
:return:The signature of the data passed as argument
"""
signature = self.privateKey.sign(data,
padding.PSS(mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
return signature
def VerifySignature(self,data,signature):
"""
        Verify whether the signature, generated by the private key of the client, is associated with the data
:type data: Bytes
:param data: The data we want to verify
:type signature: Bytes
:param signature: The signature used to check
:rtype: Boolean
:return: If the signature is correct or not
"""
try:
self.ClientPublicKey.verify(signature,data,padding.PSS(mgf=padding.MGF1(hashes.SHA256()),salt_length=padding.PSS.MAX_LENGTH),hashes.SHA256())
return True
except InvalidSignature:
return False
def AddClientKey(self,key):
"""
        Add the public key of the client so it can be used when encrypting with RSA; pass the key encoded as 'utf-8'
:type key: Bytes
:param key: The public key of the client we want to add
"""
self.ClientPublicKey = serialization.load_pem_public_key(key,backend=default_backend())
def getSerializedPublicKey(self):
"""
        Get the server public key in serialized form (it must be decoded) so that it can be printed and sent
        :rtype: Bytes
        :return: The public key of the server
"""
return self.publicKey.public_bytes(encoding=serialization.Encoding.PEM,format=serialization.PublicFormat.SubjectPublicKeyInfo)
def getSerializedClientPublicKey(self):
"""
        Get the client public key in serialized form (it must be decoded) so that it can be printed and sent
:rtype: Bytes
:return: The public key of the client
"""
return self.ClientPublicKey.public_bytes(encoding=serialization.Encoding.PEM,format=serialization.PublicFormat.SubjectPublicKeyInfo)
def generateSymmetricKey(self,len,nonce):
"""
        Generate a symmetric key used in AESGCM with a given length (192/256 bit suggested) and pass a nonce used with the key
to cipher a text (each operation has its own couple of <key,nonce> in order to guarantee security)
:type len: Int
        :param len: The length of the symmetric key (in bits)
:type nonce: Int
:param nonce: The nonce used to encrypt/decrypt
:rtype: Int
        :return: 0 if the operations completed correctly
"""
self.nonce = nonce
self.len = len
self.SymmetricKey = AESGCM.generate_key(bit_length=self.len);
return 0
def getSymmetricKey(self):
"""
        Get the symmetric key as bytes; if you want to serialize it you have to transform it (suggested as integers, with a number of
        integers equal to the bit_length of the key / 8, because each integer represents a byte)
:rtype: Bytes
:return: The symmetric key used to encrypt/decrypt
"""
return self.SymmetricKey
def AddPacketNonce(self,nonce):
"""
        Add the nonce used in AES when it is necessary to encapsulate some information about the initiator of the conversation
        between two users
:type nonce: Int
        :param nonce: The nonce used to encrypt the packets necessary to exchange keys between two clients
"""
self.packetNonce = nonce
def AESDecryptText(self,ct):
"""
        Decrypt text with AES-GCM, which guarantees authenticity and integrity of the message; the handling of the nonce
        is provided by the function itself (each encryption/decryption must increment the nonce in order to keep it
        synchronized on the two sides)
:type ct: Bytes
:param ct: The cipher text to decrypt
:rtype: Bytes or None
:return: The plain text obtained by decrypting the cipher text passed as parameter
"""
        try:
            aesgcm = AESGCM(self.SymmetricKey)
            self.nonce = self.nonce+1
            pt = aesgcm.decrypt(self.nonce.to_bytes(16,byteorder='big'),ct,None)
            return pt
        except Exception:
            return None
def AESEncryptText(self,pt):
"""
        Encrypt a plain text with AES-GCM in order to guarantee authenticity and integrity of the message; the handling of the nonce
        is provided by the function itself (each encryption/decryption must increment the nonce in order to keep it always
        synchronized on the two sides)
        :type pt: Bytes
        :param pt: The plain text to encrypt
        :rtype: Bytes or None
        :return: The cipher text obtained by encrypting the plain text passed as argument
"""
try:
aesgcm = AESGCM(self.SymmetricKey)
self.nonce = self.nonce + 1
return aesgcm.encrypt(self.nonce.to_bytes(16,byteorder='big'), pt, None)
        except Exception:
return None
def PacketAESEncryptText(self,pt):
"""
        Encrypt text with AES and a special nonce (sent by the client during the login procedure) in order
        to encapsulate some information useful for the key exchange between two online users
:type pt: Bytes
:param pt: The plain text to encrypt
:rtype: Bytes or None
:return: The cipher text obtained by encrypting the plain text passed as argument
"""
try:
aesgcm = AESGCM(self.SymmetricKey)
self.packetNonce = self.packetNonce + 1
return aesgcm.encrypt(self.packetNonce.to_bytes(16,byteorder='big'), pt, None)
        except Exception:
return None
def addDHparameters(self,p,g):
"""
        Add the DH parameters, in order to retrieve them efficiently when necessary
:type p: Int
:param p: the Diffie Hellman P parameter
:type g: Int
:param g: The Diffie Hellman G parameter
"""
self.p = p
self.g = g
def getDHparameters(self):
"""
        Get the DH parameters as a list [p, g]
        :rtype: [Int, Int]
        :return: The list composed of the two DH parameters
"""
return [self.p,self.g]
def generateNonce(self,size):
"""
        Generate a nonce of a chosen size (in bytes) and get it as an integer encoded in big endian
        :type size: Int
        :param size: The size (in Bytes) of the nonce
        :rtype: Int
        :return: A nonce of the given size, generated using the cryptographically secure system call
"""
return int.from_bytes(os.urandom(size),byteorder='big')
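
# A minimal, self-contained sketch of the nonce-synchronized AES-GCM pattern that the
# encrypt/decrypt methods above describe. The two "sides" below are hypothetical
# stand-ins: both share the same key and starting nonce, and each side increments its
# own counter on every operation so the nonces stay aligned (assumption: the key and
# starting nonce were already exchanged securely, e.g. via the DH/RSA steps above).
def _nonce_sync_sketch():
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    key = AESGCM.generate_key(bit_length=256)
    shared_start_nonce = 1000  # agreed out of band

    # sender side: increment, then encrypt with the new counter value
    sender_nonce = shared_start_nonce + 1
    ct = AESGCM(key).encrypt(sender_nonce.to_bytes(16, byteorder='big'), b"hello", None)

    # receiver side: increments its own copy the same way, so decryption succeeds
    receiver_nonce = shared_start_nonce + 1
    pt = AESGCM(key).decrypt(receiver_nonce.to_bytes(16, byteorder='big'), ct, None)
    assert pt == b"hello"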
|
SieniAlessandro/E2E-Secure-Chat
|
Server/Security/Security.py
|
Security.py
|
py
| 14,411 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33346153360
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 05 11:52:00 2015
@author: Vahndi
"""
import pandas as pd
from numpy import float64, random, inf
class Optimiser(object):
def __init__(self, categoricalSettings,
oldResultsDataFrame = None, resultsColumn = 'Results',
noImprovementStoppingRounds = None, floatRounding = 3):
        '''
        Arguments:
            :categoricalSettings: a {dict of settingName, [setting1, setting2, ...]}
            :oldResultsDataFrame: a pandas DataFrame of existing results to use in the selection of new settings
            :resultsColumn: the name of the results column in the results DataFrame
            :noImprovementStoppingRounds: stop suggesting new settings after this many rounds without improvement (None to disable)
            :floatRounding: the number of decimal places to round float values to
        '''
self.categoricalSettings = categoricalSettings
self.resultsColumn = resultsColumn
self.resultsDataFrame = oldResultsDataFrame
self.floatRounding = floatRounding
self.categories = sorted(list(self.categoricalSettings.keys()))
self.numCategories = len(self.categories)
self.currentCategoryIndex = 0
self.noImprovementStoppingRounds = noImprovementStoppingRounds
# Initialise current settings to random values
self.initialiseRandomSettings()
def initialiseRandomSettings(self):
'''
Randomly set the settings to different values
'''
self.roundsNoImprovement = 0
self.currentSettings = {}
for category in self.categories:
self.currentSettings[category] = Optimiser._getRandomValue(self.categoricalSettings[category])
@classmethod
def _getRandomValue(cls, fromList):
return fromList[random.randint(len(fromList))]
def isFinished(self):
return False
def hasResultFor(self, settings):
if self.resultsDataFrame is None:
return False
else:
dfSub = self.resultsDataFrame
for category in settings.keys():
categoryValue = settings[category]
dfSub = dfSub[dfSub[category] == categoryValue]
return dfSub.shape[0] > 0
def getNextSettings(self):
'''
Returns a list of settings to try next
'''
if self.noImprovementStoppingRounds is not None:
if self.roundsNoImprovement == self.noImprovementStoppingRounds:
return None
# Get a list of settings across the dimension of the current category
nextSettings = []
numCategoriesTried = 0
# Loop until some new settings have been acquired or all categories have been tried
while not nextSettings and numCategoriesTried < self.numCategories:
loopCategory = self.categories[self.currentCategoryIndex]
for val in self.categoricalSettings[loopCategory]:
setting = {}
for category in self.categories:
if category == loopCategory:
setting[category] = val
else:
setting[category] = self.currentSettings[category]
nextSettings.append(setting)
# Remove any settings which already have results for
nonDuplicates = []
for setting in nextSettings:
if not self.hasResultFor(setting):
nonDuplicates.append(setting)
nextSettings = nonDuplicates
# Update loop and category parameters
numCategoriesTried += 1
self.currentCategoryIndex += 1
self.currentCategoryIndex = self.currentCategoryIndex % self.numCategories
# Return the list of settings or None if the run is finished
if not nextSettings:
return None
else:
self._currentSettingsParameter = self.categories[(self.currentCategoryIndex - 1) % self.numCategories]
return nextSettings
def getCurrentSettingsParameter(self):
return self._currentSettingsParameter
def currentBestResult(self):
if self.resultsDataFrame is None:
return -inf
else:
return self.resultsDataFrame[self.resultsColumn].max()
def addResults(self, resultsDataFrame):
'''
Adds a list of results to the existing results and changes the current
settings to the best result so far
'''
# Add results to any existing results
if self.resultsDataFrame is None:
self.resultsDataFrame = resultsDataFrame
else:
# Check for improvement
if self.currentBestResult() >= resultsDataFrame[self.resultsColumn].max():
self.roundsNoImprovement += 1
else:
self.roundsNoImprovement = 0
# Merge results with existing results
self.resultsDataFrame = pd.concat([self.resultsDataFrame, resultsDataFrame], ignore_index=True)
df = self.resultsDataFrame
# Get the best result so far and change the current settings to the settings that produced it
bestResult = df[self.resultsColumn].max()
bestSettingsRow = df[df[self.resultsColumn] == bestResult].iloc[0]
for category in self.categories:
categoryValue = bestSettingsRow[category]
if type(categoryValue) in (float64, float):
categoryValue = round(categoryValue, self.floatRounding)
if categoryValue == round(categoryValue, 0):
categoryValue = int(round(categoryValue, 0))
self.currentSettings[category] = categoryValue
def saveResults(self, filePath):
self.resultsDataFrame.to_csv(filePath)
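
# A minimal usage sketch of the Optimiser above. The setting names and the toy
# objective function are hypothetical; in practice addResults() receives the real
# evaluation results for each suggested settings dict, in a DataFrame whose columns
# are the setting names plus the results column ('Results' by default).
if __name__ == '__main__':

    def _toy_objective(settings):
        return settings['window'] + (10 if settings['kernel'] == 'rbf' else 0)

    optimiser = Optimiser({'window': [8, 16, 32], 'kernel': ['linear', 'rbf']},
                          noImprovementStoppingRounds=3)
    while True:
        batch = optimiser.getNextSettings()
        if batch is None:
            break
        rows = []
        for settings in batch:
            row = dict(settings)
            row['Results'] = _toy_objective(settings)
            rows.append(row)
        optimiser.addResults(pd.DataFrame(rows))
    print(optimiser.currentBestResult())
    print(optimiser.currentSettings)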
|
vahndi/mazurka_classifier
|
ncd/optimiser.py
|
optimiser.py
|
py
| 5,947 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31036686251
|
tampungan_barang = []
# Add item
def tambah_barang():
print(' TAMBAH BARANG ')
while True :
barang = input('Masukkan barang : ')
if barang in tampungan_barang:
print('Barang sudah tersedia')
pass
elif barang not in tampungan_barang:
tampungan_barang.append(barang)
pilihan = input("Tambahkan barang lagi? (y/n) : ")
print("LIST BARANG".center(44,'='))
if pilihan == 'y' :
print("|","Kode".center(12, ' '),"|", "Nama Barang".center(15, ' '),"|","\n")
for i in tampungan_barang :
print("+ ",(tampungan_barang.index(i)+1) ," |", (i).center(16, ' '),"+")
else :
break
# Delete item
def hapus_barang():
print('Hapus barang'.center(44,'='))
while True :
hapus = input('Masukkan nama barang yang akan dihapus : ')
if hapus in tampungan_barang :
tampungan_barang.remove(hapus)
            lanjut = input('Tekan y jika lanjut : ').upper()
            if lanjut == "Y" :
hapus_barang()
else :
break
else :
print('Barang tidak tersedia')
hapus_barang()
# Edit item
def edit_barang() :
print("LIST BARANG".center(44,'='))
for i in tampungan_barang :
print("+ Kode Barang ",(tampungan_barang.index(i)+1) ,"|", (i).center(15, ' '),"+")
while True :
print('MENU EDIT BARANG'.center(44,'='))
caribarang = input('Masukkan nama barang yang mau di edit : ')
if caribarang in tampungan_barang :
ubah_ke = input('Ubah ke : ')
tampungan_barang[tampungan_barang.index(caribarang)] = ubah_ke
for i in tampungan_barang :
print("+ Kode Barang ".center(28," "),(tampungan_barang.index(i)+1) ,"|", (i).center(15, ' '),"+")
print("\n",'-'*50)
else :
print('Barang tidak ditemukan!')
pass
lanjut()
# continue prompt
def lanjut():
lanjut = input('Lanjut (y/n) : ')
if lanjut == 'y' :
pass
else :
menu()
# Check item names
def nama_barang():
print("LIST BARANG".center(44,'='))
for i in tampungan_barang :
print("+ Kode Barang ",(tampungan_barang.index(i)+1) ,"|", (i).center(15, ' '),"+")
exit = input('Tekan enter untuk keluar : ')
if exit == ' ':
menu()
else :
menu()
# View items
def daftar_barang():
while True :
print('MENU CEK BARANG'.center(44,'='))
cek = input('Nama barang : ')
if cek in tampungan_barang :
print(cek,'Tersedia!')
else :
print(cek,'Tidak tersedia!')
lanjut = input('Cek lagi? (y/n) :')
if lanjut == 'y' :
daftar_barang()
else :
break
# Check index
def cek_indeks():
while True :
print('MENU CEK INDEKS BARANG'.center(44,'='))
cek = input('Masukkan nama barang : ')
if cek in tampungan_barang :
print('Barang berada pada indeks :', tampungan_barang.index(cek))
else :
print('Barang tidak ada!')
        lanjut = input('Cek lagi? (y/n) :').upper()
        if lanjut == 'Y' :
pass
else :
break
# menu
def menu():
while True :
print('-'*44)
print("PROGRAM BARANG".center(44,'='))
print('''
1. Tambah Barang
2. Hapus Barang
3. Edit Barang
4. Cek Nama Barang
5. Daftar Barang
6. Indeks Barang ''')
print('-'*44)
pilihan = input("Pilih menu : ")
print('-'*44)
if pilihan == "1" :
tambah_barang()
elif pilihan == "2" :
hapus_barang()
elif pilihan == "3" :
edit_barang()
elif pilihan == "4" :
daftar_barang()
elif pilihan == "5" :
nama_barang()
elif pilihan == "6" :
cek_indeks()
A = False
exit = input('Enter untuk keluar : ')
if exit == ' ':
break
else :
print(' TERIMA KASIH '.center(44,"+"))
break
menu()
|
Mage29/Codingan_ASD_3
|
Program_Barang.py
|
Program_Barang.py
|
py
| 4,389 |
python
|
id
|
code
| 0 |
github-code
|
6
|
7545685477
|
import psycopg2
DBNAME = "news"
def fetch_all(query, params):
"""
execute a query and fetch all result from it
:param query: the query to execute
:param params: parameters of the query
:return: result of this query
"""
    # it's kind of time consuming to open and close a connection for every query
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query, params)
ret = c.fetchall()
db.close()
return ret
def article_views(cnt=None):
"""
    statistics about article views; an article view is counted when there exists
    an http request for the article path with GET method and 200 status code
:param cnt: int, optional
max number of articles
:return:
list of (article name, view_cnt) pair ordered by views in desc order
"""
query = """
select title, view_cnt
from articles, view_stat
where concat('/article/', slug) = path
order by view_cnt desc
"""
if cnt is not None:
query += "limit (%s)"
params = (cnt,)
else:
params = ()
return fetch_all(query, params)
def author_views(cnt=None):
"""
statistics about author's all articles views
:param cnt: int, optional
max number of authors
:return:
list of (author name, view_cnt) pair ordered by views in desc order
"""
query = """
select name, sum(view_cnt) as view_cnt
from articles, view_stat, authors
where concat('/article/', slug) = path
and articles.author = authors.id
group by authors.id
order by view_cnt desc
"""
if cnt is not None:
query += "limit (%s)"
params = (cnt,)
else:
params = ()
return fetch_all(query, params)
def error_stat(threshold):
"""
error rate stat by day, error rate is defined as total number of failed
    requests (status is not 200) divided by the total number of requests that day.
    if a day doesn't have any requests it will be ignored
:param threshold: double
error rate bigger or equal threshold will be returned
:return: list of (date, error rate)
"""
query = """
select date(time) as stat_date,
sum(cast(status != '200 OK' as integer))
/ cast(count(*) as real) as error_rate
from log
group by stat_date
having
sum(cast(status != '200 OK' as integer))
/ cast(count(*) as real) >= (%s);
"""
return fetch_all(query, (threshold,))
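
# A short usage sketch of the reporting helpers above; it assumes the 'news' database
# and the articles/authors/log tables (plus the view_stat view used in the queries)
# already exist and are reachable with psycopg2's default connection settings.
if __name__ == '__main__':
    print("Top 3 most viewed articles:")
    for title, views in article_views(3):
        print("  {} -- {} views".format(title, views))

    print("Author popularity:")
    for name, views in author_views():
        print("  {} -- {} views".format(name, views))

    print("Days with an error rate of at least 1%:")
    for day, rate in error_stat(0.01):
        print("  {} -- {:.2%} errors".format(day, rate))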
|
akudet/fsnd-proj3
|
reporter_db.py
|
reporter_db.py
|
py
| 2,454 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14785766794
|
"""User model tests."""
# run these tests like:
#
# python -m unittest test_user_model.py
import os
from unittest import TestCase
from models import db, User, Message, Follows, Likes
os.environ['DATABASE_URL'] = "postgresql:///warbler-test"
# Now we can import app
from app import app
# Create our tables (we do this here, so we only create the tables
# once for all tests --- in each test, we'll delete the data
# and create fresh new clean test data
db.drop_all()
db.create_all()
class MessageModelTestCase(TestCase):
"""Test views for messages."""
def setUp(self):
"""Create test client, add sample data."""
User.query.delete()
Message.query.delete()
Follows.query.delete()
self.client = app.test_client()
u = User(
email="[email protected]",
username="testuser",
password="HASHED_PASSWORD"
)
db.session.add(u)
db.session.commit()
self.u = u.id
def test_message_model(self):
"""Does our Basic model work?"""
# print('*********************')
# print(self.u)
# print('*********************')
m = Message(text="Test message", user_id=self.u)
db.session.add(m)
db.session.commit()
u = User.query.get(self.u)
#Confirming that our user does indeed have a message now
self.assertEqual(len(u.messages), 1)
# def test_user_must_exist(self):
# """Testing that a user_id must exist or a message will not submnit (will throw an error)"""
# m = Message(text="Test message", user_id=10)
# db.session.add(m)
# db.session.commit()
# all_messages = Message.query.all()
# self.assertEqual(len(all_messages), 1)
    def test_multiple_messages(self):
        """Testing that a single user may indeed have multiple messages"""
m = Message(text="Test message", user_id=self.u)
m2 = Message(text="Test message Take 2", user_id=self.u)
db.session.add_all([m,m2])
db.session.commit()
u = User.query.get(self.u)
self.assertEqual(len(u.messages), 2)
def test_message_date_time(self):
"""Testing our default date time creation if none is passed in."""
m = Message(text="Test message", user_id=self.u)
db.session.add(m)
db.session.commit()
# print('*********************')
new_message = Message.query.get(m.id)
# print(new_message.timestamp)
# print('*********************')
self.assertIsNotNone(new_message.timestamp)
    def test_delete_a_msg(self):
        """Testing that a message can be deleted and no longer appears in the table"""
m = Message(text="Test message", user_id=self.u)
db.session.add(m)
db.session.commit()
all_messages = Message.query.all()
self.assertEqual(len(all_messages),1)
new_message = Message.query.get(m.id)
db.session.delete(new_message)
db.session.commit()
new_total_messages = Message.query.all()
self.assertEqual(len(new_total_messages),0)
def test_message_user_relationship(self):
"""Testing if we can gather which user the message was created by via the user relationship"""
m = Message(text="Test message", user_id=self.u)
db.session.add(m)
db.session.commit()
u = User.query.get(self.u)
new_message = Message.query.get(m.id)
# print('*********************')
# print(new_message.user)
# print('*********************')
self.assertEqual(new_message.user, u)
|
mahado13/Twitter-Clone
|
test_message_model.py
|
test_message_model.py
|
py
| 3,685 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36282438996
|
import datetime
import requests
from bs4 import BeautifulSoup as bs4
from flask import Flask
from flask_restful import Resource, Api
OYK_URL = "https://oulunkylanyhteiskoulu.fi/"
def get_food() -> list:
with requests.Session() as s:
g = s.get(OYK_URL)
bs = bs4(g.text, 'html.parser')
today = datetime.date.today().weekday()
day = bs.select(".food__list")[today]
foods = day.find_all("p")[1].text.split("\n",)
clean_food = list(filter(None, foods))
return clean_food
app = Flask(__name__)
api = Api(app)
class Food(Resource):
def get(self):
try:
foods = get_food()
alfred = {"items": [{"title": food}
for food in foods]}
return alfred, 200
        except Exception:
return {}, 500
api.add_resource(Food, '/food')
if __name__ == "__main__":
app.run(debug=True, port=5000)
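
# A quick manual check of the /food endpoint above, assuming the app is already
# running locally on port 5000 (as configured in app.run()):
#
#   import requests
#   resp = requests.get("http://localhost:5000/food")
#   print(resp.status_code, resp.json())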
|
drstuggels/oyk-food
|
main.py
|
main.py
|
py
| 908 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74530223548
|
import numpy as np
class Board:
def __init__(self):
self.grid = np.zeros((12, 26), dtype=int)
self.score = 0
def check_in_console(self):
for j in range(0, 26):
for i in range(0, 12):
print(self.grid[i][j], end='')
print()
def insert_block(self, x, y, block, sizeX, sizeY):
for i in range(0, sizeX):
for j in range(0, sizeY):
try:
if block.grid[i][j] > 0:
self.grid[x + i][y + j] = block.grid[i][j]
                except IndexError:
pass
def delete_block(self, x, y, block, sizeX, sizeY):
for i in range(0, sizeX):
for j in range(0, sizeY):
try:
if block.grid[i][j] > 0:
self.grid[x + i][y + j] = 0
                except IndexError:
pass
def can_move_left(self, x, y, block, sizeX, sizeY):
result = True
for i in range(0, sizeY):
if x <= 0:
if block.grid[0 - x][i] != 0:
result = False
for j in range(0, sizeY):
for i in range(0, sizeX - 1):
if block.grid[i][j] != 0 and self.grid[x + i - 1][y + j] > 10:
result = False
# print("result = ", result)
return result
def can_move_right(self, x, y, block, sizeX, sizeY):
result = True
for i in range(0, sizeY):
if x >= 12 - sizeX:
if block.grid[11 - x][i] != 0:
result = False
for j in range(0, sizeY):
for i in range(1, sizeX):
try:
if block.grid[sizeX - i][j] != 0 and self.grid[x + sizeX - i + 1][y + j] > 10:
result = False
                except IndexError:
pass
# print("result = ", result)
return result
def check_board(self):
canMoveDown = True
for j in range(0, 26):
for i in range(0, 12):
if 10 > self.grid[i][j] > 0:
if j == 25:
canMoveDown = False
return canMoveDown
if self.grid[i][j + 1] > 10:
canMoveDown = False
return canMoveDown
def delete_row(self, number):
for i in range(0, 12):
self.grid[i][number] = 0
for j in range(0, number):
for i in range(0, 12):
self.grid[i][number - j] = self.grid[i][number - 1 - j]
for i in range(0, 12):
self.grid[i][0] = 0
def upgrade_board(self):
scoreCalculated = 0
for j in range(0, 26):
for i in range(0, 12):
if 10 > self.grid[i][j] > 0:
self.grid[i][j] += 10
for j in range(0, 26):
completed = True
for i in range(0, 12):
if self.grid[i][j] == 0:
completed = False
if completed:
self.delete_row(j)
scoreCalculated += 1
completed = True
self.score = self.score + 2 ** scoreCalculated - 1
print('Score: ', self.score)
def print_board_console(self):
for j in range(0, 26):
for i in range(0, 12):
print(self.grid[i][j], end=' ')
print()
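
# A minimal sketch of how the Board above can be exercised. The _DemoBlock class is a
# hypothetical stand-in for the real tetromino class, which only needs to expose a 2D
# `grid` attribute with non-zero values marking occupied cells.
if __name__ == '__main__':
    class _DemoBlock:
        def __init__(self):
            self.grid = np.array([[1, 1],
                                  [1, 1]])  # a 2x2 "O" piece

    board = Board()
    block = _DemoBlock()
    board.insert_block(5, 0, block, 2, 2)  # place the piece near the top of the grid
    print("can move left: ", board.can_move_left(5, 0, block, 2, 2))
    print("can move right:", board.can_move_right(5, 0, block, 2, 2))
    board.upgrade_board()                  # locks active cells and clears any full rows
    board.print_board_console()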
|
Jurand76/Z2J
|
tetris/board.py
|
board.py
|
py
| 3,436 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29457010542
|
#!/usr/bin/env python
import sys
import commands
import string
import datetime
import logging
import logging.handlers
from optparse import OptionParser
from random import choice
def print_error(ret, do_exit=False, msg=""):
"""
ret is the tuple returned by commands.getstatusoutput. If ret[0] is not 0,
then msg (if passed) or ret[1] is printed as an error. If do_exit is True,
the program also exits
"""
if ret[0] != 0:
if not msg:
msg = ret[1]
logging.error("Check the following information:")
logging.error(msg)
if do_exit:
sys.exit(ret[0])
def check_lcg_ce(ce):
"""Do the tests for a lcg-CE"""
# I will not waste much effort on this, since lcg-CE are condemned
# to disappear.
rets = []
ce, queue = ce.split("/", 1)
logging.info("\t\tchecking globus-job-run to ce")
cmd = "globus-job-run %s /bin/hostname" % ce
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
logging.info("\t\tchecking globus-job-run to fork")
cmd = "globus-job-run %s/jobmanager-fork /bin/pwd" % ce
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
logging.info("\t\tchecking globus-job-run to queue")
queue = queue.split("-")
cmd = "globus-job-run %s/%s-%s -queue %s /bin/pwd" % tuple([ce] + queue)
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
return rets
def check_cream_ce(ce):
"""Do the tests for a CREAM CE"""
rets = []
ce_hostport, dummy = ce.split("/", 1)
logging.info("\t\tchecking glite-ce-allowed-submission")
cmd = "glite-ce-allowed-submission -n %s" % ce_hostport
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
logging.info("\t\tchecking glite-ce-job-submit")
cmd = "glite-ce-job-submit -n -a -r %s test_submission.jdl" % ce # XXX
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
if ret[0] == 0:
url = ret[1].splitlines()[-1]
else:
        return rets  # XXX submission failed; skip status polling
logging.info("\t\t\tJob ID: %s", url)
while True:
cmd = "glite-ce-job-status -n %s" % url
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
if "[DONE-OK]" in ret[1]:
logging.info("\t\tsubmission ok, check the following job " + \
"id for further details %s", url)
break
elif "[DONE-FAILED]" in ret[1]:
ret = (1, ret[1])
print_error(ret)
break
print_error(ret)
rets.append(ret)
return rets
def check_gridftp(host):
"""Check gridftp on host"""
cmd = "uberftp %s ls" % host
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
if ret[0] != 0:
print_error(ret)
else:
logging.info("\t\tGridFTP OK")
def check_ces(bdii, vo):
"""Query the bdii for the available CE for VO vo, then check them"""
logging.info("Checking Computing Elements")
logging.info("\tQuerying the BDII for the CEs")
cmd = "lcg-info --list-ce --bdii %(bdii)s --sed --vo %(vo)s" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret, do_exit=True)
ces = ret[-1].splitlines()
logging.info("\t\tFound: " + ",\n\t\t\t".join(ces))
checked = []
for ce in ces:
if ce in checked:
continue
rets = []
checked.append(ce)
ce_host = ce.split(":")[0]
logging.info("\tChecking %s", ce_host)
# Check the GridFTP
check_gridftp(ce_host)
if "8443" in ce:
rets.extend(check_cream_ce(ce))
else:
# lcf-CE
rets.extend(check_lcg_ce(ce))
if not any([i[0] for i in rets]):
logging.info("\t\tJob submission seems OK")
else:
logging.critical("\t\tJob submission has problems, check errors")
def filter_and_join_ldap(data, query):
"""Filter results to only those of query and join
line breaks from ldapsearch."""
got = False
aux = []
for i in data.splitlines():
if i.startswith(query):
got = True
aux.append([i.split(":", 1)[-1].strip()])
elif i.startswith(" ") and got:
aux[-1].append(i.strip())
elif got:
got = False
return ["".join(i) for i in aux]
def check_ses(bdii, vo):
"""Query the bdii for the available SE for VO, then check them"""
logging.info("Checking Storage Elements")
logging.info("\tQuerying the BDII for the SEs")
cmd = "lcg-info --list-se --bdii %(bdii)s --sed --vo VO:%(vo)s" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret, do_exit=True)
ses = ret[-1].splitlines()
logging.info("\t\tFound: " + ",\n\t\t\t".join(ses))
checked = ["gridce05.ifca.es"]
for se in ses:
if se in checked:
continue
rets = []
checked.append(se)
logging.info("\tChecking %s", se)
cmd = "uberftp %s ls" % se
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
if ret[0] != 0:
print_error(ret)
else:
logging.info("\t\tGridFTP is up")
rets.append(ret)
cmd = "ldapsearch -x -LLL -H ldap://%(bdii)s -b o=grid \
'(&(objectClass=GlueSATop) \
(GlueVOInfoAccessControlBaseRule=VO:%(vo)s) \
(GlueChunkKey=GlueSEUniqueID=%(se)s))' \
GlueVOInfoPath" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
se_paths = filter_and_join_ldap(ret[1], "GlueVOInfoPath")
cmd = "ldapsearch -x -LLL -H ldap://%(bdii)s -b o=grid \
'(&(objectClass=GlueSEControlProtocol) \
(GlueChunkKey=GlueSEUniqueID=%(se)s) \
(GlueSEControlProtocolType=SRM) \
(GlueSEControlProtocolVersion=2.2.0))' \
GlueSEControlProtocolEndpoint" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
endpt = [i.replace("httpg", "srm") for i in filter_and_join_ldap(
ret[1], "GlueSEControlProtocolEndpoint")]
for endpoint in endpt:
for se_path in se_paths:
logging.info("\t\tUploading to %(endpoint)s/%(se_path)s",
locals())
randfile = ''.join([choice(string.letters + string.digits) \
for i in range(15)])
cmd = "lcg-cp -v -b --vo %(vo)s -D srmv2 file:/etc/issue \
%(endpoint)s/\?SFN=%(se_path)s/%(randfile)s" % locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
if ret[0] == 0:
logging.info("\t\tRemoving uploaded file")
cmd = "lcg-del -l -v -b --vo %(vo)s -D srmv2 \
%(endpoint)s/\?SFN=%(se_path)s/%(randfile)s" % \
locals()
logging.debug("Executing '%s'", cmd)
ret = commands.getstatusoutput(cmd)
print_error(ret)
rets.append(ret)
if not any([i[0] for i in rets]):
logging.info("\t\tData management seems OK")
else:
logging.critical("\t\tData management has problems, check errors")
def check_bdii(bdii):
"""Check bdii for correctness"""
logging.info("Checking BDII '%s' information (TBD)", bdii)
def get_proxy():
"""Check for proxy validity and return VO"""
ret = commands.getstatusoutput("voms-proxy-info -exists")
print_error(ret, do_exit=True, msg="VOMS: No valid proxy found!")
ret = commands.getstatusoutput("voms-proxy-info -vo")
print_error(ret, do_exit=True)
vo = ret[1]
return vo
def set_logging(level=logging.INFO):
"""Set up logging"""
outfile = "%s.log" % datetime.datetime.now().strftime("%Y%m%d_%H%M%S.%f")
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%m-%d %H:%M",
filename=outfile,
filemode="w")
console = logging.StreamHandler()
console.setLevel(level)
formatter = logging.Formatter('%(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("Detailed output for this run will be on '%s'", outfile)
def main():
"""Main program"""
usage = """%prog [options] <siteBDII host>:<port>"""
parser = OptionParser(usage=usage)
# parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
# default="False", help="Print verbose results")
parser.add_option("-c", "--ces", dest="onlyce", action="store_true",
default=False, help="Check only Computing Elements")
parser.add_option("-s", "--ses", dest="onlyse", action="store_true",
default=False, help="Check only Storage Elements")
(opts, args) = parser.parse_args()
if opts.onlyse and opts.onlyce:
parser.error("-s and -c options are mutually exclusive")
    elif opts.onlyse or opts.onlyce:
all_ = False
else:
all_ = True
if len(args) != 1:
parser.error("Error, you have to specify one (and only one) siteBDII")
set_logging()
vo = get_proxy()
logging.info("Checking with VO '%s'", vo)
bdii = args[-1]
check_bdii(bdii)
if all_ or opts.onlyce:
check_ces(bdii, vo)
if all_ or opts.onlyse:
check_ses(bdii, vo)
if __name__ == "__main__":
main()
sys.exit(0)
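
# Example invocations (the siteBDII endpoint below is hypothetical); a valid VOMS
# proxy must exist beforehand, since get_proxy() aborts otherwise:
#
#   python run_tests.py sitebdii.example.org:2170       # check both CEs and SEs
#   python run_tests.py -c sitebdii.example.org:2170    # Computing Elements only
#   python run_tests.py -s sitebdii.example.org:2170    # Storage Elements only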
|
alvarolopez/egi-certool
|
run_tests.py
|
run_tests.py
|
py
| 10,232 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72592445628
|
import numpy as np
from .lanczos import lanczos_resample_one, lanczos_resample_three
def coadd_psfs(
se_psfs, se_wcs_objs, coadd_wgts,
coadd_scale, coadd_dim):
"""Coadd the PSFs.
Note that this routine assumes that the PSFs in the SE image have their
centers at the image origin and that they should be interpolated to the
coadd plane so they are centered at the world origin.
Parameters
----------
se_psfs : list of np.ndarray
The list of SE PSF images to coadd.
se_wcs_objs : list of galsim.BaseWCS or children
The WCS objects for each of the SE PSFs.
coadd_wgts : 1d array-like object of floats
        The relative coadding weights for each of the SE PSFs.
coadd_scale : float
The pixel scale of desired coadded PSF image.
coadd_dim : int
The number of pixels desired for the final coadd PSF.
Returns
-------
psf : np.ndarray
The coadded PSF image.
"""
# coadd pixel coords
y, x = np.mgrid[0:coadd_dim, 0:coadd_dim]
u = x.ravel() * coadd_scale
v = y.ravel() * coadd_scale
coadd_image = np.zeros((coadd_dim, coadd_dim), dtype=np.float64)
wgts = coadd_wgts / np.sum(coadd_wgts)
for se_psf, se_wcs, wgt in zip(se_psfs, se_wcs_objs, wgts):
se_x, se_y = se_wcs.toImage(u, v)
im, _ = lanczos_resample_one(se_psf / se_wcs.pixelArea(), se_y, se_x)
coadd_image += (im.reshape((coadd_dim, coadd_dim)) * wgt)
coadd_image *= (coadd_scale**2)
return coadd_image
def coadd_image_noise_interpfrac(
se_images, se_noises, se_interp_fracs, se_wcs_objs,
coadd_wgts, coadd_scale, coadd_dim):
"""Coadd a set of SE images, noise fields, and interpolation fractions.
Parameters
----------
se_images : list of np.ndarray
The list of SE images to coadd.
se_noises : list of np.ndarray
The list of SE noise images to coadd.
se_interp_fracs : list of np.ndarray
The list of SE interpolated fraction images to coadd.
se_wcs_objs : list of galsim.BaseWCS or children
The WCS objects for each of the SE images.
coadd_wgts : 1d array-like object of floats
The relative coaddng weights for each of the SE images.
coadd_scale : float
The pixel scale of desired coadded image.
coadd_dim : int
        The number of pixels desired for the final coadd image.
Returns
-------
img : np.ndarray, shape (coadd_dim, coadd_dim)
The coadd image.
nse : np.ndarray, shape (coadd_dim, coadd_dim)
The coadd noise image.
intp : np.ndarray, shape (coadd_dim, coadd_dim)
The interpolated flux fraction in each coadd pixel.
"""
# coadd pixel coords
y, x = np.mgrid[0:coadd_dim, 0:coadd_dim]
u = x.ravel() * coadd_scale
v = y.ravel() * coadd_scale
coadd_image = np.zeros((coadd_dim, coadd_dim), dtype=np.float64)
coadd_noise = np.zeros((coadd_dim, coadd_dim), dtype=np.float64)
coadd_intp = np.zeros((coadd_dim, coadd_dim), dtype=np.float32)
wgts = coadd_wgts / np.sum(coadd_wgts)
for se_im, se_nse, se_intp, se_wcs, wgt in zip(
se_images, se_noises, se_interp_fracs, se_wcs_objs, wgts):
se_x, se_y = se_wcs.toImage(u, v)
im, nse, intp, _ = lanczos_resample_three(
se_im / se_wcs.pixelArea(),
se_nse / se_wcs.pixelArea(),
se_intp,
se_y,
se_x)
coadd_image += (im.reshape((coadd_dim, coadd_dim)) * wgt)
coadd_noise += (nse.reshape((coadd_dim, coadd_dim)) * wgt)
coadd_intp += (intp.reshape((coadd_dim, coadd_dim)) * wgt)
coadd_image *= (coadd_scale**2)
coadd_noise *= (coadd_scale**2)
return coadd_image, coadd_noise, coadd_intp
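
# Sketch of typical calls (inputs are hypothetical): the WCS objects are the galsim
# WCS instances of each SE image, the weights are relative (e.g. inverse-variance)
# weights, and the dimensions and scales below are illustrative only.
#
#   coadd_psf = coadd_psfs(se_psfs, se_wcs_objs, coadd_wgts=[1.0, 0.8, 1.2],
#                          coadd_scale=0.2, coadd_dim=53)
#   img, nse, intp = coadd_image_noise_interpfrac(se_images, se_noises, se_interp_fracs,
#                                                 se_wcs_objs, coadd_wgts=[1.0, 0.8, 1.2],
#                                                 coadd_scale=0.2, coadd_dim=225)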
|
beckermr/metadetect-coadding-sims
|
coadd_mdetsims/coadd.py
|
coadd.py
|
py
| 3,790 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32456637613
|
# The purpose of this function is to take the vertex property map returned when using gt.add_edge_list() with the option hashed=True and add it as an internal vertex property.
# - For some reason I have only gotten this to consistently work when I define the vertex property map by looping over vertices. I had issues with using get_2d_array([0]) when the ids are strings (e.g. for jids or occ2Xmesos)
def add_ids_as_vertex_property(graph, ids):
id_prop = graph.new_vertex_property("string")
graph.vp["ids"] = id_prop
for g in graph.vertices():
id_prop[g] = ids[g]
return graph, id_prop
new, id_prop = add_ids_as_vertex_property(g_jid, vmap)
new.vp.ids.get_2d_array([0])
|
jamiefogel/Networks
|
Code/Modules/add_ids_as_vertex_property.py
|
add_ids_as_vertex_property.py
|
py
| 693 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39269323605
|
from sqlalchemy import create_engine
from tests.util import RPCTest
class PDNSTest(RPCTest):
def cleanup_pdns_db(self, db_uri):
with create_engine(db_uri).begin() as conn:
conn.execute('delete from domains')
conn.execute('delete from domainmetadata')
conn.execute('delete from records')
def create_output_for_zone(self, zone, output, zone_group, db_uri):
self.r.output_create(output, plugin='pdns-db', db_uri=db_uri)
self.r.zone_group_create(zone_group)
self.r.zone_group_add_zone(zone_group, zone)
self.r.output_add_group(output, zone_group)
|
1and1/dim
|
dim-testsuite/tests/pdns_test.py
|
pdns_test.py
|
py
| 631 |
python
|
en
|
code
| 39 |
github-code
|
6
|
74472078906
|
import os
import pickle
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from openpyxl import Workbook
def save_pickle(data, filename):
with open(filename, 'wb') as file:
pickle.dump(data, file)
def load_pickle(filename):
with open(filename, 'rb') as file:
data = pickle.load(file)
return data
def save_parquet(data, filename):
df = pd.DataFrame(data)
table = pa.Table.from_pandas(df)
pq.write_table(table, filename)
def load_parquet(filename):
table = pq.read_table(filename)
df = table.to_pandas()
data = df.to_dict(orient='records')
return data
def save_xlsx(data, filename):
wb = Workbook()
ws = wb.active
for i, item in enumerate(data, start=1):
for j, value in enumerate(item.values(), start=1):
ws.cell(row=i, column=j, value=value)
wb.save(filename)
def load_xlsx(filename):
wb = pd.read_excel(filename)
data = wb.to_dict(orient='records')
return data
# Example data collection
collection = [{'id': i, 'value': i*2} for i in range(1, 101)]
# Saving and loading the collection using the pickle module
save_pickle(collection, 'collection.pickle')
loaded_pickle = load_pickle('collection.pickle')
# Saving and loading the collection using Parquet
save_parquet(collection, 'collection.parquet')
loaded_parquet = load_parquet('collection.parquet')
# Saving and loading the collection using XLSX
save_xlsx(collection, 'collection.xlsx')
loaded_xlsx = load_xlsx('collection.xlsx')
print(f"Liczba elementów w kolekcji: {len(collection)}")
print("Moduł pickle:")
print(f" Zapis: {len(pickle.dumps(collection))} bajtów")
print(f" Odczyt: {len(pickle.dumps(loaded_pickle))} bajtów")
print("Parquet:")
print(f" Zapis: {os.path.getsize('collection.parquet')} bajtów")
print(f" Odczyt: {os.path.getsize('collection.parquet')} bajtów")
print("XLSX:")
print(f" Zapis: {os.path.getsize('collection.xlsx')} bajtów")
print(f" Odczyt: {os.path.getsize('collection.xlsx')} bajtów")
|
Lisiozmur/Njpo
|
Ćwiczenie5/Zadanie1.py
|
Zadanie1.py
|
py
| 2,117 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24510539081
|
import json
import frappe
from frappe.model.document import Document
from frappe.utils.safe_exec import get_safe_globals, safe_exec
from frappe.integrations.utils import make_post_request
from frappe.desk.form.utils import get_pdf_link
from frappe.utils.background_jobs import enqueue
def validate(self, method):
if self.channel == "WhatsApp":
fields = frappe.get_doc("DocType", self.document_type).fields
fields += frappe.get_all(
"Custom Field",
filters={"dt": self.document_type},
fields=["fieldname"]
)
# if not any(field.fieldname == self.custom_receiver_mobile for field in fields): # noqa
# frappe.throw(f"Field name {self.custom_receiver_mobile} does not exists")
def on_trash(self, method):
pass
# if self.channel == "WhatsApp":
# if self.notification_type == "Scheduler Event":
# frappe.delete_doc("Scheduled Job Type", self.name)
# frappe.cache().delete_value("whatsapp_notification_map")
def after_insert(self, method):
pass
# if self.channel == "WhatsApp":
# if self.notification_type == "Scheduler Event":
# method = f"whatsapp_erpnext.utils.trigger_whatsapp_notifications_{self.event_frequency.lower().replace(' ', '_')}" # noqa
# job = frappe.get_doc(
# {
# "doctype": "Scheduled Job Type",
# "method": method,
# "frequency": self.event_frequency
# }
# )
# job.insert()
def format_number(self, number):
if (number.startswith("+")):
number = number[1:len(number)]
return number
def send_scheduled_message(self) -> dict:
safe_exec(
self.condition, get_safe_globals(), dict(doc=self)
)
language_code = frappe.db.get_value(
"WhatsApp Templates", self.template,
fieldname='language_code'
)
if language_code:
for contact in self._contact_list:
data = {
"messaging_product": "whatsapp",
"to": self.format_number(contact),
"type": "template",
"template": {
"name": self.template,
"language": {
"code": language_code
},
"components": []
}
}
self.notify(data)
# return _globals.frappe.flags
def send_template_message(self, doc: Document, contact_no = None):
"""Specific to Document Event triggered Server Scripts."""
if not self.enabled:
return
doc_data = doc.as_dict()
if self.condition:
# check if condition satisfies
if not frappe.safe_eval(
self.condition, get_safe_globals(), dict(doc=doc_data)
):
return
template = frappe.db.get_value(
"WhatsApp Templates", self.custom_whatsapp_template,
fieldname='*'
)
if template:
for row in self.recipients:
if row.receiver_by_document_field != "owner":
if not contact_no:
contact_no = doc.get(row.receiver_by_document_field)
if contact_no:
data = {
"messaging_product": "whatsapp",
"to": contact_no,
"type": "template",
"template": {
"name": self.custom_whatsapp_template,
"language": {
"code": template.language_code
},
"components": []
}
}
# Pass parameter values
if self.fields:
parameters = []
for field in self.fields:
parameters.append({
"type": "text",
"text": doc.get_formatted(field.field_name)
})
data['template']["components"] = [{
"type": "body",
"parameters": parameters
}]
if self.attach_print:
key = doc.get_document_share_key()
frappe.db.commit()
link = get_pdf_link(
doc_data['doctype'],
doc_data['name'],
print_format=self.print_format or "Standard"
)
filename = f'{doc_data["name"]}.pdf'
url = f'{frappe.utils.get_url()}{link}&key={key}'
data['template']['components'].append({
"type": "header",
"parameters": [{
"type": "document",
"document": {
"link": url,
"filename": filename
}
}]
})
label = f"{doc_data['doctype']} - {doc_data['name']}"
notify(self, data, label)
def notify(self, data, label = None):
"""Notify."""
settings = frappe.get_doc(
"WhatsApp Settings", "WhatsApp Settings",
)
token = settings.get_password("token")
headers = {
"authorization": f"Bearer {token}",
"content-type": "application/json"
}
try:
response = make_post_request(
f"{settings.url}/{settings.version}/{settings.phone_id}/messages",
headers=headers, data=json.dumps(data)
)
message_id = response['messages'][0]['id']
enqueue(save_whatsapp_log, data = data, message_id = message_id, label = label)
frappe.msgprint("WhatsApp Message Triggered", indicator="green", alert=True)
except Exception as e:
response = frappe.flags.integration_request.json()['error']
error_message = response.get('Error', response.get("message"))
frappe.msgprint(
f"Failed to trigger whatsapp message: {error_message}",
indicator="red",
alert=True
)
finally:
status_response = frappe.flags.integration_request.json().get('error')
frappe.get_doc({
"doctype": "Integration Request",
"integration_request_service": self.custom_whatsapp_template,
"output": str(frappe.flags.integration_request.json()),
"status": "Failed" if status_response else "Completed"
}).insert(ignore_permissions=True)
def format_number(self, number):
if (number.startswith("+")):
number = number[1:len(number)]
return number
@frappe.whitelist()
def send_notification(notification, ref_doctype, ref_docname, mobile_no = None):
noti_doc = frappe.get_doc("Notification", notification)
ref_doc = frappe.get_doc(ref_doctype, ref_docname)
send_template_message(noti_doc, ref_doc, mobile_no)
def save_whatsapp_log(data, message_id, label = None):
frappe.get_doc({
"doctype": "WhatsApp Message",
"type": "Outgoing",
"message": str(data['template']),
"to": data['to'],
"message_type": "Template",
"message_id": message_id,
"content_type": "document",
"label": label
}).save(ignore_permissions=True)
|
finbyz/whatsapp_erpnext
|
whatsapp_erpnext/whatsapp_erpnext/doc_events/notification.py
|
notification.py
|
py
| 5,911 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6727912141
|
"""
Module for parsing arguments.
"""
import sys
import argparse
import os
from pathlib import Path
from typing import Any
__author__ = "Stijn Arends"
__version__ = "v0.1"
__date__ = "21-8-2022"
class ArgumentParser:
"""
Class to parse the input arguments.
"""
def __init__(self) -> None:
self.parser = self._create_argument_parser()
# Print help if no arguments are supplied and stop the program
if len(sys.argv) == 1:
self.parser.print_help(sys.stderr)
sys.exit(1)
self.arguments = self.parser.parse_args()
@staticmethod
def _create_argument_parser():
"""
Create an argument parser.
:returns
--------
parser - ArgumentParser
"""
parser = argparse.ArgumentParser(prog=f"python {os.path.basename(__file__)}",
description="Python script to parse NetWas results.",
epilog="Contact: [email protected]")
# Set version
parser.version = __version__
parser.add_argument('-f',
'--file', dest="file",
help='Input NetWas file - tab seperated txt or csv file',
required=True)
parser.add_argument('-t',
'--threshold', dest="threshold",
                            help='NetWas score threshold to select \'good\' reprioritized genes. Default = None',
default=None, type=float)
parser.add_argument('-o',
'--output', dest="output",
help='Location and name of the ouput file.',
required=True)
parser.add_argument('--gene_list', dest="gene_list",
                            help='Specify if only gene symbols are written out. '
                                 'Default is a NetWas file with filtered genes',
action="store_true")
parser.add_argument('-v',
'--version',
                            help='Displays the version number of the script and exits',
action='version')
return parser
def get_argument(self, argument_key: str) -> Any:
"""
Method to get an input argument.
:parameters
-----------
argument_key - str
Full command line argument (so --config for the configuration file argument).
:returns
--------
value - List or boolean
"""
if self.arguments is not None and argument_key in self.arguments:
value = getattr(self.arguments, argument_key)
else:
value = None
return value
def get_parser(self) -> argparse.ArgumentParser:
"""
Get the argument parser
:returns
--------
parser - argparse.ArgumentParser
Argument parser
"""
return self.parser
class CLIArgValidator:
"""
Class to check if arguments are valid.
"""
def validate_input_file(self, input_path: str) -> None:
"""
Validate the input files by checking if they actually exists
and the which extention they have.
:parameters
-----------
input_path - str
Path to a file
"""
input_path = Path(input_path)
self._validate_input_exists(input_path)
self._validate_input_extension(input_path)
@staticmethod
def _validate_input_exists(input_path: Path) -> None:
"""
Check if a file exists.
:parameters
-----------
input_path - str
Path to a file
"""
if not input_path.is_file():
raise FileNotFoundError('Input file does not exist!')
@staticmethod
def _validate_input_extension(input_path: Path) -> None:
"""
Check if a file has the right extension.
:parameters
-----------
input_path - str
Path to a file
"""
if not input_path.suffix in [".txt", ".csv"]:
raise FileNotFoundError('Input file should be either a .txt or .csv')
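
# A minimal sketch of how these classes are typically wired together in a main script;
# the post-parsing handling below is illustrative only.
if __name__ == '__main__':
    cla_parser = ArgumentParser()
    cla_validator = CLIArgValidator()

    netwas_file = cla_parser.get_argument('file')
    threshold = cla_parser.get_argument('threshold')
    output_file = cla_parser.get_argument('output')

    cla_validator.validate_input_file(netwas_file)
    print(f"Parsing {netwas_file} (threshold: {threshold}), writing results to {output_file}")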
|
molgenis/benchmark-gwas-prio
|
prioritization_methods/NetWAS/arg_parser.py
|
arg_parser.py
|
py
| 3,976 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43266096059
|
import discord
import os
from keep_alive import keep_alive
from discord.ext import commands
from better_profanity import profanity
os.system('python3 -m commands')
profanity.load_censor_words_from_file('./profanity.txt')
client = commands.Bot(command_prefix = '$')
money_registry = []
list1 = ['myself', 'me', 'i']
@client.event
async def on_ready():
print('Bot is ready!')
await client.change_presence(activity=discord.Game('$help'))
@client.command()
async def displayembed(ctx, *, Title):
embed = discord.Embed(title= Title, description= Title, color = 6400 ) #,color=Hex code
await ctx.send(embed=embed)
@client.command()
async def ping(ctx):
await ctx.send(f'Pong! {round (client.latency * 1000)}ms')
@client.command()
async def kill(ctx, *, WhoToKill):
embed = discord.Embed(description=f'{WhoToKill} eats some mushrooms from the wild. Too bad they were poisonous...', color= 6400) #,color=Hex code
await ctx.send(embed=embed)
@client.event
async def on_message(message):
mention = f'<@!{client.user.id}>'
if mention in message.content:
embed = discord.Embed(description=f"_{message.author.mention} :bell: You ping me, I ping you._", color= 6400 )
await message.channel.send(embed=embed)
if str(message.channel) == "pictures" and message.content != '':
if message.author != client.user:
await message.channel.purge(limit=1)
embed = discord.Embed(description= f"Sorry{message.author.mention}! Only Pictures!", color = 6400)
await message.channel.send(embed=embed)
else:
pass
if '' in message.content:
embed = discord.Embed(title= "Self Roles", description = "React to this message to get these roles! ")
if not message.author.bot:
if profanity.contains_profanity(message.content):
await message.delete()
embed = discord.Embed(description= f"{message.author.mention} :octagonal_sign: Mind your language!", color = 6400)
await message.channel.send(embed=embed)
await client.process_commands(message)
@client.event
async def on_member_join(member):
print(f'{member} has joined the server! Welcome!')
@client.event
async def on_member_remove(member):
print(f'{member} has left! Goodbai! GLHF')
keep_alive()
client.run(os.getenv('TOKEN'))
|
LittlRayRay/Censorbot
|
main.py
|
main.py
|
py
| 2,297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11458247441
|
# First, import the Select class
from selenium.webdriver.support.select import Select
from selenium import webdriver
import time
# Open the browser and go to the Ctrip travel official website
driver = webdriver.Chrome()
driver.get('https://www.ctrip.com/?sid=155952&allianceid=4897&ouid=index')
driver.maximize_window()  # maximize the window
# Sleep for 5 seconds
time.sleep(5)
# The Select class can only pick options of controls whose tag_name is 'select'
# The options belong to the drop-down select box, so locate the select box first and then make the selection
# If the drop-down control is a dt (a table), first locate and click the drop-down box, then locate and click the option
# Ways to select an option of a <select> tag drop-down box:
# ① By the visible text of the option: Select(locator of the select control).select_by_visible_text(text of the option tag)
s = driver.find_element_by_id('J_roomCountList')
Select(s).select_by_visible_text('6间')  # select '6间' (6 rooms)
time.sleep(5)
# ② By the value attribute of the option tag: Select(locator of the select control).select_by_value(value of the option tag)
Select(s).select_by_value("5")
time.sleep(5)
# ③ By the option index (all options are treated as a list starting from 0): Select(locator of the select control).select_by_index(option index)
Select(s).select_by_index(7)
time.sleep(5)
driver.quit()
|
Ailian482/WebSelenium
|
Auto_Test/20_下拉框选择处理.py
|
20_下拉框选择处理.py
|
py
| 1,363 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
21368489956
|
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
import ruamel.yaml
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
def _get_measurement_range_for_output(output_key, output, method):
# method = output['method']
# config = output[method]
# return np.arange(config['start'], config['stop'], config['step'])
method_keys = method.split('.') # e.g. ['freq_mod', 'span']
config = output
# find the method configuration inside the output-config
for key in method_keys:
config = config[key]
return np.arange(config['start'], config['stop'], config['step'])
def color_generator(N, colormap='gnuplot'):
""" Color generator for a given matplotlib colormap.
Usage:
------------------------------------------
import matplotlib.pylab as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
N = 20
color_gen = color_generator(N)
    for i in range(N):
color = next(color_gen)
# do something with the color ...
"""
cm_map = plt.get_cmap(colormap)
c_norm = colors.Normalize(vmin=0, vmax=N)
scalar_map = cmx.ScalarMappable(norm=c_norm, cmap=cm_map)
    for i in range(N):
yield scalar_map.to_rgba(i)
cell='4'
measno='3'
filename='N:/data/emily/magnetometer_test/cell{1:s}/remote/meas{0:s}'.format(str(measno), str(cell))
files=glob.glob(filename+"/*.csv")
files=sorted(files)
start=100
steps=100
a=np.loadtxt(files[0], delimiter=',')
a_fft=np.abs(np.fft.rfft(a, axis=0))
b=np.sum(a_fft[start::steps,:], axis=1)
color_gen = color_generator(len(b))
config_name = glob.glob(filename+'/config*.yaml')
with open(config_name[0], 'r') as ymlfile:
cfg = ruamel.yaml.load(ymlfile)
stack = cfg['stack']
meas_ranges = [None] * len(stack)
keys = [None] * len(stack)
outputs = [None] * len(stack)
methods = [None] * len(stack)
for i, stack_entry in enumerate(stack):
keys[i], method_index = stack_entry.split('.') # e.g. key='B1', method_index = '0'
method_index = int(method_index) # index gives the position of the method in the methods array
outputs[i] = cfg['outputs'][keys[i]]
methods[i] = outputs[i]['methods'][method_index]
meas_ranges[i] = _get_measurement_range_for_output(keys[i], outputs[i], methods[i])
b0_amp = cfg['outputs']['B0']['amp']['start']
b1_freq_center = cfg['outputs']['B1']['freq_mod']['center']
b1_freq_span = cfg['outputs']['B1']['freq_mod']['span']['start']
downsampling_factor = cfg['devices']['nidaq']['downsampling_factor']
measurement_time = cfg['devices']['nidaq']['measurement_time_s']
sample_rate = cfg['devices']['nidaq']['sample_rate']
x_axis_label = cfg['outputs'][keys[0]][methods[0]]['label']
data_points = sample_rate*measurement_time/downsampling_factor
datanew=np.zeros([len(b), len(files)])
plt.clf()
# for j in range(len(b)-1):
# #if j!=9: continue
# color = next(color_gen)
# plt.plot(a_fft[:,j], label=str(meas_ranges[1][j]), color=color)
# plt.title("$B_1$ frequency (Hz)", fontsize=16)
# plt.ylabel("FFT signal (a.u).", fontsize=16)
# plt.xlabel("Frequency (Hz)", fontsize=16)
# plt.ylim((0,8))
# plt.legend(ncol=3, prop={'size':10})
# plt.show()
# plt.savefig(filename+"/fft_0mV_{}.png".format(measno), dpi=300)
# plt.savefig(filename+"/fft_0mV_{}.pdf".format(measno))
# plt.clf()
# plt.plot(b)
# plt.ylabel("FFT signal a.u.", fontsize=16)
# plt.xlabel("Frequency (Hz)", fontsize=16)
# plt.ylim((0,9))
# plt.savefig(filename+"/fft_sum_0mV_{}.png".format(measno), dpi=300)
# plt.savefig(filename+"/fft_sum_0mV_{}.pdf".format(measno))
# plt.clf()
# raise
for i in range(len(files)):
data=np.loadtxt(files[i], delimiter=',')
data_fft=np.abs(np.fft.rfft(data, axis=0))
datanew[:,i]=np.sum(data_fft[start::steps,:], axis=1)
plt.imshow(datanew[-1::-1], aspect='auto', interpolation='nearest',
extent=[meas_ranges[0][0]*1000, meas_ranges[0][-1]*1000, start/1000, data_fft.shape[0]/1000], cmap='gnuplot')
plt.xlabel('R$_4$ offset (mV)', fontsize=20)
plt.ylabel('Frequency (kHz)', fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=20)
plt.colorbar()
plt.show()
plt.savefig(filename+"/all_together{0:s}_steps{1:s}.png".format(measno, str(steps)), dpi=300)
plt.savefig(filename+"/all_together{0:s}_steps{1:s}.pdf".format(measno, str(steps)))
|
physikier/magnetometer
|
src/analysis.py
|
analysis.py
|
py
| 4,786 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37985935295
|
#! /usr/bin/env python3
import audioInterface
import os
import yaml
import sys
from datetime import datetime
from gpiozero import Button
from signal import pause
from pydub import AudioSegment
from pydub.playback import play
try:
with open("config.yaml") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
except FileNotFoundError as e:
print(
f"Could not find the config.yaml file. FileNotFoundError: {e}. Check config location and retry."
)
sys.exit(1)
hook = Button(config["hook_gpio"])
def off_hook() -> None:
print("Phone off hook, ready to begin!")
audio_interface = audioInterface.AudioInterface(config, hook)
# playback voice message through speaker
print("Playing voicemail message...")
play(
AudioSegment.from_wav(
os.path.dirname(os.path.abspath(config["source_file"]))
+ "/sounds/voicemail.wav"
)
- config["playback_reduction"]
)
# start recording beep
print("Playing beep...")
play(
AudioSegment.from_wav(
os.path.dirname(os.path.abspath(config["source_file"])) + "/sounds/beep.wav"
)
- config["beep_reduction"]
)
# now, while phone is off the hook, record audio from the microphone
print("recording")
audio_interface.record()
audio_interface.stop()
output_file = (
os.path.dirname(os.path.abspath(config["source_file"]))
+ "/recordings/"
+ f"{datetime.now().isoformat()}"
)
audio_interface.close(output_file + ".wav")
print("Finished recording!")
def on_hook() -> None:
print("Phone on hook.\nSleeping...")
def main():
hook.when_pressed = off_hook
hook.when_released = on_hook
pause()
if __name__ == "__main__":
main()
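
# For reference, a hypothetical config.yaml covering the keys this script reads
# (values are illustrative; the real project may define additional options for
# the audio interface):
#
#   hook_gpio: 17
#   source_file: /home/pi/rotary-phone-audio-guestbook/audioGuestBook.py
#   playback_reduction: 16
#   beep_reduction: 24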
|
nickpourazima/rotary-phone-audio-guestbook
|
audioGuestBook.py
|
audioGuestBook.py
|
py
| 1,781 |
python
|
en
|
code
| 13 |
github-code
|
6
|
74637154747
|
import time
import redis
cache = redis.StrictRedis(host='redis', decode_responses=True, db=0, port=6379)
def update_and_get_hit_count():
    """Increment the hit counter in Redis and return the new value, retrying on connection errors."""
print('In utils/update_and_get_hit_count')
retries = 5
while True:
try:
return cache.incr('hits')
except redis.exceptions.ConnectionError as err:
if retries == 0:
raise err
retries -= 1
time.sleep(0.5)
def clear_hit_count():
    """Reset the hit counter in Redis to zero, retrying on connection errors."""
print('in utils/clear_hit_count')
retries = 5
while True:
try:
return cache.set('hits', 0)
except redis.exceptions.ConnectionError as err:
if retries == 0:
raise err
retries -= 1
time.sleep(0.5)
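
# A tiny usage sketch (assumes a reachable Redis instance at host 'redis', as the
# StrictRedis client above is configured, e.g. inside a docker-compose network):
if __name__ == '__main__':
    clear_hit_count()
    for _ in range(3):
        print('hits so far:', update_and_get_hit_count())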
|
ShukujiNeel13/composetest
|
utils.py
|
utils.py
|
py
| 770 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26135102637
|
import cv2 as cv
import numpy as np
img = cv.imread('/home/ai3/Desktop/common/ML/Day13/girl.jpg',0)
kernel = np.ones((2,2),np.uint8)
open1 = cv.morphologyEx(img,cv.MORPH_OPEN,kernel)
open2 = cv.morphologyEx(img,cv.MORPH_CLOSE,kernel)
open3 = cv.morphologyEx(open1,cv.MORPH_CLOSE,kernel)
img=np.hstack((open1,open2,open3))
cv.imshow('dst', img)
cv.waitKey(0)
|
94akshayraj/AI-program
|
ML ans/day13/3.py
|
3.py
|
py
| 365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5471431928
|
"""
Design-of-Experiments Driver.
"""
from __future__ import print_function
import traceback
import inspect
from openmdao.core.driver import Driver, RecordingDebugging
from openmdao.core.analysis_error import AnalysisError
from openmdao.utils.mpi import MPI
from openmdao.recorders.sqlite_recorder import SqliteRecorder
class DOEGenerator(object):
"""
Base class for a callable object that generates cases for a DOEDriver.
Attributes
----------
_num_samples : int
The number of samples generated (available after generator has been called).
"""
def __init__(self):
"""
Initialize the DOEGenerator.
"""
self._num_samples = 0
def __call__(self, design_vars):
"""
Generate case.
Parameters
----------
design_vars : dict
Dictionary of design variables for which to generate values.
Returns
-------
list
list of name, value tuples for the design variables.
"""
return []
class DOEDriver(Driver):
"""
Design-of-Experiments Driver.
Attributes
----------
_name : str
The name used to identify this driver in recorded cases.
_recorders : list
List of case recorders that have been added to this driver.
"""
def __init__(self, generator=None, **kwargs):
"""
Constructor.
Parameters
----------
generator : DOEGenerator or None
The case generator.
**kwargs : dict of keyword arguments
Keyword arguments that will be mapped into the Driver options.
"""
if generator and not isinstance(generator, DOEGenerator):
if inspect.isclass(generator):
raise TypeError("DOEDriver requires an instance of DOEGenerator, "
"but a class object was found: %s"
% generator.__name__)
else:
raise TypeError("DOEDriver requires an instance of DOEGenerator, "
"but an instance of %s was found."
% type(generator).__name__)
super(DOEDriver, self).__init__(**kwargs)
if generator is not None:
self.options['generator'] = generator
self._name = ''
self._recorders = []
def _declare_options(self):
"""
Declare options before kwargs are processed in the init method.
"""
self.options.declare('generator', types=(DOEGenerator), default=DOEGenerator(),
desc='The case generator. If default, no cases are generated.')
self.options.declare('parallel', default=False, types=(bool, int), lower=0,
desc='True or number of cases to run in parallel. '
'If True, cases will be run on all available processors. '
'If an integer, each case will get COMM.size/<number> '
'processors and <number> of cases will be run in parallel')
def _setup_comm(self, comm):
"""
Perform any driver-specific setup of communicators for the model.
Parameters
----------
comm : MPI.Comm or <FakeComm> or None
The communicator for the Problem.
Returns
-------
MPI.Comm or <FakeComm> or None
The communicator for the Problem model.
"""
parallel = self.options['parallel']
if MPI and parallel:
self._comm = comm
if parallel == 1: # True == 1
size = 1
color = self._color = comm.rank
else:
comm_size = comm.size
size = comm_size // parallel
if comm_size != size * parallel:
                    raise RuntimeError("The number of processors is not evenly divisible by "
"the specified number of parallel cases.\n Provide a "
"number of processors that is a multiple of %d, or "
"specify a number of parallel cases that divides "
"into %d." % (parallel, comm_size))
color = self._color = comm.rank % size
model_comm = comm.Split(color)
else:
self._comm = None
model_comm = comm
return model_comm
def _set_name(self):
"""
Set the name of this DOE driver and its case generator.
Returns
-------
str
The name of this DOE driver and its case generator.
"""
generator = self.options['generator']
gen_type = type(generator).__name__.replace('Generator', '')
if gen_type == 'DOEGenerator':
self._name = 'DOEDriver' # Empty generator
else:
self._name = 'DOEDriver_' + gen_type
return self._name
def _get_name(self):
"""
Get the name of this DOE driver and its case generator.
Returns
-------
str
The name of this DOE driver and its case generator.
"""
return self._name
def run(self):
"""
Generate cases and run the model for each set of generated input values.
Returns
-------
boolean
Failure flag; True if failed to converge, False if successful.
"""
self.iter_count = 0
# set driver name with current generator
self._set_name()
if self._comm:
case_gen = self._parallel_generator
else:
case_gen = self.options['generator']
for case in case_gen(self._designvars):
self._run_case(case)
self.iter_count += 1
return False
def _run_case(self, case):
"""
Run case, save exception info and mark the metadata if the case fails.
Parameters
----------
case : list
list of name, value tuples for the design variables.
"""
metadata = {}
for dv_name, dv_val in case:
self.set_design_var(dv_name, dv_val)
with RecordingDebugging(self._name, self.iter_count, self) as rec:
try:
failure_flag, _, _ = self._problem.model._solve_nonlinear()
metadata['success'] = not failure_flag
metadata['msg'] = ''
except AnalysisError:
metadata['success'] = 0
metadata['msg'] = traceback.format_exc()
except Exception:
metadata['success'] = 0
metadata['msg'] = traceback.format_exc()
print(metadata['msg'])
# save reference to metadata for use in record_iteration
self._metadata = metadata
def _parallel_generator(self, design_vars):
"""
Generate case for this processor when running under MPI.
Parameters
----------
design_vars : dict
Dictionary of design variables for which to generate values.
Yields
------
list
list of name, value tuples for the design variables.
"""
size = self._comm.size // self.options['parallel']
color = self._color
generator = self.options['generator']
for i, case in enumerate(generator(design_vars)):
if i % size == color:
yield case
def add_recorder(self, recorder):
"""
Add a recorder to the driver.
Parameters
----------
recorder : BaseRecorder
A recorder instance.
"""
# keep track of recorders so we can flag them as parallel
# if we end up running in parallel
self._recorders.append(recorder)
super(DOEDriver, self).add_recorder(recorder)
def _setup_recording(self):
"""
Set up case recording.
"""
parallel = self.options['parallel']
if MPI and parallel:
for recorder in self._recorders:
recorder._parallel = True
# if SqliteRecorder, write cases only on procs up to the number
# of parallel DOEs (i.e. on the root procs for the cases)
if isinstance(recorder, SqliteRecorder):
if parallel is True or self._comm.rank < parallel:
recorder._record_on_proc = True
else:
recorder._record_on_proc = False
super(DOEDriver, self)._setup_recording()
def record_iteration(self):
"""
Record an iteration of the current Driver.
"""
if not self._rec_mgr._recorders:
return
# Get the data to record (collective calls that get across all ranks)
opts = self.recording_options
filt = self._filtered_vars_to_record
if opts['record_desvars']:
des_vars = self.get_design_var_values(filt['des'])
else:
des_vars = {}
if opts['record_objectives']:
obj_vars = self.get_objective_values(filt['obj'])
else:
obj_vars = {}
if opts['record_constraints']:
con_vars = self.get_constraint_values(filt['con'])
else:
con_vars = {}
if opts['record_responses']:
# res_vars = self.get_response_values(filt['res']) # not really working yet
res_vars = {}
else:
res_vars = {}
model = self._problem.model
sys_vars = {}
in_vars = {}
outputs = model._outputs
inputs = model._inputs
views = outputs._views
views_in = inputs._views
sys_vars = {name: views[name] for name in outputs._names if name in filt['sys']}
if self.recording_options['record_inputs']:
in_vars = {name: views_in[name] for name in inputs._names if name in filt['in']}
outs = des_vars
outs.update(res_vars)
outs.update(obj_vars)
outs.update(con_vars)
outs.update(sys_vars)
data = {
'out': outs,
'in': in_vars
}
self._rec_mgr.record_iteration(self, data, self._metadata)
|
rowhit/OpenMDAO-1
|
openmdao/drivers/doe_driver.py
|
doe_driver.py
|
py
| 10,428 |
python
|
en
|
code
| null |
github-code
|
6
|
74668147066
|
import numpy as np
with open('in.txt') as f:
lines = f.read().strip().splitlines()
s = set([0])  # positions visited by the tail, encoded as complex numbers (x + y*1j)
h = 10 * [0]  # the 10 knots of the rope; h[0] is the head, h[-1] is the tail
for line in lines:
dir, cnt = line.split()
cnt = int(cnt)
for _ in range(cnt):
h[0] += {
'U': -1j,
'D': 1j,
'R': 1,
'L': -1
}[dir]
for i in range(9):
if abs(h[i + 1] - h[i]) < 2:
break
delta = h[i] - h[i + 1]
h[i + 1] += np.sign(delta.real) + 1j * np.sign(delta.imag)
s.add(h[-1])
print(len(s))
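# Note (added): positions are stored as complex numbers so that a 2D step is a
# single addition, and each trailing knot chases the knot ahead of it by moving
# at most one unit per axis via np.sign on the real and imaginary parts of the offset.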
|
dionyziz/advent-of-code
|
2022/9/9b.py
|
9b.py
|
py
| 482 |
python
|
en
|
code
| 8 |
github-code
|
6
|
26238931261
|
#! /usr/bin/env python
'''
A script that will compare two Dawn IMG files.
'''
import sys
import os
import os.path
import dawn
def main(argv=None):
'''
Receives two directory names from the command line and compares the
data areas of the matching Dawn image files they contain.
'''
if argv is None:
argv = sys.argv
directory_name1 = argv[1]
directory_name2 = argv[2]
compare_directories(directory_name1, directory_name2)
def compare_directories(directory1, directory2):
'''
Compares two directories of dawn image files.
Both directories must have the same structure.
'''
dirlist1 = get_dir_list(directory1)
dirlist2 = get_dir_list(directory2)
dirdict1 = dict(dirlist1)
dirdict2 = dict(dirlist2)
merged_dict = merge_dicts(dirdict1, dirdict2)
compare_contents(directory1, directory2, merged_dict)
def strip_version(filename):
return filename[:-5]
def merge_dicts(dirdict1, dirdict2):
result = {}
for dirname in list(dirdict1) + list(dirdict2):  # dict views cannot be concatenated with + on Python 3
if dirname in dirdict1 and dirname in dirdict2:
filedict1 = dict([(strip_version(x), x) for x in dirdict1[dirname]])
filedict2 = dict([(strip_version(x), x) for x in dirdict2[dirname]])
result[dirname] = dict([
(filedict1[x], filedict2[x])
for x in filedict1.keys()
if x in filedict1 and x in filedict2
])
return result
def compare_contents(directory1, directory2, dirdict):
for dirname in dirdict.keys():
filepairs = dirdict[dirname]
for filename1 in filepairs.keys():
filename2 = filepairs[filename1]
file1 = os.path.join(directory1, dirname, filename1)
file2 = os.path.join(directory2, dirname, filename2)
if os.path.exists(file1) and os.path.exists(file2):
dawn.compare_files(file1, file2)
elif os.path.exists(file1):
print(file1 + " <- " + file2)
elif os.path.exists(file2):
print(file1 + " -> " + file2)
else:
print("No files exist")
def is_dawn(filename):
return filename.lower().endswith('.img')
def get_dir_list(directory):
walkresult = [(dirpath, [x for x in filenames if is_dawn(x)]) for dirpath, _, filenames in os.walk(directory)]
return [(dirpath.replace(directory, '') , filenames) for dirpath, filenames in walkresult if filenames]
if __name__ == '__main__':
sys.exit(main())
|
sbn-psi/data-tools
|
dawn/dawndirdiff.py
|
dawndirdiff.py
|
py
| 2,520 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12688443618
|
# https://leetcode.com/problems/reverse-linked-list/
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def reverseList_(self, head: Optional[ListNode]) -> ListNode: # input as list
# empty head
if len(head) <= 1:
return head
# first element in head
first_node = ListNode(val = head[0], next=None)
prev_node = first_node
list_val = [first_node.val]
# len(head) > 1
if len(head) > 1:
for i in range(1, len(head)):
curr_node = ListNode(val = head[i], next=None)
list_val.append(curr_node.val)
prev_node.next = curr_node
prev_node = curr_node
# # traverse forward
# next_node = first_node.next
# while next_node != None:
# # print("Next: ", next_node.val)
# next_node = next_node.next
# traverse reverse
return list_val[::-1]
def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]: # input as Listnode; only works on leetcode
prev = None
curr = head
while curr:
nxt = curr.next
curr.next = prev
prev = curr
curr = nxt
return prev
solved = Solution()
# print(solved.reverseList_(head = [1,2,3,4,5]))
# print(solved.reverseList_(head = [1,2]))
# print(solved.reverseList_(head = [-1]))
# print(solved.reverseList_(head = []))
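# --- Example (added; not from the original file): a tiny round-trip check for
# reverseList. The helpers below are hypothetical and exist only to build a
# linked list from a Python list and read it back.
def _to_linked(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

def _to_list(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

# print(_to_list(solved.reverseList(_to_linked([1, 2, 3, 4, 5]))))  # expected: [5, 4, 3, 2, 1]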
|
zvovov/competitive_coding
|
leetcode/neetcode_150/reverse_linked_list.py
|
reverse_linked_list.py
|
py
| 1,637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6770936001
|
'''
Write a Python program to calculate the area of regular polygon.
Input number of sides: 4
Input the length of a side: 25
The area of the polygon is: 625
'''
from math import *
n = int(input())
length = int(input())
perimeter = n * length
apothem = length / (2 * tan(pi / n))
area = (apothem * perimeter) / 2
print(round(area))
|
AigerimKubeyeva/pp2
|
Week4/Lab4/math/3.py
|
3.py
|
py
| 313 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70435413308
|
import re
import emoji
def preprocess_string(text):
"""
Preprocesses the given text.
:param text: str
:return: str
"""
# remove emoji first
no_emoticon = ''
for char in text:
if char not in emoji.UNICODE_EMOJI:
no_emoticon += char
# split on punctuation
no_punctuation = re.split(r'([!,?]+)|([.]+)|([,]+)|(["])|([\'])|([&]+)|([(]+)|([)]+)|([~]+)|([♡]+)|([☆,★]+)',
no_emoticon.strip())
no_punctuation_text = []
for string in no_punctuation:
if (string == '') or (string is None): continue
no_punctuation_text.append(string)
no_punctuation_text = ' '.join(no_punctuation_text)
# split off stand-alone Korean jamo (consonants/vowels) and digits
split_char = re.split(r'([ㄱ-ㅣ0-9]+)', no_punctuation_text.strip())
split_char = ' '.join(split_char)
# split off jamo clusters that often appear on their own in Korean (e.g. ㅎㅎ, ㅠㅠ, ㅋㅋ)
split_char = re.split(r'([ㅎ]{2,})|([ㅜ,ㅠ]{2,})|([ㅗ]+)|([ㅋ,ㄱ,ㄲ]{2,})|\s+', split_char.strip())
final_text = []
for string in split_char:
if (string == '') or (string is None): continue
final_text.append(string)
return ' '.join(final_text)
|
teammatmul/project-purifier
|
purifier/preprocess.py
|
preprocess.py
|
py
| 1,254 |
python
|
ko
|
code
| 78 |
github-code
|
6
|
37601085068
|
from sqlwrapper import gensql, dbget, dbput
import json
import datetime
def HOTEL_FD_POST_UPDATE_CheckinGuestArrivals(request):
d = request.json
res_id = d.get("Res_id")
unique_id = d.get("Res_unique_id")
pf_id = d.get("pf_id")
a = {}
RES_Log_Time = datetime.datetime.utcnow()+datetime.timedelta(hours=5, minutes=30)
RES_Log_Time = RES_Log_Time.time().strftime("%H:%M:%S")
print(RES_Log_Time)
RES_Log_Date = datetime.datetime.utcnow().date()
print(RES_Log_Date)
RES_Log_Date = str(RES_Log_Date)
arrival = dbget("select res_arrival, res_adults,res_room from reservation.res_reservation where res_id = '"+res_id+"' and pf_id = '"+pf_id+"' and res_unique_id = '"+unique_id+"'")
arrival = json.loads(arrival)
print(arrival)
print(arrival[0]['res_arrival'],type(arrival[0]['res_arrival']))
today_arrival = (arrival[0]['res_arrival'])
adult = arrival[0]['res_adults']
room = arrival[0]['res_room']
print(room,type(room))
print(today_arrival)
if RES_Log_Date == today_arrival:
p = {}
p['res_id'] = res_id
p['res_unique_id'] = unique_id
sql_value = gensql('select','room_management.rm_queue_room','rm_queue',p)
sql_value = json.loads(sql_value)
if len(sql_value) != 0:
psql = dbput("delete from room_management.rm_queue_room where res_id = '"+res_id+"' and res_unique_id = '"+unique_id+"'")
print(psql)
else:
pass
e = {}
e['Res_id'] = res_id
e['pf_id'] = pf_id
e['res_unique_id'] = unique_id
a['Res_guest_status'] = "checkin"
sql_value = gensql('update','reservation.res_reservation',a,e)
print(sql_value)
res_id = e.get("Res_id")
Emp_Id = '121'
Emp_Firstname = "daisy"
s = {}
s['Emp_Id'] = Emp_Id
s['Emp_Firstname'] = Emp_Firstname
s['RES_Log_Date'] = RES_Log_Date
s['RES_Log_Time'] = RES_Log_Time
s['RES_Action_Type'] = "Checkin a guest"
s['RES_Description'] = "Checked in a guest"
s['Res_id'] = res_id
sql_value = gensql('insert','reservation.res_activity_log',s)
fo_status = "occupied"
res_status = "checkin"
sql_value = dbput("update room_management.rm_room_list set rm_fo_status = '"+fo_status+"',rm_reservation_status = '"+res_status+"',rm_fo_person = "+str(adult)+" where rm_room in ("+str(room)+")")
print(sql_value)
alertcount = json.loads(dbget("select count(*) from reservation.res_alert where res_id = '"+str(res_id)+"' \
and res_unique_id = '"+str(unique_id)+"'"))
print(alertcount)
if alertcount[0]['count'] !=0:
alertvalue = json.loads(dbget("select * from reservation.res_alert where res_id = '"+str(res_id)+"' \
and res_unique_id = '"+str(unique_id)+"'"))
return(json.dumps({'Status': 'Success', 'StatusCode': '200', 'alertvalue':alertvalue,'Return': 'Alert Got Successfully','ReturnCode':'AGS'}, sort_keys=True, indent=4))
else:
return(json.dumps({'Status': 'Success', 'StatusCode': '200','Return': 'Record Updated Successfully','ReturnCode':'RUS'}, sort_keys=True, indent=4))
else:
return(json.dumps({'Status': 'Success', 'StatusCode': '200','Return': 'Checkin a Today Guest arrivals only','ReturnCode':'CTG'}, sort_keys=True, indent=4))
|
infocuittesting/hotel360-second-version
|
HOTEL_FD_POST_UPDATE_CheckinGuestArrivals.py
|
HOTEL_FD_POST_UPDATE_CheckinGuestArrivals.py
|
py
| 3,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2501424452
|
from os import listdir
from PIL import Image
# directory with the source pictures
DIR = 'pic'
# list the images in the source directory
img_list = listdir(DIR)
# read the compression ratio and convert it to a JPEG quality value
sh_ent = int(input("Shakal ratio (compress ratio):"))
sh = 100 - sh_ent
#work with image
for filename in img_list:
outname = "out/" + filename
filename = "pic/" + filename
print(filename)
img = Image.open(filename)
#save with compress
img.save(outname, "JPEG", quality=sh)
|
vakarianplay/Pic_tools
|
shakal (compress)/shak.py
|
shak.py
|
py
| 505 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20423188987
|
#Give the Big-O performance of the following code fragment:
def findRepeated(L):
"""
returns True if some element of L appears more than once, False if all elements are distinct
"""
n=len(L)
for i in range(n):
for j in range(i+1, n):
if L[i]==L[j]:
return True
return False
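# Answer (added): the nested loops compare each pair of elements at most once,
# roughly n*(n-1)/2 comparisons in the worst case, so the fragment runs in
# O(n^2) time with O(1) extra space.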
|
tsaoalbert/test.tensor.flow
|
1.weekend.asymptotic,stack.queue.deque.recursion.sorting/t.py
|
t.py
|
py
| 277 |
python
|
en
|
code
| 0 |
github-code
|
6
|
650532287
|
#! /bin/python
import os
import sys
import json
import luigi
import numpy as np
import nifty.tools as nt
import nifty
import nifty.graph.rag as nrag
from vigra.analysis import relabelConsecutive
from elf.segmentation.clustering import mala_clustering, agglomerative_clustering
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
#
# Agglomerate Tasks
#
# TODO it would be nice to be able to change the block shape compared to ws task
# so that we can agglomerate block boundaries.
# However, I am not sure how this interacts with the id-offsets, so haven't
# implemented this yet.
class AgglomerateBase(luigi.Task):
""" Agglomerate base class
"""
task_name = 'agglomerate'
src_file = os.path.abspath(__file__)
# input and output volumes
input_path = luigi.Parameter()
input_key = luigi.Parameter()
output_path = luigi.Parameter()
output_key = luigi.Parameter()
have_ignore_label = luigi.BoolParameter()
dependency = luigi.TaskParameter()
def requires(self):
return self.dependency
@staticmethod
def default_task_config():
# parameter:
# use_mala_agglomeration: whether to use thresholding based mala agglomeration
# or element number based agglomerative clustering
# threshold: threshold up to which to agglomerate (mala) or fraction of nodes
# after agglomeration (agglomerative clustering)
# size_regularizer: size regularizer in agglomerative clustering (wardness)
# invert_inputs: do we need to invert the inputs?
# offsets: offsets for affinities, set to None for boundaries
config = LocalTask.default_task_config()
config.update({'use_mala_agglomeration': True, 'threshold': .9,
'size_regularizer': .5, 'invert_inputs': False,
'offsets': None})
return config
def clean_up_for_retry(self, block_list):
super().clean_up_for_retry(block_list)
# TODO remove any output of failed blocks because it might be corrupted
def run_impl(self):
# get the global config and init configs
shebang, block_shape, roi_begin, roi_end = self.global_config_values()
self.init(shebang)
# get shape and make block config
shape = vu.get_shape(self.input_path, self.input_key)
if len(shape) == 4:
shape = shape[1:]
# load the agglomerate config
config = self.get_task_config()
# update the config with input and output paths and keys
# as well as block shape
config.update({'input_path': self.input_path, 'input_key': self.input_key,
'output_path': self.output_path, 'output_key': self.output_key,
'block_shape': block_shape, 'have_ignore_label': self.have_ignore_label})
if self.n_retries == 0:
block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end)
else:
block_list = self.block_list
self.clean_up_for_retry(block_list)
self._write_log('scheduling %i blocks to be processed' % len(block_list))
n_jobs = min(len(block_list), self.max_jobs)
# prime and run the jobs
self.prepare_jobs(n_jobs, block_list, config)
self.submit_jobs(n_jobs)
# wait till jobs finish and check for job success
self.wait_for_jobs()
self.check_jobs(n_jobs)
class AgglomerateLocal(AgglomerateBase, LocalTask):
"""
Agglomerate on local machine
"""
pass
class AgglomerateSlurm(AgglomerateBase, SlurmTask):
"""
Agglomerate on slurm cluster
"""
pass
class AgglomerateLSF(AgglomerateBase, LSFTask):
"""
Agglomerate on lsf cluster
"""
pass
#
# Implementation
#
def _agglomerate_block(blocking, block_id, ds_in, ds_out, config):
fu.log("start processing block %i" % block_id)
have_ignore_label = config['have_ignore_label']
use_mala_agglomeration = config.get('use_mala_agglomeration', True)
threshold = config.get('threshold', 0.9)
size_regularizer = config.get('size_regularizer', .5)
invert_inputs = config.get('invert_inputs', False)
offsets = config.get('offsets', None)
bb = vu.block_to_bb(blocking.getBlock(block_id))
# load the segmentation / output
seg = ds_out[bb]
# check if this block is empty
if np.sum(seg) == 0:
fu.log_block_success(block_id)
return
# load the input data
ndim_in = ds_in.ndim
if ndim_in == 4:
assert offsets is not None
assert len(offsets) <= ds_in.shape[0]
bb_in = (slice(0, len(offsets)),) + bb
input_ = vu.normalize(ds_in[bb_in])
else:
assert offsets is None
input_ = vu.normalize(ds_in[bb])
if invert_inputs:
input_ = 1. - input_
id_offset = int(seg[seg != 0].min())
# relabel the segmentation
_, max_id, _ = relabelConsecutive(seg, out=seg, keep_zeros=True, start_label=1)
seg = seg.astype('uint32')
# construct rag
rag = nrag.gridRag(seg, numberOfLabels=max_id + 1,
numberOfThreads=1)
# extract edge features
if offsets is None:
edge_features = nrag.accumulateEdgeMeanAndLength(rag, input_, numberOfThreads=1)
else:
edge_features = nrag.accumulateAffinityStandartFeatures(rag, input_, offsets,
numberOfThreads=1)
edge_features, edge_sizes = edge_features[:, 0], edge_features[:, -1]
uv_ids = rag.uvIds()
# set edges to ignore label to be maximally repulsive
if have_ignore_label:
ignore_mask = (uv_ids == 0).any(axis=1)
edge_features[ignore_mask] = 1
# build undirected graph
n_nodes = rag.numberOfNodes
graph = nifty.graph.undirectedGraph(n_nodes)
graph.insertEdges(uv_ids)
if use_mala_agglomeration:
node_labels = mala_clustering(graph, edge_features,
edge_sizes, threshold)
else:
node_ids, node_sizes = np.unique(seg, return_counts=True)
if node_ids[0] != 0:
node_sizes = np.concatenate([np.array([0]), node_sizes])
n_stop = int(threshold * n_nodes)
node_labels = agglomerative_clustering(graph, edge_features,
node_sizes, edge_sizes,
n_stop, size_regularizer)
# relabel the clustering result to consecutive labels
node_labels, max_id, _ = relabelConsecutive(node_labels, start_label=1, keep_zeros=True)
fu.log("reduced number of labels from %i to %i" % (n_nodes, max_id + 1))
# project node labels back to segmentation
seg = nrag.projectScalarNodeDataToPixels(rag, node_labels, numberOfThreads=1)
seg = seg.astype('uint64')
# add offset back to segmentation
seg[seg != 0] += id_offset
ds_out[bb] = seg
# log block success
fu.log_block_success(block_id)
def agglomerate(job_id, config_path):
fu.log("start processing job %i" % job_id)
fu.log("reading config from %s" % config_path)
with open(config_path, 'r') as f:
config = json.load(f)
# read the input config
input_path = config['input_path']
input_key = config['input_key']
shape = list(vu.get_shape(input_path, input_key))
if len(shape) == 4:
shape = shape[1:]
block_shape = list(config['block_shape'])
block_list = config['block_list']
# read the output config
output_path = config['output_path']
output_key = config['output_key']
# get the blocking
blocking = nt.blocking([0, 0, 0], shape, block_shape)
# submit blocks
with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
ds_in = f_in[input_key]
assert ds_in.ndim in (3, 4)
ds_out = f_out[output_key]
assert ds_out.ndim == 3
for block_id in block_list:
_agglomerate_block(blocking, block_id, ds_in, ds_out, config)
# log success
fu.log_job_success(job_id)
if __name__ == '__main__':
path = sys.argv[1]
assert os.path.exists(path), path
job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
agglomerate(job_id, path)
|
constantinpape/cluster_tools
|
cluster_tools/watershed/agglomerate.py
|
agglomerate.py
|
py
| 8,389 |
python
|
en
|
code
| 32 |
github-code
|
6
|
10623814818
|
from asyncio import sleep
from discord import Forbidden
from discord.ext import commands
from Utils.domain_tester import get_domain_embed
from Utils.file_tester import get_file_embed
class DmCommands(commands.Cog, name="Dm Commands"):
"""
Cog including all Commands that are dm only
"""
def __init__(self, b):
self.b = b
print("Dm Commands succesfully added to the bot!")
@commands.command(name="check",
help="Takes given Input and runs a test over it. Only Dm Channels. Accepts URLs",
brief="Checks Input", aliases=["test"])
async def check(self, ctx, *arg):
if ctx.guild is not None:
try:
await ctx.message.delete()
await ctx.author.send("Only DM Available")
except Forbidden:
await ctx.reply("Only DM Available! Warning! The Above message might be milicious. "
"Dont click the file/url until you trust it! (for some reason i cant delete it)")
return
if not arg and not ctx.message.attachments:  # *arg is a (possibly empty) tuple, never None
await ctx.send("Missing an url")
return
if ctx.message.attachments:
await ctx.reply("Starting testing of files. This takes some time")
for i in ctx.message.attachments:
msgn = await ctx.reply("Stand by...")
await msgn.edit(content=None, embed=await get_file_embed(i, ctx))
await sleep(30)
if len(arg) > 0:
domain = arg[0]
await ctx.reply(embed=get_domain_embed(domain, ctx))
|
veni-vidi-code/VirusTotalDiscordBot
|
Cogs/DmCommands.py
|
DmCommands.py
|
py
| 1,634 |
python
|
en
|
code
| 3 |
github-code
|
6
|
5285437188
|
from ...robot import Robot
from stt_watson.SttWatsonLogListener import SttWatsonLogListener
from recording.Record import Record
from watson_client.Client import Client
from utils.SignalHandler import SignalHandler
import threading
import signal
import os
class WatsonRobot(Robot):
def __init__(self, config, speaker, actions):
super(WatsonRobot, self).__init__(config, speaker, actions)
config['audio-chunk'] = 8000
config['audio-rate'] = 44100
config['channels'] = 1
self.listeners = []
sttWatsonLogListener = SttWatsonLogListener()
self.listeners.append(sttWatsonLogListener)
self.stopper = threading.Event()
self.record = Record(config, self.stopper)
self.workers = [self.record]
self.watsonClient = Client(config)
self.handler = SignalHandler(self.stopper, self.workers)
signal.signal(signal.SIGINT, self.handler)
def name(self):
return 'Watson'
def listen(self):
audioFd, writer = os.pipe()
self.record.setWriter(writer)
self.record.start()
self.watsonClient.setListeners(self.listeners)
self.watsonClient.startStt(audioFd)
|
lowdev/alfred
|
robot/stt/watson/watson.py
|
watson.py
|
py
| 1,199 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23932735079
|
import torch
from torch import nn
from torch.autograd import Variable
import numpy as np
from util import get_data
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torch.optim import Adam
from variables import *
from matplotlib import pyplot as plt
class MnistRegression(object):
def __init__(self, train_data, test_data):
self.train_data = train_data
self.test_data = test_data
self.model = self.MnistModel()
class MnistModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(
in_features=input_shape,
out_features=output_shape
)
def forward(self, x):
x = x.reshape(-1, input_shape)
x = self.linear(x)
x = F.log_softmax(x, dim=1)
return x
def loss_fnc(self, Ypred, Y):
return F.cross_entropy(Ypred, Y)
def optimizer(self, learning_rate=0.1):
return Adam(self.model.parameters(), lr=learning_rate)
def evaluate(self, Y, Ypred):
P = torch.argmax(Ypred, dim=1).numpy()
Y = Y.numpy()
return np.sum(Y == P)
def train(self, num_epochs=100):
opt = self.optimizer()
total_train_loss = []
total_test_loss = []
for i in range(1,num_epochs+1):
n_correct = 0
n_total = 0
for X, Y in self.train_data:
Y = Y.to(dtype=torch.int64)
Ypred = self.model(X)
loss = self.loss_fnc(Ypred, Y)
loss.backward() # calculate gradients
total_train_loss.append(loss.item())
n_correct += self.evaluate(Y, Ypred)
n_total += batch_size
opt.step() # update parameters using the calculated gradients
opt.zero_grad() # reset gradients to avoid accumulating them across batches
train_acc = round(n_correct/n_total, 3)
with torch.no_grad():
n_correct = 0
n_total = 0
for X, Y in self.test_data:
Y = Y.to(dtype=torch.int64)
Ypred = self.model(X)
loss = self.loss_fnc(Ypred, Y)
total_test_loss.append(loss.item())
n_correct += self.evaluate(Y, Ypred)
n_total += batch_size
test_acc = round(n_correct/n_total, 3)
print("Train Acc : {} Test Acc : {}".format(train_acc, test_acc))
plt.plot(total_train_loss, label='Train loss')
plt.plot(total_test_loss , label='Test loss')
plt.legend()
plt.show()
if __name__ == "__main__":
train_data, test_data = get_data()
model = MnistRegression(train_data, test_data)
model.train()
|
1zuu/Pytroch-Examples
|
Mnist/mnist_regression.py
|
mnist_regression.py
|
py
| 2,842 |
python
|
en
|
code
| 2 |
github-code
|
6
|
72151412028
|
from python_celery_worker.services.db import db_engine
def update_task(id):
"""
Update the task status in the database
:param id: primary key of the task to mark as completed
:return:
"""
conn = db_engine.connect()
conn.execute(
'update tasks set status = %s, message = %s, updated_at = NOW() where id = %s',
'completed',
'Updated by celery worker!',
id
)
|
fraserreed/blog-samples
|
laravel-tasks-celery-worker/python_celery_worker/python_celery_worker/services/db_tasks.py
|
db_tasks.py
|
py
| 378 |
python
|
en
|
code
| 9 |
github-code
|
6
|
40071040492
|
from collections import Counter
class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
# an anagram is a string with the same character histogram
res = []
lp = len(p) -1
ls = len(s)
pCount = Counter(p)
mCount = Counter(s[:lp]) # counts of the first lp characters, s[0..lp-1]
for i in range(lp, ls):
mCount[s[i]]+=1
if mCount == pCount:
res.append(i-lp)
mCount[s[i-lp]]-=1
if mCount[s[i-lp]] == 0:
del mCount[s[i-lp]]
return res
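# Example (added): Solution().findAnagrams("cbaebabacd", "abc") returns [0, 6],
# the start indices of the anagrams "cba" and "bac".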
|
lucy9215/leetcode-python
|
438_findAllAnagramsInAString.py
|
438_findAllAnagramsInAString.py
|
py
| 619 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31653842077
|
#!/usr/bin/env python3
""" Problem 8.7 in CtCI book
"""
def permute(my_str):
_permute("", my_str)
def _permute(so_far, remaining):
if len(remaining) == 0:
print(so_far)
else:
for i in range(len(remaining)):
following = so_far + remaining[i]
rest = remaining[:i] + remaining[i+1:]
_permute(following, rest)
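# Example (added): permute("abc") prints the six permutations, one per line:
# abc, acb, bac, bca, cab, cba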
|
ilee38/practice-python
|
coding_problems/CTCI_recursion_dp/perms_no_dups.py
|
perms_no_dups.py
|
py
| 338 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1112499487
|
""" VirtualMachineHandler provides remote access to VirtualMachineDB
The following methods are available in the Service interface:
- insertInstance
- declareInstanceSubmitted
- declareInstanceRunning
- instanceIDHeartBeat
- declareInstanceHalting
- getInstancesByStatus
- declareInstancesStopping
- getUniqueID( instanceID ) return cloud manager uniqueID from VMDIRAC instanceID
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
from subprocess import Popen, PIPE
import six
# DIRAC
from DIRAC import gLogger, S_ERROR, S_OK
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
# VMDIRAC
from VMDIRAC.WorkloadManagementSystem.DB.VirtualMachineDB import VirtualMachineDB
from VMDIRAC.Security import VmProperties
from VMDIRAC.Resources.Cloud.Utilities import STATE_MAP
from VMDIRAC.Resources.Cloud.ConfigHelper import getVMTypeConfig, getVMTypes
from VMDIRAC.Resources.Cloud.EndpointFactory import EndpointFactory
from VMDIRAC.WorkloadManagementSystem.Utilities.Utils import getProxyFileForCE
__RCSID__ = '$Id$'
# This is a global instance of the VirtualMachineDB class
gVirtualMachineDB = False
def initializeVirtualMachineManagerHandler(_serviceInfo):
global gVirtualMachineDB
gVirtualMachineDB = VirtualMachineDB()
haltStalledInstances()
checkStalledInstances()
if gVirtualMachineDB._connected:
gThreadScheduler.addPeriodicTask(60 * 15, checkStalledInstances)
return S_OK()
return S_ERROR()
def haltStalledInstances():
result = gVirtualMachineDB.getInstancesByStatus('Stalled')
if not result['OK']:
return result
uList = []
for image in result['Value']:
uList += result['Value'][image]
stallingList = []
for uID in uList:
result = gVirtualMachineDB.getInstanceID(uID)
if not result['OK']:
continue
stallingList.append(result['Value'])
return haltInstances(stallingList)
def getCEInstances(siteList=None, ceList=None, vo=None):
result = getVMTypes(siteList=siteList, ceList=ceList, vo=vo)
if not result['OK']:
return S_ERROR('Failed to get images from the CS')
imageDict = result['Value']
ceList = []
for site in imageDict:
for ce in imageDict[site]:
result = EndpointFactory().getCE(site, ce)
if not result['OK']:
continue
ceList.append((site, ce, result['Value']))
nodeDict = {}
for site, ceName, ce in ceList:
result = ce.getVMNodes()
if not result['OK']:
continue
for node in result['Value']:
if not node.name.startswith('DIRAC'):
continue
ip = (node.public_ips[0] if node.public_ips else 'None')
nodeState = node.state.upper() if not isinstance(node.state, six.integer_types) else STATE_MAP[node.state]
nodeDict[node.id] = {"Site": site,
"CEName": ceName,
"NodeName": node.name,
"PublicIP": ip,
"State": nodeState}
return S_OK(nodeDict)
def checkStalledInstances():
"""
To avoid stalling instances consuming resources at cloud endpoint,
attempts to halt the stalled list in the cloud endpoint
"""
result = gVirtualMachineDB.declareStalledInstances()
if not result['OK']:
return result
stallingList = result['Value']
return haltInstances(stallingList)
def stopInstance(site, endpoint, nodeID):
result = getVMTypeConfig(site, endpoint)
if not result['OK']:
return result
ceParams = result['Value']
ceFactory = EndpointFactory()
result = ceFactory.getCEObject(parameters=ceParams)
if not result['OK']:
return result
ce = result['Value']
result = ce.stopVM(nodeID)
return result
def createEndpoint(uniqueID):
result = gVirtualMachineDB.getEndpointFromInstance(uniqueID)
if not result['OK']:
return result
site, endpoint = result['Value'].split('::')
result = getVMTypeConfig(site, endpoint)
if not result['OK']:
return result
ceParams = result['Value']
ceFactory = EndpointFactory()
result = ceFactory.getCEObject(parameters=ceParams)
return result
def haltInstances(vmList):
"""
Common haltInstances for Running(from class VirtualMachineManagerHandler) and
Stalled(from checkStalledInstances periodic task) to Halt
"""
failed = {}
successful = {}
for instanceID in vmList:
instanceID = int(instanceID)
result = gVirtualMachineDB.getUniqueID(instanceID)
if not result['OK']:
gLogger.error('haltInstances: on getUniqueID call: %s' % result['Message'])
continue
uniqueID = result['Value']
result = createEndpoint(uniqueID)
if not result['OK']:
gLogger.error('haltInstances: on createEndpoint call: %s' % result['Message'])
continue
endpoint = result['Value']
# Get proxy to be used to connect to the cloud endpoint
authType = endpoint.parameters.get('Auth')
if authType and authType.lower() in ['x509', 'voms']:
siteName = endpoint.parameters['Site']
ceName = endpoint.parameters['CEName']
gLogger.verbose("Getting cloud proxy for %s/%s" % (siteName, ceName))
result = getProxyFileForCE(endpoint)
if not result['OK']:
continue
endpoint.setProxy(result['Value'])
result = endpoint.stopVM(uniqueID)
if result['OK']:
gVirtualMachineDB.recordDBHalt(instanceID, 0)
successful[instanceID] = True
else:
failed[instanceID] = result['Message']
return S_OK({"Successful": successful, "Failed": failed})
def getPilotOutput(pilotRef):
if not pilotRef.startswith('vm://'):
return S_ERROR('Invalid pilot reference %s' % pilotRef)
# Get the VM public IP
diracID, nPilot = os.path.basename(pilotRef).split(':')
result = gVirtualMachineDB.getUniqueIDByName(diracID)
if not result['OK']:
return result
uniqueID = result['Value']
result = gVirtualMachineDB.getInstanceID(uniqueID)
if not result['OK']:
return result
instanceID = result['Value']
result = gVirtualMachineDB.getInstanceParameter("PublicIP", instanceID)
if not result['OK']:
return result
publicIP = result['Value']
op = Operations()
privateKeyFile = op.getValue('/Cloud/PrivateKey', '')
diracUser = op.getValue('/Cloud/VMUser', '')
ssh_str = '%s@%s' % (diracUser, publicIP)
cmd = ['ssh', '-i', privateKeyFile, ssh_str,
"cat /etc/joboutputs/vm-pilot.%s.log" % nPilot]
inst = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output, stderr = inst.communicate()
if inst.returncode:
return S_ERROR('Failed to get pilot output: %s' % stderr)
else:
return S_OK(output)
class VirtualMachineManagerHandler(RequestHandler):
def initialize(self):
credDict = self.getRemoteCredentials()
self.rpcProperties = credDict['properties']
@staticmethod
def __logResult(methodName, result):
'''
Method that writes to log error messages
'''
if not result['OK']:
gLogger.error('%s: %s' % (methodName, result['Message']))
types_getCEInstances = [(list, type(None)), (list, type(None)), six.string_types]
def export_getCEInstances(self, siteList, ceList, vo):
if not siteList:
siteList = None
return getCEInstances(siteList=siteList, ceList=ceList, vo=vo)
types_stopInstance = [six.string_types, six.string_types, six.string_types]
def export_stopInstance(self, site, endpoint, nodeID):
return stopInstance(site, endpoint, nodeID)
types_getPilotOutput = [six.string_types]
def export_getPilotOutput(self, pilotReference):
return getPilotOutput(pilotReference)
types_checkVmWebOperation = [six.string_types]
def export_checkVmWebOperation(self, operation):
"""
return true if rpc has VM_WEB_OPERATION
"""
if VmProperties.VM_WEB_OPERATION in self.rpcProperties:
return S_OK('Auth')
return S_OK('Unauth')
types_insertInstance = [six.string_types, six.string_types, six.string_types, six.string_types, six.string_types]
def export_insertInstance(self, uniqueID, imageName, instanceName, endpoint, runningPodName):
"""
Check Status of a given image
Will insert a new Instance in the DB
"""
res = gVirtualMachineDB.insertInstance(uniqueID, imageName, instanceName, endpoint, runningPodName)
self.__logResult('insertInstance', res)
return res
types_getUniqueID = [six.string_types]
def export_getUniqueID(self, instanceID):
"""
return cloud manager uniqueID from VMDIRAC instanceID
"""
res = gVirtualMachineDB.getUniqueID(instanceID)
self.__logResult('getUniqueID', res)
return res
types_getUniqueIDByName = [six.string_types]
def export_getUniqueIDByName(self, instanceName):
"""
return cloud manager uniqueID from VMDIRAC name
"""
result = gVirtualMachineDB.getUniqueIDByName(instanceName)
self.__logResult('getUniqueIDByName', result)
return result
types_setInstanceUniqueID = [six.integer_types, six.string_types]
def export_setInstanceUniqueID(self, instanceID, uniqueID):
"""
Check Status of a given image
Will insert a new Instance in the DB
"""
res = gVirtualMachineDB.setInstanceUniqueID(instanceID, uniqueID)
self.__logResult('setInstanceUniqueID', res)
return res
types_declareInstanceSubmitted = [six.string_types]
def export_declareInstanceSubmitted(self, uniqueID):
"""
After submission of the instance the Director should declare the new Status
"""
res = gVirtualMachineDB.declareInstanceSubmitted(uniqueID)
self.__logResult('declareInstanceSubmitted', res)
return res
types_declareInstanceRunning = [six.string_types, six.string_types]
def export_declareInstanceRunning(self, uniqueID, privateIP):
"""
Declares an instance Running and sets its associated info (uniqueID, publicIP, privateIP)
Returns S_ERROR if:
- instanceName does not have a "Submitted" entry
- uniqueID is not unique
"""
gLogger.info('Declare instance Running uniqueID: %s' % (uniqueID))
if VmProperties.VM_RPC_OPERATION not in self.rpcProperties:
return S_ERROR("Unauthorized declareInstanceRunning RPC")
publicIP = self.getRemoteAddress()[0]
gLogger.info('Declare instance Running publicIP: %s' % (publicIP))
res = gVirtualMachineDB.declareInstanceRunning(uniqueID, publicIP, privateIP)
self.__logResult('declareInstanceRunning', res)
return res
types_instanceIDHeartBeat = [six.string_types, float, six.integer_types,
six.integer_types, six.integer_types]
def export_instanceIDHeartBeat(self, uniqueID, load, jobs,
transferredFiles, transferredBytes, uptime=0):
"""
Insert the heart beat info from a running instance
It checks the status of the instance and the corresponding image
Declares "Running" the instance and the image
It returns S_ERROR if the status is not OK
"""
if VmProperties.VM_RPC_OPERATION not in self.rpcProperties:
return S_ERROR("Unauthorized declareInstanceIDHeartBeat RPC")
try:
uptime = int(uptime)
except ValueError:
uptime = 0
res = gVirtualMachineDB.instanceIDHeartBeat(uniqueID, load, jobs,
transferredFiles, transferredBytes, uptime)
self.__logResult('instanceIDHeartBeat', res)
return res
types_declareInstancesStopping = [list]
def export_declareInstancesStopping(self, instanceIdList):
"""
Declares "Stopping" the instance because the Delete button of Browse Instances
The instanceID is the VMDIRAC VM id
When next instanceID heat beat with stopping status on the DB the VM will stop the job agent and terminates properly
It returns S_ERROR if the status is not OK
"""
if VmProperties.VM_WEB_OPERATION not in self.rpcProperties:
return S_ERROR("Unauthorized VM Stopping")
for instanceID in instanceIdList:
gLogger.info('Stopping DIRAC instanceID: %s' % (instanceID))
result = gVirtualMachineDB.getInstanceStatus(instanceID)
if not result['OK']:
self.__logResult('declareInstancesStopping on getInstanceStatus call: ', result)
return result
state = result['Value']
gLogger.info('Stopping DIRAC instanceID: %s, current state %s' % (instanceID, state))
if state == 'Stalled':
result = gVirtualMachineDB.getUniqueID(instanceID)
if not result['OK']:
self.__logResult('declareInstancesStopping on getUniqueID call: ', result)
return result
uniqueID = result['Value']
result = gVirtualMachineDB.getEndpointFromInstance(uniqueID)
if not result['OK']:
self.__logResult('declareInstancesStopping on getEndpointFromInstance call: ', result)
return result
endpoint = result['Value']
result = self.export_declareInstanceHalting(uniqueID, 0)
elif state == 'New':
result = gVirtualMachineDB.recordDBHalt(instanceID, 0)
self.__logResult('declareInstanceHalted', result)
else:
# this is only applied to allowed transitions
result = gVirtualMachineDB.declareInstanceStopping(instanceID)
self.__logResult('declareInstancesStopping: on declareInstanceStopping call: ', result)
return result
types_declareInstanceHalting = [six.string_types, float]
def export_declareInstanceHalting(self, uniqueID, load):
"""
Insert the heart beat info from a halting instance
The VM has the uniqueID, which is the Cloud manager VM id
Declares "Halted" the instance and the image
It returns S_ERROR if the status is not OK
"""
if VmProperties.VM_RPC_OPERATION not in self.rpcProperties:
return S_ERROR("Unauthorized declareInstanceHalting RPC")
endpoint = gVirtualMachineDB.getEndpointFromInstance(uniqueID)
if not endpoint['OK']:
self.__logResult('declareInstanceHalting', endpoint)
return endpoint
endpoint = endpoint['Value']
result = gVirtualMachineDB.declareInstanceHalting(uniqueID, load)
if not result['OK']:
if "Halted ->" not in result["Message"]:
self.__logResult('declareInstanceHalting on change status: ', result)
return result
else:
gLogger.info("Bad transition from Halted to something, will assume Halted")
haltingList = []
instanceID = gVirtualMachineDB.getInstanceID(uniqueID)
if not instanceID['OK']:
self.__logResult('declareInstanceHalting', instanceID)
return instanceID
instanceID = instanceID['Value']
haltingList.append(instanceID)
return haltInstances(haltingList)
types_getInstancesByStatus = [six.string_types]
def export_getInstancesByStatus(self, status):
"""
Get dictionary of Image Names with InstanceIDs in given status
"""
res = gVirtualMachineDB.getInstancesByStatus(status)
self.__logResult('getInstancesByStatus', res)
return res
types_getAllInfoForUniqueID = [six.string_types]
def export_getAllInfoForUniqueID(self, uniqueID):
"""
Get all the info for a UniqueID
"""
res = gVirtualMachineDB.getAllInfoForUniqueID(uniqueID)
self.__logResult('getAllInfoForUniqueID', res)
return res
types_getInstancesContent = [dict, (list, tuple),
six.integer_types, six.integer_types]
def export_getInstancesContent(self, selDict, sortDict, start, limit):
"""
Retrieve the contents of the DB
"""
res = gVirtualMachineDB.getInstancesContent(selDict, sortDict, start, limit)
self.__logResult('getInstancesContent', res)
return res
types_getHistoryForInstanceID = [six.integer_types]
def export_getHistoryForInstanceID(self, instanceId):
"""
Retrieve the contents of the DB
"""
res = gVirtualMachineDB.getHistoryForInstanceID(instanceId)
self.__logResult('getHistoryForInstanceID', res)
return res
types_getInstanceCounters = [six.string_types, dict]
def export_getInstanceCounters(self, groupField, selDict):
"""
Retrieve the contents of the DB
"""
res = gVirtualMachineDB.getInstanceCounters(groupField, selDict)
self.__logResult('getInstanceCounters', res)
return res
types_getHistoryValues = [int, dict]
def export_getHistoryValues(self, averageBucket, selDict, fields2Get=None, timespan=0):
"""
Retrieve the contents of the DB
"""
if not fields2Get:
fields2Get = []
res = gVirtualMachineDB.getHistoryValues(averageBucket, selDict, fields2Get, timespan)
self.__logResult('getHistoryValues', res)
return res
types_getRunningInstancesHistory = [int, int]
def export_getRunningInstancesHistory(self, timespan, bucketSize):
"""
Retrieve number of running instances in each bucket
"""
res = gVirtualMachineDB.getRunningInstancesHistory(timespan, bucketSize)
self.__logResult('getRunningInstancesHistory', res)
return res
types_getRunningInstancesBEPHistory = [int, int]
def export_getRunningInstancesBEPHistory(self, timespan, bucketSize):
"""
Retrieve number of running instances in each bucket by End-Point History
"""
res = gVirtualMachineDB.getRunningInstancesBEPHistory(timespan, bucketSize)
self.__logResult('getRunningInstancesBEPHistory', res)
return res
types_getRunningInstancesByRunningPodHistory = [int, int]
def export_getRunningInstancesByRunningPodHistory(self, timespan, bucketSize):
"""
Retrieve number of running instances in each bucket by Running Pod History
"""
res = gVirtualMachineDB.getRunningInstancesByRunningPodHistory(timespan, bucketSize)
self.__logResult('getRunningInstancesByRunningPodHistory', res)
return res
types_getRunningInstancesByImageHistory = [int, int]
def export_getRunningInstancesByImageHistory(self, timespan, bucketSize):
"""
Retrieve number of running instances in each bucket by Image History
"""
res = gVirtualMachineDB.getRunningInstancesByImageHistory(timespan, bucketSize)
self.__logResult('getRunningInstancesByImageHistory', res)
return res
|
DIRACGrid/VMDIRAC
|
VMDIRAC/WorkloadManagementSystem/Service/VirtualMachineManagerHandler.py
|
VirtualMachineManagerHandler.py
|
py
| 18,285 |
python
|
en
|
code
| 6 |
github-code
|
6
|
19019856386
|
def nextInt(): return int(input())
def nextInts(): return map(int, input().split())
def nextIntList(): return list(nextInts())
MOD = 10**5
def calc(x):
y = 0
x_c = x
while x_c > 0:
x_c, y_c = divmod(x_c, 10)
y += y_c
return (x + y) % MOD
def solve():
N, K = nextInts()
start_list = [N]
routine_list = list()
count = 0
routine_flag = False
# iterate until a cycle ("routine") is found or K steps are reached
while True:
N = calc(N)
count += 1
if count == K:
break
if routine_flag:
if N == routine_list[0]:
break
routine_list.append(N)
else:
if N in start_list:
routine_flag = True
routine_list.append(N)
start_list.append(N)
if count == K:
print(N)
return
# use the cycle length to jump directly to step K
rem = K - count
pos = rem % len(routine_list)
print(routine_list[pos])
solve()
|
minheibis/atcoder
|
questions/typical90/058/myans_00.py
|
myans_00.py
|
py
| 951 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17424247870
|
from setuptools import setup
import dorm
with open("README.md", "r") as readme:
long_description = readme.read()
setup(
name="dorm",
version=dorm.version,
description="A tiny SQLite ORM for Python.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Dan Watson",
author_email="[email protected]",
url="https://github.com/dcwatson/dorm",
license="MIT",
py_modules=["dorm"],
entry_points={"console_scripts": ["dorm=dorm:main"]},
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Database",
],
)
|
dcwatson/dorm
|
setup.py
|
setup.py
|
py
| 804 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40695061264
|
"""empty message
Revision ID: 41124ac6e47e
Revises: 57296b50c499
Create Date: 2014-11-30 17:08:44.396000
"""
# revision identifiers, used by Alembic.
revision = '41124ac6e47e'
down_revision = '57296b50c499'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('provider', sa.Column('address', sa.String(length=250), nullable=True))
op.add_column('provider', sa.Column('emails', sa.String(length=250), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('provider', 'emails')
op.drop_column('provider', 'address')
### end Alembic commands ###
|
StasEvseev/adminbuy
|
migrations/versions/41124ac6e47e_.py
|
41124ac6e47e_.py
|
py
| 800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12814211947
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0002_userinfo_grade'),
]
operations = [
migrations.CreateModel(
name='Events',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=80)),
('date', models.DateTimeField()),
('cost', models.DecimalField(max_digits=6, decimal_places=2)),
],
),
migrations.AddField(
model_name='userinfo',
name='balance',
field=models.DecimalField(default=0.0, max_digits=6, decimal_places=2),
preserve_default=False,
),
]
|
asp3/StudentAccounts
|
student/migrations/0003_auto_20151025_1630.py
|
0003_auto_20151025_1630.py
|
py
| 906 |
python
|
en
|
code
| 3 |
github-code
|
6
|
34862433797
|
from django import template
from django.urls import NoReverseMatch, reverse
from utilities.utils import get_viewname, prepare_cloned_fields
register = template.Library()
#
# Instance buttons
#
@register.inclusion_tag('buttons/clone.html')
def clone_button(instance):
url = reverse(get_viewname(instance, 'add'))
# Populate cloned field values
param_string = prepare_cloned_fields(instance).urlencode()
if param_string:
url = f'{url}?{param_string}'
return {
'url': url,
}
@register.inclusion_tag('buttons/edit.html')
def edit_button(instance):
viewname = get_viewname(instance, 'edit')
url = reverse(viewname, kwargs={'pk': instance.pk})
return {
'url': url,
}
@register.inclusion_tag('buttons/delete.html')
def delete_button(instance):
viewname = get_viewname(instance, 'delete')
url = reverse(viewname, kwargs={'pk': instance.pk})
return {
'url': url,
}
#
# List buttons
#
@register.inclusion_tag('buttons/add.html')
def add_button(model, action='add'):
try:
url = reverse(get_viewname(model, action))
except NoReverseMatch:
url = None
return {
'url': url,
}
@register.inclusion_tag('buttons/import.html')
def import_button(model, action='import'):
try:
url = reverse(get_viewname(model, action))
except NoReverseMatch:
url = None
return {
'url': url,
}
@register.inclusion_tag('buttons/bulk_edit.html')
def bulk_edit_button(model, action='bulk_edit', query_params=None):
try:
url = reverse(get_viewname(model, action))
if query_params:
url = f'{url}?{query_params.urlencode()}'
except NoReverseMatch:
url = None
return {
'url': url,
}
@register.inclusion_tag('buttons/bulk_delete.html')
def bulk_delete_button(model, action='bulk_delete', query_params=None):
try:
url = reverse(get_viewname(model, action))
if query_params:
url = f'{url}?{query_params.urlencode()}'
except NoReverseMatch:
url = None
return {
'url': url,
}
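# --- Usage note (added; not part of the original file): these inclusion tags are
# meant to be called from templates after loading the library, e.g.
#   {% load buttons %}
#   {% edit_button object %} {% delete_button object %}
# Each tag resolves the appropriate view name for the instance or model and
# renders the corresponding buttons/*.html template with the computed URL.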
|
Status-Page/Status-Page
|
statuspage/utilities/templatetags/buttons.py
|
buttons.py
|
py
| 2,140 |
python
|
en
|
code
| 45 |
github-code
|
6
|
171117843
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from rp_ui_harness import RequestPolicyTestCase
TEST_URL = "http://www.maindomain.test/link_1.html"
PREF_DEFAULT_ALLOW = "extensions.requestpolicy.defaultPolicy.allow"
class TestOpenInCurrentTab(RequestPolicyTestCase):
def setUp(self):
RequestPolicyTestCase.setUp(self)
self.prefs.set_pref(PREF_DEFAULT_ALLOW, False)
def test_open_in_current_tab(self):
with self.marionette.using_context("content"):
# load the test url
self.marionette.navigate(TEST_URL)
# find the URL string and its wrapping element
url_wrapper = self.marionette.find_element("id", "text_url_1")
url = url_wrapper.text
# select the URL
self.web_utils.select_element_text(url_wrapper)
with self.marionette.using_context("chrome"):
# perform right-click and entry selection
self.ctx_menu.select_entry(
"context-openlinkincurrent", url_wrapper)
tab = self.browser.tabbar.selected_tab
self.tabs.wait_until_loaded(tab)
self.assertFalse(self.redir.is_shown(),
"Following the URL didn't cause a redirect.")
self.assertEqual(tab.location, url,
"The location is correct.")
|
RequestPolicyContinued/requestpolicy
|
tests/marionette/tests/links/text_selection/test_open_in_current_tab.py
|
test_open_in_current_tab.py
|
py
| 1,514 |
python
|
en
|
code
| 253 |
github-code
|
6
|
35426908825
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
a=0.7
b=0.6
X = np.arange(0,2.4,0.2)
Y = np.arange(0,2.4,0.2)
m,p = np.meshgrid(X,Y)
mdot = np.divide(1,1+np.square(p))- np.multiply(b,m)
pdot = np.subtract(m,np.multiply(a,p))
fig, ax = plt.subplots()
q=ax.quiver(p,m,pdot,mdot)
ax.quiverkey(q,X=0.3,Y=2.4, U=5,
label='Quiver key, length = 5', labelpos='E')
ax.plot(p, np.multiply(a, p))                                  # pdot = 0 nullcline: m = a*p
ax.plot(p, np.divide(1, np.multiply(b, (1 + np.square(p)))))   # mdot = 0 nullcline: m = 1/(b*(1+p^2))
ax.set_xlabel('p')
ax.set_ylabel('m')
def dydt_autoinhib(t,y,a,b):
y1,y2=y
dy1 = 1/(1+y2**2)-b*y1
dy2 = y1-a*y2
return (dy1,dy2)
# lambda trick so we can pass the right function into the solver
dydt_params = lambda t,y: dydt_autoinhib(t,y,a,b)
solution1 = scipy.integrate.solve_ivp(dydt_params, t_span=(0,20),y0=(2,2), method='RK45')
t1_ode45 = solution1.t
m1_ode45 = solution1.y[0]
p1_ode45 = solution1.y[1]
ax.plot(p1_ode45,m1_ode45)
plt.show()
|
martinaoliver/GTA
|
ssb/m1a/numeric/Practical_full_solutions_jupyter/python_script_solutions/phase_portrait_autorinhib_20190926.py
|
phase_portrait_autorinhib_20190926.py
|
py
| 991 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32111228276
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import fftpack, signal
# FFT filter that removes only the high-frequency components
# def get_filtered_data(in_data, filter_value=0.004):
def del_high_freq(in_data, filter_value=0.004):
"""
:param in_data: time-series signal to filter
:param filter_value: frequency components above filter_value are removed
:return: the filtered signal (inverse FFT of the masked spectrum)
"""
sig_fft = fftpack.fft(in_data)
sample_freq = fftpack.fftfreq(in_data.size)
high_freq_fft = sig_fft.copy()
high_freq_fft[np.abs(sample_freq) > filter_value] = 0
filtered_data = fftpack.ifft(high_freq_fft)
return filtered_data
# FFT filter that removes both high- and low-frequency components
def del_high_and_low_freq(in_data, high_filter_value, low_filter_value):
"""
:param in_data: time-series signal to filter
:param high_filter_value: upper frequency bound; only components between low_filter_value and high_filter_value are kept
:param low_filter_value: lower frequency bound
:return: the filtered signal, plus the spectrum maximum before and after masking
"""
sig_fft = fftpack.fft(in_data)
sample_freq = fftpack.fftfreq(in_data.size)
high_freq_fft = sig_fft.copy()
low_value1 = np.max(high_freq_fft)
high_freq_fft[np.abs(sample_freq) > high_filter_value] = 0
high_freq_fft[np.abs(sample_freq) < low_filter_value] = 0
low_value2 = np.max(high_freq_fft)
filtered_data = fftpack.ifft(high_freq_fft)
return filtered_data, low_value1, low_value2
def fft(pupil_list, minu=None, quar=None):
global section_frames, time
# drop samples equal to 0 or -1 from the data
while 0 in pupil_list:
pupil_list.remove(0)
while -1 in pupil_list:
pupil_list.remove(-1)
if minu is not None:
time = minu * 1800
section_frames = len(pupil_list) // time
if quar is not None:
time = len(pupil_list) // quar
section_frames = quar
y = np.array(pupil_list)
# fft
# filtered_sig = del_high_freq(y, filter_value=0.005)  # high-frequency filtering only
filtered_sig, _, _ = del_high_and_low_freq(y, 0.0048, 0.0035)  # low- and high-frequency filtering
filtered_sig = filtered_sig.astype(np.float)
# zero-crossing point
zero_crossings = np.where(np.diff(np.sign(np.diff(filtered_sig))))[0]
zero_crossings = np.insert(zero_crossings, 0, 0)
zero_crossings = np.append(zero_crossings, len(filtered_sig) - 1)
# compute change rates per section
change_rates_list = [[] for _ in range(section_frames)]
for section in range(section_frames):
# take the zero-crossing points that fall inside this section
section_zero_crossing = zero_crossings[np.where(zero_crossings <= (section + 1) * time)]
section_zero_crossing = section_zero_crossing[np.where(section * time < section_zero_crossing)]
# compute the change rate between consecutive zero crossings
for j in range(len(section_zero_crossing) - 1):
change_rate = abs((filtered_sig[section_zero_crossing[j + 1]] - filtered_sig[section_zero_crossing[j]]) / (
section_zero_crossing[j + 1] - section_zero_crossing[j]))
change_rates_list[section].append(change_rate)
return filtered_sig, zero_crossings, section_frames, change_rates_list
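# --- Usage sketch (added; not from the original file): assuming pupil_list holds
# per-frame pupil sizes recorded at 30 fps (so minu * 1800 frames per section),
# split the recording into 3-minute sections and get the per-section change rates:
# filtered_sig, zero_crossings, n_sections, change_rates = fft(pupil_list, minu=3)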
# plot the original and FFT-filtered signals
def draw_fft_graph(y, filtered_sig, zero_crossings, section_frames, savepath, minu=None, quar=None):
global time
x = np.arange(0, len(y))
if minu is not None:
time = minu * 1800
section_frames = len(y) // time
if quar is not None:
time = len(y) // quar
section_frames = quar
fig = plt.figure(dpi=150)
# plt.figure(figsize=(6, 5))
plt.rcParams["font.family"] = 'Malgun Gothic'
plt.figure(figsize=(14, 6))
plt.plot(x, y, label='Original signal')
plt.plot(x, filtered_sig, linewidth=2, label='Filtered signal')
# plt.plot(zero_crossings, filtered_sig[zero_crossings], marker='o', color='red', linestyle='--')
plt.legend(loc='upper right')
# mark the section boundaries
for section in range(section_frames):
plt.axvline(x=section * time, ymin=0, ymax=1.0, color='r')
plt.axvline(x=(section + 1) * time, ymin=0, ymax=1.0, color='r')
# plt.xlim(0, 1800)
plt.title('동공크기 변화율')
plt.xlabel('Frame')
plt.ylabel('Pupil size')
plt.savefig(f'{savepath}')
plt.show()
# fit a quadratic trend line and save the bar-chart figure
def draw_trendline_fft(data, title, y_lim, y_label, savepath, quar = None, avg = False):
results = {}
# 추세선
x = np.arange(0, len(data))
y = []
for idx, value in enumerate(data):
y.append(value)
y = np.array(y)  # feature values for the 10 sections (e.g. blink counts)
fit = np.polyfit(x, y, 2)
a = fit[0]
b = fit[1]
c = fit[2]
fit_equation = a * np.square(x) + b * x + c
results['coeffs'] = fit.tolist()
# r-squared
p = np.poly1d(fit)
# fit values, and mean
yhat = p(x)
ybar = np.sum(y) / len(y)
ssreg = np.sum((yhat - ybar) ** 2)
sstot = np.sum((y - ybar) ** 2)
results['r-squared'] = ssreg / sstot
r_squared = str(round(results['r-squared'], 3))  # convert to string for display
a = str(round(results['coeffs'][0], 3))
b = str(round(results['coeffs'][1], 3))
c = str(round(results['coeffs'][2], 3))
# print("R 제곱값: ", round(results['r-squared'], 3))
# print("추세선: "+"Y="+a+"xX^2 + "+b+"xX + "+c)
period = ['0~3분', '3~6분', '6~9분', '9~12분', '12~15분', '15~18분', '18~21분', '21~24분', '24~27분', '27~30분', '30~33분']
plt.rcParams["font.family"] = 'Malgun Gothic'
fig = plt.figure(dpi=150)
ax = fig.add_subplot(1, 1, 1)
for idx2, value2 in enumerate(data):
ax.bar(period[idx2], value2, color='b', alpha=0.5)
ax.plot(x, fit_equation, color='r', alpha=0.5, label='Polynomial fit', linewidth=3.0)
    # ax.scatter(x, y, s=5, color='b', label='Data points')  # plot the points used for the fit
# Plotting
plt.xticks(rotation=20)
plt.title(f'{title}')
plt.ylim(0, y_lim)
plt.xlabel('구간')
plt.ylabel(f'{y_label}')
    # text placement when plotting the pupil-size change rate
if not avg:
plt.text(3.2, 0.055, "추세선: " + r'$y = $' + a + r'$x^2 + ($' + b + r'$)x + $' + c, fontdict={'size': 12})
plt.text(7.5, 0.05, r'$R^2 =$' + r_squared, fontdict={'size': 12})
    # text placement when plotting the average pupil-size change rate
else:
plt.text(3.2, 0.027, "추세선: " + r'$y = $' + a + r'$x^2 + ($' + b + r'$)x + $' + c, fontdict={'size': 12})
plt.text(7.5, 0.025, r'$R^2 =$' + r_squared, fontdict={'size': 12})
plt.tight_layout()
fig.canvas.draw()
    img = np.asarray(fig.canvas.buffer_rgba())  # rendered figure as an RGBA array (public canvas API)
spl = title.split('.')[0]
plt.savefig(f'{savepath}')
plt.imshow(img)
    plt.show()  # display the figure for a quick visual check
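# Illustrative end-to-end sketch (assumed usage; the file names, the 3-minute window and the
# y-axis limit are made up): clean the trace, band-pass filter it, then plot and summarise it.
def _example_pipeline(pupil_sizes):
    cleaned = [p for p in pupil_sizes if p not in (0, -1)]
    filtered, crossings, n_sections, rates = fft(list(cleaned), minu=3)
    draw_fft_graph(np.array(cleaned), filtered, crossings, n_sections, 'fft_result.png', minu=3)
    avg_rates = [float(np.mean(r)) if r else 0.0 for r in rates]
    draw_trendline_fft(avg_rates, 'pupil size change rate', 0.06, 'change rate', 'trendline.png')
    return avg_rates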
|
HanNayeoniee/visual-fatigue-analysis
|
analysis/fft.py
|
fft.py
|
py
| 6,952 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34958652342
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def normalize_l2(x):
"""
Expects x.shape == [N, C, H, W]
"""
norm = torch.norm(x.view(x.size(0), -1), p=2, dim=1)
norm = norm.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
return x / norm
def pair_cos_dist(x, y):
cos = nn.CosineSimilarity(dim=-1, eps=1e-6)
c = torch.clamp(1 - cos(x, y), min=0)
return c
class Feature_Targets(nn.Module):
    def __init__(self, epsilon, num_steps, step_size, data_min=-1.0, data_max=1.0, grad_sign=True, random_start=True):
super().__init__()
self.epsilon = epsilon
self.num_steps = num_steps
self.step_size = step_size
self.grad_sign = grad_sign
self.data_min = data_min
self.data_max = data_max
self.random_start = random_start
def forward(self, model, bx, by, target_bx):
"""
:param model: the classifier's forward method
:param bx: batch of images
        :param by: true labels (not used by this feature-targeted attack)
        :param target_bx: batch of target images whose features guide the perturbation
        :return: perturbed batch of images
"""
adv_bx = bx.detach().clone()
target = target_bx.detach().clone()
if self.random_start:
adv_bx += torch.zeros_like(adv_bx).uniform_(-self.epsilon, self.epsilon)
adv_bx = adv_bx.clamp(self.data_min, self.data_max)
target_feature, target_logits = model(target)
for i in range(self.num_steps):
adv_bx.requires_grad_()
with torch.enable_grad():
feature, logits = model(adv_bx)
loss = pair_cos_dist(feature, target_feature).mean()
grad = torch.autograd.grad(loss, adv_bx, only_inputs=True)[0]
if self.grad_sign:
adv_bx = adv_bx.detach() + self.step_size * torch.sign(grad.detach())
else:
grad = normalize_l2(grad.detach())
adv_bx = adv_bx.detach() + self.step_size * grad
adv_bx = torch.min(torch.max(adv_bx, bx - self.epsilon), bx + self.epsilon).clamp(self.data_min, self.data_max)
return adv_bx
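# Illustrative usage sketch (not part of the original file): the epsilon/step values are
# made up, and `model` is assumed to return a (feature, logits) tuple as in forward() above.
def _example_attack(model, images, labels, target_images):
    attack = Feature_Targets(epsilon=8 / 255, num_steps=10, step_size=2 / 255)
    return attack(model, images, labels, target_images)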
|
arthur-qiu/adv_vis
|
attack_methods/feature_targets.py
|
feature_targets.py
|
py
| 2,092 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6112251845
|
import sys
#sys.path.append('/usr/local/Cellar/opencv3/3.2.0/lib/python2.7/site-packages')
sys.path.append("/usr/local/Cellar/opencv3/3.2.0/lib/python3.5/site-packages")
import cv2
import numpy as np
import os
import random
def show_image(im):
height, width = im.shape[:2]
res = cv2.resize(im,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
cv2.imshow("Image", res)
def show_imageOrig(im):
height, width = im.shape[:2]
res = cv2.resize(im,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
cv2.imshow("ImageOrig", res)
#kernel = np.ones((3,3),np.uint8)
#cap = cv2.VideoCapture("dogs.mp4")
#ret, frame1 = cap.read()
#prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
#hsv = np.zeros_like(frame1)
#hsv[...,1] = 255
#fgbg = cv2.createBackgroundSubtractorMOG2(50, 16, False)
#fgbg.setBackgroundRatio(0.8) # frames before object becomes foreground
#fgbg.setVarInit(500) # speed of adaption of new components
#i = 0
#while(1):
# ret, frame2 = cap.read()
# ret, frame2 = cap.read()
# frame2 = cv2.GaussianBlur(frame2,(9,9),0)
# fgmask = fgbg.apply(frame2)
# fgmask = fgbg.apply(frame2,fgmask, 0)
#fgmask = cv2.dilate(fgmask,kernel,iterations = 5)
#fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
'''
^^ Line above may or may not be good
'''
#if (i > 10 and i % 2 == 0):
#cv2.imwrite(str(i) + ".png",fgmask)
# show_image(fgmask)
# k = cv2.waitKey(30) & 0xff
# if k == 27:
# break
#i += 1
#cap.release()
#cv2.destroyAllWindows()
#errorCount = 0
np.random.seed(133)
numLabels = 101
image_size_x = 240
image_size_y = 320
dataRoot = "./UCF-101/"
def processFolder(folder):
#tick = 0
#global errorCount
print(dataRoot + folder)
try:
videoFileNames = os.listdir(dataRoot + folder)
    except OSError:
print("Not a directory, moving along.")
return None, None
#i = 0
#data = np.zeros(shape=(len(videoFileNames)*1, image_size_x, image_size_y), dtype=np.float32)
#labels = np.zeros(shape=(len(videoFileNames)*1, 101), dtype=np.float32)
for videoName in videoFileNames:
#if tick < 2:
# tick = tick + 1
# continue
#tick = 0
if random.random() < 0.98:
continue
try:
print(videoName)
cap = cv2.VideoCapture(dataRoot + folder + "/" + videoName)
#ret, frame1 = cap.read()
#prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
#hsv = np.zeros_like(frame1)
#hsv[...,1] = 255
fgbg = cv2.createBackgroundSubtractorMOG2(50, 16, False)
fgbg.setBackgroundRatio(0.8) # frames before object becomes foreground
fgbg.setVarInit(500) # speed of adaption of new components
i = 0
frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
while(cap.get(cv2.CAP_PROP_POS_FRAMES) < frames - 3):
#ret, frame2 = cap.read()
ret, frame2 = cap.read()
if ret == False:
continue
show_imageOrig(frame2)
frame2 = cv2.GaussianBlur(frame2,(9,9),0)
fgmask = fgbg.apply(frame2)
fgmask = fgbg.apply(frame2,fgmask, 0)
show_image(fgmask)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
except IOError as e:
            print('Could not read:', videoName, ':', e, '- it\'s ok, skipping.')
#return data, labels
def iterData(folder):
labelNames = os.listdir(folder)
for i in range(len(labelNames)):#len(labelNames)
processFolder(labelNames[i])
iterData(dataRoot)
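# Usage note: running this module walks ./UCF-101/<action>/ via iterData(dataRoot), keeps
# roughly 2% of the videos at random (random.random() < 0.98 skips the rest), and displays
# each original frame together with the MOG2 foreground mask computed from its blurred copy;
# pressing Esc (key code 27) skips to the next clip.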
|
ltecot/humanMotionClassification
|
img_processing.py
|
img_processing.py
|
py
| 3,562 |
python
|
en
|
code
| 4 |
github-code
|
6
|
11552601944
|
import csv
import getopt, sys
from moviepy.editor import VideoFileClip, concatenate_videoclips
folder = '/Videos/'
# file name of the video and config file
event = '20221002 PREECNLBVA'
output_file = None # Create a file for each segment
#output_file = 'check' # Compile the clips with a check flag
output_file = 'highlight' # Compile the clips with a highlight flag
#output_file = '20221002 EYF Segments.mp4' # compile all segments in the config file
# --input
mp4_file = folder + '/' + event + '.mp4'
# --config
config_file = folder + '/' + event + '.csv'
# --output
def return_filename(desc, prefix, suffix):
return str(prefix or '') + str(desc or '') + str(suffix or '') + '.mp4'
def main():
global folder
global event
global output_file
global mp4_file
global config_file
argumentList = sys.argv[1:]
options = "i:c:o:"
long_options = ["input=","config=","output="]
try:
arguments, values = getopt.getopt(argumentList, options, long_options)
for currentArgument, currentValue in arguments:
if currentArgument in ("-i", "--input"):
mp4_file = currentValue
# print ("File: ", currentValue)
if currentArgument in ("-o", "--output"):
output_file = currentValue
if currentArgument in ("-c", "--config"):
config_file = currentValue
# print ("Config: ", currentValue)
except getopt.error as err:
print (str(err))
if mp4_file is None:
# If mp4 file is not provided, use config file name
mp4_file = config_file.replace(".csv", ".mp4")
# Read the config file
rows = csv.DictReader(open(config_file))
first = True
for row in rows:
if row['source'] == 'video':
min = int(row['min'])
sec = int(row['sec'])
if min > 0:
start_seconds = min * 60 + sec
else:
start_seconds = sec
length_in_sec = int(row['length_in_sec'])
end_seconds = start_seconds + length_in_sec
if start_seconds and end_seconds:
if output_file is None:
# MODE = Split the segments into separate files
clip = VideoFileClip(mp4_file).subclip(start_seconds, end_seconds)
file_name = return_filename(row['desc'], row['filename_prefix'], row['filename_suffix'])
clip.write_videofile(file_name)
else:
# MODE = Concatenate the segments into a single file
if (output_file == 'check' and row['filename_suffix'] == 'check') or \
(output_file == 'highlight' and row['filename_suffix'] == 'highlight') or \
(output_file != 'check' and output_file != 'highlight'):
# Save only if check or highlight or if all clips
if first:
final_clip = VideoFileClip(mp4_file).subclip(start_seconds, end_seconds)
first = False
else:
clip = VideoFileClip(mp4_file).subclip(start_seconds, end_seconds)
final_clip = concatenate_videoclips([final_clip,clip])
else:
print(f'Error with config settings for: {row}')
if output_file:
# Save the final clip
if output_file == 'check':
output_file = event + ' check.mp4'
elif output_file == 'highlight':
output_file = event + ' highlight.mp4'
final_clip.write_videofile(output_file)
if __name__ == "__main__":
main()
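# Example invocation (paths are illustrative). --output accepts 'check', 'highlight', or an
# explicit .mp4 name to concatenate segments; when output_file is None each segment is written
# to its own file named from the config's desc/filename_prefix/filename_suffix columns:
#   python create_highlight_videos.py -i "/Videos/20221002 PREECNLBVA.mp4" \
#       -c "/Videos/20221002 PREECNLBVA.csv" -o highlight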
|
jordiyeh/video-cut
|
create_highlight_videos.py
|
create_highlight_videos.py
|
py
| 3,744 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4234376251
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import minimize
from sklearn.metrics import mean_squared_error as mse
def SIR():
def prediction(beta, gamma, population, i0, r0, d0, time_predict):
def SIR_model(y, t, beta, gamma, population):
s, i, r = y
dSdt = -beta * s * i / population
dIdt = beta * s * i / population - gamma * i
dRdt = gamma * i
return [dSdt, dIdt, dRdt]
s0 = population - i0 - r0 - d0
y_0 = [s0, i0, r0]
sol = odeint(SIR_model, y_0, time_predict, args=(beta, gamma, population))
sol = np.transpose(sol)
return sol
def error_model(point, cases, population, infected_0, recovered_0, dead_0):
beta, gamma = point
def SIR_model(y, t, beta, gamma, population):
s, i, r = y
dSdt = -beta * s * i / population
dIdt = beta * s * i / population - gamma * i
dRdt = gamma * i
return [dSdt, dIdt, dRdt]
suscepted_0 = population - infected_0 - recovered_0 - dead_0
y0 = [suscepted_0, infected_0, recovered_0]
sol = odeint(SIR_model, y0, np.arange(1, len(cases) + 1), args=(beta, gamma, population))
sol = np.transpose(sol)
error = mse(cases, sol[1])
return error
def trainer(cases, population, infected_0, recovered_0, dead_0):
optimal = minimize(error_model, np.array([0.001, 0.001]), args=(cases, population, infected_0, recovered_0, dead_0),
method='L-BFGS-B', bounds=[(0.000001, 1.0), (0.000001, 1.0)])
beta, gamma = optimal.x
return beta, gamma
def plot(s, i, r, initials_state, city_name, period_predict, time_predict, population):
plt.figure()
        plt.title('Projeção do total de habitantes suscetíveis, infectados e recuperados em ' + city_name + '/' +
initials_state[0], fontsize=20)
plt.xlabel('Meses', fontsize=15)
        plt.xticks(np.linspace(15, period_predict + 15, 7)[:-1], ('Abril', 'Maio', 'Junho', 'Julho', 'Agosto', 'Setembro'))
plt.ylabel('Número de habitantes', fontsize=15)
plt.yticks(np.arange(0, population, step=population * 0.03))
        plt.plot(time_predict, s, label='Suscetíveis')
plt.plot(time_predict, i, label='Infectados')
plt.plot(time_predict, r, label='Recuperados')
plt.legend(loc='center left', bbox_to_anchor=(1.002, 0.7), fontsize=14)
plt.rcParams["figure.figsize"] = (20, 10)
plt.show()
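# Usage sketch (assumed): as written, SIR() only defines trainer/prediction/plot and neither
# calls nor returns them, so they would need to be exposed first. A plausible flow would be:
#   beta, gamma = trainer(cases, population, infected_0, recovered_0, dead_0)
#   s, i, r = prediction(beta, gamma, population, infected_0, recovered_0, dead_0, time_predict)
#   plot(s, i, r, initials_state, city_name, period_predict, time_predict, population)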
|
FBWeimer/Plague-Doctor
|
Plague Doctor/plaguedoctor/__init__.py
|
__init__.py
|
py
| 2,604 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42483900439
|
import pandas as pd
import networkx as nx
import json
hierarchy_df = pd.read_csv('hierarchy_table.csv', index_col=0, dtype=str)
graph_network = nx.from_pandas_edgelist(
hierarchy_df,
source='Parent',
target='Child',
)
json_graph = json.dumps(graph_network, default=nx.node_link_data)
# Using a JSON string
with open('json_graph.json', 'w') as outfile:
outfile.write(json_graph)
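# Round-trip sketch (assumed): nx.node_link_graph rebuilds the graph that nx.node_link_data
# serialised above.
def _load_graph(path='json_graph.json'):
    with open(path) as infile:
        return nx.node_link_graph(json.load(infile))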
|
diegopintossi/graph_network
|
graph_network.py
|
graph_network.py
|
py
| 398 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34607190454
|
# Compute similarities from the node vector representations produced by the node2vec algorithm
import math
import os
import time
import pandas as pd
import numpy as np
# Get the vector representation of every node
def getNodeVector(fileEMB, raw_dataset_path):
nodeVecDict = {}
raw_dataset = np.loadtxt(raw_dataset_path, delimiter=',')
m, n = raw_dataset.shape
pro_file = pd.read_csv(fileEMB)
pro = np.array(pro_file["name"])
vec = np.array(pro_file["node2vec"])
for i in range(len(pro)):
every_nodeVecList = []
node_vector_list = raw_dataset[i, 0:n - 2]
for j in range(len(node_vector_list)):
every_nodeVecList.append(float(node_vector_list[j]))
nodeVecDict[pro[i]] = every_nodeVecList
return nodeVecDict
def caculateNodeVecSim(nodeVecDict,path):
NodeVecSimDict = {}
    filePathogenic = path + "/dataset/uploads/train.txt"  # known host proteins
    fileCandidate = path + "/dataset/uploads/candidateNode_degree.txt"  # candidate host proteins
with open(filePathogenic, 'r') as f1:
pathogenic = f1.readlines()
with open(fileCandidate, 'r') as f2:
candidate = f2.readlines()
for i in candidate:
simCalcute = []
every_candidate = i.split("\t")[0]
for every_pathogenic in pathogenic:
every_pathogenic = every_pathogenic.strip("\n")
sim = math.sqrt(sum([(a - b) ** 2 for (a, b) in zip(nodeVecDict[every_candidate], nodeVecDict[every_pathogenic])]))
simCalcute.append(sim)
maxSim = max(simCalcute)
print(maxSim)
NodeVecSimDict[every_candidate] = maxSim
return NodeVecSimDict
def save(NodeVecSimDict):
fileSim = "../dataset/result/Candidate_NodeVecSim.txt"
with open(fileSim, "w") as fw:
for key, value in NodeVecSimDict.items():
fw.write(key + "\t" + str(value) + "\n")
fw.close()
if __name__ == '__main__':
path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
NodeVecTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print("开始获取节点的向量表示:" + NodeVecTime)
fileEMB = path + "/dataset/temp/COVID_Node2Vec.csv"
raw_dataset_path = path + '/dataset/minmax_out/raw_minmax.out'
nodeVecDict = getNodeVector(fileEMB, raw_dataset_path)
simNodeVecTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print("基于向量表示开始计算相似度:" + simNodeVecTime)
NodeVecSimDict = caculateNodeVecSim(nodeVecDict,path)
save(NodeVecSimDict)
|
LittleBird120/DiseaseGenePredicition
|
DiseaseGenePredicition/20210316Disease_gene_prediction_algorithm_COVID-19/algorithm/simNode2vec.py
|
simNode2vec.py
|
py
| 2,528 |
python
|
en
|
code
| 0 |
github-code
|
6
|