seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt
---|---|---|---|---|---|---|---|---|---|---|---|---
23230602110 |
import ui
from PIL import Image as ImageP
import io
import random
mainWindow = ui.View()
mainWindow.name = 'Image Conversion'
mainWindow.background_color = 'white'
mainWindow.width = 700 #ui.get_screen_size().width
mainWindow.height = 700 #ui.get_screen_size().height
def pil2ui(pil_img):
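    # Convert a PIL image to a Pythonista ui.Image by encoding it to PNG
    # bytes in an in-memory buffer and handing those bytes to ui.Image.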
with io.BytesIO() as buffer:
pil_img.save(buffer, format='PNG')
return ui.Image.from_data(buffer.getvalue())
path = "../Images/"
# ImageP.LANCZOS replaces the ANTIALIAS constant that was removed in Pillow 10.
quarter = pil2ui(ImageP.open(path + "quarter.png").resize((70,70), ImageP.LANCZOS))
dime = pil2ui(ImageP.open(path + "dime.png").resize((50,50), ImageP.LANCZOS))
nickel = pil2ui(ImageP.open(path + "nickel.png").resize((60,60), ImageP.LANCZOS))
penny = pil2ui(ImageP.open(path + "penny.png").resize((55,55), ImageP.LANCZOS))
picture1 = ui.ImageView()
picture1.width = 70
picture1.height = 70
picture1.image = quarter
mainWindow.add_subview(picture1)
#mainWindow.present('fullscreen')
mainWindow.present('sheet')
| WhittlinRich/python | ImageConversion.py | ImageConversion.py | py | 955 | python | en | code | 0 | github-code | 6 |
71935407547 |
import os
import numpy as np
from sklearn.metrics import confusion_matrix
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from glob import glob
import ffmpeg
import warnings
warnings.filterwarnings('ignore')
classes = {'простой': 0,
           'вынужденная': 1,
           'сварка': 2}
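# Label translations (Russian): 'простой' = idle, 'вынужденная' = forced downtime, 'сварка' = welding.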
df_nine_left = pd.read_csv('nine_hour_left.csv')
df_nine_right = pd.read_csv('nine_hour_right.csv')
df_five_left = pd.read_csv('five_hour_left.csv')
df_five_right = pd.read_csv('five_hour_right.csv')
ann_train = []
ann_test = []
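# Each annotation line is "<wav path> <class id>". Rows are split by timestamp:
# for the nine-hour recordings the first two hours (time <= 7200 s) become the
# test set; for the five-hour recordings everything from 13845 s onward is test.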
for _, r in tqdm(df_nine_left.iterrows()):
file_result = f'train_5s/{r["fname"]}'
line = file_result + ' ' + str(classes[r['label']]) + '\n'
if r['time'] > 7200:
ann_train.append(line)
else:
ann_test.append(line)
for _, r in tqdm(df_nine_right.iterrows()):
file_result = f'train_5s/{r["fname"]}'
line = file_result + ' ' + str(classes[r['label']]) + '\n'
if r['time'] > 7200:
ann_train.append(line)
else:
ann_test.append(line)
for _, r in tqdm(df_five_left.iterrows()):
file_result = f'train_5s/{r["fname"]}'
line = file_result + ' ' + str(classes[r['label']]) + '\n'
if r['time'] < 13845:
ann_train.append(line)
else:
ann_test.append(line)
for _, r in tqdm(df_five_right.iterrows()):
file_result = f'train_5s/{r["fname"]}'
line = file_result + ' ' + str(classes[r['label']]) + '\n'
if r['time'] < 13845:
ann_train.append(line)
else:
ann_test.append(line)
with open('ann_train.txt', 'w') as train_file, open('ann_test.txt', 'w') as test_file:
train_file.writelines(ann_train)
test_file.writelines(ann_test)
| vitalymegabyte/koblik_family | train_scripts/split_train_val.py | split_train_val.py | py | 1,745 | python | en | code | 0 | github-code | 6 |
25987362528 |
import argparse
import dataclasses
import subprocess
import sys
from datetime import datetime
from typing import List
from testcase import get_cli_base_cmd
from testcase import Result
base_cmd = get_cli_base_cmd()
@dataclasses.dataclass
class CleanupItem:
name: str
list_cmd: str
del_cmd: str
items: List = dataclasses.field(default_factory=list)
def parse_args(*args) -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Cleanup the CloudTruth environment")
parser.add_argument(
dest="needles",
nargs="*",
default=["Windows", "Linux", "macOS", "ci-cli", "testcli"],
help="Search strings to look for",
)
parser.add_argument(
"-q",
"--quiet",
dest="quiet",
action="store_true",
help="Do not show what the script is doing",
)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Detailed output")
parser.add_argument("--confirm", "--yes", dest="confirm", action="store_true", help="Skip confirmation prompt")
return parser.parse_args(*args)
def cli(cmd: str) -> Result:
updated = base_cmd + cmd.replace("'", '"') # allows this to work on Windows
start = datetime.now()
process = subprocess.run(updated, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
delta = datetime.now() - start
return Result(
return_value=process.returncode,
stdout=process.stdout.decode("us-ascii", errors="ignore").replace("\r", "").split("\n"),
stderr=process.stderr.decode("us-ascii", errors="ignore").replace("\r", "").split("\n"),
timediff=delta,
command=updated,
)
def yes_or_no(question: str) -> bool:
    reply = str(input(question + " (y/n): ")).lower().strip()
    # startswith() avoids an IndexError on empty input, and re-asking with the
    # original question keeps the prompt intact on invalid replies.
    if reply.startswith("y"):
        return True
    if reply.startswith("n"):
        return False
    return yes_or_no(question)
def cloudtruth_cleanup(*args):
args = parse_args(*args)
if not args.needles:
print("No search strings provided")
return -1
# reset verbosity flags if they conflict with each other
if args.quiet and args.verbose:
args.quiet = False
args.verbose = False
result = cli("config curr -x")
print(result.command)
print(result.out())
# define a set of elements to cleanup
elements = [
CleanupItem(name="projects", list_cmd="proj tree", del_cmd="proj del -y"),
CleanupItem(name="environments", list_cmd="env tree", del_cmd="env del -y"),
CleanupItem(name="users", list_cmd="user ls", del_cmd="user del -y"),
CleanupItem(name="groups", list_cmd="group ls", del_cmd="group del -y"),
CleanupItem(name="invitations", list_cmd="user invite ls", del_cmd="user invite del -y"),
CleanupItem(name="types", list_cmd="types tree", del_cmd="types del -y"),
CleanupItem(name="pushes", list_cmd="action push ls", del_cmd="action push del -y"),
CleanupItem(name="imports", list_cmd="action import ls", del_cmd="action import del -y"),
]
for elem in elements:
if not args.quiet:
print(f"Looking for matching {elem.name}...")
result = cli(elem.list_cmd)
        # delete in reverse order so children listed by the `tree` commands are removed before their parents
for line in reversed(result.stdout):
item = line.strip()
            if any(needle in item for needle in args.needles):
                elem.items.append(item)
    if not any(x.items for x in elements):
types = [x.name for x in elements]
type_list = ", ".join(types[:-1])
type_list += f", or {types[-1]}"
search_list = ", ".join(args.needles)
print(f"No {type_list} items found matching: {search_list}")
return 0
if not (args.confirm and args.quiet):
print("\n\nFound matches: ")
for elem in elements:
if not elem.items:
print(f" {elem.name}: None")
else:
print(f" {elem.name}:")
for item in elem.items:
print(f" {item}")
print("")
if not args.confirm and not yes_or_no("Delete the above items"):
print("No items deleted")
return 0
all_deleted = True
for elem in elements:
if not elem.items:
continue
for item in elem.items:
result = cli(elem.del_cmd + f" '{item}'")
if args.verbose:
print(result.command)
if result.return_value != 0:
all_deleted = False
print(f"Failed to delete {elem.name} {item}")
if args.verbose:
print(result.err())
if all_deleted:
print("Deleted all items")
return 0
if __name__ == "__main__":
sys.exit(cloudtruth_cleanup(sys.argv[1:]))
| cloudtruth/cloudtruth-cli | integration-tests/cleanup.py | cleanup.py | py | 4,892 | python | en | code | 4 | github-code | 6 |
22432578680 |
import pandas as pd
from openpyxl.styles import PatternFill
def rgb_to_hex(rgb):
return '%02x%02x%02x' % rgb
def darken(hex_code, shade):
shade = shade/10
    # TODO: convert the shade number so that e.g. 9 means 10% darker
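    # As written, shade/10 is the fraction of each channel that is kept,
    # so shade=9 keeps 90% of the original value (i.e. 10% darker).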
RGB = tuple(int(hex_code[i:i + 2], 16) for i in (0, 2, 4))
r = RGB[0]
g = RGB[1]
b = RGB[2]
    # scale each channel down by the shade fraction
darken_R = int(round(r * shade))
darken_G = int(round(g * shade))
darken_B = int(round(b * shade))
rgbTuple = (darken_R, darken_G, darken_B) #tuple of RGB values to convert to Hex
return rgb_to_hex(rgbTuple)
def lighten(hex_code, shade):
shade = shade/10
RGB = tuple(int(hex_code[i:i + 2], 16) for i in (0, 2, 4))
r = RGB[0]
g = RGB[1]
b = RGB[2]
    # move each channel toward 255 by the shade fraction
lighten_R = int(round(r + ((255 - r) * shade), 0))
lighten_G = int(round(g + ((255 - g) * shade), 0))
lighten_B = int(round(b + ((255 - b) * shade), 0))
rgbTuple = (lighten_R, lighten_G, lighten_B) #tuple of RGB values to convert to Hex
return rgb_to_hex(rgbTuple)
def create_lighten_df(colorList):
df = pd.DataFrame(columns=['Original Color Hex', 'Change %', 'Color Hex', 'Direction', 'Color'])
for item in colorList:
for num in range(11):
original_color = "#" + item
change_percent = str(num) + '0%'
color_hex = "#" + lighten(hex_code=item, shade=num)
direction = 'Lighter'
df = pd.concat([df,
pd.DataFrame.from_dict({'Original Color Hex': [original_color],
'Change %': [change_percent],
'Color Hex': [color_hex],
'Direction': [direction],
'Color' : ""})],
ignore_index=True)
return df
def create_darken_df(colorList):
df = pd.DataFrame(columns=['Original Color Hex', 'Change %', 'Color Hex', 'Direction', 'Color'])
for item in colorList:
for num in range(11):
original_color = "#" + item
change_percent = str(num) + '0%'
color_hex = "#" + darken(hex_code=item, shade=num)
direction = 'Darken'
df = pd.concat([df,
pd.DataFrame.from_dict({'Original Color Hex': [original_color],
'Change %': [change_percent],
'Color Hex': [color_hex],
'Direction': [direction],
'Color' : ""})],
ignore_index=True)
print(df)
percDict = {
'100%': "00%",
'90%': "10%",
'80%': "20%",
'70%': "30%",
'60%': "40%",
'50%': "50%",
'40%': "60%",
'30%': "70%",
'20%': "80%",
'10%': "90%",
'00%': "100%"
}
df['Change %'] = df['Change %'].apply(lambda x: percDict[x])
df['Original Color Hex'] = df['Original Color Hex'].astype('category')
df = df.sort_index(ascending=False).reset_index(drop=True)
df['Sorting'] = df.index
sorter = ['#' + str(x) for x in colorList]
df['Original Color Hex'] = df['Original Color Hex'].cat.set_categories(sorter)
df = df.sort_values(by=['Original Color Hex', 'Sorting']).drop(columns='Sorting')
return df
###############start of work###################
# colorList is the list of hex codes that will be used to generate the report
colorList = ['CC0000', #red
'000000', #black
'4d4d4f', #dark gray
'969697', #medium gray
'dddddd', # light gray
'f3f3f3', # ultra light gray
'f58025', # ada compliant orange
'fdb913', # ada compliant yellow
'97ca3e', # ada compliant green
'479cd6', # ada compliant blue
'1d3c6d', # ada compliant navy
'751c59' # ada compliant purple
]
lighten_df = create_lighten_df(colorList) #create a dataframe of lightened colors
darken_df = create_darken_df(colorList) # create a dataframe of darkened colors
with pd.ExcelWriter("ColorShadeReferenceWorkbook.xlsx", engine="openpyxl") as writer:
# create the "Lighten" worksheet
sheet_name = "Lighten"
# Export DataFrame content
lighten_df.to_excel(writer, sheet_name=sheet_name, index=False)
# Set column width dimensions
sheet = writer.sheets[sheet_name]# open sheet
sheet.column_dimensions['A'].width = 18
sheet.column_dimensions['B'].width = 10
sheet.column_dimensions['C'].width = 10
sheet.column_dimensions['D'].width = 10
sheet.column_dimensions['E'].width = 9
# Set background colors depending on cell values
for cell, in sheet[f'E2:E{len(lighten_df) + 1}']: # Skip header row, process as many rows as there are DataFrames
value = lighten_df["Color Hex"].iloc[cell.row - 2] #set color value to row['Color Hex'] value
value = value.strip("#")
cell.fill = PatternFill(start_color=value, end_color=value, fill_type='solid')
#create the "Darken" worksheet
sheet_name = "Darken"
# Export DataFrame content
darken_df.to_excel(writer, sheet_name=sheet_name, index=False)
sheet = writer.sheets[sheet_name]
# Set column width dimensions
sheet.column_dimensions['A'].width = 18
sheet.column_dimensions['B'].width = 10
sheet.column_dimensions['C'].width = 10
sheet.column_dimensions['D'].width = 10
sheet.column_dimensions['E'].width = 9
# Set background colors depending on cell values
for cell, in sheet[f'E2:E{len(darken_df) + 1}']: # Skip header row, process as many rows as there are DataFrames
value = darken_df["Color Hex"].iloc[cell.row - 2] #set color value to row['Color Hex'] value
value = value.strip("#") #strip #
        cell.fill = PatternFill(start_color=value, end_color=value, fill_type='solid') # fill cell with solid color
| mbgoodin/ColorShadeGenerator | main.py | main.py | py | 6,351 | python | en | code | 0 | github-code | 6 |
10929065262 |
from os import system
system('clear')
lista_membros = list()
familia = dict()
# familia = [{'nome': 'Joemar', 'idade': 60, 'cor dos olhos': 'Castanhos'},
# {'nome': 'Alessandra', 'idade': 50, 'cor dos olhos': 'Pretos'},
# {'nome': 'Jean', 'idade': 26, 'cor dos olhos': 'Pretos'},
# {'nome': 'Lara', 'idade': 22, 'cor dos olhos': 'Castanhos'},
# {'nome': 'Thalles', 'idade': 19, 'cor dos olhos': 'Castanhos'}]
for i in range(1, 6):
    print('\033[1;34mMembros e características da família\033[m')
familia.clear()
familia['nome'] = input(f'\033[1;34mPessoa {i}: \033[m')
familia['idade'] = int(input(f'\033[1;34mIdade do(a) {familia["nome"]}: \033[m'))
familia['cor dos olhos'] = input(f'\033[1;34mCor dos olhos do(a) {familia["nome"]}: \033[m')
lista_membros.append(familia)
familia = familia.copy()
system('clear')
print(f'\n\033[1;34mOs membros da família são: \033[m\033[31m{lista_membros}\033[m')
| ThallesCansi/Programacao-para-Web | 1º Bimestre/Capítulo 01 - Fundamentos da Linguagem/Exercício 1.29.py | Exercício 1.29.py | py | 999 | python | pt | code | 0 | github-code | 6 |
21397176119 |
import os
import statistics
import sys
import httpx
from dotenv import load_dotenv
load_dotenv(sys.argv[1])
url = os.environ['url']
attempts = int(os.environ.get('attempts', 100))
payload = os.environ.get('payload')
auth_header = os.environ.get('auth_header', 'X-Racetrack-Auth')
auth_token = os.environ.get('auth_token', '')
reuse_connection: bool = os.environ.get('reuse_connection', 'true').lower() in {'true', '1'}
print(f'Testing URL {url}')
print(f'attempts: {attempts}, reuse_connection (connection pool): {reuse_connection}, payload: {payload}')
durations = []
headers = {
'User-Agent': 'curl/7.81.0',
'Accept': 'application/json',
'Content-Type': 'application/json',
auth_header: auth_token,
}
client = httpx.Client()
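# Reusing a single httpx.Client keeps the underlying TCP connection alive
# between attempts; the bare httpx.post() branch below opens a fresh
# connection for every request instead.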
try:
for i in range(attempts):
if reuse_connection:
response = client.post(url, timeout=30, headers=headers, content=payload)
else:
response = httpx.post(url, timeout=30, headers=headers, content=payload)
duration = response.elapsed.total_seconds()
try:
response.raise_for_status()
except httpx.HTTPStatusError:
print(f'Response error: {response.content}')
raise
durations.append(duration)
print(f'Attempt #{i+1} - request duration: {duration*1000:.2f} ms')
except KeyboardInterrupt:
pass
finally:
client.close()
if not durations:
    print('No completed requests to summarize')
    sys.exit(1)
average = sum(durations) / len(durations)
min_duration = min(durations)
max_duration = max(durations)
median = statistics.median(durations)
print('----------')
print(f'Requests: {len(durations)}')
print(f'Median: {median*1000:.2f} ms')
print(f'Average: {average*1000:.2f} ms')
print(f'Min: {min_duration*1000:.2f} ms')
print(f'Max: {max_duration*1000:.2f} ms')
| TheRacetrack/racetrack | tests/performance/response_time_test.py | response_time_test.py | py | 1,757 | python | en | code | 27 | github-code | 6 |
73502078587 |
#from threading import Thread
from bs4 import BeautifulSoup
import pandas as pd
#import os
#import sys
from selenium import webdriver
#from selenium.webdriver.common.proxy import *
from time import gmtime, strftime, sleep
#import sqlite3
#from queue import Queue
#import re
import requests
cities_frame = pd.read_html('https://en.wikipedia.org/wiki/List_of_United_States_cities_by_population')
cities = cities_frame[4]
print(cities)
abbrs = { 'Alaska': 'AK', 'Alabama': 'AL', 'Arkansas': 'AR', 'Arizona': 'AZ', 'California': 'CA',
'Colorado': 'CO', 'Connecticut': 'CT', 'District of Columbia': 'DC', 'Delaware': 'DE',
'Florida': 'FL', 'Georgia': 'GA', 'Iowa': 'IA', 'Idaho': 'ID',
'Illinois': 'IL', 'Indiana': 'IN', 'Kansas': 'KS', 'Kentucky': 'KY',
'Louisiana': 'LA', 'Massachusetts': 'MA', 'Maryland': 'MD', 'Maine': 'ME',
'Michigan': 'MI', 'Minnesota': 'MN', 'Missouri': 'MO', 'Mississippi': 'MS',
'Montana': 'MT', 'North Carolina': 'NC', 'North Dakota': 'ND', 'Nebraska': 'NE',
'New Hampshire': 'NH', 'New Jersey': 'NJ', 'New Mexico': 'NM', 'Nevada': 'NV',
'New York': 'NY', 'Ohio': 'OH', 'Oklahoma': 'OK', 'Oregon': 'OR',
'Pennsylvania': 'PA', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD',
'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Virginia': 'VA',
'Vermont': 'VT', 'Washington': 'WA', 'Wisconsin': 'WI', 'West Virginia': 'WV',
'Wyoming': 'WY'}
states = pd.read_csv('/home/val/coding/USA-states-cities.csv')
#print(cities)
cities_list = pd.DataFrame()
def crawling(search_url, file_name):
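    # Drive a headless Chrome through up to 20 Google result pages, appending
    # the visible result URLs to file_name. The CSS class 'iUh30 bc' is
    # Google's own result-URL markup and breaks whenever Google changes it.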
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver = webdriver.Chrome(executable_path = './chromedriver', options = options)
links = open(file_name, 'a')
driver.get(search_url)
print(search_url)
for i in range(20):
soup = BeautifulSoup(driver.page_source, 'lxml')
for each in soup.find_all(class_='iUh30 bc'):
try:
print(each.text.split(' ')[0])
links.write(each.text.split(' ')[0]+'\n')
except Exception:
print(each.text)
links.write(each.text+'\n')
        try:
            driver.find_element_by_class_name('pn').click()
        except Exception:
            # no "next page" button: close the output file and browser before bailing out
            links.close()
            driver.quit()
            return
print('--------')
sleep(3)
    links.close()
    driver.quit()
#print(cities_list)
websites_frame = pd.DataFrame(columns=['state', 'city', 'link'])
#states = states.drop_duplicates(zip('City', 'Abbreviation'))
for i in range(len(cities)):
# print(states.loc[i]['City'])
# print(states.loc[i]['City'])
try:
city = cities.loc[i]['City'].split('[')[0]
except Exception:
city = cities.loc[i]['City']
try:
state = abbrs[str(cities.loc[i]['State[c]'])]
except Exception:
continue
search_url = f'https://www.google.com/search?q=File+a+complaint+{state}+{city}'
#print('-----', search_url, '-----')
try:
state = str(cities.loc[i]['State[c]']).strip()
except Exception:
continue
crawling(search_url, 'file_companies_sites.csv')
search_url = f'https://www.google.com/search?q=File+a+complaint+{state}+{city}+attorney+general'
crawling(search_url, 'file_companies_sites.csv')
search_url = f'https://www.google.com/search?q=local+state+agencies+{state}+{city}'
crawling(search_url, 'local_agencies.csv')
search_url = f'https://www.google.com/search?q=consumer+protection+{state}+{city}'
crawling(search_url, 'file_companies_sites.csv')
| erelin6613/crawler | file_companies.py | file_companies.py | py | 3,325 | python | en | code | 0 | github-code | 6 |
24301910796 |
import os
import re
from glob import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython import get_ipython
POSSIBLE_LABELS = 'yes no up down left right on off stop go silence unknown'.split()
id2name = {i: name for i, name in enumerate(POSSIBLE_LABELS)}
name2id = {name: i for i, name in id2name.items()}
len(id2name)
def load_data(data_dir):
""" Return 2 lists of tuples:
[(class_id, user_id, path), ...] for train
[(class_id, user_id, path), ...] for validation
"""
# Just a simple regexp for paths with three groups:
# prefix, label, user_id
    pattern = re.compile(r"(.+\/)?(\w+)\/([^_]+)_.+wav")
all_files = glob(os.path.join(data_dir, 'train/audio/*/*wav'))
with open(os.path.join(data_dir, 'train/validation_list.txt'), 'r') as fin:
validation_files = fin.readlines()
valset = set()
for entry in validation_files:
r = re.match(pattern, entry)
if r:
valset.add(r.group(3))
possible = set(POSSIBLE_LABELS)
train, val = [], []
for entry in all_files:
r = re.match(pattern, entry)
if r:
label, uid = r.group(2), r.group(3)
if label == '_background_noise_':
label = 'silence'
if label not in possible:
label = 'unknown'
label_id = name2id[label]
sample = (label, label_id, uid, entry)
if uid in valset:
val.append(sample)
else:
train.append(sample)
print('There are {} train and {} val samples'.format(len(train), len(val)))
columns_list = ['label', 'label_id', 'user_id', 'wav_file']
train_df = pd.DataFrame(train, columns = columns_list)
valid_df = pd.DataFrame(val, columns = columns_list)
return train_df, valid_df
def process_wav_file(fname):
wav = read_wav_file(fname)
L = 16000 # 1 sec
if len(wav) > L:
i = np.random.randint(0, len(wav) - L)
wav = wav[i:(i+L)]
elif len(wav) < L:
rem_len = L - len(wav)
i = np.random.randint(0, len(silence_data) - rem_len)
silence_part = silence_data[i:(i+L)]
j = np.random.randint(0, rem_len)
silence_part_left = silence_part[0:j]
silence_part_right = silence_part[j:rem_len]
wav = np.concatenate([silence_part_left, wav, silence_part_right])
specgram = stft(wav, 16000, nperseg = 400, noverlap = 240, nfft = 512, padded = False, boundary = None)
phase = np.angle(specgram[2]) / np.pi
amp = np.log1p(np.abs(specgram[2]))
return np.stack([phase, amp], axis = 2)
train_df, valid_df = load_data('./train/')
train_df.head()
train_df.label.value_counts()
silence_files = train_df[train_df.label == 'silence']
train_df = train_df[train_df.label != 'silence']
from scipy.io import wavfile
from scipy.signal import stft
def read_wav_file(fname):
_, wav = wavfile.read(fname)
wav = wav.astype(np.float32) / np.iinfo(np.int16).max
return wav
silence_data = np.concatenate([read_wav_file(x) for x in silence_files.wav_file.values])
# last batch will contain padding, so remove duplicates
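# NOTE: test_paths and classes are not defined in this excerpt; they are
# expected to come from a prediction step over the test set that is not shown.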
submission = dict()
for i in range(0, 10500):
fname, label = os.path.basename(test_paths[i]), id2name[classes[i]]
submission[fname] = label
with open('106368019_submission.csv', 'w') as fout:
fout.write('fname,label\n')
for fname, label in submission.items():
fout.write('{},{}\n'.format(fname, label))
| 105318102/Eric-Liu | test.py | test.py | py | 3,536 | python | en | code | 0 | github-code | 6 |
13818867772 |
"""
Demo of custom tick-labels with user-defined rotation.
"""
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from matplotlib.widgets import Slider, Button, RadioButtons
z = int(input("how long you wanna go? "))
n1 = 2
n2 = int(input("another number "))
xcor = []
ycor =[]
lcor=[]
x = 0
i = 0
for j in range(0,z):
lcor.append(j*(math.log(2,math.e)/math.log(3,math.e)))
def pycode(x):
n = math.ceil(x*(math.log(n2,math.e)/math.log(n1,math.e))-1)#a1
return n
while x <= z:
x += 1
ycor.append((n2**x/n1**pycode(x)))
ycor.append((n1**(pycode(x)+1)/n2**x))
while i <= z*2+1: #math done to align x and y coordinates
i+=1
xcor.append(i)
labels = []
#plt.subplot(111,polar=True)
plt.plot(xcor[0:z:2], ycor[0:z:2], 'co')
plt.plot(xcor[1:z:2], ycor[1:z:2], 'ro')
plt.plot((n2**x/n1**pycode(x)),'k')
#plt.plot(xcor,ycor,'k')
# You can specify a rotation for the tick labels in degrees or with keywords.
#plt.xticks(lor, labels, rotation='vertical')
plt.xticks(xcor, labels, rotation='vertical')
# Pad margins so that markers don't get clipped by the axes
plt.margins(0.2)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.15)
plt.show()
| fastaro/FFimages | polar.py | polar.py | py | 1,243 | python | en | code | 0 | github-code | 6 |
28806070886 |
from __future__ import annotations
import copy
import datetime
import importlib.util
import json
import logging
import os
import random
import statistics
from abc import ABCMeta
from abc import abstractmethod
from typing import IO
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Type
from weakref import proxy
import keyring
import portion
from jira import JIRA
from rich.console import Console
from rich.progress import TaskID
from safdie import BaseCommand as SafdieBaseCommand
from safdie import get_entrypoints
from urllib3 import disable_warnings
from .constants import APP_NAME
from .constants import FORMATTER_ENTRYPOINT
from .constants import FUNCTION_ENTRYPOINT
from .constants import SOURCE_ENTRYPOINT
from .exceptions import ConfigurationError
from .types import ConfigDict
from .types import InstanceDefinition
from .types import SchemaRow
from .types import SelectFieldDefinition
from .utils import get_custom_function_dir
from .utils import get_functions_for_module
from .utils import save_config
if TYPE_CHECKING:
from .query import CounterChannel
from .query import Executor
from .query import Query
logger = logging.getLogger(__name__)
BUILTIN_FUNCTIONS: Dict[str, Callable] = {
# Built-ins
"abs": abs,
"all": all,
"any": any,
"bin": bin,
"bool": bool,
"hex": hex,
"int": int,
"len": len,
"max": max,
"min": min,
"oct": oct,
"ord": ord,
"pow": pow,
"range": range,
"reversed": reversed,
"round": round,
"set": set,
"sorted": sorted,
"str": str,
"sum": sum,
"tuple": tuple,
"map": map,
"filter": filter,
"type": lambda x: str(type(x)),
# Statistics
**get_functions_for_module(
statistics,
[
"fmean",
"geometric_mean",
"harmonic_mean",
"mean",
"median",
"median_grouped",
"median_high",
"median_low",
"mode",
"multimode",
"pstdev",
"pvariance",
"quantiles",
"stdev",
"variance",
],
),
# Random
**get_functions_for_module(
random,
["random", "randrange", "randint", "choice"],
),
# JSON
"json_loads": json.loads,
"empty_interval": portion.empty,
"closed_interval": portion.closed,
"open_interval": portion.open,
"openclosed_interval": portion.openclosed,
"closedopen_interval": portion.closedopen,
"datetime": datetime.datetime,
"timedelta": datetime.timedelta,
}
REGISTERED_FUNCTIONS: Dict[str, Callable] = {}
class BaseCommand(SafdieBaseCommand):
_jira: Optional[JIRA] = None
def __init__(self, *, config: ConfigDict, **kwargs):
self._config: ConfigDict = config
self._console = Console(highlight=False)
super().__init__(**kwargs)
@property
def config(self) -> ConfigDict:
"""Provides the configuration dictionary."""
return self._config
def save_config(self) -> None:
"""Saves the existing configuration dictionary."""
save_config(self.config, self.options.config)
@property
def console(self) -> Console:
"""Provides access to the console (see `rich.console.Console`."""
return self._console
@property
def jira(self) -> JIRA:
"""Provides access to the configured Jira instance."""
if self._jira is None:
instance = self.config.instances.get(
self.options.instance_name, InstanceDefinition()
)
instance_url = self.options.instance_url or instance.url
if not instance_url:
raise ConfigurationError(
"instance_url not set; please run `jira-select configure`."
)
username = self.options.username or instance.username
if not username:
raise ConfigurationError(
"username not set; please run `jira-select configure`."
)
password = self.options.password or instance.password
if not password:
password = keyring.get_password(APP_NAME, instance_url + username)
if not password:
raise ConfigurationError(
f"Password not stored for {instance_url} user {username}; "
"use the 'store-password' command to store the password "
"for this user account in your system keyring or use "
"`jira-select configure`."
)
verify = self.options.disable_certificate_verification or instance.verify
if verify is None:
verify = True
if verify is False:
disable_warnings()
self._jira = JIRA(
options={
"agile_rest_path": "agile",
"server": instance_url,
"verify": verify,
},
basic_auth=(username, password),
validate=False,
get_server_info=False,
)
return self._jira
def get_installed_formatters() -> Dict[str, Type[BaseFormatter]]:
return get_entrypoints(FORMATTER_ENTRYPOINT, BaseFormatter)
class BaseFormatter(metaclass=ABCMeta):
def __init__(self, executor: Executor, stream: IO[bytes]):
self._executor = proxy(executor)
self._stream = stream
@classmethod
@abstractmethod
def get_file_extension(cls) -> str:
...
@property
def executor(self):
return self._executor
@property
def stream(self):
return self._stream
def __enter__(self):
self.open()
return self
    def __exit__(self, type, value, traceback):
        # no special exception handling; just make sure the formatter is closed
        self.close()
def open(self):
return
def close(self):
return
@abstractmethod
def writerow(self, row: Dict[str, Any]):
...
def register_function(fn: Callable):
"""Register a callable to be a function available in queries."""
REGISTERED_FUNCTIONS[fn.__name__] = fn
def get_installed_functions(
    jira: Optional[JIRA] = None, executor: Optional[Executor] = None
) -> Dict[str, Callable]:
possible_commands: Dict[str, Callable] = copy.copy(BUILTIN_FUNCTIONS)
# Import any modules in the custom functions directory; as a
# side-effect of this, the functions will become listed within
# REGISTERED_FUNCTIONS
function_dir = get_custom_function_dir()
for dirname, subdirlist, filelist in os.walk(function_dir):
for filename in filelist:
if filename.endswith(".py"):
module_path_parts = ["user_scripts"]
if dirname != function_dir:
module_path_parts.append(
dirname[len(function_dir) + 1 :]
.replace("/", ".")
.replace("\\", ".")
)
module_path_parts.append(os.path.splitext(filename)[0])
module_path = ".".join(module_path_parts)
full_path = os.path.join(function_dir, dirname, filename)
try:
spec = importlib.util.spec_from_file_location(
module_path, full_path
)
if not spec:
continue
user_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(user_module) # type: ignore
except Exception as e:
logger.error("Could not import user script at %s: %s", full_path, e)
possible_commands.update(REGISTERED_FUNCTIONS)
for fn_name, fn in get_entrypoints(FUNCTION_ENTRYPOINT, BaseFunction).items():
possible_commands[fn_name] = fn(jira, executor=executor)
return possible_commands
class BaseFunction(metaclass=ABCMeta):
def __init__(self, jira: Optional[JIRA], executor: Optional[Executor]):
self._jira = jira
self._executor = executor
@property
def query(self) -> Optional[Query]:
return self.executor.query if self.executor else None
@property
def executor(self) -> Optional[Executor]:
return self._executor
@property
def jira(self):
assert self._jira
return self._jira
@abstractmethod
def __call__(self, *args, **kwargs) -> Optional[Any]:
...
def get_installed_sources() -> Dict[str, Type[BaseSource]]:
return get_entrypoints(SOURCE_ENTRYPOINT, BaseSource)
class BaseSource(metaclass=ABCMeta):
SCHEMA: List[SchemaRow] = []
def __init__(self, executor: Executor, task: TaskID, out_channel: CounterChannel):
self._executor = executor
self._task = task
self._out_channel = out_channel
super().__init__()
@classmethod
def get_schema(cls, jira: JIRA) -> List[SchemaRow]:
return copy.deepcopy(cls.SCHEMA)
@classmethod
def get_all_fields(cls, jira: JIRA) -> List[SelectFieldDefinition]:
fields: List[SelectFieldDefinition] = []
for entry in cls.get_schema(jira):
fields.append(
SelectFieldDefinition(
expression=entry.id,
column=entry.id,
)
)
return fields
def remove_progress(self):
self._executor.progress.remove_task(self._task)
def update_progress(self, *args, **kwargs):
self._executor.progress.update(self._task, *args, **kwargs)
def update_count(self, value: int):
self._out_channel.set(value)
@property
def query(self) -> Query:
return self._executor.query
@property
def jira(self) -> JIRA:
return self._executor.jira
@abstractmethod
def rehydrate(self, value: Any) -> Any:
...
@abstractmethod
def __iter__(self) -> Iterator[Any]:
...
| coddingtonbear/jira-select | jira_select/plugin.py | plugin.py | py | 10,204 | python | en | code | 22 | github-code | 6 |
18016139374 |
from scapy.all import *
MAC_A = "02:42:0a:09:00:05"
IP_B = "192.168.60.5"
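# Man-in-the-middle payload rewriting: sniff TCP packets that host A (matched
# by MAC) sends towards B, replace 'seedlabs' in the payload, and re-inject
# the modified packet with recomputed checksums.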
def spoof_pkt(pkt):
newpkt = IP(bytes(pkt[IP]))
del(newpkt.chksum)
del(newpkt[TCP].payload)
del(newpkt[TCP].chksum)
if pkt[TCP].payload:
data = pkt[TCP].payload.load
newdata = data.replace(b'seedlabs', b'AAAAAAAA')
send(newpkt/newdata)
else:
send(newpkt)
f = 'tcp and ether src {A} and ip dst {B}'.format(A=MAC_A, B=IP_B)
pkt = sniff(iface='eth0', filter=f, prn=spoof_pkt)
| kimnamhyeon0112/2023-2_Information_Security | Week4_Prac02_Malicious_Router.py | Week4_Prac02_Malicious_Router.py | py | 518 | python | en | code | 0 | github-code | 6 |
16543533799 |
from durable.lang import *
from flask import request
from scripts import REDataHeader as Header
from scripts import REResponseCode as ResponseCode
from scripts.QuestionDomain.QuestionTree import QuestionTree
import json
STAGE_INIT = "1"
STAGE_VALIDATION = "2"
STAGE_VERIFICATION = "3"
STAGE_PROCESS = "4"
STAGE_TERMINATE = "5"
responseJSON = {
Header.RESPONSE_CODE : ResponseCode.NOT_PROCESS,
Header.RESPONSE_DATA : 'Not process'
}
with ruleset('question'):
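    # Forward-chaining flow: each rule matches the current Stage value,
    # advances it, and re-asserts the fact so the next stage's rule fires.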
@when_all(m.data.Stage == STAGE_INIT)
def init(c):
c.m.data.Stage = STAGE_VALIDATION
c.assert_fact(c.m)
@when_all(m.data.Stage == STAGE_VALIDATION)
def validation(c):
print('QnA - Validation')
c.m.data.Stage = STAGE_PROCESS
c.assert_fact(c.m)
@when_all(m.data.Stage == STAGE_PROCESS)
def process(c):
print('QnA - Process')
try:
tree = QuestionTree()
tree.loadTree()
answer = tree.searchfor(c.m.data[Header.QUESTION])
responseJSON = {
Header.RESPONSE_CODE: ResponseCode.SUCESS_QUESTION,
Header.RESPONSE_DATA: answer
}
            c.m.data[Header.RESPONSE_HEAD] = responseJSON
request.data = str(c.m.data).replace("'",'"')
c.m.data.Stage = STAGE_TERMINATE
c.assert_fact(c.m)
        except Exception:
responseJSON = {
Header.RESPONSE_CODE: ResponseCode.QUESTION_NOTFOUND,
Header.RESPONSE_DATA: 'error'
}
c.m.data[Header.RESPONSE_HEAD] = responseJSON
request.data = str(c.m.data).replace("'", '"')
c.m.data.Stage = STAGE_TERMINATE
c.assert_fact(c.m)
@when_all( m.data.Stage == STAGE_TERMINATE)
def finalize(c):
print('QnA - finalize')
c.assert_fact(c.m)
| Grimmii/TrainChatBot | src/scripts/RE_flow_QnA.py | RE_flow_QnA.py | py | 1,923 | python | en | code | 0 | github-code | 6 |
5092362437 |
# For taking space seperated integer variable inputs.
def invr():
return(map(int, input().split()))
def sum_of_digits(n):
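    # Classic digit loop: n % 10 peels off the last decimal digit, n // 10 drops it.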
s = 0
while n > 0:
s += n % 10
n = n // 10
return s
N, A, B = invr()
S = 0
for i in range(1, N+1):
s = sum_of_digits(i)
    if A <= s <= B:
S += i
print(S)
| sudiptob2/atcoder-training | Easy 100/51.some sums.py | 51.some sums.py | py | 333 | python | en | code | 2 | github-code | 6 |
27595046406 |
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import (
QHBoxLayout,
QListWidgetItem,
QCheckBox,
QGridLayout,
QWidget,
QComboBox,
QListWidget,
)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib
import matplotlib.pyplot as plt
from code_SRC.composantes import Time
from datetime import date
class Save_plot_app(QWidget):
"""Plot app that shows a plot chart of a specific leaderboard.
#####################################################
# # #
# # #
# # #
# # #
# # Plot #
# DATA # CHART #
# LIST # #
# # #
# # #
# # #
# # #
#####################################################
"""
def __init__(self, data):
def list_widget() -> QListWidget:
self.listwidget = QListWidget()
self.listwidget.setFixedWidth(450)
for rank, entry in enumerate(runs_times, start=1):
delta = Time(entry - runs_times[0])
string = f"{rank:4} {Time(entry)} {str(delta):>10} (-{(runs_times[0] - entry) / runs_times[0]:>6.2%})"
one_line = QListWidgetItem(string)
one_line.setFont(QFont("Lucida Sans Typewriter", 10))
self.listwidget.addItem(one_line)
# self.listwidget.clicked.connect(self.list_clicked)
return self.listwidget
def plot_widget() -> FigureCanvas:
self.canvas = FigureCanvas(plt.Figure(tight_layout=True))
matplotlib.rc("font", **{"weight": "normal", "size": 16})
self.update_plot()
return self.canvas
runs_times, pb, WR_time = data
super().__init__()
self.data = runs_times
self.pb = pb.seconds
self.WR = WR_time
self.setMinimumSize(1400, 600)
self.layout = QGridLayout()
self.setLayout(self.layout)
self.layout.addWidget(list_widget(), 0, 0)
self.layout.addWidget(plot_widget(), 0, 1)
def plot(self):
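        # Plot run times in rank order, draw the WR (world-record) time as a
        # horizontal line, and relabel the y-axis ticks as formatted Time values.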
self.canvas.figure.clf()
self.ax = self.canvas.figure.subplots()
self.ax.plot([x for x in range(1, len(self.data) + 1)], self.data)
self.ax.axhline(self.WR, color="goldenrod")
self.ax.set_yticks(self.ax.get_yticks())
self.ax.set_yticklabels([str(Time(x)) for x in self.ax.get_yticks()])
self.canvas.draw()
def update_plot(self):
self.plot()
| GB127/SRC-statistics | plots/save_plot.py | save_plot.py | py | 2,948 | python | en | code | 0 | github-code | 6 |
41666390681 |
import os

names = []
with open('employees.txt', 'r') as file:
for line in file:
name = line.strip()
names.append(name)
with open('template.txt', 'r') as file:
template = file.read()
if not os.path.exists('christmasCards'):
    os.mkdir('christmasCards')
for name in names:
card_content = template.replace('NAME', name)
with open(f'christmasCards/{name}.txt', 'w') as file:
file.write(card_content)
| milanasokolova/christmasCards | christmas.py | christmas.py | py | 442 | python | en | code | 0 | github-code | 6 |
1584215151 |
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.http import (HttpResponseRedirect, HttpResponseBadRequest,
HttpResponseNotAllowed, HttpResponse, HttpResponseForbidden)
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.translation import ugettext_lazy as _
from minitrue.forms import SearchAndReplaceForm, ReplaceForm, ReplaceSingleForm
from minitrue.models import SearchAndReplace
class NewSearchAdmin(admin.ModelAdmin):
def get_urls(self):
from django.conf.urls.defaults import patterns, url
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = patterns('',
pat(r'replace/$', self.replace),
pat(r'replace-all/$', self.replace_all),
)
return url_patterns + super(NewSearchAdmin, self).get_urls()
def add_view(self, request):
return HttpResponseRedirect(reverse('admin:minitrue_searchandreplace_changelist'))
def has_change_permission(self, *args, **kwargs):
return False
has_delete_permission = has_change_permission
def changelist_view(self, request, extra_context=None):
if not self.has_add_permission(request):
return HttpResponseForbidden()
results = []
term = ''
replacement = ''
form = SearchAndReplaceForm(request.GET)
if form.is_valid():
term = form.cleaned_data['term']
replacement = form.cleaned_data['replacement']
results = form.get_results()
else:
form = SearchAndReplaceForm()
data = RequestContext(request)
data['form'] = form
data['results'] = results
data['term'] = term
data['replacement'] = replacement
data['title'] = _("Search and Replace")
return render_to_response('minitrue/search_and_replace.html', data)
def replace(self, request):
if not self.has_add_permission(request):
return HttpResponseForbidden()
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
form = ReplaceSingleForm(request.POST)
if form.is_valid():
form.replace()
return HttpResponse()
return HttpResponseBadRequest(form.errors.as_text())
def replace_all(self, request):
if not self.has_add_permission(request):
return HttpResponseForbidden()
if request.method != 'POST':
return HttpResponseNotAllowed(['POST'])
form = ReplaceForm(request.POST)
if form.is_valid():
form.replace()
return HttpResponse()
return HttpResponseBadRequest(form.errors.as_text())
admin.site.register(SearchAndReplace, NewSearchAdmin)
| beniwohli/django-minitrue | minitrue/admin.py | admin.py | py | 2,968 | python | en | code | 4 | github-code | 6 |
11654464179 |
import numpy as np
import matplotlib.pyplot as plt
import sys
import datetime
import time
import os
from scipy import optimize
import yaml
from matplotlib import rcParams
import matplotlib.patches as mpatches
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib as mpl
#############################
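# kappa and sigma are Yukawa-type correction factors for a fifth force of
# strength alpha and range Lambda, evaluated at separation r0.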
def kappafunc(Lambda,alpha,r0):
return alpha*np.exp(- r0/Lambda)*(1 + r0/Lambda)
def sigmafunc(Lambda,alpha,r0):
return alpha*np.exp(- r0/Lambda)*(2 + 2*r0/Lambda + r0**2/Lambda**2)
def Deltakappa(LambdaLog,alpha,r0,gNewt,muc,r,hbar,omegam,Mprobe,n,g0):
Lambda = 10**LambdaLog
kappa = kappafunc(Lambda,alpha,r0)
return 1/(np.sqrt(Mes)*gNewt)*(1/np.sqrt((muc**2*np.exp(4*r) + np.sinh(2*r)**2/2)))*np.sqrt(2*hbar*omegam**5/Mprobe)*(1/(8*np.pi*n*g0))*(1/kappa)-1
def Deltasigma(LambdaLog,alpha,r0,gNewt,muc,r,hbar,omegam,Mprobe,n,g0):
Lambda = 10**LambdaLog
sigma = sigmafunc(Lambda,alpha,r0)
return 1/(np.sqrt(Mes)*gNewt)*(1/np.sqrt((muc**2*np.exp(4*r) + np.sinh(2*r)**2/2)))*np.sqrt(2*hbar*omegam**5/Mprobe)*(1/(4*np.pi*n*g0*epsilon))*(1/sigma)-1
def Deltakappares(LambdaLog,alpha,r0,gNewt,muc,r,hbar,omegam,Mprobe,n,g0):
Lambda = 10**LambdaLog
kappa = kappafunc(Lambda,alpha,r0)
FN = gNewt*Mprobe
return gNewt*Mprobe*kappa/FN
def Deltasigmares(Lambda,alpha,r0,gNewt,muc,r,hbar,omegam,Mprobe,n,g0):
sigma = sigmafunc(Lambda,alpha,r0)
FN = gNewt*Mprobe
return gNewt*epsilon*Mprobe*sigma/FN
# Function to find all zeros, given a meshgrid:
def findAllZeros(func,X,Y,Z,r0,gNewt,muc,r,hbar,omegam,Mprobe,n,g0,bound,logX=True,logY=False):
zeroList = []
# If the x-values are in exponential form, convert to log
if logX:
Xuse = np.log10(X)
else:
Xuse = X
if logY:
Yuse = np.log10(Y)
else:
Yuse = Y
for k in range(0,len(X)):
rowList = []
for l in range(0,len(X[0])-1):
if Z[k,l]*Z[k,l+1] < 0 and np.isfinite(Z[k,l]) and np.isfinite(Z[k, l+1]):
# Found a zero:
xroot = optimize.brentq(func,Xuse[k,l],Xuse[k,l+1],args=(Yuse[k,l],r0,gNewt,muc,r,hbar,omegam,Mprobe,n,g0,bound))
yroot = Yuse[k,l]
rowList.append((xroot,yroot))
zeroList.append(rowList)
return zeroList
def extractZerosLine(zeroList,line=1):
zerox = np.zeros(len(zeroList))
zeroy = np.zeros(len(zeroList))
for k in range(0,len(zerox)):
if len(zeroList[k]) > line - 1:
zerox[k] = zeroList[k][line-1][0]
zeroy[k] = zeroList[k][line-1][1]
haveZeros = np.where(zerox != 0)[0]
return [zerox[haveZeros],zeroy[haveZeros]]
###################################
config = 'config.yaml'
# Load arguments from yaml file
args = {}
if type(config) == str:
with open(config) as cfile:
        args.update(yaml.safe_load(cfile))
elif type(config) == dict:
args.update(config)
else:
print("Failed to load config arguments")
hbar = float(args['hbar'])
G = float(args['G'])
c = float(args['c'])
e = float(args['e'])
muc = float(args['muc'])
g0 = float(args['g0'])
omegam = float(args['omegam'])
r0 = float(args['r0'])
epsilon = float(args['epsilon'])
r = float(args['r'])
n = float(args['n'])
Ms = float(args['Ms'])
Mes = float(args['Mes'])
rhos = float(args['rhos'])
rhop = float(args['rhop'])
Mprobe = float(args['Mprobe'])
rhobg = float(args['rhobg'])
Lambdamin = float(args['Lambdamin'])
Lambdamax = float(args['Lambdamax'])
alphamin = float(args['alphamin'])
alphamax = float(args['alphamax'])
lmin = float(args['lmin'])
lmax = float(args['lmax'])
nSample = int(args['nSample'])
gNewt = G*Ms/r0**2
alpharange = 10**(np.linspace(alphamin,alphamax,nSample))
lambdarange = 10**(np.linspace(lmin,lmax,nSample))
LambdaGrid, alphaGrid = np.meshgrid(lambdarange,alpharange)
# Plot the functions
rcParams.update({'figure.autolayout': True})
plt.rcParams['xtick.minor.size'] = 0
plt.rcParams['xtick.minor.width'] = 0
fig, ax = plt.subplots(figsize = (7, 6))
plt.xlabel('$\\lambda \, (\\mathrm{m})$', fontfamily = 'serif', fontsize = 15)
plt.ylabel('$|\\alpha|$', fontfamily = 'serif', fontsize = 15)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
ax.set_ylim(1e-8,1e8)
ax.set_xlim(1e-5,1)
# Start loop that prints each bound in bounds
DeltakappaGrid = np.log10(Deltakappares(LambdaGrid,alphaGrid,r0,gNewt,muc,r,hbar,omegam,Mprobe,n,g0))
DeltasigmaGrid = np.log10(Deltasigmares(LambdaGrid,alphaGrid,r0,gNewt,muc,r,hbar,omegam,Mprobe,n,g0))
#plt.loglog(10**zerox1sigmares, zeroy1sigmares, alpha = 1, color = 'black', label = str(bound))
viridis = cm.get_cmap('viridis', 8)
#loglevels = [1e-20,1e-18,1e-16,1e-14,1e-12,1e-10,1e-1]
#levels = [-20, -18, -16, -14, -12, -10, -8, -6]
levels = [-8, -6, -4, -2, 0, 2, 4, 6, 8]
CS = ax.contourf(LambdaGrid, alphaGrid, DeltasigmaGrid, levels = levels, colors = viridis.colors)
plt.xscale('log')
plt.yscale('log')
clb = fig.colorbar(CS, extend = 'both')
clb.ax.tick_params(labelsize=12)
clb.set_label('$\\log_{10}(\\Delta F)$', labelpad=-40, y=1.05, rotation=0, fontsize = 12)
#ax.legend(loc = 'lower left', labelspacing = 0.4, fontsize = 12)
#ax.clabel(CS, inline=True, fontsize=10)
plt.savefig('alphalambdaforceplot.pdf')
plt.show()
| sqvarfort/modified-gravity-optomech | force_plot_alpha_lambda.py | force_plot_alpha_lambda.py | py | 5,239 | python | en | code | 0 | github-code | 6 |
16974087058 |
#!/usr/bin/python3
from pwn import *
# Telnet
sh = remote("ip", 30888)
# SSH:
# sh = ssh('user', 'ip', password='pass', port=22)
# Exec
# process('./exec')
# conn.sendlineafter(b"> ", b"1")
sh.sendline(b'ls')
flag = sh.recvline(timeout=5)
log.success(flag)
sh.interactive()
sh.close()
| sawyerf/HackSheet | scripts/pwn-connect.py | pwn-connect.py | py | 289 | python | en | code | 30 | github-code | 6 |
71379589627 |
#ContinuousQuantumWalkSearch
from numpy import *
from matplotlib.pyplot import *
import matplotlib
from scipy import linalg
import sys
from numpy import kron
from numpy.core.umath import absolute
matplotlib.rcParams.update({'font.size': 15})
rcParams['figure.figsize'] = 11, 8
def init(N):
psi0 = ones((N,1))/ sqrt(N)
return psi0
def adjMatrix(N):
adjM = ones((N,N)) - eye(N)
return adjM
def adjMatrixList(N):
adjM=[]
for n in N:
adjM.append(ones((n,n)) - eye(n))
return adjM
def gammaList(N):
gamma = []
for n in N:
gamma.append(1/n)
return gamma
def hamiltonean(N,adjM,marked,gamma):
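    # Continuous-time quantum-walk search Hamiltonian: H = -gamma * A over the
    # complete graph, with the marked vertex given on-site energy -1.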
H = -(gamma*adjM)
H[marked][marked] = -1
return H
def hamiltoneanList(N,adjM,marked,gammaList):
H = []
for (adjMatrix,gamma) in zip(adjM,gammaList):
H.append(-(gamma*adjMatrix))
for ham in H:
ham[marked][marked] = -1
return H
def evo(H,t):
U = linalg.expm(-1j*H*t)
return U
def fin(N,evo):
psiN = init(N)
psiN = evo.dot(psiN)
return psiN
def ampToProb(N,psiN,marked):
prob = zeros((N,1))
probMarked = zeros((N,1))
for x in range(N):
prob[x] += (absolute(psiN[x])**2)
probMarked[x] += (absolute(psiN[marked])**2)
return prob,probMarked
def spaceGen(N,numOfSamples):
stepVec = []
tVec = []
for n in N:
stepVec.append((pi/2) * sqrt(n))
for step in stepVec:
tVec.append(linspace(0,step,num=numOfSamples))
return tVec
def plotSearch(N,probT,tSpace,configVec):
plotName = ""
for T,walk,config,n in zip(tSpace,probT,configVec,N):
#print(config)
plot(T,walk,color=config[0],linestyle=config[1],label="N=%s"%n)
vlines(max(T),0,1,color=config[0],linestyle=config[2])
legend()
xlabel("Number of steps")
ylabel("Probability of the marked element")
for n in N:
plotName+=str(n)
savefig(r'/home/jaime/Programming/Jaime-Santos-Dissertation/Results/Simulations/ContQuantumWalk/Search/'+str(plotName))
clf()
def valueOfGamma(N,hamList,gammas):
    x = []
    y = []
    plotName=""
    # iterate over the arguments rather than the module-level globals
    for h,gamma in zip(hamList,gammas):
eigValues = (linalg.eig(h))
maxEig = max((absolute(eigValues[0])))
sndMaxEig = second_largest(absolute(eigValues[0]))
x.append(gamma*N[0])
y.append(maxEig-sndMaxEig)
plot(x,y)
xlabel("ฮณN")
ylabel("ฮE")
plotName=N[0]
savefig(r'/home/jaime/Programming/Jaime-Santos-Dissertation/Results/Simulations/ContQuantumWalk/Search/gamma'+str(plotName))
clf()
def runSearch(N,marked,tSpace,configVec,hamList):
prob = []
probT = []
for (n,T,ham) in zip(N,tSpace,hamList):
for t in T:
evol = evo(ham,t)
psiN = fin(n,evol)
prob += [(absolute(psiN[marked][0])**2)]
#print(prob)
# print("Sqrt(N):%s\tprob:%s\n"%(1/n,prob[0]))
probT.append(prob)
prob = []
return probT
def second_smallest(numbers):
m1, m2 = float('inf'), float('inf')
for x in numbers:
if x <= m1:
m1, m2 = x, m1
elif x < m2:
m2 = x
return m2
def second_largest(numbers):
count = 0
m1 = m2 = float('-inf')
for x in numbers:
count += 1
if x > m2:
if x >= m1:
m1, m2 = x, m1
else:
m2 = x
return m2 if count >= 2 else None
NVec= [16,32,64]
marked = 0
gammaList = gammaList(NVec)
adjList = adjMatrixList(NVec)
hamList = hamiltoneanList(NVec,adjList,marked,gammaList)
#print("Ideal Ham:%s\n\n\n"%hamList)
nSamples = 100
TVec = spaceGen(NVec,nSamples)
colors = ['r','b','g','k']
lines = ['-','-','-','-']
lines2 = ['--','--','--','--']
configVec = zip(colors,lines,lines2)
contQWalk=runSearch(NVec,marked,TVec,configVec,hamList)
plotSearch(NVec,contQWalk,TVec,configVec)
Samples = 100
NVec2 = [512]*Samples
gammaList2 = linspace(0,2/NVec2[0],Samples)
adjList2 = adjMatrixList(NVec2)
hamList2 = hamiltoneanList(NVec2,adjList2,marked,gammaList2)
valueOfGamma(NVec2,hamList2,gammaList2)
# for h in hamList2:
# print("%s\n"%h)
# print(gamma2)
# x=[]
# y=[]
# # E1 -> 2 menor VP E0 -> menor
# for h,gamma in zip(hamList2,gammaList2):
# eigValues = (linalg.eig(h))
# maxEig = max((absolute(eigValues[0])))
# # print(maxEig)
# sndMaxEig = second_largest(absolute(eigValues[0]))
# # print(sndMaxEig)
# # print("Hamiltonian:%s \n Eigenvalues:%s \t\t MaxEigen:%s\t\t SndMaxEigen:%s\n\n"%(h,eigValues[0],maxEig,sndMaxEig))
# # print(sndMaxEig - maxEig)
# # print(maxEig - sndMaxEig)
# # x.append(sndMaxEig - maxEig)
# x.append(gamma*NVec2[0])
# y.append(maxEig - sndMaxEig)
# print(gamma*NVec2[0])
# print(gamma)
# plot(x,y)
# show()
| JaimePSantos/Dissertation-Tex-Code | Coding/Python Simulations/ContinuousQW/Search/runSearch.py | runSearch.py | py | 4,819 | python | en | code | 0 | github-code | 6 |
23231610787 |
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from nav_client.serializers import (
DeviceSerializer,
GeozoneSerializer,
NavMtIdSerializer)
from nav_client.models import (Device,
SyncDate,
GeoZone,
NavMtId)
from nav_client.filter import GeozoneFilter, NavMtIdFilter
from datetime import datetime
from rest_framework.response import Response
try:
last_sync_date = SyncDate.objects.last()
except Exception:
last_sync_date = SyncDate.objects.all()
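# NOTE: last_sync_date is evaluated once at import time, so a long-lived
# worker keeps whatever SyncDate was newest when this module first loaded.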
class DeviceListView(generics.ListAPIView):
"""
ะกะฟะธัะพะบ ะผะฐัะธะฝ
"""
# queryset = Device.objects.filter(sync_date=last_sync_date)
serializer_class = DeviceSerializer
permission_classes = (IsAuthenticated,)
def list(self, request, *args, **kwargs):
if request.query_params.get("date", None):
dt = datetime.strptime(
request.query_params["date"], "%Y-%m-%d")
sync_date = SyncDate.objects.filter(datetime__year=dt.year,
datetime__month=dt.month,
datetime__day=dt.day).first()
queryset = Device.objects.filter(sync_date=sync_date)
        else:
            queryset = Device.objects.filter(sync_date=last_sync_date)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.paginator.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class GeozoneListView(generics.ListAPIView):
"""
ะกะฟะธัะพะบ ะฟะปะพัะฐะดะพะบ(ะผัะฝะธัะธะฟะฐะปัะฝัั
ะพะฑัะฐะทะพะฒะฐะฝะธะน)
"""
# queryset = GeoZone.objects.all(sync_date=last_sync_date)
serializer_class = GeozoneSerializer
filterset_class = GeozoneFilter
permission_classes = (IsAuthenticated,)
def list(self, request, *args, **kwargs):
if request.query_params["date"]:
dt = datetime.strptime(
request.query_params["date"], "%Y-%m-%d")
sync_date = SyncDate.objects.filter(datetime__year=dt.year,
datetime__month=dt.month,
datetime__day=dt.day).first()
queryset = GeoZone.objects.filter(sync_date=sync_date)
else:
queryset = GeoZone.objects.filter(sync_date=last_sync_date)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.paginator.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class NavMtIdListView(generics.ListAPIView):
"""
ะกะฟะธัะพะบ ะฟะปะพัะฐะดะพะบ ะธะท ะะข
"""
# queryset = NavMtId.objects.filter(sync_date=last_sync_date)
serializer_class = NavMtIdSerializer
filterset_class = NavMtIdFilter
permission_classes = (IsAuthenticated,)
def list(self, request, *args, **kwargs):
if request.query_params["date"]:
dt = datetime.strptime(
request.query_params["date"], "%Y-%m-%d")
sync_date = SyncDate.objects.filter(datetime__year=dt.year,
datetime__month=dt.month,
datetime__day=dt.day).first()
queryset = NavMtId.objects.filter(sync_date=sync_date)
else:
queryset = NavMtId.objects.filter(sync_date=last_sync_date)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.paginator.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
| alldevic/route-log | nav_client/views.py | views.py | py | 4,160 | python | en | code | 0 | github-code | 6 |
25418145080 |
from sys import stdin
from itertools import permutations
N, M = map(int, stdin.readline().split())
arr = [x for x in range(1, N + 1)]
per = list(permutations(arr, M))
for p in per:
print(" ".join(map(str, p)))
| jaehui327/Algo | 백준/Silver/15649. N과 M （1）/N과 M （1）.py | N과 M （1）.py | py | 245 | python | en | code | 0 | github-code | 6 |
32097638349 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import ctypes
import logging
import os
import pathlib
import sys
from ..common.pool import PoolConfigLoader
from ..common.workflow import Workflow
logger = logging.getLogger()
class PoolLauncher(Workflow):
"""Launcher for a fuzzing pool, using docker parameters from a private repo."""
def __init__(self, command, pool_name, preprocess=False):
super().__init__()
self.command = command.copy()
self.environment = os.environ.copy()
if pool_name is not None and "/" in pool_name:
self.apply, self.pool_name = pool_name.split("/")
else:
self.pool_name = pool_name
self.apply = None
self.preprocess = preprocess
self.log_dir = pathlib.Path("/logs")
def clone(self, config):
"""Clone remote repositories according to current setup"""
super().clone(config)
# Clone fuzzing & community configuration repos
self.fuzzing_config_dir = self.git_clone(**config["fuzzing_config"])
def load_params(self):
path = self.fuzzing_config_dir / f"{self.pool_name}.yml"
assert path.exists(), f"Missing pool {self.pool_name}"
# Build tasks needed for a specific pool
pool_config = PoolConfigLoader.from_file(path)
if self.preprocess:
pool_config = pool_config.create_preprocess()
assert pool_config is not None, "preprocess given, but could not be loaded"
if self.apply is not None:
pool_config = pool_config.apply(self.apply)
if pool_config.command:
assert not self.command, "Specify command-line args XOR pool.command"
self.command = pool_config.command.copy()
self.environment.update(pool_config.macros)
def exec(self):
assert self.command
if self.in_taskcluster:
logging.info(f"Creating private logs directory '{self.log_dir}/'")
if self.log_dir.is_dir():
self.log_dir.chmod(0o777)
else:
self.log_dir.mkdir(mode=0o777)
logging.info(f"Redirecting stdout/stderr to {self.log_dir}/live.log")
sys.stdout.flush()
sys.stderr.flush()
# redirect stdout/stderr to a log file
# not sure if the assertions would print
with (self.log_dir / "live.log").open("w") as log:
result = os.dup2(log.fileno(), 1)
assert result != -1, "dup2 failed: " + os.strerror(ctypes.get_errno())
result = os.dup2(log.fileno(), 2)
assert result != -1, "dup2 failed: " + os.strerror(ctypes.get_errno())
else:
sys.stdout.flush()
sys.stderr.flush()
os.execvpe(self.command[0], self.command, self.environment)
| MozillaSecurity/fuzzing-tc | fuzzing_tc/pool_launch/launcher.py | launcher.py | py | 3,020 | python | en | code | 2 | github-code | 6 |
21353899395 |
from typing import List, Optional
from copy import deepcopy
class Solution:
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
n = len(wordList)
slen = len(wordList[0])
wordList = set(wordList)
if endWord not in wordList:
return []
wd2dist = dict()
wd2dist[beginWord] = 0
def ff():
curr = beginWord
que = [curr]
while len(que):
node = que[0]
del que[0]
curr_wd = node
for i in range(slen):
for j in range(ord('a'), ord('z')+1):
new_wd = curr_wd[:i] + chr(j) + curr_wd[i+1:]
if new_wd in wordList and new_wd not in wd2dist:
wd2dist[new_wd] = wd2dist[curr_wd] + 1
if new_wd == endWord:
return
que.append(new_wd)
ff()
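        # ff() is a BFS from beginWord that records the shortest distance to
        # every reachable word; dfs() then backtracks from endWord, stepping
        # only to words whose distance is exactly one smaller, so every path
        # collected is a shortest transformation sequence.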
ans = []
t_path = [endWord]
def dfs(node):
curr = node
if curr == beginWord:
ans.append(t_path[::-1])
que = [curr]
while len(que):
node = que[0]
del que[0]
curr_wd = node
for i in range(slen):
for j in range(ord('a'), ord('z')+1):
new_wd = curr_wd[:i] + chr(j) + curr_wd[i+1:]
if new_wd in wd2dist and wd2dist[new_wd]+1 == wd2dist[curr_wd]:
t_path.append(new_wd)
dfs(new_wd)
t_path.pop()
dfs(endWord)
return ans
s = Solution()
r = s.findLadders(
# "hit", "cog", ["hot","dot","dog","lot","log","cog"]
# "hit", "cog", ["hot","dot","dog","lot","log"]
# "hot", "dog", ["hot","dog"]
"red", "tax", ["ted","tex","red","tax","tad","den","rex","pee"]
)
print(r)
# [["red","ted","tad","tax"],
# ["red","ted","tex","tax"],
# ["red","rex","tex","tax"]]
|
Alex-Beng/ojs
|
FuckLeetcode/126. ๅ่ฏๆฅ้พ II.py
|
126. ๅ่ฏๆฅ้พ II.py
|
py
| 2,135 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8337411728
|
import os
import shutil
def setup_run(rundir, inflow, outflow, update1, initial1, update2, initial2):
os.chdir(rundir)
shutil.copy('avida.cfg', 'avida.cfg.bak')
shutil.copy('environment.cfg', 'environment.cfg.bak')
shutil.copy('events.cfg', 'events.cfg.bak')
# modify environment.cfg
fp = open('environment.cfg', 'a')
fp.write('''
RESOURCE Echose:inflow=%s:outflow=%s
REACTION ECHO echo process:resource=Echose:value=1.0:type=pow
''' % (inflow, outflow))
fp.close()
# modify events.cfg
fp = open('events.cfg', 'a')
fp.write('''
u %s SetResourceInflow Echose initial=%s
u %s SetResourceInflow Echose initial=%s
''' % (update1, initial1, update2, initial2))
fp.close()
###
shutil.rmtree('/mnt/run.1')
shutil.copytree('/root/run.template', '/mnt/run.1')
setup_run('/mnt/run.1', 100, .01, 500, 1, 1000, 1000)
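# Reading the call above against setup_run's signature: the Echose resource
# starts with inflow=100 and outflow=.01, then the scheduled events reset the
# inflow to 1 at update 500 and to 1000 at update 1000.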
|
ctb/beacon
|
ltee/yuanjie-create-runs.py
|
yuanjie-create-runs.py
|
py
| 867 |
python
|
en
|
code
| 2 |
github-code
|
6
|
42493961152
|
#!/usr/bin/env python3
"""Program for testing constrained sequence alignm"""
import subprocess
from pprint import pprint
import json
import argparse
import sys
import string
import random
import datetime
from constrain_align import run_blast
AMINO_ACIDS = "GAVLIPFYWSTCMNQKRHDE"
NUM_PRINT = 50
def main():
"""Main function"""
args = parse_arguments()
    if args.random is None and args.specific is None:
        raise ValueError("Please use either -r or -s")
    elif args.random is not None and args.specific is not None:
        raise ValueError("Please only use one of -r or -s")
    elif args.random is None:
        seqs = args.specific
    else:
        seqs = args.random
weights = args.w or [random.randrange(1,20) for _ in seqs]
if weights and len(weights) != len(seqs):
raise ValueError('Number of weights and number of sequences must be of same length')
aminos = args.aminos
if aminos:
if not all(x in AMINO_ACIDS for x in aminos):
raise ValueError('One of your amino acids is funny. Please choose amino acids from "{}"'.format(AMINO_ACIDS))
outfile = open(args.out, 'w') if args.out else sys.stdout
if (args.random):
test_seqs = [make_seqs(s, amino_acids=aminos)for s in seqs]
else:
test_seqs = seqs
print('Sequences are:', file=outfile)
print(test_seqs, file=outfile)
combined_seq = ""
for x in test_seqs:
combined_seq += x
print("Combined sequence for BLAST:", file=outfile)
print(combined_seq + '\n',file=outfile)
#Only print top NUM_PRINT results
blast_items = run_blast(combined_seq)
print('The blast says:', file=outfile)
bresults = len(blast_items)
#if NUM_PRINT < bresults:
# bresults = NUM_PRINT
if len(blast_items) > 0:
[print(blast_items[x], file=outfile) for x in range(0, bresults)]
#prints all blast results
#[print(x, file=outfile) for x in blast_items]
# Only print top NUM_PRINT results
#want top 10 distinct scored
our_program = subprocess.run(['python3', 'constrain_align.py', '-s'] + [x + ':{}'.format(w) for x, w in zip(test_seqs, weights)], stdout=subprocess.PIPE)
print('\nOur program says:', file=outfile)
our_items = our_program.stdout.decode('utf-8').split('\n')
oresults = int(len(our_items)/4)
#if NUM_PRINT < oresults:
# oresults = NUM_PRINT
if len(our_items) > 0:
idx = 0
limit = oresults*4
while idx < limit:
print(our_items[idx], file=outfile)
idx += 4
#prints all our programs results
#[print(x, file=outfile) for x in our_program.stdout.decode('utf-8').split('\n')]
print('-'*80, file=outfile)
def make_seqs(seq_len, amino_acids=None):
amino_acids = amino_acids or AMINO_ACIDS
return ''.join(random.choice(amino_acids) for x in range(seq_len))
def parse_arguments():
"""Parse command line arguments to the program"""
parser = argparse.ArgumentParser()
parser.add_argument('-s', "--specific", help="Specify a specific amino acid sequence to test", nargs="*", default=None)
parser.add_argument('-r',
"--random", help="Specify the length of random sequnece you want to test", nargs='*', type=int, default=None)
parser.add_argument('-w', "-weight",
help="Specify the weight you want to use for the constrained alignment", nargs='*', type=int, default=None)
parser.add_argument('-o', '--out', default=None,
help='Specify output filename.')
parser.add_argument('-a', '--aminos', default=None, type=str,
help='Specify amino acids.')
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
|
westelizabeth/conSequences
|
test_script.py
|
test_script.py
|
py
| 3,714 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18603320645
|
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.dispatcher import FSMContext
from aiogram.utils import executor
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.contrib.fsm_storage.memory import MemoryStorage
import logging
import sqlite3
from aiogram.types import ReplyKeyboardRemove, \
ReplyKeyboardMarkup, KeyboardButton, \
InlineKeyboardMarkup, InlineKeyboardButton
from config import TOKEN
import text_or_question as text
import keaboard as kb
import time
import datetime
import asyncio
from db_admin import DateBase
from sqlit import reg_user,obnova_members_status,count_member_in_status,info_members,send_status_no_rassilka,cheack_status
datebase = DateBase('users.db')
bot = Bot(token=TOKEN)
db = Dispatcher(bot, storage=MemoryStorage())
logging.basicConfig(level=logging.INFO)
class st_reg(StatesGroup):
st_name = State()
st_fname = State()
step_q = State()
step_regbutton = State()
user_list1 = []
user_list2 = []
user_list3 = []
user_list4 = []
user_list5 = []
user_list6 = []
user_list7 = []
user_list8 = []
class Form(StatesGroup):
info_text = State()
user_delete = State()
ADMIN_ID_1 = 494588959 #Cะฐะฝั
ADMIN_ID_2 = 44520977 #ะะพะปั
ADMIN_ID_3 = 678623761 #ะะตะบะธั
ADMIN_ID_4 = 941730379 #ะะถะตะนัะพะฝ
ADMIN_ID_5 = 807911349 #ะะฐะนะทะฐั
ADMIN_ID_6 = 1045832338 #ะะพะปั 2 (ะะะะะะ_ะะะะะฃะก)
ADMIN = [ADMIN_ID_1,ADMIN_ID_2,ADMIN_ID_3,ADMIN_ID_4,ADMIN_ID_5,ADMIN_ID_6]
user_1 = '@nikolanext' #ะะะฏ ะขะะฅ, ะะขะ ะะะะขะะซะ (1)
user02349 = '@NikolaOdnous' #ะะะฏ ะขะะฅ, ะะขะ ะะ (1)
@db.message_handler(commands=['start'])
async def greetings(message: types.Message):
user_id = message.chat.id
reg_user(user_id,0) #ะะฐะฟะธััะฒะฐะตะผ ะฒ ะฑะฐะทั ัะตะปะพะฒะตะบะฐ ัะพ ััะฐัััะพะผ 0
m = await message.answer_photo(text.hi_photo_id, caption=text.hi_text, reply_markup=kb.the_first_go_button)
go_new = 'ะ'
for i in range(1,14):
go_new+='ะพ'
await bot.edit_message_caption(chat_id=user_id,message_id=m.message_id,caption=text.hi_text.format(go_new),reply_markup=kb.the_first_go_button)
await asyncio.sleep(0.43)
@db.message_handler(commands=['admin'])
async def vienw_adminka(message: types.Message):
if message.chat.id in ADMIN:
        button1 = KeyboardButton('๐ะกัะฐัะธััะธะบะฐ ะฒัะตั ะฟะพะปัะทะพะฒะฐัะตะปะตะน')
        button2 = KeyboardButton('๐ฟะะฐะทะฐ ะดะฐะฝะฝัั')
button3 = KeyboardButton('๐ซะฃะดะฐะปะตะฝะธะต ัะตะปะพะฒ')
button4 = KeyboardButton('๐ะ ะฐัััะปะบะฐ ะฑะตัะฟะปะฐัะฝะธะบะฐะผ')
button5 = KeyboardButton('๐ฐะ ะฐัััะปะบะฐ ะฟะปะฐัะฝะธะบะฐะผ')
markup3 = ReplyKeyboardMarkup(resize_keyboard=True)
markup3 = markup3.add(button1)
markup3 = markup3.add(button2,button3)
markup3 = markup3.add(button4,button5)
await bot.send_message(chat_id=message.chat.id,text='ะัะบัััะฐ ะฐะดะผะธะฝะบะฐ ๐',reply_markup=markup3)
@db.message_handler(state=Form.user_delete,content_types=['video','voice','photo','video_note','file','document','text'])
async def delete_user(message: types.Message, state: FSMContext):
try:
user_id = message.forward_from.id
send_status_no_rassilka(user_id)
except Exception as e:
print(e)
markup = types.InlineKeyboardMarkup()
bat_otmena12 = types.InlineKeyboardButton(text='ะัะนัะธ ะธะท ัะตะถะธะผะฐ ัะดะฐะปะตะฝะธั',callback_data='exit_del')
markup.add(bat_otmena12)
await message.answer('ะะพะปัะทะพะฒะฐัะตะปั ัะดะฐะปัะฝ ๐ฉธ',reply_markup=markup)
@db.message_handler(state=Form.info_text, content_types=['text', 'photo', 'video_note', 'video', 'voice'])
async def send_mailing_text(message: types.Message, state: FSMContext):
    if message.text == 'ะพัะผะตะฝะฐ':
        await state.finish()
        await message.answer('ะัะผะตะฝะตะฝะพ')
        return
if message.text or message.photo or message.video:
for user_id in datebase.mailing_user_id():
if message.text and message.photo:
await bot.send_photo(user_id[1], message.photo[2].file_id, caption=message.text)
elif message.text and message.video:
await bot.send_video(user_id[1], message.video.file_id, caption=message.text)
elif message.photo:
await bot.send_photo(user_id[1], message.photo[2].file_id)
elif message.video:
await bot.send_video(user_id[1], message.video.file_id)
elif message.text:
await bot.send_message(user_id[1], message.text)
elif message.video_note:
for user_id in datebase.mailing_user_id():
await bot.send_video_note(user_id[1], message.video_note.file_id)
elif message.voice:
for user_id in datebase.mailing_user_id():
await bot.send_voice(user_id[1], message.voice.file_id)
await message.answer('ะ ะฐัััะปะบะฐ ะฟัะพะธะทะฒะตะดะตะฝะฐ.')
await message.answer(f'ะ ะฐัััะปะบั ะฟะพะปััะธะปะธ {datebase.count_string2()} ะธะท {datebase.count_string()}')
await state.finish()
@db.callback_query_handler(lambda call: True, state = '*')
async def answer_push_inline_button(call, state: FSMContext):
global user_list1
user_id = call.message.chat.id # ะฎะะะ ะงะะะ
status = (cheack_status(user_id))[0]
if status == 1:
username_contact = user_1
else:
username_contact = user02349
if call.data == 'go_button':
await state.finish()
        await bot.send_message(chat_id=call.message.chat.id,text='ะัะผะตะฝะตะฝะพ. ะะบะปััะตะฝ ะพะฑััะฝัะน ัะตะถะธะผโ')
if call.data == 'go_button':
await call.message.answer_video_note(text.video_note_id, reply_markup=kb.pass_the_five_question)
elif call.data == 'five_question':
await call.message.answer_animation(text.the_first_question_gif_id, caption=text.the_first_question_text,
reply_markup=kb.first_question_buttons)
elif call.data == 'first_question':
await call.message.delete()
await call.message.answer_animation(text.the_second_question_gif_id, caption=text.the_second_question_text,
reply_markup=kb.second_question_buttons)
elif call.data == 'second_question':
await call.message.delete()
await call.message.answer_animation(text.the_third_question_gif_id, caption=text.the_third_question_text,
reply_markup=kb.third_question_buttons)
elif call.data == 'third_question':
await call.message.delete()
await call.message.answer_animation(text.the_fourth_question_gif_id, caption=text.the_fourth_question_text,
reply_markup=kb.fourth_question_buttons)
elif call.data[:15] == 'fourth_question':
if call.data == 'fourth_question1': # ะงะตะปะพะฒะตะบ ะธะท ัะตะบะปะฐะผั ะธะฝัั
obnova_members_status(call.message.chat.id, 1)
if call.data == 'fourth_question2': # ะงะตะปะพะฒะตะบ ะธะท ะขะธะบะขะพะบะฐ
obnova_members_status(call.message.chat.id, 2)
if call.data == 'fourth_question3': # ะงะตะปะพะฒะตะบ ะธะท ะ ะธะปัะฐ
obnova_members_status(call.message.chat.id, 3)
if call.data == 'fourth_question4': # ะััะณะพะต
obnova_members_status(call.message.chat.id, 4)
await call.message.delete()
await call.message.answer_animation(text.the_five_question_gif_id, caption=text.the_five_question_text,
reply_markup=kb.five_question_buttons)
elif call.data == 'five_questions':
await call.message.delete()
await call.message.answer('๐บ๐ปะ ะฒะพั ะธ ะพะฑะตัะฐะฝะฝัะน ะฑะพะฝัั ๐บ๐ป')
await call.message.answer_document(text.bonus_dock_file_id)
await call.message.answer_photo(text.finished_text_file_id, caption=text.finished_text, reply_markup=kb.finished_text_button)
elif call.data == 'go_2':
await call.message.answer_video('BAACAgIAAxkBAAMmYV1W4yEZI3tZMuEFt7TzpRXmTtMAAskPAALV8iFKl-Icg7tg87IhBA')
# await call.message.answer('ะะตัะฒะพะต ะฒะธะดะตะพ')
await asyncio.sleep(78)#60
await call.message.answer(text='ะะผัะบะฐะน ะบะฝะพะฟะบั ะฟะพะบะฐ ะฝะต ัะฑะตะถะฐะปะฐ๐', reply_markup=kb.further_button)
elif call.data == 'further':
await call.message.answer_video('BAACAgIAAxkBAAMoYV1XarMS_OoOn_Vwr3oJ9liOtPkAAogVAALalClKikrq4brnf-0hBA')
# await call.message.answer('ะัะพัะพะต ะฒะธะดะตะพ')
await asyncio.sleep(136)#136
await call.message.answer(text='ะะผัะบะฐะน ะบะฝะพะฟะบั ะฟะพะบะฐ ะฝะต ัะฑะตะถะฐะปะฐ๐', reply_markup=kb.futher2_button)
elif call.data == 'further2':
await call.message.answer_video(f'BAACAgIAAxkBAAMqYV1XrtFkA-VnlCNrx2scKWuU6pUAAp0TAAKBgcBKIdx8Ive5nrYhBA')
# await call.message.answer('ะขัะตััะต ะฒะธะดะตะพ')
await asyncio.sleep(205)#205
await call.message.answer(text.last_text.format(username_contact))
user_id = call.message.chat.id
username = call.message.from_user.username
# ะัะปะธ ัะฐัััะปะบะฐ ัะตะปะพะฒะตะบ ะฝะต ัะพััะพะธั ะฝะธ ะฒ ะพะดะฝะพะน ะธะท ะณััะฟะฟะต, ัะพ ะดะพะฑะฐะปะฒัะตะผ ะตะณะพ ะฒ ะฟะตัะฒัั
if (user_id not in user_list1) and (user_id not in user_list2) and (user_id not in user_list3) and (user_id not in user_list4) and (user_id not in user_list5) and (user_id not in user_list6) and (user_id not in user_list7) and (user_id not in user_list8):
#ะะกะะ ะงะะะ ะะะขะฃ ะะ ะ ะะะะะ ะะ ะะ ะฃะะ ะะะฏ ะะ ะะะ ะะะ, ะขะ:
user_list1.append(user_id)
try:
datebase.records_of_mailing_users(username, user_id)
except Exception as e:
print(e)
@db.message_handler(content_types=['text', 'photo', 'video_note', 'animation', 'document', 'video','file'])
async def all_message(message: types.Message, state: FSMContext):
# try:
# print(message.video.file_id)
# except:
# pass
#
# try:
# print(message.photo[2].file_id)
# except:
# pass
#
# try:
# print(message.video_note.file_id)
# except:
# pass
#
# try:
# print(message.animation.file_id)
# except:
# pass
#
# try:
# print(message.document.file_id)
# except:
# pass
#
if message.chat.id in ADMIN:
        if message.text == '๐ะกัะฐัะธััะธะบะฐ ะฒัะตั ะฟะพะปัะทะพะฒะฐัะตะปะตะน':
all = info_members()# ะัะตะณะพ ะฟะพะปัะทะพะฒะฐัะตะปะตะน
s1 = count_member_in_status(1)
s2 = count_member_in_status(2)
s3 = count_member_in_status(3)
s4 = count_member_in_status(4)
s0 = count_member_in_status(0) #ะัะต ะฝะต ะฒัะฑัะฟะฐะปะธ ะพัะฒะตั
await bot.send_message(chat_id=message.chat.id,text=f"""<b>๐ฅะัะตะณะพ ะฟะพะปัะทะพะฒะฐัะตะปะตะน: {all}</b>
1๏ธโฃะะพะปัะทะพะฒะฐัะตะปะตะน ะธะท ะธะฝััั: {s1}
2๏ธโฃะะพะปัะทะพะฒะฐัะตะปะตะน ะธะท ะขะธะบะขะพะบะฐ : {s2}
3๏ธโฃะะพะปัะทะพะฒะฐัะตะปะตะน ะธะท ะ ะธะปัะฐ: {s3}
4๏ธโฃะะพะปัะทะพะฒะฐัะตะปะตะน ะธะท ยซะััะณะพะณะพยป: {s4}
๐กะัะต ะฝะต ะฒัะฑัะฐะปะธ ะพัะฒะตั: {s0}""",parse_mode='html')
        if message.text == '๐ฟะะฐะทะฐ ะดะฐะฝะฝัั':
await message.answer_document(open("server.db", "rb"))
if message.text == '๐ซะฃะดะฐะปะตะฝะธะต ัะตะปะพะฒ':
await message.answer('๐บะะบะปััะตะฝ ัะตะถะธะผ ัะดะฐะปะตะฝะธะต ัะตะปะพะฒ \n'
                                 '๐ะะปั ะฒััะพะดะฐ, ะฝะฐะฟะธัะธ "ะพัะผะตะฝะฐ"')
await Form.user_delete.set()
if message.text == '๐ะ ะฐัััะปะบะฐ ะฑะตัะฟะปะฐัะฝะธะบะฐะผ': #ะ ะฐัััะปะบะฐ ะฟะพ ะณััะฟะฟะต 2,3,4,0
murkap = types.InlineKeyboardMarkup()
bat0 = types.InlineKeyboardButton(text='ะะขะะะะ', callback_data='otemena')
murkap.add(bat0)
await bot.send_message(message.chat.id, 'ะะตัะตัะปะธ ะผะฝะต ัะถะต ะณะพัะพะฒัะน ะฟะพัั ะธ ั ัะฐะทะพัะปั ะตะณะพ ะฒัะตะผ ัะทะตัะฐะผ',
reply_markup=murkap)
await st_reg.step_q.set()
await state.update_data(type_rassilki = 2340) # ะขะะ ัะฐััะปัะบะธ ะฟะพ 2,3,4,0 ะณััะฟะฟะต
if message.text == '๐ฐะ ะฐัััะปะบะฐ ะฟะปะฐัะฝะธะบะฐะผ': #ะ ะฐัััะปะบะฐ ะฟะพ ะณััะฟะฟะต 1
murkap = types.InlineKeyboardMarkup()
bat0 = types.InlineKeyboardButton(text='ะะขะะะะ', callback_data='otemena')
murkap.add(bat0)
await bot.send_message(message.chat.id, 'ะะตัะตัะปะธ ะผะฝะต ัะถะต ะณะพัะพะฒัะน ะฟะพัั ะธ ั ัะฐะทะพัะปั ะตะณะพ ะฒัะตะผ ัะทะตัะฐะผ',
reply_markup=murkap)
await st_reg.step_q.set()
await state.update_data(type_rassilki=1) # ะขะะ ัะฐััะปัะบะธ ะฟะพ ะฟะตัะฒะพะน ะณััะฟะฟะต
#ะะ ะะะ ะะ
async def send_to_a_certain_hour():
while True:
offset = datetime.timezone(datetime.timedelta(hours=3))
now_time = datetime.datetime.now(offset)
if now_time.hour == 16:
for user7 in user_list7:
status = (cheack_status(user7))[0]
if status == 1:
username_contact = user_1
else:
username_contact = user02349
if status != 9:
await bot.send_message(user7, text=text.dayly_text7.format(username_contact))
user_list7.remove(user7)
user_list8.append(user7)
for user6 in user_list6:
status = (cheack_status(user6))[0]
if status == 1:
username_contact = user_1
else:
username_contact = user02349
if status != 9:
await bot.send_photo(user6, photo=text.dayly_photo_id6, caption=text.dayly_text6.format(username_contact))
user_list7.append(user6)
user_list6.remove(user6)
for user5 in user_list5:
status = (cheack_status(user5))[0]
if status == 1:
username_contact = user_1
else:
username_contact = user02349
if status != 9:
await bot.send_photo(user5, photo=text.dayly_photo_id5, caption=text.dayly_text5.format(username_contact))
user_list6.append(user5)
user_list5.remove(user5)
for user4 in user_list4:
status = (cheack_status(user4))[0]
if status == 1:
username_contact = user_1
else:
username_contact = user02349
if status != 9:
await bot.send_photo(user4, photo=text.dayly_photo_id4, caption=text.dayly_text4.format(username_contact))
user_list5.append(user4)
user_list4.remove(user4)
for user3 in user_list3:
status = (cheack_status(user3))[0]
if status == 1:
username_contact = user_1
else:
username_contact = user02349
if status != 9:
await bot.send_photo(user3, photo=text.dayly_photo_id3, caption=text.dayly_text3.format(username_contact))
user_list4.append(user3)
user_list3.remove(user3)
for user2 in user_list2:
status = (cheack_status(user2))[0]
if status == 1:
username_contact = user_1
else:
username_contact = user02349
if status != 9:
await bot.send_message(user2, text=text.dayly_text2.format(username_contact))
user_list3.append(user2)
user_list2.remove(user2)
for user1 in user_list1:
status = (cheack_status(user1))[0]
if status == 1:
username_contact = user_1
else:
username_contact = user02349
if status != 9:
await bot.send_photo(user1, photo=text.dayly_photo_id1, caption=text.dayly_text1.format(username_contact))
user_list2.append(user1)
user_list1.remove(user1)
await asyncio.sleep(3600)
async def shutdown(dispatcher: Dispatcher):
await dispatcher.storage.close()
await dispatcher.storage.wait_closed()
@db.callback_query_handler(text='otemena',state='*')
async def otmena_12(call: types.callback_query, state: FSMContext):
await bot.send_message(call.message.chat.id, 'ะัะผะตะฝะตะฝะพ')
await state.finish()
try:
await bot.delete_message(call.message.chat.id,message_id=call.message.message_id)
except: pass
@db.message_handler(state=st_reg.step_q,content_types=['text','photo','video','video_note','animation','voice','sticker']) # ะัะตะดะพัะผะพัั ะฟะพััะฐ
async def redarkt_post(message: types.Message, state: FSMContext):
await st_reg.st_name.set()
murkap = types.InlineKeyboardMarkup()
bat0 = types.InlineKeyboardButton(text='ะะขะะะะ', callback_data='otemena')
bat1 = types.InlineKeyboardButton(text='ะ ะะะะกะะะขะฌ', callback_data='send_ras')
bat2 = types.InlineKeyboardButton(text='ะะพะฑะฐะฒะธัั ะบะฝะพะฟะบะธ', callback_data='add_but')
murkap.add(bat1)
murkap.add(bat2)
murkap.add(bat0)
await message.copy_to(chat_id=message.chat.id)
q = message
await state.update_data(q=q)
await bot.send_message(chat_id=message.chat.id,text='ะะพัั ัะตะนัะฐั ะฒัะณะปัะดะธั ัะฐะบ ๐',reply_markup=murkap)
# ะะะกะขะ ะะะะ ะะะะะะ
@db.callback_query_handler(text='add_but',state=st_reg.st_name) # ะะพะฑะฐะฒะปะตะฝะธะต ะบะฝะพะฟะพะบ
async def addbutton(call: types.callback_query, state: FSMContext):
await bot.delete_message(chat_id=call.message.chat.id,message_id=call.message.message_id)
await bot.send_message(call.message.chat.id,text='ะัะฟัะฐะฒะปัะน ะผะฝะต ะบะฝะพะฟะบะธ ะฟะพ ะฟัะธะฝัะธะฟั Controller Bot\n\n'
'ะะพะบะฐ ะผะพะถะฝะพ ะดะพะฑะฐะฒะธัั ัะพะปัะบะพ ะพะดะฝั ะบะฝะพะฟะบั')
await st_reg.step_regbutton.set()
@db.message_handler(state=st_reg.step_regbutton,content_types=['text']) # ะขะตะบัั ะบะฝะพะฟะพะบ ะฒ ะฝะตัะพัะผะฐัะต
async def redarkt_button(message: types.Message, state: FSMContext):
arr2 = message.text.split('-')
k = -1 # ะฃะฑะธัะฐะตะผ ะฟัะพะฑะตะปั ะธะท ะบะฝะพะฟะพะบ
for i in arr2:
k+=1
if i[0] == ' ':
if i[-1] == ' ':
arr2[k] = (i[1:-1])
else:
arr2[k] = (i[1:])
else:
if i[-1] == ' ':
arr2[0] = (i[:-1])
else:
pass
# arr2 - ะะฐััะธะฒ ั ะดะฐะฝะฝัะผะธ
try:
murkap = types.InlineKeyboardMarkup() #ะะปะฐะฒะธะฐัััะฐ ั ะบะฝะพะฟะบะฐะผะธ
bat = types.InlineKeyboardButton(text= arr2[0], url=arr2[1])
murkap.add(bat)
data = await state.get_data()
mess = data['q'] # ID ัะพะพะฑัะตะฝะธั ะดะปั ัะฐัััะปะบะธ
await bot.copy_message(chat_id=message.chat.id, from_chat_id=message.chat.id,message_id=mess.message_id,reply_markup=murkap)
await state.update_data(text_but =arr2[0]) # ะะฑะฝะพะฒะปะตะฝะธะต ะกะตัะฐ
await state.update_data(url_but=arr2[1]) # ะะฑะฝะพะฒะปะตะฝะธะต ะกะตัะฐ
murkap2 = types.InlineKeyboardMarkup() # ะะปะฐะฒะธะฐัััะฐ - ะผะตะฝั
bat0 = types.InlineKeyboardButton(text='ะะขะะะะ', callback_data='otemena')
bat1 = types.InlineKeyboardButton(text='ะ ะะะะกะะะขะฌ', callback_data='send_ras')
murkap2.add(bat1)
murkap2.add(bat0)
await bot.send_message(chat_id=message.chat.id,text='ะขะตะฟะตัั ัะฒะพะน ะฟะพัั ะฒัะณะปัะดะธั ัะฐะบโ',reply_markup=murkap2)
except:
await bot.send_message(chat_id=message.chat.id,text='ะัะธะฑะบะฐ. ะัะผะตะฝะตะฝะพ')
await state.finish()
# ะะะะะฆ ะะะกะขะ ะะะะ ะะะะะะ
@db.callback_query_handler(text='send_ras',state="*") # ะ ะฐัััะปะบะฐ
async def fname_step(call: types.callback_query, state: FSMContext):
await bot.delete_message(chat_id=call.message.chat.id,message_id=call.message.message_id)
data = await state.get_data()
mess = data['q'] # ะกะพะพะฑัะตะฝะธั ะดะปั ัะฐัััะปะบะธ
type_rass = data['type_rassilki']
murkap = types.InlineKeyboardMarkup() # ะะปะฐะฒะธะฐัััะฐ ั ะบะฝะพะฟะบะฐะผะธ
    try: #ะััะฐะตะผัั ะดะพะฑะฐะฒะธัั ะบะฝะพะฟะบะธ. ะัะปะธ ะธั ะฝะตัั ะพััะฐะฒะปัะตะผ ะบะปะฐะฒั ะฟัััะพะน
text_but = data['text_but']
url_but = data['url_but']
bat = types.InlineKeyboardButton(text=text_but, url=url_but)
murkap.add(bat)
except: pass
db = sqlite3.connect('server.db')
sql = db.cursor()
await state.finish()
if type_rass == 1:
users = sql.execute(f"SELECT id FROM user_time WHERE status_active = 1").fetchall()
else:
users = sql.execute(f"SELECT id FROM user_time WHERE status_active = 0 or status_active = 2 or status_active = 3 or status_active =4").fetchall()
bad = 0
good = 0
await bot.send_message(call.message.chat.id, f"<b>ะัะตะณะพ ะฟะพะปัะทะพะฒะฐัะตะปะตะน: <code>{len(users)}</code></b>\n\n<b>ะ ะฐััะปัะบะฐ ะฝะฐัะฐัะฐ!</b>",
parse_mode="html")
for i in users:
await asyncio.sleep(1)
try:
await mess.copy_to(i[0],reply_markup=murkap)
good += 1
except:
bad += 1
await bot.send_message(
call.message.chat.id,
"<u>ะ ะฐัััะปะบะฐ ะพะบะพะฝัะตะฝะฐ\n\n</u>"
f"<b>ะัะตะณะพ ะฟะพะปัะทะพะฒะฐัะตะปะตะน:</b> <code>{len(users)}</code>\n"
f"<b>ะัะฟัะฐะฒะปะตะฝะพ:</b> <code>{good}</code>\n"
f"<b>ะะต ัะดะฐะปะพัั ะพัะฟัะฐะฒะธัั:</b> <code>{bad}</code>",
parse_mode="html"
)
#########################################################
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.create_task(send_to_a_certain_hour())
executor.start_polling(db, on_shutdown=shutdown,skip_updates=True)
|
pytera895143242/nasar3rep
|
nasar3bot.py
|
nasar3bot.py
|
py
| 23,352 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
1363896081
|
import datetime
from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union
from attrs import define as _attrs_define
from attrs import field as _attrs_field
from dateutil.parser import isoparse
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.actor_v2_response_body import ActorV2ResponseBody
from ..models.incident_status_v2_response_body import IncidentStatusV2ResponseBody
from ..models.severity_v2_response_body import SeverityV2ResponseBody
T = TypeVar("T", bound="IncidentUpdateV2ResponseBody")
@_attrs_define
class IncidentUpdateV2ResponseBody:
"""
Example:
{'created_at': '2021-08-17T13:28:57.801578Z', 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'incident_id':
'01FCNDV6P870EA6S7TK1DSYDG0', 'message': "We're working on a fix, hoping to ship in the next 30 minutes",
'new_incident_status': {'category': 'triage', 'created_at': '2021-08-17T13:28:57.801578Z', 'description':
"Impact has been **fully mitigated**, and we're ready to learn from this incident.", 'id':
'01FCNDV6P870EA6S7TK1DSYD5H', 'name': 'Closed', 'rank': 4, 'updated_at': '2021-08-17T13:28:57.801578Z'},
'new_severity': {'created_at': '2021-08-17T13:28:57.801578Z', 'description': 'Issues with **low impact**.',
'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1, 'updated_at': '2021-08-17T13:28:57.801578Z'},
'updater': {'api_key': {'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'My test API key'}, 'user': {'email':
'[email protected]', 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Lisa Karlin Curtis', 'role': 'viewer',
'slack_user_id': 'U02AYNF2XJM'}}}
Attributes:
created_at (datetime.datetime): When the update was created Example: 2021-08-17T13:28:57.801578Z.
id (str): Unique identifier for this incident update Example: 01FCNDV6P870EA6S7TK1DSYDG0.
incident_id (str): The incident this update relates to Example: 01FCNDV6P870EA6S7TK1DSYDG0.
new_incident_status (IncidentStatusV2ResponseBody): Example: {'category': 'triage', 'created_at':
'2021-08-17T13:28:57.801578Z', 'description': "Impact has been **fully mitigated**, and we're ready to learn
from this incident.", 'id': '01FCNDV6P870EA6S7TK1DSYD5H', 'name': 'Closed', 'rank': 4, 'updated_at':
'2021-08-17T13:28:57.801578Z'}.
updater (ActorV2ResponseBody): Example: {'api_key': {'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'My test API
key'}, 'user': {'email': '[email protected]', 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Lisa Karlin Curtis',
'role': 'viewer', 'slack_user_id': 'U02AYNF2XJM'}}.
message (Union[Unset, str]): Message that explains the context behind the update Example: We're working on a
fix, hoping to ship in the next 30 minutes.
new_severity (Union[Unset, SeverityV2ResponseBody]): Example: {'created_at': '2021-08-17T13:28:57.801578Z',
'description': 'Issues with **low impact**.', 'id': '01FCNDV6P870EA6S7TK1DSYDG0', 'name': 'Minor', 'rank': 1,
'updated_at': '2021-08-17T13:28:57.801578Z'}.
"""
created_at: datetime.datetime
id: str
incident_id: str
new_incident_status: "IncidentStatusV2ResponseBody"
updater: "ActorV2ResponseBody"
message: Union[Unset, str] = UNSET
new_severity: Union[Unset, "SeverityV2ResponseBody"] = UNSET
additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
created_at = self.created_at.isoformat()
id = self.id
incident_id = self.incident_id
new_incident_status = self.new_incident_status.to_dict()
updater = self.updater.to_dict()
message = self.message
new_severity: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.new_severity, Unset):
new_severity = self.new_severity.to_dict()
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"created_at": created_at,
"id": id,
"incident_id": incident_id,
"new_incident_status": new_incident_status,
"updater": updater,
}
)
if message is not UNSET:
field_dict["message"] = message
if new_severity is not UNSET:
field_dict["new_severity"] = new_severity
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
from ..models.actor_v2_response_body import ActorV2ResponseBody
from ..models.incident_status_v2_response_body import (
IncidentStatusV2ResponseBody,
)
from ..models.severity_v2_response_body import SeverityV2ResponseBody
d = src_dict.copy()
created_at = isoparse(d.pop("created_at"))
id = d.pop("id")
incident_id = d.pop("incident_id")
new_incident_status = IncidentStatusV2ResponseBody.from_dict(d.pop("new_incident_status"))
updater = ActorV2ResponseBody.from_dict(d.pop("updater"))
message = d.pop("message", UNSET)
_new_severity = d.pop("new_severity", UNSET)
new_severity: Union[Unset, SeverityV2ResponseBody]
if isinstance(_new_severity, Unset):
new_severity = UNSET
else:
new_severity = SeverityV2ResponseBody.from_dict(_new_severity)
incident_update_v2_response_body = cls(
created_at=created_at,
id=id,
incident_id=incident_id,
new_incident_status=new_incident_status,
updater=updater,
message=message,
new_severity=new_severity,
)
incident_update_v2_response_body.additional_properties = d
return incident_update_v2_response_body
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
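# A minimal round-trip sketch (hypothetical payload, not part of the generated
# code): parse an API response dict, then serialize it back.
#   body = IncidentUpdateV2ResponseBody.from_dict(payload)
#   assert body.to_dict() == payload  # holds when all optional fields are set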
|
expobrain/python-incidentio-client
|
incident_io_client/models/incident_update_v2_response_body.py
|
incident_update_v2_response_body.py
|
py
| 6,464 |
python
|
en
|
code
| 4 |
github-code
|
6
|
16119646284
|
# Language : Python, (pass/fail) : 1/1, Memory : 30840KB, Time : 72ms
C = int(input())
for i in range(C):
N = list(map(int, input().split()))
avg = sum(N[1:]) / N[0]
count = 0
for i in N[1:]:
if (avg < i):
count += 1
print("{:.3f}%".format((count / N[0]) * 100))
|
sujeong11/Algorithm
|
๊ธฐ์ด/1์ฐจ์ ๋ฐฐ์ด/4344_ํ๊ท ์ ๋๊ฒ ์ง.py
|
4344_ํ๊ท ์ ๋๊ฒ ์ง.py
|
py
| 315 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
10381696055
|
'''
Question 8.2
'''
import math
import numpy as np
import matplotlib.pyplot as plt
## set up for initial parameters
k1 = 100
k2 = 600
k3 = 150
## define four functions of rate of changes of E, S ES and P
def fun_E (E, S, ES, P):
return -k1*E*S + (k2+k3)*ES
def fun_S (E, S, ES, P):
return -k1*E*S + k2*ES
def fun_ES (E, S, ES, P):
return k1*E*S - (k2+k3)*ES
def fun_P (E, S, ES, P):
return k3*ES
## Solve funtions using RK4 method:
def rk4_function (E, S, ES, P, n):
E_list = [E]
S_list = [S]
ES_list = [ES]
P_list = [P]
L = [0]
h = 0.0001
t = 0
while t < n:
t = t + h
L.append(t)
## first step:
E1 = fun_E(E, S, ES, P)
S1 = fun_S(E, S, ES, P)
P1 = fun_P(E, S, ES, P)
ES1 = fun_ES(E, S, ES, P)
## second step:
e2 = E + E1*h/2
s2 = S + S1*h/2
es2 = ES + ES1*h/2
p2 = P + P1*h/2
E2 = fun_E(e2, s2, es2, p2)
S2 = fun_S(e2, s2, es2, p2)
ES2 = fun_ES(e2, s2, es2, p2)
P2 = fun_P(e2, s2, es2, p2)
## third step:
e3 = E + E2*h/2
s3 = S + S2*h/2
es3 = ES + ES2*h/2
p3 = P + P2*h/2
E3 = fun_E(e3, s3, es3, p3)
S3 = fun_S(e3, s3, es3, p3)
ES3 = fun_ES(e3, s3, es3, p3)
P3 = fun_P(e3, s3, es3, p3)
        ## fourth step:
        e4 = E + E3*h
        s4 = S + S3*h
        es4 = ES + ES3*h
        p4 = P + P3*h
E4 = fun_E(e4, s4, es4, p4)
S4 = fun_S(e4, s4, es4, p4)
ES4 = fun_ES(e4, s4, es4, p4)
P4 = fun_P(e4, s4, es4, p4)
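        # Combine the four slopes with the classical RK4 weights:
        # y_{n+1} = y_n + (h/6) * (k1 + 2*k2 + 2*k3 + k4)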
E = E + (E1 + 2*E2 + 2*E3 + E4)* h/6
S = S + (S1 + 2*S2 + 2*S3 + S4)* h/6
ES = ES + (ES1 + 2*ES2 + 2*ES3 + ES4)* h/6
P = P + (P1 + 2*P2 + 2*P3 + P4)* h/6
E_list.append(E)
S_list.append(S)
ES_list.append(ES)
P_list.append(P)
return (E_list, S_list, ES_list, P_list, L)
## the inital parameters are given following:
E0 = 1
S0 = 10
ES0 = 0
P0 = 0
n = 0.35
## we can get the result as following:
results = rk4_function (E0, S0, ES0, P0, n)
## Plot the result:
E_result = results[0]
S_result = results[1]
ES_result = results[2]
P_result = results[3]
T = results[4]
plt.figure()
plt.plot(T, E_result, label = "[E]")
plt.plot(T, S_result, label = "[S]")
plt.plot(T, ES_result, label = "[ES]")
plt.plot(T, P_result, label = "[P]")
plt.title("Rate of Changes of E, S, ES, and P")
plt.legend()
plt.xlabel("Time")
plt.ylabel("Concentration")
plt.show()
'''
Question 8.3
'''
## The velocity of the enzymatic reaction
V = []
k3 = 150
for i in ES_result:
v = i*k3
V.append (v)
S = S_result
## To show the result of Vmax
print(max(V))
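## Sanity check: for Michaelis-Menten kinetics Vmax = k3 * [E]_total,
## i.e. 150 * 1 = 150 here, so max(V) should approach but stay below 150.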
## Plot the result
plt.figure()
plt.plot(S, V)
plt.xlabel("Concentration of S")
plt.ylabel("Velocity V")
plt.show()
|
Sguwj/NTU-Test-Question
|
Answer to Question 2/Code for Question 2.py
|
Code for Question 2.py
|
py
| 3,042 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71969383869
|
from kfp.components import InputPath, OutputPath
def SchemaGen(
statistics_path: InputPath('ExampleStatistics'),
schema_path: OutputPath('Schema'),
infer_feature_shape: bool = None, # ? False
):
"""Constructs a SchemaGen component.
Args:
statistics: A Channel of `ExampleStatistics` type (required if spec is not
passed). This should contain at least a `train` split. Other splits are
currently ignored. _required_
infer_feature_shape: Boolean value indicating
whether or not to infer the shape of features. If the feature shape is
not inferred, downstream Tensorflow Transform component using the schema
will parse input as tf.SparseTensor.
Returns:
output: Output `Schema` channel for schema result.
"""
from tfx.components.schema_gen.component import SchemaGen as component_class
#Generated code
import json
import os
import tensorflow
from google.protobuf import json_format, message
from tfx.types import Artifact, channel_utils, artifact_utils
arguments = locals().copy()
component_class_args = {}
for name, execution_parameter in component_class.SPEC_CLASS.PARAMETERS.items():
argument_value_obj = argument_value = arguments.get(name, None)
if argument_value is None:
continue
parameter_type = execution_parameter.type
if isinstance(parameter_type, type) and issubclass(parameter_type, message.Message): # Maybe FIX: execution_parameter.type can also be a tuple
argument_value_obj = parameter_type()
json_format.Parse(argument_value, argument_value_obj)
component_class_args[name] = argument_value_obj
for name, channel_parameter in component_class.SPEC_CLASS.INPUTS.items():
artifact_path = arguments[name + '_path']
if artifact_path:
artifact = channel_parameter.type()
artifact.uri = artifact_path + '/' # ?
if channel_parameter.type.PROPERTIES and 'split_names' in channel_parameter.type.PROPERTIES:
# Recovering splits
subdirs = tensorflow.io.gfile.listdir(artifact_path)
artifact.split_names = artifact_utils.encode_split_names(sorted(subdirs))
component_class_args[name] = channel_utils.as_channel([artifact])
component_class_instance = component_class(**component_class_args)
input_dict = {name: channel.get() for name, channel in component_class_instance.inputs.get_all().items()}
output_dict = {name: channel.get() for name, channel in component_class_instance.outputs.get_all().items()}
exec_properties = component_class_instance.exec_properties
# Generating paths for output artifacts
for name, artifacts in output_dict.items():
base_artifact_path = arguments[name + '_path']
# Are there still cases where output channel has multiple artifacts?
for idx, artifact in enumerate(artifacts):
subdir = str(idx + 1) if idx > 0 else ''
artifact.uri = os.path.join(base_artifact_path, subdir) # Ends with '/'
print('component instance: ' + str(component_class_instance))
#executor = component_class.EXECUTOR_SPEC.executor_class() # Same
executor = component_class_instance.executor_spec.executor_class()
executor.Do(
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties,
)
#return (output_path,)
if __name__ == '__main__':
import kfp
kfp.components.func_to_container_op(
SchemaGen,
base_image='tensorflow/tfx:0.21.4',
output_component_file='component.yaml'
)
|
kubeflow/kfp-tekton-backend
|
components/tfx/SchemaGen/component.py
|
component.py
|
py
| 3,687 |
python
|
en
|
code
| 8 |
github-code
|
6
|
24957689753
|
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
adjList = [list() for _ in range(numCourses)]
degree = [0] * numCourses
for i in range(len(prerequisites)):
adjList[prerequisites[i][1]].append(prerequisites[i][0])
degree[prerequisites[i][0]] += 1
count = 0
queue = []
for i in range(len(degree)):
if degree[i] == 0:
queue.append(i)
while queue:
d = queue.pop(0)
count += 1
for j in adjList[d]:
degree[j] -= 1
if degree[j] == 0:
queue.append(j)
return count == numCourses
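# Usage sketch (hypothetical inputs) for this Kahn's-algorithm check:
#   Solution().canFinish(2, [[1, 0]])          -> True   (take 0, then 1)
#   Solution().canFinish(2, [[1, 0], [0, 1]])  -> False  (cyclic prerequisites)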
|
MrBmikhael/LeetCodeProblems
|
207-course-schedule/207-course-schedule.py
|
207-course-schedule.py
|
py
| 867 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41935110854
|
import sys
input = sys.stdin.readline
def promising(row):
    # A queen placement fails if it repeats a column or lies on a diagonal with an earlier row.
for i in range(row):
if board[row] == board[i] or row - i == abs(board[row] - board[i]):
return 0
return 1
def dfs(row):
global ans
    # Traversing past the last row means a complete placement was found.
if row == N:
ans += 1
return
for i in range(N):
board[row] = i
if promising(row):
dfs(row + 1)
N = int(input())
ans = 0
board = [0] * N
dfs(0)
print(ans)
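# For reference: input N = 8 prints 92, the classic eight-queens solution count.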
|
hyungJinn/pythonStudy
|
BAEKJOON/Backtracking/9663_N-Queen.py
|
9663_N-Queen.py
|
py
| 609 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
73816206588
|
def int_trapezoidal(function, a, b, m):
    '''Trapezoidal rule for evaluating a definite integral.'''
    h = (b - a)/m #integration step
    f_a = function(a) #function value at the left endpoint
    f_b = function(b) #function value at the right endpoint
    rez = (f_a + f_b)/2
    k = 1
    while k < m:
        rez += function(a + k*h) #interior-point sum
        k += 1
    return rez*h
def int_Simpson(function, a, b, m):
    '''Simpson's rule for evaluating a definite integral.'''
    h = (b - a)/m #integration step
    f_a = function(a) #function value at the left endpoint
    f_b = function(b) #function value at the right endpoint
    rez = f_a + f_b
    k = 1
    while k < m:
        if k % 2 == 0:
            rez += 2*function(a + k*h) #even k
        else:
            rez += 4*function(a + k*h) #odd k
        k += 1
    return rez*h/3
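# A quick sanity check (an assumed usage example, not part of the original
# module): integrate sin(x) over [0, pi], whose exact value is 2; Simpson's
# rule should land much closer than the trapezoidal rule for the same m.
if __name__ == '__main__':
    from math import sin, pi
    print(int_trapezoidal(sin, 0, pi, 100))  # ~1.99984
    print(int_Simpson(sin, 0, pi, 100))      # ~2.00000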
|
FabjanJozic/MMF3
|
Predavanje8_integracija/integral.py
|
integral.py
|
py
| 868 |
python
|
hr
|
code
| 0 |
github-code
|
6
|
15365101221
|
import io
# For Data Lake
from hdfs import InsecureClient
# For Data Warehouse
from pyhive import hive
import pandas as pd
df_source = pd.read_csv(r'output/news.csv')
df_source['News'] = df_source['News'].str.replace(r',', '')
# Define HDFS interface
hdfs_interface = InsecureClient('http://localhost:50070')
hdfs_interface.list('/')
# Delete old data
hdfs_interface.delete('/wqd7005/raw_news', recursive=True, skip_trash=True)
# Create hdfs directories to store data
hdfs_interface.makedirs('/wqd7005')
hdfs_interface.makedirs('/wqd7005/raw_news')
hdfs_interface.list('/wqd7005')
# Write data to raw_news directory
# text buffer
s_buf = io.StringIO()
# saving a data frame to a buffer (same as with a regular file):
df_source.to_csv(s_buf, index=False, header=False)
hdfs_interface.write('/wqd7005/raw_news/000000_0',
data=s_buf.getvalue(),
overwrite=True,
encoding = 'utf-8')
# Check if file has been written correctly
with hdfs_interface.read('/wqd7005/raw_news/000000_0', length=1024) as reader:
content = reader.read()
content
# Create Hive Cursor
host_name = "localhost"
port = 10000
conn = hive.Connection(host=host_name, port=port, auth='NOSASL')
cur = conn.cursor()
# DATE
# Create External table for raw_news
cur.execute("DROP TABLE IF EXISTS raw_news")
cur.execute("CREATE EXTERNAL TABLE IF NOT EXISTS \
raw_news (tdate STRING, \
news STRING) \
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' \
STORED AS TEXTFILE LOCATION '/wqd7005/raw_news'")
# Check if warehousing successful:
cur.execute("SELECT * FROM raw_news LIMIT 10")
check=cur.fetchall()
df_check=pd.DataFrame(data=check)
|
Danesh-WQD180067/WQD7005-Group
|
data_mining/warehousing_news.py
|
warehousing_news.py
|
py
| 1,754 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14065976550
|
from django.db import models
# Create your models here.
PRIORITY = (('danger', 'high'),('info','normal'),('success','low')) # (stored value, displayed label)
class TodoModel(models.Model):
title = models.CharField(max_length=100)
memo = models.TextField()
priority = models.CharField(
max_length=50,
choices = PRIORITY
)
duedate = models.DateField()
    def __str__(self): # give each object a readable name (instead of "object1")
        return self.title # use the title as the object's display name
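# Shell usage sketch (hypothetical values; assumes migrations have been run):
#   TodoModel.objects.create(title='Write report', memo='details...',
#                            priority='info', duedate='2024-01-31')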
|
takuya2816/Todo
|
todoapp/models.py
|
models.py
|
py
| 540 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38967021451
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
import sqlite3
import scrapy
import re
from scrapy.exceptions import DropItem
class DeskZolPipeline(object):
def process_item(self, item, spider):
conn = sqlite3.connect('bizhi.db')
c = conn.cursor()
cursor = c.execute("SELECT * from bizhi where name = \'"+item['name']+'\'')
        if cursor.fetchone(): # deduplicate: drop items already stored
raise DropItem('duplicate item : %s' % item['name'])
else:
img_urls = ';'.join(item['image_urls'])
name = item['name']
url =item['url']
insert_sql = 'INSERT INTO bizhi VALUES ( '+ \
'\'' + name + '\',' + \
'\'' +url+ '\',' + \
'\'' +img_urls+ '\')'
            c.execute(insert_sql) # write the record to the database
conn.commit()
conn.close()
return item
class MyImagesPipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for image_url in item['image_urls']:
yield scrapy.Request(image_url,meta={'item': item})
def item_completed(self, results, item, info):
image_paths = [x['path'] for ok, x in results if ok]
if not image_paths:
raise DropItem("Item contains no images")
item['image_paths'] = image_paths
return item
def file_path(self, request, response=None, info=None):
"""
:param request: ๆฏไธไธชๅพ็ไธ่ฝฝ็ฎก้่ฏทๆฑ
:param response:
:param info:
:param strip :ๆธ
ๆดWindows็ณป็ป็ๆไปถๅคน้ๆณๅญ็ฌฆ๏ผ้ฟๅ
ๆ ๆณๅๅปบ็ฎๅฝ
:return: ๆฏๅฅๅพ็ๅ็ฑป็ฎๅฝ
"""
item = request.meta['item']
folder = item['name']
folder_strip = strip(folder)
image_guid = request.url.split('/')[-1]
return u'full/{0}/{1}'.format(folder_strip, image_guid)
def strip(path):
"""
:param path: ้่ฆๆธ
ๆด็ๆไปถๅคนๅๅญ
:return: ๆธ
ๆดๆWindows็ณป็ป้ๆณๆไปถๅคนๅๅญ็ๅญ็ฌฆไธฒ
"""
path = re.sub(r'[๏ผ\\*|โ<>:/]', '', str(path))
return path
|
zaoyubo/desk_zol
|
desk_zol/pipelines.py
|
pipelines.py
|
py
| 2,321 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17403072234
|
#https://pyformat.info/
"Hello {}, my name is {}".format('john', 'mike')
#'Hello john, my name is mike'.
"{1}, {0}".format('world', 'Hello')
#'Hello, world'
"{greeting}, {}".format('world', greeting='Hello')
#'Hello, world'
data = {'first': 'Hodor', 'last': 'Hodor!'}
'{first} {last}'.format(**data)
#'Hodor Hodor!'
'{first} {last}'.format(first='Hodor', last='Hodor!')
#'Hodor Hodor!'
'hello there %(5)s' % {'5': 'you'}
#'hello there you'
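# One more str.format feature in the same spirit: alignment and padding.
"{:>10}".format('test')
#'      test'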
|
vdatasci/Python
|
ETL/Format.py
|
Format.py
|
py
| 447 |
python
|
en
|
code
| 1 |
github-code
|
6
|
11615085313
|
import os
def detector(file):
with open(file,'r') as f:
filecontent=f.read()
if 'binod' in filecontent.lower():
print(f"Binod is here in file {file}")
if __name__ == '__main__':
for i in os.listdir():
        if i.endswith('.py'):
detector(i)
|
Coder-X27/Python-CWH
|
CWH Playlist Problems/07-A particular word detector.py
|
07-A particular word detector.py
|
py
| 282 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16592394886
|
import cv2
from ProcessImage import PreProcessImages
if __name__ == '__main__':
filename = '../assets/models/Rectangle.png'
dim = (600, 600)
img = cv2.resize(cv2.imread(filename), dim)
# prepare main algorithm
algo = PreProcessImages()
# wrapped image
wrapped = algo.fourPointTransform(img)
# rect, wrapped = algo.getCorner(img)
cv2.imshow('Original Image', img)
cv2.imshow('wrapped', wrapped)
if cv2.waitKey(0) & 0xff == 27:
cv2.destroyAllWindows()
|
DesaleF/TRDP_AR
|
src/test-algo.py
|
test-algo.py
|
py
| 544 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4685056088
|
import argparse
import configparser
import importlib
import logging
import pathlib
import time
import telegram
from telegram.ext import Updater
from telegram.utils.helpers import escape_markdown
from handlers.message_handlers import UploadNewTorrent
from helpers import format_speed
PARSER = argparse.ArgumentParser(
description='Torrent downloading progress on Telegram')
PARSER.add_argument(
'--config',
action='store',
    default='tdpt.ini',
help='Path to configuration file',
metavar='PATH',
type=pathlib.Path,
)
ARGS = PARSER.parse_args()
CONFIG = configparser.ConfigParser()
CONFIG.read(ARGS.config)
BOT = telegram.Bot(CONFIG['Telegram']['bot_token'])
class TorrentStatusMessage:
text = ("""{}
```
Progress: {:.0%}
Speed: {}
ETA: {}
Peers: {}
```""")
def __init__(self, bot, chat_id):
self.bot = bot
self.chat_id = chat_id
self.message_id = None
self.message_content = None
def create_or_update(self, torrents):
if not self.message_id:
self._create(torrents)
else:
self._update(torrents)
def delete(self):
logging.info('Deleting message')
self.bot.delete_message(chat_id=self.chat_id,
message_id=self.message_id)
self.message_id = None
def _create(self, torrents):
logging.info('Creating new download message')
self.message_content = self._get_new_text(torrents)
try:
self.message_id = self.bot.send_message(
self.chat_id,
self.message_content,
disable_notification=True,
parse_mode='markdown').message_id
except telegram.error.TimedOut:
logging.warning("Timeout when creating a new download message")
def _update(self, torrents):
logging.info('Updating download message')
new_message_content = self._get_new_text(torrents)
if new_message_content == self.message_content:
logging.info('Message the same, skipping update')
return
self.message_content = new_message_content
try:
self.bot.edit_message_text(self.message_content,
chat_id=self.chat_id,
message_id=self.message_id,
parse_mode="markdown")
except telegram.error.BadRequest as bad_request:
if bad_request.message == 'Message to edit not found':
self.message_id = None
self._create(torrents)
logging.warning(bad_request)
except telegram.error.TimedOut:
logging.warning('Timeout when editing message')
def _get_new_text(self, torrents):
msg = ""
for torrent in torrents:
name = escape_markdown(torrent.name)
msg += self.text.format(name, torrent.percent_done,
format_speed(torrent.download_rate),
torrent.eta, torrent.peers_connected)
return msg
def init_polling(client):
updater = Updater(token=CONFIG['Telegram']['bot_token'], use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(UploadNewTorrent(client))
updater.start_polling()
def main():
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
backend_name = CONFIG.get('General', 'backend')
backend = importlib.import_module('backends.' + backend_name.lower())
client = backend.Client(CONFIG[backend_name])
init_polling(client)
message = TorrentStatusMessage(BOT, CONFIG['Telegram']['chat_id'])
prev_torrents_count = 0
while True:
torrents = []
for torrent in client.get_torrents():
if torrent.is_downloading():
torrents.append(torrent)
torrents_count = len(torrents)
if torrents_count > prev_torrents_count:
if message.message_id:
message.delete()
if torrents_count > 0:
message.create_or_update(torrents)
else:
if message.message_id:
message.delete()
prev_torrents_count = len(torrents)
time.sleep(3)
if __name__ == '__main__':
main()
|
dolohow/tdpt
|
tdpt/__main__.py
|
__main__.py
|
py
| 4,387 |
python
|
en
|
code
| 33 |
github-code
|
6
|
17883527707
|
import json
import os
from click.testing import CliRunner
from demisto_sdk.__main__ import main
from demisto_sdk.tests.test_files.validate_integration_test_valid_types import (
DASHBOARD, GENERIC_MODULE, UNIFIED_GENERIC_MODULE)
from TestSuite.test_tools import ChangeCWD
UNIFY_CMD = "unify"
class TestGenericModuleUnifier:
def test_unify_generic_module(self, mocker, repo):
"""
Given
- A pack with a valid generic module, and a dashboard that it's id matches a dashboard in the generic module.
When
- Running unify on it.
Then
- Ensure the module was unified successfully (i.e contains the dashboard's content) and saved successfully
in the output path.
"""
pack = repo.create_pack('PackName')
pack.create_generic_module("generic-module", GENERIC_MODULE)
generic_module_path = pack.generic_modules[0].path
dashboard_copy = DASHBOARD.copy()
dashboard_copy['id'] = 'asset_dashboard'
pack.create_dashboard('dashboard_1', dashboard_copy)
saving_path = os.path.join(pack._generic_modules_path,
f'{pack.generic_modules[0].name.rstrip(".json")}_unified.json')
with ChangeCWD(pack.repo_path):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, [UNIFY_CMD, '-i', generic_module_path], catch_exceptions=False)
assert result.exit_code == 0
assert os.path.isfile(saving_path)
with open(saving_path) as f:
saved_generic_module = json.load(f)
assert saved_generic_module == UNIFIED_GENERIC_MODULE
|
AdouniH/demisto-sdk
|
demisto_sdk/tests/integration_tests/unify_integration_test.py
|
unify_integration_test.py
|
py
| 1,653 |
python
|
en
|
code
| null |
github-code
|
6
|
71576261627
|
import os
import torch
from ...StableDiffuser import StableDiffuser
def edit_output(activation, name):
activation[:] = 0.0
return activation
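# Zeroing the traced module's output ablates that layer for the whole
# generation; the per-layer loop in main() relies on this to isolate each
# cross-attention (attn2) layer's contribution to the prompt.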
def main(inpath, outpath, device):
diffuser = StableDiffuser(scheduler='LMS').to(torch.device(device)).half()
layers = set([module_name for module_name, module in diffuser.named_modules() if module_name.endswith('attn2')])
generator = torch.manual_seed(42)
os.makedirs(outpath, exist_ok=True)
prompt = "Van Gogh"
nsteps = 50
images = diffuser(
prompt,
generator=generator,
n_steps=nsteps,
)
images[0][0].save(os.path.join(outpath, f"orig.png"))
for layer in layers:
generator = torch.manual_seed(42)
images, trace_steps = diffuser(
prompt,
generator=generator,
n_steps=nsteps,
trace_args={'layers' : [layer], 'edit_output': edit_output}
)
images[0][0].save(os.path.join(outpath, f"{layer}.png"))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('inpath')
parser.add_argument('outpath')
parser.add_argument('--device', default='cuda:0')
main(**vars(parser.parse_args()))
|
JadenFiotto-Kaufman/thesis
|
thesis/experiments/cr/cr.py
|
cr.py
|
py
| 1,304 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18322570607
|
# coding: utf-8
import pprint
import six
class ExchangeRateModel(object):
field_types = {
'from': 'str',
'to': 'str',
'rate': 'float'
}
attribute_map = {
'from_str': 'from',
'to': 'to',
'rate': 'rate'
}
def __init__(self, from_str=None, to=None, rate=None): # noqa: E501
self._from_str = None
self._to = None
self._rate = None
self.discriminator = None
if from_str is not None:
self.from_str = from_str
if to is not None:
self.to = to
if rate is not None:
self.rate = rate
@property
def from_str(self):
return self._from_str
@from_str.setter
def from_str(self, from_str):
self._from_str = from_str
@property
def to(self):
return self._to
@to.setter
def to(self, to):
self._to = to
@property
def rate(self):
return self._rate
@rate.setter
def rate(self, rate):
self._rate = rate
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.field_types):
if attr == 'from':
value = getattr(self, 'from_str')
else:
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ExchangeRateModel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ExchangeRateModel):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
UniCryptoLab/UniPaymentClient.Python
|
UniPaymentClient/unipayment/models/exchange_rate_model.py
|
exchange_rate_model.py
|
py
| 2,391 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32791878864
|
#!/usr/bin/python3
import csv
import json
from operator import itemgetter
class ViaFile:
def __init__(self, filename, object_type, image_filename = "", has_header = True):
header = has_header
if len(image_filename) > 0:
filter_image = True
else:
filter_image = False
self.entries = 0
self.object_type = object_type
self.column_headings = []
self.data_rows = []
max_row_len = 0
with open(filename, 'r') as f:
csv_reader = csv.reader(f)
for row in csv_reader:
if header:
column_headings = row
header = False
continue
if filter_image:
if row[0] != image_filename:
continue
max_row_len = max(max_row_len, len(row))
self.entries += 1
self.data_rows.append(ViaEntry(row, object_type, True))
f.close()
if len(self.column_headings) == 0 and len(self.data_rows) > 0:
self.column_headings = ['Col' + str(i) for i in range(max_row_len)]
def get_headings(self):
return self.column_headings
def get_row(self, row_id):
return self.data_rows[row_id]
class ViaEntry:
def __init__(self, entry_row, object_type, simplify=False):
self.region_id = entry_row[4]
self.shape_attributes = json.loads(entry_row[5])
self.object_type = object_type
if simplify:
self.simplify_line()
def get_shape(self):
return self.shape_attributes
def simplify_line(self):
if self.shape_attributes["name"] == "polyline":
this_x = self.shape_attributes["all_points_x"]
this_y = self.shape_attributes["all_points_y"]
new_x = [min(this_x), max(this_x)]
new_y = [min(this_y), min(this_y)]
self.shape_attributes["all_points_x"] = new_x
self.shape_attributes["all_points_y"] = new_y
if __name__ == '__main__':
V = ViaFile("../VIA/via_annotations_c54_18_10d_full - text lines.csv", "C 54-18 10d - tidy.jpg")
print(V.get_headings())
sorting_list = []
for i in range(V.entries):
print(V.get_row(i).shape_attributes)
sorting_list.append([i, min(V.get_row(i).shape_attributes["all_points_y"])])
sorting_list.sort(key=itemgetter(1))
|
mark-bell-tna/QSR
|
read_via.py
|
read_via.py
|
py
| 2,662 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24643551555
|
import mcpi.minecraft as minecraft
import mcpi.block as block
mc = minecraft.Minecraft.create()
mc.postToChat("Go find the block")
from random import randint
p = mc.player.getTilePos()
x = p.x + randint(-20, 20)
y = p.y + randint(-5, 5)
z = p.z + randint(-20, 20)
mc.setBlock(x, y, z, block.GOLD_BLOCK.id)
from gpiozero import LED, Buzzer
from time import sleep
led = LED(24)
buzz = Buzzer(17)
led.on()
buzz.on()
sleep(1)
led.off()
buzz.off()
from math import sqrt
dist = 0
gameover = False
while gameover == False:
p = mc.player.getTilePos()
xd = p.x - x
yd = p.y - y
zd = p.z - z
dist_now = sqrt((xd*xd) + (yd*yd) + (zd*zd))
if dist_now > dist:
buzz.on()
else:
buzz.off()
dist = dist_now
mc.postToChat(dist)
if dist_now < 5:
led.on()
else:
led.off()
    if dist_now < 1.5:
        gameover = True
mc.postToChat("You got Gold")
led.off()
buzz.off()
|
fpizzardo/physicalComputing
|
findablock.py
|
findablock.py
|
py
| 960 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25261775377
|
import json
import mimetypes
import re
from dataclasses import dataclass
from datetime import datetime
from os import walk
from os.path import exists, join, splitext
from typing import Dict, Optional
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
from dataclasses_json import dataclass_json
# Extension: ^(?:[^?]*\/)(?:[^/:?]+)(\.[^/:?]+)(?:\?.*)?$
# Domain: ^(?:https?:\/\/)?(?:www\.)?([^:\/?]+)
# Slug: ^(?:https?:\/\/)?(?:www\.)?([^:?]+)
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
}
@dataclass_json
@dataclass
class LinkPreview:
updated: datetime
url: str
title: Optional[str] = None
domain: Optional[str] = None
description: Optional[str] = None
thumbnailFile: Optional[str] = None
thumbnailUrl: Optional[str] = None
siteName: Optional[str] = None
def process_existing(link_data: Dict[str, LinkPreview], url: str):
print(f'- Existing url "{url}"')
def process_new(link_data: Dict[str, LinkPreview], url: str):
print(f'- New url "{url}"')
domain = urlparse(url).hostname.removeprefix("www.")
# Get meta tags with the property attribute
try:
r = requests.get(url, headers=HEADERS)
except:
link_data[url] = LinkPreview(
datetime.now(),
url,
domain=domain,
)
return
soup = BeautifulSoup(r.text, "html.parser")
metas = soup.select("meta[property]")
meta = {v["property"]: v["content"] for v in metas if v.has_attr("content")}
print(meta)
# Extract metadata
# TODO: Add twitter fallbacks? May not be necessary
if "og:title" in meta:
title = meta["og:title"]
else:
title = soup.title.text if soup.title is not None else None
description = meta["og:description"] if "og:description" in meta else None
site_name = meta["og:site_name"] if "og:site_name" in meta else None
# Download image
thumbnail_file = None
thumbnail_url = None
if "og:image" in meta:
thumbnail_url = meta["og:image"]
image_url_path = "".join(urlparse(thumbnail_url)[1:3])
image_name, _ = splitext(image_url_path)
response = requests.get(thumbnail_url, headers=HEADERS)
image_data = response.content
content_type = response.headers["content-type"]
image_ext = mimetypes.guess_extension(content_type, strict=False) or ""
thumbnail_file = (
re.sub(r'[<>:"/\\|?*]', "_", image_name).strip(". ") + image_ext
)
with open(join(link_thumb_dir, thumbnail_file), "wb") as f:
f.write(image_data)
link_data[url] = LinkPreview(
datetime.now(),
url,
title=title,
domain=domain,
description=description,
thumbnailFile=thumbnail_file,
thumbnailUrl=thumbnail_url,
siteName=site_name,
)
if __name__ == "__main__":
content_dir = join("content")
link_data_dir = join("data", "links")
link_data_file = join(link_data_dir, "linkData.json")
link_thumb_dir = join("assets", "images", "links")
filetypes = ("md",)
# Restore from existing json
link_data: Dict[str, LinkPreview] = {}
if exists(link_data_file):
with open(link_data_file, "r", encoding="utf-8") as f:
data = json.load(f)
previews = [
LinkPreview.from_json(json.dumps(preview_d)) for preview_d in data.values()
]
link_data = {preview.url: preview for preview in previews}
    url_regex = re.compile(
        r"((https?:\/\/)([A-Z]|[a-z]|[0-9]|[-._~:/?#[\]@!$&'*+,;%=])+)"
    )
# Find all urls in source (content) files and process them
for root, dirs, files in walk(content_dir):
for file in files:
if not file.lower().endswith(filetypes):
continue
filepath = join(root, file)
with open(filepath) as f:
matches = url_regex.findall(f.read())
if len(matches):
print(f"Found in {filepath}")
for url, *_ in matches:
if url in link_data:
process_existing(link_data, url)
else:
process_new(link_data, url)
# Save to json
data = {
preview.url: json.loads(preview.to_json()) for preview in link_data.values()
}
with open(link_data_file, "w", encoding="utf-8") as f:
json.dump(data, f)
|
evoth/blog
|
scripts/get_link_preview_data.py
|
get_link_preview_data.py
|
py
| 4,523 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5290873792
|
from gi.repository import Gtk, GObject
from .gi_composites import GtkTemplate
from .KanbanListView import KanbanListView
@GtkTemplate(ui='/org/gnome/kanban/ui/board.ui')
class BoardView(Gtk.Box):
__gtype_name__ = 'BoardView'
__gsignals__ = {
"signal-task-move-up": (GObject.SIGNAL_ACTION, None, ()),
"signal-task-move-down": (GObject.SIGNAL_ACTION, None, ()),
"signal-task-move-top": (GObject.SIGNAL_ACTION, None, ()),
"signal-task-move-bottom": (GObject.SIGNAL_ACTION, None, ()),
"signal-task-move-left-top": (GObject.SIGNAL_ACTION, None, ()),
"signal-task-move-right-top": (GObject.SIGNAL_ACTION, None, ()),
"signal-exit": (GObject.SIGNAL_ACTION, None, ()),
"task-move-up": (GObject.SIGNAL_RUN_FIRST, None, (str,)),
"task-move-down": (GObject.SIGNAL_RUN_FIRST, None, (str,)),
"task-move-top": (GObject.SIGNAL_RUN_FIRST, None, (str,)),
"task-move-bottom": (GObject.SIGNAL_RUN_FIRST, None, (str,)),
"task-move-left-top": (GObject.SIGNAL_RUN_FIRST, None, (str,)),
"task-move-right-top": (GObject.SIGNAL_RUN_FIRST, None, (str,))
}
headerbar, \
returnbutton = GtkTemplate.Child().widgets(2)
def __init__(self, board, window):
super().__init__(
orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
self.init_template()
self.board = board
self.window = window
self.window.bind_accelerator(self, "<Alt>Up", "signal-task-move-up")
self.window.bind_accelerator(
self, "<Alt>Down", "signal-task-move-down")
self.window.bind_accelerator(
self, "<Alt><Shift>Up", "signal-task-move-top")
self.window.bind_accelerator(
self, "<Alt><Shift>Down", "signal-task-move-bottom")
self.window.bind_accelerator(
self, "<Alt>Left", "signal-task-move-left-top")
self.window.bind_accelerator(
self, "<Alt>Right", "signal-task-move-right-top")
self.add_noneditable_accelerators()
self.connect("signal-task-move-up",
lambda w: self.emit("task-move-up", self.get_focus_list_name()))
self.connect("signal-task-move-down",
lambda w: self.emit("task-move-down", self.get_focus_list_name()))
self.connect("signal-task-move-top",
lambda w: self.emit("task-move-top", self.get_focus_list_name()))
self.connect("signal-task-move-bottom",
lambda w: self.emit("task-move-bottom", self.get_focus_list_name()))
self.connect("signal-task-move-left-top",
lambda w: self.emit("task-move-left-top", self.get_focus_list_name()))
self.connect("signal-task-move-right-top",
lambda w: self.emit("task-move-right-top", self.get_focus_list_name()))
self.connect("signal-exit", self.on_back_clicked)
self.headerbar.props.title = self.window.appname + " \u2013 " + self.board.title
self.window.set_titlebar(self.headerbar)
self.returnbutton.connect("clicked", self.on_back_clicked)
self.refresh()
def add_noneditable_accelerators(self):
self.window.bind_accelerator(self, "Escape", "signal-exit")
def remove_noneditable_accelerators(self):
self.window.remove_accelerator(self, "Escape")
def get_focus_list_name(self):
return self.get_focus_child().get_tasklist().get_title()
def add_tasklist_view(self, tasklist):
l = KanbanListView(tasklist, self)
l.get_tasklist().connect("modified", lambda w: self.window.user_settings.save())
self.lists.append(l)
self.pack_start(l, True, True, 0)
def get_list(self, index):
return self.lists[index]
def get_list_index(self, name):
for i, l in enumerate(self.lists):
if l.get_tasklist().get_title() == name:
return i
return None
def on_back_clicked(self, button):
self.window.draw_boards_list()
def get_title(self):
return self.board.title
def clear(self):
for child in self.get_children():
child.destroy()
self.lists = []
def refresh(self):
self.clear()
for l in self.board.tasklists:
self.add_tasklist_view(l)
if len(self.board.tasklists) > 0:
first_list = self.get_children()[0].get_tasklist()
first_elem = first_list.get_row_at_index(0)
first_list.select_row(first_elem)
first_elem.grab_focus()
|
pawel-jakubowski/kanban
|
src/view/BoardView.py
|
BoardView.py
|
py
| 4,582 |
python
|
en
|
code
| 6 |
github-code
|
6
|
34170958465
|
#Alejandro_Reyes_InClassExercise6
#Alejandro Reyes
#3/9/15
#This program will emulate a vendng machine
#It will provide the user with a menu from which to make a selection
def main():
# Initializing variables
selection = ""
cost = 0.0
payment = 0.0
change = 0.0
menu ()
# User will make a selection
userChoice = input("Enter your selection: ")
print()
    # This will prompt the user to re-enter if the entry is invalid
while not determineChoice (userChoice):
print ("Invalid Entry, wrong code or multiple codes entered!")
userChoice = input("Please re-enter ONE item: ")
# Cost & user selection values will be returned from the function
cost, selection = determineChoice(userChoice)
# Payment from the user will be entered
print ()
print ("You selected ", selection, ".", sep = "")
print ("Total due: $", format(cost, '.2f'), ".", sep = "")
payment = float(input("Please enter payment, only $1 bills accepted :"))
print ()
    # Will prompt the user to re-enter payment if the input is too high or low
while not calcChange (payment, cost):
print ("Invalid Entry, Do Not Enter More Than $4, no less than $1!")
payment = float(input("Please re-enter payment: "))
# Change for the user will be returned from the function
change = calcChange (payment, cost)
# End message will print if payment entered is valid
if calcChange (payment, cost):
endmes (change, selection)
#This func will print out the menu
def menu ():
print ("Please select the code for the item you wish. (One item only Please)\n")
print("-----------------MENU------------------")
print ("Snickers\tM&Ms\t\tKitKat")
print ("$0.28\t\t$1.39\t\t$1.39")
print ("A1\t\tA2\t\tA3")
print ()
print ("Lays\t\tDoritos\t\tCheetos")
print ("$0.50\t\t$0.50\t\t$1.49")
print ("B1\t\tB2\t\tB3")
print ()
print ("TicTac\t\tStarburst\tSkittles")
print ("$1.04\t\t$0.79\t\t$2.49")
print ("C1\t\tC2\t\tC3")
print ()
#This will determine the user's choice and return cost and item info
def determineChoice (userChoice):
if userChoice == "A1" or userChoice == "a1":
cost = 0.89
item = "Snickers"
return cost, item
elif userChoice == "A2" or userChoice == "a2":
cost = 1.39
item = "M&Ms"
return cost, item
elif userChoice == "A3" or userChoice == "a3":
cost = 1.39
item = "KitKat"
return cost, item
elif userChoice == "B1" or userChoice == "b1":
cost = 0.50
item = "Lays"
return cost, item
elif userChoice == "B2" or userChoice == "b2":
cost = 0.50
item = "Doritos"
return cost, item
elif userChoice == "B3" or userChoice == "b3":
cost = 1.49
item = "Cheetos"
return cost, item
elif userChoice == "C1" or userChoice == "c1":
cost = 1.04
item = "TicTac"
return cost, item
elif userChoice == "C2" or userChoice == "c2":
cost = 0.79
item = "Starbust"
return cost, item
elif userChoice == "C3" or userChoice == "c3":
cost = 2.49
item = "Skittles"
return cost, item
#This will validate user input and
#calculate the amount of change and return change due to user
def calcChange (payment, cost):
if payment >= 1 and payment <= 4:
change = payment - cost
return change
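# Note: calcChange doubles as the payment validator -- it implicitly returns
# None (falsy) for payments outside $1-$4, which the while-loop above treats
# as an invalid entry.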
# End message
def endmes (change, selection):
print ()
print ("Thank you! Your change is $", format(change, '.2f'), sep = "")
print ("Enjoy your", selection, "!")
main()
|
alejandroereyes/vending_machine_py
|
vending_machine.py
|
vending_machine.py
|
py
| 3,652 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34012996402
|
import argparse
import scipy
from scipy import ndimage
import cv2
import numpy as np
import sys
import json
import torch
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils import data
# from networks.unet2D import UNet
from networks.efficient import EfficientSegBackbone
from dataset.MyoPSDataset import MyoPSDataSet, MyoPSDataSetVal
from collections import OrderedDict
import os
import scipy.ndimage as nd
from math import ceil
from PIL import Image as PILImage
import nibabel as nib
from utils.ParaFlop import print_model_parm_nums, print_model_parm_flops, torch_summarize_df
import matplotlib.pyplot as plt
# from utils.encoding import DataParallelModel, DataParallelCriterion
import torch.nn as nn
from engine import Engine
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="W-like Network for 3D Medical Image Segmentation.")
# paths
parser.add_argument("--data_dir", type=str, default='dataset/')
parser.add_argument("--val_list", type=str, default='list/MyoPS2020/val5f_5.txt')
parser.add_argument("--output_path", type=str, default='output/MyoPS2020/tmp/')
parser.add_argument("--restore_from", type=str, default='snapshots/MyoPS2020/all_r1_EB3_4GPU_bs32_FP16/MyoPS2020_all_r1_EB3_4GPU_bs32_FP16_final.pth')
# training details
parser.add_argument("--input_size", type=str, default='288,288')
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--num_gpus", type=int, default=1)
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument("--FP16", type=bool, default=False)
parser.add_argument("--num_epochs", type=int, default=500)
parser.add_argument("--patience", type=int, default=3)
parser.add_argument("--start_epoch", type=int, default=0)
parser.add_argument("--save_pred_every", type=int, default=10)
parser.add_argument("--learning_rate", type=float, default=1e-3)
parser.add_argument("--num_classes", type=int, default=5 + 1)
parser.add_argument("--compound_coef", type=int, default=3) # B0-B7
parser.add_argument("--num_workers", type=int, default=4)
parser.add_argument("--weight_std", type=bool, default=True)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--power", type=float, default=0.9)
parser.add_argument("--weight_decay", type=float, default=0.0005)
parser.add_argument("--ignore_label", type=int, default=255)
parser.add_argument("--is_training", action="store_true")
parser.add_argument("--not_restore_last", action="store_true")
parser.add_argument("--save_num_images", type=int, default=2)
# data aug.
parser.add_argument("--random_mirror", type=bool, default=True, )
parser.add_argument("--random_scale", type=bool, default=True)
parser.add_argument("--random_seed", type=int, default=1234)
# others
parser.add_argument("--gpu", type=str, default='None')
parser.add_argument("--recurrence", type=int, default=1)
parser.add_argument("--ft", type=bool, default=False)
return parser
def get_palette(num_cls):
""" Returns the color map for visualizing the segmentation mask.
Args:
num_cls: Number of classes
Returns:
The color map
"""
n = num_cls
palette = [0] * (n * 3)
for j in range(0, n):
lab = j
palette[j * 3 + 0] = 0
palette[j * 3 + 1] = 0
palette[j * 3 + 2] = 0
i = 0
while lab:
palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
i += 1
lab >>= 3
return palette
def pad_image(img, target_size):
"""Pad an image up to the target size."""
deps_missing = target_size[0] - img.shape[2]
rows_missing = target_size[1] - img.shape[3]
cols_missing = target_size[2] - img.shape[4]
padded_img = np.pad(img, ((0, 0), (0, 0),(0, deps_missing), (0, rows_missing), (0, cols_missing)), 'constant')
return padded_img
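# e.g. pad_image(x, (160, 256, 256)) zero-pads a (N, C, D, H, W) batch on the
# trailing side of the depth/height/width axes up to the target size.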
def multi_net(net_list, img):
img = torch.from_numpy(img).cuda()
padded_prediction = net_list[0](img)
padded_prediction = torch.softmax(padded_prediction, 1)
for i in range(1, len(net_list)):
padded_prediction_i = net_list[i](img)
padded_prediction_i = torch.softmax(padded_prediction_i, 1)
padded_prediction += padded_prediction_i
padded_prediction /= len(net_list)
return padded_prediction.cpu().numpy()
# for 2D
def predict_sliding(net, image, tile_size, classes): # image: 1,3,5,256,256, tile_size:256x256
image_size = image.shape
overlap = 1 / 3
strideHW = ceil(tile_size[0] * (1 - overlap))
tile_rows = int(ceil((image_size[3] - tile_size[0]) / strideHW) + 1) # strided convolution formula
tile_cols = int(ceil((image_size[4] - tile_size[1]) / strideHW) + 1)
# print("Need %i x %i x %i prediction tiles @ stride %i x %i px" % (tile_deps, tile_cols, tile_rows, strideD, strideHW))
full_probs = np.zeros((image_size[0], classes, image_size[2], image_size[3], image_size[4])).astype(np.float32) # 1x4x155x240x240
count_predictions = np.zeros((image_size[0], classes, image_size[2], image_size[3], image_size[4])).astype(np.float32)
full_probs = torch.from_numpy(full_probs).cuda()
count_predictions = torch.from_numpy(count_predictions).cuda()
tile_counter = 0
for dep in range(image_size[2]):
for row in range(tile_rows):
for col in range(tile_cols):
x1 = int(col * strideHW)
y1 = int(row * strideHW)
x2 = min(x1 + tile_size[1], image_size[4])
y2 = min(y1 + tile_size[0], image_size[3])
x1 = max(int(x2 - tile_size[1]), 0) # for portrait images the x1 underflows sometimes
y1 = max(int(y2 - tile_size[0]), 0) # for very few rows y1 underflows
d1 = dep
d2 = dep+1
img = torch.squeeze(image[:, :, d1:d2, y1:y2, x1:x2], 2)
img = img.numpy()
tile_counter += 1
prediction1 = multi_net([net], img)
prediction2 = multi_net([net], img[:, :, :, ::-1].copy())[:, :, :, ::-1]
prediction3 = multi_net([net], img[:, :, ::-1, :].copy())[:, :, ::-1, :]
prediction = (prediction1 + prediction2 + prediction3) / 3.
prediction = torch.unsqueeze(torch.from_numpy(prediction).cuda(), 2)
if isinstance(prediction, list):
shape = np.array(prediction[0].shape)
shape[0] = prediction[0].shape[0] * len(prediction)
shape = tuple(shape)
preds = torch.zeros(shape).cuda()
bs_singlegpu = prediction[0].shape[0]
for i in range(len(prediction)):
preds[i * bs_singlegpu: (i + 1) * bs_singlegpu] = prediction[i]
count_predictions[:, :, d1:d2, y1:y2, x1:x2] += 1
full_probs[:, :, d1:d2, y1:y2, x1:x2] += preds
else:
count_predictions[:, :, d1:d2, y1:y2, x1:x2] += 1
full_probs[:, :, d1:d2, y1:y2, x1:x2] += prediction
# average the predictions in the overlapping regions
full_probs /= count_predictions
return full_probs.cpu().data.numpy()
def get_confusion_matrix(gt_label, pred_label, class_num):
"""
Calcute the confusion matrix by given label and pred
:param gt_label: the ground truth label
:param pred_label: the pred label
:param class_num: the nunber of class
:return: the confusion matrix
"""
index = (gt_label * class_num + pred_label).astype('int32')
label_count = np.bincount(index)
confusion_matrix = np.zeros((class_num, class_num))
for i_label in range(class_num):
for i_pred_label in range(class_num):
cur_index = i_label * class_num + i_pred_label
if cur_index < len(label_count):
confusion_matrix[i_label, i_pred_label] = label_count[cur_index]
return confusion_matrix
def dice_score(preds, labels):
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match"
predict = preds.view().reshape(preds.shape[0], -1)
target = labels.view().reshape(labels.shape[0], -1)
num = np.sum(np.multiply(predict, target), axis=1)
den = np.sum(predict, axis=1) + np.sum(target, axis=1) +1
dice = 2*num / den
return dice.mean()
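# Dice coefficient: dice = 2*|A ∩ B| / (|A| + |B|); the +1 in the denominator
# above is a smoothing term that avoids division by zero for empty masks.
# Example: A = [1, 1, 0], B = [1, 0, 0] -> num = 1, den = 2 + 1 + 1 = 4, dice = 0.5.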
def main():
"""Create the model and start the evaluation process."""
parser= get_arguments()
#os.environ["CUDA_VISIBLE_DEVICES"] = '0'
with Engine(custom_parser=parser) as engine:
args = parser.parse_args()
torch.cuda.set_device(args.local_rank)
h, w = map(int, args.input_size.split(','))
input_size = (h, w)
cudnn.benchmark = True
seed = args.random_seed
if engine.distributed:
seed = args.local_rank
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
model = EfficientSegBackbone(num_classes=args.num_classes, compound_coef=args.compound_coef, load_weights=True)
print_model_parm_nums(model)
# print_model_parm_flops(model)
# model = nn.DataParallel(model)
print('loading from checkpoint: {}'.format(args.restore_from))
if os.path.exists(args.restore_from):
model.load_state_dict(torch.load(args.restore_from, map_location=torch.device('cpu')))
#model.load_state_dict(torch.load(args.restore_from, map_location=torch.device(args.local_rank)))
else:
print('File not exists in the reload path: {}'.format(args.restore_from))
model.eval()
model.cuda()
testloader = data.DataLoader(
MyoPSDataSetVal(args.data_dir, args.val_list),
batch_size=1, shuffle=False, pin_memory=True)
if not os.path.exists(args.output_path):
os.makedirs(args.output_path)
dice_LV = 0
dice_RV = 0
dice_MY = 0
dice_edema = 0
dice_scars = 0
for index, batch in enumerate(testloader):
# print('%d processd'%(index))
image, label, name, affine = batch
affine = affine[0].numpy()
with torch.no_grad():
output = predict_sliding(model, image, input_size, args.num_classes)
seg_pred = np.asarray(np.argmax(output, axis=1), dtype=np.uint8)
seg_pred = np.where(seg_pred == 1, 500, seg_pred)
seg_pred = np.where(seg_pred == 2, 600, seg_pred)
seg_pred = np.where(seg_pred == 3, 200, seg_pred)
seg_pred = np.where(seg_pred == 4, 1220, seg_pred)
seg_pred = np.where(seg_pred == 5, 2221, seg_pred)
seg_pred_background = (seg_pred == 0)
seg_pred_LV = (seg_pred == 500)
seg_pred_RV = (seg_pred == 600)
seg_pred_MY = (seg_pred == 200)
seg_pred_edema = (seg_pred == 1220)
seg_pred_scars = (seg_pred == 2221)
seg_gt = np.asarray(np.argmax(label.cpu().numpy(), axis=1), dtype=np.uint8)
seg_gt = np.where(seg_gt == 1, 500, seg_gt)
seg_gt = np.where(seg_gt == 2, 600, seg_gt)
seg_gt = np.where(seg_gt == 3, 200, seg_gt)
seg_gt = np.where(seg_gt == 4, 1220, seg_gt)
seg_gt = np.where(seg_gt == 5, 2221, seg_gt)
seg_gt_background = (seg_gt == 0)
seg_gt_LV = (seg_gt == 500)
seg_gt_RV = (seg_gt == 600)
seg_gt_MY = (seg_gt == 200)
seg_gt_edema = (seg_gt == 1220)
seg_gt_scars = (seg_gt == 2221)
dice_LV_i = dice_score(seg_pred_LV, seg_gt_LV)
dice_RV_i = dice_score(seg_pred_RV, seg_gt_RV)
dice_MY_i = dice_score(seg_pred_MY, seg_gt_MY)
dice_edema_i = dice_score(seg_pred_edema, seg_gt_edema)
dice_scars_i = dice_score(seg_pred_scars, seg_gt_scars)
print('Processing {}: LV = {:.4}, RV = {:.4}, MY = {:.4}, edema = {:.4}, scars = {:.4}'.format(name, dice_LV_i, dice_RV_i, dice_MY_i, dice_edema_i, dice_scars_i))
dice_LV += dice_LV_i
dice_RV += dice_RV_i
dice_MY += dice_MY_i
dice_edema += dice_edema_i
dice_scars += dice_scars_i
seg_pred = seg_pred[0].transpose((1,2,0)) #240x240x155
seg_gt = seg_gt[0].transpose((1,2,0))
seg_pred = nib.Nifti1Image(seg_pred, affine=affine)
seg_gt = nib.Nifti1Image(seg_gt, affine=affine)
# seg_name = name[0].replace("volume", "segmentation")
seg_name = name[0]
seg_save_p = os.path.join(args.output_path+'/%s.nii.gz' % (seg_name))
gt_save_p = os.path.join(args.output_path + '/%s_gt.nii.gz' % (seg_name))
nib.save(seg_pred, seg_save_p)
nib.save(seg_gt, gt_save_p)
dice_LV_avg = dice_LV / (index + 1)
dice_RV_avg = dice_RV / (index + 1)
dice_MY_avg = dice_MY / (index + 1)
dice_edema_avg = dice_edema / (index + 1)
        dice_scars_avg = dice_scars / (index + 1)
        print('Average score: LV = {:.4}, RV = {:.4}, MY = {:.4}, edema = {:.4}, scars = {:.4}'.format(dice_LV_avg, dice_RV_avg, dice_MY_avg, dice_edema_avg, dice_scars_avg))
if __name__ == '__main__':
main()
|
jianpengz/EfficientSeg
|
evaluate.py
|
evaluate.py
|
py
| 14,181 |
python
|
en
|
code
| 3 |
github-code
|
6
|
18642608045
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese characters correctly
plt.figure(figsize=(6,9)) # set the figure size
labels = ['XL','L','M','S'] # slice labels
sizes = [461,253,789,660] # value of each slice
colors = ['red','yellowgreen','cyan','yellow'] # color of each slice
explode = (0,0,0,0.1) # pull one slice out; the larger the value, the bigger the gap
patches,text1,text2 = plt.pie(sizes,
explode=explode,
labels=labels,
colors=colors,
                              autopct = '%3.2f%%', # keep the percentages to a fixed number of decimal places
                              startangle = 90, # starting angle, measured counterclockwise
                              pctdistance = 0.6) # distance of the percentage labels from the center, as a fraction of the radius
for t in text1:
t.set_size(20)
plt.title('服装设计', fontsize=30)  # "Fashion Design"
plt.axis('equal')
plt.show()
|
RiddMa/ScrapyTest
|
Visualization2/LXY/hw/b.py
|
b.py
|
py
| 879 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2361991036
|
from googleapiclient.discovery import build
import os
import pickle
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.discovery import build
from datetime import date
# To use application:
# - must add email address for testing in Google developer console
# - requires client_screts.json - downloaded from Google OAuth, this should be kept secret
# - token.pickle may need to be deleted if this script was used a long time ago
def main():
credentials = None
# load credentials from previous successful logins
if os.path.exists("token.pickle"):
print("Loading Credentials From File...")
with open("token.pickle", "rb") as token:
credentials = pickle.load(token)
# no credentials or invalid
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
print("Refreshing Access Token...")
credentials.refresh(Request()) # refresh invalid token
else:
print("Fetching New Tokens...")
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
flow = InstalledAppFlow.from_client_secrets_file(
"client_secrets.json", scopes) # loading credentials
            # ask the user to consent, hosting the redirect on the built-in local server
            flow.run_local_server(port=8080, prompt="consent")
credentials = flow.credentials
# save the credentials for the next run
with open("token.pickle", "wb") as f:
print("Saving Credentials for Future Use...")
pickle.dump(credentials, f)
youtube = build("youtube", "v3", credentials=credentials)
request = youtube.playlists().list(part="snippet,contentDetails", mine=True,
maxResults=50) # maxResult default is 5, can be 0 to 50 (per page)
response = request.execute()
# study / troubleshoot purposes
# print("RESPONSE")
# print(response)
# writing a text file
# some characters that cannot be read will return as errors
file = open(str(date.today()) + " Videos.txt", "w", errors="ignore")
# for each playlist, retrieve title
for item in response["items"]:
file.write("******************\n")
file.write("Playlist: " + item["snippet"]["title"] + "\n")
videoCount = 1
nextToken = None # playlist can only send a max of 50 videos. Next videos will be stored in the next token for request
while True:
# find videos from playlistId
videoRequest = youtube.playlistItems().list(part="snippet,contentDetails", playlistId=item["id"], maxResults=50, pageToken=nextToken) # maxResult default is 5, can be 0 to 50 (per page)
videoResponse = videoRequest.execute()
nextToken = videoResponse.get("nextPageToken")
# study / troubleshoot purposes
# print("VIDEORESPONSE")
# print(videoResponse)
# write each video's title
for video in videoResponse["items"]:
file.write(str(videoCount) + ": " +
video["snippet"]["title"] + "\n")
videoCount += 1
if not nextToken:
break
file.write("\n")
# close files and objects
youtube.close()
file.close()
if __name__ == "__main__":
main()
|
KentV12/playlistSaver
|
main.py
|
main.py
|
py
| 3,487 |
python
|
en
|
code
| 0 |
github-code
|
6
|
551185141
|
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
def convtime(year,doy,hour,min):
d = datetime.strptime(str(doy),'%j')
month = d.month
day = d.day
dt = datetime(int(year),int(month),int(day),int(hour),int(min))
return dt
df = pd.read_csv('./Output/test1_2005_RSL_60.txt',delim_whitespace=True)
datex = []
for ind in df.index:
datex.append(convtime(df.at[ind,'Year'],df.at[ind,'DOY'],df.at[ind,'Hour'],df.at[ind,'Min']))
df['datetime'] = datex
df.set_index('datetime',inplace=True)
print(df)
plt.figure(figsize=(12,5))
plt.pcolormesh(df.index,np.arange(0.1,3.1,0.1),df.iloc[:,5:35].T,cmap='YlGnBu',vmin=0,vmax=5)
plt.xlabel('Month-day hour')
plt.ylabel('$z/z_H$ [-]')
plt.colorbar(label='U [m s$^{-1}$]')
plt.draw()
plt.figure()
colormap = plt.cm.YlGnBu
plt.gca().set_prop_cycle(color=[colormap(i) for i in np.linspace(0, 0.9, 30)])
for x in range(len(df.index) // 4):  # iterate over the first quarter of the rows
    plt.plot(df.iloc[x, 5:35], np.arange(0.1, 3.1, 0.1))
plt.xlabel('U [m s$^{-1}$]')
plt.ylabel('$z/z_H$ [-]')
plt.show()
|
Urban-Meteorology-Reading/SUEWS
|
Test/BaseRun/2018a/test.py
|
test.py
|
py
| 1,072 |
python
|
en
|
code
| 6 |
github-code
|
6
|
38453486962
|
from itertools import combinations
from collections import deque
n = int(input())
ppl = list(map(int,input().split()))
totalPpl = sum(ppl)
graph = [[] for _ in range(n+1)]
for i in range(n):
graph[i+1] = list(map(int,input().split()))[1:]
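# Approach: try every split of districts {1..n} into two non-empty groups,
# use BFS to check that each group is connected in the adjacency graph, and
# minimize the absolute population difference between the two groups.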
def bfs(L):
picked = [0]*(n+1)
for i in L:
picked[i] = 1
visited = [0]*(n+1)
visited[L[0]] = 1
dq = deque([L[0]])
while dq:
p = dq.popleft()
for i in graph[p]:
if picked[i] == 1 and visited[i] == 0:
visited[i] = 1
dq.append(i)
for i in L:
if visited[i] == 0:
return False
return True
def getPpl(L):
s = 0
for i in L:
s += ppl[i-1]
another = totalPpl - s
return abs(another - s)
ans = int(1e8)
N = list(range(1,n+1))
for i in range(1,n):
comb = combinations(N,i)
for A in comb:
B = list(set(N) - set(A))
if bfs(A) and bfs(B):
ans = min(ans,getPpl(A))
if ans == int(1e8):
print(-1)
else:
print(ans)
|
LightPotato99/baekjoon
|
math/combination/gerymander.py
|
gerymander.py
|
py
| 1,036 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34861920007
|
from dataclasses import dataclass
from typing import Optional
import django_tables2 as tables
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.db.models import DateField, DateTimeField
from django.template import Context, Template
from django.urls import reverse
from django.utils.encoding import escape_uri_path
from django.utils.formats import date_format
from django.utils.safestring import mark_safe
from django_tables2.columns import library
from utilities.utils import get_viewname, content_type_name, content_type_identifier
__all__ = (
'ActionsColumn',
'BooleanColumn',
'ChoiceFieldColumn',
'ColorColumn',
'ColoredLabelColumn',
'LinkedCountColumn',
'ManyToManyColumn',
'TagColumn',
'TemplateColumn',
'ToggleColumn',
'UtilizationColumn',
'TruncatedTextColumn',
'ContentTypeColumn',
'ContentTypesColumn',
)
#
# Django-tables2 overrides
#
@library.register
class DateColumn(tables.DateColumn):
"""
Overrides the default implementation of DateColumn to better handle null values, returning a default value for
tables and null when exporting data. It is registered in the tables library to use this class instead of the
default, making this behavior consistent in all fields of type DateField.
"""
def value(self, value):
return value
@classmethod
def from_field(cls, field, **kwargs):
if isinstance(field, DateField):
return cls(**kwargs)
@library.register
class DateTimeColumn(tables.DateTimeColumn):
"""
Overrides the default implementation of DateTimeColumn to better handle null values, returning a default value for
tables and null when exporting data. It is registered in the tables library to use this class instead of the
default, making this behavior consistent in all fields of type DateTimeField.
"""
def value(self, value):
if value:
return date_format(value, format="SHORT_DATETIME_FORMAT")
return None
@classmethod
def from_field(cls, field, **kwargs):
if isinstance(field, DateTimeField):
return cls(**kwargs)
class ManyToManyColumn(tables.ManyToManyColumn):
"""
Overrides django-tables2's stock ManyToManyColumn to ensure that value() returns only plaintext data.
"""
def value(self, value):
items = [self.transform(item) for item in self.filter(value)]
return self.separator.join(items)
class TemplateColumn(tables.TemplateColumn):
"""
Overrides django-tables2's stock TemplateColumn class to render a placeholder symbol if the returned value
is an empty string.
"""
    PLACEHOLDER = mark_safe('&mdash;')
def __init__(self, export_raw=False, **kwargs):
"""
Args:
export_raw: If true, data export returns the raw field value rather than the rendered template. (Default:
False)
"""
super().__init__(**kwargs)
self.export_raw = export_raw
def render(self, *args, **kwargs):
ret = super().render(*args, **kwargs)
if not ret.strip():
return self.PLACEHOLDER
return ret
def value(self, **kwargs):
if self.export_raw:
# Skip template rendering and export raw value
return kwargs.get('value')
ret = super().value(**kwargs)
if ret == self.PLACEHOLDER:
return ''
return ret
#
# Custom columns
#
class ToggleColumn(tables.CheckBoxColumn):
"""
Extend CheckBoxColumn to add a "toggle all" checkbox in the column header.
"""
def __init__(self, *args, **kwargs):
default = kwargs.pop('default', '')
visible = kwargs.pop('visible', False)
if 'attrs' not in kwargs:
kwargs['attrs'] = {
'td': {
'class': 'whitespace-nowrap px-3 py-4 text-gray-500',
},
'input': {
'class': 'h-4 w-4 rounded border-gray-300 text-indigo-600'
}
}
super().__init__(*args, default=default, visible=visible, **kwargs)
@property
def header(self):
return mark_safe('<input type="checkbox" class="toggle h-4 w-4 rounded border-gray-300 text-indigo-600" title="Toggle All" />')
class BooleanColumn(tables.Column):
"""
Custom implementation of BooleanColumn to render a nicely-formatted checkmark or X icon instead of a Unicode
character.
"""
def render(self, value):
if value:
rendered = '<span class="text-green-500"><i class="mdi mdi-check-bold"></i></span>'
elif value is None:
rendered = '<span class="text-gray-500">—</span>'
else:
rendered = '<span class="text-red-500"><i class="mdi mdi-close-thick"></i></span>'
return mark_safe(rendered)
def value(self, value):
return str(value)
@dataclass
class ActionsItem:
title: str
icon: str
permission: Optional[str] = None
css_class: Optional[str] = 'bg-zinc-500 hover:bg-zinc-400'
class ActionsColumn(tables.Column):
"""
A dropdown menu which provides edit, delete, and changelog links for an object. Can optionally include
additional buttons rendered from a template string.
:param actions: The ordered list of dropdown menu items to include
:param extra_buttons: A Django template string which renders additional buttons preceding the actions dropdown
:param split_actions: When True, converts the actions dropdown menu into a split button with first action as the
direct button link and icon (default: True)
"""
attrs = {'td': {'class': 'whitespace-nowrap px-3 py-4 text-right text-black dark:text-gray-200 print:hidden'}}
empty_values = ()
actions = {
'edit': ActionsItem('Edit', 'pencil', 'change', 'bg-yellow-500 hover:bg-yellow-400'),
'delete': ActionsItem('Delete', 'trash-can-outline', 'delete', 'bg-red-500 hover:bg-red-400'),
'changelog': ActionsItem('Changelog', 'history'),
}
def __init__(self, *args, actions=('edit', 'delete', 'changelog'), extra_buttons='', split_actions=True, **kwargs):
super().__init__(*args, **kwargs)
self.extra_buttons = extra_buttons
self.split_actions = split_actions
# Determine which actions to enable
self.actions = {
name: self.actions[name] for name in actions
}
def header(self):
return ''
def render(self, record, table, **kwargs):
# Skip dummy records (e.g. available VLANs) or those with no actions
if not getattr(record, 'pk', None) or (not self.actions and not self.extra_buttons):
return ''
model = table.Meta.model
request = getattr(table, 'context', {}).get('request')
url_appendix = f'?return_url={escape_uri_path(request.get_full_path())}' if request else ''
html = ''
buttons = []
user = getattr(request, 'user', AnonymousUser())
for idx, (action, attrs) in enumerate(self.actions.items()):
permission = f'{model._meta.app_label}.{attrs.permission}_{model._meta.model_name}'
if attrs.permission is None or user.has_perm(permission):
url = reverse(get_viewname(model, action), kwargs={'pk': record.pk})
buttons.append(
f'<a class="px-2 py-1 rounded-md {attrs.css_class}" href="{url}{url_appendix}" type="button">'
f'<i class="mdi mdi-{attrs.icon}"></i></a>'
)
rendered_extra_buttons = ''
# Render any extra buttons from template code
if self.extra_buttons:
template = Template(self.extra_buttons)
context = getattr(table, "context", Context())
context.update({'record': record})
            rendered_extra_buttons = template.render(context)
html += (
f'<div class="flex flex-row space-x-2 justify-end">'
f' {rendered_extra_buttons}'
f' {"".join(buttons)}'
f'</div>'
)
return mark_safe(html)
class ChoiceFieldColumn(tables.Column):
"""
Render a model's static ChoiceField with its value from `get_FOO_display()` as a colored badge. Background color is
set by the instance's get_FOO_color() method, if defined.
"""
DEFAULT_BG_COLOR = 'bg-gray-500'
def render(self, record, bound_column, value):
if value in self.empty_values:
return self.default
# Determine the background color to use (try calling object.get_FOO_color())
try:
bg_color = getattr(record, f'get_{bound_column.name}_color')() or self.DEFAULT_BG_COLOR
except AttributeError:
bg_color = self.DEFAULT_BG_COLOR
return mark_safe(f'<span class="inline-flex items-center rounded-full px-3 py-0.5 text-sm text-white font-medium {bg_color}">{value}</span>')
def value(self, value):
return value
class ContentTypeColumn(tables.Column):
"""
Display a ContentType instance.
"""
def render(self, value):
if value is None:
return None
return content_type_name(value)
def value(self, value):
if value is None:
return None
return content_type_identifier(value)
class ContentTypesColumn(tables.ManyToManyColumn):
"""
Display a list of ContentType instances.
"""
def __init__(self, separator=None, *args, **kwargs):
# Use a line break as the default separator
if separator is None:
separator = mark_safe('<br />')
super().__init__(separator=separator, *args, **kwargs)
def transform(self, obj):
return content_type_name(obj, include_app=False)
def value(self, value):
return ','.join([
content_type_identifier(ct) for ct in self.filter(value)
])
class ColorColumn(tables.Column):
"""
Display an arbitrary color value, specified in RRGGBB format.
"""
def render(self, value):
return mark_safe(
f'<span class="color-label" style="background-color: #{value}"> </span>'
)
def value(self, value):
return f'#{value}'
class ColoredLabelColumn(tables.TemplateColumn):
"""
Render a related object as a colored label. The related object must have a `color` attribute (specifying
an RRGGBB value) and a `get_absolute_url()` method.
"""
template_code = """
{% load helpers %}
{% if value %}
<span class="badge" style="color: {{ value.color|fgcolor }}; background-color: #{{ value.color }}">
<a href="{{ value.get_absolute_url }}">{{ value }}</a>
</span>
{% else %}
    &mdash;
{% endif %}
"""
def __init__(self, *args, **kwargs):
super().__init__(template_code=self.template_code, *args, **kwargs)
def value(self, value):
return str(value)
class LinkedCountColumn(tables.Column):
"""
Render a count of related objects linked to a filtered URL.
:param viewname: The view name to use for URL resolution
:param view_kwargs: Additional kwargs to pass for URL resolution (optional)
:param url_params: A dict of query parameters to append to the URL (e.g. ?foo=bar) (optional)
"""
def __init__(self, viewname, *args, view_kwargs=None, url_params=None, default=0, **kwargs):
self.viewname = viewname
self.view_kwargs = view_kwargs or {}
self.url_params = url_params
super().__init__(*args, default=default, **kwargs)
def render(self, record, value):
if value:
url = reverse(self.viewname, kwargs=self.view_kwargs)
if self.url_params:
url += '?' + '&'.join([
f'{k}={getattr(record, v) or settings.FILTERS_NULL_CHOICE_VALUE}'
for k, v in self.url_params.items()
])
return mark_safe(f'<a href="{url}">{value}</a>')
return value
def value(self, value):
return value
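# Illustrative use of LinkedCountColumn (the table, view name, and field below
# are hypothetical, not part of this module):
#
#     class SiteTable(tables.Table):
#         device_count = LinkedCountColumn(
#             viewname='dcim:device_list',
#             url_params={'site_id': 'pk'},
#             verbose_name='Devices',
#         )
#
# The rendered count then links to the device list filtered by ?site_id=<record.pk>.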
class TagColumn(tables.TemplateColumn):
"""
Display a list of Tags assigned to the object.
"""
template_code = """
{% load helpers %}
{% for tag in value.all %}
{% tag tag url_name %}
{% empty %}
<span class="text-gray-400">—</span>
{% endfor %}
"""
def __init__(self, url_name=None):
super().__init__(
orderable=False,
template_code=self.template_code,
extra_context={'url_name': url_name}
)
def value(self, value):
return ",".join([tag.name for tag in value.all()])
class UtilizationColumn(tables.TemplateColumn):
"""
Display a colored utilization bar graph.
"""
template_code = """{% load helpers %}{% if record.pk %}{% utilization_graph value %}{% endif %}"""
def __init__(self, *args, **kwargs):
super().__init__(template_code=self.template_code, *args, **kwargs)
def value(self, value):
return f'{value}%'
class MarkdownColumn(tables.TemplateColumn):
"""
Render a Markdown string.
"""
template_code = """
{% if value %}
{{ value|markdown }}
{% else %}
        &mdash;
{% endif %}
"""
def __init__(self):
super().__init__(
template_code=self.template_code
)
def value(self, value):
return value
class TruncatedTextColumn(tables.Column):
"""A Column to limit to 100 characters and add an ellipsis"""
def render(self, value):
if len(value) > 52:
return value[0:49] + '...'
return str(value)
|
Status-Page/Status-Page
|
statuspage/statuspage/tables/columns.py
|
columns.py
|
py
| 13,718 |
python
|
en
|
code
| 45 |
github-code
|
6
|
5694311921
|
import torch
import torch.nn as nn
from Model2.BottomUp import Encoder
from Model2.TopDown import Decoder
import torchsummary
class FeaturePyramidNetwork(nn.Module):
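    """
    FPN-style segmentation model: the bottom-up encoder yields C2-C5 feature
    maps, the top-down decoder merges them into M5-M2, and each level is
    upsampled to 224x224 and concatenated (512 channels in) before the final
    n_classes convolution.
    """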
def __init__(self, n_classes=2):
super(FeaturePyramidNetwork, self).__init__()
self.encoder = Encoder()
self.decoder = Decoder()
self.output = nn.Conv2d(512, n_classes, kernel_size=(3, 3), stride=1, padding=1)
def _up_size(self, x):
        return nn.functional.interpolate(x, size=(224, 224), mode='nearest')
def forward(self, x):
c2, c3, c4, c5 = self.encoder(x)
m5, m4, m3, m2 = self.decoder(c2, c3, c4, c5)
p5 = self._up_size(m5)
p4 = self._up_size(m4)
p3 = self._up_size(m3)
p2 = self._up_size(m2)
p_cat = torch.cat([p5, p4, p3, p2], dim=1)
out = self.output(p_cat)
return out
model = FeaturePyramidNetwork()
torchsummary.summary(model, (3, 224, 224), device='cpu')
|
dmdm2002/FPN
|
Model/FPN.py
|
FPN.py
|
py
| 995 |
python
|
en
|
code
| 2 |
github-code
|
6
|
18023144134
|
import subprocess
import os
def collect_logs(start_time, end_time):
    # Collect logs using the 'log show' command
command = [
'log', 'show',
'--start', start_time,
'--end', end_time
]
result = subprocess.run(command, capture_output=True, text=True)
if result.returncode == 0:
return result.stdout
else:
print(f"Error collecting logs: {result.stderr}")
return None
def main():
    # Create the 'mac_result' folder if it does not already exist
results_folder = 'mac_result'
if not os.path.exists(results_folder):
os.makedirs(results_folder)
start_time = input("Enter the start time for log collection (format: 'yyyy-mm-dd hh:mm:ss'): ")
end_time = input("Enter the end time for log collection (format: 'yyyy-mm-dd hh:mm:ss'): ")
logs = collect_logs(start_time, end_time)
if logs:
        # Save the result file inside the 'mac_result' folder
result_file_path = os.path.join(results_folder, "collected_logs.txt")
with open(result_file_path, 'w') as file:
file.write(logs)
print(f"Logs collected and saved to {result_file_path}.")
if __name__ == "__main__":
main()
|
KIMJOONSIG/Reboot3
|
Mac/Eventlog.py
|
Eventlog.py
|
py
| 1,232 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6518786122
|
#!/usr/bin/env python
import datetime
from elasticsearch import Elasticsearch
from jobs.lib import Configuration
from jobs.lib import Send_Alert
local_config = {
"minutes": 30,
"index": ["workstations-*", "servers-*"],
"max_results": 1000,
"severity": "medium"
}
# Query goes here
search_query = {
"query": {
"bool": {
"must": [],
"filter": [
{
"range": {
"@timestamp": {
"format": "strict_date_optional_time",
"gte": datetime.datetime.utcnow() - datetime.timedelta(minutes=local_config["minutes"]),
"lte": datetime.datetime.utcnow()
}
}
},
{
"match_phrase": {
"winlog.channel": "Microsoft-Windows-Sysmon/Operational"
}
},
{
"match_phrase": {
"winlog.event_id": "11"
}
},
{
"match_phrase": {
"process.name": "powershell.exe"
}
}
],
}
},
}
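# The query above matches Sysmon FileCreate events (event_id 11) written by
# powershell.exe within the last `minutes` window on the configured indices.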
def init():
config = Configuration.readconfig()
connection = str(config["elasticsearch"]["connection"])
es = Elasticsearch([connection], verify_certs=False, ssl_show_warn=False)
res = es.search(index=local_config["index"], body=search_query, size=local_config["max_results"])
# Iterate through results
for doc in res['hits']['hits']:
if doc['_source']['file']['path'].startswith('C:\\example\\exclude_dir\\'):
continue
Send_Alert.send("Powershell on " + doc['_source']['host']['name'] +
" wrote " + doc['_source']['file']['path'], local_config["severity"])
|
0xbcf/elasticsearch_siem
|
jobs/PowershellFileWrite.py
|
PowershellFileWrite.py
|
py
| 1,669 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9293795541
|
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a list of integers
def preorderTraversal(self, root):
if not root:
return []
stack, result = [root], []
while stack:
node = stack.pop()
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
result.append(node.val)
return result
def preorderTraversal(self, root):
if not root:
return []
result = []
result.append(root.val)
result += self.preorderTraversal(root.left)
result += self.preorderTraversal(root.right)
return result
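# Minimal usage sketch (assumes the TreeNode class from the comment above; note
# that the second preorderTraversal definition overrides the first, so keep only
# the solution you want to run):
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     print(Solution().preorderTraversal(root))  # -> [1, 2, 3]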
|
rioshen/Problems
|
leetcode/python/binary_tree_preorder_traversal.py
|
binary_tree_preorder_traversal.py
|
py
| 865 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9434899492
|
"""
ๅจ่ฟ้ๆทปๅ ๅ็ง่ชๅฎไน็ๆญ่จ๏ผๆญ่จๅคฑ่ดฅๆๅบAssertionErrorๅฐฑOKใ
ๅจassertion.pyไธญไฝ ๅฏไปฅๆทปๅ ๆดๅคๆดไธฐๅฏ็ๆญ่จ๏ผๅๅบๆญ่จใๆฅๅฟๆญ่จใๆฐๆฎๅบๆญ่จ็ญ็ญ๏ผ่ฏท่ช่กๅฐ่ฃ
ใ
"""
def assertHTTPCode(response, code_list=None):
res_code = response.status_code
if not code_list:
code_list = [200]
if res_code not in code_list:
        raise AssertionError('Response status code is not in the expected list!')  # raising AssertionError lets unittest classify the case as a Failure rather than an Error
|
XiaoDjan/Test_framework
|
Test_framework/utils/assertion.py
|
assertion.py
|
py
| 543 |
python
|
zh
|
code
| 1 |
github-code
|
6
|
71233413949
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 11:14:40 2019
@author: AndrewW
"""
# A block cipher transforms a fixed-sized block (usually 8 or 16 bytes) of
# plaintext into ciphertext. But we almost never want to transform a single
# block; we encrypt irregularly-sized messages.
# One way we account for irregularly-sized messages is by padding, creating
# a plaintext that is an even multiple of the blocksize. The most popular
# padding scheme is called PKCS#7.
# So: pad any block to a specific block length, by appending the number of
# bytes of padding to the end of the block. For instance,
# "YELLOW SUBMARINE"
# ... padded to 20 bytes would be:
# "YELLOW SUBMARINE\x04\x04\x04\x04"
irregular = "YELLOW SUBMARINE"
length = int(input("How long should the block be? "))
padding = length - len(irregular)
padding_hex = hex(padding)
padding_str = str(padding_hex)
if len(str(padding_hex))==3:
padding_bin = "\\" + "x0" + padding_str[2::]
else:
padding_bin = "\\" + "x" + padding_str[2::]
for j in range (0,int(padding)):
irregular += str(padding_bin)
print(irregular)
print(len(irregular))
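# --- Hedged sketch (not part of the original solution): PKCS#7 appends `pad`
# copies of the byte whose value equals the padding length, so working on
# bytes directly avoids building literal "\xNN" text and keeps len() honest:

def pkcs7_pad(data: bytes, block_len: int) -> bytes:
    """Pad data to a multiple of block_len bytes using PKCS#7."""
    pad = block_len - (len(data) % block_len)  # always in 1..block_len
    return data + bytes([pad]) * pad

# pkcs7_pad(b"YELLOW SUBMARINE", 20) -> b'YELLOW SUBMARINE\x04\x04\x04\x04' (len 20)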
|
drewadwade/CTFs
|
Matasano Crypto Challenge/Set 2/Implement PKCS#7 padding.py
|
Implement PKCS#7 padding.py
|
py
| 1,173 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8910492018
|
import pytest, subprocess, os
from pathlib import Path
COMMAND = ['python', 'framextract', 'tests/FT.mp4']
def capture(command):
proc = subprocess.run(command, capture_output=True)
return proc.stdout, proc.stderr, proc.returncode
def test_framextract_no_param():
_, err, exitcode = capture(COMMAND[:-1])
assert exitcode != 0
assert err.startswith(b'usage: framextract [-h] [--version]')
def test_framextract(tmp_path):
pwd = Path.cwd()
command = [COMMAND[0], pwd/COMMAND[1], pwd/COMMAND[-1]]
os.chdir(tmp_path)
out, _, exitcode = capture(command)
os.chdir(pwd)
assert exitcode == 0
assert out.endswith(b'104 frames were extracted to "FT"\n')
def test_framextract_invalid_input_video():
command = COMMAND[:-1] + ['FT.mp4']
out, _, exitcode = capture(command)
assert exitcode == 0
assert out.endswith(b'Couldn\'t read video stream from file "FT.mp4"\n')
def test_framextract_output_framerate(tmp_path):
command = COMMAND + ['-o', tmp_path/'FT', '-f', '4']
out, _, exitcode = capture(command)
assert exitcode == 0
assert out.endswith(f'2 frames were extracted to "{tmp_path}/FT"\n'
.encode('utf-8'))
def test_framextract_get_info(tmp_path):
command = COMMAND + ['--get-info-only']
out, _, exitcode = capture(command)
assert exitcode == 0
assert out.endswith(b'Frame size: 960 X 540\n')
def test_framextract_small_framerate(tmp_path):
command = COMMAND + ['-o', tmp_path/'FT', '-f', '.01']
out, _, exitcode = capture(command)
assert exitcode == 0
assert out.endswith(f'104 frames were extracted to "{tmp_path}/FT"\n'
.encode('utf-8'))
|
FirmaTechnologies/framextract
|
tests/test_cli.py
|
test_cli.py
|
py
| 1,699 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6323481356
|
from typing import *
import asyncio
import logging
from datetime import datetime, timedelta, time, date
import traceback
import lightbulb
from lightbulb.commands.base import OptionModifier as OM
import hikari
import apscheduler
from apscheduler.triggers.interval import IntervalTrigger
from humanize import naturaldelta
from core import Table, getLogger, Inu, stopwatch
from utils import Reddit, AnimeCornerAPI, AnimeCornerPaginator2
log = getLogger(__name__)
METHOD_SYNC_TIME: int = 60*60*6
SYNCING = False
TARGET_TIME = time(16,00)
TRIGGER_NAME = "Anime Corner Trigger"
bot: Inu
plugin = lightbulb.Plugin("poll loader", "loads polls from database")
@plugin.listener(hikari.StartedEvent)
async def load_tasks(event: hikari.ShardReadyEvent):
global SYNCING
if SYNCING:
return
SYNCING = True
await asyncio.sleep(3)
await method()
logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
await init_method()
await defer_trigger_to_time()
async def defer_trigger_to_time(target_time: time | None = TARGET_TIME):
target_datetime = None
if target_time is not None:
current_time = datetime.now().time()
target_datetime = datetime.combine(date.today(), target_time)
if target_datetime.time() < current_time:
target_datetime += timedelta(days=1)
wait_time = (target_datetime - datetime.now()).total_seconds()
log.info(f"Waiting for {naturaldelta(timedelta(seconds=wait_time))} to shedule the {TRIGGER_NAME}")
trigger = IntervalTrigger(seconds=METHOD_SYNC_TIME, start_date=target_datetime)
plugin.bot.scheduler.add_job(method, trigger)
async def init_method():
pass
@stopwatch(
note=f"Task: Fetching and caching Anime Corner Ranking (Reddit + Anime Corner)",
cache_threshold=timedelta(microseconds=1)
)
async def method():
    url = None  # so the except handler below can log it even if the Reddit fetch fails early
    try:
submission = await Reddit.get_anime_of_the_week_post()
pag = AnimeCornerPaginator2()
pag.submission = submission
pag.title = submission.title
url = pag.anime_corner_url
api = AnimeCornerAPI()
await api.fetch_ranking(url)
except Exception as e:
log.error(
f"Error while fetching Anime Corner ranking with URL `{url}`\n"
f"{traceback.format_exc()}"
)
def load(inu: Inu):
global bot
bot = inu
global METHOD_SYNC_TIME
METHOD_SYNC_TIME = inu.conf.commands.anime_corner_sync_time * 60 * 60
inu.add_plugin(plugin)
|
zp33dy/inu
|
inu/ext/tasks/anime_corner.py
|
anime_corner.py
|
py
| 2,507 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41932079330
|
import numpy as np
from scipy import misc
import pandas as pd
import os
# 1) Exam
## 2) Create a zero vector of size 10
vector_zeros_2 = np.zeros(10)
print('2) --> ', vector_zeros_2)
## 3) Create a zero vector of size 10 with the element at position 5 set to 1
vector_zeros_3 = np.zeros(10)
vector_zeros_3[5] = 1
print('3) --> ', vector_zeros_3)
## 4) Reverse a 50-element vector (the element at position 1 becomes the one at position 50, etc.)
vector_zeros_4 = np.arange(50)
vector_zeros_4 = vector_zeros_4[::-1]
print('4) --> ', vector_zeros_4)
## 5) Create a 3 x 3 matrix with values from 0 to 8
matriz = np.arange(9)
matriz = matriz.reshape((3,3))
print('5) --> ', matriz)
## 6) Find the indices of the nonzero elements in an array
arreglo_indices = [1,2,0,0,4,0]
arreglo_indices = np.array(arreglo_indices)
resultado = np.where(arreglo_indices != 0)[0]
print('6) --> ', resultado)
## 7) Create a 3 x 3 identity matrix
matriz_identidad = np.eye(3)
print('7) --> ', matriz_identidad)
## 8) Create a 3 x 3 x 3 matrix with random values
matriz_randomica = np.random.randint(27, size=27).reshape(3,3,3)
print('8) --> ', matriz_randomica)
## 9) Create a 10 x 10 matrix and find the largest and smallest values
matriz_diez = np.arange(100).reshape(10,10)
menor_valor = matriz_diez.min()
mayor_valor = matriz_diez.max()
print('9) menor --> ', menor_valor)
print('9) mayor --> ', mayor_valor)
## 10) Get the unique RGB colors in an image (which RGB triples occur, e.g. 0,0,0 and 255,255,255 -> 2 colors)
imagen = misc.face()
resultado = len(np.unique(imagen.reshape(-1, imagen.shape[-1]), axis=0))  # flatten to (N, 3) pixels before taking unique rows
print('10) --> ', resultado)
## 11) How do you create a Series from a list, dictionary, or array?
mylist = list('abcdefghijklmnopqrstuvwxyz')
myarr = np.arange(26)
mydict = dict(zip(mylist, myarr))
serie = pd.Series(mylist)
serie_diccionario = pd.Series(mydict)
serie_arreglo = pd.Series(myarr)
print('11) --> Serie de lista: ', serie, '\n')
print('11) --> Serie de diccionario: ', serie_diccionario, '\n')
print('11) --> Serie de arreglo ', serie_arreglo, '\n')
## 12) How do you convert the index of a Series into a column of a DataFrame?
mylist = list('abcedfghijklmnopqrstuvwxyz')
myarr = np.arange(26)
mydict = dict(zip(mylist, myarr))
ser = pd.Series(mydict)
df = pd.DataFrame(ser).reset_index()
# Turn the Series into a DataFrame and make the index a column
df1 = pd.DataFrame(ser, index=['a'])
## 13) How do you combine several Series into a DataFrame?
ser1 = pd.Series(list('abcedfghijklmnopqrstuvwxyz'))
ser2 = pd.Series(np.arange(26))
df_combinado = pd.concat([ser1, ser2], axis = 1)
df_combinado = pd.DataFrame(df_combinado)
print('13) --> ', df_combinado)
## 14) How do you get the items that are in Series A but not in Series B?
ser1 = pd.Series([1, 2, 3, 4, 5])
ser2 = pd.Series([4, 5, 6, 7, 8])
items_diferencia = np.setdiff1d(ser1, ser2)
print('14) --> ', items_diferencia)
## 15) How do you get the items that are not common to Series A and Series B?
ser1 = pd.Series([1, 2, 3, 4, 5])
ser2 = pd.Series([4, 5, 6, 7, 8])
items_conjuncion = set(ser1) ^ set(ser2)
items_conjuncion = list(items_conjuncion)
items_conjuncion = pd.Series(items_conjuncion)
print('15) --> ', items_conjuncion, '\n')
## 16) How do you count how many times each value repeats in a Series?
ser = pd.Series(np.take(list('abcdefgh'), np.random.randint(8, size=30)))
repeticiones, contador = np.unique(ser, return_counts=True)
repeticiones = dict(zip(repeticiones, contador))
print(repeticiones)
print(contador)
print('16) --> ', repeticiones, '\n')
## 17) How do you keep the 2 most frequent values of a Series and replace every other value with 0?
np.random.RandomState(100)
ser = pd.Series(np.random.randint(1, 5, [12]))
valores_repetidos, contador = np.unique(ser, return_counts=True)
print('serie --> ', ser)
print('contador --> ', contador)
indice = np.argsort(-contador)
print('indice --> ', indice)
valores_repetidos = valores_repetidos[indice]
valores_repetidos[2:] = 0
print('17) --> Valores repetidos', valores_repetidos)
## 18) How do you reshape a Series built from a numpy array into a DataFrame with a given shape?
ser = pd.Series(np.random.randint(1, 10, 35))
df_shape = pd.DataFrame(ser.values.reshape(7,5))
print('18) --> ', df_shape)
## 19) How do you get the values of a Series at known index positions?
ser = pd.Series(list('abcdefghijklmnopqrstuvwxyz'))
pos = [0, 4, 8, 14, 20]
# a e i o u
resultado = ser[pos]
print('19) --> ', resultado)
## 20) How do you append Series to a DataFrame vertically or horizontally?
ser1 = pd.Series(range(5))
ser2 = pd.Series(list('abcde'))
# Vertical
df1 = pd.concat([pd.DataFrame(),ser2], ignore_index = True)
# Horizontal
df2 = pd.DataFrame().append(ser1, ignore_index=True)
print('20) Vertical --> ', df1)
print('20) Horizontal --> ', df2)
## 21) How do you compute the mean of a Series grouped by the values of another Series?
# `groupby` is also available on Series.
frutas = pd.Series(np.random.choice(['manzana', 'banana', 'zanahoria'], 10))
pesos = pd.Series(np.linspace(1, 10, 10))
print(pesos.tolist())
print(frutas.tolist())
#> [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
#> ['banana', 'zanahoria', 'manzana', 'zanahoria', 'zanahoria', 'manzana', 'banana', 'zanahoria', 'manzana', 'zanahoria']
# The values below will vary because the choices are random
# manzana 6.0
# banana 4.0
# zanahoria 5.8
# dtype: float64
media_agrupada = pd.concat([frutas, pesos], axis = 1)
media_agrupada = media_agrupada.groupby(media_agrupada[0], as_index=False)[1].mean()
print('21) --> \n', media_agrupada)
## 22) How do you import only specific columns from a csv file?
#https://raw.githubusercontent.com/selva86/datasets/master/BostonHousing.csv.
path = "./archivo.csv"
data_csv = pd.read_csv(
path,
nrows = 10)
columnas = ['crim', 'zn', 'indus']
data_tres_columnas = pd.read_csv(path, nrows=10, usecols=columnas)
print('22) --> ', data_tres_columnas)
|
2020-A-JS-GR1/py-velasquez-revelo-jefferson-david
|
examen/examen.py
|
examen.py
|
py
| 5,945 |
python
|
es
|
code
| 0 |
github-code
|
6
|
70056771389
|
# week 8, binary search
# today's plan
# 1: binary_search
# 2: ex 6 (solution)
# 3: quiz 3 (solution)
def binary_search(L, s):
'''(list of int, int) -> bool
Return True iff s is an element of L, otherwise False
REQ: L must be sorted in increasing order
>>> binary_search([-5, 3, 4, 5, 7], 4)
True
>>> binary_search([], 3)
False
>>> binary_search([1, 2, 4, 5], 5)
True
>>> binary_search([4, 10, 11], 5)
False
'''
# BASE CASE: If L is empty, it's not in the list
if L == []:
result = False
# RECURSIVE DECOMP: Pick the middle element, if we found
# what we're looking for, great. If not, then we've at
# least cut the list in half
else:
# get the middle element of the list
midpoint = len(L) // 2
median = L[midpoint]
# if we found it, then we can stop
if s == median:
result = True
# if we didn't find it, then see whether we need to continue searching
# in the left side of the list, or the right
elif s < median:
result = binary_search(L[:midpoint], s)
else:
result = binary_search(L[midpoint+1:], s)
return result
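# Each call halves the search range, so the search itself costs O(log n)
# comparisons; the list slicing above copies up to half the list per call,
# which binary_search3/binary_search4 below avoid by passing indices instead.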
# Exercise: restore the internal comments in the stub below; the fully commented solution follows it
def binary_search2(L, s):
'''(list of int, int) -> int
Return the index of s in the list L, or -1 if s is not in L
REQ: L must be sorted in increasing order
'''
#
if L == []:
result = -1
else:
#
midpoint = len(L) // 2
median = L[midpoint]
if s == median:
#
result = midpoint
elif s < median:
#
result = binary_search2(L[:midpoint], s)
else:
#
result = binary_search2(L[midpoint+1:], s)
#
if result != -1:
result += midpoint + 1
return result
def binary_search2(L, s):
'''(list of int, int) -> int
Return the index of s in the list L, or -1 if s is not in L
REQ: L must be sorted in increasing order
'''
# BASE CASE: If L is empty, return -1
if L == []:
result = -1
else:
# GENERAL CASE
# Get the middle value of L, if it's larger than s, then s must be in
# the first half of the list, so call binary_search on the first half,
# otherwise, call it on the second half of L, if it's equal to s, then
# we've found s and we can stop
midpoint = len(L) // 2
median = L[midpoint]
if s == median:
# found it. return its index
result = midpoint
elif s < median:
# if s is in L, it must be in the first half of the list
# so just perform a binary search on the first half of the list
# and return that search's result
result = binary_search2(L[:midpoint], s)
else:
# if s is in L, it must be in the latter half of the list
# so perform a binary search on the latter half of the list,
# however, this time, if we do get a result, we have to return
# its offset from our current midpoint
result = binary_search2(L[midpoint+1:], s)
# if we didn't find it, just pass on the -1, but if we did
# we have to add the index in the right list to the index of
# our middle element to get its index in our list
if result != -1:
result += midpoint + 1
return result
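# Index-offset check (illustrative): binary_search2([1, 2, 4, 6], 6)
# midpoint=2, median=4, 6 > 4 -> search [6], which returns index 0; since
# 0 != -1 the caller adds midpoint + 1, giving 0 + 3 == 3, the index of 6
# in the original list.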
def binary_search3(L, s):
'''(list of int, int) -> int
Return the index of s in the list L, or -1 if s is not in L
REQ: L must be sorted in increasing order
'''
if L == []:
result = -1
else:
result = binary_search3_helper(L, s, 0, len(L)-1)
return result
def binary_search3_helper(L, s, start, end):
# Base Case: One item list, start=0, end=0
# If that element is the one we're looking for return its index otherwise return -1
if start == end:
if s == L[start]:
result = start
else:
result = -1
# recursive decomposition:
else:
midpoint = (start + end) // 2
median = L[midpoint]
# 3 cases, the element in the middle is the one we're looking for
if s == median:
result = midpoint
# the middle element is greater than the value for which we're
# searching, so look to the left
elif s < median:
result = binary_search3_helper(L, s, start, midpoint)
# the middle element is smaller than the value for which were
# searching, so look to the right
else:
result = binary_search3_helper(L, s, midpoint+1, end)
return result
def binary_search4(L, s):
    '''(list of int, int) -> int
Return the index of s in the list L, or -1 if s is not in L
REQ: L must be a sorted list
'''
start = 0
end = len(L) - 1
found = False
result = -1
while not found and start <= end:
midpoint = (start + end) // 2
median = L[midpoint]
# the element in the middle is the one we're looking for
if s == median:
found = True
result = midpoint
else:
# the middle element is greater than the value for which we're
# searching, so look to the left
if s < median:
end = midpoint - 1
# the middle element is smaller than the value for which were
# searching, so look to the right
else:
start = midpoint + 1
return result
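# Loop-invariant note for binary_search4: whenever the while condition is
# tested, if s occurs in L then its index lies in [start, end]; each pass
# shrinks that window by at least half, so the loop runs at most about
# log2(len(L)) + 1 times.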
if __name__ == "__main__":
L = [1, 2, 4, 6, 8, 10, 12, 13, 15, 17, 19, 20, 25]
print(binary_search([], 3))
print(binary_search(L, 3))
print(binary_search(L, 15))
print(binary_search2([], 3))
print(binary_search2(L, 3))
print(binary_search2(L, 15))
print(binary_search3([], 3))
print(binary_search3(L, 3))
print(binary_search3(L, 15))
print(binary_search4([], 3))
print(binary_search4(L, 3))
print(binary_search4(L, 15))
|
BoZhaoUT/Teaching
|
Winter_2016_CSCA48_Intro_to_Computer_Science_II/Week_8_Binary_Search/week_8.py
|
week_8.py
|
py
| 6,132 |
python
|
en
|
code
| 2 |
github-code
|
6
|
20233659704
|
# Importamos las dependencias del cรกlculo de las similitudes
from metric.PearsonCorrelation import PearsonCorrelation
from metric.CosineDistance import CosineDistance
from metric.EuclideanDistance import EuclideanDistance
# Importamos las dependencias del cรกlculo de la predicciรณn
from prediction.SimplePrediction import SimplePrediction
from prediction.DifferenceAverage import DifferenceAverage
# Import the recommender class
from Recommender import Recommender
# Standard library imports
import argparse
import sys
import time
if len(sys.argv) != 1:
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--filename", required=True, help="entering the name of the file you want to analyze")
parser.add_argument("-m", "--metric", required=True, help="introduction of the type of chosen metric --> [0] Pearson Correlation [1] Euclidean Distance [2] Cosine Distance")
parser.add_argument("-p", "--prediction", required=True, help="introduction of the type of chosen prediction --> [0] Simple Prediction [1] Difference Average")
parser.add_argument("-n", "--num_neighbors", required=True, help="introduction of the number of neighbors for the analysis of the recommendation")
args = parser.parse_args()
metric = int(args.metric)
prediction = int(args.prediction)
num_neighbors = int(args.num_neighbors)
    # Read the input data file
file_name = f"./data/{str(args.filename)}"
    # Handle the case where the number of neighbors is less than 1
if (num_neighbors < 1):
print("The value of the number of neighbors must not be less than 1")
sys.exit(1)
# Controlamos el caso en el que se introduce incorrectamente el รญndice de la mรฉtrica
if (metric < 0 or metric > 2):
print("\nThe values of the metric must be between 0 and 2")
print("The possible values are : \n")
print("[0] Pearson Correlation")
print("[1] Euclidean Distance")
print("[2] Cosine Distance\n")
sys.exit(1)
# Controlamos el caso en el que se introduce incorrectamente el รญndice de la predicciรณn
if (prediction < 0 or prediction > 1):
print("\nThe values of the prediction must be between 0 and 1")
print("The possible values are : \n")
print("[0] Simple Prediction")
print("[1] Difference Avarage")
sys.exit(1)
else:
metric = -1
prediction = -1
num_neighbors = -1
dimension_examples = [(10,25), (100,1000), (25,100), (5,10), (50,250)]
for index, dimension in enumerate(dimension_examples):
print(f"[{index}] - {dimension}")
option_dimension_example = -1
while not (-1 < option_dimension_example < 5):
option_dimension_example = int(input("Enter the dimension of the matrix you want to run\n>> "))
option_dimension_example = dimension_examples[option_dimension_example]
number_dataset_dimension = 0
while not (0 < number_dataset_dimension < 11):
number_dataset_dimension = int(input("Enter the number of dataset you want to use [1-10]\n>> "))
file_name = f"./data/utility-matrix-{option_dimension_example[0]}-{option_dimension_example[1]}-{number_dataset_dimension}.txt"
while num_neighbors < 1:
num_neighbors = int(input("Indicates a number of neighbors, at least one\n>> "))
while metric < 0 or metric > 2:
print("Indicates the metric you want to use")
print("The possible values are : \n")
print("[0] Pearson Correlation")
print("[1] Euclidean Distance")
print("[2] Cosine Distance")
metric = int(input(">> "))
while prediction < 0 or prediction > 1:
print("Indicates the type of prediction you want to use")
print("The possible values are : \n")
print("[0] Simple Prediction")
print("[1] Difference Avarage")
prediction = int(input(">> "))
# Metric selection
if metric == 0:
metric_function = PearsonCorrelation.similarity
elif metric == 1:
metric_function = EuclideanDistance.similarity
elif metric == 2:
metric_function = CosineDistance.similarity
# Prediction selection
if prediction == 0:
prediction_function = SimplePrediction.predict
elif prediction == 1:
prediction_function = DifferenceAverage.predict
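# (Hedged design note) The two if/elif ladders above could also be table-driven;
# a sketch using the same imported classes:
# METRICS = {0: PearsonCorrelation.similarity,
#            1: EuclideanDistance.similarity,
#            2: CosineDistance.similarity}
# PREDICTIONS = {0: SimplePrediction.predict, 1: DifferenceAverage.predict}
# metric_function, prediction_function = METRICS[metric], PREDICTIONS[prediction]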
# Record the execution start time
start_time = time.time()
recommender = Recommender(file_name, num_neighbors, metric_function)
result = recommender.run(prediction_function)
print("\n\nMatrices resultantes #########################################################\n")
recommender.print_matrix(recommender.matrix)
recommender.print_matrix(recommender.unnormalized_matrix)
print()
# Record the execution end time
end_time = time.time()
print(f"EL TIEMPO DE CALCULO TOTAL HA SIDO\nEN SEGUNDOS --> {end_time - start_time}\nEN MINUTOS --> {(end_time - start_time) / 60}")
|
facu2002/RecommenderSystem
|
src/main.py
|
main.py
|
py
| 4,738 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36684709665
|
import pickle
import numpy as np
import os
with open('data/sst2-sentence/neg_db', 'rb') as f:
negation_database = pickle.load(f)
class Dataset(object):
def __init__(self, dir, filename, rev_vocab):
self.dir = dir
self.filename = filename
self.rev_vocab = rev_vocab
        with open(os.path.join(self.dir, filename), 'rb') as f:  # use the stored dir, not the global loop variable
data = pickle.load(f)
self.sentences = \
[' '.join([rev_vocab[x] for x in sent['sentence'] if x != 0]) for sent in data]
def stats(self):
text = self.filename
print("=" * (len(text) + 4))
print("| %s |" % text)
print("=" * (len(text) + 4))
print("total sentences :- %d" % len(self.sentences))
length = [len(sent.split()) for sent in self.sentences]
print("average length :- %.4f +/- %.4f" % (np.mean(length), np.std(length)))
a_but_b = [sent for sent in self.sentences if has_but(sent) is True]
print("total A-but-B :- %d" % len(a_but_b))
length = [len(sent.split()) for sent in a_but_b]
print("average A-but-B length :- %.4f +/- %.4f" % (np.mean(length), np.std(length)))
length = [sent.split().index('but') for sent in a_but_b]
print("average A length :- %.4f +/- %.4f" % (np.mean(length), np.std(length)))
length = [len(sent.split()) - sent.split().index('but') - 1 for sent in a_but_b]
print("average B length :- %.4f +/- %.4f" % (np.mean(length), np.std(length)))
negation = [sent for sent in self.sentences if has_negation(sent) is True]
print("total negation :- %d" % len(negation))
length = [len(sent.split()) for sent in negation]
print("average negation length :- %.4f +/- %.4f" % (np.mean(length), np.std(length)))
discourse = [sent for sent in self.sentences if has_discourse(sent) is True]
print("total discourse :- %d" % len(discourse))
length = [len(sent.split()) for sent in discourse]
print("average discourse length :- %.4f +/- %.4f" % (np.mean(length), np.std(length)))
with open('analysis/discourse_%s.tsv' % self.filename, 'w') as f:
f.write('\n'.join(discourse))
def has_but(sentence):
return ' but ' in sentence
def has_negation(sentence):
return sentence in negation_database
def has_discourse(sentence):
return has_but(sentence) or has_negation(sentence)
def load_vocab(vocab_file):
with open(vocab_file, 'r') as f:
rev_vocab = f.read().split('\n')
vocab = {v: i for i, v in enumerate(rev_vocab)}
return vocab, rev_vocab
dirs = ['data/sst2/']
files = ['train.pickle', 'dev.pickle', 'test.pickle']
for dr in dirs:
print("=" * (len(dr) + 4))
print("| %s |" % dr)
print("=" * (len(dr) + 4))
vocab, rev_vocab = load_vocab(os.path.join(dr, 'vocab'))
for file in files:
dataset = Dataset(dr, file, rev_vocab)
dataset.stats()
|
martiansideofthemoon/logic-rules-sentiment
|
code/analysis/data-stats.py
|
data-stats.py
|
py
| 2,928 |
python
|
en
|
code
| 32 |
github-code
|
6
|
73993683069
|
from django.urls import path
from django.contrib.auth.decorators import login_required
from . import views
urlpatterns = [
path('all_lessons/', views.AllLessonsView.as_view(),
name='all_lessons'),
path('video_player/<int:id>', views.VideoPlayer.as_view(),
name='video_player'),
path('subscribe/', login_required(views.SubscriptionOptionsView.as_view()),
name='subscribe'),
path('create_checkout_session/<int:subscription_id>',
login_required(views.CreateStripeCheckoutSessionView.as_view()),
name='create_subscription_checkout_session'),
path('subscription_success/', views.SubscriptionSuccessView.as_view(),
name='subscription_success'),
]
|
johnrearden/strings_attached
|
video_lessons/urls.py
|
urls.py
|
py
| 720 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15767420313
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# ะะฐะฒะฐะฝัะฐะถะตะฝะฝั ะดะฐะฝะธั
data = pd.read_csv("data_multivar_nb.txt", header=None, names=["Feature1", "Feature2", "Class"])
# ะ ะพะทะดัะปะตะฝะฝั ะฝะฐ ะพะทะฝะฐะบะธ ัะฐ ะผััะบะธ ะบะปะฐััะฒ
X = data[["Feature1", "Feature2"]]
y = data["Class"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
svm_model = SVC(kernel='linear', C=1.0, random_state=42)
svm_model.fit(X_train, y_train)
y_pred = svm_model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
print(f"Accuracy: {accuracy}")
print("Confusion Matrix:\n", conf_matrix)
print("Classification Report:\n", class_report)
|
IvanPaliy/A.I.-Lab-1-IPZ-Palii
|
LR_1_task_6.py
|
LR_1_task_6.py
|
py
| 946 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17365595109
|
import tkinter as tk
from tkinter import messagebox
def print_board(board):
for row in board:
print(" | ".join(row))
print("-" * 9)
def check_winner(board, player):
for row in board:
if all(cell == player for cell in row):
return True
for col in range(3):
if all(board[row][col] == player for row in range(3)):
return True
if all(board[i][i] == player for i in range(3)) or all(board[i][2 - i] == player for i in range(3)):
return True
return False
def is_draw(board):
return all(cell != " " for row in board for cell in row)
def minmax(board, depth, is_maximizing):
if check_winner(board, "X"):
return -10 + depth
elif check_winner(board, "O"):
return 10 - depth
elif is_draw(board):
return 0
    if is_maximizing:
        best_score = -float("inf")
        for row in range(3):
            for col in range(3):
                if board[row][col] == " ":
                    board[row][col] = "O"
                    score = minmax(board, depth + 1, False)
                    board[row][col] = " "
                    best_score = max(best_score, score)
        return best_score
    else:
        best_score = float("inf")
        for row in range(3):
            for col in range(3):
                if board[row][col] == " ":
                    board[row][col] = "X"
                    score = minmax(board, depth + 1, True)
                    board[row][col] = " "
                    best_score = min(best_score, score)
        return best_score
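# Why the depth term: scoring a win as 10 - depth (and a loss as -10 + depth)
# makes the bot prefer the quickest win among equal outcomes (a win found at
# depth 1 scores 9; the same win delayed to depth 3 scores only 7), and
# symmetrically postpones unavoidable losses.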
def best_move(board):
    best_score = -float("inf")
    best_move = None
    for row in range(3):
        for col in range(3):
            if board[row][col] == " ":
                board[row][col] = "O"
                score = minmax(board, 0, False)
                board[row][col] = " "
                if score > best_score:
                    best_score = score
                    best_move = (row, col)
    return best_move
def make_move(row, col):
global board, player_turn
if board[row][col] == " " and player_turn:
board[row][col] = "X"
buttons[row][col].config(text="X", state="disabled")
player_turn = not player_turn
if check_winner(board, "X"):
end_game("Human wins!")
elif is_draw(board):
end_game("It's a draw!")
else:
bot_row, bot_col = best_move(board)
board[bot_row][bot_col] = "O"
buttons[bot_row][bot_col].config(text="O", state="disabled")
player_turn = not player_turn
if check_winner(board, "O"):
end_game("Bot wins!")
elif is_draw(board):
end_game("It's a draw!")
def end_game(message):
global player_turn
player_turn = False
messagebox.showinfo("Game Over", message)
reset_board()
def reset_board():
global board, player_turn
board = [[" " for _ in range(3)] for _ in range(3)]
player_turn = True
for row in range(3):
for col in range(3):
buttons[row][col].config(text=" ", state="normal")
def main():
global buttons, board, player_turn
window = tk.Tk()
window.title("Tic-Tac-Toe")
menu_bar = tk.Menu(window)
window.config(menu=menu_bar)
file_menu = tk.Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="File", menu=file_menu)
file_menu.add_command(label="New Game", command=reset_board)
file_menu.add_separator()
file_menu.add_command(label="Exit", command=window.quit)
board = [[" " for _ in range(3)] for _ in range(3)]
player_turn = True
buttons = []
for row in range(3):
row_buttons = []
for col in range(3):
button = tk.Button(window, text=" ", font=("Helvetica", 24), width=5, height=2,
command=lambda r=row, c=col: make_move(r, c))
button.grid(row=row, column=col, padx=5, pady=5)
row_buttons.append(button)
buttons.append(row_buttons)
window.mainloop()
if __name__ == "__main__":
main()
|
lag25/SmartTTT-MinMax-Bot
|
tic_tac_toe_gui.py
|
tic_tac_toe_gui.py
|
py
| 4,097 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36703125478
|
from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from .models import Client, Mailing, Message
from .serializers import MailingSerializer, ClientSerializer, MailingMessagesSerializer
from .service import check_mailing
from .tasks import send_message
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
response_400 = openapi.Response('BAD_REQUEST')
client_response_get = openapi.Response('Client list', ClientSerializer(many=True))
client_response_post = openapi.Response('New Client', ClientSerializer)
client_response_detail_get = openapi.Response('Client object', ClientSerializer)
mailing_response_get = openapi.Response('Mailing list', MailingSerializer(many=True))
mailing_response_post = openapi.Response('New Mailing', MailingSerializer)
mailing_response_detail_get = openapi.Response('Mailing object', MailingSerializer)
mailing_statistics_response_get = openapi.Response('Mailing statistics', MailingMessagesSerializer(many=True))
@swagger_auto_schema(method='get', responses={200: client_response_get})
@swagger_auto_schema(method='post', request_body=ClientSerializer)
@api_view(['GET', 'POST'])
def api_client(request):
if request.method == 'GET':
clients = Client.objects.all()
serializer = ClientSerializer(clients, many=True)
return Response(serializer.data)
if request.method == 'POST':
serializer = ClientSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(method='get', responses={200: client_response_detail_get})
@swagger_auto_schema(methods=['put', 'patch'], request_body=ClientSerializer)
@api_view(['GET', 'PUT', 'PATCH', 'DELETE'])
def api_client_detail(request, pk):
client = get_object_or_404(Client, pk=pk)
if request.method == 'GET':
serializer = ClientSerializer(client)
return Response(serializer.data)
elif request.method == 'PUT' or request.method == 'PATCH':
serializer = ClientSerializer(client, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
client.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(method='get', responses={200: mailing_response_get})
@swagger_auto_schema(method='post', request_body=MailingSerializer)
@api_view(['GET', 'POST'])
def api_mailing(request):
if request.method == 'GET':
mailing = Mailing.objects.all()
serializer = MailingSerializer(mailing, many=True)
return Response(serializer.data)
if request.method == 'POST':
serializer = MailingSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
check_mailing(serializer.data['id'])
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(method='get', responses={200: mailing_response_detail_get})
@swagger_auto_schema(methods=['put', 'patch'], request_body=MailingSerializer)
@api_view(['GET', 'PUT', 'PATCH', 'DELETE'])
def api_mailing_detail(request, pk):
mailing = get_object_or_404(Mailing, pk=pk)
if request.method == 'GET':
return Response(MailingMessagesSerializer(mailing).data)
elif request.method == 'PUT' or request.method == 'PATCH':
serializer = MailingSerializer(mailing, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
mailing.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(method='get', responses={200: mailing_statistics_response_get})
@api_view(['GET'])
def api_mailing_statistics(request):
context = []
mailings = Mailing.objects.all()
for mailing in mailings:
context.append(MailingMessagesSerializer(instance=mailing).data)
return Response(context)
@api_view(['GET'])
def api_test(request):
if request.method == 'GET':
mailing = Mailing.objects.get(pk=10)
clients = mailing.get_clients()
serializer = ClientSerializer(clients, many=True)
return Response(serializer.data)
@api_view(['GET'])
def api_send_created_messages(request):
messages = Message.objects.filter(status=Message.Status.CREATED)
for i in messages:
send_message.delay(i.id,)
return Response([], status=status.HTTP_200_OK)
|
novelsk/notification_service
|
app/service/views.py
|
views.py
|
py
| 4,985 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2636855121
|
from codigo import Email, borrar, limpiar_inbox
def test_los_atributos_se_guardan_correctamente():
m = Email("espada nueva!", "hay una espada nueva")
assert m.asunto == "espada nueva!"
assert m.texto == "hay una espada nueva"
def test_mail_sano_no_genera_valor_de_spam():
m = Email("espada nueva!", "hay una espada nueva")
assert m.medir_spam() == 0
def test_mail_con_asunto_spamoso_es_detectado():
m = Email("espada en oferta!", "hay una espada nueva")
assert m.medir_spam() == 10
def test_mail_con_texto_spamoso_es_detectado():
m = Email("espada nueva!", "hay una espada nueva en oferta")
assert m.medir_spam() == 2
def test_modo_rapido_deja_pasar_textos_spamosos():
m = Email("espada nueva!", "hay una espada nueva en oferta")
assert m.medir_spam(modo_rapido=True) == 0
def test_spamosidad_final_es_resultado_de_spamosidad_de_texto_y_asunto():
m = Email("espada nueva oferta oferta!", "hay una espada nueva en oferta oferta")
assert m.medir_spam() == 24
def test_limpiar_inbox_borra_mails_spamosos(mocker):
fake_borrar = mocker.patch("codigo.borrar", return_value="borrado!")
m_spam1 = Email("espada nueva oferta!", "hay una espada nueva")
m_spam2 = Email("espada nueva oferta!", "hay una espada nueva")
m_bien1 = Email("espada nueva!", "hay una espada nueva")
m_bien2 = Email("espada nueva!", "hay una espada nueva")
inbox = [m_spam1, m_bien1, m_spam2, m_bien2]
inbox_limpio, emails_borrados, resultados_borrados = limpiar_inbox(inbox)
assert inbox_limpio == [m_bien1, m_bien2]
assert emails_borrados == [m_spam1, m_spam2]
assert resultados_borrados == ["borrado!", "borrado!"]
# fake_borrar.assert_called_once_with(m_spam1)
fake_borrar.assert_has_calls([
mocker.call(m_spam1),
mocker.call(m_spam2),
])
assert fake_borrar.call_count == 2
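# Note on the patch target (pytest-mock/unittest.mock semantics): patching
# "codigo.borrar" replaces the name as it is looked up inside codigo.py, which
# is why limpiar_inbox picks up the fake; patching borrar in its defining module
# would not intercept calls made through codigo's own namespace.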
|
ucseiw-team/catedra
|
ejemplo_testing_2022/test_codigo.py
|
test_codigo.py
|
py
| 1,891 |
python
|
es
|
code
| 5 |
github-code
|
6
|
28074571183
|
''' This program takes a text file and gives the total
length of the character in the file along with the frequency
of each alphabet and as well as most and least common 10 alphabets'''
"""Character frequency"""
'''Total number of characters'''
def character_analysis(filename):
    with open(filename) as file:  # context manager closes the file when done
        listOfWords = []  # converts the file to a list of words
        for i in file:
            someword = i.strip()  # removes the whitespace and extra lines
            listOfWords.append(someword)
    wordDict = {}
    for word in listOfWords:
        for letter in word:
            if letter in wordDict:  # checks to see if the letter is already in the dictionary
                wordDict[letter] += 1  # add one if it does, otherwise create a new key and value
            else:
                wordDict[letter] = 1
    return wordDict
def sumOfCharacters(aDict):
    total = 0  # renamed from "sum" to avoid shadowing the built-in
    for i in aDict.values():  # sums all the values in the dictionary
        total = total + i
    return total
def most_common(aDict):
    t = []
    for key, value in aDict.items():
        t.append((value, key))
    t.sort(reverse=True)
    g = []
    for value, key in t:  # swap the tuples back to (character, count)
        g.append((key, value))
    return g[:10]
def least_common(aDict):
    t = []
    for key, value in aDict.items():
        t.append((value, key))
    t.sort()
    g = []
    for value, key in t:  # swap the tuples back to (character, count)
        g.append((key, value))
    return g[:10]
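# (Hedged refactor sketch) most_common and least_common differ only in sort
# direction, so a single helper could serve both, modulo tie-breaking on equal
# counts:
# def ranked(aDict, reverse):
#     return sorted(aDict.items(), key=lambda kv: kv[1], reverse=reverse)[:10]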
allCharacters = character_analysis('words.txt')
print('All Characters in the words.txt: ', allCharacters)
print('Sum of all characters in the text:', sumOfCharacters(allCharacters))
print('The ten most common characters: ', most_common(allCharacters))
print('The ten least common characters: ', least_common(allCharacters))
|
asifbux/Python-Course-ENSF-592
|
A03_1_char_frequency.py
|
A03_1_char_frequency.py
|
py
| 1,829 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4461100140
|
# Licensed under a MIT style license - see LICENSE.txt
"""MUSE-PHANGS plotting routines
"""
__authors__ = "Eric Emsellem"
__copyright__ = "(c) 2017, ESO + CRAL"
__license__ = "MIT License"
__contact__ = " <[email protected]>"
# This module provides some functions to plot and check the data reduction
#
# Eric Emsellem adapted a March 2018 version, provided by Rebecca
# and adapted it to the pymusepipe package
# Importing modules
import numpy as np
# Standard modules
from os.path import join as joinpath
# Plotting routines
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
from .mpdaf_pipe import MuseSetImages, MuseSetSpectra
from .util_image import my_linear_model, get_flux_range
__version__ = '0.0.1 (23 March 2018)'
############################################################
# BEGIN
# The following parameters can be adjusted for the need of
# the specific pipeline to be used
############################################################
############################################################
# END
############################################################
PLOT = '\033[1;35;20m'
ENDC = '\033[0m'
def print_fig(text) :
print(PLOT + "# GraphPipeInfo " + ENDC + text)
def open_new_wcs_figure(nfig, mywcs=None):
"""Open a new figure (with number nfig) with given wcs.
    If no WCS is provided, just open a plain subplot in that figure.
Input
-----
nfig : int
Figure number to consider
mywcs : astropy.wcs.WCS
Input WCS to open a new figure (Default value = None)
Returns
-------
fig, subplot
Figure itself with the subplots using the wcs projection
"""
# get the figure
fig = plt.figure(nfig)
# Clean the figure
plt.clf()
# Adding axes with WCS
if mywcs is None:
return fig, fig.add_subplot(1, 1, 1)
else:
return fig, fig.add_subplot(1, 1, 1, projection=mywcs)
def plot_compare_contours(data1, data2, plotwcs=None, labels=('Data1', 'Data2'), levels=None,
nlevels=10, fignum=1, namefig='dummy_contours.png', figfolder="",
savefig=False, **kwargs):
"""Creates a plot with the contours of two input datasets for comparison
Input
-----
data1
data2: 2d np.arrays
Input arrays to compare
plotwcs: WCS
WCS used to set the plot if provided
labels: tuple/list of 2 str
Labels for the plot
levels: list of floats
Levels to be used for the contours. Calculated if None.
fignum: int
Number for the figure
namefig: str
Name of the figure to be saved (if savefig is True)
figfolder: str
Name of the folder for the figure
savefig: bool
If True, will save the figure as namefig
Creates
-------
Plot with contours of the two input dataset
"""
np.seterr(divide='ignore', invalid='ignore')
# Getting the range of relevant fluxes
lowlevel_d1, highlevel_d1 = get_flux_range(data1)
fig, ax = open_new_wcs_figure(fignum, plotwcs)
# Defining the levels for MUSE
if levels is not None:
levels_d1 = levels
else:
levels_d1 = np.linspace(np.log10(lowlevel_d1),
np.log10(highlevel_d1),
nlevels)
# Plot contours for MUSE
cdata1 = ax.contour(np.log10(data1), levels_d1, colors='k',
origin='lower', linestyles='solid')
levels_d2 = cdata1.levels
# Plot contours for Ref
cdata2 = ax.contour(np.log10(data2), levels=levels_d2, colors='r',
origin='lower', alpha=0.5, linestyles='solid')
ax.set_aspect('equal')
h1, _ = cdata1.legend_elements()
h2, _ = cdata2.legend_elements()
ax.legend([h1[0], h2[0]], [labels[0], labels[1]])
if "title" in kwargs:
plt.title(kwargs.pop('title'))
plt.tight_layout()
if savefig:
plt.savefig(joinpath(figfolder, namefig))
np.seterr(divide='warn', invalid='warn')
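# Note: the seterr call above restores numpy's default warnings that the
# 'ignore' call at the top of this function suppressed while taking log10 of
# zero or negative pixels.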
def plot_compare_diff(data1, data2, plotwcs=None, figfolder="", percentage=5, fignum=1,
namefig="dummy_diff.ong", savefig=False, **kwargs):
"""
Parameters
----------
data1
data2
figfolder
fignum
namefig
savefig
kwargs
Returns
-------
"""
fig, ax = open_new_wcs_figure(fignum, plotwcs)
ratio = 100. * (data2 - data1) / (data1 + 1.e-12)
im = ax.imshow(ratio, vmin=-percentage, vmax=percentage)
cbar = fig.colorbar(im, shrink=0.8)
if "title" in kwargs:
plt.title(kwargs.pop('title'))
plt.tight_layout()
if savefig:
plt.savefig(joinpath(figfolder, namefig))
def plot_compare_cuts(data1, data2, labels=('X', 'Y'), figfolder="", fignum=1,
namefig="dummy_polypar.png", ncuts=11, savefig=False, **kwargs):
"""
Input
-----
data1
data2
label1
label2
figfolder
fignum
namefig
savefig
kwargs
Creates
-------
Plot with a comparison of the two data arrays using regular X and Y cuts
"""
fig, ax = open_new_wcs_figure(fignum)
# Getting the range of relevant fluxes
lowlevel_d1, highlevel_d1 = get_flux_range(data1)
diffima = (data2 - data1) * 200. / (lowlevel_d1 + highlevel_d1)
chunk_x = data1.shape[0] // (ncuts + 1)
chunk_y = data1.shape[1] // (ncuts + 1)
c1 = ax.plot(diffima[np.arange(ncuts) * chunk_x, :].T, 'k-', label=labels[0])
c2 = ax.plot(diffima[:, np.arange(ncuts) * chunk_y], 'r-', label=labels[1])
ax.legend(handles=[c1[0], c2[0]], loc=0)
ax.set_ylim(-20, 20)
ax.set_xlabel("[pixels]", fontsize=20)
ax.set_ylabel("[%]", fontsize=20)
if "title" in kwargs:
plt.title(kwargs.pop('title'))
plt.tight_layout()
if savefig:
plt.savefig(joinpath(figfolder, namefig))
def plot_polypar(polypar, labels=("Data 1","Data 2"), figfolder="", fignum=1,
namefig="dummy_polypar.png", savefig=False, **kwargs):
"""Creating a plot showing the normalisation arising from a polypar object
Parameters
----------
polypar
label1
label2
foldfig
namefig
Returns
-------
"""
# Opening the figure
fig, ax = open_new_wcs_figure(fignum)
# Getting the x, y to plot
(x, y) = (polypar.med[0][polypar.selection],
polypar.med[1][polypar.selection])
ax.plot(x, y, '.')
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.plot(x, my_linear_model(polypar.beta, x), 'k')
if "title" in kwargs:
plt.title(kwargs.pop('title'))
plt.tight_layout()
if savefig:
plt.savefig(joinpath(figfolder, namefig))
#########################################################################
# Main class
# GraphMuse
#########################################################################
class GraphMuse(object):
"""Graphic output to check MUSE data reduction products
"""
def __init__(self, pdf_name='drs_check.pdf',
figsize=(10,14), rect_layout=[0, 0.03, 1, 0.95], verbose=True):
"""Initialise the class for plotting the outcome results
"""
self.verbose = verbose
self.pdf_name = pdf_name
self.pp = PdfPages(pdf_name)
self.figsize = figsize
self.rect_layout = rect_layout
self.npages = 0
def close(self):
self.pp.close()
def savepage(self):
self.pp.savefig()
plt.close()
self.npages += 1
def start_page(self):
"""Start the page
"""
if self.verbose :
print_fig("Starting page {0}".format(self.npages+1))
plt.figure(figsize=self.figsize)
def plot_page(self, list_data):
"""Plot a set of blocks, each made of a set of spectra or
images. This is for 1 page
It first counts the number of lines needed according to the
separation for images (default is 2 per line, each image taking 2 lines)
and spectra (1 spectrum per line over 2 columns)
"""
if len(list_data) == 0 :
print_fig("WARNING: datalist is empty, no plots will be created")
return
self.start_page()
nspectra_blocks, nimages_blocks = 0, 0
nlines = 0
        if isinstance(list_data, MuseSetSpectra) or isinstance(list_data, MuseSetImages):
plt.suptitle(list_data.subtitle)
list_data = [list_data]
for data in list_data :
if isinstance(data, MuseSetSpectra):
nspectra_blocks += 1
nlines += data.__len__()
elif isinstance(data, MuseSetImages):
nimages_blocks += 1
nlines += 2 * data.__len__()
self.gs = gridspec.GridSpec(nlines, 2)
self.list_ax = []
self.count_lines = 0
for data in list_data :
if isinstance(data, MuseSetSpectra):
self.plot_set_spectra(data)
elif isinstance(data, MuseSetImages):
self.plot_set_images(data)
self.savepage()
def plot_set_spectra(self, set_of_spectra=None):
"""Plotting a set of spectra
Skipping the ones that are 'None'
"""
# Set of sky lines to plot when relevant - add_sky_lines to True
sky_lines = [5577., 6300., 6864., 7914., 8344., 8827.]
if set_of_spectra is None:
print_fig("ERROR: list of spectra is empty")
return
for spec in set_of_spectra:
self.list_ax.append(plt.subplot(self.gs[self.count_lines,:]))
self.count_lines += 1
if spec is not None:
spec.plot(title=spec.title, ax=self.list_ax[-1])
if spec.add_sky_lines:
for line in sky_lines:
plt.axvline(x=line, color=spec.color_sky, linestyle=spec.linestyle_sky, alpha=spec.alpha_sky)
plt.tight_layout(rect=self.rect_layout)
def plot_set_images(self, set_of_images=None):
"""Plotting a set of images
Skipping the ones that are 'None'
"""
if set_of_images is None :
print_fig("ERROR: list of images is empty")
return
for i in range(set_of_images.__len__()):
image = set_of_images[i]
count_cols = i%2
if image is not None:
self.list_ax.append(plt.subplot(self.gs[self.count_lines: self.count_lines + 2, count_cols]))
image.plot(scale=image.scale, vmin=image.vmin, colorbar=image.colorbar, title=image.title, ax=self.list_ax[-1])
self.count_lines += count_cols * 2
plt.tight_layout(rect=self.rect_layout)
|
emsellem/pymusepipe
|
src/pymusepipe/graph_pipe.py
|
graph_pipe.py
|
py
| 10,892 |
python
|
en
|
code
| 7 |
github-code
|
6
|
70322602108
|
from src.admin.repository import AdminRepo
from src.admin.schemas import AdminUserData
from src.user.constants import SubscriptionType
class AdminService:
def __init__(self, admin_repo: AdminRepo):
self.admin_repo = admin_repo
async def get_users(
self,
subscription_type: SubscriptionType | None = None,
is_active: bool | None = None,
is_activated: bool | None = None,
search_keyword: str | None = None,
) -> list[AdminUserData]:
return await self.admin_repo.get_users(
search_keyword=search_keyword,
subscription_type=subscription_type,
is_active=is_active,
is_activated=is_activated,
)
|
ttq186/DressUp
|
src/admin/service.py
|
service.py
|
py
| 716 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17216033842
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='wutools',
version='0.0.3',
author='Jason Yunger',
author_email='[email protected]',
description='Testing installation of Package',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/jyunger/wutest',
project_urls = {
"Bug Tracker": "https://github.com/jyunger/wutest/issues"
},
license='MIT',
packages=setuptools.find_packages(),
install_requires=['numpy', 'pandas', 'psutil', 'pyathena', 'graphviz', 'sqlalchemy', 'sqlparse'],
)
|
jyunger/wutest
|
setup.py
|
setup.py
|
py
| 679 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40128878134
|
#!/usr/bin/env python3
import sys
sys.setrecursionlimit(10**6)
INF = 10 ** 9 + 1 # sys.maxsize # float("inf")
MOD = 10 ** 9 + 7
def debug(*x):
print(*x, file=sys.stderr)
def solve(SOLVE_PARAMS):
pass
def main():
N, M = map(int, input().split())
is_head = [True] * N
from collections import defaultdict
edges = defaultdict(list)
for q in range(M):
a, b = map(int, input().split())
a -= 1
b -= 1
is_head[b] = False
edges[a].append(b)
to_tail = [-1] * N
visited = [False] * N
def dfs(x):
buf = [0]
visited[x] = True
for v in edges[x]:
if visited[v]:
continue
ret = to_tail[v]
if ret == -1:
ret = dfs(v)
to_tail[v] = ret
buf.append(ret)
return max(buf) + 1
buf = [1]
for i in range(N):
visited = [False] * N
buf.append(dfs(i))
# debug(": to_tail", to_tail)
print(max(buf))
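# Algorithm note (as exercised by the tests below): dfs(x) returns 1 + the
# longest chain of edges reachable from x, with `visited` guarding against
# cycles and to_tail memoising completed nodes across the outer loop, so the
# printed answer is the longest such chain over all starting vertices.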
# tests
T1 = """
5 3
1 2
3 4
5 1
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
3
"""
T2 = """
4 10
1 2
2 1
1 2
2 1
1 2
1 3
1 4
2 3
2 4
3 4
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
4
"""
T3 = """
10 4
3 1
4 1
5 9
2 6
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
3
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
|
nishio/atcoder
|
abc177/d2.py
|
d2.py
|
py
| 1,871 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19054038098
|
import sys
import time
import os
import tempfile
import shutil
import contextlib
import numpy as np
import h5py
from . import __version__ as tool_version
import stag.align as align
def load_genome_DB(database, tool_version, verbose):
dirpath = tempfile.mkdtemp()
shutil.unpack_archive(database, dirpath, "gztar")
list_files = [f for f in os.listdir(dirpath) if os.path.isfile(os.path.join(dirpath, f))]
for f in ("threshold_file.tsv", "hmm_lengths_file.tsv", "concatenated_genes_STAG_database.HDF5"):
if f not in list_files:
raise ValueError(f"[E::align] Error: {f} is missing.")
with open(os.path.join(dirpath, "threshold_file.tsv")) as threshold_in:
gene_thresholds = dict(line.rstrip().split("\t") for line in threshold_in)
gene_order = list(gene_thresholds.keys())
with open(os.path.join(dirpath, "hmm_lengths_file.tsv")) as hmm_lengths_in:
ali_lengths = dict(line.rstrip().split("\t") for line in hmm_lengths_in)
list_files.remove("threshold_file.tsv")
list_files.remove("hmm_lengths_file.tsv")
list_files.remove("concatenated_genes_STAG_database.HDF5")
return list_files, dirpath, gene_thresholds, gene_order, ali_lengths, os.path.join(dirpath, "concatenated_genes_STAG_database.HDF5")
def load_db(hdf5_DB_path, protein_fasta_input=None, aligned_sequences=None, dir_output=None):
with h5py.File(hdf5_DB_path, 'r') as db_in:
params_out = open(os.path.join(dir_output, "parameters.tsv"), "w") if dir_output else contextlib.nullcontext()
with params_out:
# zero: tool version -------------------------------------------------------
db_tool_version = db_in['tool_version'][0]
use_proteins = db_in['align_protein'][0] # bool
use_cmalign = db_in['use_cmalign'][0] # bool
if dir_output:
params_out.write("Tool version: "+str(db_tool_version)+"\n")
params_out.write("Use proteins for the alignment: "+str(use_proteins)+"\n")
params_out.write("Use cmalign instead of hmmalign: "+str(use_cmalign)+"\n")
# check that it is the correct database, for 'classify', we need a single
# gene
if db_in['db_type'][0] != "single_gene":
sys.stderr.write("[E::main] Error: this database is not designed to run with stag classify\n")
sys.exit(1)
# check if we used proteins
if not aligned_sequences and not dir_output:
if protein_fasta_input and not db_in['align_protein'][0]:
# some proteins are provided in the classify but the db was constructed without using the proteins
raise ValueError("Protein provided, but the database was constructed on genes.\n")
elif not protein_fasta_input and db_in['align_protein'][0]:
# the classify do not have proteins but the db was constructed WITH the proteins
raise ValueError("Missing protein file (the database was constructed aligning proteins).\n")
# first, we save a temporary file with the hmm file ------------------------
hmm_file = tempfile.NamedTemporaryFile(delete=False, mode="w")
with hmm_file:
os.chmod(hmm_file.name, 0o644)
hmm_file.write(db_in['hmm_file'][0])
hmm_file.flush()
os.fsync(hmm_file.fileno())
if dir_output:
shutil.move(hmm_file.name, os.path.join(dir_output, "hmmfile.hmm"))
# second if we need to use cm_align ----------------------------------------
use_cmalign = db_in['use_cmalign'][0] # bool
# third: taxonomy ----------------------------------------------------------
taxonomy = {key: list(db_in['taxonomy/{}'.format(key)]) for key in db_in['taxonomy']}
if dir_output:
tax_out = open(os.path.join(dir_output, "node_hierarchy.tsv"), "w")
with tax_out:
print("Node", "Children", sep="\t", file=tax_out)
for key, values in taxonomy.items():
print(key, *map(str, values), sep="\t", file=tax_out)
# fourth: tax_function -----------------------------------------------------
tax_function = {str(key): np.array(db_in['tax_function/{}'.format(key)], dtype=np.float64)
for key in db_in['tax_function']}
if dir_output:
tax_func_out = open(os.path.join(dir_output, "taxonomy_function.tsv"), "w")
with tax_func_out:
for key, value in tax_function.items():
print(key, value, sep="\t", file=tax_func_out)
# fifth: the classifiers ---------------------------------------------------
classifiers = dict()
class_out = open(os.path.join(dir_output, "classifiers_weights.tsv"), "w") if dir_output else contextlib.nullcontext()
with class_out:
for key in db_in['classifiers']:
classifier = db_in['classifiers/{}'.format(key)]
if not isinstance(classifier[0], str):
classifiers[key] = np.array(classifier, dtype=np.float64)
else:
classifiers[key] = "no_negative_examples"
if dir_output:
print(key, *classifiers[key], sep="\t", file=class_out)
return hmm_file.name, use_cmalign, taxonomy, tax_function, classifiers, db_tool_version
def save_to_file(classifiers, full_taxonomy, tax_function, use_cmalign, output, hmm_file_path=None, protein_fasta_input=None):
string_dt = h5py.special_dtype(vlen=str)
with h5py.File(output, "w") as h5p_out:
# zero: tool version -------------------------------------------------------
h5p_out.create_dataset('tool_version', data=np.array([str(tool_version)], "S100"), dtype=string_dt)
# and type of database
h5p_out.create_dataset('db_type', data=np.array(["single_gene"], "S100"), dtype=string_dt)
# was the alignment done at the protein level?
h5p_out.create_dataset('align_protein', data=np.array([bool(protein_fasta_input)]), dtype=bool)
# first we save the hmm file -----------------------------------------------
hmm_string = "".join(line for line in open(hmm_file_path)) if hmm_file_path else "NA"
h5p_out.create_dataset('hmm_file', data=np.array([hmm_string], "S" + str(len(hmm_string) + 100)), dtype=string_dt, compression="gzip")
# second, save the use_cmalign info ----------------------------------------
h5p_out.create_dataset('use_cmalign', data=np.array([use_cmalign]), dtype=bool)
# third, we save the taxonomy ---------------------------------------------
h5p_out.create_group("taxonomy")
for node, _ in full_taxonomy.get_all_nodes(get_root=True):
h5p_out.create_dataset(f"taxonomy/{node}", data=np.array(list(full_taxonomy[node].children.keys()), "S10000"), dtype=string_dt, compression="gzip")
# fourth, the taxonomy function --------------------------------------------
h5p_out.create_group("tax_function")
for c in tax_function:
# we append the intercept at the head (will have position 0)
vals = np.append(tax_function[c].intercept_, tax_function[c].coef_)
h5p_out.create_dataset("tax_function/" + str(c), data=vals, dtype=np.float64, compression="gzip")
# fifth, save the classifiers ----------------------------------------------
h5p_out.create_group("classifiers")
for c in classifiers:
if classifiers[c] != "no_negative_examples":
vals = np.append(classifiers[c].intercept_, classifiers[c].coef_)
h5p_out.create_dataset("classifiers/" + c, data=vals, dtype=np.float64, compression="gzip", compression_opts=8)
else:
# in this case, it always predict 1, we save it as an array of
# with the string "no_negative_examples"
h5p_out.create_dataset("classifiers/" + c, data=np.array(["no_negative_examples"], "S40"), dtype=string_dt, compression="gzip")
h5p_out.flush()
|
zellerlab/stag
|
stag/databases.py
|
databases.py
|
py
| 8,159 |
python
|
en
|
code
| 7 |
github-code
|
6
|
73510567227
|
class Solution:
def largestLocal(self, grid: List[List[int]]) -> List[List[int]]:
ans = []
row = len(grid)
col = len(grid[0])
for i in range(row-2):
temp = []
for j in range(col-2):
maxi = 0
for k in range(i,i+3):
for z in range(j,j+3):
maxi = max(maxi,grid[k][z])
temp.append(maxi)
ans.append(temp)
# count = 0
# for i in range(row-2):
# temp = []
# for j in range(col-2):
# temp.append(arr[count])
# count += 1
# ans.append(temp)
return ans
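# Complexity sketch: every one of the (rows-2)*(cols-2) output cells scans a
# fixed 3x3 window, so the whole pass is O(9*rows*cols) time with O(rows*cols)
# output space.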
|
yonaSisay/a2sv-competitive-programming
|
2373-largest-local-values-in-a-matrix/2373-largest-local-values-in-a-matrix.py
|
2373-largest-local-values-in-a-matrix.py
|
py
| 721 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11250679083
|
#%%
import numpy as np
import os
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
import numba as nb
import time
import cupy as cp
#%% ่ฎๅๆชๆก
queriesPath ='C:\\Users\\User\\Desktop\\NTUST\\IR\\data\\ntust-ir-2020_hw4_v2\\queries'
docsPath = 'C:\\Users\\User\\Desktop\\NTUST\\IR\\data\\ntust-ir-2020_hw4_v2\\docs'
queriesList = os.listdir(queriesPath)
queriesList.sort()
docsList = os.listdir(docsPath)
docsList.sort()
queriesContext = []
docsContext = []
for query in queriesList:
path = os.path.join(queriesPath,query)
f = open(path)
context = f.read()
queriesContext.append(context)
for doc in docsList:
path = os.path.join(docsPath,doc)
f = open(path)
context = f.read()
docsContext.append(context)
#%% ่จ็ฎๅ็ฏๆ็ซ ้ทๅบฆ ๏ผ ๅพ้ข่ฆ็จๅฐ
doc_len = np.zeros(len(docsContext))
for doc_id ,doc in enumerate(docsContext):
words = doc.split()
doc_len[doc_id] = len(words)
#%% tf matrix & get sparse matrix
vectorizer = CountVectorizer(stop_words=None, token_pattern="(?u)\\b\\w+\\b") #get tf-matrix
X = vectorizer.fit_transform(docsContext)  # tf matrix of every document over the whole vocabulary
print(X.shape)
#%%
#@nb.jit()
# np.sum(..., axis=0) collapses rows (one value per column)
# np.sum(..., axis=1) collapses columns (one value per row)
def PLSA_sparseMatrix(matrix , iteration , numOfTopic):
sparse = coo_matrix(matrix)
sparse_data = sparse.data
sparse_row = sparse.row
sparse_col = sparse.col
sparse_size = sparse_data.shape[0]
p_tk_wi_dj = cp.zeros((sparse_size , numOfTopic))
p_tk_dj = cp.zeros((numOfTopic , matrix.shape[0]))
p_wi_tk = cp.zeros(( matrix.shape[1] , numOfTopic))
print ("Initializing...")
# randomly assign values
p_tk_dj = cp.random.random(size = (numOfTopic , matrix.shape[0]))
doc_toic_sum = cp.sum(p_tk_dj,axis = 0)
p_tk_dj = p_tk_dj / doc_toic_sum
p_wi_tk = cp.random.random(size = (matrix.shape[1] , numOfTopic))
term_topic_sum = cp.sum(p_wi_tk , axis = 0)
p_wi_tk = p_wi_tk / term_topic_sum
for loop in range(iteration):
# E_step
print('iter : ' + str(loop))
s1 = time.time()
print('E step')
for index in range(len(sparse_data)):
p_tk_wi_dj[index,:] = p_wi_tk[sparse_col[index] , :] * p_tk_dj[ : , sparse_row[index]]
topic_normal = cp.sum(p_tk_wi_dj , axis = 1)
for topic in range(numOfTopic):
p_tk_wi_dj [:,topic] = p_tk_wi_dj [:,topic] / topic_normal
print('end E step')
# M step
print('M step')
M_step_molecular = sparse_data[:,np.newaxis] * p_tk_wi_dj
for index in range(len(sparse_data)):
#if index % 10000 == 0:
#print(index)
p_wi_tk[sparse_col[index],:] += M_step_molecular[index,:]
m_step_normalize = cp.sum(p_wi_tk, axis = 0)[np.newaxis,:]
print(cp.sum(p_wi_tk, axis = 0)[cp.newaxis,:].shape)
p_wi_tk /= m_step_normalize
print('end M step')
# update p(tk|dj)
print('update p(tk|dj)')
for index in range(len(sparse_data)):
doc_index = sparse_row[index]
p_tk_dj[:,doc_index] += M_step_molecular[index,:]
tk_dj_normalize = cp.sum(p_tk_dj, axis = 0)[cp.newaxis,:]
        print(cp.sum(p_tk_dj, axis=0)[cp.newaxis, :].shape)
p_tk_dj /= tk_dj_normalize
print('end p(tk|dj)')
s2 = time.time()
print(str(loop) + ' time : ' , str(s2 - s1) + 's')
return p_tk_dj,p_wi_tk
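# Shape summary (sketch): p_tk_dj is (topics, docs), p_wi_tk is (terms, topics),
# and p_tk_wi_dj keeps one topic-posterior row per nonzero (doc, term) entry of
# X, so memory grows with nnz(X) * numOfTopic instead of docs * terms * topics.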
#%%
p_tk_dj,p_wi_tk = PLSA_sparseMatrix(X,45,128)
#%%
queryword = []
for query in queriesContext:
words = query.split()
print(words)
for word in words:
if word not in queryword:
queryword.append(word)
#%% cal_p(wi|dj)
p_wi_dj = np.zeros([len(docsContext), len(queryword)])
print(p_wi_dj[:,0].shape)
for i ,name in enumerate(queryword):
    index = vectorizer.get_feature_names().index(name)  # column index of this query word
    p_wi_dj[:,i] = X[:,index].toarray().reshape(len(docsContext))
p_wi_dj[:,i] /= doc_len
#%% cal_ p(wi | BG)
p_wi_BG = np.zeros(len(queryword))
totalword = np.sum(doc_len)
for i,name in enumerate(queryword):
index = vectorizer.get_feature_names().index(name)
p_wi_BG[i] = np.sum(X[:,index].toarray()) / totalword
#%% p(q|dj)
p_q_dj = np.zeros([len(docsContext),len(queriesContext)])
alpha = 0.7  # tunable language-model interpolation weight
beta = 0.2
for num , query in enumerate(queriesContext):
    words = query.split()  # the individual words of this query
out_arr = np.zeros(len(docsContext))
for index , word in enumerate (words):
w_index = vectorizer.get_feature_names().index(word)
em = p_wi_tk[w_index,:].reshape(p_wi_tk.shape[1],-1) * p_tk_dj # sumation p(wi|tk) * p(tk|dj) #all_topic
em = np.sum(em, axis = 0)
middle = beta * em
front = alpha * p_wi_dj[:, queryword.index(word)]
back = (1 - alpha - beta) * p_wi_BG[queryword.index(word)]
prob = front + middle + back
if index == 0:
out_arr = prob
else:
out_arr = out_arr * prob
p_q_dj[: , num] = out_arr
#%%
res = {}
save_file_name = 'C:\\Users\\ipprlab\\Desktop\\2020-information-retrieval-and-applications-hw4-v2\\sparse_test2.txt'
fp = open(save_file_name, "w")
fp.seek(0)
fp.write("Query,RetrievedDocuments\n")
for loop in range (len(queriesList)):
write_string = queriesList[loop][0:-4] + ","
for i,j in zip(docsList,p_q_dj[:,loop]):
res[i[0:-4]] = j
sorted_x = sorted(res.items(), key=lambda kv: kv[1],reverse = True)
for iteration , doc in enumerate(sorted_x):
if iteration >= 1000:
break
write_string += doc[0] + " "
write_string += "\n"
fp.write(write_string)
res.clear()
sorted_x.clear()
fp.truncate()
fp.close()
|
ericdddddd/NTUST_Information-Retrieval
|
Hw4/Hw4-sparse_matrix.py
|
Hw4-sparse_matrix.py
|
py
| 5,824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25009122511
|
from osv import fields,osv
from osv import orm
import pooler
def test_prof(self, cr, uid, prof_id, pid, answers_ids):
#return True if the partner pid fetch the profile rule prof_id
ids_to_check = pooler.get_pool(cr.dbname).get('segmentation.profile').get_parents(cr, uid, [prof_id])
[yes_answers, no_answers] = pooler.get_pool(cr.dbname).get('segmentation.profile').get_answers(cr, uid, ids_to_check)
temp = True
for y_ans in yes_answers:
if y_ans not in answers_ids:
temp = False
break
if temp:
for ans in answers_ids:
if ans in no_answers:
temp = False
break
if temp:
return True
return False
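# Matching semantics recap: a partner satisfies a profile when its answers
# include every "yes" answer and none of the "no" answers collected from the
# profile and all of its ancestors (via get_parents/get_answers).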
def _recompute_profiles(self, cr, uid, pid, answers_ids):
ok = []
cr.execute('''
select id
from segmentation_profile
order by id''')
for prof_id in cr.fetchall():
if test_prof(self, cr, uid, prof_id[0], pid, answers_ids):
ok.append(prof_id[0])
return ok
class question(osv.osv):
_name="segmentation.question"
_description= "Question"
_columns={
'name': fields.char("Question",size=128, required=True),
'answers_ids': fields.one2many("segmentation.answer","question_id","Avalaible answers",),
}
question()
class answer(osv.osv):
_name="segmentation.answer"
_description="Answer"
_columns={
"name": fields.char("Answer",size=128, required=True),
"question_id": fields.many2one('segmentation.question',"Question"),
}
answer()
class questionnaire(osv.osv):
_name="segmentation.questionnaire"
_description= "Questionnaire"
_columns={
'name': fields.char("Questionnaire",size=128, required=True),
'description':fields.text("Description", required=True),
'questions_ids': fields.many2many('segmentation.question','profile_questionnaire_quest_rel','questionnaire', 'question', "Questions"),
}
questionnaire()
class profile(osv.osv):
def get_answers(self, cr, uid, ids):
query = """
select distinct(answer)
from profile_question_yes_rel
where profile in (%s)"""% ','.join([str(i) for i in ids ])
cr.execute(query)
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile in (%s)"""% ','.join([str(i) for i in ids ])
cr.execute(query)
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def get_parents(self, cr, uid, ids):
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from segmentation_profile
where parent_id is not null
and id in (%s)""" % ','.join([str(i) for i in ids ]))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = pooler.get_pool(cr.dbname).get('segmentation.profile').get_parents(cr, uid, ids_to_check)
return ids_to_check
def process_continue(self, cr, uid, ids, state=False):
cr.execute('delete from partner_profile_rel where profile_id=%s', (ids[0],))
cr.execute('select id from res_partner order by id ')
partners = [x[0] for x in cr.fetchall()]
to_remove_list=[]
for pid in partners:
cr.execute('select distinct(answer) from partner_question_rel where partner=%s' % pid)
answers_ids = [x[0] for x in cr.fetchall()]
if (not test_prof(self, cr, uid, ids[0], pid, answers_ids)):
to_remove_list.append(pid)
for pid in to_remove_list:
partners.remove(pid)
for partner_id in partners:
cr.execute('insert into partner_profile_rel (profile_id,partner_id) values (%s,%s)', (ids[0],partner_id))
        cr.commit()
return True
_name="segmentation.profile"
_description="Profile"
_columns={
"name": fields.char("Description",size=128, required=True),
"answer_yes": fields.many2many("segmentation.answer","profile_question_yes_rel","profile","answer","Inclued Answers"),
"answer_no": fields.many2many("segmentation.answer","profile_question_no_rel","profile","answer","Excluded Answers"),
'parent_id': fields.many2one('segmentation.profile', 'Parent Profile'),
'child_ids': fields.one2many('segmentation.profile', 'parent_id', 'Children Profile'),
}
_constraints = [
(orm.orm.check_recursion, 'Error ! You can not create recursive profiles.', ['parent_id'])
]
profile()
class partner(osv.osv):
def write(self, cr, uid, ids, vals, context=None):
if not context:
context={}
if 'answers_ids' in vals:
vals['profiles_ids']=[[6, 0, _recompute_profiles(self, cr, uid, ids[0],vals['answers_ids'][0][2])]]
return super(partner, self).write(cr, uid, ids, vals, context=context)
_inherit="res.partner"
_columns={
"answers_ids": fields.many2many("segmentation.answer","partner_question_rel","partner","answer","Answers"),
"profiles_ids":fields.many2many("segmentation.profile","partner_profile_rel","partner_id","profile_id","Matching Profiles", readonly=True, select="2"),
}
partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
factorlibre/openerp-extra-6.1
|
segmentation/segmentation.py
|
segmentation.py
|
py
| 5,561 |
python
|
en
|
code
| 9 |
github-code
|
6
|
33833694339
|
from db.dbconnect import connection
from flask import jsonify
def querydb(data, operation, check=None, user2flower_id=None):
try:
c, conn = connection()
        if c == {'msg': 'Circuit breaker is open, reconnection in porgress'}:  # text must match dbconnect exactly (typo included)
return c, 500
if operation == 'POST':
executeQuery(c, conn, data)
return {"msg": "New user2flower added to DB."}, 201
if operation == 'GET':
c.execute(data)
if check == 'list':
data = c.fetchall()
payload = []
if data is not None and c.rowcount != 0:
for userflower in data:
date = userflower[3]
date = date.strftime('%Y-%m-%d')
content = {"user2flower_id": str(userflower[0]), "user_id": str(userflower[1]),
"flower_id": str(userflower[2]), "date_of_inception": date,
"email": userflower[4]}
payload.append(content)
c.close()
conn.close()
return jsonify(payload)
else:
c.close()
conn.close()
return {'msg': 'No data to return.'}
if check == 'tuple':
data = c.fetchone()
if data is not None and c.rowcount != 0:
date = data[3]
date = date.strftime('%Y-%m-%d')
content = {"user2flower_id": str(data[0]), "user_id": str(data[1]),
"flower_id": str(data[2]), "date_of_inception": date,
"email": data[4]}
c.close()
conn.close()
return content
else:
c.close()
conn.close()
return "No data to return."
if operation == 'PUT':
executeQuery(c, conn, data)
return {"msg": "User2flower with user2flower_id " + str(user2flower_id) + " is updated."}
if operation == 'DELETE':
executeQuery(c, conn, data)
return {"msg": "User2flower with user2flower_id " + str(user2flower_id) + " is deleted from DB."}
    except Exception as e:
        print(e)
        # the connection may never have been opened, so guard the cleanup
        try:
            c.close()
            conn.close()
        except Exception:
            pass
        return {'msg': 'Something went wrong while executing ' + operation + ' operation on users2flowers.'}, 500
def executeQuery(c, conn, data):
c.execute(data)
conn.commit()
c.close()
conn.close()
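# Hardening note (general DB-API practice): interpolating values into SQL
# strings in the callers invites injection; cursors also accept parameterized
# queries, e.g. (illustrative table/column names, not from this module):
# c.execute("DELETE FROM user2flower WHERE user2flower_id = %s", (user2flower_id,))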
|
markocrnic/user2flower
|
app/db/dbquery.py
|
dbquery.py
|
py
| 2,656 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15159066053
|
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#https://www.tensorflow.org/versions/r1.12/api_docs/python/tf/ones
a = tf.ones((4, 3))
b = tf.ones((2, 3))
with tf.Session():
    result = a.eval()
    rb = b.eval()
    tf.assert_negative(a)
    print(result, rb)
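# Note (TF1 graph mode): a.eval() and b.eval() only work inside the
# "with tf.Session():" block because Tensor.eval() runs against the default
# session; tf.assert_negative(a) merely builds an assert op here and would only
# raise if that op were actually run.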
|
sofiathefirst/AIcode
|
04TensorflowAPI/ones.py
|
ones.py
|
py
| 281 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31331765759
|
import torch
from game import DataGenArena
from torch.autograd import Variable
from torch.utils.data import DataLoader
import os
# criterion = torch.nn.SmoothL1Loss()
# optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
class AITrainer:
# TODO: concurrent usage of self.model and player.network makes me nervous
# TODO: add name? hmmm
# also unsure about loss and optimizer params
# TODO: remove gen parameter (?)
def __init__(self, player, model_name=None, model_dir=None, matches=100, batch_size=8,
loss=torch.nn.SmoothL1Loss, optimizer=torch.optim.SGD, lr=0.001, gen=0):
self.player = player
self.matches = matches
self.batch_size = batch_size
self.epochs = 2
self.model_dir = model_dir
self.gen = gen
# TODO: fix use of preexisting model
self.model_path = model_name
if self.model_path is not None:
print("Hi")
self.player.load_model(self.model_path)
# TODO: figure out how to get gen from somewhere
self.gen = gen
else:
self.gen = 0
self.model_path = self.full_path()
self.player.save_model(self.model_path)
# self.model = player.network
self.loss = loss()
self.optimizer = optimizer(self.player.network.parameters(), lr=lr)
self.generations()
def generation(self):
print(self.gen)
path = self.full_path()
self.player.load_model(path)
model = self.player.network
data = DataGenArena(self.player, matches=self.matches)
train_loader = DataLoader(dataset=data,
batch_size=self.batch_size,
shuffle=True,
num_workers=2)
for epoch in range(self.epochs):
for i, data in enumerate(train_loader, 0):
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs, labels = Variable(inputs), Variable(labels)
# Forward pass: Compute predicted y by passing x to the model
y_pred = model(inputs.float())
# Compute and print loss
loss = self.loss(y_pred, labels)
print(epoch, i, loss.data.item())
# Zero gradients, perform a backward pass, and update the weights.
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.gen += 1
path = self.full_path()
        self.player.save_model(path)  # save through the player, as in __init__
def generations(self):
while True:
self.generation()
def full_path(self):
# if not os.path.exists(file_path):
# os.makedirs(file_path)
# TODO: implement player __repr__?
file_name = f'{self.player.name}{self.player.shape}_gen{self.gen}.pt'
if self.model_dir is not None:
path = os.path.join(self.model_dir, file_name)
else:
path = file_name
return path
if __name__ == '__main__':
from players.nn_ai import NNAI
shape = 7, 7
gen = 4
ai = NNAI("nnai", *shape, exploration_turns=5, explore_chance=0.15)
file_name = f'{ai.name}{ai.shape}_gen{gen}.pt'
trainer = AITrainer(ai, model_name=file_name, gen=gen)
|
messej/dots_and_boxes
|
game/ai_trainer.py
|
ai_trainer.py
|
py
| 3,391 |
python
|
en
|
code
| null |
github-code
|
6
|
74212739388
|
import sqlite3
def create_table():
sqlstr = "create table user (sid int(5) primary key, \
name varchar(10), email varchar(25))"
conn.execute(sqlstr)
print("create table successfully")
conn.close()
def initiate_table():
cur = conn.cursor()
sqlstr1 = "insert into user(sid, name, email) values(1001, 'ๅผ ๅคงๅฑฑ', '[email protected]') "
cur.execute(sqlstr1)
sqlstr2 = "insert into user(sid, name, email) values(1002, 'ๆๆไธฝ', '[email protected]') "
cur.execute(sqlstr2)
sqlstr3 = "insert into user(sid, name, email) values(1003, '่ตตๅๆน', '[email protected]') "
cur.execute(sqlstr3)
conn.commit()
print("Records created successfully")
conn.close()
def get_data():
cur = conn.cursor()
sqlstr = "select * from user"
s = cur.execute(sqlstr)
    for row in s:  # the fetched rows are assigned to s
print("sid=", row[0])
print("name=", row[1])
print("email=", row[2], '\n')
conn.close()
def update_data():
cur = conn.cursor()
sql_update = "update user set email='[email protected]' where sid=1003"
cur.execute(sql_update)
conn.commit()
sql_select = "select * from user where sid=1003"
s = cur.execute(sql_select)
for row in s:
print("sid=", row[0])
print("name=", row[1])
print("email=", row[2], '\n')
conn.close()
def delete_data():
cur = conn.cursor()
sql_update = "delete from user where sid=1002"
cur.execute(sql_update)
conn.commit()
sql_select = "select * from user"
s = cur.execute(sql_select)
for row in s:
print("sid=", row[0])
print("name=", row[1])
print("email=", row[2], '\n')
conn.close()
if __name__=="__main__":
conn = sqlite3.connect("d:/groupdatabase.db")
get_data()
# conn = sqlite3.connect("d:/groupdatabase.db")
    # update_data()
# conn = sqlite3.connect("d:/groupdatabase.db")
# delete_data()
|
lhqiao/python_project
|
kaohexinagmu/main.py
|
main.py
|
py
| 2,235 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7874968494
|
"""
Yonan Abraha
Lab 5
"""
def main():
#encodedWord = "WBLARF8TTS"
#encodedWord = "L8KAOUL"
#encodedWord = "E8N8N8"
#encodedWord = "8TRA8DY T8LA"
#encodedWord = "8TT LHA TILLTA LIMAS"
#encodedWord = "LHA GRAAN FIATD GTA8MS IN LHA W8RM SUNEABMS"
encodedWord = "TONG T8E T8CKS L8SLY L8CO LIMA 8L TA8SL T8LATY"
#encodedWord = "UUHO" #Used for Bonus
#encodedWord = "EOUUUUOUU" #Used for Bonus
print(DecodeWord(encodedWord))
#Your code goes here.
def DecodeWord(encodword):  # decoding function
    decodedWord = ""  # accumulator for the decoded result
    for i in encodword:  # check each letter of the encoded word
        if i == "L":
            decodedWord += "T"  # replace "L" with "T"
        elif i == "T":
            decodedWord += "L"  # replace "T" with "L"
        elif i == "8":
            decodedWord += "A"  # replace "8" with "A"
        elif i == "B":
            decodedWord += "A"  # replace "B" with "A"
        elif i == "A":
            decodedWord += "E"  # replace "A" with "E"
        elif i == "E":
            decodedWord += "B"  # replace "E" with "B"
        else:
            decodedWord += i  # any other character passes through unchanged
    return decodedWord
#This code triggers the main to run
#we'll talk about this more in chapters 6,7, & 8.
if __name__ == "__main__":
main()
|
yonanma/CIS121
|
lab_5.py
|
lab_5.py
|
py
| 1,361 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15207383827
|
"""
https://www.algoexpert.io/questions/common-characters
"""
def commonCharacters(strings):
    # Write your code here.
    result = []
    first = strings.pop(0)  # note: this mutates the caller's list
    size = len(strings)     # number of remaining strings
    for char in first:
        count = 0
        for arr in strings:
            if char not in arr:
                continue
            count += 1          # char is present in this string
        if count == size:       # char appears in every remaining string
            if char not in result:  # skip duplicates coming from `first`
                result.append(char)
    return result
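# An equivalent sketch using set intersection (my addition, not part of the
# original solution): intersect the character sets of all strings, which also
# avoids mutating the input. Order of the output characters is not preserved.
def commonCharactersSets(strings):
    common = set(strings[0])
    for s in strings[1:]:
        common &= set(s)  # keep only characters present in every string
    return list(common)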
|
koizo/algoexpert
|
strings/common_characters.py
|
common_characters.py
|
py
| 478 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29482823780
|
import raw_read
import subprocess as sp
from matplotlib import pyplot
import numpy as np
from files import *
from sklearn.metrics import mean_squared_error
[arrs1,plots]=raw_read.rawread('output/rawfile.raw')
# time
stime = arrs1[0]
res_out = arrs1[1:-1]
x=res_out.T
temp = np.linalg.pinv(x)
vref=np.load("output/ref.npy")
vref=vref.T
w_out = np.load("output/w_out.npy")
outlayer=1
readout=np.zeros((res_out.shape[1],outlayer))
for t in range(0, res_out.shape[1]):
# readout[t]=np.dot(res[:,:,t],w_out)
readout[t] = np.dot(res_out[:, t], w_out)
# error = abs(readout - vref)
error = abs(readout[1:] - vref[1:])
error1= mean_squared_error(readout[1:], vref[1:],squared=False)
print(error1)
pyplot.figure(figsize=(10,6))
pyplot.subplots_adjust(wspace =0, hspace =0.2)
pyplot.plot(stime[1:],vref[1:],'r')
pyplot.plot(stime[1:],readout[1:],'mediumspringgreen')
pyplot.xlabel("Time (s)",fontsize=15)
pyplot.show()
###################################plot########################################
# stime = arrs1[0]
# current = arrs1[1:19]
# memstate=arrs1[21:39]
#
# current1=arrs1[1]
# state1=arrs1[21]
#
#
# pyplot.figure(figsize=(10,3))
# pyplot.subplots_adjust(wspace =0, hspace =0.2)
# # pyplot.plot(stime[1:],current1[1:],'r')
# pyplot.plot(stime[1:],state1[1:],'mediumspringgreen')
#
# pyplot.xlabel("Time (s)",fontsize=15)
# pyplot.show()
#
# pyplot.figure(figsize=(10,3))
# pyplot.subplots_adjust(wspace =0, hspace =0.2)
# pyplot.plot(stime[1:],current1[1:],'r')
# # pyplot.plot(stime[1:],state1[1:],'mediumspringgreen')
# pyplot.xlabel("Time (s)",fontsize=15)
# pyplot.show()
|
embeddedsky/ExplainableMRC
|
Plot_file.py
|
Plot_file.py
|
py
| 1,665 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29800628522
|
from django.urls import path
from cart.views import cart_add, cart_remove, cart_details, cart_clear, cart_update
urlpatterns = [
path('cart_details/', cart_details, name='cart_details'),
path('cart_add/', cart_add, name='cart_add'),
path('cart_update/', cart_update, name='cart_update'),
path('cart_remove/', cart_remove, name='cart_remove'),
path('cart_clear/', cart_clear, name='cart_clear'),
]
|
Aliaksei-Hrazhynski/DP
|
cart/urls.py
|
urls.py
|
py
| 420 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10424789791
|
#-*- coding: utf-8 -*-
u"""
.. moduleauthor:: Martí Congost <[email protected]>
"""
import cherrypy
from cocktail.pkgutils import resolve
from woost.controllers.basecmscontroller import BaseCMSController
from woost.extensions.payments.paymentgateway import PaymentGateway
class PaymentRootController(BaseCMSController):
    """Root controller for all payment-related requests.

    This controller does nothing by itself; it merely identifies the payment
    gateway that should handle an incoming request and forwards the request to
    the gateway's controller (an instance of L{PaymentGatewayController}).
    """
def resolve(self, path):
if not path:
raise cherrypy.NotFound()
# Identify the gateway involved in the payment
gateway_id = path.pop(0)
try:
gateway_id = int(gateway_id)
        except ValueError:
raise cherrypy.HTTPError(400)
gateway = PaymentGateway.get_instance(gateway_id)
if gateway is None:
raise cherrypy.NotFound()
# Forward the request to the gateway's controller
controller_class = resolve(gateway.payment_gateway_controller_class)
return controller_class(gateway)
|
marticongost/woost
|
woost/extensions/payments/paymentrootcontroller.py
|
paymentrootcontroller.py
|
py
| 1,224 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30063385074
|
import veri
import logs
class uartClass(logs.driverClass):
def __init__(self,Path,Monitors,rxd='rxd',txd='txd'):
logs.driverClass.__init__(self,Path,Monitors)
self.rxd = rxd
self.txd = txd
self.baudRate = 100*32
self.txQueue = []
self.txWaiting=0
self.rxState = 'idle'
self.rxQueue = []
self.rxWaiting=0
self.rxByte=''
self.force(self.rxd,1)
self.RxStr = ''
self.Silence = 100
self.silentCounter = 0
def busy(self,Why=False):
Busy0 = self.silentCounter < (self.Silence * self.baudRate)
if Why:
logs.log_info("UART BUSY busy0=%s tx=%s rx=%s wtx=%s wrx=%s" % (Busy0,len(self.txQueue),len(self.rxQueue),(self.txWaiting>0),(self.rxWaiting>0)))
return Busy0 or (self.rxState!='idle') or (self.txQueue != []) or (self.rxQueue != []) or (self.txWaiting>0) or (self.rxWaiting>0)
def run(self):
self.runTx()
self.runRx()
Rxd = self.peek(self.txd)
if Rxd == 0:
self.silentCounter = 0
else:
self.silentCounter += 1
def action(self,Str,Origs=[]):
Wrds = Str.split()
if Wrds[0]=='txfile':
Fname = Wrds[1]
File = open(Fname)
Str = File.read()
File.close()
Words = Str.split()
for Wrd in Words:
self.send(Wrd)
self.send('\x0a')
elif Wrds[0]=='tx':
for Wrd in Wrds[1:]:
self.send(Wrd)
logs.log_info("TXUART %s %s" % (self.txd,str(Wrds[1:])),'fpga')
self.send('\x0a')
elif Wrds[0]=='baudrate':
self.baudRate = eval(Wrds[1])
elif Wrds[0]=='rx':
logs.log_info('UART RX "%s" '%(self.RxStr))
else:
logs.log_error('action by uart is not : %s'%(Str))
def send(self,Byte):
if type(Byte) is str:
if len(Byte)==1:
Byte = ord(Byte)
self.send(Byte)
return
elif Byte == 'CRLF':
self.send(10)
return
else:
for Chr in Byte:
self.send(Chr)
return
self.txQueue.append(0)
for II in range(0,8):
Val = (Byte>>II)&1
self.txQueue.append(Val)
logs.log_info('txQueue %02x %d' % (Byte,self.baudRate))
self.txQueue.append(1)
self.txQueue.append(1)
self.txQueue.append(1)
def runTx(self):
if self.txWaiting>0:
self.txWaiting -= 1
return
if self.txQueue==[]:
self.force(self.rxd,1)
return
Bit = self.txQueue.pop(0)
self.force(self.rxd,Bit)
self.txWaiting = self.baudRate
def runRx(self):
if self.rxWaiting>0:
self.rxWaiting -= 1
veri.force('tb.midbit','0')
# if (self.rxWaiting == 1):
# logs.log_info("rxwait %s %s " % (self.rxState,self.rxByte))
return
if self.rxState in ['bitx','bit0','stop']:
veri.force('tb.midbit','1')
else:
veri.force('tb.midbit','0')
if self.rxState=='idle':
Rxd = self.peek(self.txd)
if Rxd==0:
self.rxState='bit0'
self.rxWaiting = self.baudRate/2
return
elif self.rxState=='bit0':
self.rxState='bitx'
self.rxWaiting = self.baudRate
Rxd = self.peek(self.txd)
if Rxd!=0:
logs.log_error('ilia rxd start bit isnt there')
self.rxState='idle'
return
elif self.rxState=='bitx':
Rxd = self.peek(self.txd)
self.rxByte = str(Rxd) + self.rxByte
if len(self.rxByte)==8:
self.rxState = 'stop'
# self.rxQueue.append(self.rxByte)
Int = logs.intx(self.rxByte)
if ((Int>=0)and(Int<256)):
Chr = chr(logs.intx(self.rxByte))
else:
Chr = '(ERR%s)'%self.rxByte
self.RxStr += Chr
# logs.log_info('uart rxByte %s "%s" '%(self.rxByte,self.RxStr))
try:
if ord(Chr) <= 10:
logs.log_info('UART RX "%s" '%(self.RxStr[:-1]))
self.RxStr = ''
except:
logs.log_error('UART RX ERROR |%s| "%s" '%(Chr,self.RxStr))
self.RxStr = ''
# veri.force('tb.marker','0b'+self.rxByte)
self.rxByte=''
self.rxWaiting = self.baudRate
elif self.rxState=='stop':
Rxd = self.peek(self.txd)
if Rxd==0:
logs.log_error('ilia rxd stop bit isnt there')
self.rxState='idle'
def onFinish(self):
return
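# Framing sketch (my addition): send() queues one UART frame per byte --
# a start bit (0), eight data bits LSB-first, then three stop bits (1), e.g.
#
#   frame for 0x55 (0b01010101) -> [0, 1,0,1,0,1,0,1,0, 1, 1, 1]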
|
greenblat/vlsistuff
|
verification_libs3/uartClass.py
|
uartClass.py
|
py
| 5,084 |
python
|
en
|
code
| 41 |
github-code
|
6
|
19109871856
|
#!/usr/bin/env python3
#
# graph timing from timing file
import sys
import pylab
import numpy
import matplotlib.pyplot as plot
from argparse import ArgumentParser
from collections import defaultdict
time_xlabel="Time in seconds"
bytes_ylabel="GB processed"
def parse_pair(line):
h = line[11:13]
m = line[14:16]
s = line[17:19]
time = (int(h) * 60 + int(m)) * 60 + int(s)
count = int(line[24:])
return time, count
def read_timing(timing_file):
time_list = list()
total_bytes_list = list()
with open(timing_file, 'r') as infile:
first = True
offset = 0
total = 0
for line in infile:
time, count = parse_pair(line)
if first:
offset = time
first = False
time_list.append(time - offset)
total += count
total_bytes_list.append(total/1000000000.0)
return (time_list, total_bytes_list)
def plot_timing(time_list, total_bytes_list, outfile):
plot.plot(time_list, total_bytes_list, '-bo')
plot.xlabel(time_xlabel)
plot.ylabel(bytes_ylabel)
plot.title("Bytes processed over time")
plot.savefig(outfile)
if __name__=="__main__":
parser = ArgumentParser(prog='plot_dots.py',
description='Plot timestamp progress')
parser.add_argument('timing_file', help= 'timing file')
args = parser.parse_args()
timing_file = args.timing_file
if timing_file[:7] != 'logfile':
raise ValueError("Invalid prefix '%s'" % timing_file[:7])
time_list, total_bytes_list = read_timing(timing_file)
for time in time_list:
print("time: %d\n" % time)
for total_bytes in total_bytes_list:
print("total bytes: %d\n" % total_bytes)
outfile = "out_%s.png" % timing_file
plot_timing(time_list, total_bytes_list, outfile)
|
NPS-DEEP/big_data_test
|
be_cluster/doc/plot_dots.py
|
plot_dots.py
|
py
| 1,844 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15191375755
|
from collections import defaultdict, deque
class Solution:
def isBipartite(self, graph: List[List[int]]) -> bool:
color = defaultdict(int)
seen = set()
q = deque()
for node1 in range(len(graph)):
if node1 in seen:
continue
color[node1] = 1
parent_color = 1
q.extend([n for n in graph[node1] if n not in seen])
seen.add(node1)
while q:
l = len(q)
for _ in range(l):
node = q.popleft()
must_color = parent_color * -1 #invert color
if color[node] == 0:
color[node] = must_color
seen.add(node)
elif color[node] != must_color:
#print(node, graph[node], q, color[node], must_color)
return False
q.extend([n for n in graph[node] if n not in seen])
parent_color = parent_color * -1 #invert color
return True
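# An equivalent, more compact coloring sketch (my addition, not part of the
# submitted solution): color nodes 1/-1 via BFS from every uncolored node;
# a same-color edge means an odd cycle, so the graph is not bipartite.
#
#   def is_bipartite(graph):
#       color = {}
#       for start in range(len(graph)):
#           if start in color:
#               continue
#           color[start] = 1
#           q = deque([start])
#           while q:
#               u = q.popleft()
#               for v in graph[u]:
#                   if v not in color:
#                       color[v] = -color[u]
#                       q.append(v)
#                   elif color[v] == color[u]:
#                       return False
#       return True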
|
Dumbris/leetcode
|
medium/785.is-graph-bipartite.py
|
785.is-graph-bipartite.py
|
py
| 1,085 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2706181917
|
import rhino_unwrapper.meshUtils.meshLoad as meshLoad
import rhino_unwrapper.meshUtils.mesh as mesh
import rhino_unwrapper.cutSelection.userCuts as userCuts
import rhino_unwrapper.cutSelection.autoCuts as autoCuts
import rhino_unwrapper.weight_functions as weight_functions
import rhino_unwrapper.unfold as unfold
import rhino_unwrapper.distribute as distribute
reload(meshLoad)
reload(mesh)
reload(userCuts)
reload(autoCuts)
reload(weight_functions)
reload(unfold)
reload(distribute)
#def all_weight_functions():
#return dict([m for m in inspect.getmembers(weight_functions, inspect.isfunction)])
# hardcode a selection for the weight function for now
weight_function = weight_functions.edgeAngle
# SET UP MESH
uMesh = meshLoad.user_select_mesh()
jMesh = mesh.Mesh(uMesh)
displayer = mesh.MeshDisplayer(jMesh)
# SET CUTS
user_cuts = userCuts.get_user_cuts(jMesh,displayer)
cuts = autoCuts.auto_fill_cuts(jMesh,user_cuts,weight_function)
displayer.display_edges(cuts) #NOTE right now this will duplicate-draw user-cuts
# UNFOLD
unfolder = unfold.UnFolder(jMesh)
net = unfolder.unfold()
net.display()
#DISTRIBUTE
distribute.spread_out_islands_horizontally(net)
|
jlopezbi/rhinoUnfolder
|
prototype_cutAndUnfold.py
|
prototype_cutAndUnfold.py
|
py
| 1,170 |
python
|
en
|
code
| 8 |
github-code
|
6
|
41675036380
|
# Sudoku
#
# Scan the sudoku board and mark the empty cells (-1).
# Try putting 1~9 into an empty cell and, if the placement is valid, recurse.
# As soon as a complete solution is found, stop immediately.
# Benefit -> while checking inside the test function, update which digits can
# still go into each empty cell, so redundant re-checks are avoided.
from copy import deepcopy
# checks whether placing a new value at (row, col) is valid
def test(s: list[list[int]], row: int, col: int) -> bool:
    # check the row
    cnt = 0
    nums = []  # all non-zero numbers in the row
    empty = []  # cells that are still empty
for i in range(9):
        if s[row][i] == 0:  # if it is 0, record the cell as empty
empty.append((row, i))
        elif i != col:  # non-zero and not the cell itself, so record the number
nums.append(s[row][i])
        if s[row][i] != s[row][col] and i != col:  # count cells that do not clash with the new value
cnt += 1
if len(empty) > 0:
for e in empty:
            # re-store the candidates, excluding every number already in nums
poss_value[e] = [x for x in poss_value[e] if x not in nums]
if cnt != 8:
return False
    # check the column
    cnt = 0
    nums = []  # all non-zero numbers in the column
    empty = []  # cells that are still empty
for i in range(9):
if s[i][col] == 0:
empty.append((i, col))
elif i != row:
nums.append(s[i][col])
if s[i][col] != s[row][col] and i != row:
cnt += 1
if len(empty) > 0:
for e in empty:
            # re-store the candidates, excluding every number already in nums
poss_value[e] = [x for x in poss_value[e] if x not in nums]
if cnt != 8:
return False
    # check the 3x3 box
    sections = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    row_section = []  # rows that belong to this box
    col_section = []  # columns that belong to this box
for section in sections:
if row in section:
row_section = section
break
for section in sections:
if col in section:
col_section = section
break
cnt = 0
    nums = []  # all non-zero numbers in the box
    empty = []  # cells that are still empty
for r in row_section:
for c in col_section:
if s[r][c] == 0:
empty.append((r, c))
elif (r, c) != (row, col):
nums.append(s[r][c])
if s[r][c] != s[row][col] and (r, c) != (row, col):
cnt += 1
if len(empty) > 0:
for e in empty:
            # re-store the candidates, excluding every number already in nums
poss_value[e] = [x for x in poss_value[e] if x not in nums]
if cnt != 8:
return False
    return True  # all checks passed, so the placement is valid
sudoku = [list(map(int, input().split())) for _ in range(9)]  # the sudoku board
empty_place: list[tuple[int, int]] = []  # empty cells [(row, col)]
for row in range(9):
for col in range(9):
if sudoku[row][col] == 0:
empty_place.append((row, col))
poss_value = {}  # dict of candidate values for each empty cell (initially 1~9)
for e in empty_place:
poss_value[e] = list(range(1, 10))
res = [[]]
def dfs(s: list[list[int]], empty_place: list[tuple[int, int]]):
if len(empty_place) == 0:
global res
res = s
return True
for i in range(len(empty_place)):
row = empty_place[i][0]
col = empty_place[i][1]
for num in poss_value[empty_place[i]]:
new_sudoku = deepcopy(s)
new_sudoku[row][col] = num
new_empty_place = deepcopy(empty_place)
del new_empty_place[i]
            if test(new_sudoku, row, col):  # is it OK to put num in that cell?
flag = dfs(new_sudoku, new_empty_place)
if flag == True:
return True
print()
dfs(sudoku, empty_place)
# for r in res:
# for c in r:
# print(c, end=" ")
# print()
# for p in poss_value.keys():
# print(p, poss_value[p])
result = [
[1, 3, 5, 4, 6, 9, 2, 7, 8],
[7, 8, 2, 1, 3, 5, 6, 4, 9],
[4, 6, 9, 2, 7, 8, 1, 3, 5],
[3, 2, 1, 5, 4, 6, 8, 9, 7],
[8, 7, 4, 9, 1, 3, 5, 2, 6],
[5, 9, 6, 8, 2, 7, 4, 1, 3],
[9, 1, 7, 6, 5, 2, 3, 8, 4],
[6, 4, 3, 7, 8, 1, 9, 5, 2],
[2, 5, 8, 3, 9, 4, 7, 6, 1],
]
print(res == result)
|
jisupark123/Python-Coding-Test
|
DFS/2580.py
|
2580.py
|
py
| 4,495 |
python
|
ko
|
code
| 1 |
github-code
|
6
|
17763725801
|
#1)list using Array
import sys
def getListDetails(l):
#size = sys.getsizeof(l)
#capacity = (size of list - size of empty list) // (size of one block)
    #size of empty list means the total number of bytes of an empty list
capacity = (sys.getsizeof(l)-64)//8
left_size = ((sys.getsizeof(l)-64)-len(l)*8)//8
#(size-36)//4 for 32bit system
#(size-64)//8 for 64bit system
print('size of list is ',sys.getsizeof(l))
print('capacity is ',capacity)
print('remaining capacity is ',left_size)
def insertAtBegin(l,ele):
l.insert(0,ele)
def insertInBetween(l,index,ele):
l.insert(index,ele)
def insertAtEnd(l,ele):
l.append(ele)
def deleteAtBegin(l):
l.pop(0)
def deleteAtBetween(l,index):
l.pop(index)
def deleteAtEnd(l):
l.pop(-1)
l = list()
getListDetails(l)
#inserting element at begin
insertAtBegin(l,1)
#inserting element in between
insertInBetween(l,1,2)
#inserting element at end
insertAtEnd(l,3)
#deleting at begin
deleteAtBegin(l)
print(l)
getListDetails(l)
'''
= RESTART: F:/python/python_programs/5competetive programming/7linear data structures/3.linklinst.py
size of list is 64
capacity is 0
remaining capacity is 0
[2, 3]
size of list is 96
capacity is 4
remaining capacity is 2
>>>
'''
# but implementing a list using a dynamic array (i.e. Python's list) has some disadvantages
'''
Even though it performs dynamic memory allocation, it still has some issues:
1) insertion and deletion involve shifting of elements.
2) on insertion, when the list is full its capacity is increased (doubled in
this simplified model), which can waste memory in the form of reserved space.
Example,
consider a list l of capacity 10,
<insert elements 1 to 10  # this inserts elements 1...10 into the list
<insert element 11        # here the list is full, so a list of size 10*2=20
(previous capacity * 2) is created, the elements are shifted to this new list,
and the new element 11 is added to the enlarged list.
When that capacity is reached, the size is again increased to 20*2=40, the
elements are shifted, the new element is inserted, and so on.
Therefore an array-backed list involves shifting of elements and memory wastage.
'''
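# A small sketch (my addition; CPython-specific, exact sizes vary by version
# and platform) that makes the over-allocation visible by watching
# sys.getsizeof jump as elements are appended:
demo = []
last_size = sys.getsizeof(demo)
for i in range(20):
    demo.append(i)
    size = sys.getsizeof(demo)
    if size != last_size:  # the capacity grew: CPython over-allocated
        print('len =', len(demo), ' size =', size, 'bytes')
        last_size = size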
print('------------------------------------------singly link list------------------------------------------')
#using classes and objects
#class to create node
class Node:
def __init__(self,data):
self.__data = data
self.__next = None
def getData(self):
return self.__data
def setData(self,data):
self.__data = data
def getNext(self):
return self.__next
def setNext(self,next_node):
self.__next = next_node
class LinkedList:
def __init__(self):
self.__head = None
def getHead(self):
return self.__head
def insertAtBegin(self,node):
if self.__head == None :
self.__head = node
else :
node.setNext(self.__head)
self.__head = node
print('insertion done successfully...')
def insertInBetween(self,key,node):
temp = self.__head
while(temp.getNext()!=None and temp.getData()!=key):
temp = temp.getNext()
if(temp.getData()!=key):
            print('key not found\nInsertion failed!!!')
else:
node.setNext(temp.getNext())
temp.setNext(node)
print('insertion done successfully...')
def insertAtEnd(self,node):
if self.__head == None :
self.__head = node
else :
temp = self.__head
while(temp.getNext()!=None):
temp = temp.getNext()
temp.setNext(node)
print('insertion done successfully...')
def deleteAtBegin(self):
self.__head = self.__head.getNext()
print('deletion done successfully')
def deleteInBetween(self,key):
temp = self.__head
while(temp.getNext()!=None and temp.getData()!=key):
delete_node = temp
temp = temp.getNext()
if(temp.getData()!=key):
            print('key not found\nDeletion failed!!!')
else:
delete_node.setNext((delete_node.getNext()).getNext())
print('deletion done successfully')
def deleteAtEnd(self):
temp = self.__head
while(temp.getNext()!=None ):
delete_node = temp
temp = temp.getNext()
delete_node.setNext(None)
print('deletion done successfully')
    def traversing(self):
if self.__head == None :
print('Empty List!!!')
else:
temp = self.__head
#print(temp.getData())
while(temp!=None):
print(temp.getData())
temp = temp.getNext()
link_list = LinkedList() #it will create empty list
while(True):
print('\n1.insertion\n2.deletion\n3.display\n4.quit\n Enter your choice')
ch = int(input())
if ch == 1:
#create node
data = int(input('Enter data '))
node = Node(data)
#insertion of node in link list
print('1.Insert At begining\n 2.Insert In Between\n 3.Insert At End')
ch2 = int(input('Enter your choice'))
if ch2 == 1:
link_list.insertAtBegin(node)
elif ch2 == 2:
key = int(input('Enter after which you want to insert node'))
link_list.insertInBetween(key,node)
elif ch2 == 3:
link_list.insertAtEnd(node)
else : print('Enter correct choice !!!')
elif ch == 2:
#ask for where to delete
if link_list.getHead() == None:
print('list is Empty!!!')
else :
print('1.Delete At begining\n 2.Delete In Between\n 3.Delete At End')
ch2 = int(input('Enter your choice'))
if ch2 == 1:
link_list.deleteAtBegin()
elif ch2 == 2:
key = int(input('Enter which you want to delete node'))
link_list.deleteInBetween(key)
elif ch2 == 3:
link_list.deleteAtEnd()
else : print('Enter correct choice !!!')
elif ch == 3:
        link_list.traversing()
elif ch == 4:
break
else : print('Enter correct choice !!!')
print('------------------------------------------doubly link list------------------------------------------')
#class to create node
class Node:
def __init__(self,data):
self.__pre = None
self.__data = data
self.__next = None
def getData(self):
return self.__data
def setData(self,data):
self.__data = data
def getNext(self):
return self.__next
def setNext(self,next_node):
self.__next = next_node
    def getPre(self):
        return self.__pre
    def setPre(self,pre_node):
        self.__pre = pre_node
|
aparna0/competitive-programs
|
7Algo and Datastructures/1linear DS/3.linklinst.py
|
3.linklinst.py
|
py
| 7,108 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39023255902
|
import pandas as pd
from objects_API.StrainJ import StrainJson
from objects_API.BacteriumJ import BacteriumJson
from objects_API.BacteriophageJ import BacteriophageJson
from objects_API.CoupleJ import CoupleJson
from configuration.configuration_api import ConfigurationAPI
from rest_client.AuthenticationRest import AuthenticationAPI
from objects_new.Couples_new import Couple
from objects_new.Organisms_new import Organism
def readCSVToDF(path_csv):
data = pd.read_csv(path_csv)
return data
def getBacteriophageByACCNEWDB(acc_value:str):
bacteriophage_obj_new = BacteriophageJson.getByAccnumber(acc_value)
id_bacteriophage_new = bacteriophage_obj_new.id
return id_bacteriophage_new
def getBacteriumByACCNEWDB(acc_value:str):
bacterium_obj_new = BacteriumJson.getByAccnumber(acc_value)
id_bacterium_new = bacterium_obj_new.id
return id_bacterium_new
def obtainBacteriumIdFromOldDBId(id_bacterium_old_db:int):
organism_obj = Organism.get_organism_by_id(id_bacterium_old_db)
fk_strain_old_db = organism_obj.fk_strain
return fk_strain_old_db
def obtainBacteriumACCnumberFromOldDBId(id_bacterium_old_db:int):
organism_obj = Organism.get_organism_by_id(id_bacterium_old_db)
acc_number = organism_obj.acc_num
return acc_number
def obtainphageACCnumberFromOldDBId(id_bacteriophage_old_db:int):
organism_obj = Organism.get_organism_by_id(id_bacteriophage_old_db)
acc_number = organism_obj.acc_num
return acc_number
def getIdStrainNewDBByStrainBactOldDB(strain_id_old:int, dataframe_strains_id):
dataframe_line = dataframe_strains_id.loc[dataframe_strains_id['strain_db'] == strain_id_old]
new_db_id_strain = int(dataframe_line['strain_api'].values[0])
return new_db_id_strain
def getBacteriumListIdsByStrainId(strain_id:int):
list_bacterium_ids_treated = []
strain_obj = StrainJson.getByID(strain_id)
list_bacterium_ids = strain_obj.bacteria
for bacterium in list_bacterium_ids:
bacterium = bacterium.replace('http://trex.lan.iict.ch:8080/api/bacterium/','')[:-1]
list_bacterium_ids_treated.append(bacterium)
return list_bacterium_ids_treated
conf_obj = ConfigurationAPI()
conf_obj.load_data_from_ini()
AuthenticationAPI().createAutenthicationToken()
path_csv_strains_correspondence = 'correspondenceIDSStrains2.csv'
datafram_csv = readCSVToDF(path_csv_strains_correspondence)
list_couples_old_db = Couple.get_all_couples()
count_error = 0
count_many = 0
count_pos_list = 0
#list_couples_old_db = list_couples_old_db[9602:]
#list_couples_old_db = list_couples_old_db[-2:]
dict_convert_phages_id = {}
dict_convert_phages_id[4656] = 6265
for couple_element in list_couples_old_db:
if couple_element.fk_source_data == 1 and couple_element.fk_level_interact == 3:
print('It is the {0} : couple id {1} : pos in list total {2}'.format(count_many, couple_element.id_couple, count_pos_list))
id_bacterium = couple_element.fk_bacteria
acc_bacterium = obtainBacteriumACCnumberFromOldDBId(id_bacterium)
id_new_bacterium_db = -1
try:
id_new_bacterium_db = getBacteriumByACCNEWDB(acc_bacterium)
except:
id_new_bacterium_db = -1
#If necessary, check if they have the acc for public data for bacterium
#strain_id = obtainBacteriumIdFromOldDBId(id_bacterium)
#strain_id_new_db = getIdStrainNewDBByStrainBactOldDB(strain_id, datafram_csv)
#list_bacterium_id = getBacteriumListIdsByStrainId(strain_id_new_db)
#
id_phage = couple_element.fk_phage
acc_bacteriophage = obtainphageACCnumberFromOldDBId(id_phage)
id_new_phage_db = -1
if id_phage in dict_convert_phages_id:
id_new_phage_db = dict_convert_phages_id[id_phage]
else:
try:
id_new_phage_db = getBacteriophageByACCNEWDB(acc_bacteriophage)
except:
id_new_phage_db = -1
if id_new_phage_db != -1 and id_new_bacterium_db != -1:
try:
couple_obj = CoupleJson.getByBacteriumPhageIds(id_new_bacterium_db, id_new_phage_db)
except:
count_error += 1
interaction_type_cp = couple_element.interact_pn
id_bacterium_cp = id_new_bacterium_db
id_phage_cp = id_new_phage_db
            validity_id_cp = 4 # not validated
level_interaction_cp = 2
source_data_cp = 1
person_responsible_cp = 3
couple_obj_json = CoupleJson(interaction_type = interaction_type_cp,
bacteriophage = id_phage_cp,
bacterium = id_bacterium_cp,
level = level_interaction_cp,
person_responsible = person_responsible_cp,
source_data = source_data_cp,
validity = validity_id_cp)
couple_obj = couple_obj_json.setCouple()
print(couple_obj)
print('INSERTEDDD NEW')
else:
count_error += 1
#print(couple_obj)
count_many += 1
count_pos_list += 1
print(len(list_couples_old_db))
print('Hello')
|
diogo1790/inphinity
|
CorrectLevelLysis.py
|
CorrectLevelLysis.py
|
py
| 5,366 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40920994409
|
from datetime import datetime
from models.library_item import LibraryItem
from models.rent import Rent
from models.user import User
from services.handlers.rent_handlers.apply_discount_handler import ApplyDiscountHandler
from services.handlers.rent_handlers.calculate_fine_price_handler import (
CalculateFinePriceHandler,
)
from services.handlers.rent_handlers.calculate_rent_price_handler import (
CalculateRentPriceHandler,
)
from services.handlers.rent_handlers.inspect_item_handlers import InspectItemHandler
from services.handlers.rent_handlers.rent_request_payment_handler import (
RentRequestPaymentHandler,
)
from services.handlers.rent_handlers.rent_return_payment_handler import (
RentReturnPaymentHandler,
)
from services.handlers.rent_handlers.save_rent_handler import SaveRentHandler
from services.handlers.rent_handlers.update_book_catalogue_handler import (
UpdateBookCatalogueHandler,
)
from services.payment.bank_account_payment_strategy import BankAccountPaymentStrategy
from services.payment.card_payment_strategy import CardPaymentStrategy
from services.payment.payment_strategy import PaymentStrategy
from services.singleton.singleton import singleton
@singleton
class LibraryManager:
@classmethod
def rent_library_item(
cls,
library_item: LibraryItem,
user: User,
expected_rent_end_date: datetime,
payment_type: str,
):
rent = cls.__create_rent(
library_item=library_item,
user=user,
expected_rent_end_date=expected_rent_end_date,
)
payment_strategy = cls.__get_payment_strategy_from_type(payment_type)
handler = cls.__get_rent_request_handlers()
handler.handle(rent, payment_strategy)
return rent
@classmethod
def return_library_item(
cls,
rent: Rent,
payment_type: str,
):
rent.rent_end_date = datetime.utcnow().date()
payment_strategy = cls.__get_payment_strategy_from_type(payment_type)
handler = cls.__get_rent_return_handlers()
handler.handle(rent, payment_strategy)
return rent
@staticmethod
def __create_rent(
library_item: LibraryItem,
user: User,
expected_rent_end_date: datetime,
) -> Rent:
rent = Rent(
library_item=library_item,
library_user=user,
rent_start_date=datetime.utcnow(),
expected_rent_end_date=expected_rent_end_date,
)
return rent
@staticmethod
def __get_rent_request_handlers():
handler = CalculateRentPriceHandler()
handler.set_next(ApplyDiscountHandler()).set_next(
RentRequestPaymentHandler()
).set_next(UpdateBookCatalogueHandler()).set_next(SaveRentHandler())
return handler
@staticmethod
def __get_rent_return_handlers():
handler = InspectItemHandler()
handler.set_next(CalculateFinePriceHandler()).set_next(
RentReturnPaymentHandler()
).set_next(UpdateBookCatalogueHandler()).set_next(SaveRentHandler())
return handler
@staticmethod
def __get_payment_strategy_from_type(payment_type: str) -> PaymentStrategy:
if payment_type == "card":
payment_strategy = CardPaymentStrategy()
else:
payment_strategy = BankAccountPaymentStrategy()
return payment_strategy
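# A usage sketch (my addition; the argument values are illustrative and the
# item/user objects are assumed to exist):
#
#   manager = LibraryManager()
#   rent = manager.rent_library_item(
#       library_item=item,
#       user=user,
#       expected_rent_end_date=datetime(2024, 1, 31),
#       payment_type="card",
#   )
#   manager.return_library_item(rent, payment_type="card")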
|
Ari100telll/LibrarySDD
|
services/library_manager/library_manager.py
|
library_manager.py
|
py
| 3,424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70941800508
|
# a simple client socket
import socket
# define socket address
TCP_IP = '127.0.0.1' # ip of the server we want to connect to (loopback for local testing)
TCP_PORT = 5000 # port used for communicating with the server
# create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print ("Socket created successfully.")
# connect to server
s.connect((TCP_IP, TCP_PORT))
print ("Established connection with the server." )
message = "I've been sent from the client!"

# send message to the server (sockets transmit bytes, so encode the string)
s.send(message.encode())
print ("Message sent to server.")

# close the socket when done
s.close()
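# A minimal matching server sketch (my addition, not part of the original
# file; run it separately before starting this client):
#
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind(('127.0.0.1', 5000))
#   srv.listen(1)
#   conn, addr = srv.accept()
#   print("Received:", conn.recv(1024).decode())
#   conn.close()
#   srv.close()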
|
DiegoPython/CS50-NASA-Vending-Machine
|
Python/testsocket.py
|
testsocket.py
|
py
| 554 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29848209236
|
import pygame
from random import randint
class Enemy(pygame.sprite.Sprite):
def __init__(self, x, y, level):
super().__init__()
self.image_list = []
self.level = level
self.frame = 0
for index in range(10):
image_name = 'Pic\\Enemy' + str(level) + '\\Enemy' + str(level) + '_' + str(index+1) + '.png'
self.image_list.append(pygame.image.load(image_name).convert_alpha())
self.image_order = 0
self.image = self.image_list[self.image_order]
# self.image = pygame.image.load('Pic\Enemy1\Enemy1.png').convert_alpha()
self.rect = self.image.get_rect(topleft=(x, y))
self.speed = 1
'''speed for each level'''
# if self.level%4 >= 0.5:
# self.speed = 1*ceil(self.level/4)
# elif self.level%4 < 0.5:
# self.speed = 1*floor(self.level/4)
# print(f'level: {level}')
# print(self.speed)
self.lasers = pygame.sprite.Group()
'''Make it collide perfectly'''
self.mask = pygame.mask.from_surface(self.image)
'''special enemy'''
special_number = randint(0, 10)
if special_number == 4:
self.special = 'heart'
elif special_number == 1:
self.special = 'item'
else:
self.special = 'none'
self.check_game_over = True
def move(self):
'''Movement speed of Enemy1'''
if self.rect.x > 0:
self.rect.x -= self.speed
elif self.rect.x == 0:
'''Constraint for enemies pos'''
self.check_game_over = False
# pygame.quit()
# sys.exit()
def pic(self):
self.frame += 1
if self.frame == 12:
self.frame = 0
self.image_order += 1
if self.image_order >= len(self.image_list):
self.image_order = 0
self.image = self.image_list[self.image_order]
def update(self):
self.move()
self.pic()
if self.check_game_over is False:
return False
|
Karnpitcha-kasemsirinavin/The_Lost_Duck
|
Enemy.py
|
Enemy.py
|
py
| 2,176 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24698018564
|
import numpy as np
import torch
from torch.utils.data import Dataset
import constants as C
def _get_existing_group(gb, i):
    try:
        group_df = gb.get_group(i)
    except KeyError:
        group_df = None
    return group_df
def get_dist_matrix(struct_df):
locs = struct_df[['x','y','z']].values
n_atoms = len(locs)
loc_tile = np.tile(locs.T, (n_atoms,1,1))
dist_mat = np.sqrt(((loc_tile - loc_tile.T)**2).sum(axis=1))
return dist_mat
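# A small self-check sketch (my addition; assumes pandas and scipy are
# installed -- neither is imported by the original module): the tile-based
# computation above should agree with scipy's pairwise distances.
def _check_dist_matrix(n_atoms=5):
    import pandas as pd
    from scipy.spatial.distance import cdist
    df = pd.DataFrame(np.random.rand(n_atoms, 3), columns=['x', 'y', 'z'])
    return np.allclose(get_dist_matrix(df), cdist(df.values, df.values))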
class MoleculeDataset(Dataset):
"""Dataset returning inputs and targets per molecule."""
def __init__(self, mol_ids, gb_mol_sc, gb_mol_atom, gb_mol_bond,
gb_mol_struct, gb_mol_angle_in, gb_mol_angle_out,
gb_mol_graph_dist):
"""Dataset is constructed from dataframes grouped by molecule_id."""
self.n = len(mol_ids)
self.mol_ids = mol_ids
self.gb_mol_sc = gb_mol_sc
self.gb_mol_atom = gb_mol_atom
self.gb_mol_bond = gb_mol_bond
self.gb_mol_struct = gb_mol_struct
self.gb_mol_angle_in = gb_mol_angle_in
self.gb_mol_angle_out = gb_mol_angle_out
self.gb_mol_graph_dist = gb_mol_graph_dist
def __len__(self):
return self.n
def __getitem__(self, idx):
return (self.gb_mol_sc.get_group(self.mol_ids[idx]),
self.gb_mol_atom.get_group(self.mol_ids[idx]),
self.gb_mol_bond.get_group(self.mol_ids[idx]),
self.gb_mol_struct.get_group(self.mol_ids[idx]),
self.gb_mol_angle_in.get_group(self.mol_ids[idx]),
_get_existing_group(self.gb_mol_angle_out, self.mol_ids[idx]),
self.gb_mol_graph_dist.get_group(self.mol_ids[idx]))
def arr_lst_to_padded_batch(arr_lst, dtype=torch.float,
pad_val=C.BATCH_PAD_VAL):
tensor_list = [torch.Tensor(arr).type(dtype) for arr in arr_lst]
batch = torch.nn.utils.rnn.pad_sequence(
tensor_list, batch_first=True, padding_value=pad_val)
return batch.contiguous()
def collate_parallel_fn(batch, test=False):
"""
Transforms input dataframes grouped by molecule into a batch of input and
target tensors for a 'batch_size' number of molecules. The first dimension
is used as the batch dimension.
Returns:
- atom_x: features at the atom level
- bond_x: features at the chemical bond level
- sc_x: features describing the scalar coupling atom_0 and atom_1 pairs
- sc_m_x: in addition to the set of features in 'sc_x', includes
features at the molecule level.
- eucl_dists: 3D euclidean distance matrices
- graph_dists: graph distance matrices
- angles: cosine angles between all chemical bonds
- mask: binary mask of dim=(batch_size, max_n_atoms, max_n_atoms),
where max_n_atoms is the largest number of atoms per molecule in
'batch'
- bond_idx: tensor of dim=(batch_size, max_n_bonds, 2), containing the
indices of atom_0 and atom_1 pairs that form chemical bonds
- sc_idx: tensor of dim=(batch_size, max_n_sc, 2), containing the
indices of atom_0 and atom_1 pairs that form a scalar coupling
pair
- angles_idx: tensor of dim=(batch_size, max_n_angles, 1), mapping
angles to the chemical bonds in the molecule.
- sc_types: scalar coupling types
- sc_vals: scalar coupling contributions (first 4 columns) and constant
(last column)
"""
batch_size, n_atom_sum, n_pairs_sum = len(batch), 0, 0
atom_x, bond_x, sc_x, sc_m_x = [], [], [], []
eucl_dists, graph_dists = [], []
angles_in, angles_out = [], []
mask, bond_idx, sc_idx = [], [], []
angles_in_idx, angles_out_idx = [], []
sc_types, sc_vals = [], []
for b in range(batch_size):
(sc_df, atom_df, bond_df, struct_df, angle_in_df, angle_out_df,
graph_dist_df) = batch[b]
n_atoms, n_pairs, n_sc = len(atom_df), len(bond_df), len(sc_df)
n_pad = C.MAX_N_ATOMS - n_atoms
eucl_dists_ = get_dist_matrix(struct_df)
eucl_dists_ = np.pad(eucl_dists_, [(0, 0), (0, n_pad)], 'constant',
constant_values=999)
atom_x.append(atom_df[C.ATOM_FEATS].values)
bond_x.append(bond_df[C.BOND_FEATS].values)
sc_x.append(sc_df[C.SC_EDGE_FEATS].values)
sc_m_x.append(sc_df[C.SC_MOL_FEATS].values)
sc_types.append(sc_df['type'].values)
if not test:
n_sc_pad = C.MAX_N_SC - n_sc
sc_vals_ = sc_df[C.CONTRIB_COLS+[C.TARGET_COL]].values
sc_vals.append(np.pad(sc_vals_, [(0, n_sc_pad), (0, 0)], 'constant',
constant_values=-999))
eucl_dists.append(eucl_dists_)
graph_dists.append(graph_dist_df.values[:,:-1])
angles_in.append(angle_in_df['cos_angle'].values)
if angle_out_df is not None:
angles_out.append(angle_out_df['cos_angle'].values)
else:
angles_out.append(np.array([C.BATCH_PAD_VAL]))
mask.append(np.pad(np.ones(2 * [n_atoms]), [(0, 0), (0, n_pad)],
'constant'))
bond_idx.append(bond_df[['idx_0', 'idx_1']].values)
sc_idx.append(sc_df[['atom_index_0', 'atom_index_1']].values)
angles_in_idx.append(angle_in_df['b_idx'].values)
if angle_out_df is not None:
angles_out_idx.append(angle_out_df['b_idx'].values)
else:
angles_out_idx.append(np.array([0.]))
n_atom_sum += n_atoms
n_pairs_sum += n_pairs
atom_x = arr_lst_to_padded_batch(atom_x, pad_val=0.)
bond_x = arr_lst_to_padded_batch(bond_x)
max_n_atoms = atom_x.size(1)
max_n_bonds = bond_x.size(1)
angles_out_idx = [a + max_n_bonds for a in angles_out_idx]
sc_x = arr_lst_to_padded_batch(sc_x)
    sc_m_x = arr_lst_to_padded_batch(sc_m_x)
if not test: sc_vals = arr_lst_to_padded_batch(sc_vals)
else: sc_vals = torch.tensor([0.] * batch_size)
sc_types = arr_lst_to_padded_batch(sc_types, torch.long)
mask = arr_lst_to_padded_batch(mask, torch.uint8, 0)
mask = mask[:,:,:max_n_atoms].contiguous()
bond_idx = arr_lst_to_padded_batch(bond_idx, torch.long, 0)
sc_idx = arr_lst_to_padded_batch(sc_idx, torch.long, 0)
angles_in_idx = arr_lst_to_padded_batch(angles_in_idx, torch.long, 0)
angles_out_idx = arr_lst_to_padded_batch(angles_out_idx, torch.long, 0)
angles_idx = torch.cat((angles_in_idx, angles_out_idx), dim=-1).contiguous()
eucl_dists = arr_lst_to_padded_batch(eucl_dists, pad_val=999)
eucl_dists = eucl_dists[:,:,:max_n_atoms].contiguous()
graph_dists = arr_lst_to_padded_batch(graph_dists, torch.long, 10)
graph_dists = graph_dists[:,:,:max_n_atoms].contiguous()
angles_in = arr_lst_to_padded_batch(angles_in)
angles_out = arr_lst_to_padded_batch(angles_out)
angles = torch.cat((angles_in, angles_out), dim=-1).contiguous()
return (atom_x, bond_x, sc_x, sc_m_x, eucl_dists, graph_dists, angles, mask,
bond_idx, sc_idx, angles_idx, sc_types), sc_vals
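# A minimal usage sketch (my addition; assumes the grouped dataframes
# gb_mol_* already exist -- they are not created in this module):
#
#   from functools import partial
#   from torch.utils.data import DataLoader
#   ds = MoleculeDataset(mol_ids, gb_mol_sc, gb_mol_atom, gb_mol_bond,
#                        gb_mol_struct, gb_mol_angle_in, gb_mol_angle_out,
#                        gb_mol_graph_dist)
#   dl = DataLoader(ds, batch_size=32, shuffle=True,
#                   collate_fn=partial(collate_parallel_fn, test=False))
#   inputs, sc_vals = next(iter(dl))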
|
robinniesert/kaggle-champs
|
moldataset.py
|
moldataset.py
|
py
| 7,217 |
python
|
en
|
code
| 48 |
github-code
|
6
|