| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 32328440349 |
#--------------------------------- Libraries ---------------------------------
# Socket-related libraries
from PIL import Image, ImageFile
from io import BytesIO
import socket
from PIL import Image
import pybase64
# Model-related libraries
from PIL import Image
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
import numpy as np
model = tf.keras.models.load_model('./model/ResNet50_Adadelta_Patience10.h5')
# DB-related libraries
# !pip3 install influxdb-client
# pip install tensorflow
import influxdb_client, os, time
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
#--------------------------------- Variables ---------------------------------
# Socket-related variables
global buf
buf = b''
global data
global result
# Model-related variables
fish_weight = 0
fish_img = './fish.jpg'
img_size = 224
fish_id = 0
small_cnt = 0  # juvenile fish catch count
# DB-related variables (connection settings)
bucket = "SeaProject"
org = "[email protected]"
token = "Q7-n7NN5Bf-1tTgpr2eOs6-hi6e7S7g8_z2vYR98KsQXM-1j75-ytnnSOue8dMm_cWSjMMGDzqXMTWTa0xU1NA=="
url = "https://europe-west1-1.gcp.cloud2.influxdata.com"
client = influxdb_client.InfluxDBClient(url=url, token=token, org=org)
write_api = client.write_api(write_options=SYNCHRONOUS)  # configure the write API: synchronous (real-time) writes
# --------------------------------- Socket function 1 (receive data from the client) ---------------------------------
# Function for receiving messages sent by the client
def _get_bytes_stream(sock, length):
global buf
global data
data = b''
# Handle the case where the client message is larger than the buffer size given to recv
try:
step = length
while True:
# Receive a message from the client
data = sock.recv(step)
buf += data
# If an empty byte string is received, exit the loop
if data == b'':
break
# If part of the message is still missing, shrink the next read size
elif len(buf) < length:
step = length - len(buf)
except Exception as e:
print(e)
return buf[:length]
#--------------------------------- Socket function 2 (run the server for the Model) ---------------------------------
def PC_server(HOST, PORT):
server_HOST = HOST  # may be a hostname, an IP address, or the empty string ""
server_PORT = PORT  # port number on which to wait for client connections; any number from 1-65535
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # create a socket object; address family: IPv4, socket type: TCP
# Needed to avoid the WinError 10048 "port already in use" error
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind the socket to a specific network interface and port number
# An empty string accepts connections on all network interfaces
server_socket.bind((server_HOST, server_PORT))
# Allow the server to accept client connections
server_socket.listen()
# Block in accept() until a client connects, then return a new socket
client_socket, addr = server_socket.accept()
# Address of the connected client
print('Connected by', addr)
_get_bytes_stream(client_socket, 10000)
client_socket.close()
server_socket.close()
# -------------------------------- Socket function 3 (RaspberryPi => Model data transfer) ---------------------------------
def receive_data(data):
global fish_img
global fish_weight
ImageFile.LOAD_TRUNCATED_IMAGES = True
try:
fish_weight = int.from_bytes(data[-2:], "little")  # receive the fish weight
buf_new = data + bytes('=', 'utf-8') * (4 - len(data) % 4)  # pad the received data to avoid base64 decoding errors
img = Image.open(BytesIO(pybase64.b64decode(buf_new)))
img = img.convert('RGB')
finally:
ImageFile.LOAD_TRUNCATED_IMAGES = False
img.save('fish.jpg', "JPEG")  # save the image
fish_img = image.load_img(fish_img, target_size=(img_size, img_size))  # load the image
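# Illustration of the padding rule above (a sketch, not part of the original file):
# base64 input must be a multiple of 4 bytes long, so '=' bytes are appended
# before decoding, e.g.:
#   payload = b'abcde'
#   payload + b'=' * (4 - len(payload) % 4)   # -> b'abcde==='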
# -------------------------------- Socket function 4 (Model => RaspberryPi data transfer) ---------------------------------
def PC_client(HOST, PORT):
global result
client_HOST = HOST
client_PORT = PORT
fish_type = bytes(result[0],'utf-8')
fish_check = bytes(result[1],'utf-8')
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((client_HOST, client_PORT))
client_socket.send(fish_type)
client_socket.send(fish_check)
client_socket.close()
#--------------------------------- Model function ( Input: image, weight => Output: species, juvenile status ) ---------------------------------
def AI_check(fish_img, fish_weight):
global result
# Preprocess the fish image
model_fish_img = image.img_to_array(fish_img)
model_fish_img = np.expand_dims(model_fish_img, axis=0)
model_fish_img = preprocess_input(model_fish_img)
# Run the model
result_img = model.predict(model_fish_img)
# Determine the species and set its reference weight
if np.argmax(result_img) == 0:  # black porgy
fish_type = 'BP'
standard_weight = 392
elif np.argmax(result_img) == 1:  # rock bream
fish_type = 'RB'
standard_weight = 331
elif np.argmax(result_img) == 2:  # red seabream
fish_type = 'RS'
standard_weight = 210
# Determine juvenile status
if fish_weight < standard_weight:
fish_check = 'small'
else:
fish_check = 'adult'
# Result sent to the Raspberry Pi
result = [fish_type, fish_check]  # species / juvenile status
return result
#--------------------------------- Data-shaping function for the DB ( Input: species, juvenile status => Output: ID, juvenile rate, species, juvenile status ) ---------------------------------
def DB_preprocess(fish_type, fish_check):
global fish_id
global small_rate
global small_cnt
# Convert the species code to Korean
if fish_type == 'BP':
fish_type = '감성돔'
elif fish_type == 'RB':
fish_type = '돌돔'
elif fish_type == 'RS':
fish_type = '참돔'
# Convert the juvenile status to Korean
if fish_check == 'adult':
fish_check = '성어'
elif fish_check == 'small':
fish_check = '치어'
small_cnt += 1  # count juvenile fish
fish_id += 1  # count total fish
small_rate = (small_cnt / fish_id) * 100  # juvenile percentage
result = [fish_id, small_rate, fish_type, fish_check]  # ID, juvenile rate, species, juvenile status
return result
#--------------------------------- DB write function ---------------------------------
def send_to_DB(id, small_rate, fish_type, fish_check):
points = (
Point("어종4")  # Point 1: ID, species
.tag(key="id", value=id)
.field(fish_type, value=int(1)),
Point("치어여부4")  # Point 2: ID, juvenile status
.tag(key="id", value=id)
.field(fish_check, value=int(1)),
Point("치어비율4")  # Point 3: juvenile rate
.field("치어_비율", value=small_rate)
)
write_api = client.write_api(write_options=SYNCHRONOUS)  # configure the write API: synchronous writes
return points
def final():
global buf
global fish_img
global fish_weight
PC_server('', 9999)  # open the Model server
while True:
if buf != b'':  # if buf holds received data
receive_data(buf)  # receive the data sent from the RaspberryPi
AI_result = AI_check(fish_img, fish_weight)  # run the model
PC_client('192.168.1.44', 9999)  # send the Model result to the RaspberryPi
DB_result = DB_preprocess(AI_result[0], AI_result[1])  # shape the data to send to the DB
points = send_to_DB(DB_result[0], DB_result[1], DB_result[2], DB_result[3])
write_api.write(bucket=bucket, org=org, record=points)  # send the data to the DB
buf = b''  # clear buf
break
# final()  # repeat forever
# To delete a specific measurement:
# client = InfluxDBClient(url=url, token=token, org=org)
# delete_api = client.delete_api()
# delete_api.delete('1970-01-01T00:00:00Z', '2022-11-11T00:00:00Z', '_measurement="put the measurement name here"', bucket=bucket)
final()
| MultiFinal/Fish_Project | PC.py | PC.py | py | 8,991 | python | ko | code | 0 | github-code | 6 |
| 41734453103 |
from __future__ import absolute_import, unicode_literals
from lol_stats_api.helpers.redis import db_metadata, db_matchlist
from celery.decorators import task, periodic_task
from celery.schedules import crontab
from redis import Redis
from celery_singleton import Singleton, clear_locks
import os
from datetime import datetime as dt, timedelta as td
from lol_stats_api.helpers.variables import LAST_IMPORTANT_PATCH, DAYS_TO_REMOVE_DATA
from stats import get_players, get_matches, calculations
from assets.load_data import load_data
from lol_stats_api.helpers.mongodb import get_mongo_stats
from celery.signals import worker_ready
from assets.ddragon_routes import get_current_version
from lol_stats_api.helpers.mongodb import get_saved_version, get_last_calculated_patch
from stats.models import *
from lol_stats_api.celeryApp import app
import json
db_stats = get_mongo_stats()
@worker_ready.connect
def unlock_all(**kwargs):
print("Test")
clear_locks(app)
# Players
@app.task(name='periodically_update_player_list', base=Singleton)
def periodically_update_player_list():
"""
Periodically updates the player list
"""
update_player_list.delay()
@app.task(base=Singleton, name="update_player_list")
def update_player_list():
print("Inicio el updateo de jugadores")
get_players.update_player_list()
@task(base=Singleton, name="update_players")
def update_player_detail_in_celery(current_player):
"""
Updates a single player's information
"""
get_players.update_player_detail(current_player)
# Periodic cleanup
@app.task(base=Singleton, name="clear_old_data")
def clear_old_data():
"""
Deletes old data
"""
timestamp = get_matches.x_days_ago(DAYS_TO_REMOVE_DATA)
more_time_ago = get_matches.x_days_ago(DAYS_TO_REMOVE_DATA + 2)
# Patch 10.23 onwards
timestamp = max(LAST_IMPORTANT_PATCH, timestamp)
more_time_ago = max(LAST_IMPORTANT_PATCH, more_time_ago)
print("Deleting data older than {}".format(timestamp))
# Timelines
print("Eliminando timelines")
Timeline.objects.filter(gameTimestamp__lt=more_time_ago).delete()
print("Eliminando skill_ups")
SkillUp.objects.filter(timestamp__lt=more_time_ago).delete()
# Bans
print("Eliminando bans")
Ban.objects.filter(timestamp__lt=timestamp).delete()
# Champ data
print("Eliminando champ data")
ChampData.objects.filter(timestamp__lt=timestamp).delete()
# Playstyle
print("Eliminando champ playstyle")
ChampPlaystyle.objects.filter(timestamp__lt=timestamp).delete()
print("Eliminando first buy")
FirstBuy.objects.filter(timestamp__lt=more_time_ago).delete()
@app.task(name='clear_redis_old_data', base=Singleton)
def clear_redis_old_data():
"""
Check each key; while its last entry is too old, keep deleting
"""
for server in db_matchlist.keys():
print("Revisando server - {}".format(server))
while True:
# Take the last match from the current list
match = db_matchlist.rpop(server)
if match is None:
break
data_match = json.loads(match)
timestamp = get_matches.x_days_ago(DAYS_TO_REMOVE_DATA)
# If it is within range, put it back and stop
if data_match['timestamp'] > timestamp:
db_matchlist.rpush(server, match)
break
print("Elimino match: {}".format(match))
# Matches
@app.task(base=Singleton, name="process_match")
def process_match_with_celery(match):
"""
Processes a match with celery
"""
get_matches.process_match(match)
# Statistics
@app.task(base=Singleton, name="periodically_generate_new_stats")
def periodically_generate_new_stats():
"""
Periodically runs the statistics calculation
"""
generate_new_stats.delay()
@task(base=Singleton, name="generate_stats")
def generate_new_stats():
"""
Generates the statistics
"""
calculations.generate_builds_stats_by_champ()
# Assets
@app.task(base=Singleton, name="periodically_update_assets")
def periodically_update_assets():
"""
Periodically runs the assets update
"""
update_assets.delay()
@app.task(base=Singleton, name="update_assets")
def update_assets():
"""
Updates the assets
"""
saved_version = get_saved_version()
game_version = get_current_version()
# If the version differs, update
if saved_version != game_version:
load_data()
last_calculated_patch = get_last_calculated_patch()
if last_calculated_patch != game_version:
# Recalculate the statistics
generate_new_stats.delay()
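# A hedged sketch of how these periodic tasks could be wired into celery beat;
# the schedule values below are assumptions, not taken from this repository:
#
# app.conf.beat_schedule = {
#     "update-player-list": {
#         "task": "periodically_update_player_list",
#         "schedule": crontab(minute=0),  # assumed: hourly
#     },
#     "generate-new-stats": {
#         "task": "periodically_generate_new_stats",
#         "schedule": crontab(minute=30, hour="*/6"),  # assumed interval
#     },
# }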
| fabran99/LolStatisticsBackend | bard_app_api/lol_stats_api/tasks.py | tasks.py | py | 4,723 | python | en | code | 1 | github-code | 6 |
| 17779473058 |
#!/usr/bin/python3
"""Query reddit API for work count in hot list using recusion"""
import requests
def count_words(subreddit, word_list, after=None, count={}):
"""Count words in word_list in subreddit"""
if after is None:
subred_URL = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)
else:
subred_URL = 'https://www.reddit.com/r/{}/hot.json?after={}'.format(
subreddit, after)
subreddit_req = requests.get(subred_URL,
headers={"user-agent": "user"},
allow_redirects=False)
try:
data = subreddit_req.json().get("data")
except Exception:
return
for word in word_list:
word = word.lower()
if word not in count.keys():
count[word] = 0
children = data.get("children")
for child in children:
title = (child.get("data").get("title").lower())
title = title.split(' ')
for word in word_list:
word = word.lower()
count[word] += title.count(word)
after = data.get("after")
if after is not None:
return count_words(subreddit, word_list, after, count)
else:
sorted_subs = sorted(count.items(), key=lambda x: (-x[1], x[0]))
for i in sorted_subs:
if i[1] != 0:
print(i[0] + ": " + str(i[1]))
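# Example invocation (hypothetical arguments), printing counts sorted by
# frequency (descending) and then alphabetically:
#   count_words("programming", ["python", "java", "javascript"])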
| robertrowe1013/holbertonschool-interview | 0x13-count_it/0-count.py | 0-count.py | py | 1,364 | python | en | code | 0 | github-code | 6 |
| 21360428026 |
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
path1 = 'project_data/DC_Crime.csv'
path2 = 'project_data/DC_Properties.csv'
path3 = 'project_data/DC_crime_test.csv'
data = pd.read_csv(path1)
Features =['SHIFT', 'OFFENSE', 'METHOD','BID',"NEIGHBORHOOD_CLUSTER",'ucr-rank',\
'sector','ANC','BLOCK_GROUP','BLOCK', 'DISTRICT','location','offensegroup',\
'PSA','WARD','VOTING_PRECINCT','CCN','END_DATE','OCTO_RECORD_ID','offense-text',\
'offensekey', 'XBLOCK', 'YBLOCK', 'START_DATE','REPORT_DAT','CENSUS_TRACT']
X = data.drop(columns=Features)
y= data['offensegroup']
X_train,X_test,y_train,y_test = train_test_split(X,y,stratify =y)
imp= SimpleImputer()
X_train = imp.fit_transform(X_train)
X_test = imp.transform(X_test)
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(max_depth=50, random_state=40)
dtree.fit(X_train,y_train)
y_pred=dtree.predict(X_test)
treefeatures=dtree.feature_importances_
print(dtree.score(X_train,y_train))
print(list(y_pred))
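# A natural sanity check (sketch): also score the held-out split, since the
# training accuracy of a depth-50 tree is usually close to 1.0 and optimistic.
# print(dtree.score(X_test, y_test))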
# Use dtree.predict for prediction; data is the imported dataset
| montpelllier/MA333_Introduction-to-Big-Data-Science | decisiontree.py | decisiontree.py | py | 1,129 | python | en | code | 0 | github-code | 6 |
| 31099193784 |
"""
Greg McClellan
Created: 8/25/13
Last Edited: 8/25/13
Problem:
n! means n × (n − 1) × ... × 3 × 2 × 1
For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
"""
from math import factorial
def factorial_digit_sum(n):
#returns the sum of digits of n!
digit_sum = 0
n = str(factorial(n))
for digit in n:
digit_sum += int(digit)
return digit_sum
print(factorial_digit_sum(100))
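# Sanity check from the problem statement above: the digits of 10! = 3628800
# sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert factorial_digit_sum(10) == 27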
| gsmcclellan/project_euler | Factorial_digit_sum.py | Factorial_digit_sum.py | py | 565 | python | en | code | 0 | github-code | 6 |
| 7485160594 |
from datetime import datetime
from os.path import basename
from types import SimpleNamespace
import math
import numpy as np
__version__ = "2020.10.06"
def solve(length, supports, loads, EI, GA, top, bottom, shear): # {{{
"""Solve the beam problem.
Arguments:
length: The length of the beam in mm. This will be rounded to
an integer value.
supports: Either None or a 2-tuple of numbers between 0 and length.
If None, the beam will be assumed to be clamped at the origin.
loads: Either a Load or an iterable of Loads.
EI: An iterable of size length+1 or a float containing the bending
stiffness in every mm of the cross-section of the beam.
GA: An iterable of size length+1 or a float containing the shear
stiffness in every mm of the cross-section of the beam.
top: An iterable of size length+1 or a float containing the height
above the neutral line in every mm of the cross-section of the beam.
bottom: An iterable of size length+1 or a float containing the height
under the neutral line in every mm of the cross-section of the beam.
shear: A boolean indicating if shear deformations should be
included.
Returns:
This function returns a types.SimpleNamespace with following items:
* D: A numpy array containing the shear force in the cross-section
at each mm of the beam.
* M: A numpy array containing the bending moment in the cross-section
at each mm of the beam.
* dy: A numpy array containing the deflection angle at each mm
of the beam.
* y: A numpy array containing the vertical displacement at each mm
of the beam.
* a: A numpy array containing angle between the tangent line of the beam
and the x-axis in radians at each mm of the beam.
* etop: A numpy array containing the strain at the top of the
cross-section at each mm of the beam.
* ebot: A numpy array containing the strain at the bottom of the
cross-section at each mm of the beam.
* R: If 'supports' was provided, R is a 2-tuple of the reaction
forces at said supports. Else R[0] is the reaction force at the
clamped x=0 and R[1] is the reaction moment at that point.
* length: Length in mm.
"""
length, s1, s2 = _check_length_supports(length, supports)
loads = _check_loads(loads)
loads = [ld for ld in loads] # make a copy since we modify it!
EI, GA, top, bot = _check_arrays(length, EI, GA, top, bottom)
if shear not in (True, False):
raise ValueError("shear should be a boolean")
# Calculate support loads.
moment = sum([ld.moment(s1) for ld in loads])
if s2:
R2 = Load(force=-moment / (s2 - s1), pos=s2)
loads.append(R2)
else: # clamped at x = 0
R2 = -moment
# Force equilibrium
R1 = Load(force=-sum([ld.size for ld in loads]), pos=s1)
loads.append(R1)
# Calculate shear force
D = np.sum(np.array([ld.shear(length) for ld in loads]), axis=0)
# Calculate bending moment
M = np.cumsum(D)
Mstep = np.sum(
np.array(
[ld.moment_array(length) for ld in loads if isinstance(ld, MomentLoad)]
),
axis=0,
)
M += Mstep
if s2 is None:
M -= M[-1]
ddy_b = M / EI
etop, ebot = -top * ddy_b, -bot * ddy_b
dy = np.cumsum(ddy_b)
if shear:
dy += -1.5 * D / GA # shear
y = np.cumsum(dy)
if s2:
# First, translate the whole list so that the value at the
# index anchor is zero.
y = y - y[s1]
# Then rotate around the anchor so that the deflection at the other
# support is also 0.
delta = -y[s2] / math.fabs(s1 - s2)
slope = (
np.concatenate((np.arange(-s1, 1, 1), np.arange(1, len(y) - s1))) * delta
)
dy += delta
y = y + slope
results = SimpleNamespace()
results.length = length
results.D, results.M = D, M
results.dy, results.y, results.R = dy, y, (R1, R2)
results.a = np.arctan(dy)
results.etop, results.ebot = etop, ebot
return results # }}}
def save(results, path): # {{{
"""
Save the data from a solved results to a file as columns of numbers.
It writes the following columns to the file:
* position
* shear force
* bending moment
* displacement
* strain at top
* strain at bottom
* deflection angle
Arguments:
results: Results dictionary.
path: Location where the data should be saved.
Raises:
AttributeError if the results have not been solved yet.
"""
data = np.vstack(
(
np.arange(results.length + 1),
results.D,
results.M,
results.y,
results.etop,
results.ebot,
results.dy,
)
).T
p = basename(path)
d = str(datetime.now())[:-7]
h = f"file: {p}\ngenerated: {d}\nx D M y et eb dy"
np.savetxt(path, data, fmt="%g", header=h) # }}}
def EI(sections, normal=None): # {{{
"""Calculate the bending stiffnes of a cross-section.
The cross-section is composed out of rectangular nonoverlapping sections
that can have different Young's moduli.
Each section is represented by a 4-tuple (width, height, offset, E).
The offset is the distance from the top of the section to the top of the
highest section. This should always be a positive value.
E is the Young's modulus of the material of this section.
Arguments:
sections: Iterable of section properties.
normal: The Young's modulus to which the total cross-section will be
normalized. (Not used anymore, retained for compatibility.)
Returns:
Tuple of EI, top and bottom. Top and bottom are with respect to the
neutral line.
Examples:
>>> E = 210000
>>> B = 100
>>> H = 20
>>> sections = ((B, H, 0, E),)
>>> EI(sections)
(14000000000.0, 10.0, -10.0)
>>> B = 100
>>> h = 18
>>> t = 1
>>> H = h + 2 * t
>>> E = 210000
>>> sections = ((B, t, 0, E), (B, t, h+t, E))
>>> EI(sections)
(3794000000.0, 10.0, -10.0)
>>> E1, E2 = 200000, 71000
>>> t1, t2 = 1.5, 2.5
>>> H = 31
>>> B = 100
>>> sections = ((B, t1, 0, E1), (B, t2, H-t2, E2))
>>> EI(sections)
(9393560891.143106, 11.530104712041885, -19.469895287958117)
"""
normal = sections[0][-1]
normalized = tuple((w * E / normal, h, offs) for w, h, offs, E in sections)
A = sum(w * h for w, h, _ in normalized)
S = sum(w * h * (offs + h / 2) for w, h, offs in normalized)
yn = S / A
# Find any geometry that straddles yn.
to_split = tuple(g for g in sections if g[2] < yn and g[1] + g[2] > yn)
geom = tuple(g for g in sections if g not in to_split)
# split that geometry.
# The new tuple has the format (width, height, top, bottom)
new_geom = []
for w, h, offs, E in to_split:
h1 = yn - offs
h2 = h - h1
new_geom.append((w, h1, h1, 0, E))
new_geom.append((w, h2, 0, -h2, E))
# Convert the remaining geometry to reference yn.
for w, h, offs, E in geom:
new_geom.append((w, h, yn - offs, yn - offs - h, E))
EI = sum(E * w * (top ** 3 - bot ** 3) / 3 for w, h, top, bot, E in new_geom)
top = max(g[-3] for g in new_geom)
bot = min(g[-2] for g in new_geom)
return EI, top, bot # }}}
def interpolate(tuples): # {{{
"""
Creates a numpy array and fills it by interpolation.
Arguments:
tuples: A list of 2-tuples (n, v). Note that the n values will be
rounded and converted to integers.
Returns:
A numpy array with interpolated values so that at index n the array has
the value v.
Examples:
>>> import numpy as np
>>> interpolate([(0,0), (3,3)])
array([0., 1., 2., 3.])
>>> interpolate([(0,0), (4,3), (6,-1)])
array([ 0. , 0.75, 1.5 , 2.25, 3. , 1. , -1. ])
>>> interpolate([(1,1), (4,4), (6,-3)])
array([ 1. , 2. , 3. , 4. , 0.5, -3. ])
"""
x = np.array([int(round(x)) for x, _ in tuples])
y = np.array([y for _, y in tuples])
startx, starty = x[0], y[0]
arrays = []
for dx, dy in zip(x[1:] - x[:-1], y[1:] - y[:-1]):
if dx > 0:
a = np.linspace(starty, starty + dy, num=dx + 1, endpoint=True)
arrays.append(a[:-1])
startx += dx
starty += dy
arrays.append(np.array([y[-1]]))
return np.concatenate(arrays) # }}}
def patientload(**kwargs): # {{{
"""
Returns a list of DistLoads that represent a patient
load according to IEC 60601 specs. For this calculation the patient is
assumed to be lying with his feet pointing to the origin.
Named arguments:
kg: Mass of the patient in kg.
force: The gravitational force of the patient in N. Note that this
should be a *negative* number.
feet: Location of the patient's feet in mm.
head: Location of the patient's head in mm. This is an alternative for
'feet'. Either 'feet' or 'head' must be present or a ValueError
will be raised.
Returns:
A list of DistLoads.
"""
f = _force(**kwargs)
if "feet" in kwargs:
s = round(float(kwargs["feet"]))
elif "head" in kwargs:
s = round(float(kwargs["head"])) - 1900
else:
raise ValueError("No 'feet' nor 'head' given.")
fractions = [
(0.148 * f, (s + 0, s + 450)), # l. legs, 14.7% from 0--450 mm
(0.222 * f, (s + 450, s + 1000)), # upper legs
(0.074 * f, (s + 1000, s + 1180)), # hands
(0.408 * f, (s + 1000, s + 1700)), # torso
(0.074 * f, (s + 1200, s + 1700)), # arms
(0.074 * f, (s + 1220, s + 1900)),
] # head
return [DistLoad(force=i[0], pos=i[1]) for i in fractions] # }}}
class Load(object): # {{{
"""Point load."""
def __init__(self, **kwargs):
"""
Create a point load.
Named arguments:
force: Force in Newtons. N.B: downwards force should be a
*negative* number.
kg: Weight of a mass in kg, alternative for force. N.B: a weight
of 1 kg will translate into a force of -9.81 N.
pos: Distance from the origin to the location of the force in mm.
Examples:
>>> str(Load(kg=150, pos=100))
'point load of -1471.5 N @ 100 mm.'
"""
self.size = _force(**kwargs)
self.pos = round(float(kwargs["pos"]))
def __str__(self):
return f"point load of {self.size} N @ {self.pos} mm."
def moment(self, pos):
"""
Returns the bending moment that the load exerts at pos.
"""
return (self.pos - pos) * self.size
def shear(self, length):
"""
Return the contribution of the load to the shear.
Arguments:
length: length of the array to return.
Returns:
An array that contains the contribution of this load.
"""
rv = np.zeros(length + 1)
rv[self.pos :] = self.size
return rv # }}}
class MomentLoad(Load): # {{{
def __init__(self, moment, pos):
"""Create a local bending moment load.
Arguments:
moment: bending moment in Nmm
pos: position of the bending moment.
"""
self.m = float(moment)
Load.__init__(self, force=0, pos=pos)
def __str__(self):
return f"moment of {self.m} Nmm @ {self.pos}"
def moment(self, pos):
"""
Returns the bending moment that the load exerts at pos.
"""
return self.m
def shear(self, length):
"""
Return the contribution of the load to the shear.
Arguments:
length: length of the array to return.
Returns:
An array that contains the contribution of this load.
"""
return np.zeros(length + 1)
def moment_array(self, length):
"""
Return the contribution of the load to the bending moment.
Arguments:
length: length of the array to return.
Returns:
An array that contains the contribution of this load.
"""
rv = np.zeros(length + 1)
rv[self.pos :] = -self.m
return rv # }}}
class DistLoad(Load): # {{{
"""Evenly distributed load."""
def __init__(self, **kwargs):
"""
Create an evenly distributed load.
Named arguments:
force: Force in Newtons. N.B: downwards force should be a
*negative* number.
kg: Weight of a mass in kg, alternative for force. N.B: a weight
of 1 kg will translate into a force of -9.81 N.
start: Begin of the distributed load. Must be used in combination
with the 'end' argument.
end: End of the distributed load.
pos: 2-tuple containing the borders of the distributed load.
You can use this instead of start and end.
"""
size = _force(**kwargs)
self.start, self.end = _start_end(**kwargs)
if self.start > self.end:
self.start, self.end = self.end, self.start
Load.__init__(self, force=size, pos=float(self.start + self.end) / 2)
def __str__(self):
return (
f"constant distributed load of {self.size} N @ {self.start}--{self.end} mm."
)
def shear(self, length):
rem = length + 1 - self.end
d = self.end - self.start
q = self.size
parts = (np.zeros(self.start), np.linspace(0, q, d), np.ones(rem) * q)
return np.concatenate(parts) # }}}
class TriangleLoad(DistLoad): # {{{
"""Linearly rising distributed load."""
def __init__(self, **kwargs):
"""
Create an linearly rising distributed load.
Named arguments:
force: Force in Newtons. N.B: downwards force should be a
*negative* number.
kg: Weight of a mass in kg, alternative for force. N.B: a weight
of 1 kg will translate into a force of -9.81 N.
start: Begin of the distributed load. Must be used in combination
with the 'end' argument.
end: End of the distributed load.
"""
DistLoad.__init__(self, **kwargs)
length = abs(self.start - self.end)
pos = (self.start, self.end)
self.pos = round(min(pos)) + 2.0 * length / 3.0
self.q = 2 * self.size / length
def __str__(self):
if self.start < self.end:
d = "ascending"
else:
d = "descending"
return f"linearly {d} distributed load of {self.size} N @ {self.start}--{self.end} mm."
def shear(self, length):
rem = length + 1 - self.end
parts = (
np.zeros(self.start),
np.linspace(0, self.q, self.end - self.start),
np.ones(rem) * self.q,
)
dv = np.concatenate(parts)
return np.cumsum(dv) # }}}
# Everything below is internal to the module.
def _force(**kwargs): # {{{
"""
Determine the force. See Load.__init__()
Returns:
The force as a float.
Examples:
>>> _force(kg=1)
-9.81
"""
if "force" in kwargs:
force = float(kwargs["force"])
elif "kg" in kwargs:
force = -9.81 * float(kwargs["kg"])
else:
raise KeyError("No 'force' or 'kg' present")
return force # }}}
def _start_end(**kwargs): # {{{
"""
Validate the position arguments. See DistLoad.__init__()
Returns:
Position as a (start, end) tuple
Examples:
>>> _start_end(pos=(100, 200))
(100, 200)
>>> _start_end(start=100, end=200)
(100, 200)
"""
if "pos" in kwargs:
p = kwargs["pos"]
if not isinstance(p, tuple) or len(p) != 2:
raise ValueError("'pos' should be a 2-tuple")
pos = (round(float(kwargs["pos"][0])), round(float(kwargs["pos"][1])))
elif "start" in kwargs and "end" in kwargs:
pos = (round(float(kwargs["start"])), round(float(kwargs["end"])))
else:
raise KeyError("Neither 'pos' or 'start' and 'end' present")
return pos # }}}
def _check_length_supports(length, supports): # {{{
"""
Validate the length and supports. See solve().
Returns:
A tuple (length, support1, support2)
"""
length = int(round(length))
if length < 1:
raise ValueError("length must be โฅ1")
if supports is not None:
if len(supports) != 2:
t = "The problem definition must contain exactly two supports."
raise ValueError(t)
s = (int(round(supports[0])), int(round(supports[1])))
if s[0] == s[1]:
raise ValueError("Two identical supports found!")
elif s[0] > s[1]:
s = (s[1], s[0])
if s[0] < 0 or s[1] > length:
raise ValueError("Support(s) outside of the beam!")
else:
s = (0, None)
return (length, s[0], s[1]) # }}}
def _check_loads(loads): # {{{
"""
Validate the loads in the problem. See solve().
Returns:
A list of Loads
"""
if isinstance(loads, Load):
loads = [loads]
if loads is None or len(loads) == 0:
raise ValueError("No loads specified")
for ld in loads:
if not isinstance(ld, Load):
raise ValueError("Loads must be Load instances")
return list(loads) # }}}
def _check_arrays(L, EI, GA, top, bottom): # {{{
"""
Validate the length of the EI, GA, top and bot iterables and converts
them into numpy arrays. See solve().
Returns:
The modified EI, GA, top and bottom arrays.
"""
rv = []
for name, ar in zip(("EI", "GA", "top", "bottom"), (EI, GA, top, bottom)):
# Convert single number to an ndarray.
if isinstance(ar, (int, float)):
ar = np.ones(L + 1) * ar
# Convert list/tuple to ndarray.
elif isinstance(ar, (list, tuple)):
ar = np.array(ar)
elif isinstance(ar, np.ndarray):
pass
else:
raise ValueError(
f"{name} is not a int, float, list, tuple or numpy.ndarray"
)
la = len(ar)
if la != L + 1:
raise ValueError(
f"Length of {name} ({la}) doesn't match beam length ({L}) + 1 ."
)
rv.append(ar)
return rv # }}}
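# Minimal usage sketch (assumed numbers, not part of the module): a 1 m beam
# simply supported at both ends with a 200 N downward point load at midspan.
#
#   import beammech as bm
#   problem = bm.solve(
#       length=1000,
#       supports=(0, 1000),
#       loads=bm.Load(force=-200, pos=500),
#       EI=14e9, GA=1e9,        # assumed stiffness values
#       top=10, bottom=-10,     # assumed section heights in mm
#       shear=False,
#   )
#   print(problem.R)            # reaction forces at the two supports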
| rsmith-nl/beammech | beammech.py | beammech.py | py | 18,941 | python | en | code | 26 | github-code | 6 |
| 12799024256 |
import webapp2, jinja2, os
# Import requests with app engine adapter
import requests, requests_toolbelt.adapters.appengine
from bs4 import BeautifulSoup
import datetime, time
from google.appengine.ext import ndb
# Patch adapter
requests_toolbelt.adapters.appengine.monkeypatch()
# os.path.dirname(__file__) is the current location of the file
# os.path.join joins the current location with templates
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
baseUrl = "https://woodroffehs.ocdsb.ca/Home%20Page%20Images/"
srcLocation = 'http://woodroffehs.ocdsb.ca/Home%20Page%20Images/Forms/AllItems.aspx'
garbageImages = [
'',
]
acceptableFormats = [
'.png',
'.PNG',
'.jpg',
'.JPG'
]
for item in range(len(garbageImages)):
garbageImages[item] = srcLocation + garbageImages[item]
def GetImages():
images = []
page = BeautifulSoup(requests.get(srcLocation).content, 'html.parser')
for image in page.findAll('img'):
#images.append(image)
try:
alt = str(image['alt']).replace(" ", '%20')
if alt not in garbageImages and any(acceptable in alt for acceptable in acceptableFormats):
images.append(baseUrl + alt)
except KeyError:
pass
return images
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
# MainPage is a child of Handler, therefore it has all the functions and variables of Handler
class MainPage(Handler):
'''
def get(self):
now = datetime.datetime.now()
latestUpdateObj = Timeout.query().fetch()[0]
latestUpdate = latestUpdateObj.updateTime
images = Image.query().fetch()
self.render('slideshow.html', images=images)
if now >= now:
latestUpdate.updateTime = now + datetime.timedelta(hours=2)
ndb.delete_multi(Image.query().fetch())
for image in GetImages():
img = Image(link = image)
img.put()
#self.render('slideshow.html', images=GetImages())
'''
def get(self):
now = datetime.datetime.now()
class GenerateInitial(Handler):
def get(self):
timeout = Timeout(updateTime = datetime.datetime.now())
timeout.put()
for image in GetImages():
img = Image(link = image)
img.put()
class EscapeCache(Handler):
def get(self):
self.render('slideshow.html', images=GetImages())
class Image(ndb.Model):
link = ndb.StringProperty(required = True)
uuid = ndb.StringProperty()
updateTime = ndb.DateTimeProperty()
class Timeout(ndb.Model):
# Assumed minimal model: Timeout is referenced by GenerateInitial above
# but was not defined anywhere in this file.
updateTime = ndb.DateTimeProperty()
app = webapp2.WSGIApplication([
('/', MainPage),
('/init', GenerateInitial),
('/escape', EscapeCache)
], debug=True)
| jQwotos/better-shareproint-slides | main.py | main.py | py | 3,061 | python | en | code | 0 | github-code | 6 |
| 29675107674 |
from trees.binary_sort_tree import BinarySortTree
def build_dictBinTree(entries):
dic = BinarySortTree()
for k, v in entries.items():
dic.insert(k, v)
dic.insert(20, 20)
return dic
def main():
dic = build_dictBinTree(
{57: 57, 36: 36, 89: 89, 7: 7, 43: 43, 65: 65, 96: 96, 18: 18, 52: 52, 60: 60, 74: 74})
dic.print()
if __name__ == '__main__':
main()
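# Hedged note: assuming BinarySortTree keeps keys in search-tree order, an
# in-order traversal of the keys inserted above would visit them sorted:
# 7, 18, 20, 36, 43, 52, 57, 60, 65, 74, 89, 96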
| caominglong/data_structures_and_algorithms | trees/test/binary_sort_tree_test.py | binary_sort_tree_test.py | py | 402 | python | en | code | 0 | github-code | 6 |
| 31655899927 |
def find_anagrams(word, candidates):
anagrams = []
for w in candidates:
if len(w) == len(word) and w.lower() != word.lower():
if sorted(w.lower()) == sorted(word.lower()):
anagrams.append(w)
return anagrams
""" The solution below only fails 1 test:
when the words have repeated letters. e.g. "tapper" and "patter"
The reason is that the set keeps unique elements. So, with the
two words above, if we eliminate the repeated "p" and "t", they
do make an anagram, but that's incorrect
"""
# A = set(word.lower())
# for w in candidates:
# if len(w) == len(word) and w.lower() != word.lower():
# B = set(w.lower())
# C = A - B
# if len(C) == 0:
# anagrams.append(w)
# return anagrams
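# Concrete illustration of the failure mode described above:
#   set("tapper") == set("patter")        # True: both reduce to {'t','a','p','e','r'}
#   sorted("tapper") == sorted("patter")  # False: 'aepprt' vs 'aeprtt'
# so the sorted() comparison in find_anagrams handles repeated letters
# correctly, while the set-based approach does not.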
| ilee38/exercism-io-coding-exercises | python/anagram/anagram.py | anagram.py | py | 838 | python | en | code | 0 | github-code | 6 |
| 25649315535 |
import ctypes
import sys
class DynamicArray(object):
def __init__(self):
self.n = 0 #Actual count of the actual elements
self.capacity = 1 #Default capacity
self.A = self.make_array(self.capacity) #To call make.array
def __len__(self):
return self.n
def __getitem__(self,k): #Return the element at index k
if not 0 <= k < self.n: #If index k is NOT between 0 and the actual count of elements
raise IndexError('K is out of bounds!') #Raise IndexError
return self.A[k] #Return the element at index k
def append(self,ele): #To add ele to the end of the array
if self.n == self.capacity: #If the capacity is full
self._resize(2*self.capacity) # 2x if capacity isn't enough
self.A[self.n] = ele
self.n += 1
def _resize(self,new_cap): #To increase the capacity
B = self.make_array(new_cap) #The new array with bigger capacity
for k in range(self.n): #For all the existing values
B[k] = self.A[k] #Reference all the existing values from A in B
self.A = B #Assigning our array to the new bigger one
self.capacity = new_cap #Updating capacity
def make_array(self,new_cap): #Allocate a new low-level array with the given capacity
return (new_cap * ctypes.py_object)() #ctypes array of new_cap Python object slots
arr = DynamicArray()
arr.append(1)
print(len(arr))
arr.append(2)
arr.append(3)
print(sys.getsizeof(arr))
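# Demonstration of the doubling behaviour (a sketch using the object above):
# each append that hits the current capacity triggers _resize(2 * capacity),
# so capacity grows 1 -> 2 -> 4 -> 8 -> 16 while n grows one step at a time.
for i in range(4, 10):
    arr.append(i)
print(len(arr))      # 9
print(arr.capacity)  # 16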
| Emre-Yaz/emre-yaz | DS-A/ArraySequences/DynamicArrayImp.py | DynamicArrayImp.py | py | 1,536 | python | en | code | 1 | github-code | 6 |
| 22949757973 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# k2hat.py - Waqas Bhatti ([email protected]) - 07/15
# License: MIT. See the LICENCE file for license text.
'''
This contains functions for reading K2 CSV light-curves produced by the HAT
Project into a Python dictionary. Requires numpy.
The only external function here is::
read_csv_lightcurve(lcfile)
Example:
Reading the best aperture LC for EPIC201183188 = UCAC4-428-055298 (see
http://k2.hatsurveys.org to search for this object and download the light
curve):
>>> import k2hat
>>> lcdict = k2hat.read_csv_lightcurve('UCAC4-428-055298-75d3f4357b314ff5ac458e917e6dfeb964877b60affe9193d4f65088-k2lc.csv.gz')
The Python dict lcdict contains the metadata and all columns.
>>> lcdict.keys()
['decl', 'objectid', 'bjdoffset', 'qualflag', 'fovchannel', 'BGV',
'aperpixradius', 'IM04', 'TF17', 'EP01', 'CF01', 'ra', 'fovmodule', 'columns',
'k2campaign', 'EQ01', 'fovccd', 'FRN', 'IE04', 'kepid', 'YCC', 'XCC', 'BJD',
'napertures', 'ucac4id', 'IQ04', 'kepmag', 'ndet','kernelspec']
The columns for the light curve are stored in the columns key of the dict. To
get a list of the columns:
>>> lcdict['columns']
['BJD', 'BGV', 'FRN', 'XCC', 'YCC', 'IM04', 'IE04', 'IQ04', 'EP01', 'EQ01',
'TF17', 'CF01']
To get columns:
>>> bjd, epdmags = lcdict['BJD'], lcdict['EP01']
>>> bjd
array([ 2456808.1787283, 2456808.1991608, 2456808.2195932, ...,
2456890.2535691, 2456890.274001 , 2456890.2944328])
>>> epdmags
array([ 16.03474, 16.02773, 16.01826, ..., 15.76997, 15.76577,
15.76263])
'''
# put this in here because k2hat can be used as a standalone module
__version__ = '0.5.3'
#############
## LOGGING ##
#############
# the basic logging styles common to all astrobase modules
log_sub = '{'
log_fmt = '[{levelname:1.1} {asctime} {module}:{lineno}] {message}'
log_date_fmt = '%y%m%d %H:%M:%S'
import logging
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import os.path
import gzip
import numpy as np
########################
## COLUMN DEFINITIONS ##
########################
# LC column definitions
# the first elem is the column description, the second is the format to use when
# writing a CSV LC column, the third is the type to use when parsing a CSV LC
# column
COLUMNDEFS = {
'BJD':['time in Barycentric Julian Date','%.7f',float],
'BGV':['Background value (ADU)','%.5f',float],
'BGE':['Background error (ADU)','%.5f',float],
'FRN':['cadence number of observation','%i',int],
'XCC':['x coordinate on module', '%.3f',float],
'YCC':['y coordinate on module', '%.3f',float],
'ARC':['arc length parameter', '%.3f', float],
# APERture 00
'IM00':['K2 instrumental magnitude (aperture 00)','%.5f',float],
'IE00':['K2 instrumental mag. error (aperture 00)','%.5f',float],
'IQ00':['K2 instrumental mag. quality flag (aperture 00)','%s',str],
'EP00':['detrended magnitude (aperture 00)','%.5f',float],
'EQ00':['detrended mag. quality flag (aperture 00)','%i',int],
'TF00':['TFA magnitude (aperture 00)','%.5f',float],
'CF00':['Cosine filtered magnitude (aperture 00)','%.5f',float],
# APERture 01
'IM01':['K2 instrumental magnitude (aperture 01)','%.5f',float],
'IE01':['K2 instrumental mag. error (aperture 01)','%.5f',float],
'IQ01':['K2 instrumental mag. quality flag (aperture 01)','%s',str],
'EP01':['detrended magnitude (aperture 01)','%.5f',float],
'EQ01':['detrended mag. quality flag (aperture 01)','%i',int],
'TF01':['TFA magnitude (aperture 01)','%.5f',float],
'CF01':['Cosine filtered magnitude (aperture 01)','%.5f',float],
# APERture 02
'IM02':['K2 instrumental magnitude (aperture 02)','%.5f',float],
'IE02':['K2 instrumental mag. error (aperture 02)','%.5f',float],
'IQ02':['K2 instrumental mag. quality flag (aperture 02)','%s',str],
'EP02':['detrended magnitude (aperture 02)','%.5f',float],
'EQ02':['detrended mag. quality flag (aperture 02)','%i',int],
'TF02':['TFA magnitude (aperture 02)','%.5f',float],
'CF02':['Cosine filtered magnitude (aperture 02)','%.5f',float],
# APERture 03
'IM03':['K2 instrumental magnitude (aperture 03)','%.5f',float],
'IE03':['K2 instrumental mag. error (aperture 03)','%.5f',float],
'IQ03':['K2 instrumental mag. quality flag (aperture 03)','%s',str],
'EP03':['detrended magnitude (aperture 03)','%.5f',float],
'EQ03':['detrended mag. quality flag (aperture 03)','%i',int],
'TF03':['TFA magnitude (aperture 03)','%.5f',float],
'CF03':['Cosine filtered magnitude (aperture 03)','%.5f',float],
# APERture 04
'IM04':['K2 instrumental magnitude (aperture 04)','%.5f',float],
'IE04':['K2 instrumental mag. error (aperture 04)','%.5f',float],
'IQ04':['K2 instrumental mag. quality flag (aperture 04)','%s',str],
'EP04':['detrended magnitude (aperture 04)','%.5f',float],
'EQ04':['detrended mag. quality flag (aperture 04)','%i',int],
'TF04':['TFA magnitude (aperture 04)','%.5f',float],
'CF04':['Cosine filtered magnitude (aperture 04)','%.5f',float],
# APERture 05
'IM05':['K2 instrumental magnitude (aperture 05)','%.5f',float],
'IE05':['K2 instrumental mag. error (aperture 05)','%.5f',float],
'IQ05':['K2 instrumental mag. quality flag (aperture 05)','%s',str],
'EP05':['detrended magnitude (aperture 05)','%.5f',float],
'EQ05':['detrended mag. quality flag (aperture 05)','%i',int],
'TF05':['TFA magnitude (aperture 05)','%.5f',float],
'CF05':['Cosine filtered magnitude (aperture 05)','%.5f',float],
# APERture 06
'IM06':['K2 instrumental magnitude (aperture 06)','%.5f',float],
'IE06':['K2 instrumental mag. error (aperture 06)','%.5f',float],
'IQ06':['K2 instrumental mag. quality flag (aperture 06)','%s',str],
'EP06':['detrended magnitude (aperture 06)','%.5f',float],
'EQ06':['detrended mag. quality flag (aperture 06)','%i',int],
'TF06':['TFA magnitude (aperture 06)','%.5f',float],
'CF06':['Cosine filtered magnitude (aperture 06)','%.5f',float],
# APERture 07
'IM07':['K2 instrumental magnitude (aperture 07)','%.5f',float],
'IE07':['K2 instrumental mag. error (aperture 07)','%.5f',float],
'IQ07':['K2 instrumental mag. quality flag (aperture 07)','%s',str],
'EP07':['detrended magnitude (aperture 07)','%.5f',float],
'EQ07':['detrended mag. quality flag (aperture 07)','%i',int],
'TF07':['TFA magnitude (aperture 07)','%.5f',float],
'CF07':['Cosine filtered magnitude (aperture 07)','%.5f',float],
# APERture 08
'IM08':['K2 instrumental magnitude (aperture 08)','%.5f',float],
'IE08':['K2 instrumental mag. error (aperture 08)','%.5f',float],
'IQ08':['K2 instrumental mag. quality flag (aperture 08)','%s',str],
'EP08':['detrended magnitude (aperture 08)','%.5f',float],
'EQ08':['detrended mag. quality flag (aperture 08)','%i',int],
'TF08':['TFA magnitude (aperture 08)','%.5f',float],
'CF08':['Cosine filtered magnitude (aperture 08)','%.5f',float],
# APERture 09
'IM09':['K2 instrumental magnitude (aperture 09)','%.5f',float],
'IE09':['K2 instrumental mag. error (aperture 09)','%.5f',float],
'IQ09':['K2 instrumental mag. quality flag (aperture 09)','%s',str],
'EP09':['detrended magnitude (aperture 09)','%.5f',float],
'EQ09':['detrended mag. quality flag (aperture 09)','%i',int],
'TF09':['TFA magnitude (aperture 09)','%.5f',float],
'CF09':['Cosine filtered magnitude (aperture 09)','%.5f',float],
# APERture 10
'IM10':['K2 instrumental magnitude (aperture 10)','%.5f',float],
'IE10':['K2 instrumental mag. error (aperture 10)','%.5f',float],
'IQ10':['K2 instrumental mag. quality flag (aperture 10)','%s',str],
'EP10':['detrended magnitude (aperture 10)','%.5f',float],
'EQ10':['detrended mag. quality flag (aperture 10)','%i',int],
'TF10':['TFA magnitude (aperture 10)','%.5f',float],
'CF10':['Cosine filtered magnitude (aperture 10)','%.5f',float],
# APERture 11
'IM11':['K2 instrumental magnitude (aperture 11)','%.5f',float],
'IE11':['K2 instrumental mag. error (aperture 11)','%.5f',float],
'IQ11':['K2 instrumental mag. quality flag (aperture 11)','%s',str],
'EP11':['detrended magnitude (aperture 11)','%.5f',float],
'EQ11':['detrended mag. quality flag (aperture 11)','%i',int],
'TF11':['TFA magnitude (aperture 11)','%.5f',float],
'CF11':['Cosine filtered magnitude (aperture 11)','%.5f',float],
# APERture 12
'IM12':['K2 instrumental magnitude (aperture 12)','%.5f',float],
'IE12':['K2 instrumental mag. error (aperture 12)','%.5f',float],
'IQ12':['K2 instrumental mag. quality flag (aperture 12)','%s',str],
'EP12':['detrended magnitude (aperture 12)','%.5f',float],
'EQ12':['detrended mag. quality flag (aperture 12)','%i',int],
'TF12':['TFA magnitude (aperture 12)','%.5f',float],
'CF12':['Cosine filtered magnitude (aperture 12)','%.5f',float],
# APERture 13
'IM13':['K2 instrumental magnitude (aperture 13)','%.5f',float],
'IE13':['K2 instrumental mag. error (aperture 13)','%.5f',float],
'IQ13':['K2 instrumental mag. quality flag (aperture 13)','%s',str],
'EP13':['detrended magnitude (aperture 13)','%.5f',float],
'EQ13':['detrended mag. quality flag (aperture 13)','%i',int],
'TF13':['TFA magnitude (aperture 13)','%.5f',float],
'CF13':['Cosine filtered magnitude (aperture 13)','%.5f',float],
# APERture 14
'IM14':['K2 instrumental magnitude (aperture 14)','%.5f',float],
'IE14':['K2 instrumental mag. error (aperture 14)','%.5f',float],
'IQ14':['K2 instrumental mag. quality flag (aperture 14)','%s',str],
'EP14':['detrended magnitude (aperture 14)','%.5f',float],
'EQ14':['detrended mag. quality flag (aperture 14)','%i',int],
'TF14':['TFA magnitude (aperture 14)','%.5f',float],
'CF14':['Cosine filtered magnitude (aperture 14)','%.5f',float],
# APERture 15
'IM15':['K2 instrumental magnitude (aperture 15)','%.5f',float],
'IE15':['K2 instrumental mag. error (aperture 15)','%.5f',float],
'IQ15':['K2 instrumental mag. quality flag (aperture 15)','%s',str],
'EP15':['detrended magnitude (aperture 15)','%.5f',float],
'EQ15':['detrended mag. quality flag (aperture 15)','%i',int],
'TF15':['TFA magnitude (aperture 15)','%.5f',float],
'CF15':['Cosine filtered magnitude (aperture 15)','%.5f',float],
# APERture 16
'IM16':['K2 instrumental magnitude (aperture 16)','%.5f',float],
'IE16':['K2 instrumental mag. error (aperture 16)','%.5f',float],
'IQ16':['K2 instrumental mag. quality flag (aperture 16)','%s',str],
'EP16':['detrended magnitude (aperture 16)','%.5f',float],
'EQ16':['detrended mag. quality flag (aperture 16)','%i',int],
'TF16':['TFA magnitude (aperture 16)','%.5f',float],
'CF16':['Cosine filtered magnitude (aperture 16)','%.5f',float],
# APERture 17
'IM17':['K2 instrumental magnitude (aperture 17)','%.5f',float],
'IE17':['K2 instrumental mag. error (aperture 17)','%.5f',float],
'IQ17':['K2 instrumental mag. quality flag (aperture 17)','%s',str],
'EP17':['detrended magnitude (aperture 17)','%.5f',float],
'EQ17':['detrended mag. quality flag (aperture 17)','%i',int],
'TF17':['TFA magnitude (aperture 17)','%.5f',float],
'CF17':['Cosine filtered magnitude (aperture 17)','%.5f',float],
# APERture 18
'IM18':['K2 instrumental magnitude (aperture 18)','%.5f',float],
'IE18':['K2 instrumental mag. error (aperture 18)','%.5f',float],
'IQ18':['K2 instrumental mag. quality flag (aperture 18)','%s',str],
'EP18':['detrended magnitude (aperture 18)','%.5f',float],
'EQ18':['detrended mag. quality flag (aperture 18)','%i',int],
'TF18':['TFA magnitude (aperture 18)','%.5f',float],
'CF18':['Cosine filtered magnitude (aperture 18)','%.5f',float],
# APERture 19
'IM19':['K2 instrumental magnitude (aperture 19)','%.5f',float],
'IE19':['K2 instrumental mag. error (aperture 19)','%.5f',float],
'IQ19':['K2 instrumental mag. quality flag (aperture 19)','%s',str],
'EP19':['detrended magnitude (aperture 19)','%.5f',float],
'EQ19':['detrended mag. quality flag (aperture 19)','%i',int],
'TF19':['TFA magnitude (aperture 19)','%.5f',float],
'CF19':['Cosine filtered magnitude (aperture 19)','%.5f',float],
# APERture 20
'IM20':['K2 instrumental magnitude (aperture 20)','%.5f',float],
'IE20':['K2 instrumental mag. error (aperture 20)','%.5f',float],
'IQ20':['K2 instrumental mag. quality flag (aperture 20)','%s',str],
'EP20':['detrended magnitude (aperture 20)','%.5f',float],
'EQ20':['detrended mag. quality flag (aperture 20)','%i',int],
'TF20':['TFA magnitude (aperture 20)','%.5f',float],
'CF20':['Cosine filtered magnitude (aperture 20)','%.5f',float],
# APERture 21
'IM21':['K2 instrumental magnitude (aperture 21)','%.5f',float],
'IE21':['K2 instrumental mag. error (aperture 21)','%.5f',float],
'IQ21':['K2 instrumental mag. quality flag (aperture 21)','%s',str],
'EP21':['detrended magnitude (aperture 21)','%.5f',float],
'EQ21':['detrended mag. quality flag (aperture 21)','%i',int],
'TF21':['TFA magnitude (aperture 21)','%.5f',float],
'CF21':['Cosine filtered magnitude (aperture 21)','%.5f',float],
# APERture 22
'IM22':['K2 instrumental magnitude (aperture 22)','%.5f',float],
'IE22':['K2 instrumental mag. error (aperture 22)','%.5f',float],
'IQ22':['K2 instrumental mag. quality flag (aperture 22)','%s',str],
'EP22':['detrended magnitude (aperture 22)','%.5f',float],
'EQ22':['detrended mag. quality flag (aperture 22)','%i',int],
'TF22':['TFA magnitude (aperture 22)','%.5f',float],
'CF22':['Cosine filtered magnitude (aperture 22)','%.5f',float],
# APERture 23
'IM23':['K2 instrumental magnitude (aperture 23)','%.5f',float],
'IE23':['K2 instrumental mag. error (aperture 23)','%.5f',float],
'IQ23':['K2 instrumental mag. quality flag (aperture 23)','%s',str],
'EP23':['detrended magnitude (aperture 23)','%.5f',float],
'EQ23':['detrended mag. quality flag (aperture 23)','%i',int],
'TF23':['TFA magnitude (aperture 23)','%.5f',float],
'CF23':['Cosine filtered magnitude (aperture 23)','%.5f',float],
# APERture 24
'IM24':['K2 instrumental magnitude (aperture 24)','%.5f',float],
'IE24':['K2 instrumental mag. error (aperture 24)','%.5f',float],
'IQ24':['K2 instrumental mag. quality flag (aperture 24)','%s',str],
'EP24':['detrended magnitude (aperture 24)','%.5f',float],
'EQ24':['detrended mag. quality flag (aperture 24)','%i',int],
'TF24':['TFA magnitude (aperture 24)','%.5f',float],
'CF24':['Cosine filtered magnitude (aperture 24)','%.5f',float],
# APERture 25
'IM25':['K2 instrumental magnitude (aperture 25)','%.5f',float],
'IE25':['K2 instrumental mag. error (aperture 25)','%.5f',float],
'IQ25':['K2 instrumental mag. quality flag (aperture 25)','%s',str],
'EP25':['detrended magnitude (aperture 25)','%.5f',float],
'EQ25':['detrended mag. quality flag (aperture 25)','%i',int],
'TF25':['TFA magnitude (aperture 25)','%.5f',float],
'CF25':['Cosine filtered magnitude (aperture 25)','%.5f',float],
# APERture 26
'IM26':['K2 instrumental magnitude (aperture 26)','%.5f',float],
'IE26':['K2 instrumental mag. error (aperture 26)','%.5f',float],
'IQ26':['K2 instrumental mag. quality flag (aperture 26)','%s',str],
'EP26':['detrended magnitude (aperture 26)','%.5f',float],
'EQ26':['detrended mag. quality flag (aperture 26)','%i',int],
'TF26':['TFA magnitude (aperture 26)','%.5f',float],
'CF26':['Cosine filtered magnitude (aperture 26)','%.5f',float],
# APERture 27
'IM27':['K2 instrumental magnitude (aperture 27)','%.5f',float],
'IE27':['K2 instrumental mag. error (aperture 27)','%.5f',float],
'IQ27':['K2 instrumental mag. quality flag (aperture 27)','%s',str],
'EP27':['detrended magnitude (aperture 27)','%.5f',float],
'EQ27':['detrended mag. quality flag (aperture 27)','%i',int],
'TF27':['TFA magnitude (aperture 27)','%.5f',float],
'CF27':['Cosine filtered magnitude (aperture 27)','%.5f',float],
# APERture 28
'IM28':['K2 instrumental magnitude (aperture 28)','%.5f',float],
'IE28':['K2 instrumental mag. error (aperture 28)','%.5f',float],
'IQ28':['K2 instrumental mag. quality flag (aperture 28)','%s',str],
'EP28':['detrended magnitude (aperture 28)','%.5f',float],
'EQ28':['detrended mag. quality flag (aperture 28)','%i',int],
'TF28':['TFA magnitude (aperture 28)','%.5f',float],
'CF28':['Cosine filtered magnitude (aperture 28)','%.5f',float],
# APERture 29
'IM29':['K2 instrumental magnitude (aperture 29)','%.5f',float],
'IE29':['K2 instrumental mag. error (aperture 29)','%.5f',float],
'IQ29':['K2 instrumental mag. quality flag (aperture 29)','%s',str],
'EP29':['detrended magnitude (aperture 29)','%.5f',float],
'EQ29':['detrended mag. quality flag (aperture 29)','%i',int],
'TF29':['TFA magnitude (aperture 29)','%.5f',float],
'CF29':['Cosine filtered magnitude (aperture 29)','%.5f',float],
# APERture 30
'IM30':['K2 instrumental magnitude (aperture 30)','%.5f',float],
'IE30':['K2 instrumental mag. error (aperture 30)','%.5f',float],
'IQ30':['K2 instrumental mag. quality flag (aperture 30)','%s',str],
'EP30':['detrended magnitude (aperture 30)','%.5f',float],
'EQ30':['detrended mag. quality flag (aperture 30)','%i',int],
'TF30':['TFA magnitude (aperture 30)','%.5f',float],
'CF30':['Cosine filtered magnitude (aperture 30)','%.5f',float],
# APERture 31
'IM31':['K2 instrumental magnitude (aperture 31)','%.5f',float],
'IE31':['K2 instrumental mag. error (aperture 31)','%.5f',float],
'IQ31':['K2 instrumental mag. quality flag (aperture 31)','%s',str],
'EP31':['detrended magnitude (aperture 31)','%.5f',float],
'EQ31':['detrended mag. quality flag (aperture 31)','%i',int],
'TF31':['TFA magnitude (aperture 31)','%.5f',float],
'CF31':['Cosine filtered magnitude (aperture 31)','%.5f',float],
# APERture 32
'IM32':['K2 instrumental magnitude (aperture 32)','%.5f',float],
'IE32':['K2 instrumental mag. error (aperture 32)','%.5f',float],
'IQ32':['K2 instrumental mag. quality flag (aperture 32)','%s',str],
'EP32':['detrended magnitude (aperture 32)','%.5f',float],
'EQ32':['detrended mag. quality flag (aperture 32)','%i',int],
'TF32':['TFA magnitude (aperture 32)','%.5f',float],
'CF32':['Cosine filtered magnitude (aperture 32)','%.5f',float],
# APERture 33
'IM33':['K2 instrumental magnitude (aperture 33)','%.5f',float],
'IE33':['K2 instrumental mag. error (aperture 33)','%.5f',float],
'IQ33':['K2 instrumental mag. quality flag (aperture 33)','%s',str],
'EP33':['detrended magnitude (aperture 33)','%.5f',float],
'EQ33':['detrended mag. quality flag (aperture 33)','%i',int],
'TF33':['TFA magnitude (aperture 33)','%.5f',float],
'CF33':['Cosine filtered magnitude (aperture 33)','%.5f',float],
# APERture 34
'IM34':['K2 instrumental magnitude (aperture 34)','%.5f',float],
'IE34':['K2 instrumental mag. error (aperture 34)','%.5f',float],
'IQ34':['K2 instrumental mag. quality flag (aperture 34)','%s',str],
'EP34':['detrended magnitude (aperture 34)','%.5f',float],
'EQ34':['detrended mag. quality flag (aperture 34)','%i',int],
'TF34':['TFA magnitude (aperture 34)','%.5f',float],
'CF34':['Cosine filtered magnitude (aperture 34)','%.5f',float],
# APERture 35
'IM35':['K2 instrumental magnitude (aperture 35)','%.5f',float],
'IE35':['K2 instrumental mag. error (aperture 35)','%.5f',float],
'IQ35':['K2 instrumental mag. quality flag (aperture 35)','%s',str],
'EP35':['detrended magnitude (aperture 35)','%.5f',float],
'EQ35':['detrended mag. quality flag (aperture 35)','%i',int],
'TF35':['TFA magnitude (aperture 35)','%.5f',float],
'CF35':['Cosine filtered magnitude (aperture 35)','%.5f',float],
}
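# Quick illustration (sketch) of how a COLUMNDEFS entry is used: the third
# element is the caster that read_csv_lightcurve() applies to each raw CSV
# string in that column.
#   desc, fmt, caster = COLUMNDEFS['BJD']
#   caster('2456808.1787283')  # -> 2456808.1787283 (a float)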
##################################
## FUNCTIONS TO READ K2 HAT LCS ##
##################################
def _parse_csv_header(header):
'''This parses a CSV header from a K2 CSV LC.
Returns a dict that can be used to update an existing lcdict with the
relevant metadata info needed to form a full LC.
'''
# first, break into lines
headerlines = header.split('\n')
headerlines = [x.lstrip('# ') for x in headerlines]
# next, find the indices of the '# COLUMNS' line and '# LIGHTCURVE' line
metadatastart = headerlines.index('METADATA')
columnstart = headerlines.index('COLUMNS')
lcstart = headerlines.index('LIGHTCURVE')
# get the lines for the metadata and columndefs
metadata = headerlines[metadatastart+1:columnstart-1]
columndefs = headerlines[columnstart+1:lcstart-1]
# parse the metadata
metainfo = [x.split(',') for x in metadata][:-1]
aperpixradius = metadata[-1]
objectid, kepid, ucac4id, kepmag = metainfo[0]
objectid, kepid, ucac4id, kepmag = (objectid.split(' = ')[-1],
kepid.split(' = ')[-1],
ucac4id.split(' = ')[-1],
kepmag.split(' = ')[-1])
kepmag = float(kepmag) if kepmag else None
ra, decl, ndet, k2campaign = metainfo[1]
ra, decl, ndet, k2campaign = (ra.split(' = ')[-1],
decl.split(' = ')[-1],
int(ndet.split(' = ')[-1]),
int(k2campaign.split(' = ')[-1]))
fovccd, fovchannel, fovmodule = metainfo[2]
fovccd, fovchannel, fovmodule = (int(fovccd.split(' = ')[-1]),
int(fovchannel.split(' = ')[-1]),
int(fovmodule.split(' = ')[-1]))
try:
qualflag, bjdoffset, napertures = metainfo[3]
qualflag, bjdoffset, napertures = (int(qualflag.split(' = ')[-1]),
float(bjdoffset.split(' = ')[-1]),
int(napertures.split(' = ')[-1]))
kernelspec = None
except Exception:
qualflag, bjdoffset, napertures, kernelspec = metainfo[3]
qualflag, bjdoffset, napertures, kernelspec = (
int(qualflag.split(' = ')[-1]),
float(bjdoffset.split(' = ')[-1]),
int(napertures.split(' = ')[-1]),
str(kernelspec.split(' = ')[-1])
)
aperpixradius = aperpixradius.split(' = ')[-1].split(',')
aperpixradius = [float(x) for x in aperpixradius]
# parse the columndefs
columns = [x.split(' - ')[1] for x in columndefs]
metadict = {'objectid':objectid,
'objectinfo':{
'objectid':objectid,
'kepid':kepid,
'ucac4id':ucac4id,
'kepmag':kepmag,
'ra':ra,
'decl':decl,
'ndet':ndet,
'k2campaign':k2campaign,
'fovccd':fovccd,
'fovchannel':fovchannel,
'fovmodule':fovmodule,
'qualflag':qualflag,
'bjdoffset':bjdoffset,
'napertures':napertures,
'kernelspec':kernelspec,
'aperpixradius':aperpixradius,
},
'columns':columns}
return metadict
def read_csv_lightcurve(lcfile):
'''
This reads in a K2 lightcurve in CSV format. Transparently reads gzipped
files.
Parameters
----------
lcfile : str
The light curve file to read.
Returns
-------
dict
Returns an lcdict.
'''
# read in the file first
if '.gz' in os.path.basename(lcfile):
LOGINFO('reading gzipped K2 LC: %s' % lcfile)
infd = gzip.open(lcfile,'rb')
else:
LOGINFO('reading K2 LC: %s' % lcfile)
infd = open(lcfile,'rb')
lctext = infd.read().decode()
infd.close()
# figure out the header and get the LC columns
lcstart = lctext.index('# LIGHTCURVE\n')
lcheader = lctext[:lcstart+12]
lccolumns = lctext[lcstart+13:].split('\n')
lccolumns = [x.split(',') for x in lccolumns if len(x) > 0]
# initialize the lcdict and parse the CSV header
lcdict = _parse_csv_header(lcheader)
    # transpose the LC rows into columns
lccolumns = list(zip(*lccolumns))
# write the columns to the dict
for colind, col in enumerate(lcdict['columns']):
# this picks out the caster to use when reading each column using the
# definitions in the lcutils.COLUMNDEFS dictionary
lcdict[col.lower()] = np.array([COLUMNDEFS[col][2](x)
for x in lccolumns[colind]])
lcdict['columns'] = [x.lower() for x in lcdict['columns']]
return lcdict
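# A minimal usage sketch (added; hedged): meant to run inside this module,
# since it relies on read_csv_lightcurve and the COLUMNDEFS above. The file
# name below is hypothetical; real K2 HAT CSV light curves follow this layout.
if __name__ == '__main__':
    lcd = read_csv_lightcurve('HAT-999-0000000-k2lc.csv.gz')
    print(lcd['objectid'], lcd['objectinfo']['kepmag'], lcd['objectinfo']['ndet'])
    # column names are lower-cased when stored on the lcdict, e.g. 'bjd', 'im01'
    print(lcd['columns'][:5])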
|
waqasbhatti/astrobase
|
astrobase/hatsurveys/k2hat.py
|
k2hat.py
|
py
| 25,449 |
python
|
en
|
code
| 50 |
github-code
|
6
|
30906484831
|
from reportlab.platypus import (SimpleDocTemplate, Paragraph, PageBreak, Image, Spacer, Table, TableStyle)
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.pdfgen import canvas
from reportlab.graphics.shapes import Line, LineShape, Drawing
from reportlab.lib.pagesizes import LETTER, inch
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.lib.colors import Color
from scripts.python.pdfGen.table_generation import table_handler
from datetime import datetime
pdfmetrics.registerFont(TTFont('Poppins-Bold', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-Bold.ttf'))
pdfmetrics.registerFont(TTFont('Poppins-Light', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-Light.ttf'))
pdfmetrics.registerFont(TTFont('Poppins-Medium', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-Medium.ttf'))
pdfmetrics.registerFont(TTFont('Poppins-Regular', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-Regular.ttf'))
pdfmetrics.registerFont(TTFont('Poppins-SemiBold', '/home/eggzo/airflow/scripts/python/pdfGen/fonts/Poppins-SemiBold.ttf'))
class page_format_handler(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self.pages = []
self.width, self.height = LETTER
def showPage(self):
self.pages.append(dict(self.__dict__))
self._startPage()
def save(self):
page_count = len(self.pages)
for page in self.pages:
self.__dict__.update(page)
if (self._pageNumber > 1):
self.draw_canvas(page_count)
elif (self._pageNumber <= 1):
self.draw_front_page()
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
# Function to handle header and footer sans front page
def draw_canvas(self, page_count):
page = "Page %s of %s" % (self._pageNumber, page_count)
copyright = f'ยฉ 2022 - {datetime.today().year} Holmly Ltd. All Rights Reserved'
x = 128
self.saveState()
self.setStrokeColorRGB(0, 0, 0)
self.setLineWidth(0.5)
self.setFont('Poppins-Bold', 16)
self.drawImage("/home/eggzo/airflow/scripts/python/pdfGen/sps_logo.png", self.width - inch * 9 + 30, self.height - 45, width=100, height=35,
preserveAspectRatio=True, mask='auto')
self.drawString(66, 755, "Scottish Property Sourcing")
self.line(30, 740, LETTER[0] - 50, 740)
self.line(66, 78, LETTER[0] - 66, 78)
self.setFont('Poppins-Light', 10)
self.drawString(LETTER[0] - x, 65, page)
self.drawString(66, 65, copyright)
self.restoreState()
# Function to handle formatting for the front page
def draw_front_page(self):
self.saveState()
self.setFont('Poppins-Light', 10)
self.drawImage("/home/eggzo/airflow/scripts/python/pdfGen/sps_logo.png", inch * 4 - 20, -inch * 0.3, width=700, height=700,
preserveAspectRatio=True, mask='auto')
self.restoreState()
class colour_handler():
def __init__(self):
self.colour_theme = {}
def add_colour(self, colour_name, r, g, b, alpha_val):
self.colour_theme[colour_name] = Color((r / 255), (g / 255), (b / 255), alpha=alpha_val)
def gc(self, colour_name):
return(self.colour_theme[colour_name])
def front_page(elements):
title_style = ParagraphStyle('title', fontName='Poppins-Bold', fontSize=70, leading=72,
alignment=TA_LEFT, leftIndent=0)
subtitle_style = ParagraphStyle('title', fontName='Poppins-SemiBold', fontSize=36, leading=72,
alignment=TA_LEFT, leftIndent=0)
summary_style = ParagraphStyle('summary', fontName='Poppins-Light', fontSize=12, leading=20, justifyBreaks=1,
alignment=TA_LEFT, justifyLastLine=1)
title_text = 'Scottish Property Sourcing'
subtitle_text = 'Daily Report'
summary_text = f"""
Report Type: Top Properties For Sale<br/>
Publication Date: {datetime.today().strftime("%b %d %Y")}<br/>
"""
title = Paragraph(title_text, title_style)
elements.append(title)
spacer = Spacer(10, 280)
elements.append(spacer)
subtitle = Paragraph(subtitle_text, subtitle_style)
elements.append(subtitle)
spacer = Spacer(10, 10)
elements.append(spacer)
paragraph_report_summary = Paragraph(summary_text, summary_style)
elements.append(paragraph_report_summary)
elements.append(PageBreak())
return elements
def information_page(elements, colours, title, description, input_dataframe):
page_title_style = ParagraphStyle('Hed0', fontSize=16, alignment=TA_LEFT, borderWidth=3,
textColor=colours.gc('colorGreen0'))
normal_style = ParagraphStyle('summary', fontName='Poppins-Light', fontSize=12, leading=20, justifyBreaks=1,
alignment=TA_LEFT, justifyLastLine=1)
page_title = Paragraph(title, page_title_style)
page_description = Paragraph(description, normal_style)
elements.append(page_title)
spacer = Spacer(10, 10)
elements.append(spacer)
d = Drawing(500, 1)
line = Line(-15, 0, 483, 0)
line.strokeColor = colours.gc('colorBlue0')
line.strokeWidth = 2
d.add(line)
elements.append(d)
spacer = Spacer(10, 1)
elements.append(spacer)
d = Drawing(500, 1)
line = Line(-15, 0, 483, 0)
line.strokeColor = colours.gc('colorBlue0')
line.strokeWidth = 0.5
d.add(line)
elements.append(d)
spacer = Spacer(10, 10)
elements.append(spacer)
elements.append(page_description)
elements = table_handler(elements, input_dataframe, colours)
return elements
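# Hedged wiring sketch (added): shows how these pieces are typically assembled.
# page_format_handler is passed as the canvasmaker so the front page and the
# header/footer get drawn; the colour values, output path, and the dummy
# dataframe fed to table_handler() via information_page() are assumptions.
if __name__ == '__main__':
    import pandas as pd
    colours = colour_handler()
    colours.add_colour('colorGreen0', 46, 139, 87, 1)
    colours.add_colour('colorBlue0', 70, 130, 180, 1)
    elements = front_page([])
    demo_df = pd.DataFrame({'Address': ['1 Example Street'], 'Price': [100000]})
    elements = information_page(elements, colours, 'Top Properties For Sale',
                                'Illustrative description text.', demo_df)
    doc = SimpleDocTemplate('daily_report.pdf', pagesize=LETTER)
    doc.build(elements, canvasmaker=page_format_handler)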
|
GregorMonsonFD/holmly_sourcing_legacy
|
scripts/python/pdfGen/page_format_handler.py
|
page_format_handler.py
|
py
| 5,949 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3847347050
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import tinyarray
import matplotlib.pyplot as plt
from scipy.sparse import spdiags
from scipy.sparse import eye
from scipy.sparse import kron
from scipy.sparse.linalg import inv
from scipy.sparse import csr_matrix
import adaptive
from functools import partial
from scipy.interpolate import griddata
import sys
from mpi4py.futures import MPIPoolExecutor
import argparse
s0 = tinyarray.array([[1, 0], [0, 1]]);
sx = tinyarray.array([[0, 1], [1, 0]]);
sy = tinyarray.array([[0, -1j], [1j, 0]]);
sz = tinyarray.array([[1, 0], [0, -1]]);
def hdis(a,mu,delta,vz,alpha_R,dim,vimp):
t=25/a**2
alpha=alpha_R/(2*a)
band11sm=spdiags(np.vstack([np.ones(dim),np.ones(dim)]),np.array([-1,1]),dim,dim,format = 'csr')
band1m1sm=spdiags(np.vstack([np.ones(dim),-np.ones(dim)]),np.array([-1,1]),dim,dim,format = 'csr')
eyesm=eye(dim)
mulist=mu*np.ones(dim)-vimp
diagmulist=spdiags(mulist,0,dim,dim)
return kron(sz,(kron(eye(2),-t*band11sm+(2*t)*eyesm-diagmulist)+kron(sy,1j*alpha*band1m1sm)))\
+kron(eye(2),kron(sz,vz*eyesm))+kron(sx,kron(eye(2),delta*eyesm))
def ldosall_dis(a,mu,Delta,Vz,alpha_R,mulist,dim,omega,delta):
ham=hdis(a,mu,Delta,Vz,alpha_R,dim,mulist);
hh=csr_matrix((omega+1j*delta)*eye(4*dim)-ham)
G=inv(hh)
Gdiag=(G).diagonal()
return -np.sum((np.reshape(Gdiag,(4,-1))),0).imag/np.pi
def LDOS_dis(p,a,mu,Delta,alpha_R,mulist,dim,delta):
Vz,energy=p
z=ldosall_dis(a,mu,Delta,Vz,alpha_R,mulist,dim,energy,delta)
return np.array([z.mean(),z[0],z[int(dim/2)],z[-1]])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--loss', default=0.1)
parser.add_argument('--dim', default=100)
parser.add_argument('--mu', default=1)
parser.add_argument('--Delta', default=0.2)
parser.add_argument('--alpha_R', default=5)
parser.add_argument('--muVar', default=0)
parser.add_argument('--mulist', default=0)
parser.add_argument('--NmuVar', default=0)
parser.add_argument('--Vzmax', default=2.048)
parser.add_argument('--Vbiasmax', default=0.3)
args = parser.parse_args();
print("loss = %s" % args.loss)
print("dim = %s" % args.dim)
print("mu = %s" % args.mu)
print("Delta = %s" % args.Delta)
print("alpha_R = %s" % args.alpha_R)
print("muVar = %s" % args.muVar)
print("mulist = %s" % args.mulist)
print("NmuVar = %s" % args.NmuVar)
print("Vzmax = %s" % args.Vzmax)
print("Vbiasmax = %s" % args.Vbiasmax)
loss=float(args.loss)
dim=int(args.dim)
mu=float(args.mu)
Delta=float(args.Delta)
alpha_R=float(args.alpha_R)
muVar=float(args.muVar)
NmuVar=float(args.NmuVar)
Vzmax=float(args.Vzmax)
Vbiasmax=float(args.Vbiasmax)
if isinstance(args.mulist,str):
muVarfn=args.mulist
print('Use disorder file:',muVarfn)
try:
mulist=np.loadtxt(muVarfn)
except:
print('Cannot find disorder file: ',muVarfn)
elif muVar!=0:
mulist=np.random.normal(0,muVar,int(NmuVar))
mulist=[mulist.flatten()[int(NmuVar/dim*x)] for x in range(dim)]
else:
mulist=args.mulist
fn='loss'+str(loss)+'m'+str(mu)+'D'+str(Delta)+'muVar'+str(muVar)+'L'+str(dim)
fname=fn+'.sav'
learner = adaptive.Learner2D(partial(LDOS_dis,a=1,mu=mu,Delta=Delta,alpha_R=alpha_R,mulist=mulist,dim=dim,delta=1e-3),\
bounds=[(0., Vzmax), (-Vbiasmax, Vbiasmax)])
learner.load(fname)
runner = adaptive.Runner(learner, executor=MPIPoolExecutor(),shutdown_executor=True,\
goal=lambda l: l.loss() < loss)
runner.start_periodic_saving(dict(fname=fname), interval=600)
runner.ioloop.run_until_complete(runner.task)
learner.save(fname)
dd=np.array(list(learner.data.items()))
dz=dd[:,1]
dx=np.empty(dd.shape[0])
dy=np.empty(dd.shape[0])
for i in range(dd.shape[0]):
dx[i],dy[i]=dd[i,0]
dz=np.vstack(dz)
dxx, dyy = np.meshgrid(np.linspace(0,Vzmax,401),np.linspace(-Vbiasmax,Vbiasmax,401))
dzz0 = griddata((dx,dy),dz[:,0],(dxx,dyy), method='linear')
dzz1 = griddata((dx,dy),dz[:,1],(dxx,dyy), method='linear')
dzz2 = griddata((dx,dy),dz[:,2],(dxx,dyy), method='linear')
dzz3 = griddata((dx,dy),dz[:,3],(dxx,dyy), method='linear')
fig,ax=plt.subplots()
ax.pcolormesh(dxx,dyy,dzz0)
fig.savefig(fn+'_DOS.png')
fig,ax=plt.subplots()
ax.pcolormesh(dxx,dyy,dzz1)
fig.savefig(fn+'_LDOS_L.png')
fig,ax=plt.subplots()
ax.pcolormesh(dxx,dyy,dzz2)
fig.savefig(fn+'_LDOS_M.png')
fig,ax=plt.subplots()
ax.pcolormesh(dxx,dyy,dzz3)
fig.savefig(fn+'_LDOS_R.png')
np.savetxt(fn+'_Vz.dat',dxx)
np.savetxt(fn+'_Vbias.dat',dyy)
np.savetxt(fn+'_DOS.dat',dzz0)
np.savetxt(fn+'_LDOS_L.dat',dzz1)
np.savetxt(fn+'_LDOS_M.dat',dzz2)
np.savetxt(fn+'_LDOS_R.dat',dzz3)
scatterpts=np.vstack([dx,dy,dz.T]).T
np.savetxt(fn+'_s.dat',scatterpts)
if muVar!=0:
np.savetxt(fn+'_randlist.dat',mulist)
if __name__=="__main__":
main()
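# Usage sketch (added): the MPIPoolExecutor above expects an MPI launch; a
# typical invocation (worker count and flag values are illustrative) is:
#
#   mpiexec -n 4 python -m mpi4py.futures Ldos_dis.py \
#       --loss 0.05 --dim 200 --mu 1 --Delta 0.2 --Vzmax 2.048 --Vbiasmax 0.3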
|
hainingpan/nanowire_matlab
|
Ldos_dis.py
|
Ldos_dis.py
|
py
| 5,110 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74740608828
|
from tkinter import *
from PIL import Image,ImageTk
from tkinter import messagebox
import pymysql
def bookRegister():
    ## When the user clicks the Submit button, this bookRegister function runs.
    # The book fields are read into these variables and then
    # uploaded to the database through the pymysql cursor.
bookid = bookInfo1.get()
title = bookInfo2.get()
title = title.upper()
author = bookInfo3.get()
author = author.upper()
status = bookInfo4
insertBook = "insert into "+bookTable+" values('"+bookid+"','"+title+"','"+author+"','"+status+"')"
print(insertBook) ### debug purpose
try:
cur.execute(insertBook)
con.commit()
messagebox.showinfo("Success","Added the book successfully")
    except:
        messagebox.showinfo("Error","Can't add to the database, an error occurred")
print(bookid)
print(title)
print(author) ###### debug purposes
print(status)
root.destroy()
def addBook():
global bookInfo1,bookInfo2,bookInfo3,bookInfo4,Canvas1,con,cur,bookTable,root
root = Tk() ##this creates a gui window
root.title("Library")
root.minsize(width=400,height=400)
root.geometry("600x500")
# Add your own database name and password here to reflect in the code
mypass = "abc_123"
mydatabase="library_db"
con = pymysql.connect(host="localhost",user="librarian",password=mypass,database=mydatabase)
cur = con.cursor()
# Enter Table Names here
bookTable = "books" # Book Table
Canvas1 = Canvas(root)
Canvas1.config(bg="#ff6e40")
Canvas1.pack(expand=True,fill=BOTH)
headingFrame1 = Frame(root,bg="#050300",bd=5)
headingFrame1.place(relx=0.25,rely=0.1,relwidth=0.5,relheight=0.13)
headingLabel = Label(headingFrame1, text="Add Books", bg='yellow', fg='blue', font=('Courier',15))
headingLabel.place(relx=0,rely=0, relwidth=1, relheight=1)
labelFrame = Frame(root,bg='black')
labelFrame.place(relx=0.1,rely=0.4,relwidth=0.8,relheight=0.4)
# Book ID
lb1 = Label(labelFrame,text="Book ID : ", bg='black', fg='white')
lb1.place(relx=0.05,rely=0.2, relheight=0.08)
bookInfo1 = Entry(labelFrame) ## creates the text entry box
bookInfo1.place(relx=0.3,rely=0.2, relwidth=0.62, relheight=0.08)
# Title
lb2 = Label(labelFrame,text="Title : ", bg='black', fg='white')
lb2.place(relx=0.05,rely=0.35, relheight=0.08)
bookInfo2 = Entry(labelFrame)
bookInfo2.place(relx=0.3,rely=0.35, relwidth=0.62, relheight=0.08)
# Book Author
lb3 = Label(labelFrame,text="Author : ", bg='black', fg='white')
lb3.place(relx=0.05,rely=0.50, relheight=0.08)
bookInfo3 = Entry(labelFrame)
bookInfo3.place(relx=0.3,rely=0.50, relwidth=0.62, relheight=0.08)
bookInfo4 = 'avail'
#Submit Button
SubmitBtn = Button(root,text="SUBMIT",bg='#d1ccc0', fg='red',command=bookRegister)
SubmitBtn.place(relx=0.28,rely=0.9, relwidth=0.18,relheight=0.08)
quitBtn = Button(root,text="Quit",bg='#f7f1e3', fg='red', command=root.destroy)
quitBtn.place(relx=0.53,rely=0.9, relwidth=0.18,relheight=0.08)
root.mainloop()
|
DarkCodeOrg/library_management_system
|
AddBook.py
|
AddBook.py
|
py
| 3,270 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10430336572
|
from pathlib import Path
import argparse
import sys
import random
from lib.conll import CoNLLReader
def main():
parser = argparse.ArgumentParser(description="""Extract data based on comments info""")
parser.add_argument('input', help="conllu file")
parser.add_argument('output', help="target file", type=Path)
parser.add_argument('--input-format', choices=['conll2006', 'conll2006dense', 'conllu'], default="conllu")
parser.add_argument('--mapping', help="mapping file", required=True)
args = parser.parse_args()
lines=[line.strip() for line in open(args.mapping)]
mapping={}
for line in lines:
commentpart, target = line.split()
mapping[commentpart] = target
print("loaded mapping:", mapping, file=sys.stderr)
cio = CoNLLReader()
if args.input_format == "conllu":
orig_treebank = cio.read_conll_u(args.input)
elif args.input_format == "conll2006":
orig_treebank = cio.read_conll_2006(args.input)
elif args.input_format == "conll2006dense":
orig_treebank = cio.read_conll_2006_dense(args.input)
num_trees = len(orig_treebank)
print("Loaded treebank {} with {} sentences".format(args.input,num_trees), file=sys.stderr)
split = {mapping[k] : [] for k in mapping.keys()}
default = "various"
split[default] = []
for tree in orig_treebank:
found_mapping=False
for token in " ".join(tree.graph['comment']).strip().split():
if token in mapping:
split[mapping[token]].append(tree)
found_mapping=True
continue
if not found_mapping:
split[default].append(tree)
for key in split:
print(key, len(split[key]), file=sys.stderr)
cio.write_conll(split[key], Path(args.output.name + "_" + key), "conll2006")
#sample = orig_treebank[0:args.k]
#print("sampled {} trees. seed: {}".format(len(sample), args.seed))
#cio.write_conll(sample, args.output, "conll2006")
if __name__ == "__main__":
main()
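# Usage sketch (added): the --mapping file holds one "comment-token target"
# pair per line; the tokens and file names below are illustrative assumptions.
#
#   $ cat mapping.txt
#   sent_id=weblog weblogs
#   sent_id=news newswire
#   $ python extract.py input.conllu out --mapping mapping.txt
#
# Trees whose comment lines contain a mapped token are written to
# out_<target>; everything else lands in out_various.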
|
coastalcph/ud-conversion-tools
|
extract.py
|
extract.py
|
py
| 2,049 |
python
|
en
|
code
| 3 |
github-code
|
6
|
40025821479
|
from django.urls import path
from . import views
from .views import (
TicketCreateView,
AssignCreateView,
StatusCreateView,
StatusLstCreateView,
CgyCreateView,
CgyListView,
TicketListView
)
urlpatterns = [
path('', views.home, name='sticket-home'),
path('categories', CgyListView.as_view(), name='categories'),
path('new_stkt', TicketCreateView.as_view(), name='new-stkt'),
path('new_assgn', AssignCreateView.as_view(), name='new-assgn'),
path('new_status', StatusCreateView.as_view(), name='new-status'),
path('add_status', StatusLstCreateView.as_view(), name='add-status'),
path('add_cgy', CgyCreateView.as_view(), name='add-cgy'),
path('ticket_list', TicketListView.as_view(), name='ticket_list')
]
|
uppgrayedd1/webapp
|
webapp/sticket/urls.py
|
urls.py
|
py
| 783 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43266445764
|
from urllib.request import urlopen
import json
import matplotlib.pyplot as plt
url = "http://cyrilserver.ddns.net:8080/hardware/esp32/all"
# store the response of URL
response = urlopen(url)
arrData = []
# storing the JSON response
# from url in data
data_json = json.loads(response.read())
for i in range(len(data_json)):
arrData.append(data_json[i]['data_esp32'])
plt.plot(arrData)
plt.show()
# print the json response
|
Monest-eco/Tools
|
graphData/allData.py
|
allData.py
|
py
| 434 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6836155009
|
# -*- coding: utf-8 -*-
from windows import DSFWindow, PlateResWindow
from epyparser import viia_parser, exparser
from matplotlib.pyplot import figure, show
from optim import linmelt
from numpy import array, sqrt  # scipy's array/sqrt re-exports were removed; import from numpy
import csv
def info(args):
if args.csv_wells is not None:
well_info = exparser(args.csv_wells,args.csvexp,args.csvregexp)
expnames = well_info.get_experiments()
print('Listed experiments:')
print('\n'.join(['%30s (%3d wells)' % (k,v) for k,v in expnames.items()]))
def plot(args):
dataset = viia_parser(args.input_file)
fig = figure(FigureClass=DSFWindow)
well_info = None
if args.csv_wells is not None:
well_info = exparser(args.csv_wells,args.csvexp,args.csvregexp)
args.wells = well_info.get_wells()
fig.set_data(dataset.get_all_readings(args.wells), well_info, tm_guess=args.tm_guess, kfac=args.basespan)
fig.plot_well()
fig.canvas.mpl_connect('key_press_event', fig.onkeypress)
show()
def fit(args):
dataset = viia_parser(args.input_file)
wtf = dataset.get_all_readings(args.wells)
if args.csv_output:
fout = open(args.output_file,'w')
fout.write('Well,Tm,deltaT\n')
if args.wells is not None:
wells = list(map(int, args.wells.split(',')))
elif args.csv_wells is not None:
wells = exparser(args.csv_wells,args.csvexp,args.csvregexp).get_wells()
else:
wells = sorted(wtf)
for well in wells:
wtfit = linmelt(wtf[well][0], wtf[well][1], tm=args.tm_guess)
p = wtfit.fmin()
if args.csv_output:
fout.write('%d,%f,%f\n' % (well,wtfit.tm(),wtfit.dt()))
print('Well #%d: %s' % (well,wtfit.report()))
if args.csv_output:
fout.close()
def plate(args):
dataset = viia_parser(args.input_file)
if args.cwells is not None or (args.csvcontrol is not None and args.csv_wells is not None):
if args.cwells is not None:
cwells = list(map(int, args.cwells.split(',')))
else:
cwells = exparser(args.csv_wells,args.csvcontrol).get_wells()
cwtf = dataset.get_all_readings(cwells)
c_parms = []
for well in cwells:
wtfit = linmelt(cwtf[well][0], cwtf[well][1], tm=args.tm_guess)
p = wtfit.fmin()
for i in range(args.nbro):
wtfit.w2delta(kfac=args.basespan)
p = wtfit.fmin()
print('Control well #%03d: %s' % (well,wtfit.report()))
c_parms.append([wtfit.tm(),wtfit.dt()])
c_parms = array(c_parms)
mtm, mdt = c_parms.mean(0)
stm, sdt = c_parms.std(0,ddof=1)
print('--------\nAverages:')
print(' Tm = %.1f +- %.1f' % (mtm, stm))
print(' deltaT = %.1f +- %.1f' % (mdt, sdt))
contrflag = True
else:
contrflag = False
print('--------\nResults:')
wtf = dataset.get_all_readings(args.wells)
if args.wells is not None:
wells = list(map(int, args.wells.split(',')))
elif args.csv_wells is not None:
well_info = exparser(args.csv_wells,args.csvexp,args.csvregexp)
wells = well_info.get_wells()
else:
wells = sorted(wtf)
wtfits = {}
a_parms = []
for well in wells:
if contrflag:
wtfit = linmelt(wtf[well][0], wtf[well][1], tm=273.15+mtm)
else:
wtfit = linmelt(wtf[well][0], wtf[well][1], tm=args.tm_guess)
p = wtfit.fmin()
for i in range(args.nbro):
wtfit.w2delta(kfac=args.basespan)
p = wtfit.fmin()
outline = 'Well #%03d: %s' % (well,wtfit.report())
if contrflag:
outline += ' ZTm=%8.1f' % ((wtfit.tm()-mtm)/stm/sqrt(1+len(cwells)))
if args.csv_wells is not None:
outline += ' : ' + well_info.get_well_value(well, 'info')
well_info.set_well_value(well, 'tm', wtfit.tm())
well_info.set_well_value(well, 'dt', wtfit.dt())
print(outline)
wtfits[well] = wtfit
a_parms.append([wtfit.tm(),wtfit.dt()])
a_parms = array(a_parms)
mtm, mdt = a_parms.mean(0)
stm, sdt = a_parms.std(0,ddof=1)
print('--------\nAverages:')
print(' Tm = %.1f +- %.1f' % (mtm, stm))
print(' deltaT = %.1f +- %.1f' % (mdt, sdt))
if args.csv_wells is not None:
x,tm,dt,fmt,wellnum = list(zip(*[(float(v.get('x')),v.get('tm'),v.get('dt'),v.get('format','ro'),k) for k,v in well_info.iteritems()]))
if args.output_file is not None:
with open(args.output_file,'w') as fout:
if args.csv_output:
fout.write('Concentration,Tm,deltaT\n')
for xx,yy,zz in zip(*(x,tm,dt)):
fout.write('%f,%f,%f\n' % (xx,yy,zz))
else:
for xx,yy in zip(*(x,tm)):
fout.write('%f %f\n' % (xx,yy))
fig = figure(FigureClass=PlateResWindow)
if contrflag:
fig.set_data({'tm':{'x':x,'y':tm,'format':fmt,'my':mtm,'sy':stm}, 'wells':wellnum})
else:
fig.set_data({'tm':{'x':x,'y':tm,'format':fmt}, 'wells':wellnum})
fig.plot('tm', args.logplot)
if args.ylabel is None:
args.ylabel = "Tm, ยฐC"
fig.set_axlabels(args.xlabel, args.ylabel)
fig.canvas.mpl_connect('key_press_event', fig.onkeypress)
fig.canvas.mpl_connect('button_press_event', fig.onmouseclick)
figCurve = figure(FigureClass=DSFWindow)
figCurve.set_data(dataset.get_all_readings(wells), well_info, wtfit=wtfits, kfac=args.basespan)
figCurve.canvas.mpl_connect('key_press_event', figCurve.onkeypress)
fig.attach_curves(figCurve)
show()
|
pozharski/epydsf
|
dsfactions.py
|
dsfactions.py
|
py
| 5,760 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32628905214
|
from odoo import models, fields,api
class NticCherifProduct(models.Model):
_inherit = "sn_sales.product"
displayed_tags = fields.Text(string='List des prix', compute='_compute_displayed_tags')
@api.depends('pricelist_item_ids')
def _compute_displayed_tags(self):
for record in self:
tags = record.pricelist_item_ids.mapped(lambda r:'{1:,.2f}:[{0:.0f}]'.format(r.pricelist_id.numberOfMonths,r.fixed_price))
            formatted_tags = ' , '.join(tags)  # Join the formatted price tags
record.displayed_tags = formatted_tags
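            # e.g. with two pricelist items this renders something like
            # "1,200.00:[12] , 950.50:[24]"  (fixed_price:[numberOfMonths])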
|
soufnet39/ntic-cherif
|
clients/cherif/models/product.py
|
product.py
|
py
| 582 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26917994924
|
"""Crie um programa que vai ler vรกrios nรบmeros e colocar em uma lista.
Depois disso, mostre:
A) Quantos nรบmeros foram digitados.
B) A lista de valores, ordenada de forma decrescente.
C) Se o valor 5 foi digitado e estรก ou nรฃo na lista."""
print("\n", "DESAFIO 81".center(60), "\n")
lista = list()
while True:
    lista.append(int(input("Digite um nรบmero: ")))  # Reads an integer typed by the user and appends it to the list.
    while True:  # Inner loop to guarantee the user's answer is valid.
        resposta = input("Quer continuar? [S/N] ").strip().upper()[0]
        if resposta in 'SN':
            break
        else:  # While the user does not answer correctly, keep asking them to "try again".
            print("Tente novamente.", end=" ")
    if resposta == 'N':  # If the answer is "N", the main loop ends.
        lista.sort(reverse=True)  # Sorts the list values in descending order.
        break
# Show the results to the user.
print("-" * 60)
print(f"Vocรช digitou {len(lista)} nรบmeros.")
print(f"A lista ordenada de forma decrescente รฉ: {lista}")
print(f"O valor 5 faz parte da lista!" if 5 in lista else "O valor 5 nรฃo faz parte da lista!")
|
mcsilva-dev/Exercicios-Curso-em-Video
|
ex081.py
|
ex081.py
|
py
| 1,272 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
3029439171
|
#!/usr/bin/env python3
import os
import csv
import sys
import matplotlib.pyplot as plt
# Get theta values from file (if it exists)
def readTheta (thetaFile):
theta0 = 0
theta1 = 0
dataFile = ""
if os.path.isfile(thetaFile):
with open(thetaFile, newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
if row[0] == 'dataFile':
dataFile = row[1]
if (row[0] == 'theta0'):
try:
theta0 = float(row[1])
except ValueError:
print(filename, "is invalid.")
sys.exit()
if (row[0] == 'theta1'):
try:
theta1 = float(row[1])
except ValueError:
print(filename, "is invalid.")
sys.exit()
return theta0, theta1, dataFile
# Reading data CSV for x and y values
def readData (dataFile):
x = []
y = []
if not os.path.isfile(dataFile):
print("Error : data file doesn't exist")
sys.exit()
with open(dataFile, newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
try:
x.append(int(row[0]))
y.append(int(row[1]))
except ValueError:
pass
return x, y
# Loop until user supplies mileage value
def getMileage ():
waiting_for_input = True
while (waiting_for_input):
mileage = input("Type a mileage to see the estimated price : ")
try:
mileage = int(mileage)
waiting_for_input = False
except ValueError:
print("Please enter an integer !\n")
return mileage
def displayEstimate (x, y, theta0, theta1, mileage):
price = theta0 + (theta1 * mileage)
plt.title('Relationship between a car mileage and its price', fontdict = {'family':'serif','color':'black','size':16})
plt.xlabel('Mileage in km', fontdict = {'family':'serif','color':'green','size':13})
plt.ylabel('Price in $', fontdict = {'family':'serif','color':'green','size':13})
plt.plot([min(x), max(x)], [theta0 + theta1 * min(x), theta0 + theta1 * max(x)], color='C1', label="f(x) = {0}*x + {1}".format(round(theta1, 2), round(theta0, 2)))
plt.plot(x, y, 'o', color='C0')
plt.stem([mileage], [price], bottom=(theta0 + theta1 * max(x)), orientation='vertical', linefmt='--C2', markerfmt='oC2')
plt.stem([price], [mileage], bottom=min(x), orientation='horizontal', linefmt='--C2', markerfmt='oC2')
plt.legend()
plt.show()
##########################################################################
################## MAIN ################
##########################################################################
# Theta file
thetaFile = './theta.csv'
# Get data
theta0, theta1, dataFile = readTheta(thetaFile)
mileage = getMileage()
# Output estimation based on theta values
print("\nBased on current predictions, a car with a mileage of", mileage, "kilometers would be worth :")
print("$", int(theta0 + (theta1 * mileage)))
if (theta0 == 0 and theta1 == 0):
print("\n(Without a trained model, estimating won't get us far...)")
if len(sys.argv) == 2 and sys.argv[1] == '--visualize':
x, y = readData(dataFile)
displayEstimate(x, y, theta0, theta1, mileage)
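# Usage sketch (added): theta.csv is expected to be written by the companion
# training script first; with it in place, either invocation works:
#
#   $ python3 estimate.py
#   $ python3 estimate.py --visualize   # also plots the regression line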
|
cclaude42/ft_linear_regression
|
estimate.py
|
estimate.py
|
py
| 3,472 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9178811450
|
MyPoorlyDocumentedInfo = provider()
MyFooInfo = provider(
doc = "Stores information about a foo.",
fields = ["bar", "baz"],
)
MyVeryDocumentedInfo = provider(
doc = """
A provider with some really neat documentation.
Look on my works, ye mighty, and despair!
""",
fields = {
"favorite_food": "A string representing my favorite food",
"favorite_color": "A string representing my favorite color",
},
)
named_providers_are_hashable = {
MyFooInfo: "MyFooInfo is hashable",
MyVeryDocumentedInfo: "So is MyVeryDocumentedInfo",
}
|
bazelbuild/bazel
|
src/test/java/com/google/devtools/build/skydoc/testdata/provider_basic_test/input.bzl
|
input.bzl
|
bzl
| 570 |
python
|
en
|
code
| 21,632 |
github-code
|
6
|
37076376474
|
"""Level Order Traversal"""
def level_order_traversal(self):
queue = []
current = self.root
queue.append(current)
while queue:
count = len(queue)
while count > 0:
visited = queue[0]
print(visited.data, end=' ')
if visited.left:
queue.append(visited.left)
if visited.right:
queue.append(visited.right)
queue = queue[1:]
count -= 1
print("\n")
|
piyush9194/data_structures_with_python
|
data_structures/trees/traversal/breadth_first_search_traverasl_using_list.py
|
breadth_first_search_traverasl_using_list.py
|
py
| 486 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32398148937
|
import os
#Make this a module
def get_all_filenames_from_location(folder_path):
print("json_file_v1 received the folder path: "+folder_path)
#initialize returning list
filenames = []
#Get a list of all files in the folder
files = os.listdir(folder_path)
#Print the file names
for file in files:
filename, extension = os.path.splitext(file)
filenames.append(filename)
return filenames
# To test this file just remove comment from these 2 lines
# Define the folder path
#folder_path = 'data/'
#print(get_all_filenames_from_location(folder_path))
|
alif666/market-app-v1
|
json_file_v1.py
|
json_file_v1.py
|
py
| 622 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30571932153
|
import asyncio
import datetime
from queue import PriorityQueue
import validators
import youtube_dl
class Player:
queue = asyncio.Queue()
# queue = PriorityQueue()
play_next_song = asyncio.Event()
next_song = None
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': False,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0',
'verbose': True,
'skip_download': True
}
ffmpeg_options = {
'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
def __init__(self, discord, client):
self.discord = discord
self.client = client
async def add_song_to_queue(self, ctx, url, voice_client):
is_valid_url = validators.url(url)
if is_valid_url:
info = self.ytdl.extract_info(url, download=False)
else:
info = self.ytdl.extract_info('ytsearch:{0}'.format(url), download=False)['entries'][0]
audio = self.discord.FFmpegPCMAudio(info['url'], **self.ffmpeg_options)
if self.queue.empty():
self.next_song = info['title']
await self.queue.put({
'audio': audio,
'info': info,
'ctx': ctx,
'voice_client': voice_client
})
return info
async def player_queue_task(self):
while True:
self.play_next_song.clear()
current = await self.queue.get()
if self.queue.empty():
self.next_song = None
await self.play_song(current['ctx'], current['info'], current['voice_client'], current['audio'])
await self.play_next_song.wait()
async def play_song(self, ctx, info, voice_client, audio):
async with ctx.typing():
embed = self.discord.Embed(title=':notes: Teraz leci ta piosenka :notes:',
colour=self.discord.Color.green(),
description='```css\n{0}\n```'.format(info['title']),
url=info['webpage_url'])
embed.set_image(url=info['thumbnail'])
embed.add_field(name='Czas trwania:', value=datetime.timedelta(seconds=info['duration']), inline=True)
embed.add_field(name='Dodaล:', value='<@{0}>'.format(ctx.message.author.id), inline=True)
embed.add_field(name='Nastฤpne w kolejce:', value=self.get_next_song())
await ctx.send(embed=embed)
voice_client.play(audio, after=self.toggle_next)
def get_next_song(self):
if not self.next_song:
return 'Kolejka jest pusta'
else:
return self.next_song
    def toggle_next(self, error):
        # discord.py passes the playback error (or None) to the `after` callback
        self.client.loop.call_soon_threadsafe(self.play_next_song.set)
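# Hedged wiring sketch (added): the bot setup lives elsewhere in this repo;
# the Player is typically built with the discord module and the bot client,
# and its queue consumer is started on the bot's event loop, e.g.
#
#   player = Player(discord, client)
#   client.loop.create_task(player.player_queue_task())
#   # inside a command handler: await player.add_song_to_queue(ctx, url, voice_client)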
|
MKA01/grajdelko
|
pl/savera/grajdelko/player/Player.py
|
Player.py
|
py
| 3,155 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30804251376
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
driver = webdriver.PhantomJS()
driver.get('http://fund.eastmoney.com/fund.html')
page_text = driver.find_element_by_id('pager').find_element_by_xpath('span[@class="nv"]').text
page_count = ''.join(filter(str.isdigit, page_text))
# Loop over the requested pages and save each page's table HTML
def get_data(start, end):
    for x in range(start, end+1):
        tonum = driver.find_element_by_id('tonum') # get the page-number text box
        btn_jump = driver.find_element_by_id('btn_jump') # get the jump button
tonum.clear()
tonum.send_keys(str(x))
btn_jump.click()
WebDriverWait(driver, 20).until(lambda driver:driver.find_element_by_id('pager').find_element_by_xpath('span[@value="{0}" and @class != "end page"]'.format(x))\
.get_attribute("class").find("at") != -1)
with open('./htmls/page_{0}.txt'.format(x), 'wb') as f:
f.write(driver.find_element_by_id('tableDiv').get_attribute('innerHTML').encode('utf-8'))
f.close()
get_data(1, 5)
|
bobchi/learn_py
|
14.py
|
14.py
|
py
| 1,125 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18648883751
|
#!/usr/bin/env python3.7
import matplotlib
matplotlib.use('Agg')
import pylab as pl
from matplotlib import rc
import matplotlib.cm as cm
from matplotlib.colors import Normalize
rc('text', usetex=True)
import numpy as np
import numpy.linalg as nl
import numpy.random as nr
import os.path
from numpy import cos, sin
import sys
basename = os.path.splitext(sys.argv[0])[0]
pdfname = basename + '.pdf'
pngname = basename + '.png'
n = 4000
r=25.000
tx=np.pi*0
ty=np.pi/3
tz=np.pi
Rx = np.array([[1, 0, 0], [0, cos(tx), -sin(tx)], [0, sin(tx), cos(tx)]])
Ry = np.array([[cos(ty), 0, sin(ty)], [0, 1, 0], [-sin(ty), 0, cos(ty)]])
Rz = np.array([[cos(tz), -sin(tz), 0],[sin(tz), cos(tz), 0], [0,0,1]])
def mkmat(x):
ret = np.zeros( (len(x), len(x)))
for i,xi in enumerate(x):
for j,xj in enumerate(x):
ret[i,j]= np.exp(-r*nl.norm(xi-xj))
return ret
nr.seed(3)
X0 = nr.random((n,2))
X0[:,0]*=2*np.pi
X0[:,1]-=0.5
X0[:,1]*=2
X = np.array([((1+(t/2)*cos(s/2))*cos(s), (1+(t/2)*cos(s/2))*sin(s), t/2*sin(s/2)) for s,t in X0])
X = X@Rx@Ry@Rz
M = mkmat(X)
IM = nl.inv(M)
print(sum(sum(IM)))
Xx = [_[0] for _ in X]
Xy = [_[1] for _ in X]
Xz = [_[2] for _ in X]
fig = pl.figure(figsize=(6,3))
ax = fig.add_subplot(121, projection='3d')
# ax = fig.add_subplot(121)
bx = fig.add_subplot(122)
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
ax.axes.zaxis.set_ticklabels([])
ax.xaxis._axinfo["grid"]['linewidth'] = 0.1
ax.yaxis._axinfo["grid"]['linewidth'] = 0.1
ax.zaxis._axinfo["grid"]['linewidth'] = 0.1
C = IM.dot(np.ones(len(X)))
sa = 50*abs(C)**3
sb = sa
cmap = pl.get_cmap()
norm = Normalize(vmin=min(sa), vmax=max(sa))
for idx, row in enumerate(X):
ax.plot(row[0], row[1], '.', color = cmap(norm(sa[idx])), markersize=sa[idx], markeredgecolor='none')
# ax.plot(Xx, Xy, Xz, c=C, markersize=sa/10, lw=0)
sc = bx.scatter(X0[:,0], X0[:,1], c=C, s=sb, lw=0)
xoffset=0.08
bx.arrow(0-xoffset, -1, 0, 2, length_includes_head=True, head_width=0.2, head_length=0.1, fc='0')
bx.arrow(2*np.pi+xoffset, 1, 0, -2, length_includes_head=True, head_width=0.2, head_length=0.1, fc='0')
bx.axis('off')
pl.colorbar(sc, shrink=0.9)
pl.savefig(pngname, dpi=300)
|
AmFamMLTeam/metric-space-magnitude
|
src/mobius.py
|
mobius.py
|
py
| 2,198 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19265168130
|
import smtplib
from email.message import EmailMessage
import time
class SendAMessage():
def __init__(self,action,msg_body,config,attach=None):
self.config = config
self.msg_body = msg_body
self.attach = attach
self.action = action
self.setup_message_subject()
self.recipients = self.config.mms_recipients
self.user = self.config.email
        self.password = self.config.token
        self.build_subject = True  # default; the CSV branch below never sets this otherwise
if self.config.csv:
self.recipients = [self.config.csv]
with open(self.attach, 'rb') as content_file:
self.content = content_file.read()
self.build_and_send_msg()
else:
if self.config.mms_enabled:
self.recipients = self.config.mms_recipients
if not self.config.mms_subject:
self.build_subject = False
self.build_and_send_msg()
if self.config.email_enabled:
self.build_subject = True
if self.config.mms_enabled:
time.sleep(5)
self.recipients = self.config.email_recipients
self.build_and_send_msg()
def build_and_send_msg(self):
self.emailObj = EmailMessage()
if self.config.csv:
self.emailObj.add_attachment(self.content, maintype='application', subtype='pdf', filename=self.attach)
if self.build_subject:
self.emailObj['subject'] = self.subject
self.emailObj['from'] = self.user
self.emailObj.set_content(self.msg_body)
self.emailObj['to'] = self.user
self.emailObj['bcc'] = self.recipients
# print(f"Sending MMS to: {to}") # console debugging, informative.
self.enable_smtp_server()
self.server.send_message(self.emailObj)
self.quit_server()
def setup_message_subject(self):
# check what we need to do
if self.action == "normal" or self.action == "auto" or self.action == "error":
self.subject = f"CONSTELLATION {self.config.node_name}"
elif self.action == "health":
self.subject = f"HEALTH CHECK {self.config.node_name}"
# come back to re-enable this later...
# if self.action == "error":
# self.subject = "ERROR CONST DARSTAR"
def enable_smtp_server(self):
self.server = smtplib.SMTP("smtp.gmail.com", 587)
self.server.starttls()
self.server.login(self.user, self.password)
def quit_server(self):
self.server.quit()
if __name__ == "__main__":
print("This class module is not designed to be run independently, please refer to the documentation")
|
netmet1/constellation-node-automation
|
classes/send_sms_email.py
|
send_sms_email.py
|
py
| 2,744 |
python
|
en
|
code
| 2 |
github-code
|
6
|
74309704827
|
import pandas
data = pandas.read_csv("weather_data.csv")
# print(data["temp"]) Series
# print(data) Data-Frame
# print(data["temp"].max())
# print(data.condition)
# print(data[data.day == "Monday"])
# print(data[data.temp == data.temp.max()].temp)
# fahr = (9/5)*(data[data.day == "Monday"].temp)+32
# print(fahr)
# Create data-frame from scratch
mydict = {
"student name": ["Ramesh", "Suresh", "Rajesh"],
"roll_num": [12, 13, 4]
}
mydata = pandas.DataFrame(mydict)
mydata.to_csv("mydata.csv")
|
shuklaritvik06/PythonProjects
|
Day - 25/main.py
|
main.py
|
py
| 504 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36261973545
|
from collections import deque
def solution1(graph):
queue = deque([(0,0,0)])
n = len(graph)
m = len(graph[0])
while queue:
x,y,v = queue.popleft()
if x>=n or y>=m or x<0 or y<0:
continue
if graph[x][y] == 1:
graph[x][y] += v
queue.append((x+1, y, graph[x][y]))
queue.append((x, y+1, graph[x][y]))
queue.append((x-1, y, graph[x][y]))
queue.append((x, y-1, graph[x][y]))
return graph[n-1][m-1]
def solution2(graph):
    # Define the four movement directions (up, down, left, right)
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    # Dimensions of the maze
    n = len(graph)
    m = len(graph[0])
    # BFS implementation
    def bfs(x, y):
        # Use the deque library as the queue
        queue = deque()
        queue.append((x, y))
        # Repeat until the queue is empty
        while queue:
            x, y = queue.popleft()
            # Check the four neighbouring positions of the current cell
            for i in range(4):
                nx = x + dx[i]
                ny = y + dy[i]
                # Ignore positions outside the maze
                if nx < 0 or ny < 0 or nx >= n or ny >= m:
                    continue
                # Ignore walls
                if graph[nx][ny] == 0:
                    continue
                # Record the shortest distance only on the first visit to a cell
                if graph[nx][ny] == 1:
                    graph[nx][ny] = graph[x][y] + 1
                    queue.append((nx, ny))
        # Return the shortest distance to the bottom-right cell
        return graph[n - 1][m - 1]
return bfs(0,0)
if __name__ == "__main__":
graph = [
[1,0,1,0,1,0],
[1,0,1,1,1,1],
[1,0,1,1,1,0],
[1,0,1,0,1,0],
[1,1,1,0,1,1]
]
print(solution1(graph))
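    # Note (added): both solutions mutate `graph` in place, so to compare them
    # run each on its own deep copy, e.g.:
    #   import copy
    #   print(solution2(copy.deepcopy(graph)))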
|
hon99oo/PythonAlgorithmStudy
|
์ด์ฝํ
/DFS_BFS/์์ _๋ฏธ๋ก ํ์ถ/solution.py
|
solution.py
|
py
| 1,924 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
42112403480
|
from oauth2_provider.models import get_application_model
# create oauth application for export-opportunities
Application = get_application_model()
if Application.objects.count() == 0:
Application.objects.create(
name='export-opportunities',
redirect_uris='http://opportunities.trade.great:8002/export-opportunities/users/auth/exporting_is_great/callback',
skip_authorization=True,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
client_id='efcy4CUD2bhChR3We8K1LunKLSmwVe8uW4qa2Ipv',
client_secret='VbNAcpsal6bvqLoALsebAsC6gVj8XtoxiO58ukI7M8AyOcp7gowal0f0y6aN0KQrmDFfvBuhXZFwSAwmt4SHMnBXy1tDs0uttK8CQAiWGY1DRHPjXoCSyP6GLZUiLTeg',
)
|
mkieblesz/local-workspace
|
patches/directory-sso/fixtures/sso_api_clients.py
|
sso_api_clients.py
|
py
| 756 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22899839710
|
from evennia import create_object
from evennia import DefaultCharacter
from evennia.utils.test_resources import EvenniaTest
from world import space
class TestSpace(EvenniaTest):
"""
Unit tests for Space. A modification of unit tests for Wilderness Contrib
Only minor changes were required to make this work for Space, mostly
new names and additional exits.
Tests:
- Creation of Space with default name
- Creation of Space with custom name
- A PC entering space at the default coordinates
    - A PC entering space at specific coordinates
- A PC entering space where space has a custom name
- Space has the correct exits
- Room creation, including splitting and combining rooms as PCs move
- Verifying new coordinates are correct as a PC uses each exit
"""
def setUp(self):
super(TestSpace, self).setUp()
self.char1 = create_object(DefaultCharacter, key="char1")
self.char2 = create_object(DefaultCharacter, key="char2")
    def get_space_script(self, name="default"):
        s = space.SpaceScript.objects.get(name)
        return s
def test_create_space_default_name(self):
space.create_space()
s = self.get_space_script()
self.assertIsNotNone(s)
def test_create_space_custom_name(self):
name = "customname"
space.create_space(name)
s = self.get_space_script(name)
self.assertIsNotNone(s)
def test_enter_space(self):
space.create_space()
space.enter_space(self.char1)
self.assertIsInstance(self.char1.location, space.SpaceRoom)
s = self.get_space_script()
self.assertEquals(s.db.itemcoordinates[self.char1], (0, 0, 0))
def test_enter_space_custom_coordinates(self):
space.create_space()
space.enter_space(self.char1, coordinates=(1, 2, 3))
self.assertIsInstance(self.char1.location, space.SpaceRoom)
s = self.get_space_script()
self.assertEquals(s.db.itemcoordinates[self.char1], (1, 2, 3))
def test_enter_space_custom_name(self):
name = "customnname"
space.create_space(name)
space.enter_space(self.char1, name=name)
self.assertIsInstance(self.char1.location, space.SpaceRoom)
def test_space_correct_exits(self):
space.create_space()
space.enter_space(self.char1)
# By default we enter in the center (0, 0, 0), so all exits should
# be visible / traversable
# TODO: Double check that there's no case of fewer exits (IE corners)
space.enter_space(self.char1, coordinates=(0, 0, 0))
exits = [i for i in self.char1.location.contents
if i.destination and (
i.access(self.char1, "view") or
i.access(self.char1, "traverse"))]
self.assertEquals(len(exits), 10)
exitsok = ["north", "northeast", "east", "southeast", "south",
"southwest", "west", "northwest", "up", "down"]
for each_exit in exitsok:
self.assertTrue(any([e for e in exits if e.key == each_exit]))
def test_room_creation(self):
# Pretend that both char1 and char2 are connected...
self.char1.sessions.add(1)
self.char2.sessions.add(1)
self.assertTrue(self.char1.has_account)
self.assertTrue(self.char2.has_account)
space.create_space()
s = self.get_space_script()
# We should have no unused room after moving the first account in.
self.assertEquals(len(s.db.unused_rooms), 0)
s.move_obj(self.char1, (0, 0, 0))
self.assertEquals(len(s.db.unused_rooms), 0)
# And also no unused room after moving the second one in.
s.move_obj(self.char2, (1, 1, 1))
self.assertEquals(len(s.db.unused_rooms), 0)
# But if char2 moves into char1's room, we should have one unused room
# Which should be char2's old room that got created.
s.move_obj(self.char2, (0, 0, 0))
self.assertEquals(len(s.db.unused_rooms), 1)
self.assertEquals(self.char1.location, self.char2.location)
# And if char2 moves back out, that unused room should be put back to
# use again.
s.move_obj(self.char2, (1, 1, 1))
self.assertNotEquals(self.char1.location, self.char2.location)
self.assertEquals(len(s.db.unused_rooms), 0)
def test_get_new_coordinates(self):
loc = (1, 1, 1)
directions = {"north": (1, 2, 1),
"northeast": (2, 2, 1),
"east": (2, 1, 1),
"southeast": (2, 0, 1),
"south": (1, 0, 1),
"southwest": (0, 0, 1),
"west": (0, 1, 1),
"northwest": (0, 2, 1),
"up": (1, 1, 2),
"down": (1, 1, 0)}
        for direction, correct_loc in directions.items():  # dict.iteritems() was removed in Python 3
new_loc = space.get_new_coordinates(loc, direction)
self.assertEquals(new_loc, correct_loc, direction)
|
QBFreak/SolarWinds-Evennia
|
world/test_space.py
|
test_space.py
|
py
| 5,129 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17308026422
|
#!/usr/bin/python3
import collections
import fileinput
import functools
import heapq
import itertools
import math
import re
import sys
rps = {
'A': 'rock',
'B': 'paper',
'C': 'scissors',
}
loseto = {
'rock': 'scissors',
'paper': 'rock',
'scissors': 'paper',
}
defeats = {v: k for k, v in loseto.items()}
points = {
'rock': 1,
'paper': 2,
'scissors': 3,
}
def solve(inp):
sum = 0
for l in inp:
themIn, outcome = l.strip().split()
them = rps[themIn]
if outcome == 'X': # lose
score = points[loseto[them]]
elif outcome == 'Y': #draw
score = 3 + points[them]
else: # win
score = 6 + points[defeats[them]]
sum += score
print(sum)
def tests():
print("--- tests done ----")
if __name__ == "__main__":
tests()
solve(fileinput.input(sys.argv[1]))
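# Sanity check (added): on the Advent of Code 2022 day 2 sample input
#   A Y
#   B X
#   C Z
# the scores are 4 + 1 + 7, so this program prints 12.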
|
zmerlynn/advent-of-code
|
2022/d02p2.py
|
d02p2.py
|
py
| 890 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28199024080
|
import logging
import os
import json
from flask import Flask
from flask_ask import Ask, request, session, question, statement
import datetime as DT
os.system('sh transactions.sh > output.json')
data = json.load(open('output.json'))
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
# function that converts an amount of money to a string
# that can be spoken aloud
def say_money(value):
    neg = 'negative ' if value < 0 else ''
    # work in integer cents to avoid fragile string slicing of float reprs
    dollars, cents = divmod(int(round(abs(value) * 100)), 100)
    return '{0}{1} dollars and {2} cents'.format(neg, dollars, cents)
# check balance
def check_bal(idn):
idn = str(idn)
os.system('sh accounts'+idn+'.sh > output.json')
data = json.load(open('output.json'))
return ('your current available balance is: ' + say_money(data['account_balances'][0]['available']))
# most recent transaction
def most_recent_transaction(idn):
idn = str(idn)
os.system('sh transactions'+idn+'.sh > output.json')
data = json.load(open('output.json'))
val = data['transactions'][0]['amount']
return ('Your most recent transaction was: ' + say_money(val))
# how much did i spend total
def total_yr_spend_value(idn):
idn = str(idn)
os.system('sh transactions'+idn+'.sh > output.json')
data = json.load(open('output.json'))
arr = data['transactions']
total = 0
for x in arr:
total = total + x['amount']
print('Your total spending over the last year was: ' + say_money(total))
return total
def total_yr_spend(idn):
idn = str(idn)
os.system('sh transactions'+idn+'.sh > output.json')
data = json.load(open('output.json'))
arr = data['transactions']
total = 0
for x in arr:
total = total + x['amount']
return ('Your total spending over the last year was: ' + say_money(total))
# how much did i spend over the past two weeks
def week_spend(idn):
idn = str(idn)
os.system('sh transactions'+idn+'.sh > output.json')
data = json.load(open('output.json'))
total = 0
today = DT.date.today()
    two_weeks_ago = today - DT.timedelta(days=14)
arr = data['transactions']
for x in arr:
strdate = str(x['settled_at'])
strdate = strdate[0:10]
print(strdate)
curr_day = DT.datetime.strptime(strdate, '%Y-%m-%d').date()
        if curr_day >= two_weeks_ago:
total = total + x['amount']
ret_str = ''
ret_str = ret_str + ('Your total spending over the past two weeks was: ' + say_money(total) + '. ')
past_two = total
    past_year = total_yr_spend_value(idn)
ret_str = ret_str + ('The percentage from the past two weeks is ' + str(round(past_two*100/past_year, 2)) + ' percent of your spending over the past year')
return ret_str
@ask.launch
def launch():
    speech_text = 'Welcome to EchoDog, your loyal financial companion.'
return question(speech_text).reprompt(speech_text).simple_card('HelloWorld', speech_text)
# ELLEN's
#
#
@ask.intent('BiWeeklyPercentage')
def BiWeekPercent():
    speech_text = week_spend(1) + '. That is pretty good, keep it up'
return statement(speech_text).simple_card('BiWeeklyPercentage', speech_text)
@ask.intent('YearTotal')
def year_total():
    speech_text = total_yr_spend(1) + '. That is a lot of money.'
return statement(speech_text).simple_card('YearTotal', speech_text)
@ask.intent('CheckBalance')
def chk_bal():
speech_text = check_bal(1) + ' You are doing pretty well for yourself'
return statement(speech_text).simple_card('CheckBalance', speech_text)
@ask.intent('MostRecent')
def most_recent():
speech_text = most_recent_transaction(1)
return statement(speech_text).simple_card('MostRecent', speech_text)
# JACOB's
#
#
@ask.intent('BiWeeklyPercentagetwo')
def BiWeekPercent2():
speech_text = week_spend(2)
return statement(speech_text).simple_card('BiWeeklyPercentage', speech_text)
@ask.intent('YearTotaltwo')
def year_total2():
speech_text = total_yr_spend(2)
return statement(speech_text).simple_card('YearTotal', speech_text)
@ask.intent('CheckBalancetwo')
def chk_bal2():
speech_text = check_bal(2)
return statement(speech_text).simple_card('CheckBalance', speech_text)
@ask.intent('MostRecenttwo')
def most_recent2():
speech_text = most_recent_transaction(2)
return statement(speech_text).simple_card('MostRecent', speech_text)
# MIKE
#
#
@ask.intent('BiWeeklyPercentagethree')
def BiWeekPercent3():
    speech_text = 'The percentage over the past two weeks that you have spent is 50 percent of your spending over the past year. ' + 'Boy, you need to save more and stop being so yolo swag. Dabs'
return statement(speech_text).simple_card('BiWeeklyPercentage', speech_text)
@ask.intent('YearTotalthree')
def year_total3():
    speech_text = 'You did alright this year but you could use a whole lot of improvement. You are killing me, Mike'
return statement(speech_text).simple_card('YearTotal', speech_text)
@ask.intent('CheckBalancethree')
def chk_bal3():  # renamed to avoid shadowing the chk_bal handler above
    speech_text = 'I am not sure if you want to know your balance, but you have 5 dollars and 37 cents in your account,'
    return statement(speech_text).simple_card('CheckBalance', speech_text)
@ask.intent('MostRecentthree')
def most_recent3():  # renamed to avoid shadowing the most_recent handler above
    speech_text = 'You spent 50 dollars on garlic bread maybe you need to rethink your life choices'
    return statement(speech_text).simple_card('MostRecent', speech_text)
############################
@ask.intent('Unhandled')
def unhandled():
unhandled_response="Sorry, I did not understand that command. Say help for assitance"
return question().reprompt(unhandled_response)
@ask.intent('HelpFunc')
def help_func():
helplist="You are able to ask for most recent transaction, check your balance, spending stats for two weeks, and weekly total spending"
return question(helplist).simple_card('HelpFunc', helplist)
@ask.intent('AMAZON.HelpIntent')
def help():
unhandled_response="Sorry, I did not understand that command. Say help for assitance."
return question().reprompt(unhandled_response)
@ask.session_ended
def session_ended():
return "{}", 200
if __name__ == '__main__':
if 'ASK_VERIFY_REQUESTS' in os.environ:
verify = str(os.environ.get('ASK_VERIFY_REQUESTS', '')).lower()
if verify == 'false':
app.config['ASK_VERIFY_REQUESTS'] = False
app.run(debug=True)
# how much did i spend last week
# how much did i spend last week compared to the entire year
|
Interplay/HoyaHacks-18
|
main.py
|
main.py
|
py
| 6,607 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21379769063
|
import test, gui, wx, config
from unittests import dummy
from domain import task, effort, category
class ViewerContainerTest(test.wxTestCase):
def setUp(self):
self.settings = config.Settings(load=False)
self.taskList = task.sorter.Sorter(task.TaskList(),
settings=self.settings)
self.container = gui.viewercontainer.ViewerNotebook(self.frame,
self.settings, 'mainviewer')
self.container.addViewer(dummy.ViewerWithDummyWidget(self.container,
self.taskList, gui.uicommand.UICommands(self.frame, None, None,
self.settings, self.taskList, effort.EffortList(self.taskList),
category.CategoryList()), self.settings), 'Dummy')
def testCreate(self):
self.assertEqual(0, self.container.size())
def testAddTask(self):
self.taskList.append(task.Task())
self.assertEqual(1, self.container.size())
|
HieronymusCH/TaskCoach
|
branches/Release0_62_Branch/taskcoach/tests/unittests/guiTests/ViewerContainerTest.py
|
ViewerContainerTest.py
|
py
| 928 |
python
|
en
|
code
| 2 |
github-code
|
6
|
34084281401
|
from rc.resources.apps.scrape.loader import GenericLoader
from rc.resources.apps.education.models import AcademicCenter, \
AcademicCenterType
from rc.resources.apps.education.models import CampusSustainabilityCourseTeacher
from rc.resources.apps.education.models import StudyAbroadProgram
from aashe.organization.models import Organization
ACADEMIC_CENTERS_RESET_YET = False
class AcademicCenterLoader(GenericLoader):
def __init__(self, parser_class, model_or_string, reset=False):
'''AcademicCenters are loaded by a bunch of parsers. If we
reset after each one, only the last one in remains. So here
we intercept the reset flag, and do it only once.
'''
global ACADEMIC_CENTERS_RESET_YET
if reset and not ACADEMIC_CENTERS_RESET_YET:
AcademicCenter.objects.all().delete()
# since AcademicCenterTypes are created as side effect in
# get_center_type() below, better blow 'em away here, too:
AcademicCenterType.objects.all().delete()
ACADEMIC_CENTERS_RESET_YET = True
super(AcademicCenterLoader, self).__init__(parser_class,
model_or_string,
reset=False)
def create_instance(self, data):
data['type'] = self.get_center_type(data['category'])
super(AcademicCenterLoader, self).create_instance(data)
def get_center_type(self, type_code):
'''
Return the AcademicCenterType for type_code, creating it
if necessary.
'''
center_types = dict(AcademicCenterType.CENTER_TYPES)
center_type, new_object = AcademicCenterType.objects.get_or_create(
**{'type': type_code})
if new_object:
center_type.description = center_types[type_code]
center_type.save()
return center_type
class CampusSustainabilityCourseLoader(GenericLoader):
def create_instance(self, data):
# each datum is a school, and each school can have >1 course.
# since we're loading courses, not schools, we need to call
# GenericLoader.create_instance() for each course. note, this
# breaks the pattern of create_instance() returning the db
# object that was created.
for course in data['courses']:
course['institution'] = data['school_name']
# save off the course teachers since GenericLoader.create_instance()
# doesn't like keyword arguments that are lists:
teachers = course['teachers']
del(course['teachers'])
course_on_sustainability = super(CampusSustainabilityCourseLoader,
self).create_instance(course)
            # attach instance(s) of CampusSustainabilityCourseTeacher:
for teacher in self.teacher_instances(teacher_data=teachers):
course_on_sustainability.teachers.add(teacher)
course_on_sustainability.save()
def teacher_instances(self, teacher_data):
csc_teachers = list()
for teacher in teacher_data:
csc_teacher, created = \
CampusSustainabilityCourseTeacher.objects.get_or_create(
**teacher)
csc_teacher.save()
csc_teachers.append(csc_teacher)
return csc_teachers
def reset_model(self):
CampusSustainabilityCourseTeacher.objects.all().delete()
super(CampusSustainabilityCourseLoader, self).reset_model()
class StudyAbroadProgramLoader(GenericLoader):
def create_instance(self, data):
# if no matching institution exists, create one
        if 'institution' in data:  # dict.has_key() was removed in Python 3
try:
inst_query = data['institution'].strip().lower()
institution_obj = Organization.objects.get(
name__iexact=inst_query)
data['organization'] = institution_obj
except:
Organization.objects.create(
name=data['institution'], picklist_name=data['institution'])
super(StudyAbroadProgramLoader, self).create_instance(data)
|
AASHE/django-irc
|
rc/resources/apps/scrape/loader/education.py
|
education.py
|
py
| 4,212 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31490583356
|
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import render, redirect
from django.http import JsonResponse
import PathFinder.PathFinderModels.pathfinder_chat_bot as qamodel
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from django.contrib import messages
from .forms import EditProfileForm, UpdateProfile, NotesForm, ContactForm
from .models import Notes
from django.contrib.auth.models import User
from django.conf import settings
from django.core.mail import EmailMessage
import os
import json
# from PathFinder.PathFinderApp.forms import RegisterUserForm, ChatBotForm
# from django.contrib.auth.forms import UserCreationForm
# from django.core.exceptions import ObjectDoesNotExist
# from django.contrib.auth.decorators import login_required
# import smtplib
# from django.core.mail import send_mail
# import pickle
# import warnings
# import openai
import pinecone
from django.urls import reverse_lazy
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.views import PasswordChangeView
api_key = os.environ.get("OPENAI_API_KEY")
# warnings.filterwarnings("ignore")
pinecone.init(
api_key="5bf2927b-0fb7-423b-b8f1-2f6a3347a15d", environment="asia-northeast1-gcp"
)
vectorstore = Pinecone.from_existing_index("teamprojindex", OpenAIEmbeddings())
pathfinder_chatbot = qamodel.make_chain(vectorstore)
def render_chatbotview(request):
return render(request, "chatwindow.html")
def send_chat_response(request):
pathfinder_response = ""
if request.method == "POST":
json_user_input = json.loads(request.body)
user_message = json_user_input["user_message"]
if user_message is not None:
response = pathfinder_chatbot(
{
"question": user_message,
# [("Q","A")]
"chat_history": [("", "")],
}
) # query the chatbot
# print(user_message)
pathfinder_response = response["answer"]
# context = {'pathfinder_response': pathfinder_response}
# jsondata = json.dumps(jsonresp)
# reverse('/chatbox/')
return JsonResponse({"pathfinder_response": pathfinder_response})
# return JsonResponse({'pathfinder_response': pathfinder_response})
# return render(request, 'chatwindow.html', {'pathfinder_response': pathfinder_response, 'pathfinder_api_url': reverse('chatbot')})
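# Request/response contract (illustrative): the page POSTs JSON of the form
# {"user_message": "..."} and receives {"pathfinder_response": "..."} back.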
def index(request):
return render(request, "index.html")
def gdpr(request):
return render(request, "gdpr.html")
def about(request):
return render(request, "about.html")
### For the Games Page ###
def games(request):
return render(request, "games.html")
def brickbreaker(request):
return render(request, "brickbreaker.html")
def remembergame(request):
return render(request, "remem.html")
def rockps(request):
return render(request, "rockps.html")
def tictakpro(request):
return render(request, "tictakpro.html")
### For the Notemaker Page ###
def noteindex(request):
notes = Notes.objects.all()
return render(request, "noteindex.html", {"notes": notes})
def new_note(request):
form = NotesForm()
if request.method == "POST":
form = NotesForm(request.POST)
if form.is_valid():
form.save()
return redirect("noteindex")
return render(request, "noteupdate.html", {"form": form})
def note_detail(request, pk):
note = Notes.objects.get(id=pk)
form = NotesForm(instance=note)
if request.method == "POST":
form = NotesForm(request.POST, instance=note)
if form.is_valid():
form.save()
return redirect("noteindex")
return render(request, "noteupdate.html", {"note": note, "form": form})
def delete_note(request, pk):
note = Notes.objects.get(id=pk)
form = NotesForm(instance=note)
if request.method == "POST":
note.delete()
messages.info(request, "The note has been deleted")
return redirect("noteindex")
return render(request, "notedelete.html", {"note": note, "form": form})
def search_page(request):
if request.method == "POST":
search_text = request.POST["search"]
notes = Notes.objects.filter(
heading__icontains=search_text
) | Notes.objects.filter(text__icontains=search_text)
# if notes is None:
# messages.info(request, "Note not found")
return render(request, "notesearch.html", {"notes": notes})
# m = openai.Model.list()
# print([m['id'] for m in m['data'] if m['id'].startswith('gpt')])
# models= list(openai.Model.list().values())[1]
# print(models)
# print(list(filter(lambda x: re.match('*gpt*', x) , models)))
# *ASIDE: Function for token counting queries.
# def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"):
# try:
# encoding = tiktoken.encoding_for_model(model)
# except KeyError:
# encoding = tiktoken.get_encoding("cl100k_base")
# if model == "gpt-3.5-turbo-0301": # note: future models may deviate from this
# num_tokens = 0
# for message in messages:
# num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
# for key, value in message.items():
# num_tokens += len(encoding.encode(value))
# if key == "name": # if there's a name, the role is omitted
# num_tokens += -1 # role is always required and always 1 token
# num_tokens += 2 # every reply is primed with <im_start>assistant
# return num_tokens
# else:
# raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
# See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
# def login_user(request):
# if request.method == "POST":
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(request, username=username, password=password)
# if user is not None:
# login(request, user)
# return redirect('index')
# else:
# messages.success(
# request, ("There was an error logging in, try again..."))
# return redirect('login')
# else:
# return render(request, 'login.html', {})
# def logout_user(request):
# logout(request)
# messages.success(request, ("You have successfully logged out."))
# return redirect('index')
# def register_user(request):
# if request.method == "POST":
# form = RegisterUserForm(request.POST)
# if form.is_valid():
# form.save()
# username = form.cleaned_data['username']
# password = form.cleaned_data['password1']
# user = authenticate(username=username, password=password)
# login(request, user)
# messages.success(request, ("Account successfuly created!"))
# return redirect('index')
# else:
# form = RegisterUserForm()
# return render(request, 'signup.html', {'form': form, })
### Google sign in ###
# def logout_view(request):
# logout(request)
# return redirect("index")
# def signup_redirect(request):
# messages.error(
# request, "Something wrong here, it may be that you already have account!")
# return redirect("index")
# from django.contrib.auth import authenticate, login
# from django.shortcuts import render, redirect
# def login_view(request):
# # Handle user login
# if request.method == 'POST':
# username = request.POST['username']
# password = request.POST['password']
# user = authenticate(request, username=username, password=password)
# if user is not None:
# login(request, user)
# print(user.username)
# request.session['username'] = user.username
# return redirect('home')
# else:
# # Handle login failure
# pass
# else:
# # Display login page
# return render(request, 'bloglogin.html')
def edit_profile(request):
if not request.user.is_authenticated:
return redirect("/login")
if request.method == "POST":
user_form = EditProfileForm(request.POST, instance=request.user)
profile_form = UpdateProfile(request.POST, request.FILES, instance=request.user)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, "Your profile is updated successfully")
return redirect("/profile")
else:
user_form = EditProfileForm(instance=request.user)
profile_form = UpdateProfile(instance=request.user)
    # authentication was already verified at the top, so just render the form
    return render(
        request, "edit.html", {"user_form": user_form, "profile_form": profile_form}
    )
# def profile(request):
# context = {}
# return render(request, 'profile.html', context)
def contact(request):
if request.method == "POST":
form = ContactForm(request.POST)
if form.is_valid():
name = form.cleaned_data["name"]
email = form.cleaned_data["email"]
message = form.cleaned_data["message"]
email_subject = "New Contact Form Submission"
email_body = f"Name: {name}\nEmail: {email}\nMessage: {message}"
            # use a separate name so the sender's address string is not shadowed
            email_message = EmailMessage(
                email_subject,
                email_body,
                settings.DEFAULT_FROM_EMAIL,
                [settings.CONTACT_EMAIL],
                reply_to=[email],
            )
            email_message.send(fail_silently=False)
return render(request, "thanks.html")
else:
form = ContactForm()
return render(request, "form.html", {"form": form})
# For Blog
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from .models import Profile, Meep
from .forms import MeepForm, SignUpForm, ProfilePicForm, EditProfileForm
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
def bloghome(request):
if request.user.is_authenticated:
form = MeepForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
meep = form.save(commit=False)
meep.user = request.user
meep.save()
messages.success(request, ("Your Discovery Has Been Posted!"))
return redirect("bloghome")
meeps = Meep.objects.all().order_by("-created_at")
return render(request, "home.html", {"meeps": meeps, "form": form})
else:
meeps = Meep.objects.all().order_by("-created_at")
return render(request, "home.html", {"meeps": meeps})
def profilelist(request):
if request.user.is_authenticated:
profiles = Profile.objects.exclude(user=request.user)
return render(request, "profile_list.html", {"profiles": profiles})
else:
messages.success(request, ("You Must Be Logged In To View This Page..."))
return redirect("bloghome")
def profile(request, pk):
if request.user.is_authenticated:
profile = Profile.objects.get(user_id=pk)
meeps = Meep.objects.filter(user_id=pk).order_by("-created_at")
# Post Form logic
if request.method == "POST":
# Get current user
current_user_profile = request.user.profile
# Get form data
action = request.POST["follow"]
# Decide to follow or unfollow
if action == "unfollow":
current_user_profile.follows.remove(profile)
elif action == "follow":
current_user_profile.follows.add(profile)
# Save the profile
current_user_profile.save()
return render(request, "profile.html", {"profile": profile, "meeps": meeps})
else:
messages.success(request, ("You Must Be Logged In To View This Page..."))
return redirect("bloghome")
def login_user(request):
if request.method == "POST":
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
messages.success(request, ("You Have Been Logged In!"))
return redirect("index")
else:
messages.success(
request, ("There was an error logging in. Please Try Again...")
)
return redirect("login")
else:
return render(request, "login.html", {})
# def logout_user(request):
# return render(request, 'profile_list.html')
def logout_user(request):
logout(request)
messages.success(request, ("You Have Been Logged Out. Till we meet again..."))
return redirect("index")
def register_user(request):
form = SignUpForm()
if request.method == "POST":
form = SignUpForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data["username"]
password = form.cleaned_data["password1"]
# first_name = form.cleaned_data['first_name']
# second_name = form.cleaned_data['second_name']
# email = form.cleaned_data['email']
# Log in user
user = authenticate(username=username, password=password)
login(request, user)
messages.success(request, ("You have successfully registered! Welcome!"))
return redirect("index")
return render(request, "signup.html", {"form": form})
def update_user(request):
if request.user.is_authenticated:
current_user = User.objects.get(id=request.user.id)
profile_user = Profile.objects.get(user__id=request.user.id)
# Get Forms
user_form = EditProfileForm(
request.POST or None, request.FILES or None, instance=current_user
)
profile_form = ProfilePicForm(
request.POST or None, request.FILES or None, instance=profile_user
)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
# login(request, current_user)
messages.success(request, ("Your Profile Has Been Updated!"))
return redirect("bloghome")
return render(
request,
"update_user.html",
{"user_form": user_form, "profile_form": profile_form},
)
else:
messages.success(request, ("You Must Be Logged In To View That Page..."))
return redirect("bloghome")
def meep_like(request, pk):
if request.user.is_authenticated:
meep = get_object_or_404(Meep, id=pk)
if meep.likes.filter(id=request.user.id):
meep.likes.remove(request.user)
else:
meep.likes.add(request.user)
return redirect(request.META.get("HTTP_REFERER"))
else:
messages.success(request, ("You Must Be Logged In To View That Page..."))
return redirect("bloghome")
def meep_show(request, pk):
meep = get_object_or_404(Meep, id=pk)
if meep:
return render(request, "show_meep.html", {"meep": meep})
else:
messages.success(request, ("That Post Does Not Exist..."))
return redirect("bloghome")
def my_view(request):
# Get the current user
user = request.user
# Get the user's first name
first_name = user.first_name
# Add the first name to the context dictionary
context = {"first_name": first_name}
return render(request, "my_template.html", context)
class ChangePasswordView(SuccessMessageMixin, PasswordChangeView):
template_name = "password.html"
    success_message = "Successfully changed your password"
success_url = reverse_lazy("profile")
|
Susa0823/PathFinderProject
|
PathFinder/PathFinderApp/views.py
|
views.py
|
py
| 16,953 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37974761109
|
'''
Not a true display, but a stub for directly running applications from the menu
Designed for Light Demos
Generic class customized by the config file and main_menu setup
Author: Howard Webb
Date: 2/28/2021
'''
from exp import exp
from Exp_Util import save
from Client import SocketClient
from variables import UP, DOWN, LEFT, RIGHT, CENTER, LOOP, INIT
_sunrise_demo={'name': 'Sunrise', 'current_phase': 0, 'phases': [{'name':None, 'start_date': None, 'lights': {
'on': {"cmd":"LIGHT_SUNRISE", 'time': None, "function":{"module":"Light_Custom", "class":"Sunrise"}},
'off': {"cmd":"LIGHT_SUNSET",'time':None, "function":{"module":"Light_Custom", "class":"Sunset"}}
}}
]}
class DisplayRun(object):
def __init__(self, title, config):
# override column list to use as function list
self._socket = SocketClient()
self._config = config
print("init DisplayRun", title)
exp_hold = exp
save(self._config)
# Call Client_Helper and pass command
self.run()
# restore last exp
save(exp_hold)
def receive(self, action):
# dummy
return None, None, LEFT
def run(self):
# could be over-ridden for other actions
print("run")
message = self._config["phases"][self._config["current_phase"]]["lights"]["on"]["cmd"]
self.send(message)
message = self._config["phases"][self._config["current_phase"]]["lights"]["off"]["cmd"]
self.send(message)
def send(self, message):
#cmd = {"cmd":message}
#print("Send", message, {"cmd":message})
response = self._socket.transmit(message)
print("Resp:", response)
def test():
print("Test Sunrise Demo")
    d = DisplayRun("Sunrise", _sunrise_demo)
if __name__ == "__main__":
test()
|
webbhm/GBE-Digital
|
python/Display_Run.py
|
Display_Run.py
|
py
| 1,932 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4669064041
|
from Bio.Seq import Seq
with open('rosalind_ba1b.txt') as file:
text = file.readline().rstrip()
k = int(file.readline().rstrip())
def get_pattern_count_dict(text, length=3):
pattern_dict = {}
seq = Seq(text)
for i in range(len(text) - length + 1):
pattern = text[i:i + length]
if pattern in pattern_dict:
continue
count = seq.count_overlap(pattern)
pattern_dict[pattern] = count
return pattern_dict
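# Note: Seq.count_overlap counts overlapping occurrences,
# e.g. Seq("AAAA").count_overlap("AA") == 3, where plain str.count gives 2.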
def get_most_freq_patterns(text, length=3):
    counts = get_pattern_count_dict(text, length)
    counts = sorted(counts.items(), key=lambda x: x[1], reverse=True)
    most_frequent_k_mers = [counts[0][0]]
    for i in range(1, len(counts)):
        if counts[i][1] != counts[i - 1][1]:
            break
        most_frequent_k_mers.append(counts[i][0])
return most_frequent_k_mers
k_mers = get_most_freq_patterns(text, k)
output = ' '.join(k_mers)
print(output)
with open('output.txt', 'w') as file:
file.write(output)
|
Partha-Sarker/Rosalind-Problems
|
Lab Assignment - 1/chapter 1/ba1b Find the Most Frequent Words in a String.py
|
ba1b Find the Most Frequent Words in a String.py
|
py
| 987 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2207915794
|
import unittest
from util.env_pool import *
class TestEnvPool(unittest.TestCase):
def setUp(self):
self.ep = EnvPool("Pong", 2)
def tearDown(self):
self.ep.close()
def test_reset_state(self):
obs = self.ep.reset()
obs_, reward, done, info = self.ep.step([1, 2])
self.ep.reset_state()
obs = self.ep.reset()
obs_, reward, done, info = self.ep.step([1, 2])
if __name__ == '__main__':
unittest.main()
|
Seraphli/gym-rl
|
test/test_env_pool.py
|
test_env_pool.py
|
py
| 475 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33378605635
|
"""
1. construct the head by taking all the starting string before first *, then check if there is a string that can satisfy all
2. construct tail using strings after last *
3. take all leftovers (between first * and last *) and put them, in any order of rules, from left to right.
4. Profit?
"""
if __name__ == "__main__":
T = int(input())
for i in range(1, T + 1):
N = int(input())
patterns = []
for j in range(N):
patterns.append(input())
heads = []
tails = []
mids = []
for pattern in patterns:
heads.append(pattern[:pattern.find("*")])
tails.append(pattern[pattern.rfind("*") + 1:])
mids.extend(pattern[pattern.find("*") + 1 : pattern.rfind("*")].split("*"))
max_head = max(heads, key = lambda s: len(s))
max_tail = max(tails, key=lambda s: len(s))
for head in heads:
if len(max_head) > 0 and len(head) > 0 and max_head.find(head) != 0:
print("Case #{}: *".format(i))
break
else:
for tail in tails:
if len(max_tail) > 0 and len(tail) > 0 and max_tail.rfind(tail) + len(tail) != len(max_tail):
print("Case #{}: *".format(i))
break
else:
res = ''.join([max_head] + mids + [max_tail])
print("Case #{}: {}".format(i, res))
|
shstan/codejam_1a_2020
|
pattern_matching.py
|
pattern_matching.py
|
py
| 1,428 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30916279052
|
import smtplib
file = "students.txt"
students = {}
with open(file, "r") as f:
for line in f:
data = line.strip().split(",")
email = data[0]
name = data[1]
surname = data[2]
points = int(data[3])
if len(data) > 4:
grade = int(data[4])
status = data[5]
else:
grade = None
status = None
students[email] = {"name": name, "surname": surname, "points": points, "grade": grade, "status": status}
for email, student in students.items():
if student["status"] != "GRADED" and student["status"] != "MAILED":
if student["points"] >= 90:
student["grade"] = 5
elif student["points"] >= 75:
student["grade"] = 4
elif student["points"] >= 60:
student["grade"] = 3
else:
student["grade"] = 2
student["status"] = "GRADED"
def add_student(email, name, surname, points):
if email in students:
print("Student with this email already exists")
else:
students[email] = {"name": name, "surname": surname, "points": points, "grade": None,
"status": None}
print("Student added successfully")
def remove_student(email):
if email in students:
del students[email]
print("Student removed successfully")
else:
print("Student with this email does not exist")
def send_email(email, grade):
if email in students and students[email]["status"] != "MAILED":
smtp_server = "smtp.gmail.com"
port = 587
sender_email = "[email protected]"
receiver_email = email
password = "pjatk123"
message = f"Subject: Your grade\n\nDear {students[email]['name']},\n\nYour grade for the Python Programming course has been calculated. You received a {grade}.\n\nBest regards,\nYour teacher"
with smtplib.SMTP(smtp_server, port) as server:
server.starttls()
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
students[email]["status"] = "MAILED"
print("Email sent successfully")
elif email in students:
print("Email already sent to this student")
else:
print("Student with this email does not exist")
with open(file, "w") as f:
for email, student in students.items():
data = [email, student["name"], student["surname"], str(student["points"])]
if student["grade"] != None:
data.append(str(student["grade"]))
data.append(student["status"])
f.write(",".join(data) + "\n")
|
opaciorkowski/ppy5
|
main.py
|
main.py
|
py
| 2,649 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13389188618
|
from __future__ import print_function
import torch
import torchvision
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
class Vgg16c(torch.nn.Module):
def __init__(self):
super(Vgg16c, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=True).features
modified_pretrained = nn.Sequential(*list(vgg_pretrained_features.children())[:-1])
for param in modified_pretrained.parameters():
param.requires_grad = False
self.features = modified_pretrained
self.up = nn.PixelShuffle(2)
self.filter1a = NL_Conv3(ksize=3, in_ch=8, out_ch=8)
self.filter1b = NL_Conv3(ksize=3, in_ch=16, out_ch=4)
self.filter1c = NL_Conv3(ksize=3, in_ch=20, out_ch=16)
self.filter3a = NL_Conv3N(ksize=3, in_ch=8, out_ch=8)
self.filter3b = NL_Conv3N(ksize=3, in_ch=16, out_ch=4)
self.filter3c = NL_Conv3N(ksize=3, in_ch=20, out_ch=16)
self.sk1 = nn.Conv2d(512, 8, 1)
self.sk2 = nn.Conv2d(8, 512, 1)
self.classifier2 = nn.Conv2d(512, 8, 1)
self.skclassifier2 = nn.Conv2d(8, 256, 1)
self.classifier3 = nn.Conv2d(256, 8, 1)
self.skclassifier3 = nn.Conv2d(8, 128, 1)
self.classifier4 = nn.Conv2d(128, 8, 1)
self.skclassifier4 = nn.Conv2d(8, 64, 1)
self.classifier5 = nn.Conv2d(64, 1, 1, 1)
self.c1 = nn.Sequential(*list(vgg_pretrained_features.children())[:-8])
self.c2 = nn.Sequential(*list(vgg_pretrained_features.children())[:-15])
self.c3 = nn.Sequential(*list(vgg_pretrained_features.children())[:-22])
self.c4 = nn.Sequential(*list(vgg_pretrained_features.children())[:-27])
def nlcn(self,x):
x1 = self.filter1a(x) # 8
x1t = torch.cat((x, x1), dim=1) # 16
x1 = self.filter1b(x1t) # 4
x1t = torch.cat((x1t, x1), dim=1) # 20
x1 = self.filter1c(x1t) # 16
# x1t = torch.cat((x1t,x1),dim=1) # 16
# x1 = self.up(x1t) #4
x2 = self.filter3a(x) # 8
x2t = torch.cat((x, x2), dim=1) # 16
x2 = self.filter3b(x2t) # 4
x2t = torch.cat((x2t, x2), dim=1) # 20
x2 = self.filter3c(x2t) # 16
# x2t = torch.cat((x2t,x2),dim=1) # 16
# x2 = self.up(x2t) #4
x = torch.cat((x1, x2), dim=1) # 32
x = self.up(x) # 8
return x
def forward(self, x):
xc1 = self.c1(x)
xc2 = self.c2(x)
xc3 = self.c3(x)
xc4 = self.c4(x)
# print('xc1:',xc1.shape)
# print('xc2:',xc2.shape)
# print('xc3:',xc3.shape)
# print('xc4:',xc4.shape)
# print("........")
# print('Input:',x.shape)
x = self.features(x)
# print('Features:',x.shape)
x = self.sk1(x)
# print('after sk1:',x.shape)
x = self.nlcn(x)
# print('after nlcn:',x.shape)
x = self.sk2(x)
# print('after sk2:',x.shape)
# x = self.classifier1(x)
x = self.classifier2(x + xc1)
x = self.nlcn(x)
# print('after classifier2(xc1 added) and nlcn:',x.shape)
x = self.skclassifier2(x)
# print('after skclassifier2:',x.shape)
x = self.classifier3(x + xc2)
x = self.nlcn(x)
# print('after classifier3(xc2 added) and nlcn:',x.shape)
x = self.skclassifier3(x)
# print('after skclassifier3:',x.shape)
x = self.classifier4(x + xc3)
x = self.nlcn(x)
# print('after classifier4(xc3 added) and nlcn:',x.shape)
x = self.skclassifier4(x)
# print('after skclassifier4:',x.shape)
x = self.classifier5(x + xc4)
# print('after classifier5(xc4 added) :',x.shape)
# return x1+x2
return x
class NL_Conv3(nn.Module):
"""NON LInear Convolution Layer"""
def __init__(self,ksize,in_ch,out_ch):
super(NL_Conv3, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch,ksize*ksize*in_ch*out_ch , kernel_size=ksize, padding=ksize//2, bias=False),
nn.ReLU()
)#ksize*ksize*out_ch*in_ch
self.ksize= ksize
self.in_ch= in_ch
self.out_ch= out_ch
self.por= ksize*ksize*in_ch
def forward(self, x):
dims=x.shape
xc=torch.clone(x) # Initialize xc as several copy of x
for i in range(self.ksize*self.ksize-1):
xc=torch.cat((xc,x),dim=1)
ind=0
for i in range(-(self.ksize//2),self.ksize//2+1):
for j in range(-(self.ksize//2),self.ksize//2+1):
# tmp=x.roll(i,-1).roll(j,-2).view(dims[0],1,dims[2],dims[3])
# xc[:,ind,:,:]=tmp[:,0,:,:]
xc[:,ind*self.in_ch:(ind+1)*self.in_ch,:,:]=\
x.roll(i,-1).roll(j,-2).view(dims[0],self.in_ch,dims[2],dims[3])\
[:,0:self.in_ch,:,:]
ind=ind+1
w=self.conv(x)+.0001
# out=torch.clone(xc).narrow(1,0,self.out_ch)
out=torch.empty(dims[0],self.out_ch,dims[2],dims[3]).to(xc.device)
for i in range(self.out_ch):
w_por=w[:,i*self.por:(i+1)*self.por,:,:]
w_sum=torch.sum(w_por,dim=1).view(-1,1,dims[2],dims[3])
w_norm=w_por/w_sum # normalization along Dim=1
xp=w_norm*xc
x1=torch.sum(xp,dim=1).view(-1,1,dims[2],dims[3])
out[:,i:i+1,:,:]=x1.view(-1,1,dims[2],dims[3])
return out
class NL_Conv3N(nn.Module):
"""NON LInear Convolution Layer"""
def __init__(self,ksize,in_ch,out_ch):
super(NL_Conv3N, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch,ksize*ksize*out_ch*in_ch , kernel_size=ksize, padding=ksize//2, bias=False)
# nn.Hardtanh()
)#ksize*ksize*out_ch*in_ch
self.ksize = ksize
self.in_ch = in_ch
self.out_ch = out_ch
self.por = ksize*ksize*in_ch
def forward(self, x):
dims=x.shape
xc=torch.clone(x) # Initialize xc as several copy of x
for i in range(self.ksize*self.ksize-1):
xc=torch.cat((xc,x),dim=1)
ind=0
for i in range(-(self.ksize//2),self.ksize//2+1):
for j in range(-(self.ksize//2),self.ksize//2+1):
xc[:,ind*self.in_ch:(ind+1)*self.in_ch,:,:]=\
x.roll(i,-1).roll(j,-2).view(dims[0],self.in_ch,dims[2],dims[3])\
[:,0:self.in_ch,:,:]
ind=ind+1
w=self.conv(x)
w=torch.sign(w)*(torch.abs(w)+.0001)
out=torch.empty(dims[0],self.out_ch,dims[2],dims[3]).to(xc.device)
for i in range(self.out_ch):
w_por=w[:,i*self.por:(i+1)*self.por,:,:]
w_sum=torch.sum(torch.abs(w_por),dim=1).view(-1,1,dims[2],dims[3])
w_norm=w_por/w_sum # normalization along Dim=1
xp=w_norm*xc
x1=torch.sum(xp,dim=1).view(-1,1,dims[2],dims[3])
out[:,i:i+1,:,:]=x1.view(-1,1,dims[2],dims[3])
return out
|
jhilikb/NLBM
|
model/vgg_nlbm_cuhk.py
|
vgg_nlbm_cuhk.py
|
py
| 7,190 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43263411443
|
def plot_data_with_fit(data, fit_curve, format_x, format_y):
    import matplotlib.pyplot as plt
    plt.title('Final Curve Plot')
    # format_x and format_y are assumed here to be axis-label strings
    plt.xlabel(format_x)
    plt.ylabel(format_y)
    plt.scatter(data[0], data[1], label='Data', s=1)
    plt.plot(fit_curve[0], fit_curve[1], 'blue')
    return plt.show()
|
UW-ParksidePhysics/Delgado-Omar
|
plot_data_with_fit.py
|
plot_data_with_fit.py
|
py
| 352 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42360574813
|
# Using Python 3
# https://open.kattis.com/problems/mountainbiking
from math import pow
from math import cos
from math import radians
from math import sqrt
N, gravity = input().split(' ')
N = int(N)
gravity = float(gravity)
seg = []
for _ in range(N):
dist, angle = input().split()
dist = float(dist)
angle = float(angle)
accel = gravity * cos(radians(angle))
seg.append((dist, angle, accel))
for i in range(N):
speed = 0.0
for dist, angle, accel in seg[i:]:
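        # Constant-acceleration kinematics per segment: v_f^2 = v_0^2 + 2*a*d.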
speed = sqrt((dist * 2 * accel) + pow(speed,2))
print(speed)
|
Resethel/Kattis
|
Problems/mountainbiking/Python3/mountainbiking.py
|
mountainbiking.py
|
py
| 569 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26008064699
|
import json
from pathlib import Path
def get_average_mark_student(student):
overall_mark = 0
for mark in student:
if mark in subjects:
overall_mark += student[mark]
student['average'] = overall_mark / len(subjects)
return student # return student report card with added average mark
# add mark for subject to dictionary of individual subject marks
def get_average_mark_subject(student):
for mark in student:
if mark in subjects:
if mark in subject_marks:
subject_marks[mark] = subject_marks.get(mark) + student[mark]
else:
subject_marks[mark] = student[mark]
# add average mark for subject to dictionary of individual grade marks
def get_average_mark_grade(student):
student_grade = student.get('grade')
if student_grade in grade_marks:
grade_marks[student_grade] = grade_marks.get(
student_grade) + student.get('average')
else:
grade_marks[student_grade] = student.get('average')
files = Path('./students').glob('*')
subjects = ['math', 'science', 'history', 'english', 'geography']
report_cards = []
subject_marks = {}
grade_marks = {}
average_grade = 0
for file in files: # iterate through all files
with open(file, 'r') as f: # open file
data = json.load(f) # load data
# append student report card to list of report cards
report_cards.append(get_average_mark_student(data))
for card in report_cards: # iterate through report cards
# add mark to dictionary of individual subject's marks
get_average_mark_subject(card)
# add student average mark to dictionary of individual grade's marks
get_average_mark_grade(card)
# add student average to the running total of average marks
average_grade += card.get('average')
# find the student card with the lowest average mark
worst_student = min(report_cards, key=lambda card: card['average'])
# find the student card with the highest average mark
best_student = max(report_cards, key=lambda card: card['average'])
print(f'''
Average Student Grade: {(average_grade / len(report_cards)):.2f}
Hardest Subject: {min(subject_marks, key=subject_marks.get)}
Easiest Subject: {max(subject_marks, key=subject_marks.get)}
Best Performing Grade: {max(grade_marks, key=grade_marks.get)}
Worst Performing Grade: {min(grade_marks, key=grade_marks.get)}
Best Student ID: {best_student['id']}
Worst Student ID: {worst_student['id']}
''')
|
1lubo/Student-Performance
|
main.py
|
main.py
|
py
| 2,482 |
python
|
en
|
code
| 0 |
github-code
|
6
|
580231828
|
# Write your own iterator (implementing both __next__ and __iter__) so that,
# when traversed in a loop, it yields only the elements at even indexes,
# squared.
class MyIterator:
def __init__(self, collection, cursor=-1):
self._collection = collection
self._cursor = cursor
def __iter__(self):
return self
def __next__(self):
if self._cursor >= len(self._collection):
raise StopIteration
if self._cursor % 2 == 0:
res = self._collection[self._cursor] ** 2
self._cursor += 1
return res
self._cursor += 1
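        # Odd indexes fall through here and return None implicitly;
        # the loop below relies on that and skips the None values.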
collection_1 = MyIterator([1, 2, 3, 4, 5])
for i in collection_1:
if i is None:
continue
else:
print(i)
|
MrDumper/Roma
|
14.2HW.py
|
14.2HW.py
|
py
| 874 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
15757093517
|
from flask import Flask, request, abort
import os
import face_detect as f # face_detect.py
import base64
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
    MessageEvent, TextMessage, TextSendMessage, ImageMessage  # ImageMessage added
)
app = Flask(__name__)
# Get credentials from environment variables
YOUR_CHANNEL_ACCESS_TOKEN = os.environ["YOUR_CHANNEL_ACCESS_TOKEN"]
YOUR_CHANNEL_SECRET = os.environ["YOUR_CHANNEL_SECRET"]
line_bot_api = LineBotApi(YOUR_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(YOUR_CHANNEL_SECRET)
@app.route("/")
def hello_world():
return "hello world!"
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    # For text messages, echo the text straight back
push_text = event.message.text
line_bot_api.reply_message(event.reply_token, TextSendMessage(text=push_text))
@handler.add(MessageEvent, message=ImageMessage)
def handle_image_message(event):
    push_img_id = event.message.id  # ID of the posted image
    message_content = line_bot_api.get_message_content(push_img_id)  # fetch the image auto-saved on the LINE server
    push_img = b""
    for chunk in message_content.iter_content():
        push_img += chunk  # accumulate the image bytes chunk by chunk
    push_img = base64.b64encode(push_img)  # base64-encode before sending to the API
msg = f.face_detect(push_img)
line_bot_api.reply_message(event.reply_token, TextSendMessage(text=msg))
if __name__ == "__main__":
# app.run()
port = int(os.getenv("PORT"))
app.run(host="0.0.0.0", port=port)
|
kentamseisyou/myahutest
|
main.py
|
main.py
|
py
| 2,201 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28075967300
|
import numpy as np
import matplotlib
matplotlib.use("Qt5Agg")
print("BACKEND: ", matplotlib.get_backend())
from matplotlib import pyplot as plt
import utility as ut
import network as nt
from tqdm import tqdm as tqdm
import plot as pt
delta_T = 1e-3
# bars
spiking_input = False
dim = 8
n_outputs = 2*dim
n_inputs = dim*dim
r_net = 2 # 0.5
m_k = 1.0/n_outputs
X = ut.generate_bars(10000, dim, dim, p=1.7/8.0)
X = np.reshape(X, (-1, dim*dim))
if spiking_input:
X = X * 70.0 + 20.0
X_spikes = ut.generate_spike_trains(X, 1000, delta_T=delta_T)
else:
X_spikes = ut.generate_constant_trains(X, 1000, delta_T=delta_T)
"""
# visualize spike trains
test_spikes = list(X_spikes)[0]
pt.plot_spiketrain(test_spikes, delta_T, tmax=2)
plt.show()
"""
net = nt.BinaryWTANetwork(n_inputs=n_inputs, n_outputs=n_outputs,
delta_T=delta_T, r_net=r_net, m_k=m_k, eta_v=1e2, eta_b=1e5)
# train
from plot import WeightPCAPlotter, WeightPlotter
pca_plotter = WeightPCAPlotter(X, np.zeros(X.shape[0]), n_outputs, [0, 0], annotations=True)
weights_plotter = WeightPlotter(ut.sigmoid(net._V).reshape((-1, dim, dim)))
from collections import deque
average_length_likelihood = 500
pbar = tqdm(enumerate(X_spikes))
for batch_index, sample_batch in pbar:
# update figure here
log_likelihoods = deque([])
for sample in sample_batch:
net.step(sample)
# log likelihood
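        # Uniform mixture of K Bernoulli experts:
        #   log L(y) = log(1/K) + log sum_k prod_i pi_ki^y_i * (1 - pi_ki)^(1 - y_i)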
Ak = np.sum(np.log(1+np.exp(net._V)), -1)
pi = ut.sigmoid(net._V)
log_likelihoods.append(np.log(1.0/n_outputs) + np.log(np.sum(np.prod(sample * pi + (1-sample) * (1-pi), axis=-1))))
if len(log_likelihoods) > average_length_likelihood:
log_likelihoods.popleft()
weights = ut.sigmoid(net._V)
pca_plotter.update(weights)
weights_plotter.update(weights)
pbar.set_description(f'<sigma(V)> = {np.mean(weights):.4f}, <b> = {np.mean(net._b):.4f}, <L(y)> = {np.mean(log_likelihoods)}')
|
zimmerrol/spiking-bayesian-networks
|
bars_binary.py
|
bars_binary.py
|
py
| 1,960 |
python
|
en
|
code
| 6 |
github-code
|
6
|
13415308592
|
import time
import numpy as np
from onnxruntime import InferenceSession
import cv2
# Load the ONNX model
sess = InferenceSession('output.onnx')
image = cv2.imread('38.jpg')
image=cv2.resize(image,(1024,512))
cv2.normalize(image,image,0,255,cv2.NORM_MINMAX)
#print(image)
image=image.transpose((2,0,1))
img=np.array(image).astype('float32')
# Prepare the input
inputname=sess.get_inputs()[0].name
outputname=sess.get_outputs()[0].name
# Run model inference
start = time.time()
ort_outs = sess.run(None, input_feed={inputname: img[None, :, :, :]})
pred = ort_outs[0].astype('uint8')
print(pred.flatten())
#ret,thres = cv2.threshold(array,1,255,cv2.THRESH_BINARY)
# squeeze the batch/channel axes so cv2.imshow gets an HxW image
# (assumes the model emits a single-channel mask)
cv2.imshow('t', pred.squeeze())
cv2.waitKey()
end = time.time()
|
Tommy-Bie/Logistics-Package-Separation-Software
|
DatasetUtils/test.py
|
test.py
|
py
| 763 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38029553646
|
# Score / probability views
from rest_framework import generics, filters, status
from rest_framework.response import Response
from ..models import User, Score, Probability, Question, History, Detail
from ..serializer.results import ScoreSerializer, ProbabilitySerialzer
from ..serializer.history import HistorySerializer, DetailSerializer
class CalcProbability(generics.CreateAPIView):
"""
    Compute the probability of developing the disease within 5/10 years
"""
serializer_class = ScoreSerializer
def smoking_status(self):
answers = self.request.data['answers']
# print(answers)
if (answers['smoking'] == 1):
smoking = 'NEVER'
else:
if (answers['packYear'] == 3):
smoking = 'HEAVY'
else:
smoking = 'LIGHT'
return smoking
def get_queryset(self):
query_set = Score.objects.filter(smoke=self.smoking_status())
return query_set
def create(self, request, *args, **kwargs):
        # compute the score, then look up the probability
answers = request.data['answers']
queryset = self.filter_queryset(self.get_queryset())
score = 0
year = 'five'
prob_dict = {}
for query in queryset.iterator():
serializer = ScoreSerializer(query)
for k, v in answers.items():
if (serializer.data['questionid'] == k and serializer.data['choice'] == v):
score += serializer.data['score']
# print(serializer.data)
prob_queryset = Probability.objects.filter(
year=year, smoke=self.smoking_status(), point=score)
prob_serializer = ProbabilitySerialzer(
prob_queryset, many=True)
prob_dict[k] = prob_serializer.data[0]['probability']
prob_queryset = Probability.objects.filter(
year=year, smoke=self.smoking_status(), point=score)
prob_serializer = ProbabilitySerialzer(prob_queryset, many=True)
        # store the result
userid = request.data['userid']
user = User.objects.get(userid=userid)
history_serializer = HistorySerializer()
history = history_serializer.create(validated_data={
'smoke': self.smoking_status(),
'probability': prob_serializer.data[0]['probability'],
'userid': user
})
# print({
# 'smoke': self.smoking_status(),
# 'probability': prob_serializer.data[0]['probability'],
# 'userid': user
# })
history = HistorySerializer(history)
# print(history.data)
        # store the answers
detail_serializer = DetailSerializer()
for k, v in answers.items():
question = Question.objects.get(questionid=k)
item = detail_serializer.create(validated_data={
'pollid_id': history.data['pollid'],
'choice': v,
'questionid': question,
'probability': prob_dict[k]})
return Response({
"smoking": self.smoking_status(),
"probability": prob_dict,
"pollid": history.data['pollid']
})
|
Frank-LSY/LungCancerModel
|
back/lungcancer/polls/view/results.py
|
results.py
|
py
| 3,205 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74286785467
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeTwoLists(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
        res=ListNode() # dummy head node (val 0, next None)
        cur=res # pointer that walks and builds the merged list
while l1 and l2:
if l1.val<l2.val:
cur.next=l1
l1=l1.next
else:
cur.next=l2
l2=l2.next
cur=cur.next
        if l1: # if one list still has nodes, link the whole remainder in once
            cur.next=l1
        if l2:
            cur.next=l2
        return res.next # skip the dummy head created at the start
|
aameen07/Leetcode_Solutions
|
0021-merge-two-sorted-lists/0021-merge-two-sorted-lists.py
|
0021-merge-two-sorted-lists.py
|
py
| 1,118 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20206845666
|
def eelarve(külalised):
rent = 55
    summa = külalised * 10 + rent
return summa
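# Budget model: 10 EUR per guest plus the fixed 55 EUR rent; the maximum budget
# counts the undecided ("?") invitees, the minimum counts confirmed ("+") only.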
tulijad = 0
ma_ei_tea_inimesed = 0
file = open(input("Sisestage failinimi: "), "r")
for line in file:
for char in line:
if char == "+":
tulijad += 1
elif char == "?":
ma_ei_tea_inimesed += 1
kokku_inimesed = tulijad + ma_ei_tea_inimesed
print("Kutsutud on", kokku_inimesed, "inimest")
print(tulijad, "inimest tuleb")
print("Maksimaalne eelarve:", eelarve(kokku_inimesed), "EUR")
print("Minimaalne eelarve:", eelarve(tulijad), "EUR")
|
Ax-SylvesterHommuk/proge_alused
|
Praks 1/7.4 Täiendatud peo eelarve.py
|
7.4 Täiendatud peo eelarve.py
|
py
| 584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71484315388
|
N, K = map(int, input().split())
mod = 10**9+7
def inv(x):
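    # Fermat's little theorem: for prime mod, x**(mod-2) % mod is the modular inverse of x.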
return pow(x, mod-2, mod)
def nCk(n, k):
ret = 1
for i in range(k):
ret *= n-i
ret %= mod
ret *= inv(i+1)
ret %= mod
return ret
def nHk(n, k):
return nCk(n+k-1, k-1)
if N <= K:
gs, gl = K % N, N - K % N
K = min(gs, gl)
print(nCk(N, K))
else:
print(nHk(K, N))
|
knuu/competitive-programming
|
atcoder/arc/arc039_b.py
|
arc039_b.py
|
py
| 387 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70267336189
|
import config
from epyk.core.Page import Report
# Create a basic report object
page = Report()
page.ui.text("#This is a text", options={"markdown": True})
page.ui.button("This is a test").click([
page.js.alert("test")
])
page.outs.publish(server="node", app_path=config.OUTPUT_PATHS_LOCALS_TS, module=config.OUT_FILENAME)
|
epykure/epyk-templates
|
web/app_nodejs.py
|
app_nodejs.py
|
py
| 328 |
python
|
en
|
code
| 17 |
github-code
|
6
|
26377974734
|
def partial_sums(*numbs):
numbs = list(numbs)
if numbs == []:
return [0]
default = [0, numbs[0]]
if len(numbs) == 1:
return default
result = []
for i in range(2, len(numbs) + 1):
result.append(sum(numbs[:i]))
result = default + result
return result
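# Example: partial_sums(1, 2, 3) -> [0, 1, 3, 6]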
|
tatanaratko/python
|
Yandex.Lyceum/partial_sums.py
|
partial_sums.py
|
py
| 309 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3578850080
|
filepath = 'input.txt'
lines = []
with open(filepath) as fp:
line = fp.readline()
while line:
lines.append(line)
line = fp.readline()
print(len(lines))
total = 0
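# Fuel for a mass m is floor(m/3) - 2; the inner while also adds fuel for the
# fuel itself, repeating until the requirement drops to zero or below.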
for n in lines:
num = int(n)
num = int(num/3)
num = num - 2
if(int(num/3) -2 > 0):
test = int(num/3) -2
while(test > 0):
total += test
test = int(test/3) - 2
total = total + num
print(total)
|
Sami1309/adventofcode
|
day1.py
|
day1.py
|
py
| 440 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7074661101
|
import pandas as pd
import pandas_datareader as web
import matplotlib.pyplot as plt
import datetime as dt
start = dt.datetime(2021,1,1)
end = dt.datetime.now()
ticker_symbol = input('Enter the stock ticker which you wish to analyse: ')
data = web.DataReader(ticker_symbol, 'yahoo', start, end)
#print(data)
delta = data['Adj Close'].diff(1) #Calculate difference to the day before that
delta.dropna(inplace = True) # Keep the DataFrame with valid entries in the same variable.
positive = delta.copy()
negative = delta.copy()
positive[positive < 0] = 0
negative[negative > 0] = 0
days = 14 # Standard, but can be lowered to increase sensitivity or raised to decrease sensitivity.
average_gain = positive.rolling(window = days).mean()
average_loss = abs(negative.rolling(window = days).mean())
relative_strength = average_gain/average_loss
RSI = 100.0 - (100.0 / (1.0 + relative_strength)) # Formula
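# RS = average gain / average loss over the window; RSI = 100 - 100/(1 + RS),
# which maps RS in [0, inf) onto a bounded [0, 100] oscillator.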
combined = pd.DataFrame()
combined['Adj Close'] = data['Adj Close']
combined['RSI'] = RSI
plt.figure(figsize=(12,8))
ax1 = plt.subplot(211) # subplot(nrows, ncols, plot_number) hence nrows=2, ncols=1, plot_number=1
ax1.plot(combined.index, combined['Adj Close'], color = 'lightgray')
ax1.set_title("{} Adjusted Close Price".format(ticker_symbol), color = 'white')
ax1.grid(True, color = "#555555")
ax1.set_axisbelow(True)
ax1.set_facecolor('black')
ax1.figure.set_facecolor('#121212')
ax1.tick_params(axis = 'x', colors = 'white')
ax1.tick_params(axis = 'y', colors = 'white')
# RSI Values of 70 or above indicate an overbought or overvalued condition.
# RSI Values of 30 or below indicates an oversold or undervalued condition.
ax2 = plt.subplot(212, sharex = ax1) # Share same x axis.
ax2.plot(combined.index, combined['RSI'], color = 'lightgray')
ax2.axhline(0, linestyle='--',alpha=0.5, color = '#ff0000')
ax2.axhline(10, linestyle='--',alpha=0.5, color = '#ffaa00')
ax2.axhline(20, linestyle='--',alpha=0.5, color = '#00ff00')
ax2.axhline(30, linestyle='--',alpha=0.5, color = '#cccccc')
ax2.axhline(70, linestyle='--',alpha=0.5, color = '#cccccc')
ax2.axhline(80, linestyle='--',alpha=0.5, color = '#00ff00')
ax2.axhline(90, linestyle='--',alpha=0.5, color = '#ffaa00')
ax2.axhline(100, linestyle='--',alpha=0.5, color = '#ff0000')
ax2.set_title('{} RSI Value'.format(ticker_symbol), color = 'white')
ax2.grid(False)
ax2.set_axisbelow(True)
ax2.set_facecolor('black')
ax2.tick_params(axis = 'x', colors = 'white')
ax2.tick_params(axis = 'y', colors = 'white')
plt.show()
|
amanpanditap/Python_Projects
|
finance_python/technical_stock_analysis/technical_stock_analysis.py
|
technical_stock_analysis.py
|
py
| 2,507 |
python
|
en
|
code
| 3 |
github-code
|
6
|
72231186747
|
from __future__ import print_function, division, unicode_literals
import os
import yaml
from pymatgen.io.vasp.inputs import Kpoints, Incar
from pymatgen.io.vasp.outputs import Vasprun
import twod_materials.utils as utl
from pymatgen.matproj.rest import MPRester
from monty.serialization import loadfn
import twod_materials
PACKAGE_PATH = twod_materials.__file__.replace('__init__.pyc', '')
PACKAGE_PATH = PACKAGE_PATH.replace('__init__.py', '')
PACKAGE_PATH = '/'.join(PACKAGE_PATH.split('/')[:-2])
try:
config_vars = loadfn(os.path.join(os.path.expanduser('~'), 'config.yaml'))
except Exception:
print('WARNING: No config.yaml file was found. please configure the '\
'config.yaml and put it in your home directory.')
# Still set them for testing purposes.
config_vars = loadfn(os.path.join(PACKAGE_PATH, 'config.yaml'))
if 'MP_API' in os.environ: # Also for testing purposes.
MPR = MPRester(os.environ['MP_API'])
else:
MPR = MPRester(config_vars['mp_api'])
VASP = config_vars['normal_binary']
VASP_2D = config_vars['twod_binary']
if 'queue_system' in config_vars:
QUEUE = config_vars['queue_system'].lower()
elif '/ufrc/' in os.getcwd():
QUEUE = 'slurm'
elif '/scratch/' in os.getcwd():
QUEUE = 'pbs'
class Calibrator():
def __init__(self, incar_dict, potcar_dict, n_kpts_per_atom=500,
ncores=1, nprocs=16, pmem='600mb', walltime='6:00:00',
binary='vasp'):
"""
Args:
incar_dict (dict): dictionary of all input parameters
used in the given framework.
potcar_dict (dict): dictionary of all species to be
calibrated and the potcar hashes used in the
given framework, e.g. {'Mo': 'pv', 'S': ''}.
n_kpts_per_atom (int): Create kpoints at specified
density per atom. Defaults to 500.
            ncores, nprocs, pmem, walltime, binary: runjob
parameters. Defaults established for a regular
sized job on hipergator.
"""
self._incar_dict = incar_dict
self._n_kpts_per_atom = n_kpts_per_atom
self._potcar_dict = potcar_dict
self._ncores = ncores
self._nprocs = nprocs
self._pmem = pmem
self._walltime = walltime
self._binary = binary
self._config = loadfn('/home/mashton/cal_config.yaml')
def prepare(self, submit=False):
"""
Set up calculation directories to calibrate
the ion corrections to match a specified framework of INCAR
parameters, k-points, and potcar hashes.
Args:
submit (bool): whether or not to submit each job
after preparing it.
"""
for elt in self._potcar_dict:
# Set up reference directory for the pure element.
if not os.path.isdir(elt):
os.mkdir(elt)
os.chdir(elt)
# Poscar
s = MPR.get_structure_by_material_id(
self._config['Mpids'][elt]['self']
)
s.to('POSCAR', 'POSCAR')
plines = open('POSCAR').readlines()
elements = plines[5].split()
# Kpoints
kp = Kpoints.automatic_density(s, self._n_kpts_per_atom)
kp.write_file('KPOINTS')
# Incar
incar = Incar.from_dict(self._incar_dict)
incar.write_file('INCAR')
# Potcar
utl.write_potcar(types=[self._potcar_dict[el] for el in elements])
# Runjob
if QUEUE == 'pbs':
utl.write_pbs_runjob('{}_cal'.format(elt), self._ncores,
self._nprocs, self._pmem, self._walltime,
self._binary)
submission_command = 'qsub runjob'
elif QUEUE == 'slurm':
utl.write_slurm_runjob('{}_cal'.format(elt), self._nprocs,
self._pmem, self._walltime,
self._binary)
submission_command = 'sbatch runjob'
if submit:
os.system(submission_command)
# Set up reference oxide compound subdirectory.
if elt not in ['O', 'S', 'F', 'Cl', 'Br', 'I']:
if not os.path.isdir('ref'):
os.mkdir('ref')
os.chdir('ref')
# Poscar
s = MPR.get_structure_by_material_id(
self._config['Mpids'][elt]['ref']
)
s.to('POSCAR', 'POSCAR')
plines = open('POSCAR').readlines()
elements = plines[5].split()
# Kpoints
kp = Kpoints.automatic_density(s, self._n_kpts_per_atom)
kp.write_file('KPOINTS')
# Incar
incar = Incar.from_dict(self._incar_dict)
incar.write_file('INCAR')
# Potcar
utl.write_potcar(
types=[self._potcar_dict[el] for el in elements])
# Runjob
                if QUEUE == 'pbs':
                    utl.write_pbs_runjob('{}_cal'.format(elt), self._ncores,
                                         self._nprocs, self._pmem,
                                         self._walltime, self._binary)
                    submission_command = 'qsub runjob'
                elif QUEUE == 'slurm':
                    utl.write_slurm_runjob('{}_cal'.format(elt), self._nprocs,
                                           self._pmem, self._walltime,
                                           self._binary)
                    submission_command = 'sbatch runjob'
if submit:
os.system(submission_command)
os.chdir('../')
os.chdir('../')
def get_corrections(self, parent_dir=os.getcwd(), write_yaml=False,
oxide_corr=0.708):
"""
Pulls the corrections to be added for each element.
Args:
parent_dir (str): path to parent directory containing
subdirectories created by prepare(). Defaults to cwd.
write_yaml (bool): whether or not to write the
corrections to ion_corrections.yaml and the mu0
values to end_members.yaml.
oxide_corr (float): additional correction added for oxygen
to get water's formation energy right.
Returns:
dict. elements as keys and their corrections as values,
in eV per atom, e.g. {'Mo': 0.135, 'S': -0.664}.
"""
mu0 = dict()
corrections = dict()
os.chdir(parent_dir)
special_cases = ['O', 'S', 'F', 'Cl', 'Br', 'I']
elts = [elt for elt in self._potcar_dict if elt not in special_cases]
# Add entropic correction for special elements (S * 298K)
specials = [elt for elt in self._potcar_dict if elt in special_cases]
for elt in specials:
os.chdir(elt)
vasprun = Vasprun('vasprun.xml')
composition = vasprun.final_structure.composition
n_formula_units = composition.get_integer_formula_and_factor()[1]
mu0[elt] = (
round(vasprun.final_energy / n_formula_units
+ self._config['OtherCorrections'][elt], 3)
)
os.chdir(parent_dir)
# Oxide correction from Materials Project
mu0['O'] += oxide_corr
for elt in elts:
os.chdir(elt)
vasprun = Vasprun('vasprun.xml')
composition = vasprun.final_structure.composition
n_formula_units = composition.get_integer_formula_and_factor()[1]
mu0[elt] = round(vasprun.final_energy / n_formula_units, 3)
# Nitrogen needs both kinds of corrections
if elt == 'N':
mu0[elt] -= 0.296
os.chdir(parent_dir)
for elt in elts:
os.chdir('{}/ref'.format(elt))
vasprun = Vasprun('vasprun.xml')
composition = vasprun.final_structure.composition
n_formula_units = composition.get_integer_formula_and_factor()[1]
fH_exp = self._config['Experimental_fH'][elt]
try:
fH_dft = vasprun.final_energy / n_formula_units
plines = open('POSCAR').readlines()
elements = plines[5].split()
stoichiometries = plines[6].split()
comp_as_dict = {}
for element in elements:
comp_as_dict[element] = 0
for i, element in enumerate(elements):
comp_as_dict[element] += int(stoichiometries[i])
n_elt_per_fu = (
int(comp_as_dict[elt]) / n_formula_units
)
for el in comp_as_dict:
fH_dft -= (
mu0[el] * int(comp_as_dict[el])
/ n_formula_units
)
corrections[elt] = round((fH_dft - fH_exp) / n_elt_per_fu, 3)
except UnboundLocalError:
corrections[elt] = 'Not finished'
os.chdir(parent_dir)
if write_yaml:
with open('ion_corrections.yaml', 'w') as icy:
icy.write(yaml.dump(corrections, default_flow_style=False))
with open('end_members.yaml', 'w') as emy:
emy.write(yaml.dump(mu0, default_flow_style=False))
return corrections
|
ashtonmv/twod_materials
|
twod_materials/pourbaix/startup.py
|
startup.py
|
py
| 9,663 |
python
|
en
|
code
| 18 |
github-code
|
6
|
23978857817
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def compute_dct_coeffs(blockSize):
T = np.zeros((blockSize, blockSize))
T[0, :] = np.sqrt(1.0/blockSize)
for i in range(1, blockSize):
for j in range(blockSize):
T[i][j] = np.sqrt(2.0/blockSize)*np.cos(np.pi*(2.0*j+1.0)*i/(2.0*blockSize))
return T
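# The rows of T are the orthonormal DCT-II basis vectors:
#   T[0][j] = sqrt(1/N), T[i][j] = sqrt(2/N) * cos(pi*(2j+1)*i/(2N)) for i >= 1,
# so T @ T.T == I and an 8x8 block B transforms as T @ B @ T.T.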
def viewing_dct_matrix(dct_matrix, out_dir):
fig, ax = plt.subplots()
ax.matshow(dct_matrix, cmap='viridis')
for (i, j), z in np.ndenumerate(dct_matrix):
if z < -0.35: # for better visualization when the colour is dark
ax.text(j, i, np.round(z,2), ha='center', va='center', color='white')
else:
ax.text(j, i, np.round(z,2), ha='center', va='center', color='black')
plt.title("The 64 DCT coefficients")
plt.savefig(out_dir+"dct_matrix.png")
def viewing_dct_for_a_random_selected_block(yDCT, crDCT, cbDCT, h_luma, w_luma, h_chroma, w_chroma, blockSize, out_dir):
xlabels=[0,1,2,3,4,5,6,7]
ylabels=[0,1,2,3,4,5,6,7]
nbh_luma = np.ceil(h_luma / blockSize)
nbw_luma = np.ceil(w_luma / blockSize)
i = np.random.randint(0, nbh_luma-1)
j = np.random.randint(0, nbw_luma-1)
row_ind_1 = i*blockSize
row_ind_2 = row_ind_1+blockSize
col_ind_1 = j*blockSize
col_ind_2 = col_ind_1+blockSize
fig=plt.figure(figsize=(15,7.5))
fig.suptitle("DCT for randoms selected Y,Cb,Cr blocks")
ax = fig.add_subplot(2,3,1)
plt.title('yDCT')
plt.imshow(yDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2],cmap='jet')
plt.colorbar(shrink=1)
ax.set_xticks(xlabels, xlabels)
ax.set_yticks(ylabels, ylabels)
ax = fig.add_subplot(234, projection='3d')
x, y = np.meshgrid(np.arange(blockSize), np.arange(blockSize)) # creating a 3D grid from the size of one block
ax.plot_surface(x, y, yDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2], cmap='jet') # drawing of the 3D-surface
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('yDCT Coefficient')
ax.set_xticks(xlabels, xlabels)
ax.set_yticks(ylabels, ylabels)
nbh_chroma = np.ceil(h_chroma / blockSize)
nbw_chroma = np.ceil(w_chroma / blockSize)
i = np.random.randint(0, nbh_chroma-1)
j = np.random.randint(0, nbw_chroma-1)
row_ind_1 = i*blockSize
row_ind_2 = row_ind_1+blockSize
col_ind_1 = j*blockSize
col_ind_2 = col_ind_1+blockSize
ax = fig.add_subplot(2,3,2)
plt.title('cbDCT')
plt.imshow(cbDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2],cmap='jet')
plt.colorbar(shrink=1)
ax.set_xticks(xlabels, xlabels)
ax.set_yticks(ylabels, ylabels)
ax = fig.add_subplot(2,3,3)
plt.title('crDCT')
plt.imshow(crDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2],cmap='jet')
plt.colorbar(shrink=1)
ax.set_xticks(xlabels, xlabels)
ax.set_yticks(ylabels, ylabels)
ax = fig.add_subplot(235, projection='3d')
ax.plot_surface(x, y, cbDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2], cmap='jet')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('cbDCT Coefficient')
ax.set_xticks(xlabels, xlabels)
ax.set_yticks(ylabels, ylabels)
ax = fig.add_subplot(236, projection='3d')
ax.plot_surface(x, y, crDCT[row_ind_1:row_ind_2, col_ind_1:col_ind_2], cmap='jet')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('crDCT Coefficient')
ax.set_xticks(xlabels, xlabels)
ax.set_yticks(ylabels, ylabels)
plt.savefig(out_dir+"dct_for_a_random_selected_block.png")
|
vince-robin/Image-compression
|
soft/functions/dct.py
|
dct.py
|
py
| 3,672 |
python
|
en
|
code
| 2 |
github-code
|
6
|
39839888743
|
#!/usr/bin/python3
"""
script that fetches https://alx-intranet.hbtn.io/status
"""
import urllib.request
with urllib.request.urlopen("https://alx-intranet.hbtn.io/status") as response:
data = response.read()
decoded = str(data.decode("utf-8"))
t = type(data)
p1 = "Body response:\n\t- type:"
p2 = "- content:"
p3 = "- utf8 content:"
print("{0} {1}\n\t{2} {3}\n\t{4} {5}".format(p1, t, p2, data, p3, decoded))
|
George-9/alx-higher_level_programming
|
0x11-python-network_1/0-hbtn_status.py
|
0-hbtn_status.py
|
py
| 446 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18886739040
|
import re
import ast
from tkinter import Tk, Button, Text, Scrollbar, END
from pathlib import Path
from retroperm.project import RetropermProject
from retroperm.rules import Rule
from retroperm.rules.filesystem_rule import FilesystemRule
from retroperm.rules.ban_library_function_rule import BanLibraryFunctionRule
from retroperm.rules.ban_category_rule import BanCategoryRule
# TEST_BINARIES = Path("test_binaries")
TEST_BINARIES = Path(__file__).parent.parent / "tests" / "executables"
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class VstTester:
def __init__(self):
self.retro_proj_clean = RetropermProject(TEST_BINARIES / "GoodArpeggiator.so.o")
self.retro_proj_mal = RetropermProject(TEST_BINARIES / "BadArpeggiator.so.o")
def iterprint(self, header: str, payload: dict):
result = bcolors.HEADER + header + bcolors.ENDC + "\n"
for key, v in payload.items():
if v.startswith("Failed"):
result += f'{bcolors.WARNING}{key}: {v}{bcolors.ENDC}\n'
else:
result += f'{bcolors.OKGREEN}{key}: {v}{bcolors.ENDC}\n'
return result
def eval_flow(self, proj: RetropermProject, header: str):
ban_filesystem = BanCategoryRule('filesystem')
ban_network = BanCategoryRule('network')
my_rule_good = FilesystemRule("/home/mahaloz/.global.bsconf", 'filename', is_whitelist=True, is_dir=False)
my_rule_bad = FilesystemRule("/etc/passwd", 'filename', is_whitelist=False, is_dir=False)
rule_list = [ban_filesystem, ban_network, my_rule_good, my_rule_bad]
proj.init_rules(rule_list, override_default=True)
output = proj.validate_rules()
result = self.iterprint(header, output)
result += "\n"
if output[ban_filesystem].startswith("Failed"):
resolved_data = self.retro_proj_mal.resolve_abusable_functions()
rfo = resolved_data['resolved_function_data']
match_list = ast.literal_eval(re.findall(r'\[.*\]', output[my_rule_bad])[0])
for match in match_list:
if match not in rfo:
continue
match_rfo = rfo[match]
vals = list(match_rfo.args_by_location.values())
result += f'{bcolors.OKCYAN}{str(vals)}{bcolors.ENDC}\n'
return result
def run_test(self):
resolved_data_clean = self.retro_proj_clean.resolve_abusable_functions()
resolved_data_mal = self.retro_proj_mal.resolve_abusable_functions()
results = []
results.append(self.eval_flow(self.retro_proj_clean, '`CleanVST` Rule Validation'))
results.append(self.eval_flow(self.retro_proj_mal, '`MalVST` Rule Validation'))
return "\n".join(results)
def run_test():
tester = VstTester()
result_text.delete(1.0, END)
result_text.insert(END, tester.run_test())
root = Tk()
root.title("VST Tester")
test_button = Button(root, text="Run Test", command=run_test)
test_button.pack()
result_text = Text(root, wrap="word", bg="white", fg="black")
result_text.pack(expand=True, fill="both")
scrollbar = Scrollbar(root, command=result_text.yview)
scrollbar.pack(side="right", fill="y")
result_text.config(yscrollcommand=scrollbar.set)
root.geometry("800x600")
root.mainloop()
|
SpiritSeal/retroperm
|
ui/gui2.py
|
gui2.py
|
py
| 3,484 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74918964666
|
import re
from collections import defaultdict
XMIN = -2
def find(rules,current):
if len(current) < 5:
return ""
if current in rules:
return rules[current]
elif len(current) == 5:
return "."
else:
size = len(current)
left=find(rules,current[0:size-1])
right=find(rules,current[size-5:])
rules[current] = left+right
return rules[current]
def read_file(file):
rules = defaultdict(lambda: ".")
rule_prog = re.compile("([.#]+) => ([.#])")
with open(file) as f:
lines = f.readlines()
state = lines[0].split(": ")[1].strip()
for line in lines[2:]:
m = rule_prog.match(line.strip())
rules[m.group(1)] = m.group(2)
return state,rules
def print_state(state):
print(state)
def sum_pots(state):
n = 0
for i,c in enumerate(state):
if c == "#":
n += i + XMIN
return n
def day12(file):
global XMIN
state,rules = read_file(file)
XMAX = len(state)+1
state = "..%s.." % state
sums = list()
i = 0
while len(sums) < 3 or sums[-1]-sums[-2] != sums[-2]-sums[-3]:
state = find(rules,"..%s.." % state)
if state[0] == "." and state[1] == "." and state[2] == "." and state[3] == ".":
state = state[2:]
XMIN += 2
if state[0] == "#" or state[1] == "#":
state = "..%s" % state
XMIN -= 2
if state[-1] == "#" or state[-2] == "#":
state = "%s.." % state
sums.append(sum_pots(state))
i += 1
diff = sums[-1]-sums[-2]
missing = 50000000000 - i
n = missing*diff + sums[-1]
return n
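# Hypothetical entry point (added for illustration; the input filename is an
# assumption, not part of the original solution):
# if __name__ == "__main__":
#     print(day12("input.txt"))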
|
aarroyoc/advent-of-code-2018
|
python/day12/day12_2.py
|
day12_2.py
|
py
| 1,670 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8747023453
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 13:34:12 2019
@author: ADMIN
"""
import AllFunctions as af
import pandas as pd
import numpy as np
import pandas_profiling
#import H2OHandler as hh
df=pd.read_csv('train.csv')
orgCC = df['cc_cons'].copy()
df['isTrain']=True
df2=pd.read_csv('test.csv')
df2['isTrain']=False
df2['cc_cons']=-1
fillCountMinPer = 50
idCols=['id']
distCatPer=2
onehotColumns=[]
pred_variable_type='regression'
target_variable = 'cc_cons'
TrainCleanVars={}
TrainCleanVars['dropCols']=[]
# Account Desc
dfDescT=af.getDFDesc(df)
dfDescT2=af.getDFDesc(df2)
df3=pd.concat([df,df2],ignore_index=True)
dfDescT3=af.getDFDesc(df3)
df=df3.reset_index(drop=True)
dfDescT=af.getDFDesc(df)
#profile = df.profile_report(title='Pandas Profiling Report')
#profile.to_file(output_file="output.html")
#rejected_variables = profile.get_rejected_variables(threshold=0.9)
# age has unusual values like 224, which are clearly invalid, so we cap all such values at 75
df.loc[df['age'] > 75, 'age'] = 75
# Many amount columns are skewed; let's take logs and profile the results
cols=['card_lim', 'cc_cons_apr',
'cc_cons_jun', 'cc_cons_may', 'cc_count_apr', 'cc_count_jun',
'cc_count_may', 'credit_amount_apr', 'credit_amount_jun',
'credit_amount_may', 'credit_count_apr', 'credit_count_jun',
'credit_count_may', 'dc_cons_apr', 'dc_cons_jun', 'dc_cons_may',
'dc_count_apr', 'dc_count_jun', 'dc_count_may', 'debit_amount_apr',
'debit_amount_jun', 'debit_amount_may', 'debit_count_apr',
'debit_count_jun', 'debit_count_may', 'emi_active',
'max_credit_amount_apr', 'max_credit_amount_jun',
'max_credit_amount_may']
#for col in cols:
# df[col]=np.log(df[col]+1)
#profile = df.profile_report(title='Pandas Profiling Report after amount log')
#profile.to_file(output_file="output_log.html")
"""import matplotlib.pyplot as plt
plt.matshow(df.corr())
plt.show()
f = plt.figure(figsize=(19, 15))
plt.matshow(df.corr(), fignum=f.number)
plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=45)
plt.yticks(range(df.shape[1]), df.columns, fontsize=14)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
plt.title('Correlation Matrix', fontsize=16)
f.savefig('CorrMatrix.png')
"""
"""columns=['personal_loan_active','personal_loan_closed','vehicle_loan_active', 'vehicle_loan_closed','investment_1', 'investment_2', 'investment_3', 'investment_4']
df[columns]=df[columns].fillna(0)
df['loan_enq']=df['loan_enq'].fillna('N')
dfDescT=af.getDFDesc(df)"""
TrainCleanVars['dropCols'].extend(idCols)
df.drop(columns=idCols,inplace=True)
print("Dropping cols as declared as id cols in config : ",idCols)
#Missing Value Imputation
# Many columns have missing values, especially the debit-related ones; we have to fill them using the data dictionary
df['cc_cons_highest'] = df[['cc_cons_apr','cc_cons_may','cc_cons_jun']].max(axis=1)
df['cc_cons_lowest'] = df[['cc_cons_apr','cc_cons_may','cc_cons_jun']].min(axis=1)
df['cc_cons_total'] = df[['cc_cons_apr','cc_cons_may','cc_cons_jun']].sum(axis=1)
df['cc_cons_average'] = df[['cc_cons_apr','cc_cons_may','cc_cons_jun']].mean(axis=1)
df['cc_cons_trans_avg']=df['cc_cons_total']/df[['cc_count_apr','cc_count_may','cc_count_jun']].sum(axis=1)
df['cc_cons_high_low_range']=df['cc_cons_highest']-df['cc_cons_lowest']
df['cc_cons_limit_crossed']=df['cc_cons_highest']>df['card_lim']
df['cc_cons_total_lim_ratio']=(df['cc_cons_total']/3)/df['card_lim']
"""df['dc_cons_highest'] = df[['dc_cons_apr','dc_cons_may','dc_cons_jun']].max(axis=1)
df['dc_cons_lowest'] = df[['dc_cons_apr','dc_cons_may','dc_cons_jun']].min(axis=1)
df['dc_cons_total'] = df[['dc_cons_apr','dc_cons_may','dc_cons_jun']].sum(axis=1)
df['dc_cons_average'] = df[['dc_cons_apr','dc_cons_may','dc_cons_jun']].mean(axis=1)
df['dc_cons_trans_avg']=df['dc_cons_total']/df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['dc_cons_high_low_range']=df['dc_cons_highest']-df['dc_cons_lowest']
df['debit_amount_highest'] = df[['debit_amount_apr','debit_amount_may','debit_amount_jun']].max(axis=1)
df['debit_amount_lowest'] = df[['debit_amount_apr','debit_amount_may','debit_amount_jun']].min(axis=1)
df['debit_amount_total'] = df[['debit_amount_apr','debit_amount_may','debit_amount_jun']].sum(axis=1)
df['debit_amount_average'] = df[['debit_amount_apr','debit_amount_may','debit_amount_jun']].mean(axis=1)
df['debit_amount_trans_avg']=df['debit_amount_total']/df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['debit_amount_high_low_range']=df['debit_amount_highest']-df['debit_amount_lowest']
df['credit_amount_highest'] = df[['credit_amount_apr','credit_amount_may','credit_amount_jun']].max(axis=1)
df['credit_amount_lowest'] = df[['credit_amount_apr','credit_amount_may','credit_amount_jun']].min(axis=1)
df['credit_amount_total'] = df[['credit_amount_apr','credit_amount_may','credit_amount_jun']].sum(axis=1)
df['credit_amount_average'] = df[['credit_amount_apr','credit_amount_may','credit_amount_jun']].mean(axis=1)
df['credit_amount_trans_avg']=df['credit_amount_total']/df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['credit_amount_high_low_range']=df['credit_amount_highest']-df['credit_amount_lowest']
df['max_credit_amount_highest'] = df[['max_credit_amount_apr','max_credit_amount_may','max_credit_amount_jun']].max(axis=1)
df['max_credit_amount_lowest'] = df[['max_credit_amount_apr','max_credit_amount_may','max_credit_amount_jun']].min(axis=1)
df['max_credit_amount_total'] = df[['max_credit_amount_apr','max_credit_amount_may','max_credit_amount_jun']].sum(axis=1)
df['max_credit_amount_average'] = df[['max_credit_amount_apr','max_credit_amount_may','max_credit_amount_jun']].mean(axis=1)
df['max_credit_amount_trans_avg']=df['max_credit_amount_total']/df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['max_credit_amount_high_low_range']=df['max_credit_amount_highest']-df['max_credit_amount_lowest']
df['cc_dc_cons_ratio'] = df['cc_cons_total'] / df['dc_cons_total']
df['credit_debit_ratio'] = df['credit_amount_total'] / df['debit_amount_total']
df['dc_count_total']=df[['dc_count_apr','dc_count_may','dc_count_jun']].sum(axis=1)
df['cc_count_total']=df[['cc_count_apr','cc_count_may','cc_count_jun']].sum(axis=1)
df['cc_dc_count_ratio']=df['cc_count_total']/df['cc_count_total']"""
df=df.replace([np.inf, -np.inf], np.nan)
dfDescT=af.getDFDesc(df)
# Let's drop cols whose fill count is below the minimum for now; we can revisit them later if required
dropFlag=dfDescT[(dfDescT['fillCount']<fillCountMinPer) | (dfDescT['unique']==1) | (dfDescT['std']==0)]
dropCols=list(dropFlag.index)
TrainCleanVars['dropCols'].extend(dropCols)
print("Dropping cols as unique count less or fillcount less or std is zero : ",dropCols)
df.drop(columns=dropCols,inplace=True)
df.to_csv('AfterFeature.csv',index=False)
"""plt.subplot(1, 2, 1)
plt.scatter(df['cc_cons_highest'],df['card_lim'],c="b")
plt.xlabel("highest spend")
plt.ylabel("card_lim")
plt.subplot(1, 2, 2)
plt.scatter(df['cc_cons_lowest'],df['card_lim'],c="r")
plt.xlabel("lowest spend")
plt.ylabel("card_lim")
plt.show()
div_val=10000
sc=plt.scatter(df['cc_cons_lowest']/div_val,df['cc_cons_highest']/div_val,c=df['card_lim']/div_val)
plt.colorbar(sc)
plt.xlabel("lowest spend")
plt.ylabel("highest spend")
plt.show()
plt.scatter(df['cc_cons_highest']/div_val,df['card_lim']/div_val,c=df['cc_cons_limit_crossed'])
plt.xlabel("highest spend")
plt.ylabel("card_lim")
plt.hist(df['card_lim'].dropna())
plt.show()
plt.hist(np.log(df.loc[df['isTrain']==True,'cc_cons']+1))
plt.show()
"""
#df.loc[df['isTrain']==True,'cc_cons']=np.log(df.loc[df['isTrain']==True,'cc_cons']+1)
dfDescT=af.getDFDesc(df)
catFlag=dfDescT[(dfDescT['distCount']<=distCatPer)]
catCols=list(catFlag.index)
df=af.categorizeCols(df,catCols)
catCols=list(set(catCols)-set(onehotColumns))
df=af.LabelEncodeCols(df.copy(),catCols,onehotColumns)
zeroOneCols=df.apply(lambda x: af.ChkZeroOne(x))
standarizeCols=list(zeroOneCols[zeroOneCols==False].index)
#standarizeCols.remove(target_variable)
"""profile = df.profile_report(title='Pandas Profiling Report')
profile.to_file(output_file="outputFeature.html")
rejected_variables = profile.get_rejected_variables(threshold=0.9)
df.drop(columns=rejected_variables,inplace=True)
standarizeCols = list(set(standarizeCols) - set(rejected_variables))
"""
X=df
X_trainVal=X[X['isTrain']==True]
X_test=X[X['isTrain']==False]
X_trainVal.reset_index(inplace=True,drop=True)
X_test.reset_index(inplace=True,drop=True)
X_trainVal.drop(columns=['isTrain'],inplace=True)
X_test.drop(columns=['isTrain'],inplace=True)
X_trainVal,misDict=af.missing_value(X_trainVal)
X_test,_=af.missing_value(X_test,misDict=misDict)
outlierlist=af.getOutliers(X_trainVal)
y_pred_outliers=np.array(outlierlist[0][1])
df_outliers=X_trainVal[y_pred_outliers==1]
dfDescT=af.getDFDesc(df_outliers)
X_trainVal=X_trainVal[y_pred_outliers==0]
dfDescT2=af.getDFDesc(X_trainVal)
X_trainVal,scaler=af.normalize(X_trainVal,standarizeCols)
#standarizeCols.remove(target_variable)
X_test=af.normalize(X_test,standarizeCols,scaler)
X_test.drop(columns=[target_variable],inplace=True)
dfDesc=X_test.describe(include='all')
dfDescT=dfDesc.T
trainVal_frame=X_trainVal
x_cols=list(X_trainVal.columns)
y_col=target_variable
import H2OHandler as hh
print("Start H2O model training")
res,PredDF,predtrain=hh.GetBestH2OModel(trainVal_frame,x_cols,y_col,pred_variable_type == "categorical",X_test)
TrainCleanVars['H2OBestModel']=res.leader
X_test[target_variable]=PredDF['predict']
X_test[standarizeCols]=scaler.inverse_transform(X_test[standarizeCols])
ts=af.GetTimeStamp()
af.PickleWrite(TrainCleanVars,"TrainCleanVars"+str(ts)+".pkl")
X_test[X_test < 0]=0 #Need to fix this
X_test['id']=df2['id']
final_sub=X_test[['id',target_variable]]
final_sub.to_csv('samplesubmission'+str(ts)+'.csv',index=False)
lb=res.leaderboard
lbres=lb[:5,"model_id"]
import h2o
m = h2o.get_model(lb[0,"model_id"])
varimpres=m.varimp(use_pandas=True)
trainVal_frameCopy=trainVal_frame.copy()
trainVal_frameCopy.reset_index(inplace=True,drop=True)
trainVal_frameCopy['cc_cons']=predtrain
trainVal_frameCopy[standarizeCols]=scaler.inverse_transform(trainVal_frameCopy[standarizeCols])
trainVal_frameCopy[trainVal_frameCopy < 0]=0
orgCC=orgCC[y_pred_outliers==0]
trainVal_frameCopy['cc_cons_org']=orgCC
trainVal_frameCopy['diff']=trainVal_frameCopy['cc_cons_org']-trainVal_frameCopy['cc_cons']
trainCompare=trainVal_frameCopy[['cc_cons_org','cc_cons','diff']]
from sklearn.metrics import mean_squared_log_error
rmsle=np.sqrt(mean_squared_log_error(orgCC, trainVal_frameCopy['cc_cons']))
print(rmsle)
|
kinjaldand/MLProjects
|
CreditCardConsumptionPatternAMEX/InitialExplore.py
|
InitialExplore.py
|
py
| 10,737 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39907609427
|
"""
============================
Project: python_class
Author:ๆ ๆชฌ็ญ-Tricy
Time:2021/8/14 19:19
E-mail:[email protected]
Company:ๆนๅ้ถๆชฌไฟกๆฏๆๆฏๆ้ๅ
ฌๅธ
Site: http://www.lemonban.com
Forum: http://testingpai.com
============================
"""
'''
ๅญๅ
ธ๏ผ-- dict -{} --้่ฆ
1ใๅ
็ด ๏ผ ๅคไธช้ฎๅผๅฏน key ๏ผ value
2ใไฝฟ็จๅบๆฏ๏ผไปไนๆถๅ้่ฆไฝฟ็จๅญๅ
ธไฟๅญๆฐๆฎ๏ผ -- ๅฑๆงๅๅญ-ๅฑๆงๅผ == ๆนไพฟๅๅผ
ไบบ็ไฟกๆฏ๏ผๅๅญ๏ผๅนด้พ๏ผ่บซ้ซ ไฝ้ ็ฑๅฅฝ ็ต่ฏๅท็
3ใkey ไธๅฏไปฅ่ขซๆนๅ็๏ผๅ่กจ๏ผๅญๅ
ธ--ๅญ็ฌฆไธฒๅไธบkey๏ผ๏ผไธ่ฝ้ๅค็ ๏ผๅฏไธ็๏ผ;value ๅฏไปฅๆฏไปปไฝๆฐๆฎ็ฑปๅ๏ผๅฏไปฅๅๅ็๏ผ
4ใๅญๅ
ธ็ๅๅผ: ๅญๅ
ธๆ ๅบ็--ๆฒกๆ็ดขๅผ็ == ้่ฟkey ๅvalue
5ใๅญๅ
ธ้็valueๅฏไปฅๆนๅ-- ๅขๅ ๅ ้ค ไฟฎๆน
6ใ้ฟๅบฆ --
'''
dict_info = {"name":"Rella","age":"18","height":"165cm","weight":"45kg"}
# ๅขๅ ใไฟฎๆน
dict_info["hobby"] = "sing" #่ตๅผๆฐ็้ฎๅผๅฏน --keyไธๅญๅจ๏ผๆฐๅข
dict_info["age"] = "19" # ่ตๅผๆฐ็้ฎๅผๅฏน--keyๅญๅจ ๏ผไฟฎๆน
# print(dict_info)
# ๅขๅ ๅคไธช -- ๅญๅ
ธๅๅนถ
dict_info.update({"city":"ๅไบฌ","gender":"female"}) # ไธๆฌกๆงๆทปๅ ๅคไธช้ฎๅผๅฏน
# print(dict_info)
# ๅ ้ค
dict_info.pop("weight") # ๆ ๅบ --ๆๅไธไธช๏ผ๏ผ== ๅช่ฝๆๅฎkeyๅ ้ค
# print(dict_info)
# print(dict_info["name"]) # ้่ฟkeyๅๅผvalue
# print(dict_info.get("name")) # ้่ฟkeyๅๅผvalue
# print(dict_info.keys())
# print(dict_info.values())
# print(dict_info.items())
# print(len(dict_info))
'''
้ๅ-- set {} --ๆฉๅฑๅ
ๅฎน
1ใๅ
็ด ๆฏไธ่ฝ้ๅค
2ใๆ ๅบ็
3ใไฝฟ็จๅบๆฏ -- ๅฏไปฅ็ปๅ่กจ ๅป้
'''
# list1 = [11,22,33,44,44,77,77,11]
# print(list1)
# # ไธ้ๅคๅ
็ด ็ไธชๆฐ -- ๆๅ่กจ่ฝฌๅไธบ้ๅ -่ชๅจๅป้
# set1 = set(list1) # ๆๅ่กจ่ฝฌๅไธบ้ๅ --set() --ๅ
็ฝฎๅฝๆฐ
# print(set1)
# list2 = list(set1)
# print(list2)
'''
Pythonๆงๅถๆต๏ผifๅคๆญ๏ผ forๅพช็ฏ
ifๅคๆญ --ๅๆฏ
่ฏญๆณ๏ผ
1ใelifๅฏไปฅๆฒกๆ๏ผๅฏไปฅๆๅคไธช
2ใๆกไปถไธๅฎๆฏๆ็ซ็--ๆไผ่ฟๅ
ฅๅๆฏ -- True
3ใๅๅท-- ็ถๅญๅ
ณ็ณป --ๅญไปฃ็ == ๅไธช็ฉบๆ ผ็ผฉ่ฟ๏ผtab้ฎ๏ผ
if ๆกไปถ๏ผ่บซ้ซ180๏ผ๏ผ-- ๆกไปถๆ็ซ
ๅๆฏ-ๆง่ก่ฏญๅฅ๏ผ็ทๆๅ๏ผ
elif ๆกไปถ๏ผๅธ
๏ผ๏ผ
ๅๆฏ-ๆง่ก่ฏญๅฅ๏ผ่ๅฏ๏ผ
elif ๆกไปถ๏ผ้ฑ๏ผ๏ผ
ๅๆฏ-ๆง่ก่ฏญๅฅ๏ผ่ๅฏ๏ผ
elif ๆกไปถ๏ผๆๅ๏ผ๏ผ
ๅๆฏ-ๆง่ก่ฏญๅฅ๏ผ่ๅฏ๏ผ
else:--- ๆฒกๆๆกไปถ๏ผ
ๅ่บซๆฑช
'''
# money = int(input("่ฏท่พๅ
ฅไฝ ็่ดขไบง้้ข๏ผ")) # input่พๅ
ฅๅ
ๅฎน-ๅญ็ฌฆไธฒ
# if money >= 200: # True
# print("ไนฐๅคงๆฟๅญ๏ผ")
# elif money >=100:
# print("ไป้ฆไป")
# elif money >= 50:
# print("ไนฐ่ฝฆ")
# elif money >=10:
# print("ๅๅฅฝ็๏ผๆ
ๆธธ๏ผ")
# else:
# print("ไนไนๅญฆไน ๏ผๅฅฝๅฅฝๅทฅไฝ๏ผ")
'''
forๅพช็ฏ๏ผ้ๅๆๆ็ๅ
็ด -- ๅญ็ฌฆไธฒ๏ผๅ่กจ ๅ
็ป ๅญๅ
ธ
ๅๅท๏ผ ็ผฉ่ฟ--ๅญไปฃ็ == ๅพช็ฏไฝ
1ใๅพช็ฏๆฌกๆฐ-- ็ฑ่ฐๆฅๅณๅฎ๏ผ==ๅ
็ด ไธชๆฐ๏ผ้ฟๅบฆ๏ผ
2ใdebug --่ฐ่ฏ
1๏ผ ๆๆญ็น -- ่ฐ่ฏไผ่ฟ่กๅผๅงๆง่ก
2๏ผ ็นๅปdbeug ๆ้ฎ
3๏ผ ๅๆญฅๆง่ก
3ใ่ทณๅบๅพช็ฏ
1) break: ๆปก่ถณๆกไปถ็ๆถๅ่ทณๅบๆดไธชๅพช็ฏ --ๅ้ข็ๆๆ็ๅพช็ฏ้ฝไธไผๆง่ก
2)continue๏ผๆปก่ถณๆกไปถ็ๆถๅ่ทณๅบๅฝๆฌกๅพช็ฏ --ๅ้ข็ๆๆ็ๅพช็ฏ็ปง็ปญๆง่ก
4ใๅ
็ฝฎๅฝๆฐ็ปๅธธไธ่ตทไฝฟ็จ-- range
็ๆไธไธชๆดๆฐๅบๅ -- range() range(0,10,1) -- 0,1,2,3,4,5,6,7,8,9
ๅผๅงๆฐๅญ๏ผ0 --็็ฅ --้ป่ฎค0
็ปๆๆฐๅญ:10 --ๅๅคดไธๅๅฐพ -- ๅฟ
้กป่ฆๅ
ๆญฅ้ฟ:1 --้ป่ฎค1
'''
# count = 0
# str3 = "ๆ ๆชฌ็ญๅ
จ็จ็ญ็ๅญฆ็ๆฏๆๆฃ็๏ผ"
# for i in str3:
# if i == "ๅ
จ":
# # break
# continue
# print(i)
# print("*" * 20)
# count += 1
# print(count)
# print(len(str3))
# for i in range(5):
# print(i)
'''
Python็ๅฝๆฐ:
ๅฆๆๆไธๆฎตไปฃ็ ้่ฆ่ขซ้ๅค็ไฝฟ็จ-- ๅฐ่ฃ
ๆๅฝๆฐ -- ่ฐ็จ่ฟไธชๅฝๆฐๅฐฑๅฏไปฅไบ = ๆ้ซไปฃ็ ๅค็จ็
1ใๅฐ่ฃ
ๅฝๆฐ็่ฏญๆณ๏ผ
def ๅฝๆฐๅ(): -- ่ชๅทฑๅๅ็== ๆ ่ฏ็ฌฆ๏ผ็ฌฆๅๅฝๅ่งๅ
ๅฝๆฐไฝ๏ผๅฎ็ฐๅฝๆฐๅ่ฝ็ๅ
ทไฝไปฃ็ ๏ผ
2ใๅฝๆฐๅฎไนๅฎไนๅ๏ผ่ฆ่ฐ็จๆไผ่ขซ่ฟ่ก๏ผ๏ผ--ๆไน่ฐ็จ๏ผ--ไฝฟ็จๅฝๆฐๅ่ฐ็จ๏ผ
3ใๅฆๆๆฏๅฎนๆๅๅ็ๅผ-ไธไผๅๆญปๅจๅฝๆฐ้--ไผ ๅ
ฅ==ๅฎไนๆไธบๅฝๆฐ็ๅๆฐ
็จๅ้ไปฃๆฟๅ
ทไฝ็ๅผ -- ๆฌๅท้== ๅๆฐ-- ๅฝขๅ
3.1 ๅฎไนๅฝๆฐ็ๆถๅ๏ผๅฎไน็ๅๆฐ
ๅฟ
ๅคๅๆฐ๏ผๅฎไนไบๅฟ
้กป่ฆไผ ๏ผไธไผ ๆฅ้๏ผ
้ป่ฎคๅๆฐ๏ผๅฝๅๆฐๆไธไบ็ปๅธธไฝฟ็จ็ๅผ็ๆถๅ๏ผ่ฎพ็ฝฎไธบ้ป่ฎคๅๆฐ==ๅฎไน้ป่ฎคๅผ๏ผๅฏไปฅไธไผ ๏ผๅฏไปฅไผ ๅ
ฅ-- ไปฅๅฎๅไธบๅ๏ผ
ไฝ็ฝฎ่ฆๆฑ๏ผ้ป่ฎคๅๆฐ่ฆๅจๅฟ
ๅคๅๆฐ็ๅ้ข๏ผ
ไธๅฎ้ฟๅๆฐ๏ผไธ็กฎๅฎๆ่ฟๆฏๆฒกๆ๏ผๆๅคๅฐ๏ผ
*args๏ผๅ้ข็ๅฟ
ๅคๅๆฐ ้ป่ฎคๅๆฐ้ฝๆฅๅๅฎไบ๏ผๅฉไธ็ๆๆ็ๅๆฐ้ฝ่ขซargsๅๆฐๆฅๅ๏ผๅนถไธไปฅๅ
็ป็ๆ ผๅผไฟๅญใ--- ไฝ็ฝฎไผ ๅ
**kwargs: ๅ้ข็ๅฟ
ๅคๅๆฐ ้ป่ฎคๅๆฐ้ฝๆฅๅๅฎไบ๏ผๅฉไธ็ๆๆ็ๅๆฐ้ฝ่ขซkwargsๅๆฐๆฅๅ,ๅนถไปฅๅญๅ
ธ็ๆ ผๅผไฟๅญ -- ๅ
ณ้ฎๅญไผ ๅ
3.2 ่ฐ็จๅฝๆฐ็ๆถๅ๏ผไผ ๅ
ฅ็ๅๆฐ๏ผ ๅฎๅ
1) ไฝ็ฝฎไผ ๅ๏ผๅๆฐ็ธๅ
ณ็
2๏ผๅ
ณ้ฎๅญไผ ๅ๏ผไธไผ่ทไฝ็ฝฎๆงๆ ผ๏ผ้กบๅบ้ๆฒกๆๅ
ณ็ณป -- ็ฒพ็กฎ
3) ๆททๅไผ ๅ๏ผๅ
ณ้ฎๅญไผ ๅ ไธๅฎ่ฆๆพๅจไฝ็ฝฎไผ ๅ็ๅ้ข
ๅฝๆฐๆ่ฟๆๅบ-- ่ฟ- ๅๆฐ๏ผๅบ-่ฟๅๅผ
่ฟๅๅผ๏ผๅฝๆฐ่ฟ่กๅฎไผๅ๏ผๆๆฐๆฎ่ฆ็ปๅซไบบ็จ็ -- ่ฟไธชๆฐๆฎๅฎไนไธบ่ฟๅๅผ๏ผ==ๅพๅฐ่ฟไธช่ฟๅๅผใ
ๆไนๅฎไน๏ผ -- return
ๅพๅฐ่ฟๅๅผ๏ผ-- ่ฐ็จๆถๅ๏ผๆฅๅ่ฟไธช่ฟๅๅผ
1ใ่ฟๅๅผ ๅฏไปฅๆฒกๆ-- None
2ใไนๅฏไปฅๆไธไธช๏ผๅฏไปฅๆๅคไธช --้ๅท้ๅผ๏ผๆฅๅ็ๆฏ็จๅ
็ปไฟๅญ็ใ
3ใๅฎไน่ฟๅๅผ--- ๆๅณ็ๅฝๆฐ็ปๆ๏ผๅ้ข็ไปฃ็ ้ฝไธไผๅ่ฟ่กไบ๏ผ
'''
# print('66666') # ๆ ็จไปฃ็
def good_job(salary,bonus,subsidy=500,*args,**kwargs): # ๅฝขๅ
print("salaryๅๆฐ็ๅผๆฏ๏ผ{}".format(salary))
print("bonusๅๆฐ็ๅผๆฏ๏ผ{}".format(bonus))
print("subsidy๏ผ{}".format(subsidy))
print("argesๅๆฐ็ๅผๆฏ๏ผ{}".format(args))
print("kwargesๅๆฐ็ๅผๆฏ๏ผ{}".format(kwargs))
sum1 = salary + bonus + subsidy
for i in args:
sum1 += i
for j in kwargs:
sum1 += kwargs.get(j)
print("่ฟไธชๅทฅไฝ็ๅทฅ่ตๆปๅๆฏ๏ผ{}".format(sum1))
return sum1, salary # ๅฎไนไบ่ฟๅๅผ
print("่ฟไธชไปฃ็ ่ฟ่กๅฎไบไน๏ผ")
result = good_job(9000,2000,800,100,200,300,a=100,b=200,c=300) # ๅฝๆฐ็่ฐ็จ--ๅๆฐ็ไผ ๅ
ฅ = ๅฎๅ
print(result)
# ๅ้ๆฅๆฅๅๅฝๆฐ็่ฐ็จ-- ่ฟๅๅผ
# if result > 10000:
# print("่ฟๆฏไธไธชๅฅฝ็ๅทฅไฝ๏ผ")
# else:
# print("่ฟไธชๅทฅ่ตไธ่ก๏ผ")
'''
ๅ
็ฝฎๅฝๆฐ๏ผ
1ใprint๏ผtype isinstance , input
2ใlen(),range()
3ใint, float, bool,str, list , tuple, set, dict
4ใๅญ็ฌฆไธฒ็ๆนๆณ๏ผ str.index str.find str.replace str.split count
5ใๅ่กจ ๅญๅ
ธ็ๆนๆณ๏ผ list.append๏ผlist.insert๏ผlist.pop, list.remove
'''
dic1 = {"name":"้ๆ ๅญ","age":"18"}
dict2 = dict(name="้ๆ ๅญ",age=18)
print(type(dict2))
|
GDSDvzz/vzz_01
|
pythonc/lesson_03.py
|
lesson_03.py
|
py
| 7,037 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
17868781057
|
# How to Hit Inaccurate Text with a Vector (Arrow) !
from manimlib.imports import *
class HittingInaccurateText(Scene):
def construct(self):
vector = Vector(3 * RIGHT)
runtime = 5
parts = 300
part = 0
theta = 0
while part <= parts:
            self.play(Rotating(vector, radians=TAU/parts, about_point=ORIGIN), run_time=runtime/parts)
            theta = round(theta + TAU/parts, 2)  # advance the angle shown in the label (it was never updated)
            text = TexMobject("e^{i" + str(theta) + "}")
text.add_updater(lambda t: t.next_to(vector, RIGHT, buff=0.05))
self.add(text)
part += 1
self.wait()
# This is a failed attempt of code for rotating circles and arrows for my Fourier Series Explanation video.
|
tinfungster/My_Animations
|
HittingInaccurateText.py
|
HittingInaccurateText.py
|
py
| 695 |
python
|
en
|
code
| 1 |
github-code
|
6
|
45342937416
|
import torch
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.autograd import Variable
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.input_variational_dropout import InputVariationalDropout
class Char_RNN(nn.Module):
def __init__(self, char_to_index, char_embed_size, hidden_size,output_size,dropout,cuda_flag, batch_first=True):
"""
Args:
char_to_index:
char_embed_size: char embeddings dim
hidden_size: lstm reccurent dim
dropout: dropout probability
batch_first: batch first option
"""
super(Char_RNN, self).__init__()
self.char_to_index = char_to_index
self.char_embed_size = char_embed_size
self.hidden_size = hidden_size
self.dropout = dropout
self.output_size = output_size
self.batch_first = batch_first
self.padding_index = self.char_to_index['__PADDING__']
self.cuda_flag = cuda_flag
self.char_encoder = nn.Embedding(len(self.char_to_index), self.char_embed_size, sparse=True, padding_idx= self.padding_index)
torch.nn.init.xavier_uniform_(self.char_encoder.weight.data)
self.char_rnn = AugmentedLstm(input_size= self.char_embed_size, hidden_size = self.hidden_size,go_forward = True, recurrent_dropout_probability = self.dropout,
use_highway = False, use_input_projection_bias = False)
self.char_rnn.state_linearity.bias.data.fill_(0.0)
self.var_drop = InputVariationalDropout(self.dropout)
self.w_atten = nn.Linear(self.hidden_size,1,bias=False)
self.w_atten.weight.data.fill_(0.0)
self.char_projection = nn.Linear(self.hidden_size*2,self.output_size,bias=True)
self.char_projection.weight.data.fill_(0.0)
self.char_projection.bias.data.fill_(0.0)
self.drp = nn.Dropout(self.dropout)
def forward(self,char_ids,seq_lengths):
tokenIdChars = []
for sent in char_ids:
tokenIdChars.extend([idChars for idChars in sent])
tokenIdChars_set = set(map(tuple,tokenIdChars))
tokenIdChars = list(map(list,tokenIdChars_set))
tokenIdChars.sort(key=lambda x: -len(x))
max_len = len(max(tokenIdChars,key=len))
batch_size = len(tokenIdChars)
char_tensor = torch.zeros(batch_size,max_len).long()
char_tensor.fill_(self.padding_index)
for idx in range(len(tokenIdChars)):
for jdx in range(len(tokenIdChars[idx])):
char_tensor[idx,jdx] = tokenIdChars[idx][jdx]
if self.cuda_flag:
char_tensor = char_tensor.cuda()
char_embed = self.char_encoder(char_tensor)
char_embed = self.var_drop(char_embed)
char_seq_lengths = np.array([len(char) for char in tokenIdChars])
packed_input = pack_padded_sequence(char_embed, char_seq_lengths,batch_first=True)
packed_output, (ht,cell) = self.char_rnn(packed_input, None)
out_rnn, lengths = pad_packed_sequence(packed_output, batch_first=True)
out_rnn = self.var_drop(out_rnn)
w_att = self.w_atten(out_rnn)
if self.cuda_flag:
mask = torch.ones(w_att.size()).cuda()
else:
mask = torch.ones(w_att.size())
for i, l in enumerate(lengths):
if l < out_rnn.size()[1]:
mask[i, l:] = 0
w_att = w_att.masked_fill(mask == 0, -1e9)
#compute and apply attention
attentions = F.softmax(w_att.squeeze(),dim=1)
weighted = torch.mul(out_rnn, attentions.unsqueeze(-1).expand_as(out_rnn))
char_att = weighted.sum(1).squeeze()
char_embs = torch.cat((char_att,cell.squeeze(0)),1)
char_embs = self.drp(char_embs)
proj_char_embs = self.char_projection(char_embs)
RNN_embs = {}
for idx in range(len(tokenIdChars)):
RNN_embs[str(tokenIdChars[idx])] = proj_char_embs[idx,:]
max_seq = torch.max(seq_lengths).cpu().numpy().tolist()
if self.cuda_flag:
char_emb_tensor = Variable(torch.zeros(len(char_ids),max_seq,self.output_size)).cuda()
else:
char_emb_tensor = Variable(torch.zeros(len(char_ids),max_seq,self.output_size))
for idx in range(len(char_ids)):
for jdx in range(len(char_ids[idx])):
char_emb_tensor[idx,jdx,:] = RNN_embs[str(char_ids[idx][jdx])]
return char_emb_tensor
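# Hypothetical construction (added for illustration; the vocabulary and sizes
# below are assumptions, not values from this repository):
# char_vocab = {'__PADDING__': 0, 'a': 1, 'b': 2, 'c': 3}
# encoder = Char_RNN(char_vocab, char_embed_size=50, hidden_size=100,
#                    output_size=100, dropout=0.33, cuda_flag=False)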
|
makyr90/DL_Syntax_Models
|
Biaffine_parser_PyTorch/char_lstm.py
|
char_lstm.py
|
py
| 4,586 |
python
|
en
|
code
| 2 |
github-code
|
6
|
70571557627
|
'''Auxiliary functions'''
import numpy as _np
import pandas as _pd
def update_mindex(dataframe, lvl_name,loc=0,axis=1):
'''Inserts a level named as lvl_name into dataframe df in loc position.
Level can be inserted either in columns (default axis=1) or index (axis=0)'''
mindex_df = dataframe.columns if axis == 1 else dataframe.index
mindex_df = mindex_df.to_frame(index=False)
if loc == -1:
loc = mindex_df.shape[1] #can insert below levels
mindex_df.insert(loc = loc,column = 'add',value = lvl_name)
mindex_df_updated = _pd.MultiIndex.from_arrays(mindex_df.values.T)
if axis == 1:
dataframe.columns = mindex_df_updated
else:
dataframe.index = mindex_df_updated
return dataframe
def get_common_index(*dfs,level=None):
index_sets = [set(df.index.values if level is None else df.index.levels[level].values) for df in dfs]
return set.intersection(*index_sets)
def sync_snx_sites(*dfs):
'''Finds common sites present in all gathers and outputs
a list of gathers with common sites only'''
sites = get_common_index(*dfs,level=0)
# index.remove_unused_levels() may be required
return [snx_df.loc[sites] for snx_df in dfs]
def code_pt_comboindex(vec):
'''returns combo index as CODE + PT'''
tmp_index = vec.index
site_code = tmp_index.droplevel([1,2])
site_pt = tmp_index.droplevel([0,1])
return _pd.Index(site_code.values + site_pt.values.astype(object))
def sync_pt_vec(vec1,vec2):
'''returns sinex vectors synced on the common site name
and takes care of PT monument type'''
cindex1 = code_pt_comboindex(vec1)
cindex2 = code_pt_comboindex(vec2)
return vec1[cindex1.isin(cindex2)],vec2[cindex2.isin(cindex1)]
def unique_cols(df:_pd.DataFrame)->_np.ndarray:
'''returns True for a df row with all duplicates'''
a = df.to_numpy() # df.values (pandas<0.24)
return (a[:,0][:,None] == a).all(1)
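# Usage sketch for update_mindex (added; the station codes and values are
# made up for illustration):
# df = _pd.DataFrame({'X': [1.0, 2.0]}, index=['ALIC', 'DARW'])
# df = update_mindex(df, 'EST')  # columns become a MultiIndex: ('EST', 'X')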
|
aaronhammondgagovau/ginan
|
scripts/gn_lib/gn_aux.py
|
gn_aux.py
|
py
| 1,941 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5510824333
|
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
import cv2
import numpy as np
from detectron2 import model_zoo
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg.MODEL.WEIGHTS = 'weights/model_segmentation.pth' # path to the model we just trained
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.95 # set a custom testing threshold
predictor = DefaultPredictor(cfg)
def get_segment_crop(img, tol=0, mask=None):
if mask is None:
mask = img > tol
return img[np.ix_(mask.any(1), mask.any(0))]
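# How the crop above works (explanatory note, added): mask.any(1) and
# mask.any(0) are boolean vectors marking rows/columns that contain at least
# one True pixel; np.ix_ turns them into an open mesh, so the indexing keeps
# only the rows and columns that contain mask pixels (for a contiguous mask,
# this is its bounding box).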
def segment_single_images(image, save_img=False):
error_ims = []
segmen_info = []
# image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
output_predictor = predictor(image)
    if output_predictor['instances'].pred_masks.shape[0] > 1:
        # when several instances are detected, keep the mask with the largest area
        masks = output_predictor['instances'].pred_masks.cpu().numpy()
        mask_binary = masks[np.argmax(np.sum(masks, axis=(1, 2))), :, :]
else:
mask_binary = np.squeeze(output_predictor['instances'].pred_masks.permute(1, 2, 0).cpu().numpy())
try:
crop_mask = get_segment_crop(img = image, mask = mask_binary)
except ValueError:
print("error")
origin_mask = cv2.cvtColor(np.float32(mask_binary) * 255.0, cv2.COLOR_GRAY2RGB)
for j in range(image.shape[2]):
image[:,:,j] = image[:,:,j] * origin_mask[:,:,j] * 255
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
hoainv99/mc-ocr
|
modules/image_segmentation/predict.py
|
predict.py
|
py
| 1,630 |
python
|
en
|
code
| 26 |
github-code
|
6
|
74934799227
|
def cal(s, d):
m = min(s)
t = ''
i = s.index(m)
if(m < d):
t += m + d * i
s = s[i + 1::]
else:
s = []
return s, t
# print(i)
# print(min(s))
for _ in range(int(input())):
n,d = [x for x in input().split()]
s = list(n)
# print(s)
ans = []
ans_final = ''
while(len(s) > 0):
# print(s)
s, t = cal(s, d)
ans.append(t)
# print(s,t)
# ans += t
# print(ans)
c = 0
for i in ans:
if(len(i) > 0):
c += 1
ans_final += i[0]
# if(len(i) > 1):
# c += 1
# ans_final += i[:len(i) - 1:]
# else:ans_final += i
ans_final += d * (len(n) - c)
print(ans_final)
|
Chhekur/codechef-solutions
|
MARCH19B/CHDIGER.py
|
CHDIGER.py
|
py
| 592 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70211689149
|
from math import sqrt
from os import system
""" ะะฐััะฐะฝั 29
ะะปั ะทะฐะดะฐะฝะพะณะพ ะฝะฐัััะฐะปัะฝะพะณะพ ัะธัะปะฐ n ะพะฑัะธัะปะธัะธ """
sum = 0
n = int(input('ะะฒะตะดััั ะบัะปัะบัััั ัะปะตะฝัะฒ n: ')) # ะะฒะตะดะตะฝะฝั ะบัะปัะบะพััั ัะปะตะฝัะฒ
if n < 1: # ะะตัะตะฒััะบะฐ ะบัะปัะบะพััั ัะปะตะฝัะฒ ะฝะฐ ะฝะฐัััะฐะปัะฝัััั
print('ะงะธัะปะพ n ะฝะต ะผะพะถะต ะฑััะธ ะผะตะฝัะธะผ ะทะฐ 1')
exit(0)
for i in range(n):
sum = sqrt(2 + sum) # ะะฑัะธัะปะตะฝะฝั ะฟัะพะผัะถะฝะพั ััะผะธ
print('sum = %.10f' % sum) # ะะธะฒะตะดะตะฝะฝั ะฝะฐ ะตะบัะฐะฝ ะฟัะพะผัะถะฝะพั ััะผะธ
print('ะ ะตะทัะปััะฐั: %.10f' % sum) # ะะธะฒะตะดะตะฝะฝั ัะตะทัะปััะฐัั
system('pause')
|
Compich/KPI-FICT
|
ะัะฝะพะฒั ะฟัะพะณัะฐะผะผะธัะพะฒะฐะฝะธั/1 ะบััั/ะะฐะฑะพัะฐัะพัะฝะฐั ัะฐะฑะพัะฐ โ4/Python/main.py
|
main.py
|
py
| 756 |
python
|
uk
|
code
| 0 |
github-code
|
6
|
39508101385
|
import numpy as np
import matplotlib.pyplot as plt
baseline = np.loadtxt('sub-AD4009_ses-baseline_acq-AP_date-2011-07-07_trc-av45_pet.csv', delimiter=',')
followup = np.loadtxt('sub-AD4009_ses-followup_acq-AP_date-2013-07-03_trc-av45_pet.csv', delimiter=',')
prediction = followup + np.random.normal(0, .025, size=followup.shape)
plt.figure(figsize=(20,10))
#plt.plot(baseline, '-', marker='o', c='#390099', label='baseline', linewidth=1)
plt.plot(followup, '-', marker='o', c='#00A6FB', linewidth=1)
plt.plot(prediction, '-', marker='o', c='#FF0054', linewidth=6, alpha = 0.2)
#plt.legend(bbox_to_anchor=(0, 1), loc='upper left', fontsize=22)
#plt.ylabel('Regional Error', fontsize=18)
#plt.yticks(fontsize=14)
plt.xlim(-1, len(baseline))
plt.tick_params(
axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False, left=False, labelleft=False)
#plt.xticks(np.arange(len(baseline), step=2), fontsize=12, rotation=40)
#plt.xlabel('ROI (166 AAL3 regions)', fontsize=18)
plt.grid(True)
plt.tight_layout()
plt.savefig('followup.png')
#plt.savefig('baseline.png')
|
SanoScience/MP-spreading-prediction
|
pictures/Graphical_abstract/plot.py
|
plot.py
|
py
| 1,090 |
python
|
en
|
code
| 4 |
github-code
|
6
|
5683288254
|
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch
import torch.nn.functional as F
class DNN(nn.Module):
def __init__(self, n_input, n_hidden, n_output, real):
super(DNN, self).__init__()
self.loss = 0
self.hidden1 = nn.Linear(n_input, n_hidden, True)
self.hidden2 = nn.Linear(n_hidden, n_output, True)
self.o = nn.Linear(n_output, real, True)
self.sig = nn.Sigmoid()
def forward(self, x):
x = self.hidden1(x)
x = self.hidden2(x)
x = self.o(x)
x = self.sig(x)
return x.squeeze(1)
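# Hypothetical usage (added; the shapes are assumptions):
# net = DNN(n_input=10, n_hidden=32, n_output=16, real=1)
# y = net(torch.randn(4, 10))  # sigmoid outputs, shape (4,)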
|
asd1354403003/NON
|
DNN.py
|
DNN.py
|
py
| 637 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73558579389
|
import re
import math
def bigram_letter_count(text):
bigram_letter = re.findall(r'(?=(\w{2}))', text)
bigram_letter_count = {}
for item in bigram_letter:
if item in bigram_letter_count:
bigram_letter_count[item] += 1
else:
bigram_letter_count[item] = 1
return bigram_letter_count
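# Note (added): collections.Counter(re.findall(r'(?=(\w{2}))', text)) would be
# an equivalent, more idiomatic way to build the same counts.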
def single_letter_count(text):
single_letter = re.findall(r'\w', text)
single_letter_count = {}
for item in single_letter:
if item in single_letter_count:
single_letter_count[item] += 1
else:
single_letter_count[item] = 1
return single_letter_count
def cal_probability(test, bigram, single):
p = 1
for item in test:
if(item[0] not in single and re.compile(r'[^0-9]').match(item[0])):
return 0
if(item in bigram):
p = p * bigram[item] / single[item[0]]
return p
# read and lowercase the file
f = open('HW2english.txt', encoding="utf-8")
eng_text = f.read().lower()
f.close()
f = open('HW2french.txt', encoding="utf-8")
french_text = f.read().lower()
f.close()
f = open('HW2german.txt', encoding="utf-8")
german_text = f.read().lower()
f.close()
test_text = []
with open('LangID.test.txt', encoding="utf-8") as f:
for line in f:
test_text.append(line.lstrip('0123456789. ').lower())
# bigram letter count and single letter count
eng_bigram_letter = bigram_letter_count(eng_text)
eng_single_letter = single_letter_count(eng_text)
french_bigram_letter = bigram_letter_count(french_text)
french_single_letter = single_letter_count(french_text)
german_bigram_letter = bigram_letter_count(german_text)
german_single_letter = single_letter_count(german_text)
with open('BigramLetterLangId.out', 'w') as f:
f.write('ID LANG\n')
h_en = 0
h_fr = 0
h_gr = 0
with open('BigramLetterLangId.out', 'a') as f:
for i in range(len(test_text)):
test_bigram = re.findall(r'(?=(\w{2}))', test_text[i])
test_eng_pro = cal_probability(
test_bigram, eng_bigram_letter, eng_single_letter)
test_french_pro = cal_probability(
test_bigram, french_bigram_letter, french_single_letter)
test_german_pro = cal_probability(
test_bigram, german_bigram_letter, german_single_letter)
if test_eng_pro != 0:
h_en += -1 * test_eng_pro * math.log2(test_eng_pro)
        if test_french_pro != 0:
            h_fr += -1 * test_french_pro * math.log2(test_french_pro)
        if test_german_pro != 0:
            h_gr += -1 * test_german_pro * math.log2(test_german_pro)
if(test_eng_pro > test_french_pro and test_eng_pro > test_german_pro):
f.write(str(i + 1) + " EN\n")
elif(test_french_pro > test_eng_pro and test_french_pro > test_german_pro):
f.write(str(i + 1) + " FR\n")
elif(test_german_pro > test_eng_pro and test_german_pro > test_french_pro):
f.write(str(i + 1) + " GR\n")
else:
f.write(str(i + 1) + " ERROR\n")
pp_en = 2**h_en
pp_fr = 2**h_fr
pp_gr = 2**h_gr
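# The entropies accumulated above give a perplexity per language as 2**H;
# a hypothetical report line (not in the original):
# print("Perplexity EN: %.3f, FR: %.3f, GR: %.3f" % (pp_en, pp_fr, pp_gr))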
|
bs-feng/GWU_NLP_2017Fall
|
hw2/letterLangld.py
|
letterLangld.py
|
py
| 3,087 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38710107620
|
from discord_slash import cog_ext
from discord.ext import commands
from txns import get_wallet
import asyncio
from embeds import *
import discord
import pytz
from datetime import datetime
import random
from client import client
from txns import *
from whitelist import ghosts, ghostsIcons, fo_rank1, fo_rank2, fo_rank3, fo_rank4, fo_rank5
sign_ups = []
def get_guild():
guild = client.get_guild(936698039941345370)
return guild
guild = get_guild()
class DripCog(commands.Cog):
@cog_ext.cog_slash(name="drip", description="Drips Out 1-5 $EXP Every 6 Hours!")
@commands.cooldown(1, 10, commands.BucketType.guild)
async def drip_claim(self, ctx):
if ctx.channel.id == 937750181154279485 or ctx.channel.id == 936801867340582982:
await ctx.send(embed=embedWrongChannelDrip, hidden=True)
return
else:
userid = str(ctx.author.id)
wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(userid)
if wallet == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
return
else:
balance = await get_balance(wallet, 811721471)
if balance == -1:
await ctx.send(embed=embedNoOptEXP)
return
else:
utc = pytz.timezone('UTC')
lastdrip_datetime = datetime.strptime(lastdrip, '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=utc)
now = datetime.now(utc)
time_diff = now - lastdrip_datetime
total_seconds = time_diff.total_seconds()
if total_seconds < 6 * 60 * 60:
next_claim = ((60*60*6) - total_seconds)
timer = ((datetime.utcfromtimestamp(next_claim)).strftime('%HH %MM %SS')).lstrip('0')
if timer.startswith("H "):
dt = timer[2:]
else:
dt = timer
embedNoDrip = discord.Embed(
title=f"You have already made a drip claim less than 6 hours ago!",
description=f"Please come back when your timer resets...",
color=0xFF1C0A,
)
embedNoDrip.set_footer(text=f"Next Claim In {dt} โฑ๏ธ")
await ctx.send(embed=embedNoDrip, hidden=True)
return
else:
exp = [1, 2, 3, 4, 5]
random_exp = random.choice(exp)
new_exp = int(drip_exp + random_exp)
current_time = (datetime.now(utc)).strftime('%Y-%m-%dT%H:%M:%SZ')
txnid = await send_assets("Angels Of Ares", fallen_order_main, wallet, 811721471, "EXP", random_exp)
embedDrip.add_field(name=f"Dripped out {random_exp} $EXP to <@{ctx.author.id}>!", value=f"[Txn Link](https://algoexplorer.io/tx/{txnid})", inline=True)
await ctx.send(embed=embedDrip)
embedDrip.clear_fields()
await add_drip(wallet, current_time, new_exp)
@drip_claim.error
async def drip_claim_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(embed=embedCD, hidden=True)
class WingRevenueCog(commands.Cog):
@cog_ext.cog_slash(name="wing-revenue", description="Admin Use Only!")
async def wing_count(self, ctx):
await ctx.defer()
if ctx.author.id != 805453439499894815:
await ctx.send(embed=embedAdminOnly)
return
else:
totalwings = 0
send_data = []
wallets = await get_all_wallets()
for wallet in wallets:
wingscount = 0
if wallet["address"] != "AOAZMP5WTCCHOPKZZICV5KEZ7IH6BRFIDI47ONQU42QNOTTAW4ACZVXDHA":
account_info = algod_client.account_info(wallet['address'])
assets = account_info.get("assets", [])
for asset in assets:
if asset["amount"] > 0 and asset["asset-id"] in angel_wings:
wingscount = asset["amount"]
if wingscount != 0:
send_data.append([wallet["address"], wingscount, wallet["userid"]])
totalwings += wingscount
current_time = (datetime.now()).strftime('%Y-%m-%dT%H:%M:%SZ')
await update_wings(wallet["address"], current_time, wingscount)
totalwings_with_angel = int(totalwings*1.33333)
payment_per_wing = round(350/totalwings_with_angel, 3)
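            # Illustrative arithmetic (added; the numbers are made up): with
            # totalwings = 900, totalwings_with_angel = int(900 * 1.33333) = 1199,
            # so the 350 ALGO pool pays round(350 / 1199, 3) = 0.292 per wing.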
embedAW.set_footer(text=f"All Algorand Drops Are Successful! ๐งโโ๏ธ")
await send_revenue(send_data, payment_per_wing)
embedAW.add_field(name=f"View Revenue Wallet Below:", value=f"[AlgoExplorer Link](https://algoexplorer.io/address/{angel_wings_wallet})", inline=False)
embedAW.add_field(name=f"-----------------------------------------------", value="", inline=False)
embedAW.add_field(name=f"Total Staked Angel Wings", value=f"{totalwings_with_angel}", inline=False)
embedAW.add_field(name=f"Payment Sent Per Angel Wing", value=f"{payment_per_wing}A", inline=False)
await ctx.send(embed=embedAW)
embedAW.clear_fields()
send_data = []
class StakingCog(commands.Cog):
@cog_ext.cog_slash(name="admin-staking-drop", description="Admin Use Only!")
async def send_staking(self, ctx):
if ctx.author.id != 805453439499894815:
await ctx.send(embed=embedAdminOnly)
return
else:
embedStaking = discord.Embed(
title="Staking Rewards Drop Commencing...",
color=0xFF1C0A,
)
embedStaking.set_footer(text=f"Please wait while I gather The Order and The Ghosts Of Algo ๐งโโ๏ธ")
message = await ctx.send(embed=embedStaking)
send_data = []
wallets = await get_all_wallets()
total_staked = 0
total_staked_ghosts = 0
total_order_sent = 0
total_exp_sent = 0
for wallet in wallets:
if wallet["address"] != "AOAZMP5WTCCHOPKZZICV5KEZ7IH6BRFIDI47ONQU42QNOTTAW4ACZVXDHA":
account_info = algod_client.account_info(wallet['address'])
assets = account_info.get("assets", [])
ghostcount = 0
ghosticoncount = 0
fo_1count = 0
fo_2count = 0
fo_3count = 0
fo_4count = 0
fo_5count = 0
for asset in assets:
if asset["amount"] > 0 and asset["asset-id"] in ghosts:
ghostcount += 1
if asset["amount"] > 0 and asset["asset-id"] in ghostsIcons:
ghosticoncount += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank1:
fo_1count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank2:
fo_2count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank3:
fo_3count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank4:
fo_4count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank5:
fo_5count += 1
count = fo_1count + fo_2count + fo_3count + fo_4count + fo_5count
ghosts_final_count = ghostcount + ghosticoncount
total_exp = ghostcount + (ghosticoncount*5) + (fo_1count*3) + (fo_2count*5) + (fo_3count*8) + (fo_4count*12) + (fo_5count*25)
total_order = count
send_data.append([wallet["address"], count, ghosts_final_count, total_order, total_exp])
total_staked += count
total_staked_ghosts += ghosts_final_count
total_order_sent += total_order
total_exp_sent += total_exp
await staking_rewards(send_data)
embedStaking = discord.Embed(
title="Staking Rewards Drop Complete!",
color=0xFF1C0A,
)
embedStaking.add_field(name=f"Staked Fallen Order", value=f"{total_staked}", inline=False)
embedStaking.add_field(name=f"Staked Ghosts Of Algo", value=f"{total_staked_ghosts}", inline=False)
embedStaking.add_field(name=f"Total Staking Rewards Sent", value=f"{total_order_sent} $ORDER | {total_exp_sent} $EXP", inline=False)
embedStaking.set_footer(text=f"Play some games and upgrade your characters! ๐งโโ๏ธ")
embedStaking.set_image(url="https://bunny-cdn.algoxnft.com/production/collections/fallen-order---main-assets-thumb.png?width=240")
await message.edit(embed=embedStaking)
send_data = []
class BuyTicketsCog(commands.Cog):
@cog_ext.cog_slash(name="tickets", description="Buy $RAFFLE Tickets With ORDER/EXP", options=[
{
"name": "payment",
"description": "Payment Currency",
"type": 3,
"required": True,
"choices": [
{
"name": "ORDER",
"value": "ORDER"
},
{
"name": "EXP",
"value": "EXP"
}
]
},
{
"name": "amount",
"description": "Amount Of Tickets To Buy",
"type": 4,
"required": True
}
])
async def buy_tickets(self, ctx, payment, amount):
if payment == "ORDER":
token_id = 811718424
cost = amount * 5
elif payment == "EXP":
token_id = 811721471
cost = amount * 50
sender = ctx.author.id
sender_name = ctx.author.name
wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(sender)
if wallet == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
else:
sender_balance = await get_balance(wallet, token_id)
sender_balance_raffle = await get_balance(wallet, 815766197)
if sender_balance == 0:
await ctx.send(embed=embedErr, hidden=True)
elif sender_balance < amount:
await ctx.send(embed=embedErr, hidden=True)
else:
txnid = await send_assets(sender_name, wallet, fallen_order_main, token_id, payment, cost)
txnid2 = await send_assets("Fallen Order Raffles", fallen_order_main, wallet, 815766197, "RAFFLE", amount)
new_sender_bal = sender_balance - cost
new_sender_bal_raffle = sender_balance_raffle + amount
embedPurchased = discord.Embed(
title=f"I have transformed {cost} ${payment} into {amount} $RAFFLE Tickets for <@{sender}>",
description=f"[Payment Txn](https://algoexplorer.io/tx/{txnid}) | [Receipt Txn](https://algoexplorer.io/tx/{txnid2})",
color=0xFFFB0A
)
embedPurchased.set_footer(text=f"New ${payment} Balance: {new_sender_bal}\nNew $RAFFLE Balance: {new_sender_bal_raffle}")
embedPurchased.set_image(url="https://nft-media.algoexplorerapi.io/images/bafkreiabe7amkqwuz6kip7xnx6c5bx7v73bw2qofuaoqhu23nufrwfnn4e")
await ctx.send(embed=embedPurchased)
return
class BuyEXPCog(commands.Cog):
@cog_ext.cog_slash(name="orderexp", description="Swap $ORDER for $EXP", options=[
{
"name": "amount",
"description": "Amount Of ORDER To Swap",
"type": 4,
"required": True
}
])
async def buy_tickets(self, ctx, amount):
exp_amount = amount * 10
sender = str(ctx.author.id)
sender_name = ctx.author.name
wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(sender)
if wallet == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
else:
sender_balance_exp = await get_balance(wallet, 811721471)
sender_balance_order = await get_balance(wallet, 811718424)
if sender_balance_order == 0:
await ctx.send(embed=embedErr, hidden=True)
elif sender_balance_order < amount:
await ctx.send(embed=embedErr, hidden=True)
else:
txnid = await send_assets(sender_name, wallet, fallen_order_main, 811718424, "ORDER", amount)
txnid2 = await send_assets("Token Swap. The Order", fallen_order_main, wallet, 811721471, "EXP", exp_amount)
new_sender_bal_order = sender_balance_order - amount
new_sender_bal_exp = sender_balance_exp + exp_amount
embedSwapped = discord.Embed(
title=f"I have swapped {amount} $ORDER to {exp_amount} $EXP on <@{sender}>'s behalf",
description=f"[Payment Txn](https://algoexplorer.io/tx/{txnid}) | [Receipt Txn](https://algoexplorer.io/tx/{txnid2})",
color=0xFFFB0A
)
embedSwapped.set_footer(text=f"New $ORDER Balance: {new_sender_bal_order}\nNew $EXP Balance: {new_sender_bal_exp}")
embedSwapped.set_thumbnail(url="https://bunny-cdn.algoxnft.com/production/collections/fallen-order---main-assets-thumb.png?width=240")
await ctx.send(embed=embedSwapped)
class SendTokensCog(commands.Cog):
@cog_ext.cog_slash(name="send", description="Send EXP/ORDER/RAFFLE/Logs to other users", options=[
{
"name": "user",
"description": "Receiving User",
"type": 6,
"required": True
},
{
"name": "token",
"description": "Token To Send",
"type": 3,
"required": True,
"choices": [
{
"name": "EXP",
"value": "EXP"
},
{
"name": "RAFFLE",
"value": "RAFFLE"
},
{
"name": "ORDER",
"value": "ORDER"
},
{
"name": "Oak Logs",
"value": "Oak Logs"
}
]
},
{
"name": "amount",
"description": "Amount To Send",
"type": 4,
"required": True
}
])
async def send(self, ctx, user, token, amount):
if token == "ORDER":
token_id = 811718424
elif token == "EXP":
token_id = 811721471
elif token == "RAFFLE":
token_id = 815766197
elif token == "Oak Logs":
token_id = 1064863037
sender = str(ctx.author.id)
receiver = str(user.id)
receiver_name = user.name
sender_name = ctx.author.name
wallet1, name1, won1, lost1, expwon1, explost1, lastdrip1, drip_exp1 = await get_wallet(sender)
wallet2, name2, won2, lost2, expwon2, explost2, lastdrip1, drip_exp1 = await get_wallet(receiver)
if wallet1 == '' or wallet2 == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
return
else:
sender_balance = await get_balance(wallet1, token_id)
receiver_balance = await get_balance(wallet2, token_id)
if sender_balance == -1 or receiver_balance == -1:
if token == "ORDER":
await ctx.send(embed=embedNoOptORDER)
if token == "EXP":
await ctx.send(embed=embedNoOptEXP)
if token == "RAFFLE":
await ctx.send(embed=embedNoOptRAFFLE)
if token == "Oak Logs":
embedNoOpt = discord.Embed(
title=f"You are not opted into Oak Logs!",
description=f"Please [click here](https://www.randgallery.com/algo-collection/?address=1064863037) to opt in and try again...",
color=0xFF0000
)
embedNoOpt.set_thumbnail(url="https://bunny-cdn.algoxnft.com/production/collections/fallen-order---main-assets-thumb.png?width=240")
await ctx.send(embed=embedNoOpt)
elif sender_balance == 0:
await ctx.send(embed=embedErr, hidden=True)
elif sender_balance < amount:
await ctx.send(embed=embedErr, hidden=True)
else:
if token == "Oak Logs":
txnid = await trade_logs(sender_name, wallet1, wallet2, 1064863037, amount)
else:
txnid = await send_assets(sender_name, wallet1, wallet2, token_id, token, amount)
new_sender_bal = sender_balance - amount
new_receiver_bal = receiver_balance + amount
embedSent = discord.Embed(
title=f"I have bestowed {amount} ${token} upon <@{receiver}>",
description=f"Sent By: <@{sender}> ๐ [Txn Link](https://algoexplorer.io/tx/{txnid})",
color=0xFFFB0A
)
embedSent.set_footer(text=f"{sender_name}'s New Balance: {new_sender_bal} ${token}\n{receiver_name}'s New Balance: {new_receiver_bal} ${token}")
await ctx.send(embed=embedSent)
class AdminSendCog(commands.Cog):
@cog_ext.cog_slash(name="admin-send", description="ADMIN ONLY! Send EXP/ORDER/RAFFLE to other users", options=[
{
"name": "sender",
"description": "Receiving Address",
"type": 3,
"required": True
},
{
"name": "receiver",
"description": "Receiving Address",
"type": 3,
"required": True
},
{
"name": "token",
"description": "Token To Send",
"type": 3,
"required": True,
"choices": [
{
"name": "EXP",
"value": "EXP"
},
{
"name": "RAFFLE",
"value": "RAFFLE"
},
{
"name": "ORDER",
"value": "ORDER"
}
]
},
{
"name": "amount",
"description": "Amount To Send",
"type": 4,
"required": True
}
])
async def admin_clawback(self, ctx, sender, receiver, token, amount):
if ctx.author.id != 805453439499894815:
await ctx.send(embed=embedAdminOnly)
return
else:
if token == "ORDER":
token_id = 811718424
elif token == "EXP":
token_id = 811721471
elif token == "RAFFLE":
token_id = 815766197
sender_balance = await get_balance(sender, token_id)
receiver_balance = await get_balance(receiver, token_id)
sender_short = sender[:5] + "..." + sender[-5:]
receiver_short = receiver[:5] + "..." + receiver[-5:]
if sender_balance == -1 or receiver_balance == -1:
if token == "ORDER":
await ctx.send(embed=embedNoOptORDER)
if token == "EXP":
await ctx.send(embed=embedNoOptEXP)
elif sender_balance == 0:
await ctx.send(embed=embedErr)
elif sender_balance < amount:
await ctx.send(embed=embedErr)
else:
new_sender_bal = sender_balance - amount
new_receiver_bal = receiver_balance + amount
txnid = await send_assets(sender_short, sender, receiver, token_id, token, amount)
embedSent = discord.Embed(
title=f"I have bestowed {amount} ${token} upon {receiver_short}",
description=f"Sent By: {sender_short} ๐ [Txn Link](https://algoexplorer.io/tx/{txnid})",
color=0xFFFB0A
)
embedSent.set_footer(text=f"{sender_short}'s New Balance: {new_sender_bal} ${token}\n{receiver_short}'s New Balance: {new_receiver_bal} ${token}")
await ctx.send(embed=embedSent)
class ManualSendTokensCog(commands.Cog):
@cog_ext.cog_slash(name="manual-send", description="Send EXP/ORDER/RAFFLE to a specific address!", options=[
{
"name": "address",
"description": "Receiving Wallet Address",
"type": 3,
"required": True
},
{
"name": "token",
"description": "Token To Send",
"type": 3,
"required": True,
"choices": [
{
"name": "EXP",
"value": "EXP"
},
{
"name": "RAFFLE",
"value": "RAFFLE"
},
{
"name": "ORDER",
"value": "ORDER"
}
]
},
{
"name": "amount",
"description": "Amount To Send",
"type": 4,
"required": True
}
])
async def manual_send(self, ctx, address, token, amount):
if token == "ORDER":
token_id = 811718424
elif token == "EXP":
token_id = 811721471
elif token == "RAFFLE":
token_id = 815766197
sender = str(ctx.author.id)
sender_name = ctx.author.name
wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(sender)
if wallet == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
return
else:
sender_balance = await get_balance(wallet, token_id)
receiver_balance = await get_balance(address, token_id)
if sender_balance == -1 or receiver_balance == -1:
if token == "ORDER":
await ctx.send(embed=embedNoOptORDER)
elif token == "EXP":
await ctx.send(embed=embedNoOptEXP)
else:
await ctx.send(embed=embedNoOptRAFFLE)
elif sender_balance == 0:
await ctx.send(embed=embedErr, hidden=True)
elif sender_balance < amount:
await ctx.send(embed=embedErr, hidden=True)
else:
txnid = await send_assets(sender_name, wallet, address, token_id, token, amount)
new_sender_bal = sender_balance - amount
embedSent = discord.Embed(
title=f"I have bestowed {amount} ${token} upon {address}",
description=f"Sent By: <@{sender}> ๐ [Txn Link](https://algoexplorer.io/tx/{txnid})",
color=0xFFFB0A
)
embedSent.set_footer(text=f"{sender_name}'s New Balance: {new_sender_bal} ${token}")
await ctx.send(embed=embedSent)
return
class BalanceCog(commands.Cog):
@cog_ext.cog_slash(name="balance", description="Check Your On Chain Balances!")
async def get_all_balances(self, ctx):
await ctx.defer()
wallet, name, won, lost, expwon, explost, lastdrip, drip_exp = await get_wallet(str(ctx.author.id))
if wallet == '':
embedNoReg = discord.Embed(
title="Click Here To Register!",
url="https://app.fallenorder.xyz",
description=f"Please verify your wallet via our website to continue..\nEnsure you copy your user id below for the verification process:\nUser ID: {ctx.author.id}",
color=0xFF1C0A,
)
await ctx.send(embed=embedNoReg)
return
account_info = algod_client.account_info(wallet)
assets = account_info.get("assets", [])
ghostcount = 0
ghosticoncount = 0
wingcount = 0
aoa = 0
order = 0
exp = 0
raffle = 0
ghost = 0
fo_1count = 0
fo_2count = 0
fo_3count = 0
fo_4count = 0
fo_5count = 0
for asset in assets:
if asset["amount"] > 0 and asset["asset-id"] in angel_wings:
wingcount = asset["amount"]
if asset["asset-id"] == balance_list[0]:
aoa = asset["amount"]
if asset["asset-id"] == balance_list[1]:
order = asset["amount"]
if asset["asset-id"] == balance_list[2]:
exp = asset["amount"]
if asset["asset-id"] == balance_list[3]:
raffle = asset["amount"]
if asset["asset-id"] == balance_list[4]:
ghost = asset["amount"]/10000
if asset["amount"] > 0 and asset["asset-id"] in fo_rank1:
fo_1count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank2:
fo_2count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank3:
fo_3count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank4:
fo_4count += 1
if asset["amount"] > 0 and asset["asset-id"] in fo_rank5:
fo_5count += 1
if asset["amount"] > 0 and asset["asset-id"] in ghosts:
ghostcount += 1
if asset["amount"] > 0 and asset["asset-id"] in ghostsIcons:
ghosticoncount += 1
balances = [aoa, order, exp, raffle, ghost]
balances_formatted = []
for balance in balances:
if balance >= 1000000000:
formatted_bal = f"{balance / 1000000000:.3f}B"
            elif balance >= 1000000:
                formatted_bal = f"{balance / 1000000:.3f}M"
elif balance >= 1000:
formatted_bal = f"{balance / 1000:.3f}K"
else:
formatted_bal = str(balance)
balances_formatted.append(formatted_bal)
embedBalances = discord.Embed(
title=f"Current Holdings - {ctx.author.name}",
url=f"https://algoexplorer.io/address/{wallet}",
color=0xFCE303
)
embedBalances.add_field(name=f"AoA", value=f"{balances_formatted[0]}", inline=False)
embedBalances.add_field(name=f"ORDER", value=f"{balances_formatted[1]}", inline=False)
embedBalances.add_field(name=f"EXP", value=f"{balances_formatted[2]}", inline=False)
embedBalances.add_field(name=f"RAFFLE", value=f"{balances_formatted[3]} Tickets", inline=False)
embedBalances.add_field(name=f"GHOST", value=f"{balances_formatted[4]}", inline=False)
embedBalances.add_field(name=f"Angel Wings", value=f"{wingcount}", inline=False)
embedBalances.add_field(name=f"Fallen Order", value=f"{fo_1count} Angel | {fo_2count} Celestial | {fo_3count} Ethereal | {fo_4count} Empyreal | {fo_5count} Immortal ", inline=False)
embedBalances.add_field(name=f"Ghosts Of Algo", value=f"{ghostcount} Ghosties | {ghosticoncount} Icon", inline=False)
embedBalances.set_thumbnail(url="https://bunny-cdn.algoxnft.com/production/collections/fallen-order---main-assets-thumb.png?width=240")
embedBalances.set_footer(text=f"*Holdings displayed are on chain and real time*", icon_url="https://s3.amazonaws.com/algorand-wallet-mainnet-thumbnails/prism-images/media/asset_verification_requests_logo_png/2022/06/22/d2c56a8e61244bd78017e38180d15c91.png--resize--w__200--q__70.webp")
await ctx.send(embed=embedBalances)
def setup(client: client):
client.add_cog(DripCog(client))
client.add_cog(WingRevenueCog(client))
client.add_cog(StakingCog(client))
client.add_cog(BuyTicketsCog(client))
client.add_cog(BuyEXPCog(client))
client.add_cog(SendTokensCog(client))
client.add_cog(AdminSendCog(client))
client.add_cog(ManualSendTokensCog(client))
client.add_cog(BalanceCog(client))
|
AngelsOfAres/Fallen-Order-Keepers
|
c_heimdall/transfers.py
|
transfers.py
|
py
| 32,658 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12026015047
|
import pygame
# Global Consts
# Colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
BLUE = ( 0, 0, 255)
RED = ( 255, 0, 0)
GREEN = ( 0, 255, 0)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
class Player(pygame.sprite.Sprite):
# -- Attribute
# Set speed vector
change_x = 0
change_y = 0
    # list of sprites we can bump against
level = None
# -- Methods
def __init__(self):
# call parents constructor
super().__init__()
# Create image of the block
width = 40
height = 60
self.image = pygame.Surface([width, height])
self.image.fill(RED)
# Set a reference to the image rect.
self.rect = self.image.get_rect()
def update(self):
# Gravity
self.calc_grav()
# movement
self.rect.x += self.change_x
# Check for collision
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# If moving right, set right side to left side of object we are colliding with
if self.change_x > 0:
self.rect.right = block.rect.left
elif self.change_x < 0:
# do the opposite if we are moving left
self.rect.left = block.rect.right
# move up or down
self.rect.y += self.change_y
# check for collision
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# reset position based on top/bottom of the object
if self.change_y > 0:
self.rect.bottom = block.rect.top
elif self.change_y < 0:
self.rect.top = block.rect.bottom
# stop vertical movement
self.change_y = 0
def calc_grav(self):
""" Caclulate effect of grav"""
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .35
# check if we are on the ground
if self.rect.y >= SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = SCREEN_HEIGHT - self.rect.height
def jump(self):
""" Called when jump is pressed """
# move down a bit and see if there is a platform below us.
# Move down 2 pixels because it doesn't work well if we only move down 1
# when working with a platform moving down.
self.rect.y += 2
platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
self.rect.y -= 2
# If it is ok to jump, set speed upwards
if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:
self.change_y = -10
# Player controlled movement
def go_left(self):
self.change_x = -6
def go_right(self):
self.change_x = 6
def stop(self):
self.change_x = 0
class Platform(pygame.sprite.Sprite):
""" Platforms to jump on """
def __init__(self, width, height):
"""Platform constructor"""
super().__init__()
self.image = pygame.Surface([width, height])
self.image.fill(GREEN)
self.rect = self.image.get_rect()
class Level():
"""Generic super-class to define a level. Creates a child class with level-specific info"""
    # List of sprites used in each level
platform_list = None
enemy_list = None
#how far the world has been scrolled left/right
world_shift = 0
def __init__(self, player):
"""Constructor. NEeded for when moving platforms collide w the player"""
self.platform_list = pygame.sprite.Group()
self.enemy_list = pygame.sprite.Group()
self.player = player
#Update everything on the level
def update(self):
"""update everything in the level"""
self.platform_list.update()
self.enemy_list.update()
def draw(self, screen):
"""Draw everything on the level"""
screen.fill(BLUE)
self.platform_list.draw(screen)
self.enemy_list.draw(screen)
def shift_world(self,shift_x):
"""scroll left and right when the player moves"""
#keep track of shift amount
self.world_shift += shift_x
#go through sprite list and shift
for platform in self.platform_list:
platform.rect.x += shift_x
for enemy in self.enemy_list:
enemy.rect.x += shift_x
#Create platforms
class Level_01(Level):
"""Def for level 1"""
def __init__(self, player):
#Call parent constructor
Level.__init__(self,player)
self.level_limit = -1000
#array with width, height, x and y of platforms
level = [[210, 70, 500, 500],
[210, 70, 800, 400],
[210, 70, 800, 400],
[210, 70, 800, 400],
]
#Go through the array above and add platforms
for platform in level:
block = Platform(platform[0], platform[1])
block.rect.x = platform[2]
block.rect.y = platform[3]
block.player = self.player
self.platform_list.add(block)
#Create platforms for level 2
class Level_02(Level):
"""Def for level 2"""
def __init__(self, player):
Level.__init__(self,player)
self.level_limit = -1000
level = [[210, 30, 450, 570],
[210, 30, 850, 420],
[210, 30, 1000, 520],
[210, 30, 1120, 280],
]
# Go through array above
for platform in level:
block = Platform(platform[0], platform[1])
block.rect.x = platform[2]
block.rect.y = platform[3]
block.player = self.player
self.platform_list.add(block)
def main():
"""Main Program"""
pygame.init()
#set height and width of screen
size = [SCREEN_WIDTH, SCREEN_HEIGHT]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Side-Scrolling platformer")
#Create player
player = Player()
#create all levels
level_list = []
level_list.append(Level_01(player))
level_list.append(Level_02(player))
#set the current level
current_level_no = 0
current_level = level_list[current_level_no]
active_sprite_list = pygame.sprite.Group()
player.level = current_level
player.rect.x = 340
player.rect.y = SCREEN_HEIGHT - player.rect.height
active_sprite_list.add(player)
    # loop until the user clicks the close button
done = False
    # Used to manage how fast the screen updates
clock = pygame.time.Clock()
#------Main Program Loop--------
while not done:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player.go_left()
if event.key == pygame.K_RIGHT:
player.go_right()
if event.key == pygame.K_UP:
player.jump()
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT and player.change_x < 0:
player.stop()
if event.key == pygame.K_RIGHT and player.change_x > 0:
player.stop()
#Update the player
active_sprite_list.update()
#Update items
current_level.update()
#If player nears the right side, shift the world left
if player.rect.right >= 500:
            diff = player.rect.right - 500
player.rect.right = 500
current_level.shift_world(-diff)
#If player nears left side, shift right
if player.rect.left <= 120:
diff = 120 - player.rect.left
player.rect.left = 120
current_level.shift_world(diff)
#If player reaches the end of the level, go to the next
current_position = player.rect.x + current_level.world_shift
if current_position < current_level.level_limit:
            player.rect.x = 120
if current_level_no < len(level_list)-1:
current_level_no += 1
current_level = level_list[current_level_no]
player.level = current_level
current_level.draw(screen)
active_sprite_list.draw(screen)
        # limit to 60 fps
        clock.tick(60)
        # update the screen with what we've drawn
        pygame.display.flip()
pygame.quit()
if __name__ == "__main__":
main()
| repo_name: danielp28/Python-Testing | sub_path: platformer.py | file_name: platformer.py | file_ext: py | file_size_in_byte: 8,838 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 30324581341 |
import os, sys, re, pickle, json
import numpy as np
import cv2
import pandas as pd
def get_seq(seq_dir, seq_name):
seq_file = seq_dir + "/" + seq_name + ".pkl"
seq = pickle.load(open(seq_file, "rb"), encoding='latin1')
return seq
def get_3dkeypoints(seq, frame_id, model_id):
"""
SMPL joints
0: 'pelvis',
1: 'left_hip',
2: 'right_hip',
3: 'spine1',
4: 'left_knee',
5: 'right_knee',
6: 'spine2',
7: 'left_ankle',
8: 'right_ankle',
9: 'spine3',
10: 'left_foot',
11: 'right_foot',
12: 'neck',
13: 'left_collar',
14: 'right_collar',
15: 'head',
16: 'left_shoulder',
17: 'right_shoulder',
18: 'left_elbow',
19: 'right_elbow',
20: 'left_wrist',
21: 'right_wrist',
22: 'left_hand',
23: 'right_hand'
:param seq:
:param frame_id:
:param model_id:
:return:
"""
_3d_keypoints = seq["jointPositions"][model_id][frame_id]
_3d_keypoints = _3d_keypoints.reshape(-1, 3)
return _3d_keypoints
def get_cam_params(seq, frame_id):
intrinsic = seq["cam_intrinsics"]
extrinsic = seq["cam_poses"][frame_id]
R = extrinsic[:3,:3]
t = extrinsic[:-1, -1]
t = np.expand_dims(t, axis=1)
return intrinsic, R, t, extrinsic
def estimate_from_3d(seq, frame_id, model_id):
keypoints3d = get_3dkeypoints(seq, frame_id, model_id)
intrinsic, R, t, extrinsic = get_cam_params(seq, frame_id)
estimated_keypoints_2d, _ = cv2.projectPoints(keypoints3d, R, t, intrinsic, None)
estimated_keypoints_2d = np.squeeze(estimated_keypoints_2d, axis=1)
return estimated_keypoints_2d
def approximate_bb(keypoints):
x_offset = 30
y_offset_max = 80
y_offset_min = 130
xs = keypoints.T[0]
ys = keypoints.T[1]
x_min = int(xs.min()) - x_offset
x_max = int(xs.max()) + x_offset
y_min = int(ys.min()) - y_offset_min
y_max = int(ys.max()) + y_offset_max
top_left = [x_min, y_min]
bottom_right = [x_max, y_max]
return top_left, bottom_right
def smpl2coco(smpl_pose):
"""
smpl_format = ["pelvis", "left_hip", "right_hip", "lower_spine", "left_knee", "right_knee", # 0-5
"middle_spine", "left_ankle", "right_ankle", "upper_spine", "left_foot", "right_foot", # 6-11
"neck", "left_collar", "right_collar", "head", "left_shoulder", "right_shoulder", # 12-17
"left_elbow", "right_elbow", "left_wrist", "right_wrist", "left_hand", "right_hand"] # 18-23
coco_format = ['right_shoulder', 'right_elbow', 'right_wrist', 'left_shoulder',
'left_elbow', 'left_wrist', 'right_hip', 'right_knee', 'right_ankle',
'left_hip', 'left_knee', 'left_ankle', 'head', 'neck', 'right_ear',
'left_ear', 'nose', 'right_eye', 'left_eye']
"""
offset = 0
num_models = len(smpl_pose)
coco_poses = np.zeros((num_models, 19, 2))
#(smpl, coco)
common_joints = [(1, 9), (2, 6), (4, 10), (5, 7), (7, 11), (8, 8), (12, 13), (15, 12), (16, 3), (17, 0), (18, 4), (19, 1), (20, 5), (21, 2)]
for model_id in range(num_models):
for (smpl_joint, coco_joint) in common_joints:
coco_poses[model_id][coco_joint] = smpl_pose[model_id][smpl_joint]
coco_poses[model_id][14] = coco_poses[model_id][12] + offset # right_ear = head
coco_poses[model_id][15] = coco_poses[model_id][12] + offset # left_ear = head
coco_poses[model_id][16] = coco_poses[model_id][12] + offset # nose = head
coco_poses[model_id][17] = coco_poses[model_id][12] + offset # right_eye = head
coco_poses[model_id][18] = coco_poses[model_id][12] + offset # left_eye = head
return coco_poses
def dump_sorted(filename_list, index_list, occ_status, subset_name, scene_name="courtyard_basketball_00", folder_name = "./3dpw/selected_frames"):
selected = zip(filename_list, index_list, occ_status)
selected_sorted = sorted(selected, key=lambda x: x[1], reverse=True) # sort by occlusion value in descending order
os.makedirs(folder_name + "/" + scene_name, exist_ok=True)
with open(folder_name + "/" + scene_name + "/" + subset_name+".txt", "w+") as dump_file :
for result in selected_sorted:
dump_file.write(
"{} {} #{}\n".format(result[0], result[1], result[2])
)
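# A quick shape check (an illustrative sketch, not part of the original utils):
# mapping two dummy SMPL poses with 24 2-D joints each yields two 19-joint COCO poses.
if __name__ == "__main__":
    dummy_smpl = np.zeros((2, 24, 2))
    coco = smpl2coco(dummy_smpl)
    print(coco.shape)  # (2, 19, 2)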
| repo_name: egirgin/occlusionIndex | sub_path: 3dpw/src/utils.py | file_name: utils.py | file_ext: py | file_size_in_byte: 4,338 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 21253269122 |
from django.urls import path
from .views import TagContentView  # import TagContentView
from .views import XieyiConfigDateView
from .views import NodeConfigMakeDevRequest, NodeConfigCopyRequest, NodeConfigReadAndSaveRequest, NodeConfigDeleteRequest
from .views import XieyiConfigDateOrderView, XieyiTestCaseView, SenderHexDataOrderView, RecriminatDataOrderView

# Each entry below configures the copy/create test-case URLs; `app_name` at the
# bottom declares the URL namespace, so reversals are qualified by the namespace name.
urlpatterns = [
    # URL config for the node configuration page
    path('tagcontent/<path:tagcontent_id>/', TagContentView.as_view(), name="tag_content_id"),
    # URL config for the protocol test case page
    path('xieyiconfigdate/<path:xieyiconfigdate_id>/', XieyiConfigDateView.as_view(), name="xie_yi_config_date_id"),
    # NodeConfig page: generate the dev-side config
    path('nodeconfigmakedev/<path:nodeconfig_id>/', NodeConfigMakeDevRequest, name="node_config_make_dev_id"),
    # NodeConfig page: full copy
    path('nodeconfigallcopy/<path:nodeconfig_id>/', NodeConfigCopyRequest, name="node_config_all_copy_id"),
    # NodeConfig page: read the uploaded file and save it to the database
    path('nodeconfigreadandsave/<path:nodeconfig_id>/', NodeConfigReadAndSaveRequest, name="node_config_read_and_save_id"),
    # NodeConfig page: delete this record
    path('nodeconfigalldelete/<path:nodeconfig_id>/', NodeConfigDeleteRequest, name="node_config_all_delete_id"),
    # protocol test cases: dependency configuration URLs
    path('xieyiconfigdateorder/<path:xieyiconfigdateorder_id>/', XieyiConfigDateOrderView.as_view(), name="xie_yi_config_date_order_id"),
    # protocol test cases: test case URLs
    path('xieyitestcase/<path:xieyitestcase_id>/', XieyiTestCaseView.as_view(), name="new_xie_yi_test_case_id"),
    # protocol test cases: serial-port send/receive data URLs
    path('senderhexdataorder/<path:senderhexdataorder_id>/', SenderHexDataOrderView.as_view(), name="sender_hex_date_order_id"),
    # protocol test cases: monitored receive data URLs
    path('recriminatdataorder/<path:recriminatdataorder_id>/', RecriminatDataOrderView.as_view(), name="recriminat_data_order_id"),
]
app_name = 'shucaiyidate'
| repo_name: wawj901124/shangbaogongju | sub_path: apps/shucaiyidate/urls.py | file_name: urls.py | file_ext: py | file_size_in_byte: 3,118 | program_lang: python | lang: zh | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 6428170708 |
# File: LangChainchatOpenAI.py
# Author: Denys L
# Date: October 8, 2023
# Description:
import os
import sys
import hashlib
from typing import Any
import streamlit as st
from dotenv import load_dotenv
from langchain.callbacks.base import BaseCallbackHandler
from fundamentals.langchain_utils import StuffSummarizerByChapter
class StreamingStdOutCallbackHandlerPersonal(BaseCallbackHandler):
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
st.session_state.full_response = st.session_state.full_response + token
st.session_state.placeholder.markdown(
            st.session_state.full_response + "▌")
sys.stdout.write(token)
sys.stdout.flush()
def process_book(uploaded_file):
temp_file_path = f'.trash/{uploaded_file.name}'
with open(temp_file_path, 'wb') as file:
file.write(uploaded_file.read())
st.session_state.full_response = ""
    st.session_state.handler_ia_message = st.chat_message(
        "assistant", avatar="🤖")
st.session_state.placeholder = st.session_state.handler_ia_message.empty()
# magic
st.session_state.llm.summarize(temp_file_path)
# print output
st.session_state.placeholder.markdown(st.session_state.full_response)
st.session_state.messages.append(
{"role": "assistant", "content": st.session_state.full_response, "avatar": "รฐลธยคโ"})
st.session_state.full_response = ""
# remove temp file
os.remove(temp_file_path)
def main():
load_dotenv()
st.title("Storyteller")
if "messages" not in st.session_state:
st.session_state.messages = []
st.session_state.handler = StreamingStdOutCallbackHandlerPersonal()
st.session_state.llm = StuffSummarizerByChapter(
st.session_state.handler)
for message in st.session_state.messages:
with st.chat_message(message["role"], avatar=message["avatar"]):
st.markdown(message["content"])
st.sidebar.subheader("Your books")
uploaded_file = st.sidebar.file_uploader(
"Upload your Books here and click on 'Process' to start the story", accept_multiple_files=False)
if st.sidebar.button("Process"):
with st.spinner("Processing"):
process_book(uploaded_file)
if __name__ == '__main__':
main()
| repo_name: lyepustin/bookNLP | sub_path: app.py | file_name: app.py | file_ext: py | file_size_in_byte: 2,293 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 71723953788 |
# coding:utf-8
import datetime
from sqlalchemy import Column, Integer, DateTime, Numeric, create_engine, VARCHAR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from config import DB_CONFIG, DEFAULT_SCORE
'''
Base class for SQL operations. Columns:
ip, port, types (0 = high anonymity, 1 = transparent),
protocol (0 = http, 1 = https/http), country, area (province/city),
updatetime (last updated), speed (connection speed)
'''
BaseModel = declarative_base()
class Proxy(BaseModel):
__tablename__ = 'proxy'
id = Column(Integer, primary_key=True, autoincrement=True)
ip = Column(VARCHAR(16), nullable=False)
port = Column(Integer, nullable=False)
types = Column(Integer, nullable=False)
protocol = Column(Integer, nullable=False, default=0)
country = Column(VARCHAR(100), nullable=False)
area = Column(VARCHAR(100), nullable=False)
updatetime = Column(DateTime(), default=datetime.datetime.utcnow)
speed = Column(Numeric(5, 2), nullable=False)
score = Column(Integer, nullable=False, default=DEFAULT_SCORE)
def get_proxy(self):
if self.protocol < 0:
return None
return ("http://%s:%d" if self.protocol == 0 else "https://%s:%d") % (self.ip, self.port)
class SqlHelper(object):
params = {'id': Proxy.id, 'ip': Proxy.ip, 'port': Proxy.port, 'types': Proxy.types, 'protocol': Proxy.protocol,
'country': Proxy.country, 'area': Proxy.area, 'score': Proxy.score}
def __init__(self):
if 'sqlite' in DB_CONFIG['DB_CONNECT_STRING']:
connect_args = {'check_same_thread': False}
self.engine = create_engine(DB_CONFIG['DB_CONNECT_STRING'], echo=False, connect_args=connect_args)
else:
self.engine = create_engine(DB_CONFIG['DB_CONNECT_STRING'], echo=False)
DB_Session = sessionmaker(bind=self.engine)
self.session = DB_Session()
def init_db(self):
BaseModel.metadata.create_all(self.engine)
# def drop_db(self):
# BaseModel.metadata.drop_all(self.engine)
def delete(self, conditions=None):
if conditions:
conditon_list = []
for key in list(conditions.keys()):
if self.params.get(key, None):
conditon_list.append(self.params.get(key) == conditions.get(key))
conditions = conditon_list
query = self.session.query(Proxy)
for condition in conditions:
query = query.filter(condition)
deleteNum = query.delete()
self.session.commit()
else:
deleteNum = 0
        return {'deleteNum': deleteNum}
def update(self, conditions=None, value=None):
        '''
        `conditions` is a dict keyed like self.params.
        :param conditions:
        :param value: also a dict, e.g. {'ip': '192.168.0.1'}
        :return:
        '''
if conditions and value:
conditon_list = []
for key in list(conditions.keys()):
if self.params.get(key, None):
conditon_list.append(self.params.get(key) == conditions.get(key))
conditions = conditon_list
query = self.session.query(Proxy)
for condition in conditions:
query = query.filter(condition)
updatevalue = {}
for key in list(value.keys()):
if self.params.get(key, None):
updatevalue[self.params.get(key, None)] = value.get(key)
updateNum = query.update(updatevalue)
self.session.commit()
else:
updateNum = 0
return {'updateNum': updateNum}
def select(self, count=None, conditions=None):
        '''
        `conditions` is a dict keyed like self.params.
        :param count:
        :param conditions:
        :return:
        '''
if conditions:
conditon_list = []
for key in list(conditions.keys()):
if self.params.get(key, None):
conditon_list.append(self.params.get(key) == conditions.get(key))
conditions = conditon_list
else:
conditions = []
query = self.session.query(Proxy.id, Proxy.ip, Proxy.port, Proxy.score, Proxy.protocol)
if len(conditions) > 0 and count:
for condition in conditions:
query = query.filter(condition)
return query.order_by(Proxy.score.desc(), Proxy.speed).limit(count).all()
elif count:
return query.order_by(Proxy.score.desc(), Proxy.speed).limit(count).all()
elif len(conditions) > 0:
for condition in conditions:
query = query.filter(condition)
return query.order_by(Proxy.score.desc(), Proxy.speed).all()
else:
return query.order_by(Proxy.score.desc(), Proxy.speed).all()
def select_valid(self, count=None, conditions=None):
        '''
        `conditions` is a dict keyed like self.params.
        :param count:
        :param conditions:
        :return:
        '''
if conditions:
conditon_list = []
for key in list(conditions.keys()):
if self.params.get(key, None):
conditon_list.append(self.params.get(key) == conditions.get(key))
conditions = conditon_list
else:
conditions = []
query = self.session.query(Proxy.id, Proxy.ip, Proxy.port, Proxy.score, Proxy.protocol)
query = query.filter(Proxy.score > 0)
if len(conditions) > 0 and count:
for condition in conditions:
query = query.filter(condition)
return query.order_by(Proxy.id.desc(), Proxy.score.desc(), Proxy.speed.desc()).limit(count).all()
elif count:
return query.order_by(Proxy.id.desc(), Proxy.score.desc(), Proxy.speed.desc()).limit(count).all()
elif len(conditions) > 0:
for condition in conditions:
query = query.filter(condition)
return query.order_by(Proxy.score.desc(), Proxy.speed.desc()).all()
else:
return query.order_by(Proxy.score.desc(), Proxy.speed.desc()).all()
def close(self):
pass
if __name__ == '__main__':
sqlhelper = SqlHelper()
condition = {"country": "ๅฝๅ
"}
ips = sqlhelper.select(conditions=condition)
print(len(ips))
for ip in ips:
proxy = Proxy(id=ip.id, ip=ip.ip, port=ip.port, protocol=ip.protocol, score=ip.score)
print(proxy.get_proxy())
print(ips[0].id)
sqlhelper.update(conditions={"id": ips[0].id}, value={"score": 0})
proxy = Proxy(ip="127.0.0.1", port=8080, protocol=0)
print(proxy.get_proxy())
| repo_name: xindemeng/python-projects | sub_path: jd_spider/jd_spider/db/SqlHelper.py | file_name: SqlHelper.py | file_ext: py | file_size_in_byte: 6,761 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 18100621514 |
"""
1971. Find if Path Exists in Graph
https://leetcode.com/problems/find-if-path-exists-in-graph/
"""
from typing import List, Tuple
from unittest import TestCase, main
class UnionFind:
def __init__(self, n: int) -> None:
self.root = list(range(n))
def find(self, a: int) -> int:
"""Returns the root value"""
if self.root[a] != a:
self.root[a] = self.find(self.root[a])
# Now self.root[a] points to the current root edge
return self.root[a]
def union(self, a: int, b: int):
"""Unoins two nodes"""
# Find the root of A and B
root_a, root_b = self.find(a), self.find(b)
# Always make root A smaller than root B
if root_b < root_a:
root_a, root_b = root_b, root_a
# Update root of B to point the root of A
# so that the two groups are now connected
self.root[root_b] = root_a
class Solution:
def validPath(
self, n: int, edges: List[List[int]], source: int, destination: int
) -> bool:
uf = UnionFind(n)
# Union
for a, b in edges:
uf.union(a, b)
# Find
return uf.find(source) == uf.find(destination)
class Test(TestCase):
data: List[Tuple[int, List[List[int]], int, int, bool]] = [
(
10,
[
[0, 7],
[0, 8],
[6, 1],
[2, 0],
[0, 4],
[5, 8],
[4, 7],
[1, 3],
[3, 5],
[6, 5],
],
7,
5,
True,
),
(3, [[0, 1], [1, 2], [2, 0]], 0, 2, True),
(6, [[0, 1], [0, 2], [3, 5], [5, 4], [4, 3]], 0, 5, False),
]
def test_solution(self):
s = Solution()
for n, edges, source, destination, expected in self.data:
self.assertEqual(s.validPath(n, edges, source, destination), expected)
if __name__ == "__main__":
main()
| repo_name: hirotake111/leetcode_diary | sub_path: leetcode/1971/solution.py | file_name: solution.py | file_ext: py | file_size_in_byte: 2,023 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 24177027496 |
import pygame
from pygame.locals import *
class MyPlane(pygame.sprite.Sprite):
def __init__(self,bg_size, screen):
pygame.sprite.Sprite.__init__(self)
self.screen = screen
self.image1 = pygame.image.load('../img/hero1.png').convert_alpha()
self.image2 = pygame.image.load('../img/hero2.png').convert_alpha()
self.active = True
self.image = self.image1
self.rect = self.image.get_rect()
self.rect.left = (bg_size[0] - 102) / 2
self.rect.top = 550
self.speed = 4
self.HP = 5
self.destroy_images = []
self.destroy_images.extend([pygame.image.load('../img/hero_blowup_n1.png').convert_alpha(),
pygame.image.load('../img/hero_blowup_n2.png').convert_alpha(),
pygame.image.load('../img/hero_blowup_n3.png').convert_alpha(),
pygame.image.load('../img/hero_blowup_n4.png').convert_alpha()])
self.destroy_index = 0
self.timer = 0
self.mask = pygame.mask.from_surface(self.image)
self.bomb = 5
self.double_fire = False
self.f = 20
def move(self):
self.timer += 1
if self.active:
key_pressed = pygame.key.get_pressed()
if key_pressed[K_w] or key_pressed[K_UP]:
self.rect.top = self.rect.top - self.speed
if key_pressed[K_s] or key_pressed[K_DOWN]:
self.rect.top = self.rect.top + self.speed
if key_pressed[K_a] or key_pressed[K_LEFT]:
self.rect.left = self.rect.left - self.speed
if key_pressed[K_d] or key_pressed[K_RIGHT]:
self.rect.left = self.rect.left + self.speed
if self.rect.left < 0:
self.rect.left = 0
if self.rect.top > 574:
self.rect.top = 574
if self.rect.left > 378:
self.rect.left = 378
if self.rect.top < 0:
self.rect.top = 0
if self.image == self.image1:
self.image = self.image2
else:
self.image = self.image1
else:
if self.destroy_index < 4:
self.image = self.destroy_images[self.destroy_index]
if self.timer % 25 == 0:
self.destroy_index += 1
def draw(self):
self.screen.blit(self.image,[self.rect.left,self.rect.top])
def hit(self):
self.active = False
def reset(self):
self.active = True
self.image = self.image1
self.destroy_index = 0
self.bomb = 5
class Bullet(pygame.sprite.Sprite):
def __init__(self,plane, pos = 1):
pygame.sprite.Sprite.__init__(self) # pos: 0 - left, 1 - middle, 2 - right
self.plane = plane
self.active = True
self.pos = pos
self.img1 = pygame.image.load('../img/bullet1.png').convert_alpha()
self.img2 = pygame.image.load('../img/bullet2.png').convert_alpha()
self.img = self.img1
#self.sound = pygame.mixer.music.load('bullet.mp3')
self.rect = self.img.get_rect()
if pos == 1:
self.rect.left = plane.rect.left + 50
self.rect.top = plane.rect.top + 50
elif pos == 0:
self.img = self.img2
self.rect.left = plane.rect.left + 25
self.rect.top = plane.rect.top + 50
elif pos == 2:
self.rect.left = plane.rect.left + 75
self.rect.top = plane.rect.top + 50
self.img = self.img2
def move(self):
self.rect.top -= 10
if self.rect.top<0:
self.active = False
def draw(self,screen):
screen.blit(self.img, [self.rect.left, self.rect.top])
self.mask = pygame.mask.from_surface(self.img)
| repo_name: daniel-yaoyuan/paperplane | sub_path: src/hero.py | file_name: hero.py | file_ext: py | file_size_in_byte: 4,088 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 23138969943 |
import argparse
from random import sample
def load_data(fr_file, fw_file):
    all_users = []
    all_movies = []
    for lines in fr_file:
        if lines.startswith('i'):
            all_movies.append(lines.replace('\n', ''))
        if lines.startswith('u'):
            all_users.append(lines.replace('\n', ''))
    for users in all_users:
        item_candidate = sample(all_movies, 300)
        for items in item_candidate:
            line = users + ',' + items + '\n'
            fw_file.write(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=''' mine all paths''')
parser.add_argument('--all_nodes', type=str, dest='all_nodes', default='data/all_nodes.txt')
parser.add_argument('--candidate_user_items', type=str, dest='candidate_user_items', default='data/candidate_user_items.txt')
parsed_args = parser.parse_args()
all_nodes = parsed_args.all_nodes
candidate_user_items = parsed_args.candidate_user_items
fr_file = open(all_nodes, 'r')
fw_file = open(candidate_user_items,'w')
load_data(fr_file,fw_file)
fr_file.close()
fw_file.close()
| repo_name: 55TFSI/RKGE | sub_path: all_paths.py | file_name: all_paths.py | file_ext: py | file_size_in_byte: 1,138 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 10909511760 |
from datetime import datetime
from settings import ORDER_TTL, TCS_ACCOUNT_ID
from tinkoff.invest import OrderDirection, OrderType
from tinkoff.invest.schemas import StopOrderDirection as SODir
from tinkoff.invest.schemas import StopOrderExpirationType as SType
from tinkoff.invest.schemas import StopOrderType as SOType
from tools.utils import delta_minutes_to_utc
class OrderAdapter:
DIRECTIONS = {
'sell': OrderDirection.ORDER_DIRECTION_SELL,
'buy': OrderDirection.ORDER_DIRECTION_BUY,
}
ORDER_TYPES = {
'market': OrderType.ORDER_TYPE_MARKET,
'limit': OrderType.ORDER_TYPE_LIMIT,
}
def __init__(self, asset, order_type: str) -> None:
self._asset = asset
self._order_type = order_type
@property
def order_params(self):
params = {
'account_id': TCS_ACCOUNT_ID,
'order_type': self.ORDER_TYPES[self._order_type],
'order_id': str(datetime.utcnow().timestamp()),
'figi': self._asset.figi,
'quantity': self._asset.get_lots(self._asset.next_order_amount),
}
params['direction'] = (
self.DIRECTIONS['sell']
if self._asset.sell
else self.DIRECTIONS['buy']
)
if self._order_type == 'limit':
params['price'] = self._asset.price
return params
class StopOrderAdapter:
ORDER_TYPES = {
'stop_loss': SOType.STOP_ORDER_TYPE_STOP_LOSS,
'take_profit': SOType.STOP_ORDER_TYPE_TAKE_PROFIT,
'stop_limit': SOType.STOP_ORDER_TYPE_STOP_LIMIT,
}
EXPIRATION_TYPES = {
'gtd': SType.STOP_ORDER_EXPIRATION_TYPE_GOOD_TILL_DATE,
'gtc': SType.STOP_ORDER_EXPIRATION_TYPE_GOOD_TILL_CANCEL,
}
DIRECTIONS = {
'sell': SODir.STOP_ORDER_DIRECTION_SELL,
'buy': SODir.STOP_ORDER_DIRECTION_BUY,
}
def __init__(self, stop_order):
self._asset = stop_order.asset
self._price = self._asset.get_correct_price(stop_order.price)
self._params = {
'figi': self._asset.figi,
'price': self._price,
'stop_price': self._price,
'quantity': self._asset.get_lots(
int(stop_order.sum / stop_order.price)
),
'account_id': TCS_ACCOUNT_ID,
'direction': self.DIRECTIONS[stop_order.params.direction],
'stop_order_type': self.ORDER_TYPES[stop_order.params.stop_type],
'expiration_type': self.EXPIRATION_TYPES[
stop_order.params.expiration
],
}
if stop_order.params.expiration == 'gtd':
self._params['expire_date'] = delta_minutes_to_utc(ORDER_TTL)
@property
def order_params(self):
return self._params
class SpreadToJsonAdapter:
def __init__(self, spread) -> None:
self._spread = spread
@property
def output(self):
return {
'far_leg': {
'executed': self._spread.far_leg.executed,
'avg_exec_price': str(self._spread.far_leg.avg_exec_price),
},
'near_leg': {
'executed': self._spread.near_leg.executed,
'avg_exec_price': str(self._spread.near_leg.avg_exec_price),
},
}
class SellBuyToJsonAdapter:
def __init__(self, sellbuy) -> None:
self._sellbuy = sellbuy
@property
def output(self):
return ({
'executed': self._sellbuy.executed,
'avg_exec_price': str(self._sellbuy.avg_exec_price),
})
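# A minimal usage sketch (the Asset stub below is hypothetical; real asset objects
# come from elsewhere in the bot) showing what OrderAdapter assembles:
if __name__ == '__main__':
    class FakeAsset:
        figi = 'DUMMY_FIGI'  # placeholder identifier, not a real instrument
        sell = True
        next_order_amount = 10
        price = 100

        @staticmethod
        def get_lots(amount):
            # assume a lot size of 1 for the sketch
            return amount

    params = OrderAdapter(FakeAsset(), 'limit').order_params
    print(params['direction'], params['quantity'], params['price'])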
| repo_name: holohup/trademan-1.0-alpha-public | sub_path: bot/tools/adapters.py | file_name: adapters.py | file_ext: py | file_size_in_byte: 3,580 | program_lang: python | lang: en | doc_type: code | stars: 2 | dataset: github-code | pt: 6 | next seq_id: 27597631498 |
import math
def coinSums(coins, target):
coins.sort(reverse=True)
if(len(coins) == 1):
if(target % coins[0] == 0):
return 1
else:
return 0
c = coins[0]
del coins[0]
newCoinSum = 0
for i in range(0, math.floor(target/c)+1):
newtarget = target-i*c
newCoinSum += coinSums(list(coins), newtarget)
return newCoinSum
print(coinSums([1, 2, 5, 10, 20, 50, 100, 200], 200))
| repo_name: AbhishekVangipuram/ProjectEuler | sub_path: 031.py | file_name: 031.py | file_ext: py | file_size_in_byte: 454 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 75112397948 |
from flask import Flask, render_template, request, redirect, url_for
import requests
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def index():
try:
nom = request.form['NOM']
prenom = request.form['PRENOM']
email = request.form['EMAIL']
return redirect(url_for(".test", nom=nom, prenom=prenom, email=email))
except:
return render_template("index.html", linkbr="/brnews", linkstw="/stwnews", linkmap="/map", linkbanners="/banners", linkgetplayer="/getplayer")
@app.route("/getplayer", methods=["GET", "POST"])
def getplayer():
    try:
        name = request.form['name']
        choice = int(request.form.get('checkbox'))
        # map the checkbox value to the account type expected by the API
        account_types = {1: "epic", 2: "psn", 3: "xbl"}
        if choice in account_types:
            return redirect(url_for(".playerstats", name=name, type=account_types[choice]))
    except:
        return render_template("getplayer.html")
@app.route("/playerstats", methods=["GET", "POST"])
def playerstats():
try:
name = request.args['name']
accounttype = request.args['type']
url = 'https://fortnite-api.com/v2/stats/br/v2'
headers = {
'Authorization': 'd1341b3c-4723-4ff6-a667-153f6c9f238d'
}
params = {
'name': name,
'accountType': accounttype
}
rep = requests.get(url, headers=headers, params=params)
jsonn = rep.json()
all = jsonn['data']['stats']['all']
minutesPlayed = all['overall']["minutesPlayed"]
hoursPlayed = minutesPlayed / 60
daysPlayed = hoursPlayed / 24
idcount = jsonn["data"]
# with open("fooddata.json", "w", encoding='utf-8') as jsonfile:
# json.dump(jsonn, jsonfile, ensure_ascii=False, indent= 4)
return render_template("playerstats.html", idcount=idcount, name=name, all=all, solo=jsonn['data']['stats']['all']['trio'] , hoursPlayed=round(hoursPlayed, 1), daysPlayed=round(daysPlayed, 1), battlePass=jsonn['data']["battlePass"])
except:
return render_template("errorplayerstats.html", linkgetplayer="/getplayer")
@app.route("/test", methods=["GET", "POST"])
def test():
nom = request.args['nom']
prenom = request.args['prenom']
email = request.args['email']
return render_template("test.html", nom=nom, prenom=prenom, email=email)
@app.route("/map")
def map():
url = 'https://fortnite-api.com/v1/map'
params = {
'language': 'fr'
}
rep = requests.get(url, params=params)
jsonn = rep.json()
return render_template("map.html", link_image=jsonn['data']['images']['pois'])
@app.route("/banners")
def banners():
url = 'https://fortnite-api.com/v1/banners'
params = {
'language': 'fr'
}
rep = requests.get(url, params=params)
jsonn = rep.json()
# embedvar = discord.Embed(title=jsonn["data"][r]["name"], description=f"De : {jsonn['data'][r]['devName']}",
# color=0x00ff00)
# embedvar.add_field(name="Catรฉgorie : ", value=jsonn['data'][r]['category'])
# embedvar.set_image(url=jsonn["data"][r]["images"]["icon"])
return render_template("banner.html", data=jsonn["data"])
@app.route("/stwnews")
def stwNews():
url = 'https://fortnite-api.com/v2/news/stw'
params = {
'language': 'fr'
}
rep = requests.get(url, params=params)
jsonn = rep.json()
# with open('stw.json', encoding='utf-8') as mon_fichier:
# jsonn = json.load(mon_fichier)
return render_template("stwnews.html", data=jsonn["data"]["messages"], len_data=len(jsonn["data"]["messages"]))
@app.route("/brnews")
def brNews():
url = 'https://fortnite-api.com/v2/news/br'
params = {
'language': 'fr'
}
rep = requests.get(url, params=params)
jsonn = rep.json()
# with open('example.json', encoding='utf-8') as mon_fichier:
# jsonn = json.load(mon_fichier)
return render_template("brnews.html", data=jsonn["data"]["motds"], len_data=len(jsonn["data"]["motds"]))
if __name__ == "__main__":
app.run(debug=True)
| repo_name: Foodjubi/fortnite-news | sub_path: app.py | file_name: app.py | file_ext: py | file_size_in_byte: 4,379 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 34528771620 |
# https://medium.com/javarevisited/the-ultimate-guide-to-binary-trees-47112269e6fc
# There are two ways check both
#udacity course way
class Node(object):
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinaryTree(object):
def __init__(self, root):
self.root = Node(root)
def search(self, find_val):
"""Return True if the value
is in the tree, return
False otherwise."""
        return self.preorder_search(self.root, find_val)
def print_tree(self):
"""Print out all tree nodes
as they are visited in
a pre-order traversal."""
        return self.preorder_print(self.root, "")[:-1]
def preorder_search(self, start, find_val):
"""Helper method - use this to create a
recursive search solution."""
if start:
if start.value == find_val:
return True
else:
return self.preorder_search(start.left, find_val) or self.preorder_search(start.right, find_val)
return False
def preorder_print(self, start, traversal):
"""Helper method - use this to create a
recursive print solution."""
if start:
traversal += (str(start.value) + "-")
traversal = self.preorder_print(start.left, traversal)
traversal = self.preorder_print(start.right, traversal)
return traversal
# Set up tree
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
# Test search
# Should be True
print(tree.search(4))
# Should be False
print(tree.search(6))
# Test print_tree
# Should be 1-2-4-5-3
print(tree.print_tree())
############## Grokking the coding interview method #######################
from collections import deque
class TreeNode():
def __init__(self,val):
self.val =val
self.left, self.right = None, None
def traverse(root):
result = []
if root is None:
return result
queue = deque()
queue.append(root)
while queue:
levelSize= len(queue)
currentLevel = []
for _ in range(levelSize):
currentNode = queue.popleft()
            # add the node to the current level
currentLevel.append(currentNode.val)
#insert children of current node in queue
if currentNode.left :
queue.append(currentNode.left)
if currentNode.right :
queue.append(currentNode.right)
result.append(currentLevel)
return result
# Time complexity:
# O(N), where 'N' is the total number of nodes in the tree, since we traverse each node once.
# Space complexity:
# O(N): we return a list containing the level order traversal and also need O(N) space for
# the queue, since a single level can hold at most N/2 nodes (at the lowest level).
if __name__ == "__main__":
root = TreeNode(12)
root.left = TreeNode(7)
root.right = TreeNode(8)
root.left.left = TreeNode(9)
root.right.left = TreeNode(10)
root.right.right = TreeNode(11)
print("Level order traversal of binary tree is :\n", str(traverse(root)))
| repo_name: ved93/PythonPractice | sub_path: data-strutures/binary_tree.py | file_name: binary_tree.py | file_ext: py | file_size_in_byte: 3,507 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6 | next seq_id: 37366648878 |
import configparser
working_dir_list = ['./examples/test-ex1-50d/', './examples/test-ex2']
task_name_list = ['example 1', 'example 2']
task_id = 1
conjugated_eigvec_flag = 0
with_FVD_solution = False
#with_FVD_solution = True
working_dir_name = working_dir_list[task_id]
task_name = task_name_list[task_id]
# read parameters from config file
config = configparser.ConfigParser()
config.read_file(open('../%s/params.cfg' % working_dir_name))
md_flag = config['default'].getboolean('md_data_flag')
num_k = config['Training'].getint('eig_k')
eig_file_name_prefix = config['default'].get('eig_file_name_prefix')
log_filename = config['default'].get('log_filename')
if md_flag:
data_filename_prefix = config['MD'].get('data_filename_prefix')
data_filename_prefix_validation = config['MD'].get('data_filename_prefix_validation')
else :
dim = config['SDE'].getint('dim')
data_filename_prefix = config['SDE'].get('data_filename_prefix')
| repo_name: zwpku/EigenPDE-NN | sub_path: plot_scripts/common.py | file_name: common.py | file_ext: py | file_size_in_byte: 954 | program_lang: python | lang: en | doc_type: code | stars: 3 | dataset: github-code | pt: 6 | next seq_id: 34042177473 |
import datetime
import logging
from django.contrib import auth
from django.http import HttpResponseRedirect, HttpResponseNotFound
from django.utils.translation import check_for_language
from django.shortcuts import render
from blueapps.account.components.bk_token.forms import AuthenticationForm
from gcloud.core.signals import user_enter
from gcloud.conf import settings
logger = logging.getLogger("root")
def page_not_found(request, exception):
if request.path.startswith(settings.STATIC_URL):
return HttpResponseNotFound()
user = _user_authenticate(request)
    # not logged in: redirect back to the home page, which jumps to the login page
if not user:
return HttpResponseRedirect(
settings.SITE_URL + "?{}={}".format(settings.PAGE_NOT_FOUND_URL_KEY, request.build_absolute_uri())
)
request.user = user
# not home url enter
user_enter.send(username=user.username, sender=user.username)
return render(request, "core/base_vue.html", {})
def _user_authenticate(request):
    # validate the cookie data first, then run the auth logic
form = AuthenticationForm(request.COOKIES)
if not form.is_valid():
return None
bk_token = form.cleaned_data["bk_token"]
    # check that the bk_token in the cookie matches the one in the session;
    # if logout deleted the cookie while the session remains, is_match is False
is_match = bk_token == request.session.get("bk_token")
if is_match and request.user.is_authenticated:
return request.user
user = auth.authenticate(request=request, bk_token=bk_token)
if user:
        # login succeeded: record the user info
auth.login(request, user)
request.session["bk_token"] = bk_token
return user
def home(request):
try:
username = request.user.username
# home url enter
user_enter.send(username=username, sender=username)
except Exception:
logger.exception("user_enter signal send failed.")
return render(request, "core/base_vue.html")
def set_language(request):
request_params = getattr(request, request.method)
next_url = request_params.get("next", None) or request.META.get("HTTP_REFERER", "/")
response = HttpResponseRedirect(next_url)
if request.method == "GET":
lang_code = request.GET.get("language", None)
if lang_code and check_for_language(lang_code):
if hasattr(request, "session"):
request.session["blueking_language"] = lang_code
max_age = 60 * 60 * 24 * 365
expires = datetime.datetime.strftime(
datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age), "%a, %d-%b-%Y %H:%M:%S GMT",
)
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code, max_age, expires)
return response
| repo_name: caiyj/bk-sops | sub_path: gcloud/core/views.py | file_name: views.py | file_ext: py | file_size_in_byte: 2,776 | program_lang: python | lang: en | doc_type: code | stars: null | dataset: github-code | pt: 6 | next seq_id: 38870006486 |
import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import imageio
import os
#
# conda activate tf
# export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/
# conda install -c conda-forge cudatoolkit=11.2 cudnn=8.1.0
# o pip install tensorflow
# pip install tensorflow-gpu
# o pip install gym
# o pip install gym[atari]
# o pip install autorom[accept-rom-license]
# pip install keras
# pip install keras-rl2
# o pip install imageio
# https://adventuresinmachinelearning.com/atari-space-invaders-dueling-q/
#
# uses PERSI to make training more efficient (take this out?)
# Fix for weird thing
# https://stackoverflow.com/questions/68614547/tensorflow-libdevice-not-found-why-is-it-not-found-in-the-searched-path
# export XLA_FLAGS=--xla_gpu_cuda_data_dir=/home/kali/.local/lib/python3.8/site-packages/jaxlib/cuda
#Use xming X server for windows
#run using
#echo "export DISPLAY=localhost:0.0" >> ~/.bashrc
#. ~/.bashrc
# export DISPLAY=[IP]:0.0
STORE_PATH = "tensorboard" # Path to where tensorboard logs are stored
MAX_EPSILON = 1 # Maximum probability of choosing a random action in epsilon-greedy algorithm
MIN_EPSILON = 0.1 # Minimum probability of choosing a random action in epsilon-greedy algorithm
EPSILON_MIN_ITER = 500000 # Number of iterations after which epsilon will have decreased from MAX_EPSILON to MIN_EPSILON
GAMMA = 0.99 # Discount factor in reinforcement learning
BATCH_SIZE = 32 # Number of samples used in each iteration of training
TAU = 0.08 # Hyperparameter for soft updating of the target network
POST_PROCESS_IMAGE_SIZE = (105, 80, 1) # Size of processed images used as input to the neural network
DELAY_TRAINING = 50000 # Number of time steps to wait before starting training
BETA_DECAY_ITERS = 500000 # Number of iterations after which beta will have decayed from MAX_BETA to MIN_BETA
MIN_BETA = 0.4 # Minimum value of beta parameter
MAX_BETA = 1.0 # Maximum value of beta parameter
NUM_FRAMES = 4 # Number of frames stacked together as input to the neural network
GIF_RECORDING_FREQ = 100 # Frequency with which GIFs are recorded during training
MODEL_SAVE_FREQ = 100 # Frequency with which the trained model is saved
# Create an environment for the Space Invaders game, using the RGB array render mode
env = gym.make("SpaceInvaders-v0", render_mode="rgb_array")
# Get the number of possible actions in the game
num_actions = env.action_space.n
class DQModel(keras.Model):
def __init__(self, hidden_size: int, num_actions: int, dueling: bool):
# Initialize the model using the parent's constructor
super(DQModel, self).__init__()
# Save whether the model uses the dueling architecture
self.dueling = dueling
# Create the first convolutional layer with 16 filters, each of size 8x8, using a stride of 4
self.conv1 = keras.layers.Conv2D(16, (8, 8), (4, 4), activation='relu')
# Create the second convolutional layer with 32 filters, each of size 4x4, using a stride of 2
self.conv2 = keras.layers.Conv2D(32, (4, 4), (2, 2), activation='relu')
# Create a flatten layer to flatten the output of the second convolutional layer
self.flatten = keras.layers.Flatten()
# Create a dense layer with the specified hidden size, using the He normal kernel initializer
self.adv_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
# Create a dense layer with the specified number of actions, using the He normal kernel initializer
self.adv_out = keras.layers.Dense(num_actions,
kernel_initializer=keras.initializers.he_normal())
# If the model uses the dueling architecture
if dueling:
# Create a dense layer with the specified hidden size, using the He normal kernel initializer
self.v_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
# Create a dense layer with a single output, using the He normal kernel initializer
self.v_out = keras.layers.Dense(1, kernel_initializer=keras.initializers.he_normal())
# Create a lambda layer to subtract the mean from the outputs of the advantage layer
self.lambda_layer = keras.layers.Lambda(lambda x: x - tf.reduce_mean(x))
# Create an Add layer to combine the value and advantage outputs
self.combine = keras.layers.Add()
# Define the forward pass of the model
def call(self, input):
# Pass the input through the first convolutional layer and apply ReLU activation
x = self.conv1(input)
# Pass the output of the first convolutional layer through the second convolutional layer and apply ReLU activation
x = self.conv2(x)
# Flatten the output of the second convolutional layer
x = self.flatten(x)
# Pass the output of the flatten layer through the advantage dense layer and apply ReLU activation
adv = self.adv_dense(x)
# Pass the output of the advantage dense layer through the advantage output layer
adv = self.adv_out(adv)
# If the model uses the dueling architecture
if self.dueling:
# Pass the output of the flatten layer through the value dense layer and apply ReLU activation
v = self.v_dense(x)
# Pass the output of the value dense layer through the value output layer
v = self.v_out(v)
# Pass the output of the advantage output layer through the lambda layer to subtract the mean
norm_adv = self.lambda_layer(adv)
# Pass the value and advantage outputs through the Add layer to combine them
combined = self.combine([v, norm_adv])
# Return the combined output
return combined
# If the model doesn't use the dueling architecture, return the advantage output
return adv
def huber_loss(loss):
return 0.5 * loss ** 2 if abs(loss) < 1.0 else abs(loss) - 0.5
# The Huber loss function is a loss function that is more robust
# than the mean squared error loss function. It is defined as the
# mean squared error loss function for small values of the error,
# but becomes a mean absolute error loss function for larger values of the error.
# This makes it more resilient to the effects of outliers, since the loss for these points
# is not squared and therefore not disproportionately large compared to the rest of the data.
# Was experimenting with this, but tf.keras.losses.Huber() is more efficient.
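# A tiny numeric check of the piecewise definition above (an illustrative sketch):
# the loss is quadratic inside the unit interval and linear outside it.
assert huber_loss(0.5) == 0.5 * 0.5 ** 2    # = 0.125
assert huber_loss(2.0) == abs(2.0) - 0.5    # = 1.5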
primary_network = DQModel(256, num_actions, True)
target_network = DQModel(256, num_actions, True)
# each model has 256 hidden units.
primary_network.compile(optimizer=keras.optimizers.Adam(), loss=tf.keras.losses.Huber())
# make target_network = primary_network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(e)
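# A quick shape sanity check (an illustrative sketch, not from the original script):
# the dueling head should emit one Q-value per possible action.
_dummy_state = tf.zeros((1, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES))
assert primary_network(_dummy_state).shape == (1, num_actions)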
class Node:
def __init__(self, left, right, is_leaf: bool = False, idx = None):
self.left = left
self.right = right
self.is_leaf = is_leaf
self.value = sum(n.value for n in (left, right) if n is not None)
self.parent = None
self.idx = idx # this value is only set for leaf nodes
if left is not None:
left.parent = self
if right is not None:
right.parent = self
@classmethod
def create_leaf(cls, value, idx):
leaf = cls(None, None, is_leaf=True, idx=idx)
leaf.value = value
return leaf
# This code defines a basic class for a Node in a tree data structure.
# The Node class has several attributes, including left and right for
# the left and right child nodes, is_leaf for whether the node is a leaf node,
# value for the value of the node, parent for the parent node, and idx for the index of the node.
# The __init__ method is used to initialize a new Node object, and takes several arguments
# including left, right, is_leaf, and idx. The value attribute is set to the sum of
# the values of the left and right child nodes, and the parent attributes of the left and
# right child nodes are set to the new Node object. The create_leaf class method can be used
# to create a new leaf Node with a given value and index.
def create_tree(input: list):
nodes = [Node.create_leaf(v, i) for i, v in enumerate(input)]
leaf_nodes = nodes
while len(nodes) > 1:
inodes = iter(nodes)
nodes = [Node(*pair) for pair in zip(inodes, inodes)]
return nodes[0], leaf_nodes
# This code defines a method to create a tree of nodes
def retrieve(value: float, node: Node):
if node.is_leaf:
return node
if node.left.value >= value:
return retrieve(value, node.left)
else:
return retrieve(value - node.left.value, node.right)
# This code defines a method to create a tree of nodes
def update(node: Node, new_value: float):
change = new_value - node.value
node.value = new_value
propagate_changes(change, node.parent)
def propagate_changes(change: float, node: Node):
node.value += change
if node.parent is not None:
propagate_changes(change, node.parent)
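# A minimal sketch of proportional sampling with the sum tree above (names here
# are illustrative and not used by the training loop):
_root, _leaves = create_tree([1.0, 3.0, 2.0, 4.0])
assert _root.value == 10.0
# a uniform draw in [0, total) selects leaf i with probability value_i / total;
# 4.5 falls past leaves 0-1 (cumulative 4.0) and inside leaf 2 (cumulative 6.0)
assert retrieve(4.5, _root).idx == 2
update(_leaves[0], 5.0)  # re-prioritise leaf 0; the change propagates to the root
assert _root.value == 14.0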
class Memory(object):
def __init__(self, size: int):
self.size = size
self.curr_write_idx = 0
self.available_samples = 0
self.buffer = [(np.zeros((POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1]), dtype=np.float32), 0.0, 0.0, 0.0) for i in range(self.size)]
self.base_node, self.leaf_nodes = create_tree([0 for i in range(self.size)])
self.frame_idx = 0
self.action_idx = 1
self.reward_idx = 2
self.terminal_idx = 3
self.beta = 0.4
self.alpha = 0.6
self.min_priority = 0.01
def append(self, experience: tuple, priority: float):
self.buffer[self.curr_write_idx] = experience
self.update(self.curr_write_idx, priority)
self.curr_write_idx += 1
# reset the current writer position index if creater than the allowed size
if self.curr_write_idx >= self.size:
self.curr_write_idx = 0
# max out available samples at the memory buffer size
if self.available_samples + 1 < self.size:
self.available_samples += 1
else:
self.available_samples = self.size - 1
def update(self, idx: int, priority: float):
update(self.leaf_nodes[idx], self.adjust_priority(priority))
def adjust_priority(self, priority: float):
return np.power(priority + self.min_priority, self.alpha)
def sample(self, num_samples: int):
sampled_idxs = []
is_weights = []
sample_no = 0
while sample_no < num_samples:
sample_val = np.random.uniform(0, self.base_node.value)
samp_node = retrieve(sample_val, self.base_node)
if NUM_FRAMES - 1 < samp_node.idx < self.available_samples - 1:
sampled_idxs.append(samp_node.idx)
p = samp_node.value / self.base_node.value
is_weights.append((self.available_samples + 1) * p)
sample_no += 1
# apply the beta factor and normalise so that the maximum is_weight < 1
is_weights = np.array(is_weights)
is_weights = np.power(is_weights, -self.beta)
is_weights = is_weights / np.max(is_weights)
# now load up the state and next state variables according to sampled idxs
states = np.zeros((num_samples, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
next_states = np.zeros((num_samples, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
actions, rewards, terminal = [], [], []
for i, idx in enumerate(sampled_idxs):
for j in range(NUM_FRAMES):
states[i, :, :, j] = self.buffer[idx + j - NUM_FRAMES + 1][self.frame_idx][:, :, 0]
next_states[i, :, :, j] = self.buffer[idx + j - NUM_FRAMES + 2][self.frame_idx][:, :, 0]
actions.append(self.buffer[idx][self.action_idx])
rewards.append(self.buffer[idx][self.reward_idx])
terminal.append(self.buffer[idx][self.terminal_idx])
return states, np.array(actions), np.array(rewards), next_states, np.array(terminal), sampled_idxs, is_weights
# The Memory class is used to store past experiences from the environment in a replay buffer,
# which is then used to train the reinforcement learning model. The Memory class uses a priority
# queue implemented as a sum tree data structure to prioritize experiences in the replay buffer
# according to their importance, with more important experiences being more likely to be sampled for training.
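# For intuition (an illustrative note): with alpha = 0.6 and min_priority = 0.01,
# a TD error of 1.0 maps to a sampling priority of (1.0 + 0.01) ** 0.6 ~= 1.006,
# while an error of 0.0 still gets 0.01 ** 0.6 ~= 0.063, so no transition is
# starved of sampling entirely.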
memory = Memory(200000)
# preprocesses an image to be inputted into the network
def image_preprocess(image, new_size=(105, 80)):
# convert to greyscale, resize and normalize the image
# image = image[0]
#print(image)
image = tf.image.rgb_to_grayscale(image)
image = tf.image.resize(image, new_size)
image = image / 255
return image
# chooses an action (epsilon greedy function)
def choose_action(state, primary_network, eps, step):
if step < DELAY_TRAINING:
return random.randint(0, num_actions - 1)
else:
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(tf.reshape(state, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)).numpy()))
# Updates from primary network
def update_network(primary_network, target_network):
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
# Processes the state stack.
def process_state_stack(state_stack, state):
for i in range(1, state_stack.shape[-1]):
state_stack[:, :, i - 1].assign(state_stack[:, :, i])
state_stack[:, :, -1].assign(state[:, :, 0])
return state_stack
# Records a gif replay of the entire game using imageio.
def record_gif(frame_list, episode, reward, fps=50):
    if len(frame_list) > 50:
        imageio.mimsave(STORE_PATH + "/SPACE_INVADERS_EPISODE-eps{}-r{}.gif".format(episode, reward), frame_list, fps=fps)
def get_per_error(states, actions, rewards, next_states, terminal, primary_network, target_network):
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
# the action selection from the primary / online network
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
# the q value for the prim_action_tp1 from the target network
q_from_target = target_network(next_states)
    updates = rewards + (1 - terminal) * GAMMA * q_from_target.numpy()[np.arange(len(prim_action_tp1)), prim_action_tp1]
    target_q[np.arange(len(actions)), actions] = updates
# calculate the loss / error to update priorites
error = [huber_loss(target_q[i, actions[i]] - prim_qt.numpy()[i, actions[i]]) for i in range(states.shape[0])]
return target_q, error
def train(primary_network, memory, target_network):
states, actions, rewards, next_states, terminal, idxs, is_weights = memory.sample(BATCH_SIZE)
target_q, error = get_per_error(states, actions, rewards, next_states, terminal, primary_network, target_network)
for i in range(len(idxs)):
memory.update(idxs[i], error[i])
loss = primary_network.train_on_batch(states, target_q, is_weights)
return loss
num_episodes = 1501
# In practice, model weights are saved as multiples of 100. Therefore, set num_episodes to be a multiple of 100 + 1 (0 counts as an episode)
eps = MAX_EPSILON
render = False # If true, will show bot working in real time. Set false to save on graphics power.
train_writer = tf.summary.create_file_writer(STORE_PATH + "/DuelingQPERSI_{}".format(dt.datetime.now().strftime('%d%m%Y%H%M')))
steps = 0
for i in range(num_episodes):
state = env.reset()
state = image_preprocess(state[0])
state_stack = tf.Variable(np.repeat(state.numpy(), NUM_FRAMES).reshape((POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1],
NUM_FRAMES)))
cnt = 1
avg_loss = 0
tot_reward = 0
if i % GIF_RECORDING_FREQ == 0:
frame_list = []
while True:
if render:
env.render()
action = choose_action(state_stack, primary_network, eps, steps)
next_state, reward, terminated, truncated, info = env.step(action)
done = terminated or truncated
tot_reward += reward
if i % GIF_RECORDING_FREQ == 0:
frame_list.append(tf.cast(tf.image.resize(next_state, (480, 320)), tf.uint8).numpy())
next_state = image_preprocess(next_state)
old_state_stack = state_stack
state_stack = process_state_stack(state_stack, next_state)
if steps > DELAY_TRAINING:
loss = train(primary_network, memory, target_network)
update_network(primary_network, target_network)
_, error = get_per_error(tf.reshape(old_state_stack, (1, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)), np.array([action]), np.array([reward]), tf.reshape(state_stack, (1, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)), np.array([done]), primary_network, target_network)
# store in memory
memory.append((next_state, action, reward, done), error[0])
else:
loss = -1
# store in memory - default the priority to the reward
memory.append((next_state, action, reward, done), reward)
avg_loss += loss
# linearly decay the eps and PER beta values
if steps > DELAY_TRAINING:
eps = MAX_EPSILON - ((steps - DELAY_TRAINING) / EPSILON_MIN_ITER) * \
(MAX_EPSILON - MIN_EPSILON) if steps < EPSILON_MIN_ITER else \
MIN_EPSILON
beta = MIN_BETA + ((steps - DELAY_TRAINING) / BETA_DECAY_ITERS) * \
(MAX_BETA - MIN_BETA) if steps < BETA_DECAY_ITERS else \
MAX_BETA
memory.beta = beta
steps += 1
if done:
if steps > DELAY_TRAINING:
avg_loss /= cnt
print("Episode: {}, Reward: {}, avg loss: {:.5f}, eps: {:.3f}".format(i, tot_reward, avg_loss, eps))
with train_writer.as_default():
tf.summary.scalar('reward', tot_reward, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
else:
print("Pre-training...Episode: {}".format(i))
if i % GIF_RECORDING_FREQ == 0:
record_gif(frame_list, i, tot_reward)
break
cnt += 1
if i % MODEL_SAVE_FREQ == 0: # and i != 0:
primary_network.save_weights(STORE_PATH + "/checkpoints/cp_primary_network_episode_{}.ckpt".format(i))
target_network.save_weights(STORE_PATH + "/checkpoints/cp_target_network_episode_{}.ckpt".format(i))
#primary_network
#target_network
#primary_network = DQModel(256, num_actions, True)
#target_network = DQModel(256, num_actions, True)
# primary_network.load_weights(STORE_PATH + "/checkpoints/cp_primary_network_episode_1000.ckpt")
# target_network.load_weights(STORE_PATH + "/checkpoints/cp_target_network_episode_1000.ckpt")
# env = gym.make("SpaceInvaders-v0", render_mode="human")
# render = True
# for i in range(1):
# state = env.reset()
# state = image_preprocess(state[0])
# state_stack = tf.Variable(np.repeat(state.numpy(), NUM_FRAMES).reshape((POST_PROCESS_IMAGE_SIZE[0],
# POST_PROCESS_IMAGE_SIZE[1],
# NUM_FRAMES)))
# cnt = 1
# avg_loss = 0
# tot_reward = 0
# if i % GIF_RECORDING_FREQ == 0:
# frame_list = []
# while True:
# if render:
# env.render()
# action = choose_action(state_stack, primary_network, 0, 51000) # guarantees primary network is chosen
# next_state, reward, terminated, truncated, info = env.step(action)
# done = terminated or truncated
# tot_reward += reward
# #if i % GIF_RECORDING_FREQ == 0:
# # frame_list.append(tf.cast(tf.image.resize(next_state, (480, 320)), tf.uint8).numpy())
# next_state = image_preprocess(next_state)
# old_state_stack = state_stack
# state_stack = process_state_stack(state_stack, next_state)
| nhovadia/CSCI4830_final_project | SpaceInvaders_Training.py | SpaceInvaders_Training.py | py | 21,331 | python | en | code | 0 | github-code | 6 | 410815561 |
from typing import Tuple
import jax
import jax.numpy as jnp
import jax.scipy.linalg as linalg
from numpy.typing import ArrayLike
def transition_function(F: jnp.ndarray, u: jnp.ndarray, L: jnp.ndarray, h: float, n_linspace=10000) -> Tuple[ArrayLike, ArrayLike, ArrayLike]:
    r"""
    A prior of the form
        \mathrm{d}X(t) = (FX(t) + u)\,\mathrm{d}t + L\,\mathrm{d}W_t
    has the strong solution
        X(t+h) = \exp(Fh)X(t) + \int_0^h \exp(F(h-s))u\,\mathrm{d}s + \int_0^h \exp(F(h-s))L\,\mathrm{d}W_s,
    so that
        X(t+h) \mid X(t) \sim \mathcal{N}(A(h)X(t) + \xi(h), Q(h)).
    ----------------------------
    Return \xi(h), Q(h), A(h), with the integrals approximated on an n_linspace-point trapezoidal grid.
    """
linspace = jnp.linspace(0, h, n_linspace)
A = linalg.expm(F * h)
@jax.vmap
def integrand_xi(s):
return linalg.expm(F * s) @ u
integrand_xi_values = integrand_xi(linspace)
xi = jnp.trapz(integrand_xi_values, linspace, axis=0)
@jax.vmap
def integrand_Q(s):
B = linalg.expm(F * s) @ L
return B @ B.T
integrand_Q_values = integrand_Q(linspace)
Q = jnp.trapz(integrand_Q_values, linspace, axis=0)
return xi, Q, A
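# Editor's usage sketch (not part of the original module): the once-integrated
# Wiener process prior F = [[0, 1], [0, 0]], u = 0, L = (0, 1)^T has the closed
# forms A(h) = [[1, h], [0, 1]], xi(h) = 0 and Q(h) = [[h^3/3, h^2/2], [h^2/2, h]],
# which makes it a convenient sanity check for the trapezoidal approximation.
if __name__ == "__main__":
    F = jnp.array([[0.0, 1.0], [0.0, 0.0]])
    u = jnp.zeros((2,))
    L = jnp.array([[0.0], [1.0]])
    xi, Q, A = transition_function(F, u, L, h=0.1)
    print(A)   # ~[[1.0, 0.1], [0.0, 1.0]]
    print(xi)  # ~[0.0, 0.0]
    print(Q)   # ~[[3.33e-4, 5.0e-3], [5.0e-3, 1.0e-1]]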
| hallelujahylefay/bayesianSDEsolver | bayesian_sde_solver/ode_solvers/probnum/transition_function.py | transition_function.py | py | 1,114 | python | en | code | 0 | github-code | 6 | 17689667482 |
import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
no_resBlocks = 16
HR_shape = 96
train_data_path = '../data/train'
val_data_path = '../data/val'
advLossFactor = 0.001
VGGLossFactor = 0.006
mse_lr = 0.0001
mse_epochs = 700
initial_lr = 0.0001
second_lr = 0.00001
gan_epochs = 140
batch_size = 16
images_to_eval = 10
no_workers = 8
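# Editor's usage sketch (the training-side names below are assumptions; only
# the constants above come from this file): the module is consumed as a flat
# namespace of hyperparameters, e.g.
#
#   import torch
#   import hypParam as hp
#   model = Generator(no_resBlocks=hp.no_resBlocks).to(hp.device)
#   opt = torch.optim.Adam(model.parameters(), lr=hp.initial_lr)
#   loader = DataLoader(train_set, batch_size=hp.batch_size,
#                       num_workers=hp.no_workers)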
| abed11326/Training-a-Super-Resolution-GAN-for-4x-image-upscaling | hypParam.py | hypParam.py | py | 365 | python | en | code | 0 | github-code | 6 | 5390277363 |
class Node:
def __init__(self, val):
self.val = val
self.next = None
import numpy as np
root = Node(10)
tempNode = root
for i in np.random.randint(0, 100, [10]):
    tempNode.next = Node(i)
    tempNode = tempNode.next
    print(i)
stack = []
print("")
while root:
stack.append(root)
root = root.next
while stack:
res = stack.pop()
print(res.val)
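# Editor's addition: the explicit stack above can be replaced by recursion,
# where the call stack plays the same role. (Call it before the loop above
# consumes `root`; Python's default recursion limit caps the list length.)
def print_tail_to_head(node):
    if node is None:
        return
    print_tail_to_head(node.next)
    print(node.val)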
| JarvisFei/leetcode | 剑指offer代码/数据结构/面试题6:从头到尾打印链表.py | 面试题6:从头到尾打印链表.py | py | 388 | python | en | code | 0 | github-code | 6 | 34658203548 |
from math import exp
def Newton2(f, dfdx, x0, max_it=20, tol= 1e-3):
f0 = f(x0)
iter = 0
while abs(f0) > tol and iter < max_it:
x1 = x0 - f0/dfdx(x0)
x0 = x1
f0 = f(x0)
iter += 1
    converged = abs(f0) <= tol  # converged iff the tolerance was met, not merely because iterations remained
    return x0, converged, iter
# call the method for f(x) = x**2 - 4*x + exp(-x)
f = lambda x: x**2-4*x+exp(-x)
dfdx = lambda x: 2*x-4-exp(-x)
sol, converged, iter = Newton2(f,dfdx,0,tol=1e-3)
if converged:
print(f'Newtons method converged in {iter} iterations')
print(f'The approximate root is {sol:g}')
else:
print(f'The method did not converge')
| sundnes/python_intro | docs/src/chapter4/Newton2.py | Newton2.py | py | 612 | python | en | code | 5 | github-code | 6 | 5503967408 |
# https://www.hackerrank.com/challenges/py-the-captains-room/problem
k = int(input())
array = list(map(int, input().split()))
frequencies = dict()
for element in array:
    frequencies[element] = frequencies.get(element, 0) + 1
for key in frequencies:
if frequencies[key] == 1:
print(key)
break
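# Editor's addition: a constant-space arithmetic alternative that actually uses
# k. Every room number except the captain's appears exactly k times, so
# k * sum(set(array)) counts the captain k times while sum(array) counts it
# once, and the difference is (k - 1) * captain:
# print((k * sum(set(array)) - sum(array)) // (k - 1))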
| Nikit-370/HackerRank-Solution | Python/the-captains-room.py | the-captains-room.py | py | 347 | python | en | code | 10 | github-code | 6 | 9174066290 |
load("@bazel_tools//tools/cpp:windows_cc_configure.bzl", "find_vc_path", "setup_vc_env_vars")
load("@bazel_tools//tools/cpp:cc_configure.bzl", "MSVC_ENVVARS")
# Keys: target architecture, as in <Windows-SDK-path>/<target-architecture>/bin/rc.exe
# Values: corresponding Bazel CPU value under @platforms//cpu:*
_TARGET_ARCH = {
"arm": "arm",
"arm64": "aarch64",
"x64": "x86_64",
"x86": "x86_32",
}
def _find_rc_exes(root):
result = {}
for a in _TARGET_ARCH:
exe = root.get_child(a).get_child("rc.exe")
if exe.exists:
result[a] = str(exe)
return result
def _find_all_rc_exe(repository_ctx):
if not repository_ctx.os.name.startswith("windows"):
return {}
vc = find_vc_path(repository_ctx)
if vc:
env = setup_vc_env_vars(
repository_ctx,
vc,
envvars = [
"WindowsSdkDir",
"WindowsSdkVerBinPath",
],
allow_empty = True,
escape = False,
)
# Try the versioned directory.
sdk = env.get("WindowsSdkVerBinPath")
if sdk:
archs = _find_rc_exes(repository_ctx.path(sdk))
if archs:
return archs
# Try the unversioned directory (typically Windows 8.1 SDK).
sdk = env.get("WindowsSdkDir")
if sdk:
archs = _find_rc_exes(repository_ctx.path(sdk).get_child("bin"))
if archs:
return archs
return {}
def _toolchain_defs(repository_ctx, rc_exes):
if not rc_exes:
return ""
result = ["""# Auto-generated by winsdk_configure.bzl
load(
"@io_bazel//src/main/res:winsdk_toolchain.bzl",
"WINDOWS_RESOURCE_COMPILER_TOOLCHAIN_TYPE",
"windows_resource_compiler_toolchain",
)"""]
for arch, rc_path in rc_exes.items():
wrapper = "rc_%s.bat" % arch
repository_ctx.file(
wrapper,
content = "@\"%s\" %%*" % rc_path,
executable = True,
)
result.append(
"""
windows_resource_compiler_toolchain(
name = "local_{arch}_tc",
rc_exe = "{wrapper}",
)
toolchain(
name = "local_{arch}",
exec_compatible_with = [
"@platforms//os:windows",
"@platforms//cpu:x86_64",
],
target_compatible_with = [
"@platforms//os:windows",
"@platforms//cpu:{cpu}",
],
toolchain = ":local_{arch}_tc",
toolchain_type = WINDOWS_RESOURCE_COMPILER_TOOLCHAIN_TYPE,
visibility = ["//visibility:public"],
)
""".format(
arch = arch,
wrapper = wrapper,
cpu = _TARGET_ARCH[arch],
),
)
return "\n".join(result)
def _toolchain_labels(repository_ctx, rc_exes):
tc_labels = [
"\"@{repo}//:local_{arch}\"".format(repo = repository_ctx.name, arch = arch)
for arch in rc_exes
]
if rc_exes:
body = "native.register_toolchains(%s)" % ", ".join(tc_labels)
else:
body = "pass"
return """# Auto-generated by winsdk_configure.bzl
def register_local_rc_exe_toolchains():
{body}
""".format(body = body)
def _impl(repository_ctx):
rc_exes = _find_all_rc_exe(repository_ctx)
repository_ctx.file(
"BUILD",
content = _toolchain_defs(repository_ctx, rc_exes),
executable = False,
)
repository_ctx.file(
"toolchains.bzl",
content = _toolchain_labels(repository_ctx, rc_exes),
executable = False,
)
winsdk_configure = repository_rule(
implementation = _impl,
local = True,
environ = list(MSVC_ENVVARS),
)
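# Editor's sketch of how this repository rule is typically wired up from a
# WORKSPACE file (the repository name "local_config_winsdk" matches the one
# used in bazelbuild/bazel, but treat the exact wiring as an assumption):
#
#   load("//src/main/res:winsdk_configure.bzl", "winsdk_configure")
#   winsdk_configure(name = "local_config_winsdk")
#   load("@local_config_winsdk//:toolchains.bzl", "register_local_rc_exe_toolchains")
#   register_local_rc_exe_toolchains()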
| bazelbuild/bazel | src/main/res/winsdk_configure.bzl | winsdk_configure.bzl | bzl | 3,643 | python | en | code | 21,632 | github-code | 6 | 35004221053 |
def solution(word):
    # seq[c]: how many vowels precede c in dictionary order ('A' would be 0).
    seq = {'E': 1, 'I': 2, 'O': 3, 'U': 4}
    res = 0
    for i in range(len(word)):
        char = word[i]
        if char == 'A':
            res += 1  # 'A' skips no earlier letters; just count this word itself
            continue
        # Choosing char at position i skips seq[char] earlier letters, each
        # heading a subtree of 5**(4-i) + ... + 5**0 words. The loop adds the
        # 5**1..5**(4-i) terms; the line after it adds the remaining 5**0
        # terms plus 1 for the word itself.
        for j in range(4, i, -1):
            res += (5 ** (j - i)) * seq[char]
        res += seq[char] + 1
    return res
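# Editor's addition: precomputing the per-position weights gives an equivalent
# closed form, since position i carries weight (5**(5-i) - 1) // 4.
def solution_weights(word):
    weights = [781, 156, 31, 6, 1]
    order = {'A': 0, 'E': 1, 'I': 2, 'O': 3, 'U': 4}
    return sum(weights[i] * order[ch] + 1 for i, ch in enumerate(word))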
| Inflearn-everyday/study | SimEunJu/programmers/모음사전.py | 모음사전.py | py | 328 | python | en | code | 5 | github-code | 6 | 30341447680 |
count = 0
n = int(input())
for i in range(n):
    a, b, c = input().split()
    if int(a) + int(b) + int(c) >= 2:
        count += 1
print(count)
| rakbidb/meng-CP | Codeforces/Python/Problem A/Difficulty 800/Solved/231A-Team.py | 231A-Team.py | py | 177 | python | en | code | 0 | github-code | 6 | 14712071511 |
from fastapi import APIRouter, Header
from fastapi.exceptions import HTTPException
from client import get_ccxt_client
router = APIRouter()
@router.get("/info/")
async def list_markets(x_connection_id: str = Header()):
try:
client = get_ccxt_client(x_connection_id)
return client.load_markets()
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.get("/info/{symbol:path}/")
async def retrieve_markets(symbol: str, x_connection_id: str = Header()):
try:
markets = await list_markets(x_connection_id)
if symbol not in markets:
raise HTTPException(status_code=400, detail=f"symbol {symbol} not found")
return markets[symbol]
    except HTTPException:
        raise  # keep the specific status and detail raised above instead of re-wrapping it
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
@router.post("/info/sync/")
async def sync_markets(x_connection_id: str = Header()):
try:
client = get_ccxt_client(x_connection_id)
return client.load_markets(reload=True)
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.get("/ticker/")
async def list_ticker(x_connection_id: str = Header()):
try:
client = get_ccxt_client(x_connection_id)
return client.fetch_tickers()
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.get("/ticker/{symbol:path}/")
async def retrieve_ticker(symbol: str, x_connection_id: str = Header()):
try:
client = get_ccxt_client(x_connection_id)
return client.fetch_ticker(symbol=symbol)
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
@router.get("/kline/{symbol:path}/interval/{interval:path}/")
async def retrieve_kline(symbol: str, interval: str, x_connection_id: str = Header()):
try:
client = get_ccxt_client(x_connection_id)
client_properties = client.describe()
if interval not in client_properties["timeframes"]:
raise HTTPException(status_code=400, detail="invalid value for interval")
return client.fetch_ohlcv(symbol=symbol, timeframe=interval)
    except HTTPException:
        raise  # don't re-wrap the interval validation error raised above
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
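# Editor's usage sketch (the app wiring, prefix, and header value are
# assumptions; only `router` comes from this file): mount the router on an
# app and pass the exchange connection id as the x-connection-id header.
#
#   from fastapi import FastAPI
#   from fastapi.testclient import TestClient
#
#   app = FastAPI()
#   app.include_router(router, prefix="/market")
#   client = TestClient(app)
#   resp = client.get("/market/ticker/BTC/USDT/", headers={"x-connection-id": "demo"})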
| masked-trader/raccoon-exchange-service | src/server/routes/market.py | market.py | py | 2,242 | python | en | code | 0 | github-code | 6 | 42435364768 |
import threading
import time
import PySimpleGUI as sg
import psutil
from pathlib import Path
import subprocess
def is_running(process_name):
running = False
for proc in psutil.process_iter():
if process_name in proc.name():
running = True
break
return running
def the_thread(window: sg.Window, logtext, values, gtav_exe, dll_name):
injected = False
while not injected:
if is_running(gtav_exe):
logtext += f"\n{gtav_exe} is running..."
window["log"].update(logtext)
delay = int(values["delay"])
logtext += f"\nInjecting DLL in {delay} seconds..."
window["log"].update(logtext)
time.sleep(delay)
inj_path = Path("Injector.exe").resolve()
window["log"].update(logtext)
inj_output = subprocess.run([inj_path, "--process-name", gtav_exe, "--inject", dll_name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, shell=True)
inj_output_out = inj_output.stdout.decode("UTF-8")
logtext += f"\n{inj_output_out}"
window["log"].update(logtext)
injected = True
window["start"].update(disabled=False)
def main():
delay = 10
gta_path = ""
dll_name = "GTAO_Booster.dll"
gtav_exe = "GTA5.exe"
play_gta = "PlayGTAV.exe"
if Path.exists(Path("settings.ini")):
with open("settings.ini", 'r') as file:
content = file.readlines()
for line in content:
if "[GTA5_LOC]=" in line:
gta_path = line.split("=")[1]
if "[DELAY]=" in line:
delay = int(line.split("=")[1])
if "[DLL]=" in line:
dll_name = line.split("=")[1]
sg.theme('Dark')
layout = [
[sg.Text('Select PlayGTAV.exe:', size=(16,1)), sg.Input(gta_path, key="gta_exe"), sg.FileBrowse(file_types=((play_gta, play_gta),))],
[sg.Text('Select DLL:', size=(16,1)), sg.Input(dll_name, key="dll"), sg.FileBrowse(file_types=(("", "*.dll"),))],
[sg.Text('Injection Delay:', size=(16,1), tooltip="Delay to allow GTA to start & decrypt memory. Depending on PC performance and storage media you can decrease/increase this value."), sg.Input(delay, size=(5, 1), enable_events=True, tooltip="Delay to allow GTA to start & decrypt memory. Depending on PC performance and storage media you can decrease/increase this value.", key="delay")],
[sg.Multiline(size=(70, 12), enable_events=True, key="log", autoscroll=True, disabled=True)],
[sg.Button('START GTAV & Inject DLL', key="start", disabled=False), sg.Button('EXIT', key="exit", button_color=("white", "red"))],
[sg.Text('ยฉ ThatOldGrumpyMan', size=(16, 1))],
]
window = sg.Window('GTAV Auto DLL Injector', layout, finalize=True)
if not Path.exists(Path("Injector.exe")):
logtext = "Injector.exe is missing! Place it in the same directory as this file!\nInjector.exe is required to inject DLL in process.\nGet it here: https://github.com/nefarius/Injector\nRestart application when done."
window["log"].update(logtext)
window["start"].update(disabled=True)
if is_running(gtav_exe):
logtext = "GTA V is already running! Close it and restart this application!"
window["log"].update(logtext)
window["start"].update(disabled=True)
while True:
event, values = window.read()
try:
delay = str(values["delay"])
except TypeError:
delay = str(delay)
        # Keep only 1-2 digit delays with a nonzero leading digit.
        if len(delay) > 0 and delay[0] not in '123456789':
            window["delay"].update(values["delay"][:-1])
        if len(delay) > 0 and delay[-1] not in '0123456789':
            window["delay"].update(values["delay"][:-1])
        if len(delay) > 2:
            window["delay"].update(values["delay"][:-1])
if event == "start":
logtext = ""
window["log"].update(logtext)
window["start"].update(disabled=True)
gta_path = Path(str(values["gta_exe"]).strip("\n")).resolve()
try:
logtext = "Starting GTA V..."
window["log"].update(logtext)
subprocess.Popen([gta_path])
except WindowsError:
logtext += "\nInvalid GTA Path!"
window["log"].update(logtext)
window["start"].update(disabled=False)
continue
with open("settings.ini", 'w') as file:
file.write("[GTA5_LOC]=" + str(values["gta_exe"]) + "\n" + "[DELAY]=" + str(values["delay"]) + "\n" + "[DLL]=" + str(values["dll"]))
logtext += "\nWaiting for GTA V to start..."
window["log"].update(logtext)
dll_name = Path(str(values["dll"]).strip("\n")).resolve()
threading.Thread(target=the_thread, args=(window, logtext, values, gtav_exe, dll_name), daemon=True).start()
if event == "exit" or event == sg.WIN_CLOSED:
break
window.close()
if __name__ == '__main__':
main()
| activatedtmx/GTAV-Auto-DLL-Injector | injector.py | injector.py | py | 5,140 | python | en | code | 1 | github-code | 6 |