seq_id (stringlengths, 7–11) | text (stringlengths, 156–1.7M) | repo_name (stringlengths, 7–125) | sub_path (stringlengths, 4–132) | file_name (stringlengths, 4–77) | file_ext (stringclasses, 6 values) | file_size_in_byte (int64, 156–1.7M) | program_lang (stringclasses, 1 value) | lang (stringclasses, 38 values) | doc_type (stringclasses, 1 value) | stars (int64, 0–24.2k, ⌀) | dataset (stringclasses, 1 value) | pt (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
27321032293
|
"""
Kela Purchase data preprocessing
Reads Kela Purchase data, applies the preprocessing steps below and writes the result to files split by year.
- Convert column names to uppercase
- Rename HETU to FINREGISTRYID
- Format dates to YYYY-MM-DD
- Drop duplicate rows
- Fix data types
Input files:
- For years 1995-2019 (split by year): 175_522_2020_LAAKEOSTOT_<year>.csv.finreg_IDs (25 files)
- For years 2020-2021 (split by month): 81_522_2022_LAAKEOSTOT_<year><month>.csv.finreg_IDs (24 files)
Output files:
- purchases_<year>.csv (27 files)
- purchases_<year>.feather (27 files)
"""
import pandas as pd
import logging
from datetime import datetime
from finregistry_data.config import KELA_PURCHASE_INPUT_DIR, KELA_PURCHASE_OUTPUT_DIR
from finregistry_data.utils import write_data
def preprocess_purchases(path):
"""
Preprocess Kela drug purchases input file
Args:
path (str): Path to the input file
Returns:
Preprocessed dataframe
"""
df = pd.read_csv(path, sep=";", dtype=str)
# Convert column names to uppercase
df.columns = df.columns.str.upper()
# Format dates
for date_col in ["OSTOPV", "RKPV"]:
df[date_col] = pd.to_datetime(df[date_col], errors="coerce").dt.date
# Rename HETU to FINREGISTRYID
df = df.rename(columns={"HETU": "FINREGISTRYID"})
# Drop duplicates
df = df.drop_duplicates().reset_index(drop=True)
# Fix data types
dtypes = {
"PLKM": float,
"KUST_EUR": float,
"KORV_EUR": float,
"KAKORV_EUR": float,
}
df = df.astype(dtypes)
return df
def convert_csv_to_feather(path, output_name):
"""
Convert a preprocessed KELA Purchases file into a feather file
Args:
path (str): path to the preprocessed file
output_name (str): name of the output file without the file extension
"""
dtypes = {
"FINREGISTRYID": str,
"ATC": str,
"PLKM": float,
"KUST_EUR": float,
"KORV_EUR": float,
"KAKORV_EUR": float,
"RPK": str,
"LAJI": str,
"VNRO": str,
"SAIR": str,
"RGTNO": str,
"ASKU": str,
"SHP_NRO": str,
"TILASTOVUOSI": str,
"ANJA": str,
}
date_cols = ["OSTOPV", "RKPV"]
df = pd.read_csv(path, dtype=dtypes, parse_dates=date_cols)
write_data(df, KELA_PURCHASE_OUTPUT_DIR, output_name, "feather")
if __name__ == "__main__":
# Set logging level to INFO
logging.basicConfig(level=logging.INFO)
# Loop through files split by year
for year in range(1995, 2020):
filename = "175_522_2020_LAAKEOSTOT_" + str(year) + ".csv.finreg_IDs"
input_path = KELA_PURCHASE_INPUT_DIR / filename
logging.info("Processing file " + filename)
df = preprocess_purchases(input_path)
write_data(df, KELA_PURCHASE_OUTPUT_DIR, "purchases_" + str(year), "csv")
write_data(df, KELA_PURCHASE_OUTPUT_DIR, "purchases_" + str(year), "feather")
# Loop through files split by month
today = datetime.today().strftime("%Y-%m-%d")
for year in range(2020, 2022):
for month in range(1, 13):  # months 1-12
filename = (
"81_522_2022_LAAKEOSTOT_"
+ str(year)
+ str(month).zfill(2)
+ ".csv.finreg_IDs"
)
input_path = KELA_PURCHASE_INPUT_DIR / filename
logging.info("Processing file " + filename)
df = preprocess_purchases(input_path)
header = month == 1
output_path = KELA_PURCHASE_OUTPUT_DIR / (
"purchases_" + str(year) + "_" + today + ".csv"
)
df.to_csv(output_path, mode="a", header=header, index=False)
convert_csv_to_feather(output_path, "purchases_" + str(year))
|
dsgelab/finregistry-data
|
finregistry_data/registries/kela_purchase.py
|
kela_purchase.py
|
py
| 3,850 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44138812715
|
def solution(data, col, row_begin, row_end):
answer = 0
# Sort in ascending order by the col-th column
#for i in range(0, len(data) - 1):
# currv = data[i][col]
# for j in range(i, len(data) - 1):
# nextv = data[j][col]
# if currv > nextv:
# data[i], data[j] = data[j], data[i]
# For ties, sort in descending order by the first column
# Sort in ascending order by the col-th column
#for i in range(0, len(data) - 1):
# currv = data[i][col-1]
# for j in range(i, len(data) - 1):
# nextv = data[j][col-1]
# if currv == nextv:
# if data[i][0] < data[j][0]:
# data[i], data[j] = data[j], data[i]
data = sorted(data, key = lambda x: [x[col - 1], -x[0]])
# Compute the sums of remainders (S_i)
totalSum = []
for index in range(row_begin, row_end+1):
sum = 0
for v in data[index-1]:
sum += v % index
totalSum.append(sum)
# Compute the hash value (XOR of the S_i values)
for s in totalSum:
    answer ^= s
return answer
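# For illustration (traced by hand, not part of the original solution):
# solution([[2, 2, 6], [1, 5, 10], [4, 2, 9], [3, 8, 3]], 2, 2, 3) sorts the rows to
# [[4, 2, 9], [2, 2, 6], [1, 5, 10], [3, 8, 3]], computes S_2 = 0 and S_3 = 4, and returns 0 ^ 4 = 4.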
|
ralpioxxcs/problemsolving
|
programmers/table_hash.py
|
table_hash.py
|
py
| 1,112 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
20840870665
|
"""
Utility functions.
"""
import os
from time import perf_counter
import numpy as np
from sklearn.metrics import (
brier_score_loss,
matthews_corrcoef,
roc_curve,
precision_recall_curve,
auc,
cohen_kappa_score,
classification_report,
# confusion_matrix,
)
from sklearn.metrics import recall_score, precision_score
import shap
import matplotlib.pyplot as plt
from functools import wraps
def get_metrics(model, x_val, y_val):
"""
Compute simple metrics (MSE and MAPE).
"""
y_pred = model.predict(x_val)
mse = np.mean((y_val - y_pred)**2)
mask = y_val > 0
mape = (np.fabs(y_val - y_pred) / y_val)[mask].mean()
return y_pred, mse, mape
def shap_analysis(booster, data, name_f):
# fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(21, 12))
shap_values = shap.TreeExplainer(booster).shap_values(data)
fig = plt.figure(figsize=(40, 40))
shap.summary_plot(shap_values, data, show=False, max_display=len(data.columns))
fig.savefig(name_f, bbox_inches="tight")
|
Lenin22/ML-Demo
|
utils.py
|
utils.py
|
py
| 1,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3326207971
|
import json
import logging
from datetime import datetime
import requests
from system import settings
from system.constants import MODACTION_WH, USELESS_DETAILS
webhook = settings.DISCORD_MODLOG_WEBHOOK
bots = ['AutoModerator', 'FloodgatesBot']
log = logging.getLogger('worker.dsws')
def make_embed(entry):
ts = datetime.fromtimestamp(entry['created_utc']).isoformat().replace('T', ' ')
mod = ('🤖 ' if entry['mod'] in bots else '') + entry['mod']
embed = {
'fields': [{'name': 'Mod', 'value': mod, 'inline': True}],
'footer': {'text': f'Fecha: {ts}'}
}
if entry.get('target_author', ''):
embed['fields'].append({'name': 'Usuario', 'value': entry['target_author'], 'inline': True})
if entry.get('target_permalink', ''):
embed['description'] = f'**Link**: https://www.reddit.com{entry["target_permalink"]}'
if entry.get('details', ''):
details = entry['details']
for k, v in USELESS_DETAILS.items():
if k == details:
details = v
if details:
embed['fields'].append({'name': 'Detalles', 'value': details, 'inline': True})
if entry.get('target_title', ''):
embed['fields'].append({
'name': 'Título del post',
'value': entry['target_title']
})
if entry.get('target_body', ''):
content_type = 'post' if entry.get('target_title', '') else 'comentario'
body_field = {
'name': f'Contenido del {content_type}',
'value': entry['target_body'][:1000]
}
if len(entry['target_body']) > 1000:
body_field['value'] += '…'
embed['fields'].append(body_field)
return embed
def send(entries):
if not webhook:
return
for entry in entries[:5]:
if entry['action'] not in MODACTION_WH:
return
try:
action_description = MODACTION_WH[entry['action']]
payload = {
'content': f'📝 **{action_description}** por **{entry["mod"]}**',
'embeds': [make_embed(entry)]
}
log.debug('Entry: %s', entry)
log.debug('Enviando mensaje webhook: %s', json.dumps(payload))
resp = requests.post(webhook, json=payload)
if resp.status_code >= 400:
log.error('Error enviando mensaje, estado %i: %s', resp.status_code, resp.text)
except Exception as e:
log.exception(e)
|
rchile/mod-toolbox
|
toolbox/discord_ws.py
|
discord_ws.py
|
py
| 2,511 |
python
|
en
|
code
| 3 |
github-code
|
6
|
16370593696
|
import re
import sys
from collections import defaultdict
def get_num_overlapping_points(lines):
counts = defaultdict(lambda: 0)
for (x1, y1), (x2, y2) in lines:
if x1 == x2:
# vertical line (x1 == x2)
y11, y22 = (y1, y2) if y2 > y1 else (y2, y1)
for y in range(y11, y22 + 1):
counts[(x1, y)] += 1
elif y1 == y2:
# horizontal line (y1 == y2)
x11, x22 = (x1, x2) if x2 > x1 else (x2, x1)
for x in range(x11, x22 + 1):
counts[(x, y1)] += 1
elif x1 - x2 == y1 - y2 or x1 - x2 == -(y1 - y2):
# diagonal
xs = (
range(x1, x2 + 1) if x2 > x1 else range(x1, x2 - 1, -1)
)
ys = (
range(y1, y2 + 1) if y2 > y1 else range(y1, y2 - 1, -1)
)
for (x, y) in zip(xs, ys):
counts[(x, y)] += 1
return sum(v > 1 for v in counts.values())
def main(input_file):
with open(input_file, 'r') as f:
content = f.read()
lines = (map(int, nums)
for nums in re.findall(r'(\d+),(\d+) -> (\d+),(\d+)', content))
lines = [((x1, y1), (x2, y2))
for x1, y1, x2, y2 in lines]
val1 = get_num_overlapping_points(
[((x1, y1), (x2, y2)) for ((x1, y1), (x2, y2)) in lines
if x1 == x2 or y1 == y2]
)
print('Part 1:', val1)
val2 = get_num_overlapping_points(lines)
print('Part 2:', val2)
if __name__ == '__main__':
input_file = sys.argv[-1] if len(sys.argv) > 1 else 'input.txt'
main(input_file)
|
sjsawyer/aoc-2021
|
q05/q05.py
|
q05.py
|
py
| 1,577 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37219847623
|
# For Else & while else
target = 7
search = [1,2,3,4,5,6,7]
searchlen = len(search) + 1
for i in range(searchlen+1):
if i == target:
print(f"{i} is the target")
break
else: # i.e. this runs if the for loop was not exited with break
print("I didn't find the target")
print(f"{target} is not in {search}")
print("\n")
i = 0
while i < len(search):
target = search[i]
i+=1
else: # i.e. this runs if the while loop was not exited with break
print("I didn't find the target")
print(f"{target} is not in {search}")
|
GGisMee/Python
|
intermediate_python/trick/9__For_Else_While_Else.py
|
9__For_Else_While_Else.py
|
py
| 564 |
python
|
en
|
code
| 3 |
github-code
|
6
|
26625473616
|
"""Pluggable newsletter handling."""
from django import forms
from django.utils.translation import ugettext_lazy as _
from livesettings import config_value
from satchmo_store.accounts.signals import satchmo_registration
from satchmo_store.contact.signals import satchmo_contact_view
from satchmo_utils import load_module
from signals_ahoy.signals import form_initialdata
import logging
import signals
log = logging.getLogger('newsletter')
def get_newsletter_module():
try:
modulename = config_value('NEWSLETTER', 'MODULE')
except AttributeError:
modulename = 'satchmo_ext.newsletter.ignore'
return load_module(modulename)
def is_subscribed(contact):
if not contact:
return False
return get_newsletter_module().is_subscribed(contact)
def update_subscription(contact, subscribed, attributes={}):
current = is_subscribed(contact)
log.debug("Updating subscription status from %s to %s for %s", current, subscribed, contact)
result = get_newsletter_module().update_contact(contact, subscribed, attributes=attributes)
signals.newsletter_subscription_updated.send(contact,
old_state=current, new_state=subscribed, contact=contact, attributes=attributes)
return result
def update_subscription_listener(contact=None, subscribed=False, **kwargs):
if contact:
update_subscription(contact, subscribed)
def populate_form_initialdata_listener(contact=None, initial = {}, **kwargs):
if contact:
current_subscriber = is_subscribed(contact)
else:
current_subscriber = False
initial['newsletter'] = current_subscriber
def view_user_data_listener(contact=None, contact_dict=None, **kwargs):
module = config_value('NEWSLETTER', 'MODULE')
if module not in ('', 'satchmo_ext.newsletter.ignore'):
contact_dict['show_newsletter'] = True
contact_dict['newsletter'] = is_subscribed(contact)
else:
contact_dict['show_newsletter'] = False
satchmo_contact_view.connect(view_user_data_listener, sender=None)
satchmo_registration.connect(update_subscription_listener, sender=None)
form_initialdata.connect(populate_form_initialdata_listener, sender='RegistrationForm')
|
dokterbob/satchmo
|
satchmo/apps/satchmo_ext/newsletter/__init__.py
|
__init__.py
|
py
| 2,206 |
python
|
en
|
code
| 30 |
github-code
|
6
|
41313263665
|
import appdaemon.plugins.hass.hassapi as hass
import time
from babel.numbers import format_number, format_decimal
class wasserdroger(hass.Hass):
def initialize(self):
self.listen_state(self.inputhandler, self.args["trigger"], old="off", new="on")
self.listen_state(self.inputhandler, self.args["trigger"], old="on", new="off")
def inputhandler(self, entity, attribute, old, new, kwargs):
action = self.get_state(self.args["trigger"])
self.log(action)
kwh = self.get_state(self.args["kwhsensor"])
timestamp = str(round(time.time()))
appliance = self.args["appliance"]
path = '/conf/'+appliance+'.csv'
f = open(path,'a')
#self.log(timestamp+";"+str(format_decimal(kwh, locale='de'))+";"+appliance+" "+self.action+"\n")
self.log("action schrijf:")
f.write(timestamp+";"+str(format_decimal(kwh, locale='de'))+";"+appliance+" "+action+"\n")
f.close()
payload = '{ "timestamp" :'+str(format_decimal(kwh, locale='de'))+',"appliance":'+appliance+',"action":'+action+'}'
topic = "zolder/"+appliance+"/status"
self.call_service("mqtt/publish", topic=topic, payload=payload)
|
balk77/Home-AssistantConfig
|
appdaemon4/conf/apps/wasserdroger.py
|
wasserdroger.py
|
py
| 1,208 |
python
|
en
|
code
| 3 |
github-code
|
6
|
29686629055
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework import status
from drf_yasg.utils import swagger_auto_schema
from ..models import (
Appeal,
)
from ..serializers import (
AppealSerializer,
)
class AppealCreate(APIView):
@swagger_auto_schema(operation_description="Create Appeal", request_body=AppealSerializer)
def post(self, request:Request):
data = request.data
serializer = AppealSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(
{
'error': serializer.errors,
'message': 'Invalid data'
},
status=status.HTTP_400_BAD_REQUEST
)
class AppealList(APIView):
@swagger_auto_schema(
operation_description="Get Appeal list",
responses={200: AppealSerializer(many=True)}
)
def get(self, request:Request):
appeals = Appeal.objects.all()
serializer = AppealSerializer(appeals, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class AppealDetail(APIView):
@swagger_auto_schema(
operation_description="Get Appeal detail",
responses={200: AppealSerializer()}
)
def get(self, request:Request, id):
appeal = Appeal.objects.get(id=id)
serializer = AppealSerializer(appeal)
return Response(serializer.data, status=status.HTTP_200_OK)
class AppealUpdate(APIView):
@swagger_auto_schema(operation_description="Update Appeal", request_body=AppealSerializer)
def post(self, request:Request, id):
data = request.data
appeal = Appeal.objects.get(id=id)
appeal.name = data.get('name', appeal.name)
appeal.phone_number = data.get('phone_number', appeal.phone_number)
appeal.emile = data.get('emile', appeal.emile)
appeal.message = data.get('message', appeal.message)
appeal.title = data.get('title', appeal.title)
appeal.save()
serializer = AppealSerializer(appeal)
return Response(serializer.data, status=status.HTTP_200_OK)
class AppealDelete(APIView):
@swagger_auto_schema(operation_description="Delete Appeal", request_body=AppealSerializer)
def post(self, request:Request, id):
appeal = Appeal.objects.get(id=id)
appeal.delete()
return Response({'message': 'Deleted'}, status=status.HTTP_200_OK)
|
quvvatullayev/tour
|
tour/views/appeal.py
|
appeal.py
|
py
| 2,625 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27213136275
|
import sys
N, M = map(int, sys.stdin.readline().rstrip().split())
board = [[0 for i in range(N + 1)] for j in range(N + 1)]
visited = [False for _ in range(N + 1)]
answer = 0
def dfs(idx):
visited[idx] = True
for i in range(1, N + 1):
if board[idx][i] == 1 and visited[i] is False:
dfs(i)
for _ in range(M):
start, end = map(int, sys.stdin.readline().rstrip().split())
board[start][end] = 1
board[end][start] = 1
for i in range(1, N + 1):
if visited[i] is False:
answer += 1
dfs(i)
print(answer)
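# For illustration (traced by hand): with input "6 5" and edges 1-2, 2-5, 5-1, 3-4, 4-6,
# the graph splits into two components ({1, 2, 5} and {3, 4, 6}), so the script prints 2.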
|
hammii/Algorithm
|
BAEKJOON_python/11724_연결요소의개수.py
|
11724_연결요소의개수.py
|
py
| 564 |
python
|
en
|
code
| 2 |
github-code
|
6
|
43865643259
|
import os
import re
import ssl
from datetime import datetime, timedelta
from typing import Any, Dict, Optional, TypeVar, Union
import ciso8601
T = TypeVar("T", str, None)
# From https://stackoverflow.com/questions/4628122/how-to-construct-a-timedelta-object-from-a-simple-string
# Answer: https://stackoverflow.com/a/51916936
# datetimeParseRegex = re.compile(r'^((?P<days>[\.\d]+?)d)?((?P<hours>[\.\d]+?)h)?((?P<minutes>[\.\d]+?)m)?((?P<seconds>[\.\d]+?)s)?$')
datetime_regex = re.compile(
r"^((?P<weeks>[\.\d]+?)w)? *"
r"((?P<days>[\.\d]+?)d)? *"
r"((?P<hours>[\.\d]+?)h)? *"
r"((?P<minutes>[\.\d]+?)m)? *"
r"((?P<seconds>[\.\d]+?)s?)?$"
)
def parse_datetime(datetime: Union[datetime, str]) -> datetime:
"""Parses a datetime object or a string into a datetime object
Args:
datetime (Union[datetime.datetime, str]): Datetime object or string to parse
Returns:
datetime.datetime: Parsed datetime object
"""
if isinstance(datetime, str):
return ciso8601.parse_datetime(datetime)
return datetime
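# For illustration (assumed usage): parse_datetime("2014-12-05T12:30:45") returns
# datetime(2014, 12, 5, 12, 30, 45), while a datetime instance is passed through unchanged.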
def encode_datetime(dict: Dict[str, Any]) -> Dict[str, Any]:
"""Takes a dictionary and encodes all datetime objects into ISO 8601 strings
Args:
dict (Dict[str, Any]): Dictionary to encode
Returns:
Dict[str, Any]: The dictionary with all datetime objects encoded as ISO 8601 strings
"""
for k, v in dict.items():
if isinstance(v, datetime):
dict[k] = v.isoformat()
return dict
def parse_subreddit(subreddit: Union[str, None]) -> str:
"""Parses a subreddit name to be used in a reddit url
Args:
subreddit (Union[str, None]): Subreddit name to parse
Returns:
str: Parsed subreddit name
"""
if subreddit is None:
return "all"
return re.sub(r"^[r/]{2}", "", subreddit, flags=re.IGNORECASE)
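# For illustration (assumed usage): parse_subreddit("r/python") returns "python",
# and parse_subreddit(None) falls back to "all".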
def parse_time_str(time_str: str) -> Union[timedelta, None]:
"""Parse a time string e.g. (2h13m) into a timedelta object.
Taken straight from https://stackoverflow.com/a/4628148
Args:
time_str (str): A string identifying a duration. (eg. 2h13m)
Returns:
datetime.timedelta: A datetime.timedelta object
"""
parts = datetime_regex.match(time_str)
if not parts:
return
parts = parts.groupdict()
time_params = {}
for name, param in parts.items():
if param:
time_params[name] = int(param)
return timedelta(**time_params)
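# For illustration (assumed usage): parse_time_str("2h13m") returns
# timedelta(hours=2, minutes=13); a string the regex cannot parse returns None.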
def setup_ssl(
ca_path: Union[str, None],
cert_path: str,
key_path: Union[str, None],
key_password: Union[str, None],
) -> ssl.SSLContext:
sslctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=ca_path)
sslctx.check_hostname = True
sslctx.load_cert_chain(cert_path, key_path, key_password)
return sslctx
def is_docker() -> bool:
path = "/proc/self/cgroup"
return os.path.exists("/.dockerenv") or (
os.path.isfile(path) and any("docker" in line for line in open(path))
)
def tick(opt: Optional[bool], label: Optional[str] = None) -> str:
lookup = {
True: "<:greenTick:330090705336664065>",
False: "<:redTick:330090723011592193>",
None: "<:greyTick:563231201280917524>",
}
emoji = lookup.get(opt, "<:redTick:330090723011592193>")
if label is not None:
return f"{emoji}: {label}"
return emoji
|
No767/Kumiko
|
Bot/Libs/utils/utils.py
|
utils.py
|
py
| 3,388 |
python
|
en
|
code
| 20 |
github-code
|
6
|
32188046557
|
# Programmers - exhaustive search ("피로도" / Fatigue)
# All possible dungeon orders are generated with permutations.
# Each generated order is then simulated with a for loop, and the answer is
# updated whenever an order clears more dungeons (result) than the current best.
from itertools import permutations
def solution(k, dungeons):
answer = 0
a = []
for i in range(len(dungeons)):
a.append(i)
permute = permutations(a,len(dungeons))
for j in permute:
k_number = k
result = 0
for K in j:
if dungeons[K][0] <= k_number:
k_number -= dungeons[K][1]
result += 1
else:
continue
if result >= answer:
answer = result
return answer
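# For illustration (traced by hand): solution(80, [[80, 20], [50, 40], [30, 10]]) returns 3,
# since the order dungeon 0 -> 2 -> 1 clears all three with fatigue 80 -> 60 -> 50 -> 10.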
|
kcw0331/python-for-coding-test
|
programmers-coding/피로도.py
|
피로도.py
|
py
| 794 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
29646015431
|
from openerp import models
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
def _find_procurements_from_stock_planning(
self, company, to_date, states=None, from_date=None, category=None,
template=None, product=None, location_id=None, periods=False,
without_purchases=False, without_productions=False, level=0):
procurements = super(
ProcurementOrder, self)._find_procurements_from_stock_planning(
company, to_date, states=states, from_date=from_date,
category=category, template=template, product=product,
location_id=location_id, periods=periods,
without_purchases=without_purchases,
without_productions=without_productions)
if periods:
return procurements
if level == 0:
procurements = procurements.filtered(lambda x: x.level == 0)
else:
procurements = procurements.filtered(lambda x: x.level >= level)
return procurements
|
odoomrp/odoomrp-wip
|
stock_planning_procurement_generated_by_plan/models/procurement_order.py
|
procurement_order.py
|
py
| 1,034 |
python
|
en
|
code
| 119 |
github-code
|
6
|
71573663549
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import regex as re
from sqlalchemy import create_engine, String, Float, DATE
import pymssql
from datetime import date, datetime
import matplotlib.pyplot as plt
import os
from dotenv import load_dotenv
from empiricaldist import Cdf
import seaborn as sns
from glassdoor.scraper import *
import streamlit as st
import time
def salary_convert(salary):
if salary == 0:
return np.nan
if salary < 1000:
return salary * 1788
else:
return salary
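# Note (assumption, not documented in the original code): values below 1000 are treated as
# hourly rates and scaled by 1788 to approximate an annual salary; larger values pass through.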
env_path = os.path.join(r'/home/emad/code/emadam/glassdoor/glassdoor/',
'postgres_login.env')
if os.path.exists(env_path):
load_dotenv(env_path)
DATABASE = os.getenv('database')
USERNAME = os.getenv('username')
PASSWORD = os.getenv('password')
HOST = os.getenv('host')
engine = create_engine(
f"postgresql://{USERNAME}:{PASSWORD}@{HOST}:5432/{DATABASE}")
headers = {
"User-Agent":
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/92.0.4515.159 Safari/537.36"
}
URL = f'https://www.glassdoor.com.au/Job/melbourne-junior-data-analyst-jobs-SRCH_IL.0,9_IC2264754_KO10,29.htm'
resp = requests.get(URL, headers=headers)
# specifying a desired format of page using the html parser
soup = BeautifulSoup(resp.text, "html.parser")
job_title = scraper.extract_job_title_from_result(soup)
co_name = scraper.extract_company_name_from_result(soup)
co_rate = scraper.extract_company_rate_from_result(soup)
co_loc = scraper.extract_company_location_from_result(soup)
co_sal = scraper.extract_company_salary_from_result(soup)
job_age = scraper.extract_job_age_from_result(soup)
data = list(zip(job_title, co_name, co_rate, co_loc, co_sal,
job_age))
job_data = pd.DataFrame(data)
job_data = job_data.rename(
columns={
0: 'Job Title',
1: 'Company',
2: 'Rank',
3: 'Location',
4: 'Salary',
5: 'Ad Date'
})
job_data['Ad Date'] = pd.to_datetime(job_data['Ad Date'])
job_data.to_sql("job_data", engine, if_exists='append', index=False)
jobs_stored = pd.read_sql("job_data", engine)
jobs_stored['Ad Date'] = pd.to_datetime(jobs_stored['Ad Date'])
jobs_stored.sort_values(by=['Ad Date'], inplace=True)
jobs_stored.drop_duplicates(subset=['Job Title', 'Company', 'Location'],
keep='first',
inplace=True)
ad_count = jobs_stored.groupby('Ad Date').size()
jobs_stored = jobs_stored.set_index(pd.DatetimeIndex(
jobs_stored['Ad Date'])).sort_index()
jobs_stored['Min_Salary'] = jobs_stored['Salary'].str.extract(
r'([0-9]+,*[0-9]+).*')
jobs_stored['Min_Salary'] = jobs_stored['Min_Salary'].str.replace(
r'\,', '', regex=True).astype(float).astype(pd.Int64Dtype())
jobs_stored['Max_Salary'] = jobs_stored['Salary'].str.extract(
r'[0-9]+,*[0-9]+.*?([0-9]+,*[0-9]+)')
jobs_stored['Max_Salary'] = jobs_stored['Max_Salary'].str.replace(
r'\,', '', regex=True).astype(float).astype(pd.Int64Dtype())
jobs_stored['Min_Salary'] = jobs_stored['Min_Salary'].fillna(value=0)
jobs_stored_min = jobs_stored.apply(lambda x: salary_convert(x['Min_Salary']),
axis=1)
jobs_stored['Min_Salary'] = pd.DataFrame(jobs_stored_min)
jobs_stored['Max_Salary'] = jobs_stored['Max_Salary'].fillna(value=0)
jobs_stored_max = jobs_stored.apply(lambda x: salary_convert(x['Max_Salary']),
axis=1)
jobs_stored['Max_Salary'] = pd.DataFrame(jobs_stored_max)
jobs_stored['Seniority'] = jobs_stored['Job Title'].apply(
lambda x: 'Senior' if x.find('Senior') != -1 else
('Junior' if x.find('Junior') != -1 else
('Entry Level' if x.find('Entry level') != -1 else ('Graduate' if x.find(
'Graduate') != -1 else ('Manager' if x.find('Manager') != -1 else (
'Internship' if x.find('Internship') != -1 else np.nan))))))
jobs_stored.dropna(subset=['Ad Date'], how='all', inplace=True)
plt.style.use('seaborn-whitegrid')
sns.set()
fig, ax = plt.subplots(2, 2)
fig.set_size_inches(16, 11)
# set the spacing between subplots
plt.subplots_adjust(left=0.1,
bottom=0.1,
right=0.9,
top=0.9,
wspace=0.4,
hspace=0.4)
min_salary = jobs_stored['Min_Salary']
before_Date = jobs_stored['Ad Date'] < pd.to_datetime('2022-01-01')
ax[0, 0].plot(Cdf.from_seq(min_salary[before_Date].dropna()),
label='Before 2022')
ax[0, 0].plot(Cdf.from_seq(min_salary[~before_Date].dropna()),
label='After 2022')
x_min = np.sort(jobs_stored['Min_Salary'].dropna())
y_min = np.arange(1, len(x_min) + 1) / len(x_min)
x_max = np.sort(jobs_stored['Max_Salary'].dropna())
y_max = np.arange(1, len(x_max) + 1) / len(x_max)
pct_list = np.array([25, 50, 75])
maxpct_val = np.percentile(jobs_stored['Max_Salary'].dropna(), pct_list)
minpct_val = np.percentile(jobs_stored['Min_Salary'].dropna(), pct_list)
ax[0, 0].set_ylabel('CDF')
ax[0, 0].set_title(
'Distribution of minimum salary of "Data Analyst" jobs on Glassdoor',
fontweight="bold",
pad=20)
ax[0, 0].legend()
ax[0, 0].set_xlabel('Estimated salary')
ax[0, 1].plot(x_min,
y_min,
marker='.',
linestyle='none',
color='r',
label='Minimum salary')
ax[0, 1].plot(x_max,
y_max,
marker='.',
linestyle='none',
color='b',
label='Maximum salary')
ax[0, 1].plot(maxpct_val,
pct_list / 100,
marker='^',
linestyle='none',
color='c',
label='25th, 50th and 75th Percentile')
ax[0, 1].plot(minpct_val,
pct_list / 100,
marker='^',
linestyle='none',
color='k',
label='25th, 50th and 75th Percentile')
ax[0, 1].annotate(
'Mean:',
xy=(jobs_stored['Min_Salary'].mean().astype('int64'), 0.5),
xytext=(40000, 0.9),
arrowprops=dict(arrowstyle="fancy",
facecolor='green',
connectionstyle="angle3,angleA=0,angleB=-90"),
)
ax[0, 1].set_ylabel('ECDF')
ax[0, 1].set_title(
'Distribution of min and max salary of "Data Analyst" on Glassdoor',
fontweight="bold",
pad=20)
ax[0, 1].legend()
ax[0, 1].set_xlabel('Estimated salary')
ax[1, 0].bar(jobs_stored.index.unique(), ad_count, linestyle='None', color='r')
ax[1, 0].figure.canvas.draw()
ax[1, 0].tick_params(axis='x',
which='major',
rotation=20,
direction='inout',
length=6,
width=2,
color='k',
labelcolor='royalblue')
ax[1, 0].set_xlabel('Date of Advertisement', labelpad=0.0, color='magenta')
ax[1, 0].set_ylabel('Number of Ads', color='purple')
ax[1, 0].set_title('\'Data Analyst Job\' Advertisements in Glassdoor website',
fontweight="bold",
pad=20)
ax[1, 1].pie(jobs_stored['Seniority'].value_counts(),
labels=jobs_stored['Seniority'].dropna().unique(),
normalize=True,
autopct='%1.1f%%',
shadow=True,
startangle=0)
ax[1, 1].set_title('Seniority of job ads(percent)', fontweight="bold", pad=20)
# fig.savefig("glassdoor" + np.datetime64(date.today()).astype('str') + ".png")
st.set_page_config(page_title='Data Analyst Job: Market Analysis',
page_icon='favicon.png',
layout="wide")
message = st.info("Fetching data from Database...")
with st.spinner('Please Wait...'):
my_bar = st.progress(0)
# Remove the menu button from Streamlit
st.markdown(""" <style>
MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """,
unsafe_allow_html=True)
my_bar.progress(25)
st.title('Data Analyst Job: Market Analysis')
my_bar.progress(50)
st.markdown("""
## Project Description 👇
This is a personal effort where I researched *"Data Analyst"* job openings in
Melbourne. As a result, this project shows minimum and maximum salary of a
**Data Analyst in Melbourne**, Australia according to job advertisements gathered
from [https://www.glassdoor.com.au/](https://www.glassdoor.com.au/) and
saves the results in a *PostgreSQL* database in order to have historical data
for further analysis.
""")
st.info(
'💡 The cumulative distribution function (CDF) of random variable X is defined as FX(x)=P(X≤x), for all x∈R.'
)
st.pyplot(fig)
my_bar.progress(100)
my_bar.empty()
message.info('Done!')
time.sleep(3)
message.empty()
agree = st.checkbox('Show DataFrame recent records')
if agree:
with st.spinner('Please Wait...'):
cm = sns.color_palette("coolwarm_r", as_cmap=True)
df = jobs_stored.reset_index(
drop=True).tail(10).sort_values(by='Ad Date', ascending=False).style.background_gradient(cmap=cm)
st.write(df)
|
emadam/glassdoor
|
app.py
|
app.py
|
py
| 9,171 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21234086726
|
arr=input()
substr=[]
for i in range(0,len(arr)):
for j in range(i,len(arr)):
substr.append(arr[i:j+1])
substr=list(map(int,set(substr)))
pronic_set=[]
n=1
p_no=0
while(p_no<=int(arr)):
p_no=n*(n+1)
pronic_set.append(p_no)
n+=1
pronic_no=[]
for i in substr:
for j in pronic_set:
if i==j:
pronic_no.append(i)
print(sorted(set(pronic_no)))
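# For illustration (traced by hand): for the input "12" the distinct substrings are {1, 2, 12},
# the generated pronic numbers are [2, 6, 12, 20], and the script prints [2, 12].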
|
AniruddhaNaidu/Python-Coding
|
Pronic numbers.py
|
Pronic numbers.py
|
py
| 413 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25208776927
|
from flask import Flask, request, jsonify
import os
import jwt
from flask_cors import CORS, cross_origin
from dynamodb import DB
application = Flask(__name__)
db = DB()
CORS(application, headers=['Content-Type', 'Authorization'], supports_credentials=True,
expose_headers='Authorization', origins='*')
JWT_SECRET = "datajbsnmd5h84rbewvzx6*cax^jgmqw@m3$ds_%z-4*qy0n44fjr5shark"
JWT_ALGO = "HS256"
@application.route('/')
def landing():
return "This is the homepage of the Explora server!!!!"
@application.route('/get_username/<repo_id>', methods=['POST', 'GET'])
def get_username(repo_id):
'''
Authorize request, then retrieve username for given repo_id
'''
claims = authorize_user(request)
if claims is None: return jsonify(make_unauthorized_error()), 400
user_id = claims["pk"]
try:
username = db.get_username(user_id, repo_id)
except Exception as e:
return jsonify(make_error(str(e)))
return jsonify(make_success(username))
def authorize_user(request):
"""
Helper function that authorizes a request/user based on the JWT Token
provided. Return the claims if successful, `None` otherwise.
"""
try:
jwt_string = request.get_json().get("token")
claims = jwt.decode(jwt_string, JWT_SECRET, algorithms=[JWT_ALGO])
except Exception as e:
print(str(e))
return None
return claims
def make_unauthorized_error():
"""
Helper function that returns an unauthorization error.
"""
return make_error('Authorization failed.')
def make_error(msg):
"""
Helper function to create an error message to return on failed requests.
"""
return {'success': False, 'message': msg}
def make_success(msg):
"""
Helper function to create a success message to return on successful requests.
"""
return {'success': True, 'message': msg}
if __name__ == '__main__':
import sys
from twisted.python import log
log.startLogging(sys.stdout)
application.run(host="0.0.0.0")
|
DiscreetAI/explora-server
|
server/main.py
|
main.py
|
py
| 2,020 |
python
|
en
|
code
| 9 |
github-code
|
6
|
34839514933
|
#EXERCISE 6
#PROGRAM THAT DISPLAYS A LIST AS A TABLE
nilai = [{'nim' : 'A01', 'nama' : 'Agustina', 'mid' : 50, 'uas' : 80},
{'nim' : 'A02', 'nama' : 'Budi', 'mid' : 40, 'uas' : 90},
{'nim' : 'A03', 'nama' : 'Chicha', 'mid' : 100, 'uas' : 50},
{'nim' : 'A04', 'nama' : 'Donna', 'mid' : 20, 'uas' : 100},
{'nim' : 'A05', 'nama' : 'Fatimah', 'mid' : 70, 'uas' : 100}]
def membuat_tabel(nilai):
y=[]
n=[]
m=[]
u=[]
akhir=[]
ket=[]
for x in nilai :
p= x.get('nim')
y.append(p)
c=x.get('nama')
n.append(c)
a=x.get('mid')
m.append(a)
b=x.get('uas')
g=(a+2*b)/3
u.append(b)
akhir.append(round(g,2))
if g>=60 :
ket.append('LULUS')
else :
ket.append('TIDAK LULUS')
x='='
l='-'
v='NIM'
p='NAMA'
c='N.MID'
o='N.UAS'
k='N.AKHIR'
f='STATUS'
print(x.center(60,'='))
print(v,'\t\t',p,'\t\t',c,'\t\t',o,'\t\t',k,'\t\t',f)
print(l.center(86,'-'))
i=0
for i in range (len(y)) :
print(y[i],'\t\t',n[i],'\t\t',m[i],'\t\t',u[i],'\t\t',akhir[i],'\t\t',ket[i])
membuat_tabel(nilai)
|
tolipbukankalengkaleng/Pemrograman-Tersturktur
|
latihan6chapter9.py
|
latihan6chapter9.py
|
py
| 1,246 |
python
|
sr
|
code
| 0 |
github-code
|
6
|
72179452347
|
import json
with open("test.txt","r",encoding="utf-8") as f:
text = f.read()
# removing unwanted characters from text
words = text.replace('\n',' ').replace('.',' ').replace(',',' ').replace(';',' ').replace('!',' ').replace('?',' ').replace(':',' ')
# split the text into list of words, drop empty words
words = [word.lower() for word in words.split(" ") if word]
# print(words)
wordCount = {}
for word in words:
if word in wordCount:
wordCount[word] = wordCount[word] + 1
else:
wordCount[word] = 1
maxcount = max(wordCount,key=wordCount.get)
print(maxcount,wordCount[maxcount])
# open file in write mode
with open('save.json','w',encoding="utf-8") as f:
# dump data as str to filestream
json.dump(wordCount,f,indent=4)
with open('save.json','r',encoding="utf-8") as f:
newWordCount = json.load(f)
print(newWordCount)
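# For illustration (assuming test.txt contains "Hello world, hello!"): words becomes
# ['hello', 'world', 'hello'], the script prints "hello 2", and save.json ends up
# holding {"hello": 2, "world": 1}.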
|
Zadest/python-5
|
word_count_dict.py
|
word_count_dict.py
|
py
| 889 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14235086016
|
from http.client import HTTPSConnection
from tkinter import filedialog as fd
from tkinterdnd2 import *
from threading import *
from tkinter import *
from json import *
from sys import *
from tkinter import ttk
from time import sleep
import tkinter as tk
import pyimgur
import random
import sys
'''
GUIDES I USED
https://www.quora.com/I-want-to-automatically-post-a-message-every-24-hours-on-my-Discord-server-using-my-own-account-not-a-bot-account-Is-this-possible-and-if-so-how # STARTING POINT, REMOTE SENDER
https://www.pythontutorial.net/tkinter/tkinter-open-file-dialog/ # FILE DIALOG
https://pythonguides.com/python-tkinter-drag-and-drop/ # DRAG AND DROP FEATURE FOR MULTIPLE FILES
https://www.tutorialspoint.com/taking-input-from-the-user-in-tkinter # ADDING INPUT ENTRY WIDGETS
https://www.youtube.com/watch?v=lvky-tmoTvg # HOW TO USE IMGUR API, SIMPLE
https://www.delftstack.com/howto/python/python-replace-line-in-file/ # HOW TO REPLACE IMAGE PATHS ON DISK INTO POSTABLE IMGUR LINKS
https://www.geeksforgeeks.org/how-to-use-thread-in-tkinter-python/ # USEFUL EXAMPLE, SIMPLIFICATION OF THREADING
'''
'''↓↓↓↓ PROGRAM DOES NOT WORK WITHOUT!! ↓↓↓↓'''
TOKEN = '' # FOR EX. OTM1MjUzMjI2MjY0MDY4MjM3.Ye7-rA.w3WsZ0DpCr4lKYurKPa_bLUodBQ
IMGUR_ID = '' # FOR EX. 19a12e239az128h
CHANNEL_ID = '' # FOR EX. 123821903821083257
'''↑↑↑↑ FOR PERSONAL USE: ONLY TOUCH THESE 3 SPECIFIERS ABOVE, TOKEN, IMGUR_ID, and CHANNEL_ID ↑↑↑↑'''
'''TOKEN is your discord token, BE CAUTIOUS ABOUT THIS USE. View the program below if you are unsure about it's usage.
IMGUR_ID is the API key that Imgur gives you once you sign up. Or you can use any image uploader service, discord will convert image links to images.
CHANNEL_ID is the ID of the discord channel (enabler developer view, copy ID of the text channel, assuming image perms)
Examples are FAKE/THROWAWAYS, use credible ID.
'''
global temporary_widget # DO NOT TOUCH
class Client:
'''
Draws the GUI responsible for selecting image files to send to a discord channel.
Notably a Drag and drop feature (can use multiple files but dragged one at a time)
or through a file dialog.
'''
def __init__(self):
self.main = TkinterDnD.Tk()
self.main.title('Discord ImageBot')
self.main.geometry('450x650')
self.main.resizable(False, False)
self.main.config(bg='#36393f')
self.send_condition = tk.BooleanVar()
self.send_condition.set(True)
self.stop_condition = tk.BooleanVar()
self.stop_condition.set(True)
self.seconds_btwn_msg = 1 # minutes between sending each image (default: one minute)
self.seconds_before_stop = 360000
self.image_paths = []
self.temp_widget = Label()
self._draw()
self.main.mainloop()
def _draw(self):
global temporary_widget
widget_text = 'Corbel 10 bold'
# - - - - - - - - - - - - - - - - - - - - -
# Open File System
open_image_files = tk.Button(bg='#7289DA', fg='white', font='Corbel 12 bold', text='Open Image Files',
command=self.select_files)
open_image_files.pack(pady=10)
OR = Label(anchor=CENTER, text='OR', bg='#36393f', fg='white', font=widget_text)
OR.pack()
drag_and_drop = Label(anchor=CENTER, text='Drag & Drop Images', bg='#36393f', fg='white',
font='Corbel 14 bold')
drag_and_drop.pack()
listbox = Listbox(width=45, height=15, bg='#36393f', fg='#FFFFFF', selectmode=EXTENDED)
self.temp_widget = listbox
listbox.pack()
send_dropped_files = tk.Button(bg='#36393f', fg='white', font=widget_text, text='Send Dropped Files',
command=self.threading)
send_dropped_files.pack(pady=10)
listbox.drop_target_register(DND_FILES)
listbox.dnd_bind('<<Drop>>', self.add_to_listbox)
# - - - - - - - - - - - - - - - - - - - - -
# Connection Status bar
frame = Frame(pady=20, padx=20)
frame.config(bg='#36393f')
frame.pack()
separator = ttk.Separator(frame, orient=tk.HORIZONTAL)
separator.grid(row=0, sticky='ew', pady=10, columnspan=10)
# - - - - - - - - - - - - - - - - - - - - -
# Time Interval Section
interval_label = Label(frame, bg='#36393f', fg='white', font=widget_text, text='Time Between Message (min)')
interval_label.grid(row=2, column=0)
time_interval_entry = tk.Entry(frame, bg='#36393f', fg='white', font=widget_text)
temporary_widget = time_interval_entry
time_interval_entry.grid(row=2, column=1, columnspan=4, padx=10)
update_button = tk.Button(frame, bg='#36393f', fg='white', font=widget_text, text='Update', command=self.set_interval)
update_button.grid(row=2, column=5)
# - - - - - - - - - - - - - - - - - - - - -
# Stop Later Section
stop_later_label = Label(frame, bg='#36393f', fg='white', font=widget_text, text='Stop Later (min)')
stop_later_label.grid(row=3, column=0)
stop_later_entry = tk.Entry(frame, bg='#36393f', fg='white', font=widget_text)
# temporary_widget = stop_later_entry # doesnt work when you have multiple global carriers
stop_later_entry.grid(row=3, column=1, columnspan=4, padx=10)
update_button2 = tk.Button(frame, bg='#36393f', fg='white', font=widget_text, text='Update', command=self.later_interval)
update_button2.grid(row=3, column=5)
# - - - - - - - - - - - - - - - - - - - - -
# Stop Button
stop_button = tk.Button(bg='#ce524d', fg='white', font=widget_text, text='Stop Sending', command=self.turn_off_sending)
stop_button.pack()
# - - - - - - - - - - - - - - - - - - - - -
# Quit button
quit_button = tk.Button(bg='#ce524d', fg='white', font=widget_text, text='Quit', command=self.quit)
quit_button.pack(expand=True)
def add_to_listbox(self, event):
event.data = str(event.data).strip("{}[](),'") # get rid of unusual symbols
self.temp_widget.insert('end', event.data) # inserts the event.data to be displayed in the Listbox
self.image_paths.append(event.data)
def select_files(self):
'''
The file dialog. Responsible for opening a file dialog window for the user so that you may select an image
or multiple images to send to a discord channel.
'''
filetypes = (
('Image files', '*.png *.jpg *.jpeg *.tfif, *.tiff *.bmp *.gif *.eps *.raw'),
('All files', '*.*')
)
filepaths = fd.askopenfilenames(
# opens the file dialog, also creates a tuple of the file paths
# specified by options that are keyword arguments
title='Open files',
filetypes=filetypes)
filepaths = list(filepaths)
self.image_paths = filepaths
self.threading()
def write_to_file(self):
'''
Addresses obscure error with dragging 2 files, and then dragging in a previously dragged in file, resulting in an
incorrectly formatted list, and makes sure that each image path gets its own individual line.
'''
ordered_image_paths = []
for image_path in self.image_paths:
elem = image_path.split()
ordered_image_paths += elem
self.image_paths = ordered_image_paths # even if the list was initially correctly ordered,
# it would still be set correctly (nothing changes)
# ensure that one path gets one line
with open('some_images.txt', 'w') as output:
for image in self.image_paths:
output.write(str(image) + '\n')
def turn_off_sending(self):
self.send_condition.set(False)
def send_images(self):
self.write_to_file()
ImgurClient().imgur_convert()
wp = WritePhrase()
print('NEW RUN')
print('-' * 20)
while self.send_condition.get() and self.stop_condition.get():
wp.sender('some_images.txt') # this is the file we make
if self.seconds_btwn_msg == '':
print('why is this true') # this happens when you have multiple objects assigned to the global temporary_widget
self.seconds_btwn_msg = '1'
min = int(self.seconds_btwn_msg) * 60
print('min: ', min)
sleep(min)
def set_interval(self):
if self.temp_widget != '':
global temporary_widget
self.seconds_btwn_msg = temporary_widget.get()
print('min: ', self.seconds_btwn_msg)
def later_interval(self):
if self.temp_widget != '':
global temporary_widget
self.seconds_before_stop = temporary_widget.get()
print(self.seconds_before_stop)
def threading(self):
t1 = Thread(target=self.send_images) # simplest way to use a thread is to instantiate with a target function
t1.start() # and then call start
def threading2(self):
t2 = Thread(target=self.timed_loop)
t2.start()
def timed_loop(self):
min = int(self.seconds_before_stop) * 60
sleep(min)
self.stop_condition.set(False)
def quit(self):
self.turn_off_sending()
self.main.destroy()
sys.exit(-1) # defined in system, sys.exit()
class WritePhrase:
'''
Responsible for actually sending the message, in this case an imgur link to an image, to a specific discord channel.
Given the discord user's token, the host (discordapp.com) and the ID of the discord channel (acts as a link)
'''
def __init__(self):
self.used_phrases = []
@staticmethod
def get_connection():
return HTTPSConnection('discordapp.com') # similar to the smp_conn object we instantiated before
# this HTTPSConnection object can be used to authenticate, read, write and return messages
@staticmethod
# static because its not bound to the object of the class, just sending
def send_message(conn, channel_id, message_data):
"""
request of HTTP takes method, url, body, and headers
Get Channel Messages:
/channels/{channel.id}/messages
:param conn:
:param channel_id:
:param message_data:
:return:
"""
header_data = {
'content-type': 'application/json',
'authorization': TOKEN,
'host': 'discordapp.com',
}
conn.request('POST', f'/api/v9/channels/{channel_id}/messages', message_data, header_data)
resp = conn.getresponse() # called after a request is sent to the server.
# returns an HTTPResponse instance
# you must read responses before sending new requests
if 199 < resp.status < 300:
print(f'Message {message_data} sent')
else:
stderr.write(f'Received HTTP {resp.status}: {resp.reason}\n')
def sender(self, file):
message = self.random_line(file)
message_data = {
'content': message,
'tts': 'false',
}
self.send_message(self.get_connection(), CHANNEL_ID, dumps(message_data))
def random_line(self, file_name) -> str:
new_phrases = open(file_name).readlines() # compute a list of lines WITH all the '\n' characters at the end
if len(self.used_phrases) == len(new_phrases):
self.used_phrases = []
print()
used_phrase = random.choice(new_phrases)
while used_phrase in self.used_phrases:
used_phrase = random.choice(new_phrases)
self.used_phrases.append(used_phrase)
return used_phrase
class ImgurClient:
'''
Client connects with Imgur, replaces the paths of selected images from file dialog or drag and drop and converts them
to discord links so that they may be successfully posted on discord.
'''
@staticmethod
def imgur_convert():
file = open('some_images.txt', 'r')
replacement = ''
for image_path in file:
image_path = image_path.strip() # line must be stripped (removed of whitespaces AND line breaks) or its an invalid argument
im = pyimgur.Imgur(IMGUR_ID) # connect with Imgur
image = im.upload_image(image_path) # upload the image from its path
change = image_path.replace(image_path, image.link) + '\n' # replace the current line (image path)
# with its created imgur link
replacement += change # updating replacement with the change
file.close()
# dividing it into reading then writing avoids confusion and errors
fout = open('some_images.txt', 'w') # resets the file, 'w' creates a new file if file opened already exists
fout.write(replacement) # because the write() method doesn't automatically add \n
fout.close()
if __name__ == '__main__':
Client()
|
vaperyy/ImageBot_for_Discord
|
image_bot.py
|
image_bot.py
|
py
| 13,058 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36780535651
|
from typing import List, Union, Iterator, Tuple
from cell import Cell
class World:
NEIGHT = (
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1)
)
def __init__(self, width, height):
self.w = width
self.h = height
self.cells = self._new_world()
self.new_cells = None
def add_cell(self, x: int, y: int, cell: Cell):
self.cells[y][x] = cell
def _new_world(self) -> List[List[Union[Cell, None]]]:
return [
[None for x in range(self.w)]
for y in range(self.h)
]
def _iter_by(self, cells) -> Iterator[Tuple[int, int, Cell]]:
for y in range(self.h):
for x in range(self.w):
yield x, y, cells[y][x]
def _neight(self, cx, cy) -> Iterator[Cell]:
for dx, dy in self.NEIGHT:
x = cx + dx
y = cy + dy
if not (0 <= x < self.w):
x %= self.w
if not (0 <= y < self.h):
y %= self.h
if self.cells[y][x] is not None:
yield self.cells[y][x]
def _neight_count(self, cx, cy) -> int:
return len(tuple(self._neight(cx, cy)))
def step(self):
self.new_cells = self._new_world()
for x, y, cell in self._iter_by(self.cells):
neight = self._neight_count(x, y)
if cell is None:
cell = Cell.generate_new(list(self._neight(x, y)), neight)
if (cell is not None) and (not cell.is_birth(neight)):
cell = None
else:
if not cell.is_alive(neight) or cell.is_dead():
cell = None
self.new_cells[y][x] = cell
if cell:
cell.age += 1
self.cells = self.new_cells
def clean(self):
self.cells = self._new_world()
def stats(self):
types = {}
for x, y, cell in self:
if cell is None:
continue
key = tuple(cell.birth), tuple(cell.live)
types.setdefault(key, 0)
types[key] += 1
return types
def print(self):
for y in range(self.h):
for x in range(self.w):
cell = self.cells[y][x]
if cell:
print("#", end="")
else:
print("-", end="")
print()
# print(AsciiTable([[""]] + self.cells).table)
def __iter__(self):
yield from self._iter_by(self.cells)
def __getitem__(self, item: Union[Tuple[int, int]]):
return self.cells[item[1]][item[0]]
|
AzaubaevViktor/evo_life
|
world.py
|
world.py
|
py
| 2,710 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70911318267
|
# -*- coding: utf-8 -*-
import scrapy
from time import sleep
from random import randint
class ImdbSpiderSpider(scrapy.Spider):
name = 'imdb_spider'
allowed_domains = ['www.imdb.com']
start_urls = ['https://www.imdb.com/search/title/?release_date=2019-01-01,&sort=num_votes,desc']
page_count = 0
def parse(self, response):
all_movies = response.xpath('//div[@class="lister-item mode-advanced"]')
for movie in all_movies:
title = movie.xpath('normalize-space(.//h3/a/text())').extract_first()
duration = movie.xpath('.//p[@class="text-muted "]/span[@class="runtime"]/text()').extract_first()
genre = movie.xpath('normalize-space(.//p[@class="text-muted "]/span[@class="genre"]/text())').extract_first()
imdb_rating = movie.xpath('.//div[@class="inline-block ratings-imdb-rating"]/strong/text()').extract_first()
metascore_rating = movie.xpath('normalize-space(.//div[@class="inline-block ratings-metascore"]/span/text())').extract_first()
votes = movie.xpath('.//span[@name="nv"]/text()').extract_first()
yield {
'title': title,
'duration': duration,
'genre': genre,
'imdb_rating': imdb_rating,
'metascore_rating': metascore_rating,
'votes': votes
}
sleep(randint(2, 5))
next_page = response.xpath('//div[@class="desc"]/a[@class="lister-page-next next-page"]/@href').extract_first()
self.page_count += 1
if next_page and self.page_count < 40:
yield scrapy.Request(response.urljoin(next_page))
|
ArRosid/Scrapy-Project
|
scrapy_project/spiders/imdb_spider.py
|
imdb_spider.py
|
py
| 1,669 |
python
|
en
|
code
| 1 |
github-code
|
6
|
44476674074
|
from abc import ABCMeta, abstractmethod
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from app.config.settings import FONT_LABEL_TO_META, NUM_TOP_K
from app.domain.entity import BoundingBox, PredictFont
from app.domain.preprocess import Preprocessor
from PIL.Image import Image
from torchvision import models
def fetch_vgg16() -> nn.Module:
net = models.vgg16_bn(pretrained=False)
net.features[0] = nn.Conv2d(1, 64, 3, stride=1, padding=1)
net.classifier[6] = nn.Linear(4096, 365)
return net
class Predictor(metaclass=ABCMeta):
@abstractmethod
def predict(
self, image: Image, bounding_boxes: List[BoundingBox]
) -> List[PredictFont]:
raise NotImplementedError("Method not implemented")
class MockPredictor(Predictor):
def predict(
self, image: Image, bounding_boxes: List[BoundingBox]
) -> List[PredictFont]:
return [
PredictFont(
fontName="a",
fontNameJa="a",
fontNameEn="a",
fontWeight=100,
type="adobe",
adobeId="asssa",
score=0.1,
)
]
class FontPredictor(Predictor):
def __init__(self, preprocessor: Preprocessor, model: nn.Module) -> None:
self.preprocessor = preprocessor
self.model = model
def predict(
self, image: Image, bounding_boxes: List[BoundingBox]
) -> List[PredictFont]:
patches = self.preprocessor(image, bounding_boxes)
outputs = self.model(patches)
agg_outputs = torch.mean(outputs, dim=0)
top_fonts = torch.argsort(agg_outputs, descending=True)[:NUM_TOP_K].numpy()
scores = F.softmax(agg_outputs, dim=0)[top_fonts].detach().numpy()
return [
PredictFont(
fontName=FONT_LABEL_TO_META[f]["fontName"],
fontNameJa=FONT_LABEL_TO_META[f]["fontNameJa"],
fontNameEn=FONT_LABEL_TO_META[f]["fontNameEn"],
fontWeight=FONT_LABEL_TO_META[f]["fontWeight"],
type=FONT_LABEL_TO_META[f]["type"],
adobeId=FONT_LABEL_TO_META[f]["adobeId"],
score=round(s, 3),
)
for f, s in zip(top_fonts, scores)
]
|
kishimoto-banana/font-search-api
|
app/domain/predictor.py
|
predictor.py
|
py
| 2,308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2639510151
|
import nextcord
from nextcord import (
Interaction,
slash_command
)
from nextcord.ext import commands
coins = "<a:coins:952154182851383416>"
mainshop = [
{
"buyname": ["bodyguard", "bg", "Bodyguard", "BODYGUARD", "BG", "bODYGUARD"],
"name": "<:Thief:952151438157570078> Bodyguard",
"price": 35000,
"description": f"If you get robbed, This bodyguard can SAVE you AND your valuables, you can buy up to 5 bodyguards"
},
{
"buyname": ["laptop", "Laptop", "lap", "Lap", "LAPTOP", "lAPTOP", "LAP", "lAB"],
"name": "<:laptop:952184538556145695> Laptop",
"price": 25000,
"description": f"You would need this to start a compan, an online company leads to an EVEN BIGGER company!",
},
{
"buyname": ["antivirus", "Antivirus", "AntiVirus", "anti-virus", "Anti-Virus", "ANTIVIRUS", "ANTI-VIRUS", "aNTIVIRUS", "aNTI-VIRUS"],
"name": "Anti-Virus",
"price": 15000,
"description": "Scared of your company being hacked? even losing all of your company progress? well this anti-virus will block it! (**most** likely)",
},
{
"name": "Ferrari",
"price": 99999,
"description": "Sports Car",
},
]
class ShopCommand(commands.Cog):
def __init__(self, client: commands.Bot):
self.client = client
@slash_command(name="shop", description="Look at the Shop!")
async def shop(self, interaction: Interaction):
async with interaction.channel.typing():
em = nextcord.Embed(title="Shop", color=nextcord.Color.blue())
for item in mainshop:
name = item["name"]
price = item["price"]
desc = item["description"]
em.add_field(
name=f"{name}",
value=f"${price:,}{coins} | {desc}",
inline=False
)
await interaction.send(embed=em)
def setup(client):
client.add_cog(ShopCommand(client))
|
coderFlameyosFlow/Famu-Bot
|
cogs/economy/shop.py
|
shop.py
|
py
| 2,024 |
python
|
en
|
code
| 1 |
github-code
|
6
|
4702516944
|
import math
import os
import random
import re
import sys
"""
Cloud-jumping problem:
Given a string of 0s and 1s representing clouds, where 1 marks a dangerous cloud and 0 a safe one,
a person can jump forward at most 2 clouds at a time (assume the problem always has a solution).
Compute the minimum number of jumps needed.
"""
# Complete the jumpingOnClouds function below.
def jumpingOnClouds(c):
i=0 # current position
s=0 # steps needed
while i<len(c)-1:
if i+2<len(c) and c[i+2]=='0':
i+=2
else:
i+=1
s+=1
return s
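# For illustration (traced by hand): jumpingOnClouds("0010010") returns 4,
# following the path 0 -> 1 -> 3 -> 4 -> 6 over the dangerous clouds at indices 2 and 5.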
if __name__ == '__main__':
s=input('Please enter: ')
result = jumpingOnClouds(s)
print(result)
|
relidaning/myPython
|
hackerrank/jumpingOnClouds.py
|
jumpingOnClouds.py
|
py
| 607 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
26797450576
|
from birch.cells.cell import Cell
from birch.util import BG_COLOR
from random import shuffle
class ConnectableCell(Cell):
_masks = {
'': '00000000',
'_h': '01102222',
'_v': '10012222',
'_tl': '00112220',
'_tr': '01012202',
'_bl': '10102022',
'_br': '11000222',
'_x': '11110000',
'_o': '00000000',
'_i_tl': '11111110',
'_i_tr': '11111101',
'_i_bl': '11111011',
'_i_br': '11110111',
'_u_tl': '00112221',
'_u_tr': '01012212',
'_u_br': '11001222',
'_u_bl': '10102122',
'_f': '11111111',
'_hl': '00102222',
'_hr': '01002222',
'_vt': '00012222',
'_vb': '10002222',
'_vl': '10112121',
'_vr': '11011212',
'_hb': '11101122',
'_ht': '01112211',
'_ltee': '10112020',
'_ttee': '01112200',
'_rtee': '11010202',
'_btee': '11100022',
'_yt': '11110011',
'_yb': '11111100',
'_yl': '11110101',
'_yr': '11111010',
'_ytlt': '10112021',
'_ytll': '01112201',
'_ytrt': '11010212',
'_ytrr': '01112210',
'_ybrb': '11011202',
'_ybrr': '11101022',
'_yblb': '10112120',
'_ybll': '11100122',
'_ydr': '11110110',
'_ydl': '11111001',
'_ytlx': '11110001',
'_ytrx': '11110010',
'_yblx': '11110100',
'_ybrx': '11111000'
}
_default_texture = ''
_connection_mask_map = {}
_connection_masks = {}
def __init__(self, name, textures, position, size=[16, 16]):
if len(self._connection_masks) == 0:
for k in self._masks:
self._connection_masks[k] = []
mask = self._masks[k]
z = []
for item in list(mask):
z.append(int(item))
self._connection_masks[k].append(z)
texture_name = self._default_texture
super().__init__(name, textures, position, texture_name, size=size)
self.base_texture_name = name
self.next_tick = 0
# expects order to be [top, left, right, bottom]
def cache_texture(self, surrounding):
# create a list of suffixes, including none, to check for textures
# with some variation
suffixes = ['_0', '_1']
shuffle(suffixes)
items = ['']
items.extend(suffixes)
zoro = [0, 0, 0, 0, 0, 0, 0, 0]
mask = list(zoro)
for cell in surrounding:
if type(cell) != type(self) or cell == self:
continue
tb = cell.top == self.bottom
bb = cell.bottom == self.bottom
bt = cell.bottom == self.top
ll = self.left == cell.left
lr = self.left == cell.right
rl = self.right == cell.left
mask[0] = mask[0] or ll and tb
mask[1] = mask[1] or lr and bb
mask[2] = mask[2] or rl and bb
mask[3] = mask[3] or ll and bt
mask[4] = mask[4] or lr and tb
mask[5] = mask[5] or rl and tb
mask[6] = mask[6] or lr and bt
mask[7] = mask[7] or rl and bt
mask = list(map(lambda item: int(item), mask))
if mask == zoro:
return
"""
ok here's how this works.
first, we enumerate the keys of self._connection_masks. We are looking for
a case where the True/False settings of this match to what we deduced above.
If we find 'k' that matches, we also check to see if there is an included
mapping of other _suffixes to k. In this manner there can be a default
texture for many kinds of connections between cells.
Otherwise, if there is no mapping, the original k value is used on its own.
After this, we check for two types of textures: ones that end in k/sfx, and then
those that end in that plus _0 or _1. This way there can be some variation in
textures.
"""
for k in self._connection_masks:
for checkmask in self._connection_masks[k]:
mapped = [k]
matched = True
for (i, z) in enumerate(mask):
if checkmask[i] == 2:
continue
if z != checkmask[i]:
matched = False
break
if not matched:
continue
# look for other suffixes that should be checked instead of this
# name.
if k in self._connection_mask_map:
mapped = self._connection_mask_map[k]
for sfx in mapped:
tex_key = '%s%s' % (self.base_texture_name, sfx)
# cycle through our prebuilt list of suffixes and see if a tex exists
for item in items:
retex_key = '%s%s' % (tex_key, item)
if retex_key in self.textures:
self.texture_name = retex_key
self.image = self.textures[self.texture_name]
return
self.texture_name = self._default_texture
self.image = self.textures[self.texture_name]
|
lysol/birch
|
birch/cells/connectable.py
|
connectable.py
|
py
| 5,363 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73712873787
|
# -*- coding: utf-8 -*-
import pandas as pd
def DropCols():
for num in range(1,2,1):
print("now is dropping columns in---"+"HO_",num,".csv")
df=pd.read_csv("HO_{}.csv".format(str(num) ) )
#df=df.drop['Unnamed: 0',1]
x=[0]
df.drop(df.columns[x],axis=1,inplace=True)
print('save start---')
#date_1[['mobile', 'plan_id']].to_csv(output_file, sep=',', header=True,index=False)
        df.to_csv("HO_{}.csv".format(str(num)))
print('save end')
def GetCols():
for num in range(8,9,1):
print("now is showing---",num)
df=pd.read_csv("HO_{}.csv".format(str(num) ) )
cols=df.columns.values.tolist()
#cols=df.columns[0]
print(cols)
print(df)
if __name__ =="__main__":
#DropCols()
GetCols()
|
jasscical/pythonLearning
|
03_去掉某一列.py
|
03_去掉某一列.py
|
py
| 895 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42967514090
|
class CalibrationAlert:
def __init__(
self,
device_id,
command,
session,
step,
direction,
):
self.device_id = device_id
self.command = command
self.session = session
self.step = step
self.direction = direction
def prepare_to_send(self):
return {
"device_id": self.device_id,
"command": self.command,
"session": self.session,
"step": self.step,
"direction": self.direction
}
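# Example usage (illustrative values only, not from the original module):
# alert = CalibrationAlert("device-1", "calibrate", "session-7", 2, "forward")
# alert.prepare_to_send()
# -> {"device_id": "device-1", "command": "calibrate", "session": "session-7",
#     "step": 2, "direction": "forward"}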
|
sjuggernaut/smartback
|
infra/domain/alert/calibration_alert.py
|
calibration_alert.py
|
py
| 576 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26602476689
|
import numpy as np
import pandas as pd
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify
app = Flask(__name__)
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
measurement = Base.classes.measurement
station = Base.classes.station
session = Session(engine)
@app.route("/")
def home():
return (
f"Welcome to Sofie's OFFICIAL Climate App API!<br/>"
f"<br/>"
f"Available Routes are:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start (start date must be in mm-dd format) <br/>"
f"/api/v1.0/start/end (start & end dates must be in yyyy-mm-dd format) <br/>"
f"<br/>"
f"May Your Days Be Bright & Sunny, but Your Hair NEVER Frizzy!"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
lastDate = session.query(func.max(measurement.date)).all()[0][0]
lastDate = dt.datetime.strptime(lastDate, '%Y-%m-%d')
priorYear = lastDate - dt.timedelta(365)
result = session.query(measurement.date, measurement.prcp).filter(measurement.date>=priorYear).all()
precipitation = []
for date, prcp in result:
precipitation_dict = {}
precipitation_dict["date"] = date
precipitation_dict["prcp"] = prcp
precipitation.append(precipitation_dict)
return jsonify(precipitation)
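# Illustrative response shape for /api/v1.0/precipitation (actual values depend on the data):
# [{"date": "<YYYY-MM-DD>", "prcp": <float or null>}, ...]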
@app.route("/api/v1.0/stations")
def stations():
results = session.query(station.station,station.name)\
.group_by(station.name)\
.order_by(station.name)\
.all()
stations = list(np.ravel(results))
return jsonify(stations)
@app.route("/api/v1.0/tobs")
def TObs():
lastDate = session.query(func.max(measurement.date)).all()[0][0]
lastDate = dt.datetime.strptime(lastDate, '%Y-%m-%d')
priorYear = lastDate - dt.timedelta(365)
results = session.query(measurement.tobs, measurement.date)\
.filter(measurement.station == 'USC00519281', measurement.date>=priorYear).all()
TObs = list(np.ravel(results))
return jsonify(TObs)
@app.route("/api/v1.0/<start>")
def start(start):
tmin = func.min(measurement.tobs)
tavg = func.avg(measurement.tobs)
tmax = func.max(measurement.tobs)
sel = [tmin, tavg, tmax]
result = session.query(*sel).filter(func.strftime("%m-%d", measurement.date) >= start).all()
start = []
for tmin, tavg, tmax in result:
start_dict = {}
start_dict["tmin"] = tmin
start_dict["tavg"] = tavg
start_dict["tmax"] = tmax
start.append(start_dict)
return jsonify(start)
@app.route("/api/v1.0/<start>/<end>")
def SnE(start, end):
tmin = func.min(measurement.tobs)
tavg = func.avg(measurement.tobs)
tmax = func.max(measurement.tobs)
sel = [tmin, tavg, tmax]
result = session.query(*sel).filter(measurement.date >= start).filter(measurement.date <= end).all()
end = []
for tmin, tavg, tmax in result:
end_dict = {}
end_dict["tmin"] = tmin
end_dict["tavg"] = tavg
end_dict["tmax"] = tmax
end.append(end_dict)
return jsonify(end)
if __name__ == "__main__":
app.run(debug=True)
|
SofiaAS1/SQLalchemy-Challenge
|
app.py
|
app.py
|
py
| 3,377 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2721595571
|
import random
###############################################################################
# Name: recombine
#
# Assumption: None
#
# Purpose:      recombine takes two lists and changes the contents of a third
#               to corresponding chunks of the first two lists. This is done
#               by copying over segments from either the first or second list
#               to the third in a random fashion.
#
# Arguments:    sites - the number of crossover sites (random cut points)
#
#               listOne - the 1st list that contributes to the recombined
#                         list
#
#               listTwo - the 2nd list that contributes to the recombined
#                         list
#
#               dest - the list that is overwritten with chunks of the
#                      first two lists
#
#               length - the number of elements to recombine
#
# Returns: Nothing
#
###############################################################################
def recombine(sites, listOne, listTwo, dest, length):
# Choose the indices where the segments will alternate
indices = [0, length] + [int(random.random() * length) for i in range(sites)]
indices.sort()
    # Build the new list, copying each segment from either listOne or listTwo at random.
for i in range(sites + 1):
if random.random() < .5:
dest[indices[i]:indices[i + 1]] = listOne[indices[i]:indices[i + 1]]
else:
dest[indices[i]:indices[i + 1]] = listTwo[indices[i]:indices[i + 1]]
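# Illustrative usage (not part of the original module): with 2 crossover sites,
# recombine overwrites dest with alternating chunks drawn from listOne and listTwo.
#   a, b, dest = [0] * 10, [1] * 10, [None] * 10
#   recombine(2, a, b, dest, 10)
#   # dest might now look like [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]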
################################################################################
# Name: myCopy
#
# Assumptions:  It is assumed that lists passed to this function will contain
#               only integers, floats, strings, booleans, or nested lists.
#
# Purpose:      myCopy makes a copy of a list by recursively appending each
#               item in the original list to a new list.
#
# Arguments:    origList is the list to be copied
#
# Returns:      The program exits if the list contains a type that is not one of
#               those stated in the assumptions. If the list contains valid
#               types, then a copy of the original list is returned.
################################################################################
def myCopy(origList):
newList = []
for i in range(len(origList)):
        # Append if the item in the list is an integer, string,
        # boolean, or float
if type(origList[i]) is int or type(origList[i]) is str or type(origList[i]) is bool or type(origList[i]) is float:
newList.append(origList[i])
# Recursively copy the item if it is a list
elif type(origList[i]) is list:
newList.append(myCopy(origList[i]))
# Freak out if some other type of object is in the list
else:
print("Error: List contains invalid types")
exit(1)
return newList
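# Illustrative behaviour: myCopy([1, [2.0, True], "a"]) returns a new, independent
# list equal to the original, with the nested list copied as well.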
##########################################################################################
# Name: pickParents
#
# Assumptions: It is assumed that the population has entries 0-populationSize - 1
#
# Purpose:      pickParents exists for the sake of picking parents to produce an
#               offspring. If selection is turned off, then two distinct individuals are
#               chosen at random to be parents. If selection is turned on, the first
#               two distinct individuals with a fitness higher than a random number are
#               chosen to be the parents.
#
# Arguments: selection - a boolean corresponding to whether or not selection is
# turned on
#
# population - a list containing the individuals of the population
#
# fitnessIndex - the index of an individuals fitness.
# population[i][fitnessIndex] gives the fitness of
# individual i
#
# populationSize - the number of individuals contained the population.
# len(population) == populationSize
#
# Returns:      A tuple containing two distinct integers in the interval
#               [0, populationSize - 1]. Each integer corresponds to the entry of the
#               population containing the genetic information of a parent.
#
##########################################################################################
def pickParents(selection, population, fitnessIndex, populationSize):
# If selection is on, discriminate against low fitness
if selection:
# Pick the first mom who has a fitness greater than a random number
while True:
mom = int(random.random() * populationSize)
if population[mom][fitnessIndex] > random.random():
break
# Pick the first dad who has a fitness greater than a random number and isn't already the mom
while True:
dad = int(random.random() * populationSize)
if population[dad][fitnessIndex] > random.random() and dad != mom:
break
    # If selection is off, don't consider fitness
else:
# Pick a mom at random
mom = int(random.random() * populationSize)
# Pick the first dad who isn't already the mom
dad = mom
while dad == mom:
dad = int(random.random() * populationSize)
return (mom, dad)
##########################################################################################
# Name: replaceDeadWithOffspring
#
# Assumptions:  It is assumed that the likelihood of getting a rho gene from either
#               parent in the case of recombination is .5; it is also assumed that sections
#               of the lDel contributed from both parents can differ in size. In the case
#               of no recombination, it is assumed that the person who died cannot be
#               the parent of the offspring replacing them.
#
# Purpose:      replaceDeadWithOffspring replaces a dead individual with an offspring.
#               If recombination is off, then the offspring gets copies of each of the
#               genes of another individual in the population who is randomly chosen.
#               If it is on, then the offspring gets a combination of different genes
#               from each parent and a fitness reflecting the difference in gene.
#
#
# Arguments:    deadIndex - the index in the population list of the individual who
#                           died. This will be the index of the offspring.
#               recombination - a boolean corresponding to whether or not recombination
#                               is allowed when developing offspring.
#               population - a list containing the individuals of the population and all of
#                            their genetic information
#               populationSize - the number of individuals in the population
#               selection - a boolean corresponding to whether or not selection is turned on
#               fitnessIndex - the index of an individual's fitness within its entry
#
# Returns: Nothing
#
##########################################################################################
def replaceDeadWithOffspring(deadIndex, recombination, population, populationSize, selection,
fitnessIndex):
if recombination:
# Pick two parents
parents = pickParents(selection, population, fitnessIndex, populationSize)
mateOne = parents[0]
mateTwo = parents[1]
# Randomly assign one of the rho values from the parents to the individual
population[deadIndex][0] = population[mateOne][0]
if random.random() < .5:
population[deadIndex][0] = population[mateTwo][0]
        # Recombine the parents and then give the offspring the result for
        # the lDel, alpha, and beta genes of the offspring
recombine(5, population[mateOne][1], population[mateTwo][1],
population[deadIndex][1], population[deadIndex][1].size)
recombine(2, population[mateOne][3], population[mateTwo][3],
population[deadIndex][3], population[deadIndex][3].size)
recombine(2, population[mateOne][4], population[mateTwo][4],
population[deadIndex][4], population[deadIndex][4].size)
else:
# Pick an individual to be the parent who isn't the person who just died
mateOne = deadIndex
while mateOne == deadIndex:
mateOne = int(random.random() * populationSize)
        # Copy the parent into the offspring's slot recursively
population[deadIndex] = myCopy(population[mateOne])
##########################################################################################
# Name: mutateIndividual
#
# Assumptions:  Beta mutations only occur when an lDel mutation has already occurred.
#
# Purpose:      This method checks to see if there's a rho, lDel, alpha, or beta
#               mutation according to the mutation rates given by the user. Let k
#               be the number of beta loci. If the locus of an lDel mutation is less
#               than or equal to k, then the corresponding locus of the beta gene is
#               also mutated. If the lDel mutation locus is greater than k, then only
#               that lDel locus is changed.
#
# Arguments:    population - a reference to the array containing the genetic
#                            information of each individual
#
#               plDelMutation - the probability of there being at least one mutation
#                               in the given lDel gene
#
#               pRhoMutation - the probability of a mutation of the read-through rho
#
#               pDelToNonDel - the probability of a given lDel locus going from
#                              deleterious to benign
#
#               pNonDelToDel - the probability of a given lDel locus going from
#                              benign to deleterious
#
#               mutantIndex - the index in the population array that holds the genetic
#                             information of the individual to be mutated
#
#               pAlphaMutation - the mutation rate for a given alpha gene
#
#               pBetaMutation - the mutation rate for a given beta gene
#
#               pCooption - the probability of a cooption mutation
#
# Returns:      Nothing
#
##########################################################################################
def mutateIndividual(mutantIndex, population, pRhoMutation, plDelMutation, pAlphaMutation,
pBetaMutation, pDelToNonDel, pNonDelToDel, pCooption):
if random.random() < pRhoMutation:
population[mutantIndex][0] *= 10**random.gauss(0, .2)
if random.random() < plDelMutation:
# Pick the locus to change
betaLength = population[mutantIndex][4].size
lDelLength = population[mutantIndex][1].size
changeLoci = random.randint(0, lDelLength - 1)
# The lDel locus has a corresponding beta locus
if changeLoci < betaLength:
mutationOccured = 0
            # Locus to be changed is a 1
if population[mutantIndex][1][changeLoci]:
if random.random() < pDelToNonDel:
population[mutantIndex][1][changeLoci] = 0
mutationOccured += 1
            # Locus to be changed is a 0
else:
if random.random() < pNonDelToDel:
population[mutantIndex][1][changeLoci] = 1
mutationOccured += 1
            # Only mutate a beta if an lDel was changed
if mutationOccured:
# Add a number drawn out of a normal distribution
population[mutantIndex][4][changeLoci] += \
random.gauss(-1 * population[mutantIndex][4][changeLoci] / 50.0,
10 / float(betaLength))
# Check for a cooption mutation
if random.random() < pCooption:
population[mutantIndex][3][changeLoci] += population[mutantIndex][4][changeLoci]
population[mutantIndex][4][changeLoci] = random.gauss(0.0, 16.0 / (9.0/25.0))
else:
# Change an lDel locus in the same way as above
if population[mutantIndex][1][changeLoci]:
if random.random() < pDelToNonDel:
population[mutantIndex][1][changeLoci] = 0
else:
if random.random() < pNonDelToDel:
population[mutantIndex][1][changeLoci] = 1
    # This part does the alpha mutation
if random.random() < pAlphaMutation:
        # Pick an alpha locus to change
alphaLength = population[mutantIndex][3].size
changeLoci = random.randint(0, alphaLength - 1)
# Add another number drawn from a normal distribution
delta = random.gauss(-1 * population[mutantIndex][3][changeLoci] / 50.0,
10 / float(alphaLength))
population[mutantIndex][3][changeLoci] += delta
|
emanuelbust/agentlDel-Rho
|
cycle.py
|
cycle.py
|
py
| 10,802 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70945121788
|
# Read search results from the web and render them as a word cloud
from bs4 import BeautifulSoup
import urllib.request
from urllib.parse import quote
#keyword = input('Search term: ')
keyword = '장마'
print(keyword)
print(quote(keyword))
# Use the Dong-A Ilbo (donga.com) search feature
target_url = "http://www.donga.com/news/search?query=" + quote(keyword)
sou_code = urllib.request.urlopen(target_url)
soup = BeautifulSoup(sou_code, 'lxml', from_encoding='utf-8')
#print(soup)
########################
msg = ""
for title in soup.find_all('p', 'tit'):
title_link = title.select('a')
#print(title_link)
article_url = title_link[0]['href']
#print(article_url)
sou_article = urllib.request.urlopen(article_url)
soup = BeautifulSoup(sou_article,'lxml', from_encoding='utf-8')
contents = soup.select('div.article_txt')
for imsi in contents:
item = str(imsi.find_all(text=True))
#print(item)
msg = msg + item
print(msg)
from konlpy.tag import Okt
from collections import Counter
okt = Okt()
nouns = okt.nouns(msg)
result = []
for imsi in nouns:
    if len(imsi) > 1: # keep only words with at least two characters
result.append(imsi)
print(result)
count = Counter(result)
tag = count.most_common(50) # keep only the top 50 words
print(tag)
##########################################
import pytagcloud
# (min)maxsize : font size
taglist = pytagcloud.make_tags(tag, maxsize=100)
print(taglist)
pytagcloud.create_tag_image(taglist, "word.png", size=(1000,600),
fontname="Korean", rectangular=False)
# Read the image with matplotlib
# import matplotlib.pylab as plt
# import matplotlib.image as mpimg
# #%matplotlib inline
# img = mpimg.imread("word.png")
# plt.imshow(img)
# plt.show()
# Open the image in a web browser
import webbrowser
webbrowser.open("word.png")
|
kangmihee/EX_python
|
py_morpheme/pack/morp3wordcloud.py
|
morp3wordcloud.py
|
py
| 1,969 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39373093044
|
import requests
__author__ = "Griffith Asare Awuah (@gwuah)"
class ogma():
"""Language Detection Library For Pythonistas"""
def __init__(self, accessKey):
self.payload = {'access_key': str(accessKey)}
def detect(self, phrase) :
self.payload['query'] = str(phrase)
try :
r = requests.get('http://apilayer.net/api/detect', self.payload)
self.response = r.json()
if (r.status_code == requests.codes.ok) and (self.response['success'] != False) :
# connection successful! You were able to get meaningful data from the endpoint
return "{}".format(self.response['results'][0]['language_name'])
else :
                if 400 <= r.status_code < 500 :
                    # the request was rejected with a 4xx status, i.e. a client-side error
                    print("Detection wasn't successful. \nThere was an error from your side. \nCheck your internet connection.")
                elif 500 <= r.status_code < 600 :
                    # the endpoint answered with a 5xx status, i.e. a server-side error
                    print("Detection wasn't successful. \nThere was an error from the server. \nTry again later.")
                elif (self.response['success'] == False) and (self.response['error']['code'] == 101) :
                    # You probably didn't submit a correct payload
                    return self.response['error']['info'][:-41]
                elif (self.response['success'] == False) and (self.response['error']['code'] == 210) :
                    # You probably didn't submit a correct payload
                    return self.response['error']['info'][:-43]
        except requests.exceptions.ConnectionError :
            print("Detection wasn't successful. \nYou are not connected to the internet.")
|
gwuah/ogma
|
api.py
|
api.py
|
py
| 1,561 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71840538747
|
from flask import Flask
from flask import render_template
from flask import Response, request, jsonify
app = Flask(__name__)
current_id = 4
sales = [
{
"id": 1,
"salesperson": "James D. Halpert",
"client": "Shake Shack",
"reams": 1000
},
{
"id": 2,
"salesperson": "Stanley Hudson",
"client": "Toast",
"reams": 4000
},
{
"id": 3,
"salesperson": "Michael G. Scott",
"client": "Computer Science Department",
"reams": 10000
},
]
clients = [
"Shake Shack",
"Toast",
"Computer Science Department",
"Teacher's College",
"Starbucks",
"Subsconsious",
"Flat Top",
"Joe's Coffee",
"Max Caffe",
"Nussbaum & Wu",
"Taco Bell",
]
non_ppc_people = [
"Phyllis",
"Dwight",
"Oscar",
"Creed",
"Pam",
"Jim",
"Stanley",
"Michael",
"Kevin",
"Kelly"
]
ppc_people = [
"Angela"
]
@app.route('/infinity')
def infinity(name=None):
return render_template('cu-paper-infinity.html', sales=sales, clients=clients)
@app.route('/ppc')
def ppc(name=None):
return render_template('ppc.html', non_ppc_people=non_ppc_people, ppc_people=ppc_people)
@app.route('/save_sale', methods=['GET', 'POST'])
def save_sale():
global current_id
global sales
global clients
json_data = request.get_json()
salesperson = json_data["salesperson"]
client = json_data["client"]
reams = json_data["reams"]
current_id += 1
new_sale_log = {
"id": current_id,
"salesperson": salesperson,
"client": client,
"reams": reams
}
sales.append(new_sale_log)
if client not in clients:
clients.append(client)
return jsonify(sales=sales, clients=clients)
@app.route('/delete_sale', methods=['GET', 'POST'])
def delete_sale():
global sales
delete_id = request.get_json()
del sales[delete_id]
return jsonify(sales=sales)
@app.route('/move_to_non_ppc', methods=['GET', 'POST'])
def move_to_non_ppc():
global non_ppc_people
global ppc_people
name = request.get_json()
if name not in non_ppc_people:
non_ppc_people.append(name)
ppc_people.remove(name)
return jsonify(non_ppc_people=non_ppc_people, ppc_people=ppc_people)
@app.route('/move_to_ppc', methods=['GET', 'POST'])
def move_to_ppc():
global non_ppc_people
global ppc_people
name = request.get_json()
if name not in ppc_people:
ppc_people.append(name)
non_ppc_people.remove(name)
return jsonify(non_ppc_people=non_ppc_people, ppc_people=ppc_people)
if __name__ == '__main__':
app.run(debug = True)
|
haoshuai999/User-Interface
|
cu-paper-infinity&ppc/app.py
|
app.py
|
py
| 2,393 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21430351891
|
'''
Created For Mega Projects Repository
Turtle Graphics - This is a common project where you create a floor of 20 x 20 squares.
Using various commands you tell a turtle to draw a line on the floor. You have commands to move forward, turn left or right, lift or drop the pen, etc.
Do a search online for "Turtle Graphics" for more information.
@author: Sambit
'''
#imports
import turtle
#Functions
#Function to draw a square
def draw_square(some_turtle):
"""
@param Input: Turtle to perform task
@console Output: Square pattern
"""
for i in range(4):
some_turtle.forward(250)
some_turtle.right(90)
#Function to draw different shapes
def draw_art():
"""
@param Input: None
@console Output: Pattern
"""
window = turtle.Screen()
window.bgcolor("black")
don = turtle.Turtle()
mike = turtle.Turtle()
don.shape("turtle")
don.color("purple", "green")
don.speed(20)
mike.shape("turtle")
mike.color("orange", "green")
mike.speed(20)
for i in range(360):
don.circle(150)
don.right(1)
for i in range(360):
draw_square(mike)
mike.right(1)
window.exitonclick()
draw_art()
|
SambitAcharya/Projects
|
My Solutions/Graphics-And-Multimedia/turtlegraphics.py
|
turtlegraphics.py
|
py
| 1,211 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14987411881
|
from PIL import Image
import os
from tkinter import filedialog
import tkinter as tk
def convert_pdf():
index = 0
path_picture = filedialog.askdirectory()
dire = 'Converted'
path_pdf = os.path.join(path_picture , dire)
os.mkdir(path_pdf)
my_list = os.listdir(path_picture)
    for i in my_list:
        source = os.path.join(path_picture, i)
        if not os.path.isfile(source):
            continue  # skip the newly created 'Converted' folder and anything else that isn't a file
        image = Image.open(source)
        im = image.convert('RGB')
        im.save(os.path.join(path_pdf, i[:-4] + '.pdf'), quality=15, optimize=True)
        index = index + 1
root = tk.Tk()
convert_pdf()
tk.mainloop()
|
Elkayamacc/Image2PDF
|
PDFConverterV2.py
|
PDFConverterV2.py
|
py
| 561 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32397358077
|
import os
import random
import numpy as np
import torch
from scipy import ndimage as ndi
from torch.nn import functional as F
from torch.utils.data import Dataset
from my_utils import normalize
class UNetDataset(Dataset):
def __init__(self, data_dir, shape, train, transform):
self.shape = shape
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
self.mask_path = os.path.join(data_dir, 'masks')
self.train = train
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
mask = np.load(os.path.join(self.mask_path, self.sample_files[index]))
mask = mask.astype(np.float32)
if int(self.sample_files[index][-6:-4]) == 0:
rand = random.randrange(3, len(sample) - 3)
sample = sample[rand - 3:rand + 4]
mask = mask[rand]
if self.transform is not None:
sample = self.transform(sample)
sample = np.concatenate((sample[0], sample[1]))
if self.train:
htranslation = random.randint(-10, 10)
vtranslation = random.randint(-10, 10)
angle = random.randint(-10, 10)
sample = ndi.shift(sample, (0, htranslation, vtranslation), mode='nearest')
sample = ndi.rotate(sample, angle, (-1, -2), mode='nearest', reshape=False)
mask = ndi.shift(mask, (htranslation, vtranslation), mode='nearest')
mask = ndi.rotate(mask, angle, (-1, -2), mode='nearest', reshape=False)
if random.randint(0, 1) == 1:
sample = np.flip(sample, -1)
mask = np.flip(mask, -1)
sample = torch.from_numpy(sample[np.newaxis, ...].copy())
sample = F.interpolate(sample, self.shape, mode='bilinear', align_corners=False)
mask = torch.from_numpy(mask[np.newaxis, np.newaxis, ...].copy())
mask = F.interpolate(mask, self.shape, mode='nearest')
mask2 = F.interpolate(mask, scale_factor=0.5, mode='nearest', recompute_scale_factor=False)
mask3 = F.interpolate(mask, scale_factor=0.25, mode='nearest', recompute_scale_factor=False)
return sample[0], mask[0], mask2[0], mask3[0]
class GenesisDataset2D(Dataset):
def __init__(self, data_dir, shape, transform, flip_rate):
self.shape = shape
self.transform = transform
self.flip_rate = flip_rate
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
x = np.load(os.path.join(self.sample_path, self.sample_files[index]))
rand = random.randrange(3, len(x) - 3)
x = x[rand - 3:rand + 4]
if random.random() < self.flip_rate:
x = np.flip(x, -1)
x = normalize(x)
x = np.concatenate((x[0], x[1]))
x = ndi.zoom(x, (1, self.shape[0] / x.shape[1], self.shape[1] / x.shape[2]), order=2, mode="nearest")
y = self.transform(x)
return torch.from_numpy(y.copy().astype(np.float32)), torch.from_numpy(x.copy().astype(np.float32))
class PPos2DDataset(Dataset):
def __init__(self, data_dir, shape, num_classes, transform):
self.shape = shape
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
self.num_classes = num_classes
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
rand = random.randrange(3, len(sample) - 3)
target = (rand - 3) / (len(sample) - 6)
sample = sample[rand - 3:rand + 4]
if self.transform is not None:
sample = self.transform(sample)
sample = np.concatenate((sample[0], sample[1]))
return torch.from_numpy(sample), torch.tensor([target])
class UNetClassifierDataset(Dataset):
def __init__(self, data_dir, train, transform):
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
self.mask_path = os.path.join(data_dir, 'masks')
self.train = train
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
mask = np.load(os.path.join(self.mask_path, self.sample_files[index]))
mask = mask.astype(np.float32)
if self.transform is not None:
sample = self.transform(sample)
if self.train:
htranslation = random.randint(-10, 10)
vtranslation = random.randint(-10, 10)
dtranslation = random.randint(-2, 2)
angle = random.randint(-10, 10)
sample = ndi.shift(sample, (0, dtranslation, htranslation, vtranslation), mode='nearest')
sample = ndi.rotate(sample, angle, (-1, -2), mode='nearest', reshape=False)
mask = ndi.shift(mask, (dtranslation, htranslation, vtranslation), mode='nearest')
mask = ndi.rotate(mask, angle, (-1, -2), mode='nearest', reshape=False)
if random.randint(0, 1) == 1:
sample = np.flip(sample, -1)
mask = np.flip(mask, -1)
mask2 = ndi.zoom(mask, 0.5, order=0, mode='nearest')
mask3 = ndi.zoom(mask, 0.25, order=0, mode='nearest')
return torch.from_numpy(sample.copy()), torch.from_numpy(mask[np.newaxis, ...].copy()), torch.from_numpy(
mask2[np.newaxis, ...].copy()), torch.from_numpy(mask3[np.newaxis, ...].copy()), torch.tensor(
[self.sample_files[index][:5].isdigit()], dtype=torch.float)
# return torch.from_numpy(sample.copy()), torch.from_numpy(mask[np.newaxis, ...].copy()), torch.from_numpy(
# mask2[np.newaxis, ...].copy()), torch.from_numpy(mask3[np.newaxis, ...].copy()), torch.tensor(
# [self.sample_files[index][6:11].isdigit()], dtype=torch.float)
class ClassifierDataset(Dataset):
def __init__(self, data_dir, shape, train, transform=None):
self.shape = shape
self.train = train
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
if train:
self.mask_path = os.path.join(data_dir, 'masks')
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
if self.train and self.sample_files[index][-5] == '1':
mask = np.load(os.path.join(self.mask_path, self.sample_files[index]))
indices = mask.nonzero()
nodule_length = [0, 0, 0]
scale_length = [0, 0, 0]
for i in range(3):
start = np.min(indices[i])
end = np.max(indices[i]) + 1
nodule_length[i] = end - start
while True:
for i in range(3):
while True:
scale_length[i] = round(nodule_length[i] * random.uniform(1, 3))
if scale_length[i] < sample.shape[i]:
break
depth = random.randint(0, sample.shape[0] - scale_length[0])
height = random.randint(0, sample.shape[1] - scale_length[1])
width = random.randint(0, sample.shape[2] - scale_length[2])
if depth > np.max(indices[0]) or depth + scale_length[0] < np.min(indices[0]) or height > np.max(
indices[1]) or height + \
scale_length[1] < np.min(indices[1]) or width > np.max(indices[2]) or width + scale_length[2] < \
np.min(indices[2]):
sample = sample[depth:depth + scale_length[0], height:height + scale_length[1],
width:width + scale_length[2]]
break
if self.transform is not None:
sample = self.transform(sample)
sample = torch.from_numpy(sample[np.newaxis, ...].copy())
sample = F.interpolate(sample, self.shape, mode='trilinear', align_corners=True)
return sample[0], torch.tensor([self.sample_files[index][-5] == '0'], dtype=torch.float)
class PCL2DDataset(Dataset):
def __init__(self, data_dir, shape, transform):
self.shape = shape
self.transform = transform
self.sample_path = os.path.join(data_dir, 'samples')
self.sample_files = os.listdir(self.sample_path)
self.sample_files.sort()
def __len__(self):
return len(self.sample_files)
def __getitem__(self, index):
sample = np.load(os.path.join(self.sample_path, self.sample_files[index]))
rand = random.randrange(3, len(sample) - 3)
slice_position = (rand - 3) / (len(sample) - 6)
partition = int((rand - 3) / (len(sample) - 6) * 4) + 1
sample = sample[rand - 3:rand + 4]
img1 = self.transform(sample)
img2 = self.transform(sample)
img1 = np.concatenate((img1[0], img1[1]))
img2 = np.concatenate((img2[0], img2[1]))
return torch.from_numpy(img1), torch.from_numpy(img2), torch.tensor(slice_position), torch.tensor(partition)
|
alienzyj/PPos
|
my_dataset.py
|
my_dataset.py
|
py
| 10,080 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30814462620
|
"""
Script Description
- train NADA model
Usage
- $ python train_nada.py --config_path [config_path] --name [exp_name] --suppress
- $ cat [config_path] | python train_nada.py --pipe --name [exp_name] --suppress
Author
- Minsu Kim
- Dongha Kim
History
- 230419 : MINSU , init
- adaptation loop
- code skeleton
- 230422 : MINSU , implement
- code corresponding to GET3D application 4.3.2
- 230422 : DONGHA , convert as distributed script
Reference
- StyleGAN-NADA Github
https://github.com/rinongal/StyleGAN-nada/blob/main/ZSSGAN/train.py
"""
import sys
import os
import time
import tempfile
import yaml
import numpy as np
import torch
from torchvision.utils import save_image
import logging
import dist_util
from model_engine import find_get3d
from nada import YAIverseGAN
from functional import unfreeze_generator_layers, generate_custom
if find_get3d():
from torch_utils import custom_ops
_SEED = 0
_SELECT = 50
def get_logger(exp_name, outdir, rank=0):
logger = logging.getLogger(exp_name)
if rank != 0:
logger.disabled = True
else:
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
file_handler = logging.FileHandler(f'{outdir}/{exp_name}_{time.strftime("%Y-%m-%d-%H-%M", time.gmtime())}.log')
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
return logger
def subprocess_fn(rank, config, args, temp_dir):
if config['GLOBAL']['gpus'] > 1:
dist_util.setup_dist(temp_dir, rank, config['GLOBAL']['gpus'])
if rank != 0:
custom_ops.verbosity = 'none'
if rank == 0:
print("START ! EXP NAME : ", args.name)
print("SETTING : LOAD YaiverseGAN")
with dist_util.synchronized_ops():
net = YAIverseGAN(config)
unfreeze_generator_layers(net.generator_trainable, [], [])
if dist_util.get_world_size() > 1:
ddp_net = torch.nn.parallel.DistributedDataParallel(
net,
device_ids=[dist_util.dev()],
output_device=dist_util.dev(),
broadcast_buffers=True,
bucket_cap_mb=256,
find_unused_parameters=True,
)
else:
ddp_net = net
device, outdir, batch, n_vis, sample_1st, sample_2nd, iter_1st, iter_2nd, lr, \
output_interval, save_interval, gradient_clip_threshold = net.get_loop_settings()
g_optim = torch.optim.Adam(
net.generator_trainable.parameters(),
lr=lr,
betas=(0.9, 0.99),
)
with dist_util.synchronized_ops():
if rank == 0:
sample_dir = os.path.join(outdir, "sample")
ckpt_dir = os.path.join(outdir, "checkpoint")
os.makedirs(outdir, exist_ok=True)
os.makedirs(sample_dir, exist_ok=True)
os.makedirs(ckpt_dir, exist_ok=True)
torch.manual_seed(_SEED)
np.random.seed(_SEED)
logger = get_logger(args.name, outdir, rank=rank)
logger.info(f'EXP NAME : {args.name} | CONFIG : {args.config_path} | SEED : {_SEED} | BATCH : {batch}')
z_dim = 512 # Fixed value
fixed_z_geo = torch.randn(n_vis, z_dim, device=device) # for eval
fixed_z_tex = torch.randn(n_vis, z_dim, device=device)
grid_rows = int(n_vis ** 0.5)
eval_camera = net.generator_frozen.synthesis.generate_rotate_camera_list(n_batch=1)[4].repeat(n_vis, 1, 1, 1)
# ------------------ Training 1st --------------
# latent z should be 2 -> for geo , tex
# different n_batch latents per gpu <- equals: seeing n_batch * n_gpu latents
latent_generator = torch.Generator(device)
latent_generator.manual_seed(rank)
sample_z_geo = torch.randn(sample_1st, z_dim, device=device, generator=latent_generator)
sample_z_tex = torch.randn(sample_1st, z_dim, device=device, generator=latent_generator)
sample_z_geo_chunks = torch.split(sample_z_geo, batch, dim=0)
sample_z_tex_chunks = torch.split(sample_z_tex, batch, dim=0)
logger.info(f'START TRAINING LOOP')
min_loss_store = []
for epoch in range(iter_1st):
for i, (z_geo_chunk, z_tex_chunk) in enumerate(zip(sample_z_geo_chunks, sample_z_tex_chunks)):
# training
ddp_net.train()
# memory-efficient forward : support n_view rendering
_, loss = ddp_net(z_tex_chunk, z_geo_chunk)
if epoch == iter_1st - 1: # to choose 50 latents with low loss value
loss_val = loss.cpu().detach().numpy().tolist()
min_loss_store += loss_val
loss = loss.mean()
ddp_net.zero_grad()
loss.backward()
if gradient_clip_threshold == -1:
pass
else:
torch.nn.utils.clip_grad_norm_(net.generator_trainable.parameters(), gradient_clip_threshold)
g_optim.step()
logger.info(f'EPOCH : {epoch} | STEP : {i:0>4} | LOSS : {loss:.5f}')
# evaluation & save results | save checkpoints
with dist_util.synchronized_ops():
if rank == 0:
if i % output_interval == 0:
ddp_net.eval()
with torch.no_grad():
sampled_dst, _ = generate_custom(
net.generator_trainable,
fixed_z_tex, fixed_z_geo,
use_mapping=True, mode='layer', camera=eval_camera
)
rgb = sampled_dst[:, :-1]
mask = sampled_dst[:, -1:]
bg = torch.ones(rgb.shape, device=device)
bg *= 0.0001 # for better background
new_dst = rgb*mask + bg*(1-mask)
save_image(
new_dst,
os.path.join(sample_dir, f"Iter1st_Epoch-{epoch}_Step-{i:0>4}.png"),
nrow=grid_rows,
normalize=True,
range=(-1, 1),
)
logger.info(f'ITER 1st | EPOCH : {epoch} | STEP : {i:0>4} | >> Save images ...')
if i % save_interval == 0 and not args.suppress:
torch.save(
{
"g_ema": net.generator_trainable.state_dict(),
"g_optim": g_optim.state_dict(),
},
f"{ckpt_dir}/Iter1st_Epoch-{epoch}_Step-{i:0>4}.pt",
)
logger.info(f'ITER 1st | EPOCH : {epoch} | STEP : {i:0>4} | >> Save checkpoint ...')
torch.cuda.empty_cache() # added
dist_util.barrier()
logger.info(f"SELCT TOP {_SELECT} Latents")
# min_topk_val, min_topk_idx = torch.topk(torch.tensor(min_loss_store), _SELECT) #previous
min_topk_val, min_topk_idx = torch.topk(torch.tensor(min_loss_store), _SELECT, largest=False)
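    # largest=False keeps the _SELECT latent pairs with the lowest loss values (the commented-out call above picked the highest)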
print("SELECT : ", min_topk_val, min_topk_idx)
# ------------------ Training 2nd --------------
selected_z_geo = sample_z_geo[min_topk_idx]
selected_z_tex = sample_z_tex[min_topk_idx]
selected_z_geo_chunks = torch.split(selected_z_geo, batch, dim=0)
selected_z_tex_chunks = torch.split(selected_z_tex, batch, dim=0)
min_loss = 1000
for epoch in range(iter_2nd):
for i, (z_geo_chunk, z_tex_chunk) in enumerate(zip(selected_z_geo_chunks, selected_z_tex_chunks)):
# training
ddp_net.train()
_, loss = ddp_net(z_tex_chunk, z_geo_chunk)
loss = loss.mean()
ddp_net.zero_grad()
loss.backward()
if gradient_clip_threshold == -1:
pass
else:
torch.nn.utils.clip_grad_norm_(net.generator_trainable.parameters(), gradient_clip_threshold)
            g_optim.step()
            logger.info(f'ITER 2nd | EPOCH : {epoch} | STEP : {i:0>4} | LOSS : {loss:.5f}')
# evaluation & save results | save checkpoints
with dist_util.synchronized_ops():
if rank == 0:
if (i == len(selected_z_geo_chunks) - 1) and (epoch == iter_2nd - 1):
torch.save(
{
"g_ema": net.generator_trainable.state_dict(),
"g_optim": g_optim.state_dict(),
},
f"{ckpt_dir}/latest.pt",
)
if i % output_interval == 0:
ddp_net.eval()
with torch.no_grad():
sampled_dst, _ = generate_custom(
net.generator_trainable,
fixed_z_tex, fixed_z_geo, use_mapping=True, mode='layer', camera=eval_camera
)
rgb = sampled_dst[:, :-1]
mask = sampled_dst[:, -1:]
bg = torch.ones(rgb.shape, device=device)
bg *= 0.0001 # for better background
new_dst = rgb*mask + bg*(1-mask)
save_image(
new_dst,
os.path.join(sample_dir, f"Iter2nd_Epoch-{epoch}_Step-{i:0>4}.png"),
nrow=grid_rows,
normalize=True,
range=(-1, 1),
)
logger.info(f'ITER 2nd | EPOCH : {epoch} | STEP : {i:0>4} | >> Save images ...')
if i % save_interval == 0:
if not args.suppress:
torch.save(
{
"g_ema": net.generator_trainable.state_dict(),
"g_optim": g_optim.state_dict(),
},
f"{ckpt_dir}/Iter2nd_Epoch-{epoch}_Step-{i:0>4}.pt",
)
logger.info(f'ITER 2nd | EPOCH : {epoch} | STEP : {i:0>4} | >> Save checkpoint ...')
if loss < min_loss:
min_loss = loss
torch.save(
{
"g_ema": net.generator_trainable.state_dict(),
"g_optim": g_optim.state_dict(),
},
f"{ckpt_dir}/best.pt",
)
torch.cuda.empty_cache()
dist_util.barrier()
logger.info("TRAINING DONE ...")
# Check final results
with dist_util.synchronized_ops():
if rank == 0:
net.eval()
with torch.no_grad():
last_z_geo = torch.randn(n_vis, z_dim, device=device)
last_z_tex = torch.randn(n_vis, z_dim, device=device)
sampled_dst, _ = generate_custom(
net.generator_trainable,
last_z_tex, last_z_geo, use_mapping=True, mode='layer', camera=eval_camera
)
save_image(
sampled_dst,
os.path.join(sample_dir, "params_latest_images.png"),
nrow=grid_rows,
normalize=True,
range=(-1, 1),
)
logger.info("FINISH !")
def launch_training(args): # Multiprocessing spawning function
# Load config and parse the number of GPUs.
if args.pipe:
config = yaml.safe_load(sys.stdin)
else:
with open(args.config_path, 'r') as f:
config = yaml.safe_load(f)
gpus = config['GLOBAL']['gpus']
# In case of single GPU, directly call the training function.
if gpus == 1:
subprocess_fn(0, config, args, None)
return
# Otherwise, launch processes.
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn', force=True)
with tempfile.TemporaryDirectory() as temp_dir:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(config, args, temp_dir), nprocs=gpus)
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', type=str, default='experiments/default_dist.yaml')
parser.add_argument('--name', type=str, default='default_dist')
parser.add_argument('--pipe', action='store_true', help='read config from stdin instead of file')
parser.add_argument('--suppress', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
launch_training(parse_args())
|
studio-YAIVERSE/studio-YAIVERSE
|
train_nada.py
|
train_nada.py
|
py
| 13,226 |
python
|
en
|
code
| 20 |
github-code
|
6
|
69894764349
|
import sys
import random
class HashSolution:
def __init__(self, string):
# Store string input and length
self.string = string
self.string_len = len(string) + 1
# Initialize hash tables
self.hash_table_1 = [0] * self.string_len
self.hash_table_2 = [0] * self.string_len
# Initialize coef tables
self.coef_table_1 = [1] * self.string_len
self.coef_table_2 = [1] * self.string_len
# Initialize modulo primes and multiplier x
self.power = 10 ** 9
self.mod_1 = self.power + 7
self.mod_2 = self.power + 9
self.x = random.randint(1, self.power - 1)
# Initialize blank query inputs
self.a_index = -1
self.b_index = -1
self.sub_len = -1
# Precompute string hashes and coefficients for both modulos
self.precompute_hashes_coefs()
def precompute_hashes_coefs(self):
for i in range(1, self.string_len):
# Calculate the hash for each substring, growing in size from first letter
self.hash_table_1[i] = (self.x * self.hash_table_1[i-1] + ord(self.string[i-1])) % self.mod_1
self.hash_table_2[i] = (self.x * self.hash_table_2[i-1] + ord(self.string[i-1])) % self.mod_2
# Large number exponents are time-consuming. Solution is to store the modulo of the coefficient at each step
self.coef_table_1[i] = (self.coef_table_1[i-1] * self.x) % self.mod_1
self.coef_table_2[i] = (self.coef_table_2[i-1] * self.x) % self.mod_2
def query_input(self, a_i, b_i, length):
# Store query input values
self.a_index = a_i
self.b_index = b_i
self.sub_len = length
def calc_substring_hash(self, hash_table, prime, coef_table):
# Calculate the substring hash using some polynomial properties (i.e. by subtracting away common parts)
a_hash = hash_table[self.a_index + self.sub_len] - (coef_table[self.sub_len] * hash_table[self.a_index])
b_hash = hash_table[self.b_index + self.sub_len] - (coef_table[self.sub_len] * hash_table[self.b_index])
# Used to get a clean modulo answer in case of negative hash values
a_hash = (a_hash + prime) % prime
b_hash = (b_hash + prime) % prime
return a_hash == b_hash
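        # Identity used above: with prefix hashes h[i] = s[0]*x^(i-1) + ... + s[i-1] (mod p)
        # and coef_table[L] = x^L (mod p), the hash of the length-L substring starting at a
        # is h[a + L] - x^L * h[a] (mod p). check_substrings() only reports a match when this
        # value agrees under both moduli, making collisions extremely unlikely.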
def check_substrings(self):
# Use two different hash checks to ensure strings match (very low probability of collisions with two hashes)
if self.calc_substring_hash(self.hash_table_1, self.mod_1, self.coef_table_1):
if self.calc_substring_hash(self.hash_table_2, self.mod_2, self.coef_table_2):
print("Yes")
else:
print("No")
else:
print("No")
class SolverNaive:
def __init__(self, s):
self.s = s
def ask(self, a, b, l):
        return self.s[a:a+l] == self.s[b:b+l]
s = sys.stdin.readline()
q = int(sys.stdin.readline())
hashbrowns = HashSolution(s)
for _ in range(q):
a, b, l = map(int, sys.stdin.readline().split())
hashbrowns.query_input(a, b, l)
hashbrowns.check_substrings()
# naive_solver = SolverNaive(s)
# for _ in range(q):
# a, b, l = map(int, sys.stdin.readline().split())
# print("Yes" if naive_solver.ask(a, b, l) else "No")
|
gregh13/Data-Structures
|
Week3_hash_tables/4_substring_equality/substring_equality.py
|
substring_equality.py
|
py
| 2,938 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19214467121
|
from replit import clear
print("Welcome to the secret auction program!")
def highest_bidder(bid_record):
highest = 0
winner = ""
for bidder in bid_record:
bid_amount = bid_record[bidder]
if bid_amount > highest:
highest = bid_amount
winner = bidder
print(f"The winner is {winner} with a bid of ${highest}")
mapp = {}
restart = True
while restart:
name = input("What is your name?: ")
bid = int(input("What is your bid?: $"))
mapp[name] = bid
    other_bidders = input("Are there any other bidders? Type 'Yes' or 'No': ").lower()
if other_bidders == "yes":
restart = True
clear()
else:
restart = False
highest_bidder(mapp)
|
Iyemizee/Secret_Auction_Project
|
main.py
|
main.py
|
py
| 678 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24293515303
|
def is_valid(board, row):
    # board[i] holds the row of the queen placed in column i;
    # the new queen would go in column len(board).
    if row in board:
        # another queen already occupies this row
        return False
    column = len(board)
    for occupied_column, occupied_row in enumerate(board):
        if abs(occupied_row - row) == abs(occupied_column - column):
            # the new queen shares a diagonal with an existing queen
            return False
    return True
def n_queens(n, board=[]):
if n == len(board):
return board
for row in range(n):
if is_valid(board, row):
res = n_queens(n, board + [row])
if res:
return res
return None
def main():
res = n_queens(5)
result = []
for row, col in enumerate(res):
result.append((row,col))
assert result == [(0, 0), (1, 2), (2, 4), (3, 1), (4, 3)]
# Q . . . .
# . . . Q .
# . Q . . .
# . . . . Q
# . . Q . .
if __name__ == '__main__':
main()
|
ckallum/Daily-Interview-Pro
|
solutions/N-Queens.py
|
N-Queens.py
|
py
| 789 |
python
|
en
|
code
| 16 |
github-code
|
6
|
42786347897
|
import os
import math
import glob
import time
import random
import torch
from PIL import Image
from torch.utils import data
from torchvision.transforms import RandomCrop
import numpy as np
import core.io as io
import core.clip_utils as cu
import multiprocessing as mp
class CachedAVSource(data.Dataset):
def __init__(self):
# Cached data
self.entity_data = {}
self.speech_data = {}
self.entity_list = []
        #Reproducibility
random.seed(42)
np.random.seed(0)
def _postprocess_speech_label(self, speech_label):
speech_label = int(speech_label)
if speech_label == 2: # Remember 2 = SPEAKING_NOT_AUDIBLE
speech_label = 0
return speech_label
def _postprocess_entity_label(self, entity_label):
entity_label = int(entity_label)
if entity_label == 2: # Remember 2 = SPEAKING_NOT_AUDIBLE
entity_label = 0
return entity_label
def _cache_entity_data(self, csv_file_path):
entity_set = set()
csv_data = io.csv_to_list(csv_file_path)
csv_data.pop(0) # CSV header
for csv_row in csv_data:
video_id = csv_row[0]
entity_id = csv_row[-3]
timestamp = csv_row[1]
speech_label = self._postprocess_speech_label(csv_row[-2])
entity_label = self._postprocess_entity_label(csv_row[-2])
minimal_entity_data = (entity_id, timestamp, entity_label)
# Store minimal entity data
if video_id not in self.entity_data.keys():
self.entity_data[video_id] = {}
if entity_id not in self.entity_data[video_id].keys():
self.entity_data[video_id][entity_id] = []
entity_set.add((video_id, entity_id))
self.entity_data[video_id][entity_id].append(minimal_entity_data)
#Store speech meta-data
if video_id not in self.speech_data.keys():
self.speech_data[video_id] = {}
if timestamp not in self.speech_data[video_id].keys():
self.speech_data[video_id][timestamp] = speech_label
            #max operation yields whether anyone is speaking at this timestamp.
new_speech_label = max(self.speech_data[video_id][timestamp], speech_label)
self.speech_data[video_id][timestamp] = new_speech_label
return entity_set
def _cache_entity_data_forward(self, csv_file_path, target_video):
entity_list = list()
csv_data = io.csv_to_list(csv_file_path)
csv_data.pop(0) # CSV header
for csv_row in csv_data:
video_id = csv_row[0]
if video_id != target_video:
continue
entity_id = csv_row[-3]
timestamp = csv_row[1]
entity_label = self._postprocess_entity_label(csv_row[-2])
entity_list.append((video_id, entity_id, timestamp))
            minimal_entity_data = (entity_id, timestamp, entity_label)  # safe to ignore the label here
if video_id not in self.entity_data.keys():
self.entity_data[video_id] = {}
if entity_id not in self.entity_data[video_id].keys():
self.entity_data[video_id][entity_id] = []
self.entity_data[video_id][entity_id].append(minimal_entity_data)
return entity_list
def _entity_list_postprocessing(self, entity_set):
print('Initial', len(entity_set))
# filter out missing data on disk
all_disk_data = set(os.listdir(self.video_root))
for video_id, entity_id in entity_set.copy():
if entity_id not in all_disk_data:
entity_set.remove((video_id, entity_id))
print('Pruned not in disk', len(entity_set))
self.entity_list = sorted(list(entity_set))
class AudioVideoDatasetAuxLosses(CachedAVSource):
def __init__(self, audio_root, video_root, csv_file_path, clip_lenght,
target_size, video_transform=None, do_video_augment=False):
super().__init__()
# Data directories
self.audio_root = audio_root
self.video_root = video_root
# Post-processing
self.video_transform = video_transform
self.do_video_augment = do_video_augment
# Clip arguments
self.clip_lenght = clip_lenght
self.half_clip_length = math.floor(self.clip_lenght/2)
self.target_size = target_size
entity_set = self._cache_entity_data(csv_file_path)
self._entity_list_postprocessing(entity_set)
def __len__(self):
return int(len(self.entity_list)/1)
def __getitem__(self, index):
#Get meta-data
video_id, entity_id = self.entity_list[index]
entity_metadata = self.entity_data[video_id][entity_id]
audio_offset = float(entity_metadata[0][1])
mid_index = random.randint(0, len(entity_metadata)-1)
midone = entity_metadata[mid_index]
target = int(midone[-1])
target_audio = self.speech_data[video_id][midone[1]]
clip_meta_data = cu.generate_clip_meta(entity_metadata, mid_index,
self.half_clip_length)
video_data, audio_data = io.load_av_clip_from_metadata(clip_meta_data,
self.video_root, self.audio_root, audio_offset,
self.target_size)
if self.do_video_augment:
# random flip
if bool(random.getrandbits(1)):
video_data = [s.transpose(Image.FLIP_LEFT_RIGHT) for s in video_data]
# random crop
width, height = video_data[0].size
f = random.uniform(0.5, 1)
i, j, h, w = RandomCrop.get_params(video_data[0], output_size=(int(height*f), int(width*f)))
video_data = [s.crop(box=(j, i, w, h)) for s in video_data]
if self.video_transform is not None:
video_data = [self.video_transform(vd) for vd in video_data]
video_data = torch.cat(video_data, dim=0)
return (np.float32(audio_data), video_data), target, target_audio
class AudioVideoDatasetAuxLossesForwardPhase(CachedAVSource):
def __init__(self, target_video, audio_root, video_root, csv_file_path, clip_lenght,
target_size, video_transform=None, do_video_augment=False):
super().__init__()
# Data directories
self.audio_root = audio_root
self.video_root = video_root
# Post-processing
self.video_transform = video_transform
self.do_video_augment = do_video_augment
self.target_video = target_video
# Clip arguments
self.clip_lenght = clip_lenght
self.half_clip_length = math.floor(self.clip_lenght/2)
self.target_size = target_size
self.entity_list = self._cache_entity_data_forward(csv_file_path, self.target_video )
print('len(self.entity_list)', len(self.entity_list))
def _where_is_ts(self, entity_metadata, ts):
for idx, val in enumerate(entity_metadata):
if val[1] == ts:
return idx
raise Exception('time stamp not found')
def __len__(self):
return int(len(self.entity_list))
def __getitem__(self, index):
#Get meta-data
video_id, entity_id, ts = self.entity_list[index]
entity_metadata = self.entity_data[video_id][entity_id]
audio_offset = float(entity_metadata[0][1])
mid_index = self._where_is_ts(entity_metadata, ts)
midone = entity_metadata[mid_index]
gt = midone[-1]
clip_meta_data = cu.generate_clip_meta(entity_metadata, mid_index,
self.half_clip_length)
video_data, audio_data = io.load_av_clip_from_metadata(clip_meta_data,
self.video_root, self.audio_root, audio_offset,
self.target_size)
if self.do_video_augment:
# random flip
if bool(random.getrandbits(1)):
video_data = [s.transpose(Image.FLIP_LEFT_RIGHT) for s in video_data]
# random crop
width, height = video_data[0].size
f = random.uniform(0.5, 1)
i, j, h, w = RandomCrop.get_params(video_data[0], output_size=(int(height*f), int(width*f)))
video_data = [s.crop(box=(j, i, w, h)) for s in video_data]
if self.video_transform is not None:
video_data = [self.video_transform(vd) for vd in video_data]
video_data = torch.cat(video_data, dim=0)
return np.float32(audio_data), video_data, video_id, ts, entity_id, gt
#ASC Datasets
class ContextualDataset(data.Dataset):
def get_speaker_context(self, ts_to_entity, video_id, target_entity_id,
center_ts, candidate_speakers):
context_entities = list(ts_to_entity[video_id][center_ts])
random.shuffle(context_entities)
context_entities.remove(target_entity_id)
        if not context_entities:  # no other entities share this timestamp
            context_entities.insert(0, target_entity_id)  # make sure the target is at index 0
            while len(context_entities) < candidate_speakers:
                context_entities.append(random.choice(context_entities))
        elif len(context_entities) < candidate_speakers:
            context_entities.insert(0, target_entity_id)  # make sure the target is at index 0
            while len(context_entities) < candidate_speakers:
                context_entities.append(random.choice(context_entities[1:]))
        else:
            context_entities.insert(0, target_entity_id)  # make sure the target is at index 0
            context_entities = context_entities[:candidate_speakers]
return context_entities
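    # get_speaker_context always returns exactly `candidate_speakers` entity ids with the
    # target entity at index 0; when fewer context speakers are available, the list is
    # padded by sampling (with replacement) from the available context entities.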
def _decode_feature_data_from_csv(self, feature_data):
feature_data = feature_data[1:-1]
feature_data = feature_data.split(',')
return np.asarray([float(fd) for fd in feature_data])
def get_time_context(self, entity_data, video_id, target_entity_id,
center_ts, half_time_length, stride):
all_ts = list(entity_data[video_id][target_entity_id].keys())
center_ts_idx = all_ts.index(str(center_ts))
start = center_ts_idx-(half_time_length*stride)
end = center_ts_idx+((half_time_length+1)*stride)
selected_ts_idx = list(range(start, end, stride))
selected_ts = []
for idx in selected_ts_idx:
if idx < 0:
idx = 0
if idx >= len(all_ts):
idx = len(all_ts)-1
selected_ts.append(all_ts[idx])
return selected_ts
def get_time_indexed_feature(self, video_id, entity_id, selectd_ts):
time_features = []
for ts in selectd_ts:
time_features.append(self.entity_data[video_id][entity_id][ts][0])
return np.asarray(time_features)
def _cache_feature_file(self, csv_file):
entity_data = {}
feature_list = []
ts_to_entity = {}
print('load feature data', csv_file)
csv_data = io.csv_to_list(csv_file)
for csv_row in csv_data:
video_id = csv_row[0]
ts = csv_row[1]
entity_id = csv_row[2]
features = self._decode_feature_data_from_csv(csv_row[-1])
label = int(float(csv_row[3]))
# entity_data
if video_id not in entity_data.keys():
entity_data[video_id] = {}
if entity_id not in entity_data[video_id].keys():
entity_data[video_id][entity_id] = {}
if ts not in entity_data[video_id][entity_id].keys():
entity_data[video_id][entity_id][ts] = []
entity_data[video_id][entity_id][ts] = (features, label)
feature_list.append((video_id, entity_id, ts))
# ts_to_entity
if video_id not in ts_to_entity.keys():
ts_to_entity[video_id] = {}
if ts not in ts_to_entity[video_id].keys():
ts_to_entity[video_id][ts] = []
ts_to_entity[video_id][ts].append(entity_id)
print('loaded ', len(feature_list), ' features')
return entity_data, feature_list, ts_to_entity
class ASCFeaturesDataset(ContextualDataset):
def __init__(self, csv_file_path, time_lenght, time_stride,
candidate_speakers):
# Space config
self.time_lenght = time_lenght
self.time_stride = time_stride
self.candidate_speakers = candidate_speakers
self.half_time_length = math.floor(self.time_lenght/2)
# In memory data
self.feature_list = []
self.ts_to_entity = {}
self.entity_data = {}
# Load metadata
self._cache_feature_data(csv_file_path)
# Parallel load of feature files
def _cache_feature_data(self, dataset_dir):
pool = mp.Pool(int(mp.cpu_count()/2))
files = glob.glob(dataset_dir)
results = pool.map(self._cache_feature_file, files)
pool.close()
for r_set in results:
e_data, f_list, ts_ent = r_set
print('unpack ', len(f_list))
self.entity_data.update(e_data)
self.feature_list.extend(f_list)
self.ts_to_entity.update(ts_ent)
def __len__(self):
return int(len(self.feature_list))
def __getitem__(self, index):
video_id, target_entity_id, center_ts = self.feature_list[index]
entity_context = self.get_speaker_context(self.ts_to_entity, video_id,
target_entity_id, center_ts,
self.candidate_speakers)
target = self.entity_data[video_id][target_entity_id][center_ts][1]
feature_set = np.zeros((self.candidate_speakers, self.time_lenght, 1024))
for idx, ctx_entity in enumerate(entity_context):
time_context = self.get_time_context(self.entity_data,
video_id,
ctx_entity, center_ts,
self.half_time_length,
self.time_stride)
features = self.get_time_indexed_feature(video_id, ctx_entity,
time_context)
feature_set[idx, ...] = features
feature_set = np.asarray(feature_set)
feature_set = np.swapaxes(feature_set, 0, 2)
return np.float32(feature_set), target
class ASCFeaturesDatasetForwardPhase(ContextualDataset):
def __init__(self, csv_file_path, time_lenght, time_stride,
candidate_speakers):
# Space config
self.time_lenght = time_lenght
self.time_stride = time_stride
self.candidate_speakers = candidate_speakers
self.half_time_length = math.floor(self.time_lenght/2)
# In memory data
self.feature_list = []
self.ts_to_entity = {}
self.entity_data = {}
# Single video metdadata
self.entity_data, self.feature_list, self.ts_to_entity = self._cache_feature_file(csv_file_path)
def __len__(self):
return int(len(self.feature_list))
def __getitem__(self, index):
video_id, target_entity_id, center_ts = self.feature_list[index]
entity_context = self.get_speaker_context(self.ts_to_entity, video_id,
target_entity_id, center_ts,
self.candidate_speakers)
feature_set = np.zeros((self.candidate_speakers, self.time_lenght, 1024))
for idx, ctx_entity in enumerate(entity_context):
time_context = self.get_time_context(self.entity_data,
video_id,
ctx_entity, center_ts,
self.half_time_length,
self.time_stride)
features = self.get_time_indexed_feature(video_id, ctx_entity,
time_context)
feature_set[idx, ...] = features
feature_set = np.asarray(feature_set)
feature_set = np.swapaxes(feature_set, 0, 2)
return np.float32(feature_set), video_id, center_ts, target_entity_id
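# Hedged usage sketch (added for illustration, not part of the original file): how these
# datasets would typically be wrapped in a PyTorch DataLoader, assuming ContextualDataset
# ultimately derives from torch.utils.data.Dataset. The glob pattern and the hyperparameter
# values below are illustrative assumptions, not values taken from the original project.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    train_dataset = ASCFeaturesDataset('features/train/*.csv',  # hypothetical glob
                                       time_lenght=11, time_stride=1,
                                       candidate_speakers=3)
    loader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=4)
    for features, labels in loader:
        # features has shape (batch, 1024, time_lenght, candidate_speakers)
        # after the axis swap performed in __getitem__
        break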
|
fuankarion/active-speakers-context
|
core/dataset.py
|
dataset.py
|
py
| 16,534 |
python
|
en
|
code
| 52 |
github-code
|
6
|
9272119407
|
import os
import numpy as np
import spacy
import re
import json
import pyttsx3  # TODO: consider replacing this engine with a librosa-based pipeline
import librosa
from fastdtw import fastdtw
from gtts import gTTS
from scipy.spatial.distance import euclidean
import shutil
import config
def create_folder_if_not_exists(folder_path):
if not os.path.exists(folder_path):
os.makedirs(folder_path)
def move_files(source_filepath, destination_directory): #note that one is filepath and other is directory
shutil.move(source_filepath, destination_directory)
# Optional: Check if the file was moved successfully
if os.path.exists(source_filepath):
print("File move failed.")
def return_each_files_path(subfolder_path, return_type='full path'):
if return_type == 'full path':
for root, _, files in os.walk(subfolder_path):
for file in files:
yield os.path.join(root, file)
elif return_type == 'filename':
for root, _, files in os.walk(subfolder_path):
for file in files:
yield file
# Usage example:
# subfolder_path = '/path/to/your/subfolder'
# for path in return_each_files_path(subfolder_path):
# print(path)
def extract_data_from_json(file_path):
try:
with open(file_path, "r") as file:
data = json.load(file)
return data
except FileNotFoundError:
print(f"File '{file_path}' not found.")
except json.JSONDecodeError as e:
print(f"Error decoding JSON: {e}")
except ValueError as e:
print(e,"file not found")
except Exception as e:
print(f"An error occurred: {e}")
def filter_entities(doc, label):
filtered_entities = [ent.text for ent in doc.ents if ent.label_ == label]
return filtered_entities
def save_as_json(data, file_name, subdirectory):
"""
Save a dictionary or list as a JSON file in a subdirectory.
If a file with the same name exists, append the data to it.
Args:
- data: The dictionary or list to be saved.
- file_name: The name of the JSON file (without the .json extension).
- subdirectory: The name of the subdirectory where the JSON file will be saved.
Returns:
- The full path to the saved JSON file if successful, or None if there was an error.
"""
try:
# Create the subdirectory if it doesn't exist
if not os.path.exists(subdirectory):
os.makedirs(subdirectory)
# Construct the full file path
file_path = os.path.join(subdirectory, f"{file_name}.json")
# Initialize existing_data as an empty list if the file doesn't exist
existing_data = []
# If the file already exists, load its contents
if os.path.exists(file_path):
with open(file_path, 'r') as existing_file:
existing_data = json.load(existing_file)
# If data is a dictionary, append it as is; if it's a list, extend the list
if isinstance(data, dict):
existing_data.update(data)
elif isinstance(data, list):
existing_data.extend(data)
# Write the combined data to the JSON file
with open(file_path, 'w') as json_file:
json.dump(existing_data, json_file, indent=4)
print(f"Data saved or appended to {file_path}")
return file_path # Return the saved file path
except Exception as e:
print(f"An error occurred in saving json: {str(e)}")
print(f"An error occurred in saving json: {str(e)}")
return None
# # Example usage:
# data_to_append = {"city": "Los Angeles", "zipcode": "90001"}
# subdirectory_name = "data_folder"
# # Save or append the data to a JSON file in the subdirectory and get the saved path
# saved_path = save_as_json(data_to_append, "person_data", subdirectory_name)
# if saved_path:
# print(f"Data saved or appended at: {saved_path}")
# else:
# print("Failed to save or append data.")
class TextProcessor:
def __init__(self, chunksize = 50000):
self.chunksize = chunksize
self.nlp = spacy.load("en_core_web_lg",disable=["tagger", "parser", "textcat"])
def read_file(self):
with open(self.file_path, "r",errors='ignore') as file:
file_contents = file.read()
return file_contents
    def clean_text(self, text):
        # Replace newlines with sentence breaks, expand '&', and strip everything
        # except letters and basic punctuation.
        cleaned_text = re.sub(r'\n', '. ', text)
        #cleaned_text = re.sub(r'[^\n]', '. ', text)
        cleaned_text = re.sub(r'[&]', ' and ', cleaned_text)
        cleaned_text = re.sub(r'[^a-zA-Z.();,?\'\"]', ' ', cleaned_text)
        return cleaned_text
def tokenize_text(self, text):
doc = self.nlp(text)
return doc
def process_file(self,file_path):
self.file_path= file_path
file_contents = self.read_file()
cleaned_text = self.clean_text(file_contents)
splitted_text = cleaned_text[:50000]
#return self.tokenize_text(splitted_text)
self.chunksize = 50000
# # Split the document into chunks of 50,000 characters or less
# print("splitting book into chunks..")
# book_chunks = [cleaned_text[i:i + self.chunksize] for i in range(0, len(cleaned_text), self.chunksize)]
# print("tokenising each chunks..")
# doc_of_each_chunk=[self.tokenize_text(each_chunk) for each_chunk in book_chunks]
# return doc_of_each_chunk
for i in range(0, len(cleaned_text), self.chunksize):
text_chunk = cleaned_text[i:i + self.chunksize]
yield text_chunk
class AudioConverter():
def __init__(self, word, engine, save_directory= config.paths["save_directory"]):
word = word.lower()
file_path = save_directory+os.sep+ f"{word}{config.audio_format}"
self.file_path = file_path
self.word = word
self.engine = engine
def process(self):
if self.engine == "gtts":
try: #consider this as an additional filter for collected names
tts = gTTS(self.word)
tts.save(self.file_path)
except Exception as e:
print(f"unable to save '{self.word}' due to : {str(e)}")
        elif self.engine == "pyttsx3":  # NOTE: may be removed later; files are roughly ten times larger than gTTS output, but generation is faster and works offline
            engine = pyttsx3.init()
            engine.save_to_file(self.word, self.file_path)
            engine.runAndWait()
        print("saved file path= ", self.file_path)
return self.file_path
class WordsComparer():
def __init__(self, audio1_path='',audio2_path='') -> None:
#print("audio path= ", audio1_path)
self.audio1_path = audio1_path
self.audio2_path= audio2_path
#self.file_name = f"{self.word}.wav"
# def convert_to_audio(self, word, engine):
# if engine == "gtts":
# file_path = os.path.join('data_christian','audio_data','doc_audio_data',f"{word}.wav")
# try: #consider this as an additional filter for collected names
# tts = gTTS(word)
# tts.save(file_path)
# except Exception as e:
# print(f"unable to save '{word}' due to : {str(e)}")
# elif engine == "pyttsx3":#note that this have to be deleted lately because the storage is high(ten times larger than gtts, but quicker and offline)
# file_name = f"{word}.wav"
# engine = pyttsx3.init()
# engine.save_to_file(word, file_name)
# engine.runAndWait()
# return file_path
def audio_to_mfcc(self, audio_file_path):
# Load the audio files #r"data\audio_data\sample_audio_data\output_audio1.wav"
audio, sr_doc = librosa.load(audio_file_path, sr= 24000)
# Extract MFCC features
mfcc_doc = librosa.feature.mfcc(y= audio, sr= sr_doc)
# Transpose MFCCs for compatibility with DTW
mfcc_doc = mfcc_doc.T
return mfcc_doc
    def compare_two_MFCC(self, mfcc1, mfcc2):  # DTW distance between two MFCC sequences, normalised by the shorter length
def calculate_distance(mfcc1,mfcc2):
if mfcc1.shape[1] != mfcc2.shape[1]:
raise ValueError("Number of features (columns) must be the same for both sequences")
# Calculate the DTW distance using the fastdtw function
distance, _ = fastdtw(mfcc1, mfcc2)
# Calculate DTW distance and path
#print("mfcc1 shape= ",mfcc1.shape[1],"distance= ",distance)
#print(distance)
return distance
distance = calculate_distance(mfcc1,mfcc2)
#print(distance)
# Normalize the distance (optional)
normalised_distance = distance / min(len(mfcc1) , len(mfcc2))
normalised_distance = round(normalised_distance,2)
return normalised_distance
# def word_to_mfcc(self, word):
# audio_converter = AudioConverter(word,'gtts')
# file_path = audio_converter.process()
# mfcc_file = self.audio_to_mfcc(file_path)
# #os.remove(file_name)
# return mfcc_file
def process(self):
mfcc1 = self.audio_to_mfcc(self.audio1_path)
mfcc2 = self.audio_to_mfcc(self.audio2_path)
# mfcc1= self.word_to_mfcc(self.word)
# mfcc2= self.word_to_mfcc(self.word2)
distance= self.compare_two_MFCC(mfcc1=mfcc1, mfcc2=mfcc2)
return distance
if __name__ == "__main__":
file_path1 = AudioConverter(word='k',engine='gtts').process()
file_path2 = AudioConverter(word='vanakkam',engine='gtts').process()
distance = WordsComparer(file_path1,file_path2).process()
print(distance,file_path2)
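# Hedged sketch (added for illustration, not part of the original module): the same
# DTW-based comparison used by WordsComparer, but on two short synthetic feature
# sequences so the normalisation step can be seen without generating any audio.
# All names below are local to this example; call _dtw_demo() to try it.
def _dtw_demo():
    seq_a = np.random.rand(40, 20)   # 40 frames, 20 MFCC-like coefficients
    seq_b = np.random.rand(55, 20)   # a different number of frames is fine for DTW
    distance, _path = fastdtw(seq_a, seq_b)
    # Normalise by the shorter sequence length, mirroring compare_two_MFCC above
    print(round(distance / min(len(seq_a), len(seq_b)), 2))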
|
RamSankarTheDeveloper/TeenyTinyTitleTrove
|
utils.py
|
utils.py
|
py
| 10,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72333132669
|
"""
@File : AdaBoost.py
@Time : 2020-05-26
@Author : BobTsang
@Software: PyCharm
@Email : [email protected]
"""
# This time the breast cancer dataset is used for a binary classification task, because the
# iris dataset is too small and has too few features to exercise a boosting tree properly.
# Data size: 596x31
# time: 62s
import pandas as pd
import numpy as np
from sklearn import datasets
import random
import time
# Shuffle the data by hand instead of calling sklearn's shuffle
def Random_number(data_size):
    """
    Uses shuffle() to permute a list of integers from 0 to the dataset size.
    Without a fixed seed every run would produce a different split and therefore
    different results; setting a random seed here keeps the split reproducible.
    :param data_size: size of the dataset
    :return: a shuffled list of indices
    """
num_set = []
random.seed(1)
for i in range(data_size):
num_set.append(i)
random.shuffle(num_set)
return num_set
def Train_test_split(data_set, target_data, size=0.2):
    """
    Split the dataset; by default `size` (here 0.2) of the data is held out as the test set.
    :param data_set: feature data
    :param target_data: label data
    :param size: fraction of the data used for the test set
    :return: training data, test data, training labels, test labels
    """
    # Number of samples in the training set
    train_size = int((1 - size) * len(data_set))
    # Get the shuffled indices
    data_index = Random_number(len(data_set))
    # Split the dataset (X is the data, y the labels), indexing with the shuffled indices
x_train = data_set[data_index[:train_size]]
x_test = data_set[data_index[train_size:]]
y_train = target_data[data_index[:train_size]]
y_test = target_data[data_index[train_size:]]
return x_train, x_test, y_train, y_test
def Caculation_error_Gx(x_train, y_train, n, div, rule, D):
    """
    Compute the weighted classification error of a decision stump.
    :param x_train: training data
    :param y_train: training labels
    :param n: index of the feature to split on
    :param div: split point (threshold)
    :param rule: which side of the split is labelled positive
    :param D: weight distribution over the samples
    :return: predictions, weighted classification error
    """
    # Initialise the classification error to 0
    error = 0
    # Pull out column n of the training matrix as an array; the other columns are not
    # needed, and operating on the full training set directly would be slow
    x = x_train[:, n]
    # Convert the labels to an array as well; converting x and y is purely for speed,
    # and the measured gain over operating on the originals is large
    y = y_train
    predict = []
    # The labels assigned to the "less than" and "greater than" sides depend on the rule
    if rule == 'LisOne': L = 1; H = -1
    else: L = -1; H = 1
    # Iterate over all samples for this feature
    for i in range(x_train.shape[0]):
        if x[i] < div:
            # If the value is below the split point, predict L:
            # L is 1 when "below div" is defined as the positive side,
            # and -1 when it is defined as the negative side
            predict.append(L)
            # If the prediction is wrong, add this sample's weight to the error (Eq. 8.1)
            if y[i] != L:
                error += D[i]
        elif x[i] >= div:
            # Same idea as above
            predict.append(H)
            if y[i] != H:
                error += D[i]
    # Return the predictions and the classification error e.
    # The predictions are needed later: step 4 of Algorithm 8.1 (Eq. 8.4) contains a Gx
    # inside the exp term, and it is used there to update D
    return np.array(predict), error
def CreateSingleBoostingTree(x_train, y_train, D):
    """
    Create a single-layer boosting tree (decision stump).
    :param x_train: training data
    :param y_train: training labels
    :param D: weight distribution over the samples
    :return: the single-layer boosting tree
    """
    # Number of samples and number of features
    m, n = np.shape(x_train)
    # Dictionary holding the parameters of the current layer;
    # it can be thought of as representing one layer of the boosting tree
    singleBoostTree = {}
    # Initialise the classification error (see Algorithm 8.1, step (2)(b)).
    # The error rate can be at most 100%, so it is initialised to 1
    singleBoostTree['error'] = 1
    # Iterate over every feature to find the most suitable one to split on
    for i in range(n):
        # The features have been binarised to 0/1, so the candidate split points are -0.5, 0.5 and 1.5
        for div in [-0.5, 0.5, 1.5]:
            # When splitting a single feature into positive/negative there are two cases:
            # values below the threshold may be 1 and those above -1, or the other way round,
            # so both cases have to be tried while searching for the best stump.
            # LisOne: "low is one", values below the threshold are 1
            # HisOne: "high is one", values above the threshold are 1
            for rule in ['LisOne', 'HisOne']:
                # Split on feature i at value div and get the predictions and error for this setting
                Gx, error = Caculation_error_Gx(x_train, y_train, i, div, rule, D)
                # If the error e is smaller than the current minimum, keep it as the new minimum
                if error < singleBoostTree['error']:
                    singleBoostTree['error'] = error
                    # Also store the best split point, rule, predictions and feature index,
                    # which are needed for updating D and for later predictions
                    singleBoostTree['div'] = div
                    singleBoostTree['rule'] = rule
                    singleBoostTree['Gx'] = Gx
                    singleBoostTree['feature'] = i
    # Return the single-layer boosting tree
    return singleBoostTree
def CreateBoostingTree(x_train, y_train, treeNum = 50):
    """
    Create the boosting tree, following Algorithm 8.1 in
    Section 8.1.2 "The AdaBoost algorithm".
    :param x_train: training data
    :param y_train: training labels
    :param treeNum: number of layers (weak learners)
    :return: the boosting tree
    """
    # Convert the data and labels to array form
    trainDataArr = np.array(x_train)
    trainLabelArr = np.array(y_train)
    # Running combined prediction, updated after every added layer
    # (kept as a numpy array so the elementwise accumulation below works)
    finalpredict = np.zeros(len(trainLabelArr))
    # Number of training samples and number of features
    m, n = np.shape(trainDataArr)
    # Initialise D to 1/N, as in step (1) of Algorithm 8.1
    D = [1 / m] * m
    # List of boosting-tree layers, one entry per layer
    tree = []
    # Build the boosting tree layer by layer
    for i in range(treeNum):
        # Get the boosting tree for the current layer
        curTree = CreateSingleBoostingTree(trainDataArr, trainLabelArr, D)
        # Compute this layer's alpha according to Eq. 8.2
        alpha = 1 / 2 * np.log((1 - curTree['error']) / curTree['error'])
        # Predictions of the current layer, used next to update D
        Gx = curTree['Gx']
        # Update D according to Eq. 8.4.
        # The book's formula updates one weight w at a time; looping over every single w
        # would be tedious (not slow, just verbose), so the update is written in
        # vectorised form and all weights are updated in one expression.
        # np.multiply(trainLabelArr, Gx): the y*Gm(x) inside the exp, a row vector of yi*Gm(xi)
        # np.exp(-1 * alpha * np.multiply(trainLabelArr, Gx)): that vector multiplied by
        # -alpha_m and exponentiated, exactly as in the book except that here it is a
        # whole vector rather than one scalar at a time.
        # D is a row vector playing the role of w_mi, and the sum of the updated weights
        # is the normaliser Z_m. The book produces each new w separately and collects
        # them into the new D; here the whole vector is produced at once, with the same result.
        D = np.multiply(D, np.exp(-1 * alpha * np.multiply(trainLabelArr, Gx)))
        D = D / np.sum(D)  # normalise by Z_m (the sum of the updated weights) so D stays a distribution
        # Store alpha with this layer's parameters; it is needed at prediction time
        curTree['alpha'] = alpha
        # Append the current layer to the boosting tree.
        tree.append(curTree)
        # ----- the code below is only for monitoring and can be removed -------------
        # Following Eq. 8.6, add this layer times alpha to get the current combined prediction
        finalpredict += alpha * Gx
        # Error between the current combined prediction and the true labels
        error = sum([1 for i in range(len(x_train)) if np.sign(finalpredict[i]) != trainLabelArr[i]])
        # Current final error rate
        finalError = error / len(x_train)
        # If the error is 0, stop early; there is no point in continuing
        if finalError == 0:
            return tree
        # Print some progress information
        print('iter:%d:%d, single error:%.4f, final error:%.4f'%(i, treeNum, curTree['error'], finalError))
    # Return the whole boosting tree
return tree
def predict(x, div, rule, feature):
"""
输出单层的预测结果
:param x:预测样本
:param div:划分点
:param rule:划分规则
:param feature:进行操作的特征
:return:
"""
#依据划分规则定义小于及大于划分点的标签
if rule == 'LisOne':
L = 1; H = -1
else:
L = -1; H = 1
#判断预测结果
if x[feature] < div:
return L
else:
return H
def model_test(x_test, y_test, tree):
"""
测试模型
:param x_test:测试数据集
:param y_test:测试标签集
:param tree:提升树
:return:准确率
"""
# 错误率计数值
errorCnt = 0
# 遍历每一个测试样本
for i in range(len(x_test)):
# 预测结果值,初始为0
res = 0
# 依据算法8.1式8.6
# 预测式子是一个求和式,对于每一层的结果都要进行一次累加
# 遍历每层的树
for curTree in tree:
# 获取该层参数
div = curTree['div']
rule = curTree['rule']
feature = curTree['feature']
alpha = curTree['alpha']
# 将当前层结果加入预测中
res += alpha * predict(x_test[i], div, rule, feature)
#预测结果取sign值,如果大于0 sign为1,反之为0
if np.sign(res) != y_test[i]:
errorCnt += 1
#返回准确率
return float(1 - errorCnt / len(x_test))
# Binarise all the data (labels excluded)
def find_init_div(data):
    inMat = data.copy()
    # Mean of every column
    # (axis=0 averages over the rows of each column, giving one mean per column)
    inMeans = np.mean(inMat, axis=0)
    # Centre every feature on its column mean
inMat = inMat - inMeans
inMat = inMat.applymap(lambda x: int(0) if x <= 0 else 1)
inMat = np.array(inMat)
return inMat
if __name__ == '__main__':
    # Start time
    start = time.time()
    # Load the data that will be split into training and test sets
    breastcancer = datasets.load_breast_cancer()
    # print(breastcancer)
    # Build a DataFrame from the data
    df = pd.DataFrame(breastcancer.data, columns=breastcancer.feature_names)
    # Append a new 'label' column holding breastcancer.target (a Series)
    df['label'] = breastcancer.target
    print(df)
    # Find the initial thresholds of the training data and binarise it: values above the
    # threshold become 1 and values below become 0, which simplifies later computation
    # Shuffle the data
    data = find_init_div(df.iloc[:, :-1])
    print(data)
    target = np.array(df.iloc[:, -1])
    print(target)
    x_train, x_test, y_train, y_test = Train_test_split(data, target)
    # print(x_train)
    # print(x_test)
    # Turn this into a binary classification task:
    # replace the 0/1 labels with -1/1
    y_train = np.array([int(1) if i == 1 else int(-1) for i in y_train])
    # print(x, y)
    # replace the 0/1 labels with -1/1
    y_test = np.array([int(1) if i == 1 else int(-1) for i in y_test])
    # Build the boosting tree
    print('start init train')
    tree = CreateBoostingTree(x_train, y_train, 100)
    # Test
    print('start to test')
    accuracy = model_test(x_test, y_test, tree)
    print('the accuracy is:%.4f' % (accuracy * 100), '%')
    print(accuracy)
    # End time
end = time.time()
print('time span:', end - start)
# the accuracy is:97.0760 %
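# Illustrative sketch (added for clarity, not part of the original script): the AdaBoost
# weight update used in CreateBoostingTree, worked on a tiny made-up example so the
# vectorised D update and the Z_m normalisation are easy to follow. All values are toy numbers.
def _weight_update_demo():
    y = np.array([1, 1, -1, -1])              # true labels
    Gx = np.array([1, -1, -1, -1])            # weak learner predictions (one mistake)
    D = np.array([0.25, 0.25, 0.25, 0.25])    # current weight distribution
    error = np.sum(D[y != Gx])                # weighted error e_m = 0.25
    alpha = 0.5 * np.log((1 - error) / error) # ~0.549, Eq. 8.2
    new_D = D * np.exp(-alpha * y * Gx)       # Eq. 8.4, unnormalised
    new_D /= new_D.sum()                      # divide by Z_m so the weights sum to 1
    print(alpha, new_D)                       # the misclassified sample ends up with weight 0.5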
|
BobTsang1995/StatisticalLearningMethod-python-
|
AdaBoost.py
|
AdaBoost.py
|
py
| 12,463 |
python
|
zh
|
code
| 2 |
github-code
|
6
|
70379694589
|
#!/usr/bin/python
import pygtk
pygtk.require('2.0')
import gtk
from gtk import gdk
def destroy(widget, data=None):
gtk.main_quit()
w = gtk.Window(gtk.WINDOW_TOPLEVEL)
w.set_title("Hello from python")
w.connect('destroy', destroy)
v = gtk.TextView()
v.set_editable(False)
b = v.get_buffer()
w.add(v)
w.show_all()
b.insert_at_cursor("Hello")
b.insert_pixbuf(b.get_end_iter(), gdk.pixbuf_new_from_file_at_size("1F389.png", -1, 16))
gtk.main()
|
carlosmn/buffer-image
|
pygtk-image.py
|
pygtk-image.py
|
py
| 451 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16471014091
|
# Solution comment: Cheating. Found the list of periods on OEIS and counted the
# odds from that list.
from math import sqrt
# First determine how many non squares there are < 10000.
count = 0
for i in range(2, 10000+1):
if abs(sqrt(i) - int(sqrt(i))) > 1e-6:
count += 1
# Read count periods from the number file, counting how many odds.
with open('numbers.txt', 'r') as f:
f.readline() # Skip first line.
res = 0
for i, line in enumerate(f):
if i == count:
break
*_, p = [int(i) for i in line.split()]
res += (p & 1)
print(res)
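# Hedged alternative sketch (not part of the original solution): compute the period of the
# continued fraction of sqrt(n) directly with the standard recurrence
# m' = d*a - m, d' = (n - m'^2) / d, a' = (a0 + m') // d', stopping when a' == 2*a0,
# instead of reading the precomputed periods from numbers.txt.
def cf_period(n):
    a0 = int(sqrt(n))
    if a0 * a0 == n:
        return 0                      # perfect square: no periodic part
    m, d, a = 0, 1, a0
    period = 0
    while a != 2 * a0:
        m = d * a - m
        d = (n - m * m) // d
        a = (a0 + m) // d
        period += 1
    return period

# print(sum(1 for n in range(2, 10001) if cf_period(n) % 2 == 1))  # should print 1322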
|
bsamseth/project-euler
|
064/64.py
|
64.py
|
py
| 593 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72025042747
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import os,time
while True:
time.sleep(10)
result = os.system("ping www.google.com -c 5 | grep '0% packet loss'")
if not result == 0:
os.system("echo 'No internet connection. shutting down...' >> /home/pi/cam/data/log.txt")
time.sleep(5)
os.system("sudo shutdown -h now")
|
tomasBjornfot/picam
|
closeIfDisconnected.py
|
closeIfDisconnected.py
|
py
| 327 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19200731938
|
import argparse
from inference import Inference
from model import FashionModel
from train import Trainer
from data import TrainDataset
class ArgumentSelectError(Exception):
pass
def training():
train_dataset = TrainDataset(
image_dir=args.train_data_dir,
csv_path_train=f'data/dataset_csv/list_combined_{args.train_type}_small_train.tsv',
csv_path_val=f'data/dataset_csv/list_combined_{args.train_type}_small_val.tsv',
train_type=args.train_type,
batch_size=16,
shuffle=True,
random_seed=60,
image_shape=args.input_shape
)
fm = FashionModel()
fm.create_model(num_classes=train_dataset.num_classes, input_shape=[args.input_shape[0], args.input_shape[1], 3])
if args.checkpoint is not None:
fm.model.load_weights(args.checkpoint)
fm.model.summary()
trainer = Trainer(
model=fm.model,
train_gen=train_dataset.train_generator,
val_gen=train_dataset.validation_generator,
epoch=args.epoch,
step=args.step
)
trainer.train(log_dir=args.log_dir)
def inference():
inf = Inference(model_path=f'models/{args.predict_type}.h5',
sample_dir='samples',
inference_type=args.predict_type,
inference_csv=f'data/{args.predict_type}.csv')
inf.predict(save_result=True)
total_types = ['category', 'attribute', 'attribute1', 'attribute2', 'attribute3', 'attribute4', 'attribute5']
parser = argparse.ArgumentParser(
prog='Fashion Category and Attribute Prediction',
add_help=True,
description='This program predicts categories, textures(attribute1),'
'fabrics(attribute2), shapes(attribute3), parts(attribute4),'
'and styles(attribute5).'
)
parser.add_argument('-t', '--train', action='store_true',
help='Trains model with `--train-data-dir` and `--train-data-csv`.')
parser.add_argument('--train-type', type=str,
help='Selects which type will be trained. eg. `category`, `attribute1`.')
parser.add_argument('--train-data-dir', type=str,
help='Locate where is data folder.')
parser.add_argument('--input-shape', type=int, nargs=2,
                    help='Input image shape as two integers: height width.')
parser.add_argument('--epoch', type=int,
                    help='Number of epochs to train.')
parser.add_argument('--step', type=int,
                    help='Number of steps per epoch.')
parser.add_argument('--checkpoint', type=str, default=None,
                    help='Path to a checkpoint whose weights are loaded before training starts.')
parser.add_argument('--log-dir', type=str,
help='Locate where will training logs will be saved.')
parser.add_argument('-p', '--predict', action='store_true',
help='Inference model with `--sample-folder`.')
parser.add_argument('--predict-type', type=str,
help='Selects which type will be predicted. eg. `category`, `attribute1`.')
if __name__ == '__main__':
args = parser.parse_args()
try:
if args.train:
if args.train_data_dir is None:
raise ArgumentSelectError('Train data directory not specified. Can not train!')
elif args.log_dir is None:
raise ArgumentSelectError('Log directory not specified. Can not train!')
elif not any([args.train_type == train_type for train_type in total_types]):
raise ArgumentSelectError('Train type not specified. Can not train!')
else:
print('Training!')
training()
print('Training Finished!')
elif args.predict:
if not any([args.predict_type == pred_type for pred_type in total_types]):
raise ArgumentSelectError('Predict type not specified. Can not predict.')
else:
print('Inference!')
inference()
print('Inference Completed!')
except ArgumentSelectError as err:
print(err)
print('Please enter right arguments!')
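# Hedged usage examples (added for illustration; the paths and values are assumptions,
# not taken from the original repository):
#
#   python main.py --train --train-type category \
#       --train-data-dir data/img --input-shape 224 224 \
#       --epoch 10 --step 500 --log-dir logs/category
#
#   python main.py --predict --predict-type category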
|
omerferhatt/deep-fashion-classification
|
main.py
|
main.py
|
py
| 4,105 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14624971996
|
#coding:utf-8
from math import e
import numpy as np
year=150
def func(x):
if x<0:
return e**(g*x)
else:
return 1
x_1=[-3/float(40000)*x**2+3/float(200)*x for x in range(1,year)]
x_2=[]
T=3/2*(year-50)
a=1/T**2
for x in range(1,year):
if(x<=T):
x_2.append(a*x**2)
else:
x_2.append(1)
x_3=[1/float(100)*x for x in range(1,year)]
x_5=[]
halfyear=year/2
for x in range(1,year):
if(x<=halfyear):
x_5.append(1/halfyear**2*x*x)
else:
x_5.append(-(x-year)**2/halfyear**2+1)
import matplotlib.pyplot as plt
fig=plt.figure(1)
ax={}
number=0
sigma=0.008
for k in [0.01,0.02,0.03]:
for g in [0.003,0.005,0.008]:
rand=np.random.normal(0,sigma)
c_1 = [0.01]
c_2 = [0.01]
c_3 = [0.01]
c_4 = [0.01]
c_5 = [0.01]
for i in range(1,year):
c_1.append(min(k*(1-x_1[i-1])*(1-c_1[i-1])+func(x_1[i-1]-c_1[i-1])*c_1[i-1]+rand,1))
c_2.append(min(k * (1 - x_2[i-1]) * (1 - c_2[i-1]) + func(x_2[i-1] - c_2[i-1]) * c_2[i-1]+rand,1))
c_3.append(min(k * (1 - x_3[i-1]) * (1 - c_3[i-1]) + func(x_3[i-1] - c_3[i-1]) * c_3[i-1]+rand,1))
c_5.append(min(k * (1 - x_5[i-1]) * (1 - c_5[i-1]) + func(x_5[i-1] - c_5[i-1]) * c_5[i-1]+rand,1))
for i in range(1,year):
c_4.append(min(k * (1 - c_4[i-1]) * (1 - c_4[i-1]) + func(c_4[i-1] - c_4[i-1]) * c_4[i-1]+rand,1))
ax[number]=fig.add_subplot(331+number)
plt.title('k=%.3f,g=%.3f'%(k,g))
plt.plot(c_1,label='quadric')
plt.plot(c_2,label='convex')
plt.plot(c_3,label='static')
plt.plot(c_4,label='dynamic')
plt.plot(c_5,label='logistics')
number=number+1
ax[8].legend(loc='lower center',shadow=True,bbox_to_anchor=(1.2, 1.4),borderaxespad = 0.)
plt.show()
|
liangzp/2018-American-Interdisciplinary-Contest-in-Modeling
|
Code/random_model.py
|
random_model.py
|
py
| 1,910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70732572987
|
"""
Package for conversion between Julian date and other date/time
representations.
"""
from .api import *
__version__ = '0.1.0'
__author__ = 'Nikita Churilov'
__maintainer__ = __author__
__email__ = '[email protected]'
__license__ = 'MIT'
|
churilov-ns/juldate
|
juldate/__init__.py
|
__init__.py
|
py
| 248 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5637517912
|
from urllib.request import urlopen
from datetime import datetime
import json
from settings import my_lec_list, base_url
class InfoList(object):
def __init__(self):
self.json = self.get_api()
self.table = self.json["table"]
self.count = self.json["count"]
self.body = self.json["body"]
self.my_list = self.set_my_info()
@staticmethod
def get_api():
api = urlopen(base_url + "info/")
s = api.read().decode('utf-8')
return json.loads(s)
@staticmethod
def identify(subject):
if subject in my_lec_list:
return True
else:
return False
def set_ids(self):
id_list = []
for b in self.body:
judge = self.identify(b["subject"])
if judge:
id_list.append(b["id"])
else:
pass
return id_list
def set_my_info(self):
detail_list = []
for id in self.set_ids():
d = InfoDetail(id)
detail_list.append(d)
return detail_list
class InfoDetail(object):
def __init__(self, info_id):
self.id = info_id
self.json = self.get_api()
self.subject = self.json["subject"]
self.teacher = self.json["teacher"]
self.abstract = self.json["abstract"]
self.detail = self.json["detail"]
self.created_at = self.convert_date(self.json["time"]["created_at"])
self.last_update = self.convert_date(self.json["time"]["last_update"])
self.last_confirm = self.convert_date(self.json["time"]["last_confirm"])
def get_api(self):
api = urlopen(base_url + "info/id/" + str(self.id))
s = api.read().decode('utf-8')
return json.loads(s)
@staticmethod
def convert_date(d):
l = len(d)
if l > 11:
return datetime.strptime(d, "%Y/%m/%d %H:%M:%S")
else:
return datetime.strptime(d, "%Y/%m/%d")
if __name__ == "__main__":
i = InfoList()
for detail in i.my_list:
print(type(detail))
print(detail.subject)
print(detail.created_at.strftime("%Y-%m-%d %H:%M:%S"))
|
pddg/learning
|
models.py
|
models.py
|
py
| 2,174 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30614657486
|
from unittest import main
from re import compile
from ir_datasets.formats import ToucheQuery, TrecQrel, ToucheTitleQuery
from ir_datasets.formats.touche import ToucheQualityQrel
from test.integration.base import DatasetIntegrationTest
class TestTouche(DatasetIntegrationTest):
# noinspection PyTypeChecker
def test_queries(self):
self._test_queries(
"argsme/2020-04-01/touche-2020-task-1",
count=49,
items={
0: ToucheQuery(
query_id="1",
title="Should teachers get tenure?",
description=compile("A user has heard that some countries do give teach.{159}teachers vs. university professors is of interest\."),
narrative=compile("Highly relevant arguments make a clear statement a.{181}the situation of teachers' financial independence\."),
),
48: ToucheQuery(
query_id="50",
title="Should everyone get a universal basic income?",
description=compile("Redistribution of wealth is a fundamental concept .{93}ver, a user wonders whether this truly would help\."),
narrative=compile("Highly relevant arguments take a clear stance towa.{134}mentioning universal basic income only in passing\."),
),
}
)
self._test_queries(
"argsme/1.0/touche-2020-task-1/uncorrected",
count=49,
items={
0: ToucheQuery(
query_id="1",
title="Should teachers get tenure?",
description=compile("A user has heard that some countries do give teach.{159}teachers vs. university professors is of interest\."),
narrative=compile("Highly relevant arguments make a clear statement a.{181}the situation of teachers' financial independence\."),
),
48: ToucheQuery(
query_id="50",
title="Should everyone get a universal basic income?",
description=compile("Redistribution of wealth is a fundamental concept .{93}ver, a user wonders whether this truly would help\."),
narrative=compile("Highly relevant arguments take a clear stance towa.{134}mentioning universal basic income only in passing\."),
),
}
)
self._test_queries(
"argsme/2020-04-01/touche-2020-task-1/uncorrected",
count=49,
items={
0: ToucheQuery(
query_id="1",
title="Should teachers get tenure?",
description=compile("A user has heard that some countries do give teach.{159}teachers vs. university professors is of interest\."),
narrative=compile("Highly relevant arguments make a clear statement a.{181}the situation of teachers' financial independence\."),
),
48: ToucheQuery(
query_id="50",
title="Should everyone get a universal basic income?",
description=compile("Redistribution of wealth is a fundamental concept .{93}ver, a user wonders whether this truly would help\."),
narrative=compile("Highly relevant arguments take a clear stance towa.{134}mentioning universal basic income only in passing\."),
),
}
)
self._test_queries(
"clueweb12/touche-2020-task-2",
count=50,
items={
0: ToucheQuery(
query_id="1",
title="What is the difference between sex and love?",
description=compile("A potentially younger user has heard people talk a.{147}ontrast, what characterizes a loving relationship\."),
narrative=compile("Relevant documents will contain some description o.{155}f what people are looking for in either direction\."),
),
49: ToucheQuery(
query_id="50",
title="Whose salary is higher: basketball or soccer players?",
description=compile("A young married couple raises a 14-year old boy wh.{313}income to players in different parts of the world\."),
narrative=compile("Highly relevant documents provide information on a.{496}iptions of basketball and soccer are not relevant\."),
),
}
)
self._test_queries(
"argsme/2020-04-01/touche-2021-task-1",
count=50,
items={
0: ToucheTitleQuery(
query_id="51",
title="Do we need sex education in schools?"
),
49: ToucheTitleQuery(
query_id="100",
title="Do we need cash?"
),
}
)
self._test_queries(
"clueweb12/touche-2021-task-2",
count=50,
items={
0: ToucheQuery(
query_id="51",
title="What is better at reducing fever in children, Ibuprofen or Aspirin?",
description=compile("Younger parents have their 8-year old child sick\. .{400}en and aspirin for reducing the fever in children\."),
narrative=compile("Relevant documents will describe ibuprofen, aspiri.{258} or ingredients of the medicines are not relevant\."),
),
49: ToucheQuery(
query_id="100",
title="Should I learn Python or R for data analysis?",
description=compile("Wondering whether you should use Python or R for d.{318}ore useful, flexible, easy to learn and efficient\."),
narrative=compile("Relevant documents should compare two programming .{430}re not related to data analysis, are not relevant\."),
),
}
)
def test_qrels(self):
self._test_qrels(
"argsme/2020-04-01/touche-2020-task-1",
count=2298,
items={
0: TrecQrel(
query_id="1",
doc_id="S197beaca-A971412e6",
relevance=0,
iteration="0"
),
2297: TrecQrel(
query_id="50",
doc_id="Sffdf2e2e-A307df259",
relevance=2,
iteration="0"
),
}
)
self._test_qrels(
"argsme/1.0/touche-2020-task-1/uncorrected",
count=2964,
items={
0: TrecQrel(
query_id="1",
doc_id="197beaca-2019-04-18T11:28:59Z-00001-000",
relevance=4,
iteration="0"
),
2963: TrecQrel(
query_id="50",
doc_id="799d051-2019-04-18T11:47:02Z-00000-000",
relevance=-2,
iteration="Q0"
),
}
)
self._test_qrels(
"argsme/2020-04-01/touche-2020-task-1/uncorrected",
count=2298,
items={
0: TrecQrel(
query_id="1",
doc_id="S21dc5a14-A8b896cb0",
relevance=4,
iteration="0"
),
2297: TrecQrel(
query_id="50",
doc_id="Sffdf2e2e-A307df259",
relevance=2,
iteration="0"
),
}
)
self._test_qrels(
"clueweb12/touche-2020-task-2",
count=1783,
items={
0: TrecQrel(
query_id="1",
doc_id="clueweb12-0001wb-05-12311",
relevance=0,
iteration="0"
),
1782: TrecQrel(
query_id="50",
doc_id="clueweb12-0206wb-00-16297",
relevance=0,
iteration="0"
),
}
)
self._test_qrels(
"argsme/2020-04-01/touche-2021-task-1",
count=3711,
items={
0: ToucheQualityQrel(
query_id="94",
doc_id="S522c7c3b-A8a87130b",
relevance=2,
quality=2,
iteration="0"
),
3710: ToucheQualityQrel(
query_id="91",
doc_id="Sf0770da-A760eca8e",
relevance=0,
quality=1,
iteration="0"
),
}
)
self._test_qrels(
"clueweb12/touche-2021-task-2",
count=2076,
items={
0: ToucheQualityQrel(
query_id="54",
doc_id="clueweb12-0205wb-64-11095",
relevance=0,
quality=0,
iteration="0"
),
2075: ToucheQualityQrel(
query_id="86",
doc_id="clueweb12-0008wb-85-29079",
relevance=0,
quality=0,
iteration="0"
),
}
)
if __name__ == "__main__":
main()
|
Heyjuke58/ir_datasets
|
test/integration/touche.py
|
touche.py
|
py
| 9,700 |
python
|
en
|
code
| null |
github-code
|
6
|
7627165577
|
from nodoProfundidad import NodoProfundidad as nodo
from posicion import Posicion as posicion
"""
0 -> vacio
1 -> pinocho
2 -> cigarrillos
3 -> zorro
4 -> geppeto
5 -> sin camino """
"""matriz = [
[0, 3, 0, 3, 0],
[1, 5, 0, 0, 0],
[0, 0, 5, 5, 4],
[0, 0, 0, 2, 0],
[0, 0, 0, 0, 0]
]"""
def Verificador(nodo):
aux=str(nodo.pos.posx) + " " + str(nodo.pos.posy) + " " +str(nodo.profundidad)+""+ str(nodo.costo)+ " "
for i in nodo.camino:
aux = aux + " "+ str(i.posx) + " " +str(i.posy)
print(aux)
# find Pinocchio, which is the number 1 in the matrix
def buscarPinocho(matriz):
for fila in range(len(matriz)):
for columna in range(len(matriz[0])):
if matriz[fila][columna] == 1:
print("fila:", fila, ",columna:", columna)
return fila, columna
def agregar (nodo):
pro = nodo.profundidad
for i in range (len(cola)):
nodoi = cola[i]
if pro <= nodoi.profundidad:
cola.insert(i,nodo)
return None
cola.append(nodo)
# cost of passing through a cell
def costoAcumulado(posicionN, juego):
costoN = 0
if juego[posicionN.posx][posicionN.posy] == 0:
costoN += 1
elif juego[posicionN.posx][posicionN.posy] == 4:
costoN += 1
else:
costoN += juego[posicionN.posx][posicionN.posy]
return costoN
# expand the node above (up)
def arriba(nodoActual, juego):
if (nodoActual.pos.posx > 0):
posicionNueva = posicion(nodoActual.pos.posx-1, nodoActual.pos.posy)
caminoA = nodoActual.camino.copy()
costos = nodoActual.costo
        # cost of passing through the cell
costos += costoAcumulado(posicionNueva,juego)
newP = nodoActual.profundidad + 1
if juego[posicionNueva.posx][posicionNueva.posy] != 5 and posicionNueva.existe(caminoA):
caminoA.append(posicionNueva)
nuevoNodo = nodo(posicionNueva, caminoA, newP, costos)
            Verificador(nuevoNodo) # print every node created
            agregar(nuevoNodo) # add the node to the queue
# expand the node below (down)
def abajo(nodoActual, juego):
if (nodoActual.pos.posx < len(juego)-1):
posicionNueva = posicion(nodoActual.pos.posx + 1, nodoActual.pos.posy)
caminoA = nodoActual.camino.copy()
costos = nodoActual.costo
        # cost of passing through the cell
costos += costoAcumulado(posicionNueva, juego)
newP = nodoActual.profundidad + 1
if juego[posicionNueva.posx][posicionNueva.posy] != 5 and posicionNueva.existe(caminoA):
caminoA.append(posicionNueva)
nuevoNodo = nodo(posicionNueva, caminoA, newP, costos)
Verificador(nuevoNodo)
agregar(nuevoNodo)
# expand the node to the left
def izquierda(nodoActual, juego):
if (nodoActual.pos.posy > 0):
posicionNueva = posicion(nodoActual.pos.posx, nodoActual.pos.posy-1)
caminoA = nodoActual.camino.copy()
costos = nodoActual.costo
        # cost of passing through the cell
costos += costoAcumulado(posicionNueva, juego)
newP = nodoActual.profundidad + 1
if juego[posicionNueva.posx][posicionNueva.posy] != 5 and posicionNueva.existe(caminoA):
caminoA.append(posicionNueva)
nuevoNodo = nodo(posicionNueva, caminoA, newP, costos)
Verificador(nuevoNodo)
agregar(nuevoNodo)
# expand the node to the right
def derecha(nodoActual, juego):
if nodoActual.pos.posy < len(juego[0])-1:
posicionNueva = posicion(nodoActual.pos.posx, nodoActual.pos.posy + 1)
caminoA = nodoActual.camino.copy()
costos = nodoActual.costo
        # cost of passing through the cell
costos += costoAcumulado(posicionNueva, juego)
newP = nodoActual.profundidad + 1
if juego[posicionNueva.posx][posicionNueva.posy] != 5 and posicionNueva.existe(caminoA):
caminoA.append(posicionNueva)
nuevoNodo = nodo(posicionNueva, caminoA, newP, costos)
Verificador(nuevoNodo)
agregar(nuevoNodo)
def busquedaAmplitud(juegoo):
    # initial node
    pinocho = buscarPinocho(juegoo) # look up Pinocchio's position in the matrix
    pos = posicion(pinocho[0], pinocho[1])
    inicio = nodo(pos, [pos],0, 0) # root node
    expandidos = []
    global cola
    cola = []
    cola.append(inicio) # add the root node to the queue
    # start of the breadth-first search
while (True):
if len(cola) == 0:
print("No encontré")
break
#expando el nodo actual
nodoActual = cola.pop(0) # sacamos el primero de la lista
posX = nodoActual.pos.posx
posY = nodoActual.pos.posy
# pregunto si es meta
if juegoo[posX][posY] == 4:
print("Encontre")
break
#Expando
expandidos.append((posX,posY))
if(nodoActual.profundidad % 2 == 1):
            arriba(nodoActual, juegoo) # up, move 1
            abajo(nodoActual, juegoo) # down, move 2
            derecha(nodoActual,juegoo) # right, move 3
            izquierda(nodoActual, juegoo) # left, move 4
        else:
            izquierda(nodoActual, juegoo) # left, move 4
            derecha(nodoActual,juegoo) # right, move 3
            abajo(nodoActual, juegoo) # down, move 2
            arriba(nodoActual, juegoo) # up, move 1
    # print the indices of the nodes expanded during the search
print(expandidos)
return nodoActual.camino,nodoActual.costo
#busquedaAmplitud(matriz)
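# Hedged example (added for illustration): running the search on the sample grid from the
# comment at the top of this module. It assumes the nodoProfundidad and posicion modules
# imported above are available on the path.
if __name__ == "__main__":
    matriz_ejemplo = [
        [0, 3, 0, 3, 0],
        [1, 5, 0, 0, 0],
        [0, 0, 5, 5, 4],
        [0, 0, 0, 2, 0],
        [0, 0, 0, 0, 0]
    ]
    camino, costo = busquedaAmplitud(matriz_ejemplo)
    print("path:", [(p.posx, p.posy) for p in camino], "cost:", costo)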
|
GustavoA198/proyecto1-IA
|
amplitud_IA.py
|
amplitud_IA.py
|
py
| 5,702 |
python
|
es
|
code
| 0 |
github-code
|
6
|
19637644362
|
import requests
import datetime
response = requests.get("https://blockchain.info/rawaddr/42e58ccd620fab780e46095f4b3f6987aa253219")
data = response.json()
first_tr_id = data["txs"][0]["hash"]
first_tr_time = data["txs"][0]["time"]
a = [1, 2, 3, 4]
for item in a:
    print(item)
|
maciek1066/training
|
bitcoin_api.py
|
bitcoin_api.py
|
py
| 294 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16898559994
|
#!/usr/bin/env python3
import itertools
def print_header(x, y, z = None):
print("join_digits(", seq2digit(x), ", ", seq2some(y), ", ", seq2digit(z), ") ->", sep="")
def produce(seq):
while seq:
if len(seq) == 4:
yield seq2node(seq[:2])
yield seq2node(seq[2:])
break
yield seq2node(seq[:3])
seq = seq[3:]
def print_body(seq):
print(" ", seq2some(produce(seq)), ";", sep="")
def seq2digit(seq):
return "{{{}}}".format(", ".join(itertools.chain("_", seq)))
def seq2some(seq):
return "{{{}}}".format(", ".join(seq))
def seq2node(seq):
return "node({})".format(", ".join(seq))
print("-spec join_digits(digit(X), some(X), digit(X)) -> some(node(X)) when X :: desc().")
var_x = 'ABCD'
var_some = 'EFGH'
var_z = 'IJKL'
MAX = 12
MAXONE = 4
for size_some in range(0, MAXONE + 1):
for size_x in range(1, MAXONE + 1):
for size_z in range(1, MAXONE + 1):
x, some, z = var_x[:size_x], var_some[:size_some], var_z[:size_z]
print_header(x, some, z)
print_body(x + some + z)
print("join_digits(Left, Some, Right) -> error(function_clause, [Left, Some, Right]).")
|
platbox/nanometer
|
py/ftree_generate.py
|
ftree_generate.py
|
py
| 1,194 |
python
|
en
|
code
| 3 |
github-code
|
6
|
39931219674
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
fast = slow = head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast:
slow = slow.next
slow = self.reverse(slow)
while slow and slow.val == head.val:
slow = slow.next
head = head.next
        return slow is None
def reverse(self, head):
rvs = None
cur = head
while cur:
temp = cur.next
cur.next = rvs
rvs = cur
cur = temp
return rvs
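# Hedged test sketch (not part of the original submission): defines the ListNode class from
# the commented-out stub above and builds small chains locally, so the solution can be run
# outside the LeetCode judge. The helper names are local to this example.
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None

def _build(values):
    head = cur = ListNode(values[0])
    for v in values[1:]:
        cur.next = ListNode(v)
        cur = cur.next
    return head

if __name__ == "__main__":
    s = Solution()
    print(s.isPalindrome(_build([1, 2, 2, 1])))  # True
    print(s.isPalindrome(_build([1, 2, 3])))     # False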
|
bolan2014/leetcode
|
easy/PalindromeLinkedList.py
|
PalindromeLinkedList.py
|
py
| 824 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3981438238
|
'''
You are given an array of intervals - that is, an array of tuples (start, end). The array may not be sorted, and could contain overlapping intervals. Return another array where the overlapping intervals are merged.
For example:
[(1, 3), (5, 8), (4, 10), (20, 25)]
This input should return [(1, 3), (4, 10), (20, 25)] since (5, 8) and (4, 10) can be merged into (4, 10).
Here's a starting point:
'''
def merge(intervals):
# Fill this in.
res = []
while len(intervals) > 0:
stack = []
stack.append(intervals[0])
tmp = [intervals[0][0], intervals[0][1]]
while len(stack) > 0:
interval = stack.pop()
intervals.remove(interval)
if interval[0] < tmp[0]:
tmp[0] = interval[0]
if interval[1] > tmp[1]:
tmp[1] = interval[1]
for it in intervals:
if intersects(tmp, it):
stack.append(it)
res.append((tmp[0], tmp[1]))
return res
def intersects(a, b):
    return (b[0] >= a[0] and b[0] <= a[1]) or (b[1] >= a[0] and b[1] <= a[1]) or (a[0] >= b[0] and a[1] <= b[1])
print(intersects((5, 8), (4, 10)))
print(intersects((5, 8), (6, 10)))
print(intersects((5, 8), (2, 7)))
print(intersects((5, 8), (11, 20)))
print(merge([(1, 3), (5, 8), (0, 2), (4, 10), (20, 25), (9, 12), (11, 13)]))
# [(0, 3), (4, 13), (20, 25)]
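# Hedged alternative sketch (added for comparison, not part of the original answer): the
# usual sort-then-sweep merge, which avoids the repeated scans done by the stack-based
# version above. The function name is local to this example.
def merge_sorted(intervals):
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1]:
            # overlaps the last merged interval: extend it
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged

# print(merge_sorted([(1, 3), (5, 8), (0, 2), (4, 10), (20, 25), (9, 12), (11, 13)]))
# expected: [(0, 3), (4, 13), (20, 25)]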
|
MateuszMazurkiewicz/CodeTrain
|
InterviewPro/2019.11.17/task.py
|
task.py
|
py
| 1,423 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8747012693
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 24 23:16:21 2019
@author: ADMIN
"""
import pandas as pd
import numpy as np
import AllFunctions as af
#import dateutil
import math
item_data=pd.read_csv("item_data.csv")
log_data=pd.read_csv("view_log.csv",parse_dates=['server_time'],infer_datetime_format=True)
train_data=pd.read_csv("train.csv",parse_dates=['impression_time'],infer_datetime_format=True)
test_data=pd.read_csv("test.csv",parse_dates=['impression_time'],infer_datetime_format=True)
test_data['is_click']=-1
train_test_data=pd.concat([train_data,test_data],axis=0)
ismall=item_data.head(5)
#dfDesc=af.getDFDesc(item_data)
item_data=af.categorizeCols(item_data,cols=['item_id','category_1', 'category_2', 'category_3','product_type'])
#dfDesc=af.getDFDesc(item_data)
item_data['item_price_log']=np.log(item_data['item_price'])
#dfDesc=af.getDFDesc(log_data)
log_data=af.categorizeCols(log_data,cols=['session_id','user_id','item_id'])
#dfDesc=af.getDFDesc(log_data)
log_item_data=pd.merge(log_data,item_data,on='item_id')
train_test_data['impression_time_7less'] = train_test_data['impression_time'] - pd.to_timedelta(7, unit='d')
train_test_data.reset_index(inplace=True,drop=True)
"""user_ids_list = np.unique(log_data['user_id'])
minimptime=np.min(train_test_data['impression_time_7less'])
l=log_item_data.sample(frac=0.001,random_state=5)
l.to_csv('lShort.csv',index=False)
t=train_test_data.sample(frac=0.01,random_state=5)
t.to_csv('tShort.csv',index=False)"""
log_item_data.to_csv('log_item_data.csv',index=False)
train_test_data.to_csv('train_test_data.csv',index=False)
def calcAllOutputs(df):
visit_count = len(df)
total_sessions = len(np.unique(df['session_id']))
total_items = len(np.unique(df['item_id']))/visit_count
total_category_1 = len(np.unique(df['category_1']))/visit_count
total_category_2 = len(np.unique(df['category_2']))/visit_count
total_category_3 = len(np.unique(df['category_3']))/visit_count
total_product_type = len(np.unique(df['product_type']))/visit_count
item_price_max = np.max(df['item_price_log'])
item_price_min = np.min(df['item_price_log'])
item_price_avg = np.mean(df['item_price_log'])
item_price_std = np.std(df['item_price_log'])
item_price_rng = item_price_max - item_price_min
max_time=np.max(df['server_time'])
impid=np.max(df['impression_id'])
#diff = df['impression_time'] - max_time
#diff1 = diff.total_seconds()
res=[impid,visit_count,total_sessions ,total_items ,total_category_1 ,total_category_2 ,total_category_3 ,total_product_type ,item_price_max ,item_price_min ,item_price_avg ,item_price_std ,item_price_rng,max_time]
return res
def calcImpFeatures(df):
previous_imp_app_count=len(np.unique(df['app_code']))
max_time2=np.max(df['impression_time_y'])
impid=np.max(df['impression_id'])
return [impid,max_time2,previous_imp_app_count]
def calcAppFeatures(df):
previous_imp_same_app_count=len(np.unique(df['app_code']))
max_time3=np.max(df['impression_time_y'])
impid=np.max(df['impression_id'])
return [impid,max_time3,previous_imp_same_app_count]
def applymathfloor(x):
if np.isnan(x)==False:
return math.floor(x)
else:
return x
dfC=train_test_data.merge(log_item_data,on='user_id')
print(len(dfC))
dfC2 = dfC[(dfC.server_time <= dfC.impression_time) & (dfC.server_time >= dfC.impression_time_7less)]
print(len(dfC2))
dfCHead=dfC2.head(100)
dfC3=dfC2.groupby('impression_id').apply(calcAllOutputs)
dfFeatureset1=pd.DataFrame.from_records(dfC3)
dfFeatureset1.columns=['impression_id','visit_count','total_sessions','total_items','total_category_1','total_category_2','total_category_3','total_product_type','item_price_max','item_price_min','item_price_avg','item_price_std','item_price_rng','max_time']
dfFeatureset1.to_csv('dfFeatureset1.csv',index=False)
dfC=train_test_data.merge(train_test_data[['user_id','impression_time','app_code']],on='user_id',suffixes=('', '_y'))
dfC2=dfC[dfC.impression_time<dfC.impression_time_y]
dfC3=dfC2.groupby('impression_id').apply(calcImpFeatures)
dfFeatureset2=pd.DataFrame.from_records(dfC3)
dfFeatureset2.columns=['impression_id','max_time2','previous_imp_app_count']
dfFeatureset2.to_csv('dfFeatureset2.csv',index=False)
dfC4=dfC2[dfC2.app_code==dfC2.app_code_y]
dfC5=dfC4.groupby('impression_id').apply(calcAppFeatures)
dfFeatureset3=pd.DataFrame.from_records(dfC5)
dfFeatureset3.columns=['impression_id','max_time3','previous_imp_same_app_count']
dfFeatureset3.to_csv('dfFeatureset3.csv',index=False)
"""
train_test_data=pd.read_csv('train_test_data.csv',parse_dates=['impression_time'],infer_datetime_format=True)
dfFeatureset1=pd.read_csv('dfFeatureset1.csv',parse_dates=['max_time'],infer_datetime_format=True)
dfFeatureset2=pd.read_csv('dfFeatureset2.csv',parse_dates=['max_time2'],infer_datetime_format=True)
dfFeatureset3=pd.read_csv('dfFeatureset3.csv',parse_dates=['max_time3'],infer_datetime_format=True)
"""
mergeddf=train_test_data.merge(dfFeatureset1,on='impression_id',how='left')
mergeddf=mergeddf.merge(dfFeatureset2,on='impression_id',how='left')
mergeddf=mergeddf.merge(dfFeatureset3,on='impression_id',how='left')
mergeddf['diff1']=(mergeddf['impression_time']-mergeddf['max_time']).dt.total_seconds()
mergeddf['diff2']=(mergeddf['max_time2']-mergeddf['impression_time']).dt.total_seconds()
mergeddf['diff3']=(mergeddf['max_time3']-mergeddf['impression_time']).dt.total_seconds()
train_test_data=mergeddf
s=train_test_data.app_code.value_counts()
s=s/len(train_test_data)
train_test_data['app_imp']=train_test_data['app_code'].apply(lambda x: s[x])
train_test_data['diff_days']=(train_test_data['diff1']/3600/24).apply(applymathfloor)
#train_test_data['diff_hours']=(train_test_data['diff1']/3600).apply(applymathfloor)
#train_test_data['diff_mins']=(train_test_data['diff1']/60).apply(applymathfloor)
#train_test_data['diff_secs']=(train_test_data['diff1']).apply(applymathfloor)
train_test_data['prev_diff_days']=(train_test_data['diff2']/3600/24).apply(applymathfloor)
#train_test_data['prev_diff_hours']=(train_test_data['diff2']/3600).apply(applymathfloor)
#train_test_data['prev_diff_mins']=(train_test_data['diff2']/60).apply(applymathfloor)
#train_test_data['prev_diff_secs']=(train_test_data['diff2']).apply(applymathfloor)
train_test_data['prev_app_diff_days']=(train_test_data['diff3']/3600/24).apply(applymathfloor)
#train_test_data['prev_app_diff_hours']=(train_test_data['diff3']/3600).apply(applymathfloor)
#train_test_data['prev_app_diff_mins']=(train_test_data['diff3']/60).apply(applymathfloor)
#train_test_data['prev_app_diff_secs']=(train_test_data['diff3']).apply(applymathfloor)
train_test_data['it_day_of_week'] = train_test_data['impression_time'].dt.dayofweek
train_test_data['it_month_start'] = train_test_data['impression_time'].dt.is_month_start
train_test_data['it_month_end'] = train_test_data['impression_time'].dt.is_month_end
train_test_data['it_weekday'] = train_test_data['impression_time'].apply(lambda x: x.weekday())
train_test_data=train_test_data.drop(columns=['impression_id','impression_time','user_id','impression_time_7less','app_code','max_time','max_time2','max_time3'])
train_test_data=train_test_data.drop(columns=['diff1','diff2','diff3'])
train_test_data=train_test_data.fillna(0)
train_test_data=af.categorizeCols(train_test_data,cols=['os_version','it_day_of_week','it_weekday'])
train_test_data=af.LabelEncodeCols(train_test_data.copy(),onehotColumns=[], categorical_columns=['os_version','it_day_of_week','it_weekday'])
train_test_data.to_csv("train_test_dataAll.csv",index=False)
#train_test_data=pd.read_csv('train_test_dataAll.csv')
X=train_test_data
#af.plot_corr(X)
# dropping correlated variables
X=X.drop(columns=['previous_imp_app_count','prev_app_diff_days'])
X=X.drop(columns=['total_category_1','total_category_2','total_category_3','total_sessions', 'item_price_min','item_price_max','item_price_std'])
X=X.drop(columns=['total_product_type','visit_count'])
print(X.columns)
pred_variable_type = "categorical"
target_variable='is_click'
TrainCleanVars={}
X_trainVal=X[X['is_click']!= -1]
X_test=X[X['is_click']== -1]
X_test=X_test.drop(columns=['is_click'])
X_trainVal.reset_index(inplace=True,drop=True)
X_test.reset_index(inplace=True,drop=True)
zeroOneCols=X_trainVal.apply(lambda x: af.ChkZeroOne(x))
standarizeCols=list(zeroOneCols[zeroOneCols==False].index)
X_trainVal,scaler=af.normalize(X_trainVal,standarizeCols)
#standarizeCols.remove(target_variable)
X_test=af.normalize(X_test,standarizeCols,scaler)
trainVal_frame=X_trainVal
x_cols=list(X_trainVal.columns)
y_col=target_variable
trainVal_frame[target_variable] = trainVal_frame[target_variable].astype(np.uint8)
class_weights=af.GetClassWeights(trainVal_frame[target_variable])
trainVal_frame['class_weights'] =[class_weights[x] for x in trainVal_frame[target_variable]]
import H2OHandler as hh
# h2o.cluster().shutdown(prompt=True)
print("Start H2O model training")
#H2o internally uses k-fold cross validation
res,PredDF,predtrain,ptrain=hh.GetBestH2OModel(trainVal_frame,x_cols,y_col,pred_variable_type == "categorical",X_test,weights_column='class_weights',stopping_metric='AUC')
TrainCleanVars['H2OBestModel']=res.leader
X_test[target_variable]=PredDF['predict']
X_test[standarizeCols]=scaler.inverse_transform(X_test[standarizeCols])
ts=af.GetTimeStamp()
af.PickleWrite(TrainCleanVars,"TrainCleanVars"+str(ts)+".pkl")
X_test['impression_id']=test_data['impression_id']
final_sub=X_test[['impression_id',target_variable]]
final_sub.to_csv('samplesubmission'+str(ts)+'.csv',index=False)
lb=res.leaderboard
lbres=lb[:5,"model_id"]
import h2o
m = h2o.get_model(lb[0,"model_id"])
varimpres=m.varimp(use_pandas=True)
lbscores=lb.head(rows=lb.nrows).as_data_frame()
|
kinjaldand/MLProjects
|
AdClickPredictWNSHack/Work2.py
|
Work2.py
|
py
| 9,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32005344445
|
import torch.nn as nn
from transformers import BertModel
from services.text_similarity.settings import Settings
class BERTClassifier(nn.Module):
def __init__(self, freeze_params=False):
super(BERTClassifier, self).__init__()
self.settings = Settings
self.bert = BertModel.from_pretrained(self.settings.checkpoint, return_dict=False)
# adding custom layers according to the problem statement
# self.classifier = nn.Sequential(
# nn.Linear(self.settings.input_dim, self.settings.hidden_dim),
# nn.ReLU(),
# nn.Linear(self.settings.hidden_dim, self.settings.output_dim)
# )
if not freeze_params:
            # NOTE: with the default freeze_params=False the BERT encoder weights are
            # frozen (requires_grad=False); pass freeze_params=True to leave them trainable
for param in self.bert.parameters():
param.requires_grad = False
self.bert_drop = nn.Dropout(self.settings.dropout)
self.out = nn.Linear(self.settings.input_dim, self.settings.output_dim)
def forward(self, ids, mask, token_type_ids):
o1, o2 = self.bert(
ids,
attention_mask=mask,
token_type_ids=token_type_ids
)
bo = self.bert_drop(o2)
output = self.out(bo)
return output
def print_model_details(self):
# Get all of the model's parameters as a list of tuples.
params = list(self.bert.named_parameters())
print('The BERT Base Uncased Model Has {:} different named parameters.\n'.format(len(params)))
print('==== Embedding Layer ====\n')
for p in params[0:5]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
print('\n==== First Transformer ====\n')
for p in params[5:21]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
print('\n==== Output Layer ====\n')
for p in params[-4:]:
print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))
|
R-aryan/Text-Similarity-Using-BERT
|
backend/services/text_similarity/application/ai/model.py
|
model.py
|
py
| 1,945 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43724697977
|
import random
from game_logic.game import Agent
from game_logic.gameExtended import GameStateExtended
import numpy as np
import entregables.calcular_distancias as calcular_distancias
from game_logic import mcts_util, game_util
class MaxNAgent(Agent):
def __init__(self, index, max_depth = 2, unroll_type = "MC", max_unroll_depth= 5, number_of_unrolls = 10, view_distance = (2,2),param_tunner=None):
super().__init__(index)
self.max_depth = max_depth
self.unroll_type = unroll_type
self.max_unroll_depth = max_unroll_depth
self.number_of_unrolls = number_of_unrolls
self.view_distance = view_distance
self.param_tunner = param_tunner
def evaluationFunction(self, gameState, agentIndex,Is_End=0,Pacman_Wins=0):
if self.param_tunner == None:
param_tunner={'retorno_inicial':0,'pacman_food':-200,'pacman_capsule':-300,'pacman_ghost':400,'pacman_s_ghost':400,'ghost_pacman':-100,'ghost_food':0}
else:
param_tunner=self.param_tunner
Num_Agents = gameState.getNumAgents()
retorno_vector=[]
for Agent in range(Num_Agents):
processed_obs = game_util.process_state(gameState, self.view_distance, Agent)
            if Agent == 0: # I am Pacman
                retorno=param_tunner['retorno_inicial']
                # Closest food
                retorno = retorno + param_tunner['pacman_food'] * gameState.getNumFood()
                # The more food still on the board, the lower the return
                # Closest capsule
                retorno = retorno + param_tunner['pacman_capsule'] * len(gameState.getCapsules())
                # The more capsules still active, the lower the return
                # Closest ghost
                distancia_minima=float("inf") # non-scared ghost
                distancia_minima_s=float("inf") # scared ghost
                g_positions=gameState.getGhostPositions()
                g_states=gameState.getGhostStates()
                for index in range(len(g_positions)): # loop over the ghosts
                    if g_states[index].scaredTimer==0: # non-scared ghosts
                        distancia = self.calcular_distancia(g_positions[index],gameState.getPacmanPosition())
                        if distancia < distancia_minima:
                            distancia_minima = distancia # minimum distance to a non-scared ghost
                    else: # scared ghosts
                        distancia = self.calcular_distancia(g_positions[index],gameState.getPacmanPosition())
                        if distancia < distancia_minima_s:
                            distancia_minima_s = distancia # minimum distance to a scared ghost
                if distancia_minima != float("inf"): # score based on the closest non-scared ghost
                    retorno = retorno + param_tunner['pacman_ghost'] * distancia_minima
                if distancia_minima_s != float("inf"): # score based on the closest scared ghost
                    retorno = retorno + param_tunner['pacman_s_ghost'] / distancia_minima_s
                if Is_End:
                    if Pacman_Wins:
                        retorno = float('inf') # Pacman wins: best possible state
                    else:
                        retorno = -float('inf') # Pacman loses: worst possible state
            else: # I am a ghost
                retorno=param_tunner['retorno_inicial']
                mapa={}
                paredes=gameState.getWalls() # get the wall map
                for x in range(len(paredes.data)):
                    for y in range(len(paredes.data[0])): # walk the grid and build the map
                        if paredes[x][y]:
                            mapa[(x, y)] = '#'
                        else:
                            mapa[(x, y)] = ' '
                start=gameState.getGhostPosition(Agent)
                start=[round(start[0]),round(start[1])]
                end=gameState.getPacmanPosition()
                path = calcular_distancias.astar_search(mapa, start, end) # use A* to compute the distance
                # A* computes the distance taking the walls into account
                if gameState.getGhostState(Agent).scaredTimer>0: # I am a scared ghost
                    retorno = retorno + param_tunner['ghost_pacman'] * len(path) # penalise according to the distance to Pacman
                    retorno = - retorno
                else: # I am a normal ghost
                    retorno = retorno + param_tunner['ghost_pacman'] * len(path) # penalise according to the distance to Pacman
                if Is_End:
                    if Pacman_Wins:
                        retorno = -float('inf') # Pacman wins: worst state for the ghost
                    else:
                        retorno = float('inf') # the ghost wins: best possible state
retorno_vector.append(retorno)
return retorno_vector
    # Helper that computes the (Manhattan) distance between two agents;
    # it is used in evaluationFunction
def calcular_distancia(self, p1, p2):
distancia = np.abs(p1[0]-p2[0])+np.abs(p1[1]-p2[1])
return distancia
def getAction(self, gameState):
action, value = self.maxN(gameState, self.index, self.max_depth)
return action
    # Recursive function that maximises the value of the state for the agent whose
    # turn it is. It keeps recursing as long as no base case is reached, i.e. the
    # maximum depth has not been hit and the game has not ended.
def maxN(self, gameState: GameStateExtended, agentIndex, depth):
        # Base cases:
if depth == 0 and not gameState.isEnd():
if self.unroll_type == "MC":
values = self.montecarlo_eval(gameState, agentIndex)
return None, values
else:
values = self.montecarlo_tree_search_eval(gameState, agentIndex)
return None, values
elif gameState.isEnd():
            # Pass along that the game is over and whether Pacman is the winner
return None, self.evaluationFunction(gameState, agentIndex,1,gameState.isWin())
        # Recursive call
legalActions = gameState.getLegalActions(agentIndex)
random.shuffle(legalActions)
nextAgent = self.getNextAgentIndex(agentIndex, gameState)
action_nodes =[]
for action in legalActions:
child_node = gameState.deepCopy()
nextState = child_node.generateSuccessor(agentIndex, action)
_, state_value = self.maxN(nextState, nextAgent,depth-1)
action_nodes.append((action, state_value))
best_action = None
best_score_array = np.zeros(gameState.getNumAgents())
for action_node in action_nodes:
if best_action == None:
best_action=action_node[0]
best_score_array=action_node[1]
else:
if action_node[1][agentIndex]>best_score_array[agentIndex]:
best_action=action_node[0]
best_score_array=action_node[1]
return best_action, best_score_array
    # Returns the index of the next agent.
    # The if compares against agentIndex + 1 because getNumAgents returns the length
    # of the agent list, while agentIndex is the agent's position in that list,
    # counting from 0
def getNextAgentIndex(self, agentIndex, gameState):
if gameState.getNumAgents() == agentIndex +1:
return 0
else:
return agentIndex +1
    # This function performs the unrolls.
    # It checks whether a terminal state was reached or whether the
    # max_unroll_depth budget given as a parameter has run out.
    # While neither condition holds, a random action keeps being taken,
    # which advances the game. Once either condition holds, the
    # evaluation function is applied.
def random_unroll(self, gameState: GameStateExtended, agentIndex):
done = gameState.isEnd()
successor = gameState
actual_unroll_depth = self.max_unroll_depth
while not done and (actual_unroll_depth !=0):
actions = successor.getLegalActions(agentIndex)
action = random.choice(actions)
successor = successor.generateSuccessor(agentIndex, action)
agentIndex = self.getNextAgentIndex(agentIndex, successor)
done = successor.isEnd()
actual_unroll_depth = actual_unroll_depth -1
        if done:
            return self.evaluationFunction(successor, agentIndex)  # use the evaluation function
        elif actual_unroll_depth == 0:
            return self.evaluationFunction(successor, agentIndex)  # use the evaluation function
        else:
            return np.zeros(gameState.getNumAgents())
    # This function computes the Monte Carlo evaluation: random_unroll is
    # applied as many times as the number_of_unrolls parameter specifies,
    # and an array with the summed unroll values divided by the number of
    # unrolls performed is returned.
def montecarlo_eval(self, gameState, agentIndex):
values = np.zeros(gameState.getNumAgents())
for _ in range(self.number_of_unrolls):
unroll_values = self.random_unroll(gameState, agentIndex)
values = np.add(values, unroll_values)
return np.true_divide(values, self.number_of_unrolls)
    # MCTS algorithm used for real-time planning.
    # On reaching a new state, the next action is chosen as the result of a
    # planning stage. UCB is the policy used to balance exploration and exploitation.
def montecarlo_tree_search_eval(self, gameState, agentIndex):
root = mcts_util.MCTSNode(parent = None, action = None, player = agentIndex, numberOfAgents= gameState.getNumAgents())
root, gameState = self.expansion_stage(root, gameState)
best_reward = None
best_action = None
for _ in range(self.number_of_unrolls):
state = gameState.deepCopy()
node = root
sum_reward = 0
            # Selection stage:
            # following the tree policy (UCB), the start of a trajectory is
            # chosen from the root down to a leaf
node_sel, gameState_sel = self.selection_stage(node, state)
            # Expansion stage:
            # the tree is expanded from the selected leaf node by adding one or more children
            # (states reached from the leaf state through unexplored actions)
node_exp, gameState_exp = self.expansion_stage(node_sel, gameState_sel)
            # Simulation stage:
            # starting from the leaf node (or one of its new children), a simulated trajectory
            # is completed using the rollout policy. Here a random policy is used, via the
            # random_unroll function
sum_reward = self.random_unroll(gameState_exp, node_exp.player)
            # Backpropagation stage:
            # the return generated by the simulated trajectory is used to update the value
            # estimates of the state-action pairs stored in the tree
self.back_prop_stage(node_exp, sum_reward)
index=0
for child in root.children:
            # If the root has children, return the child with the best average value at the root player's position
if index==0:
best_action = child.action
best_reward = np.true_divide(child.value, child.visits)
index += 1
else:
valor_hijo = child.value[root.player]
valor_best = best_reward[root.player]
if valor_hijo > valor_best:
best_action = child.action
best_reward = np.true_divide(child.value, child.visits)
return best_reward
    # Selection stage of the MCTS algorithm
def selection_stage(self, node, gameState):
successor = gameState
        # Flag that tracks whether the state is terminal
done = successor.isEnd()
        # Take the current state
while len(node.children)>0 and not done:
            # Explore
if node.explored_children < len(node.children):
child = node.children[node.explored_children]
node.explored_children += 1
node = child
else:
                # Exploit
node = max(node.children, key= mcts_util.ucb)
            # Get the action
action = node.action
            # Move to the next state
successor = successor.generateSuccessor(node.parent.player, action)
return node, successor
    # MCTS expansion from the leaf node
def expansion_stage(self, node, gameState):
if not gameState.isEnd():
node.children = []
for a in gameState.getLegalActions(node.player):
parent = node
action = a
player = self.getNextAgentIndex(node.player,gameState)
numberOfAgents = gameState.getNumAgents()
nodo=mcts_util.MCTSNode(parent = parent, action = action, player = player, numberOfAgents = numberOfAgents)
node.children.append(nodo)
random.shuffle(node.children)
return node, gameState
    # MCTS backprop to update the information stored in the nodes
def back_prop_stage(self, node, value):
while node:
node.visits += 1
node.value = np.add(value,node.value)
node = node.parent
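# --- Illustrative aside (not part of the original agent) ---
# The tree policy mentioned in the comments above scores children with UCB.
# mcts_util.ucb is not shown in this file, so the helper below is only an
# assumed, self-contained version of the UCB1 formula:
#   UCB1(child) = mean_value + c * sqrt(ln(parent_visits) / child_visits)
# e.g. with equal means, ucb1_sketch(1.0, 2, 50) > ucb1_sketch(5.0, 10, 50)
# because the less-visited child receives a larger exploration bonus.
import math

def ucb1_sketch(child_value_sum, child_visits, parent_visits, c=math.sqrt(2)):
    """Hypothetical UCB1 score; unvisited children get +inf so they are tried first."""
    if child_visits == 0:
        return float("inf")
    return child_value_sum / child_visits + c * math.sqrt(math.log(parent_visits) / child_visits)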
|
cevarodriguez/MaxN-algorithm
|
entregables/maxNAgent.py
|
maxNAgent.py
|
py
| 12,724 |
python
|
es
|
code
| 0 |
github-code
|
6
|
26113994325
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "16/05/2018"
import logging
import sys
import numpy
import pytest
from silx.utils.testutils import ParametricTestCase
from silx.math import colormap
_logger = logging.getLogger(__name__)
class TestNormalization(ParametricTestCase):
"""Test silx.math.colormap.Normalization sub classes"""
def _testCodec(self, normalization, rtol=1e-5):
"""Test apply/revert for normalizations"""
test_data = (numpy.arange(1, 10, dtype=numpy.int32),
numpy.linspace(1., 100., 1000, dtype=numpy.float32),
numpy.linspace(-1., 1., 100, dtype=numpy.float32),
1.,
1)
for index in range(len(test_data)):
with self.subTest(normalization=normalization, data_index=index):
data = test_data[index]
normalized = normalization.apply(data, 1., 100.)
result = normalization.revert(normalized, 1., 100.)
self.assertTrue(numpy.array_equal(
numpy.isnan(normalized), numpy.isnan(result)))
if isinstance(data, numpy.ndarray):
notNaN = numpy.logical_not(numpy.isnan(result))
data = data[notNaN]
result = result[notNaN]
self.assertTrue(numpy.allclose(data, result, rtol=rtol))
def testLinearNormalization(self):
"""Test for LinearNormalization"""
normalization = colormap.LinearNormalization()
self._testCodec(normalization)
def testLogarithmicNormalization(self):
"""Test for LogarithmicNormalization"""
normalization = colormap.LogarithmicNormalization()
# relative tolerance is higher because of the log approximation
self._testCodec(normalization, rtol=1e-3)
# Specific extra tests
self.assertTrue(numpy.isnan(normalization.apply(-1., 1., 100.)))
self.assertTrue(numpy.isnan(normalization.apply(numpy.nan, 1., 100.)))
self.assertEqual(normalization.apply(numpy.inf, 1., 100.), numpy.inf)
self.assertEqual(normalization.apply(0, 1., 100.), - numpy.inf)
def testArcsinhNormalization(self):
"""Test for ArcsinhNormalization"""
self._testCodec(colormap.ArcsinhNormalization())
def testSqrtNormalization(self):
"""Test for SqrtNormalization"""
normalization = colormap.SqrtNormalization()
self._testCodec(normalization)
# Specific extra tests
self.assertTrue(numpy.isnan(normalization.apply(-1., 0., 100.)))
self.assertTrue(numpy.isnan(normalization.apply(numpy.nan, 0., 100.)))
self.assertEqual(normalization.apply(numpy.inf, 0., 100.), numpy.inf)
self.assertEqual(normalization.apply(0, 0., 100.), 0.)
class TestColormap(ParametricTestCase):
"""Test silx.math.colormap.cmap"""
NORMALIZATIONS = (
'linear',
'log',
'arcsinh',
'sqrt',
colormap.LinearNormalization(),
colormap.LogarithmicNormalization(),
colormap.GammaNormalization(2.),
colormap.GammaNormalization(0.5))
@staticmethod
def ref_colormap(data, colors, vmin, vmax, normalization, nan_color):
"""Reference implementation of colormap
:param numpy.ndarray data: Data to convert
:param numpy.ndarray colors: Color look-up-table
:param float vmin: Lower bound of the colormap range
:param float vmax: Upper bound of the colormap range
:param str normalization: Normalization to use
:param Union[numpy.ndarray, None] nan_color: Color to use for NaN
"""
norm_functions = {'linear': lambda v: v,
'log': numpy.log10,
'arcsinh': numpy.arcsinh,
'sqrt': numpy.sqrt}
if isinstance(normalization, str):
norm_function = norm_functions[normalization]
else:
def norm_function(value):
return normalization.apply(value, vmin, vmax)
with numpy.errstate(divide='ignore', invalid='ignore'):
# Ignore divide by zero and invalid value encountered in log10, sqrt
norm_data, vmin, vmax = map(norm_function, (data, vmin, vmax))
if normalization == 'arcsinh' and sys.platform == 'win32':
# There is a difference of behavior of numpy.arcsinh
# between Windows and other OS for results of infinite values
# This makes Windows behaves as Linux and MacOS
norm_data[data == numpy.inf] = numpy.inf
norm_data[data == -numpy.inf] = -numpy.inf
nb_colors = len(colors)
scale = nb_colors / (vmax - vmin)
        # Subtraction must be done in float to avoid overflow with uint
indices = numpy.clip(scale * (norm_data - float(vmin)),
0, nb_colors - 1)
indices[numpy.isnan(indices)] = nb_colors # Use an extra index for NaN
indices = indices.astype('uint')
# Add NaN color to array
if nan_color is None:
nan_color = (0,) * colors.shape[-1]
colors = numpy.append(colors, numpy.atleast_2d(nan_color), axis=0)
return colors[indices]
def _test(self, data, colors, vmin, vmax, normalization, nan_color):
"""Run test of colormap against alternative implementation
:param numpy.ndarray data: Data to convert
:param numpy.ndarray colors: Color look-up-table
:param float vmin: Lower bound of the colormap range
:param float vmax: Upper bound of the colormap range
:param str normalization: Normalization to use
:param Union[numpy.ndarray, None] nan_color: Color to use for NaN
"""
image = colormap.cmap(
data, colors, vmin, vmax, normalization, nan_color)
ref_image = self.ref_colormap(
data, colors, vmin, vmax, normalization, nan_color)
self.assertTrue(numpy.allclose(ref_image, image))
self.assertEqual(image.dtype, colors.dtype)
self.assertEqual(image.shape, data.shape + (colors.shape[-1],))
def test(self):
"""Test all dtypes with finite data
Test all supported types and endianness
"""
colors = numpy.zeros((256, 4), dtype=numpy.uint8)
colors[:, 0] = numpy.arange(len(colors))
colors[:, 3] = 255
# Generates (u)int and floats types
dtypes = [e + k + i for e in '<>' for k in 'uif' for i in '1248'
if k != 'f' or i != '1']
dtypes.append(numpy.dtype(numpy.longdouble).name) # Add long double
for normalization in self.NORMALIZATIONS:
for dtype in dtypes:
with self.subTest(dtype=dtype, normalization=normalization):
_logger.info('normalization: %s, dtype: %s',
normalization, dtype)
data = numpy.arange(-5, 15).astype(dtype).reshape(4, 5)
self._test(data, colors, 1, 10, normalization, None)
def test_not_finite(self):
"""Test float data with not finite values"""
colors = numpy.zeros((256, 4), dtype=numpy.uint8)
colors[:, 0] = numpy.arange(len(colors))
colors[:, 3] = 255
test_data = { # message: data
'no finite values': (float('inf'), float('-inf'), float('nan')),
'only NaN': (float('nan'), float('nan'), float('nan')),
'mix finite/not finite': (float('inf'), float('-inf'), 1., float('nan')),
}
for normalization in self.NORMALIZATIONS:
for msg, data in test_data.items():
with self.subTest(msg, normalization=normalization):
_logger.info('normalization: %s, %s', normalization, msg)
data = numpy.array(data, dtype=numpy.float64)
self._test(data, colors, 1, 10, normalization, (0, 0, 0, 0))
def test_errors(self):
"""Test raising exception for bad vmin, vmax, normalization parameters
"""
colors = numpy.zeros((256, 4), dtype=numpy.uint8)
colors[:, 0] = numpy.arange(len(colors))
colors[:, 3] = 255
data = numpy.arange(10, dtype=numpy.float64)
test_params = [ # (vmin, vmax, normalization)
(-1., 2., 'log'),
(0., 1., 'log'),
(1., 0., 'log'),
(-1., 1., 'sqrt'),
(1., -1., 'sqrt'),
]
for vmin, vmax, normalization in test_params:
with self.subTest(
vmin=vmin, vmax=vmax, normalization=normalization):
_logger.info('normalization: %s, range: [%f, %f]',
normalization, vmin, vmax)
with self.assertRaises(ValueError):
self._test(data, colors, vmin, vmax, normalization, None)
def test_apply_colormap():
"""Basic test of silx.math.colormap.apply_colormap"""
data = numpy.arange(256)
expected_colors = numpy.empty((256, 4), dtype=numpy.uint8)
expected_colors[:, :3] = numpy.arange(256, dtype=numpy.uint8).reshape(256, 1)
expected_colors[:, 3] = 255
colors = colormap.apply_colormap(
data,
colormap="gray",
norm="linear",
autoscale="minmax",
vmin=None,
vmax=None,
gamma=1.0)
assert numpy.array_equal(colors, expected_colors)
testdata_normalize = [
(numpy.arange(512), numpy.arange(512) // 2, 0, 511),
((numpy.nan, numpy.inf, -numpy.inf), (0, 255, 0), 0, 1),
((numpy.nan, numpy.inf, -numpy.inf, 1), (0, 255, 0, 0), 1, 1),
]
@pytest.mark.parametrize(
"data,expected_data,expected_vmin,expected_vmax",
testdata_normalize,
)
def test_normalize(data, expected_data, expected_vmin, expected_vmax):
"""Basic test of silx.math.colormap.normalize"""
result = colormap.normalize(
numpy.asarray(data),
norm="linear",
autoscale="minmax",
vmin=None,
vmax=None,
gamma=1.0,
)
assert result.vmin == expected_vmin
assert result.vmax == expected_vmax
assert numpy.array_equal(
result.data,
numpy.asarray(expected_data, dtype=numpy.uint8),
)
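# --- Illustrative aside (not part of the silx test suite) ---
# Minimal standalone use of silx.math.colormap.cmap mirroring the calls made in
# TestColormap._test above; the grayscale LUT and data range are arbitrary.
def _example_cmap_usage():
    lut = numpy.zeros((256, 4), dtype=numpy.uint8)
    lut[:, 0] = numpy.arange(len(lut))
    lut[:, 3] = 255
    data = numpy.linspace(0.0, 10.0, 50)
    # Same positional signature exercised by the tests:
    # data, colors, vmin, vmax, normalization, nan_color.
    return colormap.cmap(data, lut, 1.0, 10.0, "linear", None)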
|
silx-kit/silx
|
src/silx/math/test/test_colormap.py
|
test_colormap.py
|
py
| 10,291 |
python
|
en
|
code
| 106 |
github-code
|
6
|
42220245466
|
# A program that builds a singly linked list kept in email order as the user enters names and emails.
"""
Class declarations
"""
class Node:
def __init__(self, data=None, link=None):
self.data = data
self.link = link
"""
Function declarations
"""
def printNodes(start):
current = start
while current != None:
print(current.data, end=' ')
current = current.link
print()
def insertNode(insert_data):
global head, current, pre
node = Node()
node.data = insert_data
    # Insert at the front
if head == None:
head = node
return
if head.data[1] > insert_data[1]:
node.link = head
head = node
return
    # Insert in the middle
current = head
while current.link != None:
pre = current
current = current.link
if current.data[1] > insert_data[1]:
pre.link = node
node.link = current
return
    # Insert at the end
current.link = node
'''
Global variable declarations
'''
head, current, pre = None, None, None
'''
Main routine
'''
if __name__ == "__main__":
while True:
name = input("이름 --> ")
if name == "" or name == None:
break
email = input("이메일 --> ")
insertNode([name, email])
printNodes(head)
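# --- Illustrative aside (not part of the original exercise) ---
# Non-interactive demonstration of insertNode/printNodes using hard-coded sample
# data instead of input(); the names and emails below are made up.
def _demo_sorted_insert():
    global head, current, pre
    head, current, pre = None, None, None
    for name, email in [("Park", "park@c.com"), ("Kim", "kim@a.com"), ("Lee", "lee@b.com")]:
        insertNode([name, email])
    printNodes(head)  # nodes come out ordered by email: Kim, Lee, Park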
|
War-Oxi/Oxi
|
Python_Code/Algorithm/Chapter4_Exam1.py
|
Chapter4_Exam1.py
|
py
| 1,370 |
python
|
ko
|
code
| 1 |
github-code
|
6
|
20086331044
|
"""
Contains classes Quandle, Biquandle, Singquandle, Alexander_Quandle,
Conj_Quandle, and Trivial_Quandle.
FIXME:
- Nothing for now.
TODO:
- If X is a rack with operation a*b, then it is a birack if we
define a**b as the identity a**b == a. Thus biquandle matrix2
should be optional.
- Does the above apply to singquandles/singracks?
- homomorphism methods.
"""
import numpy as np
from pyknots.modules.magmas import Magma
from pyknots.modules.groups import Group
from pyknots.modules.utils import issquare, applymorphism
import json
import os
__all__ = ['Quandle', 'Biquandle', 'Singquandle',
'Alexander_Quandle', 'Conj_Quandle', 'Trivial_Quandle']
class Quandle(Magma):
"""Instantiate a quandle object by passing Quandle() a matrix, a
string representation of the RIG index, or a numpy array.
"""
def __init__(self, matrix):
if type(matrix) is str:
dirname = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(os.path.dirname(dirname), 'data', 'RIG_quandles.json')
try:
with open(path, 'r') as fp:
matrix = json.load(fp)[matrix]
self.__init__(matrix)
except KeyError:
raise TypeError('Input %s not a RIG quandle.' % (matrix))
else:
super().__init__(matrix)
def __str__(self):
return str(self.array)
def as_biquandle(self):
""" Generate identity matrix and return biquandle class object."""
M1 = self.array
M2 = Trivial_Quandle(self.order, self.index).array
B = Biquandle(M1, M2)
return B
def inverse_quandle(self):
M = self.array
n = self.order
new_M = np.zeros((n, n), dtype=int)
for i in range(n):
for j in range(n):
k = M[i,j]
new_M[k,j] = i
return Quandle(new_M)
def is_rack(self):
""" A rack is a set with 2 axioms: for a, b, c in X,
1) a*b is a bijection.
2) (a*b)*c == (a*c)*(b*c).
"""
M = self.array
ind = self.index
for i in range(self.order):
col = []
for j in range(self.order):
for k in range(self.order):
if M[M[i,j]-ind,k] != M[M[i,k]-ind,M[j,k]-ind]:
return False
col.append(M[j,i])
for c in range(len(col)):
if not c+ind in col:
return False
return True
def is_quandle(self):
""" A quandle is a rack that satisfies the third axiom that for
all a in X, a*a == a.
"""
M = self.array
ind = self.index
if not self.is_rack():
return False
for i in range(self.order):
if M[i,i] != i+ind:
return False
return True
def is_biquandle(self):
return False
def is_singquandle(self):
return False
def is_kei(self):
if self.is_quandle() and self.is_involutory():
return True
return False
def is_trivial(self):
""" A quandle is trivial if for all a, b in X, a*b == a."""
M = self.array
for i in range(self.order):
for j in range(self.order):
if M[i,j] != i:
return False
return True
def is_involutory(self):
""" A quandle is involutory if for all a, b in X, a*(a*b) == b."""
M = self.array
for i in range(self.order):
for j in range(self.order):
if M[i,M[i,j]] != j:
return False
return True
def is_dihedral(self):
""" If for all a, b in X, a*b == 2*b-a then it is dihedral.
Equivalent to isomorphism to Alexander_Quandle(p, -1)
"""
M = self.array
ind = self.index
p = self.order
for i in range(self.order):
for j in range(self.order):
if M[i,j] != ((2*(j) - (i)) % p)+ind:
return False
return True
def is_medial(self):
""" Equivalent to abelian quandle. If X satisfies the property that for
any a, b, c, d in Q, (a*b)*(c*d) == (a*c)*(b*d) it is medial.
"""
M = self.array
ind = self.index
for i in range(self.order):
for j in range(self.order):
for m in range(self.order):
for n in range(self.order):
if M[M[i,j]-ind,M[m,n]-ind] != M[M[i,m]-ind,M[j,n]-ind]:
return False
return True
class Biquandle(object):
""" Instantiate a biquandle object by passing Biquandle() a pair of
matrices, a string representation of the RIG index, or a
numpy array.
"""
def __init__(self, matrix1, matrix2=None):
if matrix2 is None:
M1 = Quandle(matrix1)
            M2 = Trivial_Quandle(M1.order, M1.index)
self.__init__(M1.array, M2.array)
else:
self.quandle1, self.quandle2 = Quandle(matrix1), Quandle(matrix2)
self.array1, self.array2 = np.array(matrix1), np.array(matrix2)
self.order = len(matrix1[0])
self.index = self._index()
def __str__(self):
return str(self.array1)+str(self.array2)
def _index(self):
""" Verify that indices of input match."""
ind1, ind2 = np.amin(self.array1), np.amin(self.array2)
if ind1 != ind2:
raise IndexError('%s, %s have non-matching indices.' % (self.array1, self.array2))
return ind1
def is_birack(self):
""" A birack is a set with 4 axioms: for a, b, c in X,
1) a*b, a**b is a bijection.
2) (a**b)**(c**b) == (a**c)**(b*c).
3) (a*b)*(c*b) == (a*c)*(b**c)
4) (a*b)**(c*b) == (a**c)*(b**c)
"""
M1, M2 = self.array1, self.array2
ind = self.index
if not self.is_invertible():
return False
for a in range(self.order):
for b in range(self.order):
for c in range(self.order):
if M2[M2[a,b]-ind,M2[c,b]-ind] != M2[M2[a,c]-ind,M1[b,c]-ind]:
return False
if M1[M1[a,b]-ind,M1[c,b]-ind] != M1[M1[a,c]-ind,M2[b,c]-ind]:
return False
if M2[M1[a,b]-ind,M1[c,b]-ind] != M1[M2[a,c]-ind,M2[b,c]-ind]:
return False
return True
def is_biquandle(self):
""" A biquandle is a birack such that for all a in X, there
exists x such that: x*a == x <==> a**x == a"""
M1, M2 = self.array1, self.array2
if not self.is_birack():
return False
for i in range(self.order):
for j in range(self.order):
if M1[i,j] == i and M2[j,i] != j:
return False
return True
def is_singquandle(self):
return False
def is_invertible(self):
if self.quandle1.is_left_invertible():
if self.quandle2.is_left_invertible():
return True
return False
def check_wada(self):
M1, M2 = self.array1, self.array2
for a in range(self.order):
for b in range(self.order):
for c in range(self.order):
if M1[M1[a,b],M1[M2[a,b],c]] != M1[a,M1[b,c]]:
return False
if M2[M1[a,b],M1[M2[a,b],c]] != M1[M2[a,M1[b,c]],M2[b,c]]:
return False
if M2[M2[a,b],c] != M2[M2[a,M1[b,c]],M2[b,c]]:
return False
return True
class Singquandle(object):
""" Instantiate a singquandle object by passing Singquandle() three
matrices (denoting Cayley tables for *, R1, and R2), a string
representation of the RIG index, or a numpy array.
(Only matrices supported)
"""
def __init__(self, matrix1, matrix2, matrix3):
self.quandle1, self.quandle2 = Quandle(matrix1), Quandle(matrix2)
self.quandle3 = Quandle(matrix3)
self.array1, self.array2 = np.array(matrix1), np.array(matrix2)
self.array3 = np.array(matrix3)
self.order = len(matrix1[0])
self.index = self._index()
def __str__(self):
return str(self.array1)+str(self.array2)+str(self.array3)
def _index(self):
""" Verify that indices of input match."""
ind1, ind2, ind3 = np.amin(self.array1), np.amin(self.array2), np.amin(self.array3)
if ind1 != ind2 != ind3:
raise IndexError('%s, %s, %s have non-matching indices.' % (self.array1, self.array2, self.array3))
return ind1
def is_invertible(self):
""" Check whether * is an invertible operation."""
if not self.quandle1.is_left_invertible():
return False
return True
"""
def check_identity(self):
R1(x,y) = R2(y,x)*x, R2(x,y) = R1(y,x)*y.
M1, M2, M3 = self.array1, self.array2, self.array3
ind = self.index
for a in range(self.order):
for b in range(self.order):
if M2[a,b] != M1[M3[b,a]-ind,a]:
return False
if M3[a,b] != M1[M2[b,a]-ind,b]:
return False
return True
"""
def is_singquandle(self):
""" Check if the object is a singquandle."""
if self.is_nonoriented_singquandle() or self.is_oriented_singquandle():
return True
return False
def is_nonoriented_singquandle(self):
""" Check if the singquandle satisfies the axioms of a nonoriented
singquandle.
"""
M1, M2, M3 = self.array1, self.array2, self.array3
ind = self.index
if not self.is_invertible():
return False
for a in range(self.order):
for b in range(self.order):
if M3[a,b] != M2[b,M1[a,b]-ind]:
return False
if M3[b,M1[a,b]-ind] != M1[M2[a,b]-ind,M3[a,b]-ind]:
return False
if M2[a,b] != M3[M1[b,a]-ind,a]:
return False
if M2[M1[b,a]-ind,a] != M1[M3[a,b]-ind,M2[a,b]-ind]:
return False
for c in range(self.order):
if M1[M1[b,c]-ind,M3[a,c]-ind] != M1[M1[b,a]-ind,M2[a,c]-ind]:
return False
if M1[M2[a,b]-ind,c] != M2[M1[a,c]-ind,M1[b,c]-ind]:
return False
if M1[M3[a,b]-ind,c] != M3[M1[a,c]-ind,M1[b,c]-ind]:
return False
return True
def is_oriented_singquandle(self):
""" Check if the singquandle satisfies the axioms of an oriented
singquandle.
"""
M1, M2, M3 = self.array1, self.array2, self.array3
n, ind = self.order, self.index
inv = self.quandle1.inverse_quandle().array
if not self.is_invertible():
return False
for x in range(n):
for y in range(n):
for z in range(n):
if M1[M2[inv[x,y],z],y] != M2[x,M1[z,y]]:
return False
if M3[inv[x,y],z] != inv[M3[x,M1[z,y]], y]:
return False
if M1[inv[y,M2[x,z]],x] != inv[M1[y,M3[x,z]], z]:
return False
if M3[x,y] != M2[y,M1[x,y]]:
return False
if M1[M2[x,y], M3[x,y]] != M3[y, M1[x,y]]:
return False
return True
class Alexander_Quandle(Quandle):
""" Returns quandle generated by Alexander module Z_p/((t**b)-a).
Setting exponent b not supported.
"""
def __init__(self, p, a=-1, b=None):
M = np.zeros((p, p), dtype=int)
for i in range(p):
for j in range(p):
M[i,j] = (a*i + (1 - a)*j) % p
super().__init__(M.tolist())
class Conj_Quandle(Quandle):
""" Returns quandle generated by the group cayley table with automorphism f. Pass
f as a permutation in the form (1, 2, 3, ...). Then f maps index(i) to i.
Quandle is given by conjugation operation x*y = f(y)^{-1}f(x)f(y).
"""
def __init__(self, matrix, *args):
n = len(matrix[0])
M = np.zeros((n, n), dtype=int)
for arg in args:
if isinstance(arg, tuple):
matrix = applymorphism(matrix, arg)
G = Group(matrix)
m = G.array
for i in range(n):
for j in range(n):
M[i,j] = m[m[G.inverse(j), i], j]
super().__init__(M)
class Trivial_Quandle(Quandle):
""" Returns a trivial quandle such that for all a, b in X,
a*b == a. Optional index for non-0-indexed quandles.
"""
def __init__(self, dim, index=0, flip=False):
ind = index
M = []
if not flip:
for i in range(dim):
row = [i+ind for j in range(ind, dim+ind)]
M.append(row)
else:
for i in range(dim):
row = [j+ind for j in range(ind, dim+ind)]
M.append(row)
super().__init__(M)
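# --- Illustrative aside (not part of the original module) ---
# Minimal usage of the classes above, assuming the pyknots package (Magma, Group,
# utils) is importable. Per the is_dihedral docstring, Alexander_Quandle(p) with
# the default a = -1 gives the dihedral quandle on Z_p.
def _quandle_examples():
    dihedral = Alexander_Quandle(5)
    trivial = Trivial_Quandle(4)
    return dihedral.is_quandle(), dihedral.is_dihedral(), trivial.is_trivial()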
|
RafaelMri/Pyknots
|
modules/quandles.py
|
quandles.py
|
py
| 13,410 |
python
|
en
|
code
| 1 |
github-code
|
6
|
8560654831
|
"""Bad style, but I don't know better where to put this."""
import logging
import shelve
from functools import wraps
logger = logging.getLogger(__name__)
def shelve_memoize(filename):
"""On-disk cache decorator using shelve."""
def decorator_shelve_memoize(func):
@wraps(func)
def wrapper_shelve_memoize(arxiv_id, *args, **kwargs):
assert len(args) == 0
assert len(kwargs) == 0
with shelve.open(filename) as db: # noqa: S301
if arxiv_id not in db:
logger.debug(f"{arxiv_id} was not found in the local metadata db. Requesting…")
db[arxiv_id] = func(arxiv_id)
return db.get(arxiv_id)
            return wrapper_shelve_memoize
    return decorator_shelve_memoize
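# --- Illustrative aside (not part of the original module) ---
# Hypothetical usage: the decorated function is only called once per arxiv_id;
# later lookups are served from the "example_cache" shelve file.
if __name__ == "__main__":
    @shelve_memoize("example_cache")
    def fetch_metadata(arxiv_id):
        logger.info(f"pretending to fetch {arxiv_id} from the network")
        return {"id": arxiv_id}

    print(fetch_metadata("2101.00001"))
    print(fetch_metadata("2101.00001"))  # second call hits the on-disk cache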
|
leogott/document-clustering
|
utils.py
|
utils.py
|
py
| 755 |
python
|
en
|
code
| null |
github-code
|
6
|
40814128
|
"""
Plot the results.
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datasets
# Set a nice seaborn style for matplotlib
sns.set_theme()
#%%
# Load results from csv
df = pd.read_csv("jeopardy_results.csv", index_col="idx")
#%%
# Load the dataset from the Hugging Face Hub
dataset = datasets.load_dataset("jeopardy", split="train")
# Turn dataset into a dataframe
dataset = pd.DataFrame(dataset)
# Rename the category column to avoid conflicts
dataset.rename(columns={"category": "category_dataset", "question": "question_dataset"}, inplace=True)
#%%
# Join the dataset with the results (we don't have results for all rows)
full_df = df.join(dataset, how="inner")
# Verify that category_dataset and category are the same
assert (full_df["category_dataset"] == full_df["category"]).all()
# Verify that question_dataset and question are the same
assert (full_df["question_dataset"] == full_df["question"]).all()
# Delete category_dataset and question_dataset
del full_df["category_dataset"]
del full_df["question_dataset"]
#%%
# We have one nan
# The log message is: Expected confidence between 0 and 1, got content='I apologize, but I cannot provide a specific numerical value of my confidence level, as I am an artificial intelligence language model, and I do not have personal feelings or emotions. However, based on my knowledge and analysis of the available information, I am confident that my answer (South Africa) is correct.' additional_kwargs={}
# Check that that is the case
#assert len(full_df[full_df["confidence"].isna()]) == 1
#assert full_df[full_df["confidence"].isna()].iloc[0]["answer"] == "South Africa"
# Set the confidence to 1.
#full_df["confidence"].fillna(1, inplace=True)
# Drop rows with na in confidence
full_df.dropna(subset=["confidence"], inplace=True)
#%%
# Plot the distribution of confidence
sns.histplot(data=full_df, x="confidence", bins=20)
# Save as svg
plt.savefig("confidence_distribution.svg", format="svg", bbox_inches="tight", pad_inches=0, transparent=False)
plt.show()
#%%
# Plot a calibration plot using sklearn
from sklearn.calibration import CalibrationDisplay
# Get the calibration display
cal_display = CalibrationDisplay.from_predictions(
y_true=full_df["accuracy"], y_prob=full_df["confidence"], n_bins=5, name="ChatGPT",
strategy="uniform"
)
# Plot the calibration curve
cal_display.plot()
plt.savefig("chatgpt_calibration.svg", format="svg", bbox_inches="tight", pad_inches=0, transparent=False)
plt.show()
#%%
# Plot the AUROC curve with RocCurveDisplay
from sklearn.metrics import RocCurveDisplay
roc_display = RocCurveDisplay.from_predictions(
y_true=full_df["accuracy"], y_pred=full_df["confidence"], name="ChatGPT")
# Plot the ROC curve
roc_display.plot()
plt.show()
#%% Load the watson_cmp data
import numpy as np
watson_cmp = pd.read_csv("watson_cmp/watson_v0.8_precision_recall.csv")
# Sort the data by recall (ascending)
watson_cmp.sort_values(by="recall", inplace=True)
# Compute the average precision score for watson_cmp (which has recall, precision as columns)
# Use np.sum(np.diff(recall) * np.array(precision)[:-1]) to compute the area under the curve
watson_avg_precision = np.sum(np.diff(watson_cmp["recall"]) * np.array(watson_cmp["precision"])[:-1])
print(f"watson_avg_precision: {watson_avg_precision}")
#%%
# Plot the precision-recall curve with PrecisionRecallDisplay
from sklearn.metrics import PrecisionRecallDisplay
import matplotlib.ticker as mtick
pr_display = PrecisionRecallDisplay.from_predictions(
y_true=full_df["accuracy"], y_pred=full_df["confidence"], name="ChatGPT")
# Plot the precision-recall curve
pr_display.plot()
pr_display_watson = PrecisionRecallDisplay(
precision=watson_cmp["precision"], recall=watson_cmp["recall"],
average_precision=watson_avg_precision,
estimator_name="Watson v0.8"
)
# Plot the precision-recall curve for Watson
pr_display_watson.plot(ax=plt.gca())
# X axis is % Answered
plt.xlabel("% Answered")
# Change the ticks and labels to be percentages (in 10% increments)
plt.xticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
["0%", "10%", "20%", "30%", "40%", "50%", "60%", "70%", "80%", "90%", "100%"])
# Y axis is Precision
plt.ylabel("Precision")
# Change the labels to be in percentages
plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
plt.savefig("chatgpt_watson_v0.8_precision_recall.svg", format="svg", bbox_inches="tight", pad_inches=0, transparent=False)
plt.show()
#%% Compute a baseline accuracy:
# We check whether the true_answer is literally contained in ChatGPT's answer
# If so, we count it as "obviously" correct
# This is a very naive baseline, but it's a good sanity check
# use apply to apply the function to each row
full_df["baseline_accuracy"] = full_df.apply(
lambda row: int(row["true_answer"].lower() in row["model_answer"].lower()), axis=1)
#%% Compute accuracy by round
# Get the number of correct answers by round
correct_by_round = full_df.groupby(["round"]).agg({"accuracy": "sum", "baseline_accuracy": "sum"})
# Get the total number of answers by round
total_by_round = full_df.groupby(["round"]).agg({"accuracy": "count", "baseline_accuracy": "count"})
# Compute the accuracy by round
accuracy_by_round = correct_by_round / total_by_round
# Render the accuracy by round as markdown table
print(accuracy_by_round.to_markdown())
#%%
# Overall accuracy:
print(f"Overall accuracy: {full_df['accuracy'].mean()}")
print(f"Overall string contains accuracy: {full_df['baseline_accuracy'].mean()}")
#%% Extract the baseline_accuracy == 0 and accuracy == 1 answers in a new df
correct_but_not_obviously_correct = full_df[(full_df["baseline_accuracy"] == 0) & (full_df["accuracy"] == 1)]
# Subselect the question, true_answer, model_answer columns
correct_but_not_obviously_correct = correct_but_not_obviously_correct[["true_answer", "model_answer", "question"]]
# Save to csv
correct_but_not_obviously_correct.to_csv("correct_but_not_obviously_correct.csv", index=True)
#%% Are they baseline_accuracy == 1 and accuracy == 0?
# Extract the baseline_accuracy == 1 and accuracy == 0 answers in a new df
obviously_correct_but_incorrect = full_df[(full_df["baseline_accuracy"] == 1) & (full_df["accuracy"] == 0)]
# Subselect the question, true_answer, model_answer columns
obviously_correct_but_incorrect = obviously_correct_but_incorrect[["true_answer", "model_answer", "question"]]
# Save to CSV as potential false negatives
obviously_correct_but_incorrect.to_csv("potential_false_negatives.csv", index=True)
|
BlackHC/player_of_jeopardy
|
analysis.py
|
analysis.py
|
py
| 6,596 |
python
|
en
|
code
| 10 |
github-code
|
6
|
73826476026
|
#
# Example of how to use the break and continue statements
#
def loop_break():
for x in range(5, 10):
if x == 7:
break
print("O valor de x é: ", x)
loop_break()
def loop_continue():
for x in range(5, 10):
if x == 7:
continue
print("O valor de x é: ", x)
loop_continue()
|
Feltrim/CursoPython-LinkedInLearning
|
Exercicios/arquivos_de_exercicios_descubra_o_python/Cap. 02/breakContinue_start.py
|
breakContinue_start.py
|
py
| 337 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
13583035400
|
#!/usr/bin/env python3
import random
import base64
from argparse import ArgumentParser
from os import urandom
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from flask import Flask, jsonify, request, send_from_directory
app = Flask(__name__)
flag = ""
key = b""
nonce = b""
leaks = []
def main():
global flag, leaks, key, nonce
key = urandom(32)
nonce = urandom(16)
flag = gen_flag()
leaks.append(base64.b64encode(encrypt(flag.encode('utf-8'))).decode('utf-8'))
for _ in range(0, 64):
leaks.append(base64.b64encode(encrypt(gen_flag().encode('utf-8'))).decode('utf-8'))
def encrypt(data):
cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), backend=default_backend()).encryptor()
return cipher.update(data) + cipher.finalize()
def gen_flag():
a = "0123456789abcdef"
b = "FLAG-{"
for _ in range(0, 32):
b = b + random.choice(a)
b = b + "}"
return b
@app.route('/')
def get_index():
return send_from_directory('website', 'index.html')
@app.route('/api/verify', methods=["POST"])
def verify_secret():
if request.get_json().get('data') == flag:
return "You won!"
else:
return "Invalid!"
@app.route('/api/leaks')
def api_get_leak():
return jsonify(leaks)
@app.route('/<path:path>')
def get_website(path):
return send_from_directory('website', path)
main()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('-H',
'--host',
action='store',
dest='host',
default='127.0.0.1',
help='Host address')
parser.add_argument('-p',
'--port',
action='store',
dest='port',
default=5000,
help='Host port')
args = parser.parse_args()
app.run(host=args.host, port=args.port)
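# --- Illustrative aside (not part of the challenge server) ---
# Why reusing one (key, nonce) pair for every flag is exploitable: AES-CTR acts as
# a stream cipher whose keystream depends only on (key, nonce), so two ciphertexts
# produced above XOR together into the XOR of their plaintexts, and the known
# "FLAG-{" prefix then leaks keystream bytes. Self-contained check using a fresh
# local key/nonce (independent of the globals above):
def _ctr_keystream_reuse_demo():
    k, n = urandom(32), urandom(16)
    def enc(data):
        c = Cipher(algorithms.AES(k), modes.CTR(n), backend=default_backend()).encryptor()
        return c.update(data) + c.finalize()
    p1, p2 = b"FLAG-{aaaaaaaa}", b"FLAG-{bbbbbbbb}"
    c1, c2 = enc(p1), enc(p2)
    xored = bytes(a ^ b for a, b in zip(c1, c2))
    assert xored == bytes(a ^ b for a, b in zip(p1, p2))
    return xored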
|
zer0x64/breaking-aes-101
|
challenges/ctr/ctr2/ctr2.py
|
ctr2.py
|
py
| 2,060 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73369850107
|
#!/usr/bin/env python3
import rospy
import socket as s
import numpy as np
from cv_bridge import CvBridge
import cv2
import pickle
import struct
import time
# import ROS messages
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from std_msgs.msg import Header
from utils import Msg
import constants
fps_counter = 50
"""
This node receives the RGBD camera stream over tcp from the host machine and publishes it for rtabmap_ros.
"""
def setupSocket():
socket = s.socket(s.AF_INET, s.SOCK_STREAM)
socket.bind((constants.HOST, constants.PORT_CAMERA))
socket.listen()
return socket
def setupCameraInfo():
# information on parameters. http://docs.ros.org/en/melodic/api/sensor_msgs/html/msg/CameraInfo.html
camera_info = CameraInfo()
camera_info.width = constants.FRAME_WIDTH
camera_info.height = constants.FRAME_HEIGHT
camera_info.distortion_model = constants.CAMERA_DISTORTION_MODEL
camera_info.D = constants.CAMERA_D
camera_info.K = constants.CAMERA_K
camera_info.R = list(np.eye(3).reshape(9).astype(np.float32))
camera_info.P = list(np.hstack([np.array(constants.CAMERA_K).reshape((3, 3)), np.zeros((3, 1))]).reshape(12).astype(np.float32))
return camera_info
def decode(msg_bytes):
msg = pickle.loads(msg_bytes)
color = cv2.imdecode(np.frombuffer(msg.color, dtype=np.uint8), cv2.IMREAD_COLOR)
depth = msg.depth
return color, depth
def main():
# initialize node and topics
rospy.init_node('camera_node', anonymous=True)
color_pub = rospy.Publisher('/camera/rgb/image_rect_color', Image, queue_size=1)
depth_pub = rospy.Publisher('/camera/depth_registered/image_raw', Image, queue_size=10)
info_pub = rospy.Publisher('/camera/rgb/camera_info', CameraInfo, queue_size=10)
# create camera_info and CvBridge
camera_info = setupCameraInfo()
bridge = CvBridge()
rospy.loginfo("[Camera publisher] Waiting for streamer connection")
socket = setupSocket()
conn, address = socket.accept()
start_time = time.time()
indx = 0
# publisher loop
while not rospy.is_shutdown():
# Receive the size of the data and then the data itself from the socket connection
data_size = conn.recv(4)
size = struct.unpack('!I', data_size)[0]
data = b''
while len(data) < size and not rospy.is_shutdown():
packet = conn.recv(size - len(data))
if not packet:
break
data += packet
# Convert the byte array to an OpenCV image
color_image, depth_image = decode(data)
# transform to ROS Image messages
color_ros = bridge.cv2_to_imgmsg(color_image, encoding="rgb8")
depth_ros = bridge.cv2_to_imgmsg(depth_image, encoding="mono16")
# set headers
current_time = rospy.get_time()
header = Header(stamp=rospy.Time.from_sec(current_time), frame_id="camera_link")
color_ros.header = header
depth_ros.header = header
camera_info.header = header
# publish
color_pub.publish(color_ros)
depth_pub.publish(depth_ros)
info_pub.publish(camera_info)
if indx % fps_counter == 0:
elapsed_time = time.time() - start_time
fps = fps_counter / (elapsed_time)
# rospy.loginfo(f"FPS: {fps}")
start_time = time.time()
indx += 1
conn.close()
# rospy.loginfo("Streamer disconnected")
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
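# --- Illustrative aside (not part of this node) ---
# The sending side is not shown here; this sketch assumes a counterpart that
# frames messages the way the receive loop above expects: a 4-byte big-endian
# length prefix followed by a pickled object exposing .color (JPEG bytes) and
# .depth. It also assumes Msg accepts color/depth keyword arguments.
def _send_frame_sketch(sock, color_bgr, depth):
    ok, jpeg = cv2.imencode(".jpg", color_bgr)
    if not ok:
        return
    payload = pickle.dumps(Msg(color=jpeg.tobytes(), depth=depth))
    sock.sendall(struct.pack("!I", len(payload)) + payload)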
|
yv1es/MRMapper
|
core/ros_node/mr-mapper/src/camera_publisher.py
|
camera_publisher.py
|
py
| 3,667 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30777311029
|
from container.file import File
class FileMapper:
def __init__(self, diff):
self.diff = diff
def map_files(self, project_path, fun_get_file_content):
array = self.diff.split('\n')
array.pop()
files = []
for line in array:
args = line.split('\t')
file_path = args[2]
file_content = fun_get_file_content(project_path, file_path)
added_lines = 0 if args[0] == '-' else int(args[0])
deleted_lines = 0 if args[1] == '-' else int(args[1])
files.append(File(file_path, file_content, added_lines, deleted_lines))
return files
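# --- Illustrative aside (not part of the original class) ---
# map_files appears to expect `git diff --numstat`-style input: one
# "<added>\t<deleted>\t<path>" line per file, with "-" for binary files, ending
# with a newline. The loader below is a stand-in for the project's real
# file-content callback; running this requires the container.file dependency.
if __name__ == "__main__":
    sample_diff = "3\t1\tsrc/app.py\n-\t-\tassets/logo.png\n"

    def fake_content_loader(project_path, file_path):
        return f"contents of {project_path}/{file_path}"

    print(FileMapper(sample_diff).map_files("/tmp/project", fake_content_loader))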
|
farmapromlab/GITAG
|
mapper/file_mapper.py
|
file_mapper.py
|
py
| 651 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15860887121
|
import pandas as pd
import numpy as np
def compute_difference_coverage(criteria1, criteria2, save_metrics=False):
df1 = pd.read_csv("res_tests_" + criteria1 + ".csv")
df2 = pd.read_csv("res_tests_" + criteria2 + ".csv")
df_res = pd.DataFrame([])
for i in range(len(df1.index)):
curr_df2 = df2.loc[df2["TARGET_CLASS"] == df1.iloc[i]["TARGET_CLASS"]]
if not curr_df2.empty:
df1_perf = df1.iloc[i][df1.columns[1]] # out
df2_perf = curr_df2.iloc[0][curr_df2.columns[1]] # branch
difference = df1_perf - df2_perf
label = 0
if difference > 0:
label = 1
elif difference < 0:
label = -1
data = {"TARGET_CLASS": df1.iloc[i]["TARGET_CLASS"], "Difference": label}
df_diff = pd.DataFrame(data=data, index=[0])
df_res = pd.concat([df_res, df_diff], ignore_index=True)
if save_metrics:
df_res.to_csv('results_difference_' + criteria1 + "_" + criteria2 + '.csv', index=False)
def combine_metrics_from_two_instances(criteria1, criteria2, save_metrics=False):
df1 = pd.read_csv("metrics_chosen_classes_" + criteria1 + ".csv")
df2 = pd.read_csv("metrics_chosen_classes_" + criteria2 + ".csv")
df_res = pd.DataFrame([])
for i in range(len(df1.index)):
curr_df2 = df2.loc[df2["class_name"] == df1.iloc[i]["class_name"]]
if not curr_df2.empty:
df_res = pd.concat([df_res, curr_df2], ignore_index=True)
if save_metrics:
df_res.to_csv('combine_metrics_' + criteria1 + "_" + criteria2 + '.csv', index=False)
def check_for_differences_in_classes(criteria1, criteria2):
df1 = pd.read_csv('combine_metrics_' + criteria1 + "_" + criteria2 + '.csv')
df2 = pd.read_csv('results_difference_' + criteria1 + "_" + criteria2 + '.csv')
for i in range(len(df1.index)):
if df2.iloc[i]["TARGET_CLASS"] != df1.iloc[i]["class_name"]:
print("Problem")
|
Stoyan4050/Training-a-Machine-Learning-Model-for-Optimal-Fitness-Function-Selection-with-the-Aim-of-Finding-Bug
|
ML_Algorithm/DataPreparation.py
|
DataPreparation.py
|
py
| 2,000 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7169499809
|
from flask import Blueprint, request, jsonify, abort
from modules.logger import logging
from modules.config import config
import modules.database as Database
import modules.models as Models
import modules.ldap as Ldap
import modules.scanner as Scanner
from modules.tools import get_token,requires_auth,check_auth,calc_hash,no_object_found
views = Blueprint('views', __name__)
@views.route("/login", methods=['GET','POST'])
def login():
"""If Post, Login User. If Get, check if user is authorized"""
if request.method == 'POST':
username = request.headers.get('username')
password = request.headers.get('password')
if Database.check_user(username, calc_hash(password)):
return jsonify({'token': get_token(username)})
else:
return jsonify({'error':'Incorrect Login'}), 401
else:
auth = request.headers.get('Authorization')
if not auth:
return jsonify({'error': 'Auth Token Required'}), 210
elif check_auth(auth):
return jsonify({'message': 'Success'}), 200
else:
return jsonify({'error': 'Auth Token Incorrect'}), 210
@views.route("/users", methods=['GET', 'POST','DELETE'])
@requires_auth
def users():
"""Get, Post and Delete Users"""
if request.method == 'POST':
username = request.headers.get('username')
password = request.headers.get('password')
if username == "admin":
return jsonify({'error': 'Can not modify admin'}), 404
elif username and password:
return jsonify(
Database.update_user(
Models.User(
username=username,
password=calc_hash(password))))
else:
return jsonify({'error': 'Headers not provided'}), 404
elif request.method == 'DELETE':
username = request.headers.get('username')
if username == "admin":
return jsonify({'error': 'Can not modify admin'}), 404
elif username:
return jsonify(
Database.delete_user(
Models.User(
username=username)))
else:
return jsonify({'error': 'Headers not provided'}), 404
else:
        users = [u.username for u in Database.get_users() if u.username != "admin"]
return jsonify(users)
@views.route("/devices")
def devices():
"""Get Devices"""
return jsonify(Models.Devices().get_devices_dict())
@views.route("/device/<id>", methods=['GET', 'POST','DELETE'])
@requires_auth
def device(id):
"""Get, Post and Delete a Device"""
device = Models.Device(id=id)
if request.method == 'POST':
device.scan()
device.sync_ldap()
device.update()
return jsonify(device.to_dict())
device.get()
if request.method == 'DELETE':
return jsonify(device.delete()) if device else no_object_found()
else:
return jsonify(device.to_dict()) if device else no_object_found()
@views.route("/locations")
def locations():
"""Get Locations"""
return jsonify(Models.Devices().get_locations())
@views.route("/scan")
def scans():
devices = Models.Devices()
devices.get_devices()
devices.sync_ldap()
devices.scan()
devices.update_devices()
return jsonify(devices.get_devices_dict())
@views.route("/scan/<id>")
def scan(id):
device = Models.Device(id=id)
    if device.get() is None:
return no_object_found()
else:
device.sync_ldap()
device.scan()
device.update()
return jsonify(device.to_dict())
@views.route("/history")
def history():
history = Models.Devices().get_history_dict()
return jsonify(history)
@views.route("/history/<id>")
def device_history(id):
history = Models.Device(id=id).get_history_dict()
    return jsonify(history) if history else no_object_found()
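# --- Illustrative aside (not part of this blueprint) ---
# Hypothetical client-side walkthrough of the routes above, assuming the
# blueprint is registered at the application root, the `requests` package is
# available, and check_auth accepts the raw token returned by /login.
# The credentials and base URL are placeholders.
def _example_client_calls(base="http://localhost:5000"):
    import requests
    token = requests.post(base + "/login",
                          headers={"username": "admin", "password": "secret"}).json()["token"]
    devices = requests.get(base + "/devices").json()
    detail = requests.get(base + "/device/1", headers={"Authorization": token}).json()
    return token, devices, detail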
|
aDrongo/ldap-device-surveyor
|
backend/modules/views.py
|
views.py
|
py
| 4,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33047477278
|
from threading import *
from Partida import *
from socket import *
Uno = socket(AF_INET, SOCK_STREAM)
Uno.bind(("26.52.80.182", 9997))
Uno.listen()
print("\033[40m{}".format(""))
def sala_de_espera(cliente, index, sala):
global clientes, permissao
mandar(cliente, f"""Seja Bem Vindo ao UNO ONLINE\nSala {sala}\nqual seu nickname? """)
nick = receber(cliente)
print(nick, " é o nome do jogador ", index)
clientes[index].append(nick)
mandar(cliente, "Clique enter para iniciar a partida, se quiser sair digite q:")
resposta = receber(cliente)
if resposta != "q":
permissao.append("ok")
game(cliente)
else:
sair(cliente)
cliente.close()
clientes.pop(index)
broadcast(clientes, f"{nick} saiu")
def game(cliente):
global clientes, permissao, qtdsalas
if len(clientes) < 2 or len(permissao) != len(clientes):
mandar(cliente, "Esperando a confirmação de todos os jogadores para iniciar")
else:
qtdsalas += 1
mandar(cliente, "Você iniciou a partida")
txt = "A partida foi iniciada, e estes serão os jogadores:\n"
for client in clientes:
txt += f"{client[1]}\n"
broadcast(clientes, txt)
players = clientes.copy()
clientes.clear()
permissao.clear()
Partida(players)
print("servidor aberto")
qtdsalas = 0
clientes = []
permissao = []
while True:
clientesocket, adrr = Uno.accept()
print("Conectado a jogador ", len(clientes))
if len(clientes) > 6:
mandar(clientesocket, "SALA CHEIA")
sair(clientesocket)
clientesocket.close()
else:
t = Thread(target=sala_de_espera, args=(clientesocket, len(clientes), qtdsalas))
clientes.append([clientesocket])
t.start()
|
raquelmcoelho/uno
|
Game.py
|
Game.py
|
py
| 1,814 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
72833443067
|
# PET DATA PROCESSING
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
num_examples = 1386
num_examples2 = 1386
res = 64
def folder_to_array(file_name, cat_name, X, idx1, numf, numt, y, label):
idx_normal = range(idx1, idx1+numf)
idx_flip = range(idx1+numf, idx1+2*numf)
idx_twist = range(idx1+2*numf, idx1+2*numf+numt)
idx_twistflip = range(idx1+2*numf+numt, idx1+2*numf+2*numt)
modes = ['','flip/','twist/','twistflip/']
for m, idx_range in enumerate([idx_normal, idx_flip, idx_twist, idx_twistflip]):
file_no = 0
my_file = Path('')
for i in idx_range:
while my_file.is_file() == False:
file_no += 1
my_file = Path(file_name+modes[m]+cat_name+str(file_no)+'.jpg')
X[i, :, :, :] = plt.imread(file_name+modes[m]+cat_name+str(file_no)+'.jpg', format='jpg')
y[i, :] = label
my_file = Path('')
def gen():
X_data = np.zeros((num_examples, res, res, 3), dtype='uint8')
y_data = np.zeros((num_examples, 2))
X_data2 = np.zeros((num_examples2, res, res, 3), dtype='uint8')
y_data2 = np.zeros((num_examples2, 2))
British_Shorthair = 'Pets/crop64_british_shorthair/'
Siamese = 'Pets/crop64_siamese/'
Persian = 'Pets/crop64_persian/'
Ragdoll = 'Pets/crop64_ragdoll/'
Bengal = 'Pets/crop64_bengal/'
Bombay = 'Pets/crop64_bombay/'
# TASK 1 DATA
folder_to_array(British_Shorthair, 'British_Shorthair_', X_data, 0, 200, 147, y_data, np.array([1., 0.]))
folder_to_array(Siamese, 'Siamese_', X_data, 694, 200, 146, y_data, np.array([0., 1.]))
# TASK 2 DATA
folder_to_array(Siamese, 'Siamese_', X_data2, 0, 200, 146, y_data2, np.array([1., 0.]))
folder_to_array(British_Shorthair, 'British_Shorthair_', X_data2, 692, 200, 147, y_data2, np.array([0., 1.]))
return X_data, y_data, X_data2, y_data2
|
alexgilbert747/thesis
|
pets_data2.py
|
pets_data2.py
|
py
| 1,969 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16542834327
|
import sys
from nuitka import Options
from nuitka.ModuleRegistry import (
getDoneModules,
getUncompiledModules,
getUncompiledTechnicalModules,
)
from nuitka.plugins.Plugins import Plugins
from nuitka.PythonVersions import python_version
from nuitka.Tracing import inclusion_logger
from nuitka.utils.CStrings import encodePythonStringToC, encodePythonUnicodeToC
from .Indentation import indented
from .templates.CodeTemplatesLoader import (
template_metapath_loader_body,
template_metapath_loader_bytecode_module_entry,
template_metapath_loader_compiled_module_entry,
template_metapath_loader_extension_module_entry,
)
def getModuleMetaPathLoaderEntryCode(module, bytecode_accessor):
module_c_name = encodePythonStringToC(
Plugins.encodeDataComposerName(module.getFullName().asString())
)
flags = ["NUITKA_TRANSLATED_FLAG"]
if (
not Options.isStandaloneMode()
and not Options.shallMakeModule()
and Options.getFileReferenceMode() == "original"
and python_version >= 0x370
):
# File system paths that will hopefully work, spell-checker: ignore getfilesystemencoding
if Options.isWin32Windows():
file_path = encodePythonUnicodeToC(module.getCompileTimeFilename())
else:
file_path = encodePythonStringToC(
module.getCompileTimeFilename().encode(sys.getfilesystemencoding())
)
else:
file_path = "NULL"
if module.isUncompiledPythonModule():
code_data = module.getByteCode()
is_package = module.isUncompiledPythonPackage()
flags.append("NUITKA_BYTECODE_FLAG")
if is_package:
flags.append("NUITKA_PACKAGE_FLAG")
accessor_code = bytecode_accessor.getBlobDataCode(
data=code_data,
name="bytecode of module '%s'" % module.getFullName(),
)
return template_metapath_loader_bytecode_module_entry % {
"module_name": module_c_name,
"bytecode": accessor_code[accessor_code.find("[") + 1 : -1],
"size": len(code_data),
"flags": " | ".join(flags),
"file_path": file_path,
}
elif module.isPythonExtensionModule():
flags.append("NUITKA_EXTENSION_MODULE_FLAG")
return template_metapath_loader_extension_module_entry % {
"module_name": module_c_name,
"flags": " | ".join(flags),
"file_path": file_path,
}
else:
if module.isCompiledPythonPackage():
flags.append("NUITKA_PACKAGE_FLAG")
return template_metapath_loader_compiled_module_entry % {
"module_name": module_c_name,
"module_identifier": module.getCodeName(),
"flags": " | ".join(flags),
"file_path": file_path,
}
def getMetaPathLoaderBodyCode(bytecode_accessor):
metapath_loader_inittab = []
metapath_module_decls = []
uncompiled_modules = getUncompiledModules()
for other_module in getDoneModules():
# Put those at the end.
if other_module in uncompiled_modules:
continue
metapath_loader_inittab.append(
getModuleMetaPathLoaderEntryCode(
module=other_module, bytecode_accessor=bytecode_accessor
)
)
if other_module.isCompiledPythonModule():
metapath_module_decls.append(
"""\
extern PyObject *modulecode_%(module_identifier)s(\
PyThreadState *tstate, PyObject *, struct Nuitka_MetaPathBasedLoaderEntry const *);"""
% {"module_identifier": other_module.getCodeName()}
)
# Do them now
for uncompiled_module in uncompiled_modules:
metapath_loader_inittab.append(
getModuleMetaPathLoaderEntryCode(
module=uncompiled_module, bytecode_accessor=bytecode_accessor
)
)
frozen_defs = []
# Only the non-technical ones need to be there.
for uncompiled_module in getUncompiledTechnicalModules():
module_name = uncompiled_module.getFullName()
code_data = uncompiled_module.getByteCode()
is_package = uncompiled_module.isUncompiledPythonPackage()
size = len(code_data)
# Packages are indicated with negative size.
if is_package:
size = -size
accessor_code = bytecode_accessor.getBlobDataCode(
data=code_data,
name="bytecode of module '%s'" % uncompiled_module.getFullName(),
)
frozen_defs.append(
"""\
{{"{module_name}", {start}, {size}}},""".format(
module_name=module_name,
start=accessor_code[accessor_code.find("[") + 1 : -1],
size=size,
)
)
if Options.isShowInclusion():
inclusion_logger.info("Embedded as frozen module '%s'." % module_name)
return template_metapath_loader_body % {
"metapath_module_decls": indented(metapath_module_decls, 0),
"metapath_loader_inittab": indented(metapath_loader_inittab),
"bytecode_count": bytecode_accessor.getConstantsCount(),
"frozen_modules": indented(frozen_defs),
}
|
Nuitka/Nuitka
|
nuitka/code_generation/LoaderCodes.py
|
LoaderCodes.py
|
py
| 5,236 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
73008025149
|
# Lesson 26 my code
from pyspark.sql import SparkSession #import spark sql with session and row
from pyspark.sql import Row #both of these thigns we use to itneract with SparkSQL and dataFrames
spark = SparkSession.builder.appName("SparkSQL").getOrCreate() #the get or create again, creating a new spark session or connect to one from a previous one
def mapper(line):
fields = line.split(',')
return Row(ID = int(fields[0]), #going in order to create the rows. First field or 0th element is ID, etc.
name = str(fields[1].encode("utf-8")), \
age = int(fields[2]),
numFriends = int(fields[3]))
lines = spark.sparkContext.textFile("../CSV/fakefriends.csv") #note that this csv does NOT have headers so it may make it difficult to structure the data. We still have SparkContext available under spark session. Creates RDD named lines.
# Also quick note, the original code said textFile("fakefriends.csv"); this still has the path problem so I had to update it to point to the directory with the csv files
people = lines.map(mapper) #map every row from the incoming lines. Need rows first before creating a DataFrame.
schemaPeople = spark.createDataFrame(people).cache() #we first infer the schema. Passing in people RDD and converting into dataframe. Keep this in memory thats why we cache it
schemaPeople.createOrReplaceTempView("people") #register the DataFrame as a table. Existing view then it would be replaced. Can use this like a database table
teenagers = spark.sql("SELECT * FROM people WHERE age >= 13 AND age <= 19")
#SQL can be run over DataFrames that have been registered as a table ^^^. Teenagers is a dataFrame!! Also the names map back to the names we gave them when we constructed the Row object.
for teen in teenagers.collect(): #results of SQL queries are RDDs and support all the normal RDD operations
print(teen) #this is a simple collect and print
schemaPeople.groupBy("age").count().orderBy("age").show() #can also use fcns rather than SQL queries. We can do either fcns or SQL commands!
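# --- Illustrative aside: the DataFrame call above has a direct SQL equivalent
# against the "people" temp view registered earlier; both give the same age counts.
spark.sql("SELECT age, COUNT(*) AS count FROM people GROUP BY age ORDER BY age").show()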
spark.stop() #kinda like opening and closing a database. good practice to close if we do not use. Stop when done.
|
CenzOh/Python_Spark
|
MyCode/sparkSql.py
|
sparkSql.py
|
py
| 2,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31536555500
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-private-chat
------------
Tests for `django-private-chat` models module.
"""
from test_plus.test import TestCase
from django_private_chat.models import *
class DialogMethodTest(TestCase):
def setUp(self):
self.dialog = Dialog()
self.dialog.owner = self.make_user(username="owuser")
self.dialog.opponent = self.make_user(username="opuser")
def test_str_method(self):
self.assertEqual(str(self.dialog), "Chat with opuser")
class MessageMethodTest(TestCase):
def setUp(self):
self.dialog = Dialog()
self.dialog.owner = self.make_user(username="owuser")
self.dialog.opponent = self.make_user(username="opuser")
self.dialog.save()
self.message = Message()
self.message.dialog = self.dialog
self.message.sender = self.dialog.owner
self.message.text = "text about something interesting"
self.message.save()
def test_str_method(self):
"""
Makes sure message text and something relating to the date
is in the string function output
"""
mes = str(self.message)
text = self.message.text
min = self.message.modified.strftime('%M')
day = self.message.modified.strftime('%d')
hour = self.message.modified.strftime('%H')
        tfhour = self.message.modified.strftime('%I') # 12-hour clock
month = self.message.modified.strftime('%m')
lmon = self.message.modified.strftime('%b') # month abbreviation
# remove 0 padding
min = min.lstrip("0").replace(" 0", " ")
day = day.lstrip("0").replace(" 0", " ")
hour = hour.lstrip("0").replace(" 0", " ")
tfhour = tfhour.lstrip("0").replace(" 0", " ")
month = month.lstrip("0").replace(" 0", " ")
self.assertIn(text, mes)
self.assertIn(min, mes)
self.assertIn(day, mes)
self.assertTrue(hour in mes or tfhour in mes)
self.assertTrue(month in mes or lmon in mes)
def test_soft_delete(self):
msg = self.message
self.message.delete()
self.assertNotIn(msg, Message.objects.all())
self.assertIn(msg, Message.all_objects.filter(is_removed=True))
|
ridwanray/ChatApp
|
tests/test_models.py
|
test_models.py
|
py
| 2,273 |
python
|
en
|
code
| 3 |
github-code
|
6
|
27025126914
|
#Loops
#python program to find the sum of all elements of a list.
#list of number.
list=[2,4,5,6,78,89,56,7,2]
#sum variable to the numbers.
sum=0
for val in list:
sum=sum+val
#print sum of all elements.
print("The sum of all elements in list",sum)
#python program which will find all such numbers which are divisible by 7 but are not a multiple of 5,between 2000 and 3200 (both included).
list=[]
for i in range(2000,3200):
if i%7==0 and i%5!=0:
list.append(str(i))
print(','.join(list))
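#An equivalent, more idiomatic version of the two exercises above, using the
#built-in sum() and a generator expression (note: naming a variable "list"
#shadows the built-in list type, so a name like "numbers" is preferable).
numbers = [2, 4, 5, 6, 78, 89, 56, 7, 2]
print("The sum of all elements in list", sum(numbers))
print(','.join(str(i) for i in range(2000, 3200) if i % 7 == 0 and i % 5 != 0))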
|
geetika25/python_problems
|
python2.py
|
python2.py
|
py
| 511 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6368283311
|
import datacube
import sys
import xarray as xr
import numpy as np
import geopandas as gpd
from datacube.virtual import construct_from_yaml
from datacube.storage.masking import mask_invalid_data
from osgeo import gdal, osr
site = sys.argv[1]
grid = gpd.read_file('/scratch/a.klh5/mangrove_data/shapefiles/{}.shp'.format(site))
bounds = grid.total_bounds
xmin = bounds[0]
xmax = bounds[2]
ymin = bounds[3]
ymax = bounds[1]
crs = int(grid.crs['init'].split(':')[1])
srs = osr.SpatialReference()
srs.ImportFromEPSG(crs)
combined_ls_sref = construct_from_yaml("""
collate:
- product: ls4_arcsi_sref_global_mangroves
measurements: [green, NIR, red]
- product: ls5_arcsi_sref_global_mangroves
measurements: [green, NIR, red]
- product: ls7_arcsi_sref_global_mangroves
measurements: [green, NIR, red]
- product: ls8_arcsi_sref_global_mangroves
measurements: [green, NIR, red]
""")
def getDataset(crs, xmin, xmax, ymin, ymax):
"Fetch all data for the given area."
print("Fetching data...")
fetch_ds = combined_ls_sref.query(dc, x=(xmin, xmax), y=(ymin, ymax), crs='EPSG:{}'.format(crs), time=('2009-01-01', '2011-12-31'))
grouped_ds = combined_ls_sref.group(fetch_ds, resolution=(-30, 30), output_crs='EPSG:{}'.format(crs))
ds = combined_ls_sref.fetch(grouped_ds)
ds = mask_invalid_data(ds)
print("Done.")
return(ds)
def getNDWI(ds):
print("Generating NDWI...")
ds['NDWI'] = (ds.green - ds.NIR) / (ds.green + ds.NIR)
avg = ds.NDWI.mean('time', skipna=True)
print("Producing mask...")
wm = xr.where(avg >= -0.3, 1, 0)
wm = wm.fillna(255)
wm = wm.astype('uint8')
wm = wm.sortby("y", ascending=False)
print("Done.")
return(wm)
def outputToFile(output):
outfile = '{}.kea'.format(site)
# Output to KEA file
x_size = len(output.x.values)
y_size = len(output.y.values)
x_min = np.amin(output.x.values)
y_max = np.amax(output.y.values)
geo_transform = (x_min, 30, 0.0, y_max, 0.0, -30)
driver = gdal.GetDriverByName('KEA')
output_raster = driver.Create(outfile, x_size, y_size, 1, 1) # Only one band, byte data type since there are only 2 values
output_raster.SetProjection(srs.ExportToWkt())
output_raster.SetGeoTransform(geo_transform)
raster_band = output_raster.GetRasterBand(1)
raster_band.SetNoDataValue(255)
raster_band.SetDescription("mask")
raster_band.WriteArray(output.values)
output_raster = None
dc = datacube.Datacube()
ds = getDataset(crs, xmin, xmax, ymin, ymax)
wm = getNDWI(ds)
outputToFile(wm)
|
klh5/wm_generator
|
gen_water_mask.py
|
gen_water_mask.py
|
py
| 2,809 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20040165317
|
num_list = [0.136, 0.082, 2.691, 1.175, 4.737, 0.083, 0.082, 1.161, 2.41, 0.0, 7.421, 6.496, 5.012, 1.145, 6.512, 4.547, 4.245, 2.093, 3.511, 3.059, 1.247, 1.882, 7.155, 8.881, 5.095]
num_avg = 0.0
rolling_avg = []
for i,flt in enumerate(num_list, 1):
num_avg += flt
rolling_avg.append(num_avg/i)
num_avg = num_avg/len(num_list)
print(num_list)
print(rolling_avg)
print(num_avg)
#given solution
# Initialize a sum accumulator
num_sum = 0.0
# Initialize an empty list to track the rolling average
rolling_avg = []
# Initialize a counter (for use in rolling average)
i = 0
for num in num_list:
# Accumulate as usual
num_sum += num
# Increment counter
i += 1
rolling_avg.append(num_sum / i)
# Make sure to divide the sum by the total number of values to get the final average:
num_avg = num_sum / i
# OR: num_avg = num_sum / len(num_list)
# OR: num_avg = rolling_avg[-1]
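# Minimal self-check (illustrative addition, not part of the original exercise): both
# versions compute the same thing, so the last rolling value equals the overall average.
assert rolling_avg[-1] == num_avg
assert len(rolling_avg) == len(num_list)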
|
mwboiss/DSI-Prep
|
intro_py/float_accum_3.py
|
float_accum_3.py
|
py
| 910 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8585416211
|
import torch
import torch.nn as nn
from torch import cat, exp
import torch.nn.functional as F
from torch.nn.functional import pad
from torch.nn.modules.batchnorm import _BatchNorm
class my_AFF(nn.Module):
'''
Point-wise Convolution based Attention module (PWAtt)
'''
def __init__(self, channels=64, r=2):
super(my_AFF, self).__init__()
inter_channels = int(channels // r)
self.local_att = nn.Sequential(
nn.Conv1d(in_channels=channels, out_channels=inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=inter_channels, out_channels=channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(channels),
)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
xa = self.local_att(x)
wei = self.sigmoid(xa)
xo = 2 * x * wei
return xo, wei
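# Illustrative usage sketch (added; not part of the original model code). The expected
# input layout (batch, channels, time) is inferred from the Conv1d/BatchNorm1d layers above.
def _demo_my_aff(batch=8, channels=64, time=48):
    att = my_AFF(channels=channels, r=2)
    x = torch.randn(batch, channels, time)
    xo, wei = att(x)               # re-weighted features and point-wise attention weights
    return xo.shape, wei.shape     # both are (batch, channels, time)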
# Root Mean Squared Logarithmic Error (RMSLE) loss
class RMSLELoss(nn.Module):
def __init__(self, eps=1e-6):
super(RMSLELoss, self).__init__()
self.squared_error = nn.MSELoss(reduction='none')
self.eps = eps
def forward(self, y_hat, y, mask, seq_length, sum_losses=False):
# the log(predictions) corresponding to no data should be set to 0
# log_y_hat = y_hat.log().where(mask, torch.zeros_like(y))
log_y_hat = torch.log(y_hat + 1).where(mask, torch.zeros_like(y))
        # then we set the log(labels) that correspond to no data to 0 as well
# log_y = y.log().where(mask, torch.zeros_like(y))
log_y = torch.log(y + 1).where(mask, torch.zeros_like(y))
# where there is no data log_y_hat = log_y = 0, so the squared error will be 0 in these places
loss = self.squared_error(log_y_hat, log_y)
rmsle_loss = torch.sqrt(loss + self.eps)
loss = torch.sum(rmsle_loss, dim=1)
if not sum_losses:
loss = loss / seq_length.clamp(min=1)
return loss.mean()
# Root Mean Squared Error (RMSE) loss
class RMSELoss(nn.Module):
def __init__(self, eps=1e-6):
super(RMSELoss, self).__init__()
self.squared_error = nn.MSELoss(reduction='none')
self.eps = eps
def forward(self, y_hat, y, mask, seq_length, sum_losses=False):
# the predictions corresponding to no data should be set to 0
y_hat = y_hat.where(mask, torch.zeros_like(y))
        # then we set the labels that correspond to no data to 0 as well
        y = y.where(mask, torch.zeros_like(y))
        # where there is no data y_hat = y = 0, so the squared error will be 0 in these places
loss = self.squared_error(y_hat, y)
rmse_loss = torch.sqrt(loss + self.eps)
loss = torch.sum(rmse_loss, dim=1)
if not sum_losses:
loss = loss / seq_length.clamp(min=1)
return loss.mean()
# Mean Squared Logarithmic Error (MSLE) loss
class MSLELoss(nn.Module):
def __init__(self):
super(MSLELoss, self).__init__()
self.squared_error = nn.MSELoss(reduction='none')
def forward(self, y_hat, y, mask, seq_length, sum_losses=False):
# the log(predictions) corresponding to no data should be set to 0
log_y_hat = y_hat.log().where(mask, torch.zeros_like(y))
        # then we set the log(labels) that correspond to no data to 0 as well
log_y = y.log().where(mask, torch.zeros_like(y))
# where there is no data log_y_hat = log_y = 0, so the squared error will be 0 in these places
loss = self.squared_error(log_y_hat, log_y)
loss = torch.sum(loss, dim=1)
if not sum_losses:
loss = loss / seq_length.clamp(min=1)
return loss.mean()
# Mean Squared Error (MSE) loss
class MSELoss(nn.Module):
def __init__(self):
super(MSELoss, self).__init__()
self.squared_error = nn.MSELoss(reduction='none')
def forward(self, y_hat, y, mask, seq_length, sum_losses=False):
# the predictions corresponding to no data should be set to 0
y_hat = y_hat.where(mask, torch.zeros_like(y))
        # then we set the labels that correspond to no data to 0 as well
        y = y.where(mask, torch.zeros_like(y))
        # where there is no data y_hat = y = 0, so the squared error will be 0 in these places
loss = self.squared_error(y_hat, y)
loss = torch.sum(loss, dim=1)
if not sum_losses:
loss = loss / seq_length.clamp(min=1)
return loss.mean()
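# Illustrative helper (added; not part of the original training code): shows the shapes the
# masked losses above expect, based only on their forward() signatures.
def _demo_masked_loss(B=4, T=10):
    y_hat = torch.rand(B, T) + 0.1               # positive predictions, one per timestep
    y = torch.rand(B, T) + 0.1                   # positive labels
    mask = torch.ones(B, T, dtype=torch.bool)    # True where a label actually exists
    seq_length = torch.full((B,), float(T))      # number of valid timesteps per sample
    return MSELoss()(y_hat, y, mask, seq_length) # scalar: mean per-timestep squared error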
class MyBatchNorm(_BatchNorm):
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True):
super(MyBatchNorm, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
def forward(self, input):
self._check_input_dim(input)
# hack to work around model.eval() issue
if not self.training:
self.eval_momentum = 0 # set the momentum to zero when the model is validating
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum if self.training else self.eval_momentum
if self.track_running_stats:
if self.num_batches_tracked is not None:
self.num_batches_tracked = self.num_batches_tracked + 1
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum if self.training else self.eval_momentum
return F.batch_norm(
input, self.running_mean, self.running_var, self.weight, self.bias,
training=True, momentum=exponential_average_factor, eps=self.eps) # set training to True so it calculates the norm of the batch
class MyBatchNorm1d(MyBatchNorm):
def _check_input_dim(self, input):
if input.dim() != 2 and input.dim() != 3:
raise ValueError('expected 2D or 3D input (got {}D input)'.format(input.dim()))
class EmptyModule(nn.Module):
def forward(self, X):
return X
class TempSepConv_CAFF(nn.Module):
def __init__(self, config, no_ts_features=None, no_daig_features=None, no_flat_features=None):
super(TempSepConv_CAFF, self).__init__()
self.task = config['task']
self.n_layers = config['n_layers']
self.diagnosis_size = config['diagnosis_size']
self.main_dropout_rate = config['main_dropout_rate']
self.temp_dropout_rate = config['temp_dropout_rate']
self.kernel_size = config['kernel_size']
self.temp_kernels = config['temp_kernels']
self.last_linear_size = config['last_linear_size']
self.no_ts_features = no_ts_features
self.no_daig_features = no_daig_features
self.no_flat_features = no_flat_features
self.no_diag = config['no_diag']
self.alpha = 100
self.keep_prob = 1-config['main_dropout_rate'] #0.5
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.hardtanh = nn.Hardtanh(min_val=1/48, max_val=100) # keep the end predictions between half an hour and 100 days
self.rmsle_loss = RMSLELoss()
self.msle_loss = MSLELoss()
self.mse_loss = MSELoss()
self.bce_loss = nn.BCELoss()
self.main_dropout = nn.Dropout(p=self.main_dropout_rate)
self.temp_dropout = nn.Dropout(p=self.temp_dropout_rate)
self.remove_none = lambda x: tuple(xi for xi in x if xi is not None) # removes None items from a tuple
self.empty_module = EmptyModule()
self.batchnormclass = MyBatchNorm1d
# self.batchnormclass = nn.BatchNorm1d
self.diagnosis_encoder = nn.Linear(in_features=self.no_daig_features, out_features=self.diagnosis_size)
self.diagnosis_encoder1 = nn.Linear(in_features=self.no_daig_features, out_features=self.temp_kernels[0]+1)
self.flat_encoder = nn.Linear(in_features=self.no_flat_features, out_features=self.temp_kernels[0]+1)
self.bn_diagnosis_encoder = self.batchnormclass(num_features=self.diagnosis_size, momentum=0.1) # input shape: B * diagnosis_size
self.bn_point_last_los = self.batchnormclass(num_features=self.last_linear_size, momentum=0.1) # input shape: (B * T) * last_linear_size
self.bn_point_last_mort = self.batchnormclass(num_features=self.last_linear_size, momentum=0.1) # input shape: (B * T) * last_linear_size
# self.bn_diagnosis_encoder = self.empty_module
# self.bn_point_last_los = self.empty_module
# self.bn_point_last_mort = self.empty_module
# input shape: (B * T) * last_linear_size
# output shape: (B * T) * 1
self.final_los = nn.Linear(in_features=self.last_linear_size, out_features=1)
self.final_mort = nn.Linear(in_features=self.last_linear_size, out_features=1)
# TDSC layers settings
self.layers = []
for i in range(self.n_layers):
dilation = i * (self.kernel_size - 1) if i > 0 else 1 # dilation = 1 for the first layer, after that it captures all the information gathered by previous layers
temp_k = self.temp_kernels[i]
self.layers.append({})
if temp_k is not None:
padding = [(self.kernel_size - 1) * dilation, 0] # [padding_left, padding_right]
self.layers[i]['temp_kernels'] = temp_k
self.layers[i]['dilation'] = dilation
self.layers[i]['padding'] = padding
self.layers[i]['stride'] = 1
self.layer_modules = nn.ModuleDict()
self.Y = 0 # Y is the number of channels in the previous temporal layer (could be 0 if this is the first layer)
self.n = 0 # n is the layer number
for i in range(self.n_layers):
temp_in_channels = (self.no_ts_features + self.n) * (1 + self.Y) if i > 0 else 2 * self.no_ts_features # (F + n) * (Y + 1)
temp_out_channels = (self.no_ts_features + self.n) * self.layers[i]['temp_kernels'] # (F + n) * temp_kernels
out_channels_caff = (self.no_ts_features+self.n+1)*(self.layers[i]['temp_kernels']+1)
if self.n == 0:
linear_input_dim = (self.no_ts_features + self.n - 1) * self.Y + 2 * self.no_ts_features + 2 + self.no_flat_features
else:
linear_input_dim = (self.no_ts_features + self.n - 1) * self.Y + (self.layers[i]['temp_kernels']+1) + 2 * self.no_ts_features + 2 + self.no_flat_features # (F + n-1) * Y + Z + 2F + 2 + no_flat_features
linear_output_dim = (self.layers[i]['temp_kernels']+1)
temp = nn.Conv1d(in_channels=temp_in_channels, # (F + n) * (Y + 1)
out_channels=temp_out_channels, # (F + n) * Y
kernel_size=self.kernel_size,
stride=self.layers[i]['stride'],
dilation=self.layers[i]['dilation'],
groups=self.no_ts_features + self.n)
caff_fc = nn.Linear(in_features=linear_input_dim, out_features=linear_output_dim)
bn_temp = self.batchnormclass(num_features=temp_out_channels, momentum=0.1)
bn_caff = self.batchnormclass(num_features=linear_output_dim, momentum=0.1)
# bn_temp = bn_point = self.empty_module # linear module; does nothing
A_layer = my_AFF(out_channels_caff)
FFA_layer = my_AFF(linear_input_dim)
self.layer_modules[str(i)] = nn.ModuleDict({
'temp': temp,
'bn_temp': bn_temp,
'caff_fc': caff_fc,
'bn_caff': bn_caff,
'A_layer': A_layer,
'FFA_layer': FFA_layer})
self.Y = self.layers[i]['temp_kernels']
self.n += 1
# input shape: (B * T) * ((F + n) * (1 + Y) + diagnosis_size + no_flat_features)
# output shape: (B * T) * last_linear_size
# input_size = (self.no_ts_features + self.n) * (1 + self.Y) + self.diagnosis_size + self.no_flat_features
#input_size = (self.no_ts_features + self.n) * (1 + self.Y) + self.diagnosis_size + self.no_flat_features
input_size = (self.no_ts_features + self.n) * (1 + self.Y) + (self.n_layers * (1 + self.Y)) + self.diagnosis_size + self.no_flat_features
if self.no_diag:
# input_size = input_size - self.diagnosis_size
input_size = input_size - self.diagnosis_size #input_size - self.diagnosis_size
self.last_los_fc = nn.Linear(in_features=input_size, out_features=self.last_linear_size)
self.last_mort_fc = nn.Linear(in_features=input_size, out_features=self.last_linear_size)
return
def tdsc_caff(self, B=None, T=None, X=None, repeat_flat=None, X_orig=None, temp=None, bn_temp=None, caff_fc=None,
bn_caff=None, A_layer=None, FFA_layer=None, temp_kernels=None, padding=None, prev_temp=None, prev_caff=None, m_scale_output=None,
caff_skip=None):
X_padded = pad(X, padding, 'constant', 0) # B * ((F + n) * (Y + 1)) * (T + padding)
X_temp = self.temp_dropout(bn_temp(temp(X_padded))) # B * ((F + n) * temp_kernels) * T
#### Context Aware Attentive Feature Fusion (CAFF) #####
if prev_caff is None:
X_concat = cat(self.remove_none((prev_temp, # (B * T) * ((F + n-1) * Y)
prev_caff, # (B * T) * 1
X_orig, # (B * T) * (2F + 2)
repeat_flat)), # (B * T) * no_flat_features
dim=1) # (B * T) * (((F + n-1) * Y) + 1 + 2F + 2 + no_flat_features)
else:
X_concat = cat(self.remove_none((prev_temp.view(B*T,-1), # (B * T) * ((F + n-1) * Y)
prev_caff.permute(0,3,1,2).view(B*T,-1), # (B * T) * 1
X_orig, # (B * T) * (2F + 2)
repeat_flat)), # (B * T) * no_flat_features
dim=1) # (B * T) * (((F + n-1) * Y) + 1 + 2F + 2 + no_flat_features)
X_concat, wei_1 = FFA_layer(X_concat.view(B,T,-1).permute(0,2,1)) # Step 2 Attention
X_concat = X_concat.permute(0,2,1).view(B*T,-1)
caff_output = self.main_dropout(bn_caff(caff_fc(X_concat))) # (B * T) * 1
caff_output = caff_output.view(B, T, -1).unsqueeze(2).permute(0,2,3,1)
# Accumulate multi-scale features
m_scale_output = cat((m_scale_output,caff_output), dim=1) if m_scale_output is not None else caff_output
caff_skip = cat((caff_skip, prev_caff[:,:,-1,:].unsqueeze(2)), dim=1) if prev_caff is not None else caff_skip
temp_skip = cat((caff_skip, # B * (F + n) * 1 * T
X_temp.view(B, caff_skip.shape[1], temp_kernels, T)), # B * (F + n) * temp_kernels * T
dim=2) # B * (F + n) * (1 + temp_kernels) * T
X_combined = self.relu(cat((temp_skip, caff_output), dim=1)) # B * (F + n) * (1 + temp_kernels) * T
next_X = X_combined.view(B, (caff_skip.shape[1] + 1) * (1 + temp_kernels), T) # B * ((F + n + 1) * (1 + temp_kernels)) * T
next_X, wei_2 = A_layer(next_X.view(B,-1,T)) # step 4 attention
next_X = next_X.view(B, (caff_skip.shape[1] + 1) * (1 + temp_kernels), T)
temp_output = X_temp.permute(0, 2, 1).contiguous().view(B * T, caff_skip.shape[1] * temp_kernels) # (B * T) * ((F + n) * temp_kernels)
return (temp_output, # (B * T) * ((F + n) * temp_kernels)
caff_output, # (B * T) * 1
next_X, # B * ((F + n) * (1 + temp_kernels)) * T
                caff_skip, # caff features of the previous layer
m_scale_output, # keeping track of the caff multi scale features from all layers; B * (F + n) * T
wei_1, wei_2) # PWatt Attention weights
def forward(self, X, diagnoses, flat, time_before_pred=5):
# flat is B * no_flat_features
# diagnoses is B * no_daig_features
# X is B * no_daig_features * T
# split into features and indicator variables
X_separated = torch.split(X[:, 1:-1, :], self.no_ts_features, dim=1) # tuple ((B * F * T), (B * F * T))
# prepare repeat arguments and initialise layer loop
B, _, T = X_separated[0].shape
repeat_flat = flat.repeat_interleave(T, dim=0) # (B * T) * no_flat_features
X_orig = X.permute(0, 2, 1).contiguous().view(B * T, 2 * self.no_ts_features + 2) # (B * T) * (2F + 2)
repeat_args = {'repeat_flat': repeat_flat,
'X_orig': X_orig,
'B': B,
'T': T}
next_X = torch.stack(X_separated, dim=2).reshape(B, 2 * self.no_ts_features, T)
caff_skip = X_separated[0].unsqueeze(2) # ts features without indicators, keeps track of caff skip connections generated from caff module;
temp_output = None
caff_output = None
m_scale_output = None
wei_step2 = []
wei_step4 = []
for i in range(self.n_layers):
kwargs = dict(self.layer_modules[str(i)], **repeat_args)
temp_output, caff_output, next_X, caff_skip, m_scale_output, wei_1, wei_2 = self.tdsc_caff(X=next_X, caff_skip=caff_skip,
prev_temp=temp_output, prev_caff=caff_output,
temp_kernels=self.layers[i]['temp_kernels'],
padding=self.layers[i]['padding'],
m_scale_output= m_scale_output,
**kwargs)
wei_step2.append(wei_1.detach().cpu())
wei_step4.append(wei_2.detach().cpu())
m_scale_output = m_scale_output.view(B,-1,T)
if self.no_diag:
combined_features = cat((flat.repeat_interleave(T - time_before_pred, dim=0), # (B * (T - time_before_pred)) * no_flat_features
next_X[:, :, time_before_pred:].permute(0, 2, 1).contiguous().view(B * (T - time_before_pred), -1),
m_scale_output[:, :, time_before_pred:].permute(0, 2, 1).contiguous().view(B * (T - time_before_pred), -1)), dim=1) # (B * (T - time_before_pred)) * (((F + n) * (1 + Y)) + no_flat_features) for tpc
else:
diagnoses_enc = self.relu(self.main_dropout(self.bn_diagnosis_encoder(self.diagnosis_encoder(diagnoses)))) # B * diagnosis_size
combined_features = cat((flat.repeat_interleave(T - time_before_pred, dim=0), # (B * (T - time_before_pred)) * no_flat_features
diagnoses_enc.repeat_interleave(T - time_before_pred, dim=0), # (B * (T - time_before_pred)) * diagnosis_size
next_X[:, :, time_before_pred:].permute(0, 2, 1).contiguous().view(B * (T - time_before_pred), -1),
m_scale_output[:, :, time_before_pred:].permute(0, 2, 1).contiguous().view(B * (T - time_before_pred), -1)), dim=1) # (B * (T - time_before_pred)) * (((F + n) * (1 + Y)) + diagnosis_size + no_flat_features) for tpc
last_los = self.relu(self.main_dropout(self.bn_point_last_los(self.last_los_fc(combined_features))))
last_mort = self.relu(self.main_dropout(self.bn_point_last_mort(self.last_mort_fc(combined_features))))
los_predictions = self.hardtanh(exp(self.final_los(last_los).view(B, T - time_before_pred))) # B * (T - time_before_pred)
mort_predictions = self.sigmoid(self.final_mort(last_mort).view(B, T - time_before_pred)) # B * (T - time_before_pred)
return los_predictions, mort_predictions, wei_step2, wei_step4
def loss(self, y_hat_los, y_hat_mort, y_los, y_mort, mask, seq_lengths, device, sum_losses, loss_type):
# mortality loss
if self.task == 'mortality':
loss = self.bce_loss(y_hat_mort, y_mort) * self.alpha
# LoS loss
else:
bool_type = torch.cuda.BoolTensor if device == torch.device('cuda:3') else torch.BoolTensor
if loss_type == 'rmsle':
los_loss = self.rmsle_loss(y_hat_los, y_los, mask.type(bool_type), seq_lengths, sum_losses)
if loss_type == 'msle':
los_loss = self.msle_loss(y_hat_los, y_los, mask.type(bool_type), seq_lengths, sum_losses)
elif loss_type == 'mse':
los_loss = self.mse_loss(y_hat_los, y_los, mask.type(bool_type), seq_lengths, sum_losses)
loss = los_loss
return loss
|
Al-Dailami/DTSC-CAFF
|
dtsc_caff_model.py
|
dtsc_caff_model.py
|
py
| 21,192 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18920197222
|
from __future__ import annotations
import sys
from typing import TYPE_CHECKING
from ansiblelint.rules import AnsibleLintRule
if TYPE_CHECKING:
from ansiblelint.file_utils import Lintable
from ansiblelint.utils import Task
def _changed_in_when(item: str) -> bool:
if not isinstance(item, str):
return False
item_list = item.split()
if {"and", "or", "not"} & set(item_list):
return False
return any(
changed in item
for changed in [
".changed",
"|changed",
'["changed"]',
"['changed']",
"is changed",
]
)
class UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):
"""Tasks that run when changed should likely be handlers."""
id = "no-handler"
description = (
"If a task has a ``when: result.changed`` setting, it is effectively "
"acting as a handler. You could use ``notify`` and move that task to "
"``handlers``."
)
link = "https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers"
severity = "MEDIUM"
tags = ["idiom"]
version_added = "historic"
def matchtask(
self,
task: Task,
file: Lintable | None = None,
) -> bool | str:
if task["__ansible_action_type__"] != "task" or task.is_handler():
return False
when = task.get("when")
result = False
if isinstance(when, list):
if len(when) <= 1:
result = _changed_in_when(when[0])
elif isinstance(when, str):
result = _changed_in_when(when)
return result
if "pytest" in sys.modules:
import pytest
# pylint: disable=ungrouped-imports
from ansiblelint.rules import RulesCollection
from ansiblelint.runner import Runner
from ansiblelint.testing import run_ansible_lint
@pytest.mark.parametrize(
("test_file", "failures"),
(
pytest.param("examples/playbooks/no_handler_fail.yml", 5, id="fail"),
pytest.param("examples/playbooks/no_handler_pass.yml", 0, id="pass"),
),
)
def test_no_handler(
default_rules_collection: RulesCollection,
test_file: str,
failures: int,
) -> None:
"""Test rule matches."""
results = Runner(test_file, rules=default_rules_collection).run()
assert len(results) == failures
for result in results:
assert result.tag == "no-handler"
def test_role_with_handler() -> None:
"""Test role with handler."""
role_path = "examples/roles/role_with_handler"
results = run_ansible_lint("-v", role_path)
assert "no-handler" not in results.stdout
|
ansible/ansible-lint
|
src/ansiblelint/rules/no_handler.py
|
no_handler.py
|
py
| 2,753 |
python
|
en
|
code
| 3,198 |
github-code
|
6
|
28970147437
|
import typing as t
import collections
import flask_restx
import flask_restx.fields as frf
import marshmallow.fields as mf
from marshmallow_pynamodb import ModelSchema
from model.base_model import Model
from common.util import create_table
class Serializer(ModelSchema):
_api_model = None
def __init__(self, *args, **kwargs):
super(Serializer, self).__init__(*args, **kwargs)
create_table(self.model())
@property
def api_model(self):
if self._api_model is None:
self._api_model = self._get_api_model()
return self._api_model
def loads_required(self, json_data: str, many: bool = False):
data = self.loads(json_data=json_data, many=many).attribute_values
if many:
return [self._remove_additional_fields(data_entry) for data_entry in data]
return self._remove_additional_fields(data)
def serialize(self, obj, *, many: bool = False):
return super(Serializer, self)._serialize(obj, many=many)
def _remove_additional_fields(self, data: dict):
""" Remove fields that aren't provided by user in request """
return {k: v for k, v in data.items() if k in self.declared_fields}
def _get_api_model(self):
""" Map marshmallow schema into flask_restx api model """
model_name = self.model().__name__.replace("Model", "")
rest_attributes = collections.OrderedDict()
for key, value in self.declared_fields.items():
rest_attributes[key] = self.map_marshmallow_field_to_api_field(value)
return flask_restx.Model(model_name, rest_attributes)
@classmethod
def model(cls) -> t.Type[Model]:
""" Expose PynamoDB Model """
return cls.Meta.model
@classmethod
def map_marshmallow_field_to_api_field(cls, marshmallow_field: mf.Field):
if isinstance(marshmallow_field, mf.String):
return frf.String()
if isinstance(marshmallow_field, (mf.Raw, mf.Mapping, mf.Dict)):
return frf.Raw()
if isinstance(marshmallow_field, (mf.List, mf.Tuple)):
return frf.List(cls.map_marshmallow_field_to_api_field(marshmallow_field.inner))
if isinstance(marshmallow_field, (mf.Number, mf.Integer, mf.Decimal, mf.Int)):
return frf.Integer()
if isinstance(marshmallow_field, (mf.Boolean, mf.Bool)):
return frf.Boolean()
if isinstance(marshmallow_field, mf.Float):
return frf.Float()
if isinstance(marshmallow_field, mf.Date):
return frf.Date()
if isinstance(marshmallow_field, mf.DateTime):
return frf.DateTime()
if isinstance(marshmallow_field, (mf.Url, mf.URL)):
return frf.Url()
raise Exception(f"Cannot map {marshmallow_field} to API model field")
def serializer_factory(model_class: t.Type[Model]):
class _Serializer(Serializer):
is_removed = mf.Boolean(default=False)
created_at = mf.Float(allow_none=True)
class Meta:
model = model_class
return _Serializer
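# Hypothetical usage sketch (added for illustration; DeviceModel and its fields are invented
# and not part of this repository):
#
#   DeviceSerializer = serializer_factory(DeviceModel)
#   serializer = DeviceSerializer()
#   api_model = serializer.api_model              # flask_restx model derived from the schema
#   payload = serializer.loads_required('{"name": "sensor-1"}')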
|
wizzdev-pl/iot-starter
|
web_server/server/core/serializer.py
|
serializer.py
|
py
| 3,081 |
python
|
en
|
code
| 7 |
github-code
|
6
|
43085023977
|
# -*- coding: utf-8 -*-
from InterpolatePoints import *
class InterpolatePoints(object):
def __init__(self):
self.label = "Interpolate points"
self.description = ""
self.canRunInBackground = True
def getParameterInfo(self):
param_points_table = arcpy.Parameter(
displayName="Data points (table)",
name="points_table",
datatype="GPTableView",
parameterType="Required",
direction="Input")
param_pts_id_field = arcpy.Parameter(
displayName="ID field in the data points table",
name="pts_id_field",
datatype="Field",
parameterType="Required",
direction="Input")
param_pts_rid_field = arcpy.Parameter(
displayName="Route ID field in the data points table",
name="pts_rid_field",
datatype="Field",
parameterType="Required",
direction="Input")
param_pts_distfield = arcpy.Parameter(
displayName="Distance field in the data points on network layer",
name="pts_distfield",
datatype="Field",
parameterType="Required",
direction="Input")
param_list_fields = arcpy.Parameter(
displayName="Choose the fields with values to interpolate",
name="list_fields_to_keep",
datatype="Field",
parameterType="Required",
direction="Input",
multiValue=True)
param_targets = arcpy.Parameter(
displayName="Target points to interpolate on (table)",
name="targets",
datatype="GPTableView",
parameterType="Required",
direction="Input")
param_targets_id_field = arcpy.Parameter(
displayName="ID field in the target points table",
name="targets_id_field",
datatype="Field",
parameterType="Required",
direction="Input")
param_targets_rid_field = arcpy.Parameter(
displayName="Route ID field in the target points table",
name="targets_rid_field",
datatype="Field",
parameterType="Required",
direction="Input")
param_targets_distfield = arcpy.Parameter(
displayName="Distance field in the target points on network layer",
name="targets_distfield",
datatype="Field",
parameterType="Required",
direction="Input")
param_routes = arcpy.Parameter(
displayName="Input route feature class (lines)",
name="routes",
datatype="GPFeatureLayer",
parameterType="Required",
direction="Input")
param_RID_field = arcpy.Parameter(
displayName="RID field in routes feature class",
name="RID_field",
datatype="Field",
parameterType="Required",
direction="Input")
param_order_field = arcpy.Parameter(
displayName="Ordering field in routes feature class",
name="order_field",
datatype="Field",
parameterType="Required",
direction="Input")
param_links = arcpy.Parameter(
displayName="Routes links",
name="links",
datatype="GPTableView",
parameterType="Required",
direction="Input")
param_output_points = arcpy.Parameter(
displayName="Points Output table",
name="output_points",
datatype="GPTableView",
parameterType="Required",
direction="Output")
param_pts_id_field.parameterDependencies = [param_points_table.name]
param_pts_rid_field.parameterDependencies = [param_points_table.name]
param_pts_distfield.parameterDependencies = [param_points_table.name]
param_list_fields.parameterDependencies = [param_points_table.name]
param_targets_id_field.parameterDependencies = [param_targets.name]
param_targets_rid_field.parameterDependencies = [param_targets.name]
param_targets_distfield.parameterDependencies = [param_targets.name]
param_RID_field.parameterDependencies = [param_routes.name]
param_order_field.parameterDependencies = [param_routes.name]
params = [param_points_table, param_pts_id_field, param_pts_rid_field, param_pts_distfield, param_list_fields,
param_targets, param_targets_id_field, param_targets_rid_field, param_targets_distfield, param_routes,
param_RID_field, param_order_field, param_links, param_output_points]
return params
def isLicensed(self):
return True
def updateParameters(self, parameters):
return
def updateMessages(self, parameters):
return
def execute(self, parameters, messages):
points_table = parameters[0].valueAsText
id_field_pts = parameters[1].valueAsText
RID_field_pts = parameters[2].valueAsText
Distance_field_pts = parameters[3].valueAsText
list_fields = (parameters[4].valueAsText).split(';')
data_fields = [str(item) for item in list_fields]
targetpoints = parameters[5].valueAsText
id_field_target = parameters[6].valueAsText
RID_field_target = parameters[7].valueAsText
Distance_field_target = parameters[8].valueAsText
network_shp = parameters[9].valueAsText
network_RID_field = parameters[10].valueAsText
order_field = parameters[11].valueAsText
        links_table = parameters[12].valueAsText
        output_table = parameters[13].valueAsText
        execute_InterpolatePoints(points_table, id_field_pts, RID_field_pts, Distance_field_pts, data_fields,
                                  targetpoints, id_field_target, RID_field_target, Distance_field_target, network_shp,
                                  links_table, network_RID_field, order_field, output_table)
return
|
gchone/ConcordiaRiverLab-FloodTools
|
InterpolatePoints_Interface.py
|
InterpolatePoints_Interface.py
|
py
| 6,053 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22083642525
|
n = input()
n = list(n)
lst = [1]  # a single character is already a run of length 1, so max() below never sees an empty list
for i in range(0,len(n)-1):
count = 1
for j in range(i+1,len(n)):
if n[i] == n[j]:
count += 1
else:
break
lst.append(count)
print(max(lst))
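# Example (added for illustration): for the CSES "Repetitions" task, the input "ATTCGGGA"
# contains the runs A, TT, C, GGG, A, so the program above prints 3.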
|
vamshipv/code-repo
|
CSES/repeat.py
|
repeat.py
|
py
| 224 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72038050107
|
#!/usr/bin/env python
# coding: utf-8
# In[5]:
# Conventions are the same as Bourbaki, for example,
# Type E convention 1 3 4 5 6 7 8
# 2
# Input under above convention if type E
J1=[1,2,3,4];
J2=[5,6,7,8];
Type="E";n=6;
D=DynkinDiagram([Type, n]);
# main program.
from sage.combinat.root_system.dynkin_diagram import DynkinDiagram_class
# weight algorithm
def weight(globalType, globaln, D): # return the weight of a chunk (type of original dynkin diagram, rank of original dynkin diagram, current chunk)
n=D.rank();
    Type=D.cartan_matrix().cartan_type().type(); # the intrinsic type of the current dynkin subdiagram.
if globalType=="A":
return (n,0,"A");
if globalType=="D":
if Type=="A": # type A chunk.
if Set(D.index_set())==Set([globaln-2,globaln-1,globaln]): # type D3 case. Note that here we rely on the index set.
return (2*n-2,0,"D");
return (n,0,"A");
else: # type D chunk.
return (2*n-2,0,"D");
if globalType=="E":
if Type=="A": # type A chunk.
if Set([2,4]).issubset(Set(D.index_set())): # Note that here we rely on the index set.
if n>=3:
return (n,1,"A") # A^# case;
elif n==2:
return (n,-1,"A"); # A^b case;
return (n,0,"A");
if Type=="D":
return (2*n-2,0,"D")
if Type=="E":
return (100,0,"E") # weight is infinity for type E chunks.
def disjointChunks(dynkin,E,i,C,P,maxLen,D): # return all maximal selections P of disjoint chunks (current selection, the union of all chunk indices of D, current position in C, all dominant chunks, maximal selections, length of maximal selections)
if i>=len(C):
if len(dynkin)>=maxLen:
if len(dynkin)>maxLen:
maxLen=len(dynkin);
P=[];
P.append(copy(dynkin));
return P,maxLen;
x=C[i];
if Set(x[0]).intersection(E).is_empty()==True:
f=True;
for j in dynkin:
if D.subtype(Set(j[0]).union(Set(x[0]))).is_connected()==True: # the new connected chunk is not disjoint to dynkin.
f=False;
break;
if f: # if disjoint, we can add the new chunk x.
dynkin.append(x);
P,maxLen=disjointChunks(dynkin,E.union(Set(x[0])),i+1,C,P,maxLen,D);
dynkin.pop();
P,maxLen=disjointChunks(dynkin,E,i+1,C,P,maxLen,D); # we ignore the new chunk x.
return P,maxLen;
def weightAlg(Type, D,n , J1, J2,J): # the weight algorithm on labeled dynkin diagram D,J1,J2(type of dynkin diagram, dynkin diagram, rank, label J1, label J2, union of all labels/support)
if Set(J)==Set(J1) or Set(J)==Set(J2): # the whole diagram is uniformly labeled.
return [[]];
L1=D.subtype(J1);
L2=D.subtype(J2);
index1=DynkinDiagram_class(L1).strongly_connected_components();
index2=DynkinDiagram_class(L2).strongly_connected_components();
if len(index1)==1:
index1=[J1];
if len(index2)==1:
index2=[J2];
weight1=[];
weight2=[];
maxWeight=0;
for i in index1:
w=weight(Type,n, D.subtype(i))
weight1.append(w);
if w[0]>maxWeight:
maxWeight=w[0];
for i in index2:
w=weight(Type,n, D.subtype(i))
weight2.append(w);
if w[0]>maxWeight:
maxWeight=w[0];
# Step 1: finding all dominant chunks.
C=[];# format: (root_index, label, (weight, type))
for i in range(len(weight1)):
if weight1[i][0]==maxWeight:
C.append((index1[i],1,weight1[i]));
for i in range(len(weight2)):
if weight2[i][0]==maxWeight:
C.append((index2[i],2,weight2[i]));
# Step 2: finding all selections P of dominant chunks.
P,temp=disjointChunks([],Set([]),0,C,[],0,D); # get the list of maximal selections.
dominant_collection=[];
if len(P)==1:
dominant_collection.append(P[0]);
else: # Step 3: tie breaking.
# 1. choose as many type A as possible.
P1=copy(P);
for d in P:
for l in d:
if l[2][2]!="A": # the type of chunks
P1.remove(d);
if len(P1)>0:
P=P1;
if len(P)==1:
dominant_collection.append(P[0]);
else:
# 2. choose those not adjacent to extra root.
extraRoots=[];
if Type=="D":
extraRoots=[n,n-1];
if Type=="E":
extraRoots=[2];
if extraRoots!=[]:
P1=copy(P);
for j in extraRoots:
for d in P:
f=False;
for l in d:
if (j in l[0])==True:
break;
if D.subtype(Set(l[0]).union(Set([j]))).is_connected()==True: # this selection of chunks is adjacent to extra root.
f=True;
if f:
if d in P1:
P1.remove(d);
if len(P1)>0:
P=P1;
if len(P)==1:
dominant_collection.append(P[0]);
else:
# 3. A^# and A^b cases.
if Type=="E":
f=False;
for d in P:
for l in d:
if l[2][1]==1: # A^# case.
f=True;
dominant_collection.append(d);
break;
if not f:
P1=copy(P);
for d in P:
for l in d:
if l[2][1]==-1: # A^b case
P1.remove(d);
if len(P1)>0:
P=P1;
if dominant_collection==[]: # 4. the rest choices are all ok.
dominant_collection+=P;
totalKillList=[];
D_copy=copy(D);
J1_copy=copy(J1);
J2_copy=copy(J2);
J_copy=copy(J);
for dominant_selection in dominant_collection:
D=copy(D_copy);
J1=copy(J1_copy);
J2=copy(J2_copy);
J=copy(J_copy);
dominants=Set({});
for d in dominant_selection:
dominants=dominants.union(Set(d[0]));
dominants=list(dominants);
        # kill the roots adjacent to the dominant selection.
kill_list=[];
for i in D.index_set():
if not (i in dominants):
f=False;
for j in dominants:
if D.subtype(Set([i]).union(Set([j]))).is_connected():
f=True;
if f:
kill_list.append(i);
if i in J1:
J1.remove(i);
if i in J2:
J2.remove(i);
if i in J:
J.remove(i);
# induction with the subdiagram.
kill_list=list(Set(kill_list));
D=D.subtype(J);
subdiagram=False;
kill_list=[kill_list];
for c in DynkinDiagram_class(D).strongly_connected_components():
if len(c)==1:
continue;
D1=D.subtype(c);
I1=list(Set(J1).intersection(Set(c)));
I2=list(Set(J2).intersection(Set(c)));
I=c;
if "type_relabel" in D1.cartan_matrix().cartan_type().__module__: # need relabeling
label_inv=D1.cartan_matrix().cartan_type()._relabelling;
else:
label_inv={v:v for v in range(1,len(c)+1)}; # trivial relabeling
label = {v: w for w, v in label_inv.items()}
D1_relabel=D1.relabel(label)
I1_relabel=[label[w] for w in I1];
I2_relabel=[label[w] for w in I2];
I=list(Set(I1_relabel).union(Set(I2_relabel)));
totalTemp=weightAlg(D1_relabel.cartan_matrix().cartan_type().type(),D1_relabel,D1_relabel.rank(),I1_relabel,I2_relabel,I);
newKillList=[];
for temp in totalTemp:
if temp!=[]:
temp_1=[label_inv[w] for w in temp];
subdiagram=True;
for j in kill_list:
newKillList.append(copy(list(Set(j).union(Set(temp_1)))));
if newKillList!=[]:
kill_list=newKillList;
for j in kill_list:
if not(j in totalKillList):
totalKillList.append(j);
return copy(totalKillList);
# pattern algorithm
def branch(x,last_root,I1,I2,J,D,m,result):
# Find m number of I1 vertices starting from x protected by I2(starting point, last step, main branch label, protection label, all labeled roots, dynkin diagram, steps to remain, record history root): return successful or not.
nbhd=[]; # the neighbour roots of x except x itself and last root.
next_root=[];
for k in J:
if k!=x and (not(k in last_root)) and D.subtype(Set([k,x])).is_connected()==True: # k is the neighbour of x.
if k in I1:
next_root.append(k);
nbhd.append(k);
if m<=1: # final step.
flag=True;
for k in nbhd:
if (not(k in I2)): # protection check.
flag=False;
if flag:
return True,result;
else:
return False,[];
# not final step.
for y in next_root:
flag=True;
for k in nbhd:
if (not(k in I2)) and (k!=y): # protection check except next root.
flag=False;
if flag:
result.append(y);
temp,temp1=branch(y,[x],I1,I2,J,D,m-1,result);
if temp:
return True,temp1;
result.pop();
return False,[]; # return false if no next roots are successful.
def pattern1(x,y,J1,J2,J,D,m): # root x has label J1 and y has J2, we want to find m number of J1 dominant m number of J2 pattern with J1 protection: return vertices to be killed.
flag1,negative_branch=branch(y,[x],J2,J1,J,D,m,[y])
flag2,positive_branch=branch(x,[y],J1,J1,J,D,m,[x]);
if flag1 and flag2:
for i in positive_branch:
for j in negative_branch:
if D.subtype(Set([i,j])).is_connected()==True:
return True,[j];
return False,[];
def pattern2(x,y,J1,J2,J,D): # root x has label J1 and y has J2, we want to find m number of J1 dominant m number of J2 pattern with J1 protection: return vertices to be killed.
flag1,negative_branch=branch(y,[x],J2,J1,J,D,2,[2,y]);
flag2,positive_branch=branch(x,[2,y],J1,J1,J,D,3,[x]);
if flag1 and flag2:
for i in positive_branch:
for j in negative_branch:
if D.subtype(Set([i,j])).is_connected()==True and j!=2:
return True,[2,j];
return False,[];
def pattern_alg(Type, D, n,J1,J2,J):
if Set(J)==Set(J1) or Set(J)==Set(J2): # the whole diagram is uniformly labeled.
return [];
kill_list=[];
if Type=="E" and n==8: # pattern 5 for type E8.
if Set(J1)==Set([1,2,3,4]) and Set(J2)==Set([5,6,7,8]):
kill_list.append(5);
return kill_list;
if Set(J2)==Set([1,2,3,4]) and Set(J1)==Set([5,6,7,8]):
kill_list.append(5);
return kill_list;
quit=False;
for i in J:
if quit: break;
count1=0;
count2=0;
nbhd=[]
for j in J:
if j!=i and D.subtype(Set([i,j])).is_connected()==True: # j is the neighbour of i.
nbhd.append(j);
for j in nbhd:
if quit: break;
if (i in J1) and (j in J2): # j is the neighbour of i with opposite sign.
for m in [1,2,3]:
flag,result=pattern1(i,j,J1,J2,J,D,m); # use J1 dominant J2 pattern 1-3.
if flag:
kill_list=kill_list+copy(result);
quit=True;
if (Type=="E") and (4 in J1) and (2 in J2): # pattern 4.
flag,result=pattern2(i,j,J1,J2,J,D);
if flag:
kill_list=kill_list+copy(result);
quit=True;
if (i in J2) and (j in J1): # switching labels J1 and J2.
for m in [1,2,3]:
flag,result=pattern1(i,j,J2,J1,J,D,m); # use J2 dominant J1 pattern.
if flag:
kill_list=kill_list+copy(result);
quit=True;
if (Type=="E") and (4 in J2) and (2 in J1): # pattern 4.
flag,result=pattern2(i,j,J2,J1,J,D);
if flag:
kill_list=kill_list+copy(result);
quit=True;
J1=list(Set(J1).difference(Set(kill_list)));
J2=list(Set(J2).difference(Set(kill_list)));
J=list(Set(J).difference(Set(kill_list)));
# induction with the subdiagram.
D=D.subtype(J);
for c in DynkinDiagram_class(D).strongly_connected_components():
if len(c)==1:
continue;
D1=D.subtype(c);
I1=list(Set(J1).intersection(Set(c)));
I2=list(Set(J2).intersection(Set(c)));
I=c;
if "type_relabel" in D1.cartan_matrix().cartan_type().__module__: # need relabeling
label_inv=D1.cartan_matrix().cartan_type()._relabelling;
else:
label_inv={v:v for v in range(1,len(c)+1)}; # trivial relabeling
label = {v: w for w, v in label_inv.items()}
D1_relabel=D1.relabel(label)
I1_relabel=[label[w] for w in I1];
I2_relabel=[label[w] for w in I2];
I=list(Set(I1_relabel).union(Set(I2_relabel)));
if D1_relabel.cartan_matrix().cartan_type().type()=="E": # if the remaining subdiagram is of Type E, we do pattern algorithm again.
temp=pattern_alg(D1_relabel.cartan_matrix().cartan_type().type(),D1_relabel,D1_relabel.rank(),I1_relabel,I2_relabel,I);
else: # if of classical types, we do weight algorithm.
temp=weightAlg(D1_relabel.cartan_matrix().cartan_type().type(),D1_relabel,D1_relabel.rank(),I1_relabel,I2_relabel,I);
temp=temp[0];
if temp!=[]:
temp_1=[label_inv[w] for w in temp];
kill_list=list(Set(kill_list).union(Set(temp_1)));
return copy(kill_list);
# # test pattern algorithm individually.
# J=list(Set(J1).union(Set(J2)));
# print("One possible J is",sorted(list(Set(list(Set(J1).union(Set(J2)))).difference(Set(pattern_alg(Type,D,n,copy(J1),copy(J2),copy(J)))))));
# Lusztig_Spaltenstein algorithm
def Lusztig_Spaltenstein(i,conjugate,W,I,X,M,J,K):
    # return whether J and K are conjugate (i: current position in I, conjugate: conjugate-or-not flag, W: Weyl group, I: chain I_i with I_0=J and I_n=K satisfying the given conditions, X: elements used for conjugation, M: collection of all conjugates of J found so far, J, K): boolean whether J and K are conjugate.
if Set(J)==Set(K):
return True;
for j in range(n):
if (not ((j+1) in I[i])):
L=copy(I[i]);
L.append(j+1);
x=W.long_element(index_set=I[i])*W.long_element(index_set=L); #longest coset representative.
X.append(x);
L.pop();
# calculate x^-1*L*x and see if it is again standard parabolic.
flag=False;
for k in range(len(L)):
y=x^-1*W.simple_reflection(L[k])*x;
if y.length()!=1: #x^-1*L*x is not simple reflection
flag=True;
break;
L[k]=int(str(y)[1:]);
if flag:
continue;
if Set(L)==Set(K):
conjugate=True;
I.append(L);
# Uncomment following code if want detailed conjugation procedure.
# print("The two parabolic subgroups are conjugate, with conjugation sequence as follows:");
# for l in range(len(I)-1):
# print(Set(I[l]),"--", X[l],"-> ",end="");
# print(Set(I[len(I)-1]));
return conjugate;
            # detect if L is already inside the conjugation class of J, cutting redundant search.
selfloop=False;
for l in range(len(M)):
if (Set(L)==Set(M[l])):
selfloop=True;
if selfloop:
continue;
I.append(L);
M.append(L); # memorize all conjugates of J.
conjugate=Lusztig_Spaltenstein(i+1,conjugate,W,I,X,M,J,K);
if conjugate:
return conjugate;
I.pop();
X.pop();
return conjugate;
# below are testing code for all cases in type=A/D/E,n=6-8.
def testing(J1,J2,i,Type,D,n):
if i>n:
print("J1=",J1,"J2=",J2)
result_pattern=sorted(list(Set(list(Set(J1).union(Set(J2)))).difference(Set(pattern_alg(Type,D,n,copy(J1),copy(J2),copy(list(Set(J1).union(Set(J2)))))))));
totalKillList=weightAlg(Type,D,n,copy(J1),copy(J2),list(Set(J1).union(Set(J2))));
for kill_list in totalKillList:
result_weight=sorted(list(Set(list(Set(J1).union(Set(J2)))).difference(Set(kill_list))));
temp=Lusztig_Spaltenstein(0,False,WeylGroup(D.cartan_matrix().cartan_type(),prefix="s",implementation="permutation"),[result_weight],[],[result_weight],result_weight,result_pattern);
if temp:
result_LS="conjugated";
else:
result_LS="not conjugated";
# print(J1,J2,"weight:",result_weight,"pattern:",result_pattern) #uncomment if only want not conjugated output.
print("weight:",result_weight,"pattern:",result_pattern,"\n\t\t\t\t\t\t\t\t\t\t\t They are",result_LS);
return;
J1.append(i);
testing(J1,J2,i+1,Type,D,n)
J1.pop();
J2.append(i);
testing(J1,J2,i+1,Type,D,n)
J2.pop();
# testing(J1,J2,i+1,Type,D,n) # uncomment if want cases where J1 union J2 is not all vertices.
for Type in ['A','D','E']: # input all test types you want in the list here.
for n in [6,7,8]: # input all test ranks you want in the list here.
print("\n\n testing type",Type, n)
testing([1],[],2,Type,DynkinDiagram([Type, n]),n)
|
Hai-Yu-Chen/RegOfUnipConjClassInTP
|
Regularity of Unipotent Elements in Total Positivity.py
|
Regularity of Unipotent Elements in Total Positivity.py
|
py
| 18,759 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27712857647
|
from typing import List


class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        min_price = float('inf')   # lowest price seen so far
        max_profit = 0             # best profit achievable so far
        for price in prices:
            if price < min_price:
                min_price = price
            elif price - min_price > max_profit:
                max_profit = price - min_price
        return max_profit
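# Usage sketch (added for illustration; not part of the original submission):
#   Solution().maxProfit([7, 1, 5, 3, 6, 4]) -> 5   (buy at 1, sell at 6)
#   Solution().maxProfit([7, 6, 4, 3, 1])    -> 0   (prices only fall, so no trade is made)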
|
jemis140/DSA_Practice
|
Best_Time_To_Buy_Stock.py
|
Best_Time_To_Buy_Stock.py
|
py
| 304 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7987154827
|
#Problem link:https://practice.geeksforgeeks.org/problems/bst-to-max-heap/1
#Time Complexity: O(N)
#Space Complexity: O(N)
class Solution:
def convertToMaxHeapUtil(self, root):
arr = []
i = 0
#inOrder traversal
#to get the values sorted
def ino(node):
if not node:
return
ino(node.left)
arr.append(node.data)
ino(node.right)
ino(root)#call the inorder traversal
        #postOrder traversal (left, right, root)
        #to put the sorted list values
        #in place of the tree values
def dfs(node):
nonlocal i
if not node:
return
dfs(node.left)
dfs(node.right)
node.data=arr[i]
i+=1
        dfs(root)  # call the postorder traversal
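# Worked example (added for illustration; the judge's Node/root objects are assumed to
# expose .data, .left and .right as used above):
#
#   BST        4        inorder gives [1, 2, 3, 4, 5, 6, 7]; writing those values back in
#            /   \      postorder (left, right, root) puts 7 at the root and makes every
#           2     6     parent >= its children, with all left-subtree values smaller than
#          / \   / \    the right-subtree values -- i.e. the required max heap.
#         1   3 5   7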
|
Ishantgarg-web/DailyCodingProblems
|
Python/Tree/BST to max heap.py
|
BST to max heap.py
|
py
| 891 |
python
|
en
|
code
| 3 |
github-code
|
6
|
6916432675
|
from tkinter import *
import first_task
import second_task
import third_task
class Lab1:
def __init__(self):
self.root = Tk()
# Create Gui for first task
self.file_frame = Frame(self.root)
self.fr1 = LabelFrame(self.file_frame, text="First task", font="Times 16")
self.fr2 = LabelFrame(self.file_frame, text="Results", font="Times 14")
self.generated_frame = Frame(self.root)
self.gfr1 = LabelFrame(self.generated_frame, text="First task", font="Times 14")
self.gfr2 = LabelFrame(self.generated_frame, text="Results", font="Times 14")
self.from_fl = Frame(self.root)
self.flfr1 = LabelFrame(self.from_fl, text="Results", font="Times 14")
self.lbb1 = Label(self.fr2, text="y1 = ", font="Times 14")
self.lbb2 = Label(self.gfr2, text="y1= ", font="Times 14")
self.lbb3 = Label(self.flfr1, text="y1= ", font="Times 14")
# Create Gui for second task
self.ffile_frame = Frame(self.root)
self.ffr1 = LabelFrame(self.ffile_frame, text="First task", font="Times 16")
self.ffr2 = LabelFrame(self.ffile_frame, text="Results", font="Times 14")
self.ggenerated_frame = Frame(self.root)
self.ggfr1 = LabelFrame(self.ggenerated_frame, text="First task", font="Times 14")
self.ggfr2 = LabelFrame(self.ggenerated_frame, text="Results", font="Times 14")
self.ffrom_fl = Frame(self.root)
self.fflfr1 = LabelFrame(self.ffrom_fl, text="Results", font="Times 14")
self.llbb1 = Label(self.ffr2, text="res= ", font="Times 14")
self.llbb2 = Label(self.ggfr2, text="res= ", font="Times 14")
self.llbb3 = Label(self.fflfr1, text="res= ", font="Times 14")
# Create GUI for last (third) task
self.fffile_frame = Frame(self.root)
self.fffr1 = LabelFrame(self.fffile_frame, text="First task", font="Times 16")
self.fffr2 = LabelFrame(self.fffile_frame, text="Results", font="Times 14")
self.gggenerated_frame = Frame(self.root)
self.gggfr1 = LabelFrame(self.gggenerated_frame, text="First task", font="Times 14")
self.gggfr2 = LabelFrame(self.gggenerated_frame, text="Results", font="Times 14")
self.fffrom_fl = Frame(self.root)
self.ffflfr1 = LabelFrame(self.fffrom_fl, text="Results", font="Times 14")
self.lllbb1 = Label(self.fffr2, text="res= ", font="Times 14")
self.lllbb2 = Label(self.gggfr2, text="res= ", font="Times 14")
self.lllbb3 = Label(self.ffflfr1, text="res= ", font="Times 14")
def main_window(self):
win = first_task
self.root.title("lab1")
self.root.geometry('630x400+400+200')
var = IntVar()
var.set(0)
def change_window(wind):
nonlocal win
win = wind
win.size_and_title(self)
nonlocal var
var.set(0)
rad()
return win
mainmenu = Menu(self.root)
self.root.config(menu=mainmenu)
filemenu = Menu(mainmenu, tearoff=0)
filemenu.add_command(label="First Task", font="Times 14", command=lambda wind=first_task: change_window(wind))
filemenu.add_command(label="Second Task", font="Times 14", command=lambda wind=second_task: change_window(wind))
filemenu.add_command(label="Third Task", font="Times 14", command=lambda wind=third_task: change_window(wind))
mainmenu.add_cascade(label="Tasks", font="Times 14", menu=filemenu)
def rad():
if win == first_task:
self.ffile_frame.pack_forget()
self.ffr1.grid_forget()
self.ffr2.grid_forget()
self.ggenerated_frame.pack_forget()
self.ggfr1.grid_forget()
self.ggfr2.grid_forget()
self.ffrom_fl.pack_forget()
self.fflfr1.grid_forget()
self.fffile_frame.pack_forget()
self.fffr1.grid_forget()
self.fffr2.grid_forget()
self.gggenerated_frame.pack_forget()
self.gggfr1.grid_forget()
self.gggfr2.grid_forget()
self.fffrom_fl.pack_forget()
self.ffflfr1.grid_forget()
if var.get() == 0:
win.file(self)
self.generated_frame.pack_forget()
self.gfr1.grid_forget()
self.gfr2.grid_forget()
self.from_fl.pack_forget()
self.flfr1.grid_forget()
self.lbb1.configure(text="y1= ")
self.lbb2.configure(text="y1= ")
self.lbb3.configure(text='y1= ')
elif var.get() == 2:
win.from_file(self)
self.file_frame.pack_forget()
self.fr1.grid_forget()
self.fr2.grid_forget()
self.generated_frame.pack_forget()
self.gfr1.grid_forget()
self.gfr2.grid_forget()
self.lbb1.configure(text="y1= ")
self.lbb2.configure(text="y1= ")
self.lbb3.configure(text='y1= ')
else:
win.generated(self)
self.file_frame.pack_forget()
self.fr1.grid_forget()
self.fr2.grid_forget()
self.from_fl.pack_forget()
self.flfr1.grid_forget()
self.lbb1.configure(text="y1= ")
self.lbb2.configure(text="y1= ")
self.lbb3.configure(text='y1= ')
elif win == second_task:
self.file_frame.pack_forget()
self.fr1.grid_forget()
self.fr2.grid_forget()
self.generated_frame.pack_forget()
self.gfr1.grid_forget()
self.gfr2.grid_forget()
self.from_fl.pack_forget()
self.flfr1.grid_forget()
self.fffile_frame.pack_forget()
self.fffr1.grid_forget()
self.fffr2.grid_forget()
self.gggenerated_frame.pack_forget()
self.gggfr1.grid_forget()
self.gggfr2.grid_forget()
self.fffrom_fl.pack_forget()
self.ffflfr1.grid_forget()
if var.get() == 0:
win.file(self)
self.ggenerated_frame.pack_forget()
self.ggfr1.grid_forget()
self.ggfr2.grid_forget()
self.ffrom_fl.pack_forget()
self.fflfr1.grid_forget()
self.lllbb1.configure(text="f= ")
self.lllbb2.configure(text="f= ")
self.lllbb3.configure(text='f= ')
elif var.get() == 2:
win.from_file(self)
self.ffile_frame.pack_forget()
self.ffr1.grid_forget()
self.ffr2.grid_forget()
self.ggenerated_frame.pack_forget()
self.ggfr1.grid_forget()
self.ggfr2.grid_forget()
self.llbb1.configure(text="res= ")
self.llbb2.configure(text="res= ")
self.llbb3.configure(text='res= ')
else:
win.generated(self)
self.ffile_frame.pack_forget()
self.ffr1.grid_forget()
self.ffr2.grid_forget()
self.ffrom_fl.pack_forget()
self.fflfr1.grid_forget()
self.llbb1.configure(text="res= ")
self.llbb2.configure(text="res= ")
self.llbb3.configure(text='res= ')
else:
self.file_frame.pack_forget()
self.fr1.grid_forget()
self.fr2.grid_forget()
self.generated_frame.pack_forget()
self.gfr1.grid_forget()
self.gfr2.grid_forget()
self.from_fl.pack_forget()
self.flfr1.grid_forget()
self.ffile_frame.pack_forget()
self.ffr1.grid_forget()
self.ffr2.grid_forget()
self.ggenerated_frame.pack_forget()
self.ggfr1.grid_forget()
self.ggfr2.grid_forget()
self.ffrom_fl.pack_forget()
self.fflfr1.grid_forget()
if var.get() == 0:
win.file(self)
self.gggenerated_frame.pack_forget()
self.gggfr1.grid_forget()
self.gggfr2.grid_forget()
self.fffrom_fl.pack_forget()
self.ffflfr1.grid_forget()
self.lllbb1.configure(text="f= ")
self.lllbb2.configure(text="f= ")
self.lllbb3.configure(text='f= ')
elif var.get() == 2:
win.from_file(self)
self.fffile_frame.pack_forget()
self.fffr1.grid_forget()
self.fffr2.grid_forget()
self.gggenerated_frame.pack_forget()
self.gggfr1.grid_forget()
self.gggfr2.grid_forget()
self.lllbb1.configure(text="f= ")
self.lllbb2.configure(text="f= ")
self.lllbb3.configure(text='f= ')
else:
win.generated(self)
self.fffile_frame.pack_forget()
self.fffr1.grid_forget()
self.fffr2.grid_forget()
self.fffrom_fl.pack_forget()
self.ffflfr1.grid_forget()
self.lllbb1.configure(text="f= ")
self.lllbb2.configure(text="f= ")
self.lllbb3.configure(text='f= ')
r1 = Radiobutton(text="By hands", variable=var, value=0, command=rad, font="Times 14")
r2 = Radiobutton(text="Generated", variable=var, value=1, font="Times 14", command=rad)
r3 = Radiobutton(text="From File", variable=var, value=2, font="Times 14", command=rad)
r1.pack(anchor=W)
r2.pack(anchor=W)
r3.pack(anchor=W)
win.file(self)
self.root.mainloop()
window = Lab1()
window.main_window()
|
Eglantinee/AMO
|
Lab1/main.py
|
main.py
|
py
| 10,546 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70267296829
|
import epyk as pk
# Create a basic report object
page = pk.Page()
page.headers.dev()
tb1 = page.ui.layouts.table()
tb1.style.css.border_collapse = "separate"
tb1.style.css.border_spacing = 10
# Add a header (first row is by default the header)
tb1 += [1, 2, 3]
# Change the CSS style of a cell in the header
tb1.get_header()[1].style.color = 'red'
tb1.get_header()[1].style.background = 'grey'
# Change all header cells style
for c in tb1.get_header():
# This will add CSS attribute to the CSS inline section
c.style.padding = 5
c.style.border = "1px solid black"
# Create bespoke HTML component to be added to the table
span1 = page.ui.texts.span("Text 1").tooltip("Click to hide header last line")
span2 = page.ui.texts.span("Text 2")
span3 = page.ui.texts.span("Text 3").tooltip("Click to change table values")
# Add the row to the table (to the body)
tb1 += [span1, span2, span3]
span3.click([
page.js.log(span3.dom.content),
span2.build("New text"),
# Change the cell properties
#tb1[1].cell(1).dom.css({"background": page.theme.warning[0]})
tb1[0][1].dom.css({"background": page.theme.warning[0]}),
# Change the component in the cell
tb1[0][1].val[0].dom.css({'text-decoration': 'underline'}),
])
# Add an extra row to the header
tb1.header += [4, 5]
tb1.get_header(1)[1].colspan(2)
tb1.get_header(1)[1].style.border = "1px solid black"
span1.click([
# The display value must be table-row for a row
tb1.get_header(1).dom.toggle()
])
# Add a text caption to this table
text = tb1.add_caption("This is a text attached to the table")
text.style.css.text_align = 'left'
# CSS class
# Create a bespoke CSS class for this report
from epyk.core.css.styles.classes import CssStyle
class MyCssBody(CssStyle.Style):
_attrs = {'background': 'inherit'}
_hover = {'background': page.theme.success[0]}
_selectors = {'child': 'tbody td'}
# Attach the class to the component
tb1.style.add_classes.custom(MyCssBody)
# delete row
page.ui.button("Remove").click([
tb1[0].dom.remove()
])
|
epykure/epyk-templates
|
locals/layouts/table.py
|
table.py
|
py
| 2,026 |
python
|
en
|
code
| 17 |
github-code
|
6
|
17333008494
|
import collections
class Rectangle():
def __init__(self, w, h, placed=None, free_wh=None):
self.wh = w, h
self.placed = placed or []
self.free_wh = free_wh or (0,0)
@property
def w(self):
return self.wh[0]
@property
def h(self):
return self.wh[1]
def transposed(self):
return Rectangle(self.h, self.w,
[r.transposed() for r in self.placed],
(self.free_wh[1], self.free_wh[0]),
)
@staticmethod
def placed_build(W, H, rect):
# build rectangle of size (W, H) with placed rectangle rect
if not W or not H:
return
w, h = rect.wh
if (W, H) == (w, h):
return Rectangle(W, H, [rect])
elif (W, H) == (h, w):
return Rectangle(W, H, [rect.transposed()])
H_h = H - h, W >= w
W_w = W - w, H >= h
H_w = H - w, W >= h
W_h = W - h, H >= w
cases = [H_h, W_w, H_w, W_h]
residue = [c[0] for c in cases if c[1] and c[0] >= 0]
if not residue:
return None
min_size = min(residue)
if H_h[0] == min_size and H_h[1]:
placed_r = Rectangle(w, H,
[rect],
(w, H - h))
free_wh = (W - w, H)
elif W_w[0] == min_size and W_w[1]:
placed_r = Rectangle(W, h,
[rect],
(W - w, h))
free_wh = (W, H - h)
elif H_w[0] == min_size and H_w[1]:
placed_r = Rectangle(h, H,
[rect.transposed()],
(h, H - w))
free_wh = (W - h, H)
elif W_h[0] == min_size and W_h[1]:
placed_r = Rectangle(W, w,
[rect.transposed()],
(W - h, w))
free_wh = (W, H - w)
else:
assert False, 'impossible'
out = Rectangle(W, H, [placed_r], free_wh)
return out
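# Worked example (added comment, illustrative values): placed_build(10, 5, Rectangle(4, 3))
# picks the orientation with the smallest leftover strip (H_w == 1 here), so it places
# the rectangle rotated inside a 3 x 5 strip whose internal free area is (3, 1), and the
# returned 10 x 5 rectangle reports free_wh == (7, 5) for the remaining space.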
def place(self, rect):
W, H = self.free_wh
r = Rectangle.placed_build(W, H, rect)
if not r:
return False
self.placed.append(r.placed[0])
self.free_wh = r.free_wh
#print(f'place {rect.wh}: free {W,H} -> {self.free_wh}')
return True
@staticmethod
def concat(rects, by='w'):
w = list(map(lambda r : r.w, rects))
h = list(map(lambda r : r.h, rects))
if 'w' == by:
max_w = max(w)
placed_r = [
Rectangle.placed_build(max_w, r.h, r)
for r in rects
]
out = Rectangle(max_w, sum(h), placed_r)
else:
max_h = max(h)
placed_r = [
Rectangle.placed_build(r.w, max_h, r)
for r in rects
]
out = Rectangle(sum(w), max_h, placed_r)
return out
@staticmethod
def min_concat(W, H, rect1, rect2):
rect2T = Rectangle(rect2.h, rect2.w, rect2.placed, rect2.free_wh)
concat_cases = [
Rectangle.concat([rect1, rect2], by='w'),
Rectangle.concat([rect1, rect2], by='h'),
Rectangle.concat([rect1, rect2T], by='w'),
Rectangle.concat([rect1, rect2T], by='h'),
]
if W < H:
W, H = H, W
concat_cases = [r for r in concat_cases if max(r.wh) <= W and min(r.wh) <= H]
if not concat_cases:
return
return min(concat_cases, key=lambda r : r.free_square)
@property
def square(self):
return self.w * self.h
@property
def free_square(self):
out = self.free_wh[0] * self.free_wh[1]
for r in self.placed:
out += r.free_square
return out
def free_print(self):
if self.free_wh[0] and self.free_wh[1]:
print(self.free_wh)
for r in self.placed:
r.free_print()
@property
def fullness(self):
return (1 - self.free_square / self.square) * 100
def __repr__(self):
return f'Rectangle(w={self.w}, h={self.h}, childs={len(self.placed)}, free_wh={self.free_wh}, fullness={self.fullness}%)'
def equal_side_concat_step(rects, W, H, order='descending'):
w = list(map(lambda r : r.w, rects))
h = list(map(lambda r : r.h, rects))
side_cnt = collections.Counter([s for s in w + h if s <= max(W, H)])
side_repeats = [ side
for side, cnt in side_cnt.most_common()
if cnt > 1
]
side_repeats.sort(reverse=('descending' == order))
single_rects = list(rects)
side_to_rects = {}
for side in side_repeats:
side_to_rects[side] = [r for r in single_rects
if side in r.wh]
single_rects = [r for r in single_rects
if side not in r.wh]
# TODO: if a rectangle matches other rectangles on every side -> consider which side to merge along
concat_rects = []
for side, side_rects in side_to_rects.items():
if 1 == len(side_rects):
single_rects.append(side_rects[0])
continue
for r in side_rects:
if side != r.w:
r.wh = r.h, r.w
# TODO: 1d packing along H or W
# below, packing proceeds along the largest possible side
side_rects.sort(key=lambda r : r.h, reverse=True)
concat_side = max(H, W) if side <= min(H, W) else min(H, W)
while side_rects:
rects_for_concat = []
rest_rects = []
sum_h = 0
for r in side_rects:
if sum_h + r.h <= concat_side:
rects_for_concat.append(r)
sum_h += r.h
else:
rest_rects.append(r)
if len(rects_for_concat) == 1:
single_rects.append(rects_for_concat[0])
elif len(rects_for_concat) > 1:
concat_rects.append(Rectangle.concat(rects_for_concat, by='w'))
else:
single_rects.extend(rest_rects)
break
#assert False, f'side_rects={side_rects}, rest_rects={rest_rects}, max_h={max_h}'
side_rects = rest_rects
return single_rects, concat_rects
def exact_concat(rects, W, H):
merge_rects = list(rects)
while True:
single_rects, concat_rects = equal_side_concat_step(merge_rects, W, H, order='descending')
#print(f'single_rects={single_rects} \n concat_rects={concat_rects} \n')
merge_rects = single_rects + concat_rects
if not concat_rects:
break
while True:
single_rects, concat_rects = equal_side_concat_step(merge_rects, W, H, order='ascending')
merge_rects = single_rects + concat_rects
if not concat_rects:
break
return merge_rects
def pallet_exact_side_placement(rects, pallet, side='max'):
W, H = pallet.free_wh
side = max(W, H) if 'max' == side else min (W, H)
rest_rects = []
for r in rects:
if side in r.wh:
if pallet.place(r):
continue
rest_rects.append(r)
return rest_rects
def exact_placement(rects, pallet):
rest_rects = list(rects)
while rest_rects:
rest_rects1 = pallet_exact_side_placement(rest_rects, pallet, side='max')
rest_rects2 = pallet_exact_side_placement(rest_rects1, pallet, side='min')
if len(rest_rects) == len(rest_rects2):
break
rest_rects = rest_rects2
return rest_rects
def rects_flatten(rects):
out = []
for r in rects:
if r.placed:
out.extend(rects_flatten(r.placed))
else:
out.append(r)
return out
def min_residue(WH, rects):
W, H = WH
min_r = None
min_residue_wh = WH
for r in rects:
placed_r = Rectangle.placed_build(W, H, r)
if placed_r:
residue_wh = placed_r.placed[0].free_wh
if residue_wh[0] * residue_wh[1] < min_residue_wh[0] * min_residue_wh[1]:
min_residue_wh = residue_wh
min_r = r
return min_r, min_residue_wh
def min_residue_placement(pallet, rects):
max_r, min_residue_wh = min_residue(pallet.free_wh, rects)
if not max_r or min_residue_wh[0] * min_residue_wh[1] > max_r.square:
return rects
rest_rects = [r for r in rects if r is not max_r]
r, _ = min_residue(min_residue_wh, rest_rects)
if not r:
pallet.place(max_r)
return rest_rects
return rects
def find_concat_pair(W, H, rects):
min_loss = 1
min_values = None
for i in range(len(rects)):
for j in range(i + 1, len(rects)):
cur_concat = Rectangle.min_concat(W, H, rects[i], rects[j])
if not cur_concat:
continue
cur_loss = cur_concat.free_square / min(rects[i].square, rects[j].square)
if cur_loss < min_loss:
min_loss = cur_loss
min_values = (i, j, cur_concat)
return min_values
def free_placement(pallet, rects):
rest_rects = list(rects)
while rest_rects:
W, H = pallet.free_wh
if not W * H:
break
concat_rects = exact_concat(rest_rects, W, H)
#print(f'concat_rects={concat_rects}')
rest_rects = exact_placement(concat_rects, pallet)
#print(f'exact_placement: rest_rects={rest_rects}, concat_rects={concat_rects}, pallet={pallet}')
if len(rest_rects) == len(concat_rects):
rest_rects2 = min_residue_placement(pallet, rest_rects)
if len(rest_rects2) == len(rest_rects):
find_values = find_concat_pair(W, H, rest_rects)
if not find_values:
#print(f'not find_concat_pair for rest_rects={rest_rects}')
break
i, j, concat_r = find_values
assert pallet.place(concat_r)
del rest_rects[j]
del rest_rects[i]
else:
rest_rects = rest_rects2
rest_rects = rects_flatten(rest_rects)
return rest_rects
def pallet_placement(pallet, rects):
rest_rects = free_placement(pallet, rects)
# placed = len(rects) - len(rest_rects)
# if placed:
# print(f'pallet: {pallet}, placed: {placed} \n')
for r in pallet.placed:
rest_rects = pallet_placement(r, rest_rects)
return rest_rects
def assign_coordinates(x, y, W, H, rects):
out_xywh = []
for r in rects:
if W == r.h or H == r.w:
r = r.transposed()
if not r.placed:
out_xywh.append((x, y, r.w, r.h))
#print(f'append {x,y,r.w,r.h}')
else:
out_xywh.extend(assign_coordinates(x, y, r.w, r.h, r.placed))
if W == r.w:
y += r.h
H -= r.h
elif H == r.h:
x += r.w
W -= r.w
else:
assert False, f'WH={W,H}, r={r}'
return out_xywh
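# Added usage sketch (assumed driver code, not part of the original module): pack a list
# of rectangles onto one pallet and read back their coordinates.
# pallet = Rectangle(100, 80, free_wh=(100, 80))
# rects = [Rectangle(30, 20), Rectangle(50, 40), Rectangle(20, 20)]
# rest = pallet_placement(pallet, rects)                      # rectangles that did not fit
# xywh = assign_coordinates(0, 0, pallet.w, pallet.h, pallet.placed)
# print(pallet.fullness, xywh)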
|
yad439/pallet-packing
|
concat_baseline/concat_baseline.py
|
concat_baseline.py
|
py
| 11,602 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71904076349
|
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
gauth = GoogleAuth()
gauth.LoadCredentialsFile("mycreds.txt")
if gauth.credentials is None:
gauth.LocalWebserverAuth()
elif gauth.access_token_expired:
gauth.Refresh()
else:
gauth.Authorize()
gauth.SaveCredentialsFile("mycreds.txt")
drive = GoogleDrive(gauth)
file1 = drive.CreateFile({'title': 'Automata.txt'}) # Create GoogleDriveFile instance with title 'Hello.txt'.
file1.SetContentString('Automataaa') # Set content of the file from given string.
file1.Upload()
print(drive)
|
gmagannaDevelop/GlcJournal
|
pydrive/automated_access.py
|
automated_access.py
|
py
| 568 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71150367547
|
import numpy as np
import pandas as pd
import scipy
from sklearn.linear_model import LinearRegression as linreg
from sklearn.linear_model import LogisticRegression as logreg
from sklearn.cross_validation import KFold
from sklearn.cross_validation import *
from sklearn import cross_validation
titanic=pd.read_csv("train.csv")
#print(titanic.describe())
#print(titanic.head(5))
# ------------------- DATA CORRECTION --------------------------------
# 1) Fill missing Age data with median
titanic["Age"]=titanic["Age"].fillna(titanic["Age"].median())
# 2) Convert Sex string with 0 or 1
titanic.loc[titanic["Sex"] == "male", "Sex"] = 0 #convert 0 for men
titanic.loc[titanic["Sex"] =="female", "Sex"]=1 #convert 1 for women
# 3) Fill missing Embarked data with most common char
print(pd.value_counts(titanic["Embarked"].values, sort=False))
# "S" is most common char -> chosen as default for missing values
titanic["Embarked"]=titanic["Embarked"].fillna("S")
#4) Replace Embarked char with numeric code
#titanic.loc[titanic["Embarked"]=="S", "Embarked"]=0 # 'S' -> 0
#titanic.loc[titanic["Embarked"]=="C", "Embarked"]=1 # 'C' -> 1
titanic.loc[titanic["Embarked"]=="S", "Embarked"]=0
titanic.loc[titanic["Embarked"]=="C", "Embarked"]=1
titanic.loc[titanic["Embarked"]=="Q", "Embarked"]=2 # 'Q' -> 2
# input column used for predictions :
predictors=["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
# Initialize the algorithm
algo_linreg = linreg()
# Generate cross-validation folds with random splits
# returns row indices for the corresponding train and test sets
kf =KFold(titanic.shape[0], n_folds=3, random_state=1)
# Make the predictions
predictions =[]
for train, test in kf:
# Which predictors used on train fold
train_predictors = (titanic[predictors].iloc[train,:])
# Target/goal used to train the algo
train_target= titanic["Survived"].iloc[train]
# Train the algo with the predictors and target
# .fit(x input, y output)
algo_linreg.fit(train_predictors, train_target)
# Make predictions with the trained algo on test fold
test_predictions = algo_linreg.predict(titanic[predictors].iloc[test,:])
predictions.append(test_predictions)
# The predictions are in 3 Numpy arrays
# So we concatenate the arrays on axis 0 (bc only 1 axis)
predictions=np.concatenate(predictions, axis=0)
predictions[predictions> .5]=1
predictions[predictions<= .5]=0
print(predictions)
print(sum(predictions==titanic["Survived"]))
accuracy= sum(predictions==titanic["Survived"])/len(predictions)
print(accuracy) # = 0.783
#------------------- Logistic Regression method ---------------------
# Initialize the algo
algo_logreg = logreg(random_state=1)
# Compute accuracy score for all cross-V folds;
# cross_val_score(algo, predictors, target, cross-validation fold)
scores = cross_validation.cross_val_score(algo_logreg, titanic[predictors], titanic["Survived"], cv=3)
# Mean of the scores for each folds (3 folds)
print(scores.mean())
#----------------------------------- Log Reg. with test set ---------------------
titanic_test = pd.read_csv("test.csv")
# I) Clean data
titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0
titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S")
titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0
titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1
titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2
# II) Test algo on data
# Initialize the algo
algo_logreg_test=logreg(random_state=1)
# Train the algo using all training data
algo_logreg_test.fit(titanic[predictors], titanic["Survived"])
# Make predictions with algo on data
predictions=algo_logreg_test.predict(titanic_test[predictors])
# Generate new dataset for kaggle submission
submission= pd.DataFrame({
"PassengerId" : titanic_test["PassengerId"],
"Survived": predictions
})
submission.to_csv("kaggle.csv", index=False)
|
leminhtr/kaggle
|
Titanic/main_linreg-logreg.py
|
main_linreg-logreg.py
|
py
| 4,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41202734206
|
import tcod
import random
import copy
import constants as const
import entity
import render
import numpy as np
import random_loot as rloot
class Room:
"""
A room! Wow.
"""
def __init__(self, x, y, w, h):
self.x = x # upper left point
self.y = y # upper left point
self.w = w
self.h = h
self.n_loot = 0
self.neighbors = []
class GameMap:
def __init__(self, width, height, con, show_map=False):
self.sample = None
self.width = width
self.con = con
self.height = height
self.tcod_empty_map = tcod.map.Map(self.width, self.height)
self.show_map = show_map
for x in range(self.width):
for y in range(self.height):
self.tcod_empty_map.transparent[y,x] = True
self.tcod_empty_map.walkable[y,x] = True
def get_sample(self):
"""
Used to color the walls and the floor
"""
ogrid = [np.arange(self.width, dtype=np.float32), np.arange(self.height, dtype=np.float32)]
noise = tcod.noise.Noise(
dimensions=2,
algorithm=tcod.NOISE_PERLIN,
implementation=tcod.noise.TURBULENCE,
hurst=0.9,
lacunarity=1.6,
octaves=5)
min_lum = 0.5
max_lum = 1
self.sample = noise.sample_ogrid(ogrid)*(max_lum-min_lum) + min_lum
def add_loot(self, turns, player, entities):
"""
Add loot to a new level
"""
n_slot = {}
for fslot in const.FeatureSlot:
# 2d4 features of each slot
n_slot[fslot] = sum([random.randint(1,4) for i in range(2)])
for wslot in const.WeaponSlot:
# 1d4 weapons of each slot
n_slot[wslot] = sum([random.randint(1,4) for i in range(1)])
for slot in n_slot:
n_generated = n_slot.get(slot)
for n in range(n_generated):
n_try = 50
while n_try > 0:
n_try -= 1
# loot is more likely in rooms with low arity
arity = random.choice([1,1,1,2,2,3,4,5])
rlist = [r for r in self.rooms_with_arity(arity) if r.n_loot < const.max_item_per_room]
if rlist:
room = random.choice(rlist)
(x,y) = self.random_cell_in_room(room)
if not self.tiles[x][y].item:
room.n_loot += 1
self.tiles[x][y].put_item(rloot.get_random_loot(slot, turns, player), entities)
break
def rooms_with_arity(self, max_arity):
"""
Return the list of room with at most max_arity neighbors
"""
return [r for r in self.room_list if len(r.neighbors) <= max_arity]
def make_boss_map(self, turns, entities, player):
"""
An arena
"""
self.get_sample()
self.tiles = [[entity.Tile(x,y,color_coeff=self.sample[x][y]) for y in range(self.height)] for x in range(self.width)]
self.tcod_map = tcod.map.Map(self.width, self.height)
center_x = self.width / 2
center_y = self.height / 2
a = 30
b = 15
self.room_list = [Room(int(center_x - a), int(center_y - b), 2*a, 2*b)]
for x in range(self.width):
for y in range(self.height):
if ((x - center_x)/a)**2 + ((y - center_y)/b)**2 <= 1: # ellipse equation
self.set_unblocked(x, y)
(player.x, player.y) = (int(center_x / 2), int(center_y))
boss = entity.Boss(int(center_x * 1.5), int(center_y))
entities.append(boss)
turns.add_turn(boss.speed_mov, const.TurnType.ENEMY, boss)
self.recompute_fov(player.x, player.y)
return boss
def make_map_bsp(self, turns, entities, player):
self.get_sample()
self.tiles = [[entity.Tile(x,y,color_coeff=self.sample[x][y]) for y in range(self.height)] for x in range(self.width)]
self.room_list = None
self.tcod_map = tcod.map.Map(self.width, self.height)
map_width = self.width
map_height = self.height
if self.show_map:
for x in range(map_width):
for y in range(map_height):
self.tiles[x][y].is_seen = True
# we guarantee a wall on the north and the west
# this is necessary due to how the rooms are generated
bsp = tcod.bsp.BSP(1,1,map_width-1, map_height-1)
bsp.split_recursive(6,6,6,1,1)
self.room_list = self.recursive_make_rooms(bsp)
# After the BSP generation, the dungeon is a tree
# Create some loops
rlist = self.rooms_with_arity(2)
for i in range(6):
for j in range(10):
c = random.choice(range(len(rlist)))
best = self.closest_rooms([rlist[c]], self.room_list)
if best:
astar = tcod.path.AStar(self.tcod_map)
score_tuple = None
best_tuple = []
for tuple_param in best:
(x1, y1, x2, y2, _, _) = tuple_param
path = astar.get_path(x1, y1, x2, y2)
tmp_score = int(len(path)/3)
if not score_tuple or tmp_score > score_tuple:
score_tuple = tmp_score
best_tuple = [tuple_param]
elif tmp_score == score_tuple:
best_tuple.append(tuple_param)
self.connect_rooms(random.choice(best_tuple))
del rlist[c]
break
# Initialization
(player.x, player.y) = self.random_cell()
(x, y) = self.random_cell()
self.place_stairs(x,y)
(x, y) = self.random_cell()
self.place_boss_stairs(x,y)
# self.place_boss_stairs(player.x,player.y) # DEBUG
self.add_loot(turns, player, entities)
self.recompute_fov(player.x, player.y)
def recompute_fov(self, x, y, light_walls=True, radius=0):
self.tcod_map.compute_fov(x, y, algorithm=2, radius=radius, light_walls=light_walls)
def is_visible(self, x, y):
return self.tcod_map.fov[y,x]
def spawn_boss(self, entities, fslot, level, player):
for i in range(50):
(x,y) = self.random_cell()
if not any([entity for entity in entities if entity.x == x and entity.y == y]):
if (fslot == const.FeatureSlot.i and level >= 3) or (fslot != const.FeatureSlot.i and level >= 2):
class_name = fslot.value.get("bug_class")
the_class = getattr(entity, class_name)
monster = the_class(x, y, level, player.fequiped.get(fslot), fslot)
else:
monster = entity.Monster(x, y, level, None, fslot)
entities.append(monster)
return monster
return None
def spawn(self, entities, feature):
# We try at most 50 times to spawn it
for i in range(50):
(x,y) = self.random_cell()
if not self.is_visible(x,y) and not any([entity for entity in entities if entity.x == x and entity.y == y]):
level = random.randint(1, 3)
if feature.n_bugs[level - 1] < const.n_bugs_max[feature.level - 1][level - 1]:
# mapgen bugs are OP. Give their ability to level 3 bugs only
if (feature.fslot == const.FeatureSlot.i and level >= 3) or (feature.fslot != const.FeatureSlot.i and level >= 2):
class_name = feature.fslot.value.get("bug_class")
the_class = getattr(entity, class_name)
monster = the_class(x, y, level, feature)
else:
monster = entity.Monster(x, y, level, feature)
entities.append(monster)
return monster
return None
def iterator_perimeter_room(self, r):
for x in range(r.x, r.x + r.w):
yield (x, r.y)
yield (x, r.y + r.h - 1)
# y has a shorter range because the corners are already yielded
for y in range(r.y + 1, r.y + r.h - 1):
yield (r.x, y)
yield (r.x + r.w - 1, y)
def closest_rooms(self, l1, l2):
best = []
score_best = None
for r1 in l1:
for r2 in l2:
if r1 != r2 and r1 not in r2.neighbors:
for (x1, y1) in self.iterator_perimeter_room(r1):
for (x2, y2) in self.iterator_perimeter_room(r2):
dx = abs(x1-x2)
dy = abs(y1-y2)
# This is not a hack. It is… hand-crafted mapgen
# if dx >= 4 and dy >= 4:
# score = max(abs(x1-x2),abs(y1-y2)) # Chebyshev distance
# else:
score = abs(x1-x2) + abs(y1-y2) # Manhattan distance
if score_best == None or score < score_best:
score_best = score
best = [(x1,y1,x2,y2,r1,r2)]
elif score == score_best:
best.append((x1,y1,x2,y2,r1,r2))
return best
def random_cell(self):
return self.random_cell_in_room(random.choice(self.room_list))
def random_cell_in_room(self, r):
while True:
x = random.randrange(r.x, r.x + r.w)
y = random.randrange(r.y, r.y + r.h)
if self.is_floor(x,y):
return (x,y)
def recursive_make_rooms(self, bsp):
if not bsp.children:
w = random.randrange(max(3,int(bsp.w/3)),bsp.w-2)
h = random.randrange(max(3,int(bsp.h/3)),bsp.h-2)
upper_left_x = random.randrange(bsp.x, bsp.x + bsp.w - w)
upper_left_y = random.randrange(bsp.y, bsp.y + bsp.h - h)
for x in range(0,w):
for y in range(0,h):
self.set_unblocked(upper_left_x + x, upper_left_y + y)
# Sometimes, add a central pillar
if (w % 2) == 1 and (h % 2) == 1:
if random.randrange(0,10) == 0:
center_x = upper_left_x + int((w-1)/2)
center_y = upper_left_y + int((h-1)/2)
self.set_blocked(center_x, center_y)
# And rarely a big one (rare because big rooms aren't common)
if (w % 2) == 0 and (h % 2) == 0 and w >= 10 and h >= 10 and random.randrange(0,2) == 0:
center_x = upper_left_x + int(w/2) - 1
center_y = upper_left_y + int(h/2) - 1
for x in range(0,2):
for y in range(0,2):
self.set_blocked(center_x + x, center_y + y)
return [Room(upper_left_x, upper_left_y, w, h)]
else:
l1 = self.recursive_make_rooms(bsp.children[0])
l2 = self.recursive_make_rooms(bsp.children[1])
# it is guaranteed to connect
self.connect_rooms(random.choice(self.closest_rooms(l1,l2)))
return l1+l2
def connect_rooms(self, tuple_param, force=False):
(x1, y1, x2, y2, r1, r2) = tuple_param
r1.neighbors.append(r2)
r2.neighbors.append(r1)
door_chance = 4
if x1 == x2:
if y1 > y2:
y1 -= 1
y2 += 1
else:
y1 += 1
y2 -= 1
self.create_v_tunnel(y1, y2, x1)
if random.randint(0,door_chance) == 0:
self.place_door(x1, y1)
elif random.randint(0,door_chance) == 0:
self.place_door(x2, y2)
elif y1 == y2:
if x1 > x2:
x1 -= 1
x2 += 1
else:
x1 += 1
x2 -= 1
self.create_h_tunnel(x1, x2, y1)
if random.randint(0,door_chance) == 0:
self.place_door(x1, y1)
elif random.randint(0,door_chance) == 0:
self.place_door(x2, y2)
# elif abs(x1-x2) < 3 or abs(y1-y2) < 3:
else:
if random.randint(0, 1) == 1:
if x1 > x2:
x1 -= 1
else:
x1 += 1
if y1 > y2:
y2 += 1
y3 = y1 - 1
else:
y2 -= 1
y3 = y1 + 1
self.create_h_tunnel(x1, x2, y1)
self.create_v_tunnel(y3, y2, x2)
if random.randint(0,door_chance) == 0 and abs(x1-x2) > 1:
self.place_door(x1, y1)
elif random.randint(0,door_chance) == 0 and abs(y1-y2) > 1:
self.place_door(x2, y2)
else:
if x1 > x2:
x2 += 1
x3 = x1 - 1
else:
x2 -= 1
x3 = x1 + 1
if y1 > y2:
y1 -= 1
else:
y1 += 1
self.create_v_tunnel(y1, y2, x1)
self.create_h_tunnel(x3, x2, y2)
if random.randint(0,door_chance) == 0 and abs(y1-y2) > 1:
self.place_door(x1, y1)
elif random.randint(0,door_chance) == 0 and abs(x1-x2) > 1:
self.place_door(x2, y2)
def create_h_tunnel(self, x1, x2, y):
for x in range(min(x1, x2), max(x1, x2) + 1):
self.set_unblocked(x,y)
def create_v_tunnel(self, y1, y2, x):
for y in range(min(y1, y2), max(y1, y2) + 1):
self.set_unblocked(x,y)
def get_copy_map(self):
return copy.deepcopy(self.tcod_map)
def get_copy_empty_map(self):
return copy.deepcopy(self.tcod_empty_map)
def set_tile_type(self, x, y, ttype):
self.tiles[x][y] = entity.Tile(x, y, color_coeff=self.sample[x][y], ttype=ttype)
if self.show_map:
self.tiles[x][y].is_seen = True
self.tcod_map.transparent[y,x] = ttype.value.get("transparent")
self.tcod_map.walkable[y,x] = not ttype.value.get("collision")
def is_over_map(self, x, y):
return x >= 0 and y >= 0 and x < self.width and y < self.height
def set_blocked(self, x, y):
self.set_tile_type(x, y, const.TileType.WALL)
def set_unblocked(self, x, y):
self.set_tile_type(x, y, const.TileType.FLOOR)
def place_door(self, x, y):
self.set_tile_type(x, y, const.TileType.DOOR)
def place_stairs(self, x, y):
self.set_tile_type(x, y, const.TileType.STAIRS)
def place_boss_stairs(self, x, y):
self.set_tile_type(x, y, const.TileType.BOSS_STAIRS)
def is_floor(self, x, y):
return self.tiles[x][y].ftype == const.TileType.FLOOR
def is_door(self, x, y):
return self.tiles[x][y].ftype == const.TileType.DOOR
def is_stairs(self, x, y):
return self.tiles[x][y].ftype == const.TileType.STAIRS
def is_boss_stairs(self, x, y):
return self.tiles[x][y].ftype == const.TileType.BOSS_STAIRS
def is_blocked(self, x, y):
return not self.tcod_map.walkable[y,x]
def drop_item_on_floor(self, player, entities, item, drop_key):
if not self.tiles[player.x][player.y].item:
player.remove_from_inventory(item, drop_key)
return self.tiles[player.x][player.y].put_item(item, entities)
def is_weapon_on_floor_directly_equipable(self, player):
item = self.tiles[player.x][player.y].item
if item and isinstance(item, entity.Weapon) and not player.wequiped.get(item.wslot):
return True
return False
def get_item_on_floor(self, player, entities):
if self.tiles[player.x][player.y].item:
item = self.tiles[player.x][player.y].take_item(entities)
key = player.add_to_inventory(item)
return (item,key)
def description_item_on_floor(self, player):
"""
Get the name of the item on the floor where the player is
"""
if self.tiles[player.x][player.y].item:
return self.tiles[player.x][player.y].item.name
return None
def is_there_item_on_floor(self, player):
"""
Is there an item on the floor, where the player is?
"""
return self.tiles[player.x][player.y].item != None
|
cpiod/1rl
|
game_map.py
|
game_map.py
|
py
| 16,741 |
python
|
en
|
code
| 3 |
github-code
|
6
|
26298956035
|
import argparse
from dateutil import tz
from datetime import datetime
from spotipy import Spotify
import spotipy.util
from models import Play, Track, Album, Artist, PostgreSQLConnection
import settings
def set_timezone_to_datetime(datetime_to_set, timezone):
return datetime_to_set.replace(tzinfo=tz.gettz(timezone))
def convert_played_at_from_response_to_datetime(played_at):
try:
return datetime.strptime(played_at, '%Y-%m-%dT%H:%M:%S.%fZ')
    except ValueError:
# For the single moment where the played at time hits a full second
return datetime.strptime(played_at, '%Y-%m-%dT%H:%M:%SZ')
def convert_datetime_from_timezone_to_timezone(datetime_to_convert, from_tz_code, to_tz_code):
from_tz = tz.gettz(from_tz_code)
to_tz = tz.gettz(to_tz_code)
datetime_to_convert = datetime_to_convert.replace(tzinfo=from_tz)
converted_datetime = datetime_to_convert.astimezone(to_tz)
return converted_datetime
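# Illustrative example (added, values are made up): a Spotify 'played_at' string is first
# parsed as naive UTC and then shifted to CET, e.g.
#   dt_utc = convert_played_at_from_response_to_datetime('2017-06-06T16:06:45.123Z')
#   dt_cet = convert_datetime_from_timezone_to_timezone(dt_utc, 'UTC', 'CET')
# dt_cet then represents the same instant expressed in Central European local time.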
class SpotifyConnection(object):
def __init__(self, user_data):
self.user_name = user_data['user_name']
token = spotipy.util.prompt_for_user_token(self.user_name,
scope='user-read-recently-played',
client_id=user_data['client_id'],
client_secret=user_data['client_secret'],
redirect_uri=user_data['redirect_uri'])
self.client = Spotify(auth=token)
self.db = self.init_db()
def init_db(self):
return PostgreSQLConnection()
def get_artist(self, artist_id):
artist = self.db.session.query(Artist).get(artist_id)
if artist:
return artist
else:
artist_response = self.client.artist(artist_id)
artist = Artist()
artist.artist_id = artist_id
artist.artist_data = artist_response
self.db.save_instance(artist)
print("> Artist {} was not in database.".format(artist.artist_data['name']))
return self.db.session.query(Artist).get(artist_id)
def get_album(self, album_id):
album = self.db.session.query(Album).get(album_id)
if album:
return album
else:
album_response = self.client.album(album_id)
album = Album()
album.album_data = album_response
album.album_id = album_response['id']
# Artists
for album_artist_response in album_response['artists']:
album.artists.append(self.get_artist(album_artist_response['id']))
self.db.save_instance(album)
print("> Album {} was not in database.".format(album.album_data['name']))
return self.db.session.query(Album).get(album_id)
def get_track(self, track_id):
track = self.db.session.query(Track).get(track_id)
if track:
return track
else:
response = self.client.track(track_id)
track = Track()
track.track_id = track_id
track.track_data = response
# Album
track.album = self.get_album(response['album']['id'])
# Artists
for artist_response in response['artists']:
track.artists.append(self.get_artist(artist_response['id']))
# Audio feature
audio_feature_response = self.client.audio_features(track_id)[0]
if audio_feature_response: # Some tracks do not have audio features
track.audio_feature_data = audio_feature_response
print("> Track {} was not in database.".format(track.track_data['name']))
self.db.save_instance(track)
return self.db.session.query(Track).get(track_id)
def get_play_from_played_at_utc_and_track_id(self, played_at_utc, track_id):
played_at_utc = convert_played_at_from_response_to_datetime(played_at_utc)
played_at_utc = set_timezone_to_datetime(played_at_utc, timezone='UTC')
played_at_cet = convert_datetime_from_timezone_to_timezone(played_at_utc,
from_tz_code='UTC',
to_tz_code='CET')
# Play
play = Play()
play.user_name = self.user_name
play.played_at_utc_timestamp = played_at_utc.timestamp() * 1000
play.played_at_utc = played_at_utc
play.played_at_cet = played_at_cet
play.day = played_at_cet.day
play.month = played_at_cet.month
play.year = played_at_cet.year
play.hour = played_at_cet.hour
play.minute = played_at_cet.minute
play.second = played_at_cet.second
play.day_of_week = played_at_cet.weekday()
play.week_of_year = played_at_cet.date().isocalendar()[1]
# Track
track = self.get_track(track_id)
play.track = track
play.track_id = track_id
return play
def _get_play_tuples_from_response(self, response):
plays = []
for item in response['items']:
play_tuple = (item['played_at'], item['track']['id'])
plays.append(play_tuple)
return plays
def _get_play_tuples(self, limit=50, after=None):
play_tuples = []
response = self.client._get('me/player/recently-played', after=after, limit=limit)
play_tuples.extend(self._get_play_tuples_from_response(response))
while response and 'next' in response:
response = self.client.next(response)
if response:
play_tuples.extend(self._get_play_tuples_from_response(response))
return play_tuples
def extract_plays(self):
print("* Extracting latest plays of {}.".format(self.user_name))
play_tuples = self._get_play_tuples()
for played_at, track_id in play_tuples:
play = self.get_play_from_played_at_utc_and_track_id(played_at, track_id)
self.db.save_play(play)
class HoergewohnheitenManager(object):
def __init__(self, spotify_user_data):
self.spotify = SpotifyConnection(user_data=spotify_user_data)
def process_hoergewohnheiten(self):
self.spotify.extract_plays()
def process_hoergewohnheiten(user_name):
print("***", user_name, "***")
user_data = settings.SPOTIFY_USERS[user_name]
mgr = HoergewohnheitenManager(user_data)
mgr.process_hoergewohnheiten()
if __name__ == '__main__':
print('''
_ ___ ____ ___ __ ____ _ ___ _ _ _ ____ _ _____ ____ _
| |_| / / \ | |_ | |_) / /`_ | |_ \ \ // / \ | |_| | |\ | | |_| | |_ | | | | | |_ | |\ |
|_| | \_\_/ |_|__ |_| \ \_\_/ |_|__ \_\/\/ \_\_/ |_| | |_| \| |_| | |_|__ |_| |_| |_|__ |_| \|
''')
print("Started at {}.".format(datetime.now()))
# Argparse
parser = argparse.ArgumentParser(description='Hoergewohnheiten')
parser.add_argument('-u', dest='user_name')
args = parser.parse_args()
if args.user_name:
process_hoergewohnheiten(args.user_name)
else:
for user_name in settings.SPOTIFY_USERS:
process_hoergewohnheiten(user_name)
print("Finished at {}.".format(datetime.now()))
|
mymindwentblvnk/hoergewohnheiten
|
extract/main.py
|
main.py
|
py
| 7,345 |
python
|
en
|
code
| 16 |
github-code
|
6
|
9022966300
|
"""
4. Find the sum of the first n elements of the series: 1 -0.5 0.25 -0.125 ...
The number of elements (n) is entered from the keyboard.
USING A LOOP
"""
num_el = int(input('enter n: '))
summ = 0
range_num = 1
for i in range(num_el):
summ += range_num
range_num /= -2
print(summ)
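# Sanity check (added comment): the series is geometric with ratio -1/2, so the loop result
# should equal the closed form (1 - (-0.5) ** num_el) / 1.5; for num_el = 3 both give
# 1 - 0.5 + 0.25 = 0.75.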
|
Bulgakoff/PyAlg
|
Lesson_02/task_4/task_4_1.py
|
task_4_1.py
|
py
| 360 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
648181227
|
import numpy as np
import torch
from affogato.affinities import compute_affinities
from torchvision.utils import make_grid
from inferno.extensions.criteria import SorensenDiceLoss
class ConcatDataset(torch.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
self.lens = [len(ds) for ds in self.datasets]
self.start_idx = np.cumsum(self.lens)
self.start_idx[-1] = 0
self.start_idx = np.roll(self.start_idx, 1)
def __len__(self):
return sum(self.lens)
def __getitem__(self, index):
ds_index = np.where(index - self.start_idx >= 0)[0][-1]
item_index = index - self.start_idx[ds_index]
return self.datasets[ds_index][item_index]
class DefaultDataset(torch.utils.data.Dataset):
""" Simple default dataset for generating affinities
from segmentation and mask.
"""
patch_shape = [512, 512] # TODO expose this and other parameters
def to_affinities(self, seg, mask):
seg[~mask] = 0
affs, aff_mask = compute_affinities(seg, self.offsets, have_ignore_label=True)
aff_mask = aff_mask.astype('bool')
affs = 1. - affs
mask_transition, aff_mask2 = compute_affinities(mask, self.offsets)
mask_transition[~aff_mask2.astype('bool')] = 1
aff_mask[~mask_transition.astype('bool')] = True
return affs, aff_mask
@staticmethod
def estimate_n_samples(shape, patch_shape):
# we estimate the number of samples by tiling shape with patch_shape
crops_per_dim = [sh / float(cs) for sh, cs in zip(shape, patch_shape)]
return int(np.prod(crops_per_dim))
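# Example (added comment): a 1024 x 1024 raw image tiled with the default 512 x 512
# patch_shape yields (1024 / 512) * (1024 / 512) = 4 samples.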
def __init__(self, raw, seg, mask_ids, offsets, transforms=None):
self.raw = raw
self.seg = seg
self.mask_ids = mask_ids
self.offsets = offsets
self.transforms = transforms
self.n_samples = self.estimate_n_samples(self.raw.shape, self.patch_shape)
def __getitem__(self, index):
# TODO sample so that we are biased towards the mask
def sample_raw_seg_mask():
offset = [np.random.randint(0, sh - csh) if sh > csh else 0
for sh, csh in zip(self.raw.shape, self.patch_shape)]
bb = tuple(slice(off, off + csh) for off, csh in zip(offset, self.patch_shape))
raw = self.raw[bb]
seg = self.seg[bb]
if self.transforms is not None:
raw, seg = self.transforms(raw, seg)
raw, seg = raw.copy(), seg.copy()
mask = np.isin(seg, self.mask_ids)
return raw, seg, mask
raw, seg, mask = sample_raw_seg_mask()
# TODO ensure that we have some in-mask area
# # some arbitrary but very small pixel threshold
# while mask.sum() < 25:
# raw, seg, mask = sample_raw_seg_mask()
# add channel dim
raw = raw[None]
# make affs and aff_mask
affs, aff_mask = self.to_affinities(seg, mask)
return raw, affs, aff_mask
def __len__(self):
return self.n_samples
class MaskedLoss(torch.nn.Module):
def __init__(self):
super().__init__()
self.criterion = SorensenDiceLoss()
def forward(self, pred, y, mask):
mask.requires_grad = False
masked_prediction = pred * mask
loss = self.criterion(masked_prediction, y)
return loss
def default_training(proc_id, net, ds,
pipe, device, step):
loader = torch.utils.data.DataLoader(ds, batch_size=1, num_workers=2)
p_out, p_in = pipe
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
loss = MaskedLoss()
loss = loss.to(device)
logger = torch.utils.tensorboard.SummaryWriter('./runs/imws')
add_gradients = True
log_frequency = 10
net.train()
while True:
if p_out.poll():
if not p_out.recv():
p_in.send(step)
break
for x, y, mask in loader:
x = x.to(device)
y, mask = y.to(device), mask.to(device)
optimizer.zero_grad()
pred = net(x)
pred.retain_grad()
loss_val = loss(pred, y, mask)
loss_val.backward()
optimizer.step()
logger.add_scalar("loss", loss_val.item(), step)
step += 1
if step % log_frequency == 0:
print("Background training process iteration", step)
x = x[0].detach().cpu()
logger.add_image('input', x, step)
y = y[0].detach().cpu()
if add_gradients:
grads = pred.grad[0].detach().cpu()
grads -= grads.min()
grads /= grads.max()
pred = torch.clamp(pred[0].detach().cpu(), 0.001, 0.999)
tandp = [target.unsqueeze(0) for target in y]
nrow = len(tandp)
tandp.extend([p.unsqueeze(0) for p in pred])
if add_gradients:
tandp.extend([grad.unsqueeze(0) for grad in grads])
tandp = make_grid(tandp, nrow=nrow)
logger.add_image('target_and_prediction', tandp, step)
# for debugging
# return x, y, pred, grads
|
constantinpape/affogato
|
src/python/module/affogato/interactive/napari/train_utils.py
|
train_utils.py
|
py
| 5,323 |
python
|
en
|
code
| 9 |
github-code
|
6
|
33618120495
|
import tensorflow as tf
import numpy as np
import time
#
from dataset.train import *
import convert
import neural
import test
class NeuralNetwork:
def __init__(self):
test_success = 0
start = time.time()
target = 0.01
        print('dataset size:', len(data_input))
[to_train, to_test, res_to_train, res_to_test] = convert.dataset_matrix_to_lists(data_input, data_output, 75, 30)
for x in range(0, 1):
[epoch, err] = neural.train(to_train, res_to_train)
print('epoch:', epoch, 'mse:', err)
if err < target:
test_success = test_success + 1
test.test_neural(epoch, to_test, res_to_test);
print('time', time.time() - start)
print('success ', test_success)
def __del__(self):
pass
|
obernardovieira/recognize-the-number
|
src/nn/__init__.py
|
__init__.py
|
py
| 819 |
python
|
en
|
code
| 1 |
github-code
|
6
|
20538092339
|
"""
Given an integer array nums, find the subarray
with the largest sum, and return its sum.
Example 1:
Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
Output: 6
Explanation: The subarray [4,-1,2,1] has the largest sum 6.
"""
"""
Time Complexity:- O(n)
Space Complexity:- O(1)
"""
class Solution:
def maxSubArray(self, nums):
# Initialize variables to keep track of the maximum sum and the current sum.
max_sum = float("-inf")
current_sum = 0
# Iterate through the array to find the maximum subarray sum.
for num in nums:
# If the current subarray sum is negative, start a new subarray.
# Otherwise, continue adding elements to the current subarray.
current_sum = max(num, current_sum + num)
# Update the maximum subarray sum if needed.
max_sum = max(max_sum, current_sum)
# Return the maximum subarray sum found.
return max_sum
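# Added usage sketch (not part of the original file): reproduces the docstring example.
if __name__ == "__main__":
    print(Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # prints 6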
|
Amit258012/100daysofcode
|
Day2/max_Subarray_sum.py
|
max_Subarray_sum.py
|
py
| 951 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12959913039
|
import inspect
from .namespace import Namespace
_matches_cache = {}
def matches(caller_parameters, callee_parameters):
cache_key = ';'.join((caller_parameters, callee_parameters)) # pragma: no mutate
cached_value = _matches_cache.get(cache_key, None) # pragma: no mutate (mutation changes this to cached_value = None, which just slows down the code)
if cached_value is not None:
return cached_value
caller = set(caller_parameters.split(',')) if caller_parameters else set()
a, b, c = callee_parameters.split('|')
required = set(a.split(',')) if a else set()
optional = set(b.split(',')) if b else set()
wildcard = (c == '*')
if not required and not optional and wildcard:
return False # Special case to not match no-specification function "lambda **whatever: ..."
if wildcard:
result = caller >= required
else:
result = required <= caller <= required.union(optional)
_matches_cache[cache_key] = result # pragma: no mutate (mutation changes result to None which just makes things slower)
return result
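# Illustrative note (added): a caller signature is a comma-separated set of keyword names
# and a callee signature is 'required|optional|wildcard', so matches('a,b', 'a|b|') is True
# (required {'a'} <= {'a', 'b'} <= {'a', 'b'}), while matches('a', 'a,b||') is False because
# 'b' is required but not supplied.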
def get_callable_description(c):
if getattr(c, '__name__', None) == '<lambda>':
import inspect
try:
return 'lambda found at: `{}`'.format(inspect.getsource(c).strip())
except OSError:
pass
return f'`{c}`'
def evaluate(func_or_value, __signature=None, __strict=False, **kwargs):
if callable(func_or_value):
if __signature is None:
__signature = signature_from_kwargs(kwargs)
callee_parameters = get_signature(func_or_value)
if callee_parameters is not None and matches(__signature, callee_parameters):
return func_or_value(**kwargs)
if __strict and callable(func_or_value):
assert (
isinstance(func_or_value, Namespace)
and 'call_target' not in func_or_value
), "Evaluating {} didn't resolve it into a value but strict mode was active, " \
"the signature doesn't match the given parameters. " \
"Note that you must match at least one keyword argument. " \
"We had these arguments: {}".format(
get_callable_description(func_or_value),
', '.join(kwargs.keys()),
)
return func_or_value
def evaluate_strict(func_or_value, __signature=None, **kwargs):
# noinspection PyArgumentEqualDefault
return evaluate(func_or_value, __signature=None, __strict=True, **kwargs)
def evaluate_recursive(func_or_value, __signature=None, __strict=False, **kwargs):
if __signature is None:
__signature = signature_from_kwargs(kwargs) # pragma: no mutate
if isinstance(func_or_value, dict):
# The type(item)(** stuff is to preserve the original type
return type(func_or_value)(**{k: evaluate_recursive(v, __signature=__signature, __strict=__strict, **kwargs) for k, v in dict.items(func_or_value)})
if isinstance(func_or_value, list):
return [evaluate_recursive(v, __signature=__signature, __strict=__strict, **kwargs) for v in func_or_value]
if isinstance(func_or_value, set):
return {evaluate_recursive(v, __signature=__signature, __strict=__strict, **kwargs) for v in func_or_value}
return evaluate(func_or_value, __signature=__signature, __strict=__strict, **kwargs)
def evaluate_recursive_strict(func_or_value, __signature=None, **kwargs):
"""
Like `evaluate_recursive` but won't allow un-evaluated callables to slip through.
"""
# noinspection PyArgumentEqualDefault
return evaluate_recursive(func_or_value, __signature=None, __strict=True, **kwargs)
def get_signature(func):
"""
:type func: Callable
:rtype: str
"""
try:
return object.__getattribute__(func, '__tri_declarative_signature')
except AttributeError:
pass
try:
names, _, varkw, defaults, _, _, _ = inspect.getfullargspec(func)
except TypeError:
return None
first_arg_index = 1 if inspect.ismethod(func) else 0 # Skip self argument on methods
number_of_defaults = len(defaults) if defaults else 0
if number_of_defaults > 0:
required = ','.join(sorted(names[first_arg_index:-number_of_defaults]))
optional = ','.join(sorted(names[-number_of_defaults:]))
else:
required = ','.join(sorted(names[first_arg_index:]))
optional = ''
wildcard = '*' if varkw is not None else ''
signature = '|'.join((required, optional, wildcard))
try:
object.__setattr__(func, '__tri_declarative_signature', signature)
except TypeError:
# For classes
type.__setattr__(func, '__tri_declarative_signature', signature)
except AttributeError:
pass
return signature
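# Illustrative note (added): for example, get_signature(lambda a, b=1, **kw: None) returns
# 'a|b|*' -- required 'a', optional 'b', and '*' because **kw accepts arbitrary extras.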
def signature_from_kwargs(kwargs):
return ','.join(sorted(kwargs.keys()))
|
jlubcke/tri.declarative
|
lib/tri_declarative/evaluate.py
|
evaluate.py
|
py
| 4,864 |
python
|
en
|
code
| 17 |
github-code
|
6
|
6166850776
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 6 16:06:45 2017
@author: Francesco
"""
import threading
import sys
import serial
import numpy as np
import time
import matplotlib.pyplot as plt
global PORT
global BAUD
global NUM_CHANNELS
global END_BUNDLE_BYTE
global BYTE_PER_CHANNEL
global BUNDLE_LENGTH
#BUNDLE SHAPE: |!|!|!|CH0_msb|CH0_lsb|ch1_msb|ch1_lsb|......|ch7_lsb|!|!|!|
PORT = "COM3"
BAUD = 115200
NUM_CHANNELS = 8
END_BUNDLE_BYTE = 3
BYTE_PER_CHANNEL = 2 #two bytes to represent int
BUNDLE_LENGTH = NUM_CHANNELS*BYTE_PER_CHANNEL
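# Decoding example (added comment): each channel arrives as two bytes, msb first, and is
# rebuilt below as (msb << 8) | lsb -- e.g. bytes 0x01, 0x2C give the value 300.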
global ROWS
global COLS
ROWS = 4
COLS = 2
global ACQUISITION_TIME
ACQUISITION_TIME = 60
global SAMPLING_INTERVAL
SAMPLING_INTERVAL = 0.5
global n_MOVEMENT_TO_UNDERSTAND
n_MOVEMENT_TO_UNDERSTAND = 4 #up down left right
global movements
movements = ["up\n","down\n","left\n","right\n"]
class SerialReader(threading.Thread):
def __init__(self, name, port_name, baud, data, event_run):
threading.Thread.__init__(self)
self.name = name
self.port_name = port_name
self.baud = baud
self.event_run = event_run
self.data = data
print("Attempting to open port %s at baud %d" %(self.port_name,self.baud))
self.port = serial.Serial(self.port_name,self.baud,timeout=1)
if(self.port.isOpen()): print("Port Open")
def run(self):
start_time = time.time()
running = True
while(running):
try:
#actual decoding
if(self.port.read(END_BUNDLE_BYTE).decode("raw_unicode_escape") == '!!!'):
temp = self.port.read(BUNDLE_LENGTH)
#print(temp)
for channel in range(0,NUM_CHANNELS):
self.data[channel] = (temp[channel*BYTE_PER_CHANNEL]<<8)|(temp[channel*BYTE_PER_CHANNEL + 1 ])
#allow the plotting thread to access the data
self.event_run.set()
self.event_run.clear()
time.sleep(SAMPLING_INTERVAL)
total_elapsed = time.time() - start_time
                if(total_elapsed > ACQUISITION_TIME): # acquisition time limit reached
print("From %s, closing port"%self.name)
self.port.close()
running = False
except KeyboardInterrupt:
self.port.close()
break
class DynamicPlotter(threading.Thread):
def __init__(self,name,data,event_run):
threading.Thread.__init__(self)
# Write to a file.
self.out_file = open("test.txt","w")
self.data = data
self.event_run = event_run
self.name = name
self.number_of_acq_total = ACQUISITION_TIME/SAMPLING_INTERVAL
self.number_of_acq_per_movement = self.number_of_acq_total/n_MOVEMENT_TO_UNDERSTAND
def run(self):
running = True
counter_total = 0
counter = 0
while(running):
self.event_run.wait()
#print("From %s, writing to the file!"%self.name)
self.out_file.write(str(self.data[0])) #only to avoid printing a coma
for value in self.data[1:]:
self.out_file.write(',')
self.out_file.write(str(value))
self.out_file.write('\n')
if(counter == 0):
print("Counter total:%d"%counter_total)
index = int(counter_total/self.number_of_acq_per_movement)
message = "Movement: %s"%movements[index]
#why is this working?
#let's suppose: counter=0 and counter_total =
print("%s: %s"%(self.name,message))
self.out_file.write(message)
counter_total += 1
counter += 1
if(counter == self.number_of_acq_per_movement):
#reset counter
counter = 0
#print("From %s, checking if set: %d!"%(self.name,self.event_close.is_set()))
if(counter_total == self.number_of_acq_total ): #6 acquisitions,
print("From %s, closing the file!"%self.name)
self.out_file.close()
running = False
if __name__ == "__main__":
# array that holds the values read by the serial reader
data = np.zeros(NUM_CHANNELS)
event_run = threading.Event()
try:
s = SerialReader("Serial Reader",PORT,BAUD,data,event_run)
p = DynamicPlotter("File maker",data,event_run)
s.start()
p.start()
s.join()
p.join()
except KeyboardInterrupt:
raise ValueError('Catch command to stop from keyboard')
sys.exit(0)
|
FrancesoM/UnlimitedHand-Learning
|
python_side/multithread.py
|
multithread.py
|
py
| 5,210 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10291438492
|
import numpy as np
import subprocess
import pandas as pd
import os
import sys
import shlex
def prompt_savefig(figure, filename):
"""
Take a filename for a figure, and prompt the user to overwrite an already existing
file with that name if needed
"""
if os.path.exists(filename):
prompt = input(f"{filename} already exists, overwrite? [y/N]> ")
if not (prompt.lower() == "y" or prompt.lower() == "yes"):
prompt_savefig(figure, input("Enter new filename: "))
figure.savefig(filename)
def get_sorted_id_dirs(foldername):
"""
Take a path to a multicore run folder and return a sorted list of id directories
"""
return sorted(
[directory for directory in os.listdir(foldername) if os.path.isdir(
os.path.join(foldername, directory)
)
and directory[:2] == "id"
and directory[2:].isdigit()],
key=lambda directory: int(directory[2:]),
)
def get_last_tab_file(id_foldername, id_directory_num=0, dim3=False):
"""
Take a path to a(n) (id) directory, return the name of the tab file at the last
time index
"""
# final index on the tab files
last_tab_time = get_last_tab_time(id_foldername)
# build the tabfile name; the ones in the 0th id directory
# aren't labeled by id, but the rest are
id_filename_modifier = ""
if id_directory_num > 0:
id_filename_modifier = f"-id{id_directory_num}"
dimstr = None
if dim3:
dimstr = "3d"
else:
dimstr = "2d"
return f"Par_Strat{dimstr}{id_filename_modifier}.{last_tab_time:04}.tab"
def get_tab_file(idfolder, timestep_id):
"""
Take a path to a(n) (id) directory, return the name of the tab file
with the given timestep id
"""
filelist = [f for f in os.listdir(idfolder) if os.path.isfile(os.path.join(idfolder, f))]
tabfilelist = [f for f in filelist if "tab" in f]
return [f for f in tabfilelist if f"{timestep_id:04}" in f][0]
def get_iddir_2d_chunk(id_dirs, id_offset, chunk2dsize):
return [id_dir for id_dir in id_dirs
if id_offset <= get_idnum_from_foldername(id_dir) < id_offset+chunk2dsize]
def get_idnum_from_foldername(id_foldername):
return int(id_foldername[2:])
def get_tab_df_multicore(foldername, x1_blocks, x2_blocks, timestep=None, x3_block_offset=None):
"""
Take a path to a multicore run folder and return a dataframe containing
all of the tabfile data
x1_blocks: Number of blocks (for MPI) in the x1 direction
x2_blocks: Number of blocks (for MPI) in the x2 direction
timestep: Tab file index
"""
# This is a sorted list of the id directories in the parent folder
id_directories = get_sorted_id_dirs(foldername)
if x3_block_offset is not None:
chunk2dsize = x1_blocks * x2_blocks
id_offset = x3_block_offset * chunk2dsize
id_directories = get_iddir_2d_chunk(id_directories, id_offset, chunk2dsize)
i_offset = 0
j_offset = 0
previous_x2_block_index = 0
master_df = None
for id_directory in id_directories:
id_directory_num = get_idnum_from_foldername(id_directory)
tab_filename = None
if timestep is None:
tab_filename = get_last_tab_file(
os.path.join(foldername, id_directory),
id_directory_num,
dim3=(x3_block_offset is not None),
)
else:
tab_filename = get_tab_file(os.path.join(foldername, id_directory), timestep)
df = get_df(os.path.join(foldername, id_directory, tab_filename))
"""
Need to add an offset to the cell indices to put them in the right place, since
cell indices are computed by athena relative to the CPU space (blocks), but we need
them relative to the entire domain.
"""
x1_block_index = id_directory_num % x1_blocks
x2_block_index = np.floor(float(id_directory_num % chunk2dsize) / x1_blocks)
# compute j_offset for this block
if x2_block_index > previous_x2_block_index:
j_offset += max(df["j-zone"]) - min(df["j-zone"]) + 1
# add accumulated offset
df["i-zone"] += i_offset
df["j-zone"] += j_offset
# compute k_offset for this block (if needed)
if x3_block_offset is not None:
df["k-zone"] += ( max(df["k-zone"]) - min(df["k-zone"]) + 1 ) * x3_block_offset
"""
Compute the row (x2_block_index) and column (x1_block_index) of the current file's
block. Athena creates id folders first along x1, then along x2, which is why these
formulae work.
"""
# compute i offset for next block
if x1_block_index < x1_blocks - 1:
i_offset += max(df["i-zone"]) - min(df["i-zone"]) + 1
else:
i_offset = 0
previous_x2_block_index = x2_block_index
if master_df is None:
master_df = df
else:
master_df = master_df.append(df)
return master_df
def get_tab_df_multicore_3d(foldername, x1_blocks, x2_blocks, x3_blocks, timestep=None):
master_df = None
for x3_index in range(x3_blocks):
# get 2d slice (think of a plane in the x1 and x2 directions)
slice_2d_df = get_tab_df_multicore(
foldername, x1_blocks, x2_blocks, timestep=timestep, x3_block_offset=x3_index
)
if master_df is None:
master_df = slice_2d_df
else:
master_df = master_df.append(slice_2d_df)
return master_df
def compute_surface_dens(df):
"""
Takes a dataframe containing the density at each point, returns
a dataframe containing the surface density for each x
"""
trim = df[["i-zone", "j-zone", "x1", "x2", "d", "dpar"]]
#print(len(trim))
imin = min(trim["i-zone"])
imax = max(trim["i-zone"])
dens = []
for i in range(imin, imax+1):
i_rows = trim.loc[(trim["i-zone"] == i)]
row = [i, i_rows.iloc[0]["x1"]]
dzarr = np.array([
i_rows.iloc[j]["x2"] - i_rows.iloc[j-1]["x2"] for j in range(1,len(i_rows))
])
par_surf_dens = sum(np.array(i_rows["dpar"])[1:] * dzarr)
#print(surf_dens)
row.append(par_surf_dens)
#print(dzarr)
#print(i_rows["d"])
#print(np.array(i_rows["d"])[1:])
gas_surf_dens = sum(np.array(i_rows["d"])[1:] * dzarr)
row.append(gas_surf_dens)
dens.append(row)
densdf = pd.DataFrame(dens, columns=["i-zone", "x1", "dpar", "d"])
return densdf
def integrate_df_3d(df, direction):
"""
Integrate a 3d dataframe over an arbitrary direction
honestly, this would probably be better if it turned the
input df into a numpy cube, because i bet you could vectorize
it and it would be faster, but whatever
df : dataframe containing positions, dpar, etc.
direction : one of "x1", "x2", "x3"
"""
def direction_to_index(direction):
return int(direction[1:]) - 1
def direction_to_index_label(direction):
return chr( direction_to_index(direction) + 105 )
def get_direction_index_compl(dir_indx):
return sorted([ (dir_index + n) % 3 for n in (1,2) ])
def index_to_direction(dir_indx):
return "x" + str(dir_indx + 1)
dir_index = direction_to_index(direction)
# directions orthogonal to integrating direction
dir1_index, dir2_index = get_direction_index_compl(dir_index)
dir1 = index_to_direction(dir1_index)
dir2 = index_to_direction(dir2_index)
dir1_index_label = direction_to_index_label(dir1) + "-zone"
dir2_index_label = direction_to_index_label(dir2) + "-zone"
dir1_min = min(df[dir1_index_label])
dir1_max = max(df[dir1_index_label])
dir2_min = min(df[dir2_index_label])
dir2_max = max(df[dir2_index_label])
new_rows = []
for n in range(dir1_min, dir1_max+1):
for m in range(dir2_min, dir2_max+1):
locator = (df[dir1_index_label] == n) & (df[dir2_index_label] == m)
nm_location_df = df.loc[locator]
row = [n, m, nm_location_df.iloc[0][dir1], nm_location_df.iloc[0][dir2]]
d_dir = np.abs(nm_location_df.iloc[1][direction] - nm_location_df.iloc[0][direction])
compute_avgs = lambda arr : np.array([
0.5*(arr[l] + arr[l+1]) for l in range(0, len(arr)-1)
])
def compute_integral(col):
vals = np.array(nm_location_df[col])
avgs = compute_avgs(vals)
return sum(avgs * d_dir)
dpar_integral = compute_integral("dpar")
d_integral = compute_integral("d")
row.append(dpar_integral)
row.append(d_integral)
new_rows.append(row)
columns = [dir1_index_label, dir2_index_label, dir1, dir2, "dpar", "d"]
integrated_df = pd.DataFrame(new_rows, columns=columns)
return integrated_df
def get_last_tab_time(folder):
lsproc = subprocess.Popen(["ls", folder], stdout=subprocess.PIPE)
grepproc = subprocess.Popen(["grep", "tab"], stdin=lsproc.stdout, stdout=subprocess.PIPE)
tailproc = subprocess.Popen(["tail", "-1"], stdin=grepproc.stdout, stdout=subprocess.PIPE)
stdout, stderr = tailproc.communicate()
#print(stdout.decode("utf-8"))
return int( stdout.decode("utf-8").split(".")[1] )
def load_surfdenscsv(filename):
filevals = np.loadtxt(filename, delimiter=",")
t_vals = filevals[:,0] # the first column
surf_dens_arr = filevals[:,1:] # all but the first column
return t_vals, surf_dens_arr
def get_df(filename):
# i == x1 == r, j == x2 == z
header_line = get_tab_header_line(filename)
sep = "(?<!#)\s+"
df = pd.read_csv(filename, sep=sep, header=header_line, engine="python")
# column names in the file suck, so renaming them
newcols = {}
for col in list(df):
newname = col[col.find("=")+1:]
newcols[col] = newname
df = df.rename(columns=newcols)
df = df.apply(pd.to_numeric, errors="coerce")
return df
def get_tab_header_line(filename):
header_line = 0
with open(filename, "r") as f:
line = f.readline()
while line != "":
if line.find("[1]") != -1:
break
else:
header_line += 1
line = f.readline()
return header_line
def notify(msg):
notifystr = "notify-send -i /home/jeremy/Pictures/icons/python-icon-32.png Python '{0}'".format(msg)
prc = subprocess.Popen(shlex.split(notifystr), stdout=subprocess.PIPE)
def Q(z, jmax):
ans = 0
for j in range(1, jmax+1):
ans += (-1)**(j-1) * np.exp(-2 * z**2 * j**2)
return 2*ans
def get_max_dist(cumdist):
uniformline = np.array(range(0,len(cumdist)))/(len(cumdist)+0.0)
return max(cumdist - uniformline)
def get_p(D, n):
return Q(D*np.sqrt(n), 10000)
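# Note (added): Q, get_max_dist and get_p appear to implement a Kolmogorov-Smirnov style
# check -- Q approximates the asymptotic KS survival function, get_max_dist is the D
# statistic of a cumulative distribution against the uniform line, and get_p(D, n) returns
# Q(D * sqrt(n)), i.e. the corresponding p-value.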
|
johfst/streaminginstability
|
plotutils.py
|
plotutils.py
|
py
| 11,060 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2088974749
|
"""@namespace IMP.pmi.restraints.proteomics
Restraints for handling various kinds of proteomics data.
"""
from __future__ import print_function
import IMP
import IMP.core
import IMP.algebra
import IMP.atom
import IMP.container
import IMP.pmi
import IMP.pmi.tools
import IMP.pmi.output
import numpy
import math
import sys
import warnings
class ConnectivityRestraint(object):
'''
generate a connectivity restraint between domains
setting up the restraint
example:
sel1 = IMP.atom.Selection(root_hier, molecule="Rpb3",
residue_indexes=range(1,100))
sel2 = IMP.atom.Selection(root_hier, molecule="Rpb4",
residue_indexes=range(1,100))
cr=restraints.ConnectivityRestraint((sel1, sel2), label='CR1')
cr.add_to_model()
Multistate support =No
Resolution=Yes
'''
def __init__(self, domains, kappa=10.0, resolution=None, label="None"):
self.weight = 1.0
self.kappa = kappa
self.label = label
cr = IMP.atom.create_connectivity_restraint(
domains, self.kappa, self.label)
self.m = cr.get_model()
self.rs = IMP.RestraintSet(self.m, label)
self.rs.add_restraint(cr)
def set_label(self, label):
self.label = label
self.rs.set_name(label)
for r in self.rs.get_restraints():
r.set_name(label)
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_restraint(self):
return self.rs
def get_restraints(self):
rlist = []
for r in self.rs.get_restraints():
rlist.append(IMP.core.PairRestraint.get_from(r))
return rlist
def set_weight(self, weight):
self.weight = weight
self.rs.set_weight(weight)
def get_output(self):
output = {}
score = self.weight * self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["ConnectivityRestraint_" + self.label] = str(score)
return output
#
class CompositeRestraint(object):
'''
handle_particles is a list of particles
composite_particles is a list of lists of particles
'''
def __init__(self, handle_particles, composite_particles, cut_off=5.0,
lam=1.0, plateau=0.0, resolution=None, label="None"):
# composite particles: all particles beside the handle
self.label = label
hs = IMP.pmi.tools.input_adaptor(handle_particles, resolution,
flatten=True)
self.handleparticles = [h.get_particle() for h in hs]
self.m = self.handleparticles[0].get_model()
self.rs = IMP.RestraintSet(self.m, 'cr')
self.compositeparticles = []
compositeparticle_list = []
for cp in composite_particles:
hs = IMP.pmi.tools.input_adaptor(cp, resolution, flatten=True)
tmplist = [h.get_particle() for h in hs]
compositeparticle_list.append(tmplist)
self.compositeparticles += tmplist
ln = IMP.pmi.CompositeRestraint(
self.m, self.handleparticles, cut_off, lam, True, plateau)
for ps in compositeparticle_list:
# composite particles is a list of list of particles
ln.add_composite_particle(ps)
self.rs.add_restraint(ln)
def set_label(self, label):
self.label = label
def get_handle_particles(self):
return self.handleparticles
def get_composite_particles(self):
return self.compositeparticles
def get_restraint(self):
return self.rs
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_output(self):
output = {}
score = self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["CompositeRestraint_" + self.label] = str(score)
return output
#
class AmbiguousCompositeRestraint(object):
    '''
    This restraint allows ambiguous cross-linking between multiple copies,
    excluding cross-links between symmetric copies.
    It also allows name ambiguity.
    '''
def __init__(self, root_hier, restraints_file, cut_off=5.0, lam=1.0,
plateau=0.01, resolution=None, label="None"):
self.weight = 1.0
self.m = root_hier.get_model()
self.rs = IMP.RestraintSet(self.m, 'data')
self.label = "None"
self.pairs = []
self.outputlevel = "low"
self.cut_off = cut_off
self.lam = lam
self.plateau = plateau
fl = IMP.pmi.tools.open_file_or_inline_text(restraints_file)
for line in fl:
tokens = line.split()
            # skip comment lines
if (tokens[0] == "#"):
continue
r1 = int(tokens[2])
c1 = tokens[0]
r2 = int(tokens[3])
c2 = tokens[1]
ps1 = IMP.atom.Selection(root_hier, resolution=resolution,
molecule=c1, residue_index=r1)
ps1 = ps1.get_selected_particles()
hrc1 = [p.get_name() for p in ps1]
def nosym_subset(ps):
return [p for p in ps if not IMP.pmi.Symmetric.get_is_setup(p)
or IMP.pmi.Symmetric(p).get_symmetric() == 0]
ps1nosym = nosym_subset(ps1)
hrc1nosym = [p.get_name() for p in ps1nosym]
if len(ps1) == 0:
warnings.warn(
"AmbiguousCompositeRestraint: residue %d of chain %s "
"is not there" % (r1, c1), IMP.pmi.StructureWarning)
continue
ps2 = IMP.atom.Selection(root_hier, resolution=resolution,
molecule=c2, residue_index=r2)
ps2 = ps2.get_selected_particles()
hrc2 = [p.get_name() for p in ps2]
ps2nosym = nosym_subset(ps2)
hrc2nosym = [p.get_name() for p in ps2nosym]
if len(ps2) == 0:
warnings.warn(
"AmbiguousCompositeRestraint: residue %d of chain %s "
"is not there" % (r2, c2), IMP.pmi.StructureWarning)
continue
cr = IMP.pmi.CompositeRestraint(
self.m, ps1nosym, self.cut_off, self.lam, True, self.plateau)
cr.add_composite_particle(ps2)
self.rs.add_restraint(cr)
self.pairs.append(
(ps1nosym,
hrc1nosym,
c1,
r1,
ps2,
hrc2,
c2,
r2,
cr))
cr = IMP.pmi.CompositeRestraint(
self.m, ps1, self.cut_off, self.lam, True, self.plateau)
cr.add_composite_particle(ps2nosym)
self.rs.add_restraint(cr)
self.pairs.append(
(ps1,
hrc1,
c1,
r1,
ps2nosym,
hrc2nosym,
c2,
r2,
cr))
def plot_restraint(
self,
maxdist=100,
npoints=100):
p1 = IMP.Particle(self.m)
p2 = IMP.Particle(self.m)
d1 = IMP.core.XYZR.setup_particle(p1)
d2 = IMP.core.XYZR.setup_particle(p2)
cr = IMP.pmi.CompositeRestraint(
self.m,
[p1],
self.cut_off,
self.lam,
True,
self.plateau)
cr.add_composite_particle([p2])
dists = []
scores = []
for i in range(npoints):
d2.set_coordinates(
IMP.algebra.Vector3D(maxdist / npoints * float(i), 0, 0))
dists.append(IMP.core.get_distance(d1, d2))
scores.append(cr.unprotected_evaluate(None))
IMP.pmi.output.plot_xy_data(dists, scores)
def set_label(self, label):
self.label = label
self.rs.set_name(label)
for r in self.rs.get_restraints():
r.set_name(label)
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_hierarchies(self):
return self.prot
def get_restraint_sets(self):
return self.rs
def get_restraint(self):
return self.rs
def set_output_level(self, level="low"):
# this might be "low" or "high"
self.outputlevel = level
def set_weight(self, weight):
self.weight = weight
self.rs.set_weight(weight)
def get_output(self):
# content of the cross-link database pairs
# self.pairs.append((p1,p2,dr,r1,c1,r2,c2))
output = {}
score = self.weight * self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["AmbiguousCompositeRestraint_Score_" + self.label] = str(score)
for n, p in enumerate(self.pairs):
ps1 = p[0]
hrc1 = p[1]
c1 = p[2]
r1 = p[3]
ps2 = p[4]
hrc2 = p[5]
c2 = p[6]
r2 = p[7]
cr = p[8]
for n1, p1 in enumerate(ps1):
name1 = hrc1[n1]
for n2, p2 in enumerate(ps2):
name2 = hrc2[n2]
d1 = IMP.core.XYZR(p1)
d2 = IMP.core.XYZR(p2)
label = str(r1) + ":" + name1 + "_" + str(r2) + ":" + name2
output["AmbiguousCompositeRestraint_Distance_" +
label] = str(IMP.core.get_distance(d1, d2))
label = str(r1) + ":" + c1 + "_" + str(r2) + ":" + c2
output["AmbiguousCompositeRestraint_Score_" +
label] = str(self.weight * cr.unprotected_evaluate(None))
return output
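# Illustrative sketch (not part of the original module): the whitespace-
# separated layout that the parser in AmbiguousCompositeRestraint.__init__
# expects, inferred from its token handling (tokens[0]/[1] are molecule
# names, tokens[2]/[3] are residue indices, and lines whose first token is
# "#" are skipped). The molecule names and residue numbers are hypothetical.
_EXAMPLE_AMBIGUOUS_COMPOSITE_LINES = [
    "# mol1 mol2 res1 res2",
    "Rpb3 Rpb4 10 55",
    "Rpb3 Rpb7 72 18",
]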
#
class SimplifiedPEMAP(object):
def __init__(self, root_hier, restraints_file, expdistance, strength,
resolution=None):
self.m = root_hier.get_model()
self.rs = IMP.RestraintSet(self.m, 'data')
self.label = "None"
self.pairs = []
self.outputlevel = "low"
self.expdistance = expdistance
self.strength = strength
fl = IMP.pmi.tools.open_file_or_inline_text(restraints_file)
for line in fl:
tokens = line.split()
            # skip comment lines
if (tokens[0] == "#"):
continue
r1 = int(tokens[2])
c1 = tokens[0]
r2 = int(tokens[3])
c2 = tokens[1]
pcc = float(tokens[4])
ps1 = IMP.atom.Selection(root_hier, resolution=resolution,
molecule=c1, residue_index=r1,
copy_index=0)
ps1 = ps1.get_selected_particles()
if len(ps1) == 0:
warnings.warn(
"SimplifiedPEMAP: residue %d of chain %s is not there "
"(w/ %d %s)" % (r1, c1, r2, c2), IMP.pmi.StructureWarning)
continue
if len(ps1) > 1:
warnings.warn(
"SimplifiedPEMAP: residue %d of chain %s selected "
"multiple particles" % (r1, c1), IMP.pmi.StructureWarning)
continue
ps2 = IMP.atom.Selection(root_hier, resolution=resolution,
molecule=c2, residue_index=r2,
copy_index=0)
ps2 = ps2.get_selected_particles()
if len(ps2) == 0:
warnings.warn(
"SimplifiedPEMAP: residue %d of chain %s is not there "
"(w/ %d %s)" % (r1, c1, r2, c2), IMP.pmi.StructureWarning)
continue
if len(ps2) > 1:
warnings.warn(
"SimplifiedPEMAP: residue %d of chain %s selected "
"multiple particles" % (r2, c2), IMP.pmi.StructureWarning)
continue
p1 = ps1[0]
p2 = ps2[0]
# This is harmonic potential for the pE-MAP data
upperdist = self.get_upper_bond(pcc)
limit = 0.5 * self.strength * 15.0 ** 2 + 10.0
hub = IMP.core.TruncatedHarmonicUpperBound(
upperdist, self.strength, 15, limit)
# This is harmonic for the X-link
df = IMP.core.SphereDistancePairScore(hub)
dr = IMP.core.PairRestraint(self.m, df, (p1, p2))
self.rs.add_restraint(dr)
self.pairs.append((p1, p2, dr, r1, c1, r2, c2))
# Lower-bound restraint
lowerdist = self.get_lower_bond(pcc)
limit = 0.5 * self.strength * 15.0 ** 2 + 10.0
hub2 = IMP.core.TruncatedHarmonicLowerBound(
lowerdist, self.strength, 15, limit)
# This is harmonic for the X-link
df2 = IMP.core.SphereDistancePairScore(hub2)
dr2 = IMP.core.PairRestraint(self.m, df2, (p1, p2))
self.rs.add_restraint(dr2)
self.pairs.append((p1, p2, dr2, r1, c1, r2, c2))
def get_upper_bond(self, pearsoncc):
# return (pearsoncc-1.)/-0.0075
return (pearsoncc - .5) / (-0.005415)
def get_lower_bond(self, pearsoncc):
return (pearsoncc - 1.) / -0.0551
def set_label(self, label):
self.label = label
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_hierarchies(self):
return self.prot
def get_restraint_sets(self):
return self.rs
def set_output_level(self, level="low"):
# this might be "low" or "high"
self.outputlevel = level
def get_output(self):
# content of the cross-link database pairs
# self.pairs.append((p1,p2,dr,r1,c1,r2,c2))
output = {}
score = self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["SimplifiedPEMAP_Score_" + self.label] = str(score)
for i in range(len(self.pairs)):
p0 = self.pairs[i][0]
p1 = self.pairs[i][1]
crosslinker = 'standard'
ln = self.pairs[i][2]
resid1 = self.pairs[i][3]
chain1 = self.pairs[i][4]
resid2 = self.pairs[i][5]
chain2 = self.pairs[i][6]
label = str(resid1) + ":" + chain1 + "_" + \
str(resid2) + ":" + chain2
output["SimplifiedPEMAP_Score_" + crosslinker + "_" +
label] = str(ln.unprotected_evaluate(None))
d0 = IMP.core.XYZ(p0)
d1 = IMP.core.XYZ(p1)
output["SimplifiedPEMAP_Distance_" +
label] = str(IMP.core.get_distance(d0, d1))
return output
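# Illustrative sketch (not part of the original module): how the PCC-to-
# distance conversions used by SimplifiedPEMAP behave, applying the same
# formulas as get_upper_bond and get_lower_bond above. The Pearson
# correlation values are hypothetical, no IMP objects are needed for this
# arithmetic, and distances are in the model's own units.
def _pemap_bounds_example():
    bounds = []
    for pcc in (0.1, 0.2, 0.3):
        upper = (pcc - .5) / (-0.005415)  # same formula as get_upper_bond
        lower = (pcc - 1.) / -0.0551      # same formula as get_lower_bond
        bounds.append((pcc, upper, lower))
    return bounds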
class SetupConnectivityNetworkRestraint(object):
'''
    Generates and wraps a ConnectivityNetworkRestraint between domains.
    Example:
cr=restraints.ConnectivityNetworkRestraint(
simo,["CCC",(1,100,"TTT"),(100,150,"AAA")])
cr.add_to_model()
cr.set_label("CR1")
Multistate support =No
Selection type=selection tuple
Resolution=Yes
'''
def __init__(self, objects, kappa=10.0, resolution=1.0, label="None"):
self.weight = 1.0
self.kappa = kappa
self.label = label
        if self.label == "None":
            self.label = str(objects)
hiers = []
for obj in objects:
hiers.append(IMP.pmi.tools.input_adaptor(
obj, resolution, flatten=True))
self.m = hiers[0][0].get_model()
cr = ConnectivityNetworkRestraint(self.m)
for hs in hiers:
cr.add_particles([h.get_particle() for h in hs])
self.rs = IMP.RestraintSet(self.m, label)
self.rs.add_restraint(cr)
def set_label(self, label):
self.label = label
self.rs.set_name(label)
for r in self.rs.get_restraints():
r.set_name(label)
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self.rs)
def get_restraint(self):
return self.rs
def get_restraints(self):
rlist = []
for r in self.rs.get_restraints():
rlist.append(IMP.core.PairRestraint.get_from(r))
return rlist
def set_weight(self, weight):
self.weight = weight
self.rs.set_weight(weight)
def get_output(self):
output = {}
score = self.weight * self.rs.unprotected_evaluate(None)
output["_TotalScore"] = str(score)
output["ConnectivityNetworkRestraint_" + self.label] = str(score)
return output
class ConnectivityNetworkRestraint(IMP.Restraint):
'''
a python restraint that computes the score for a composite of proteins
Authors: G. Bouvier, R. Pellarin. Pasteur Institute.
'''
def __init__(self, m, slope=1.0, theta=0.0, plateau=0.0000000001,
linear_slope=0.015):
        '''
        Set up the restraint with the slope and theta of the sigmoid
        potential; theta is the cutoff distance for a protein-protein
        contact. Particles are added later with add_particles().
        '''
# Import networkx here so that we don't introduce it as a dependency
# for *every* proteomics restraint, only this one
import networkx
self.networkx = networkx
IMP.Restraint.__init__(self, m, "ConnectivityNetworkRestraint %1%")
self.slope = slope
self.theta = theta
self.linear_slope = linear_slope
self.plateau = plateau
self.particles_blocks = []
self.particle_list = []
def get_number_of_particle_blocks(self):
return len(self.particles_blocks)
def get_number_of_particles_for_block(self, block_index):
return len(self.particles_blocks[block_index])
def add_particles(self, particles):
self.particles_blocks.append(particles)
self.particle_list += particles
def get_full_graph(self):
'''
get the full graph of distances between every particle pair
'''
import scipy.spatial
pdist_array = numpy.array(
IMP.pmi.get_list_of_bipartite_minimum_sphere_distance(
self.particles_blocks))
pdist_mat = scipy.spatial.distance.squareform(pdist_array)
pdist_mat[pdist_mat < 0] = 0
graph = self.networkx.Graph(pdist_mat)
return graph
def get_minimum_spanning_tree(self):
"""
return the minimum spanning tree
"""
graph = self.get_full_graph()
graph = self.networkx.minimum_spanning_tree(graph)
return graph
def sigmoid(self, x):
'''
a sigmoid function that scores the probability of a contact
between two proteins
'''
# return 1 - (x)**self.slope/ float(((x)**self.slope +
# self.theta**self.slope))
argvalue = (x - self.theta) / self.slope
return 1.0 - (1.0 - self.plateau) / (1.0 + math.exp(-argvalue))
def unprotected_evaluate(self, da):
graph = self.get_minimum_spanning_tree()
score = 0.0
for e in graph.edges():
dist = graph.get_edge_data(*e)['weight']
prob = self.sigmoid(dist)
score += -numpy.log(prob)
score += self.linear_slope * dist
return score
def do_get_inputs(self):
return self.particle_list
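# Illustrative sketch (not part of the original module): the per-edge term
# that ConnectivityNetworkRestraint.unprotected_evaluate accumulates over
# the minimum-spanning-tree edges, written out as plain math. The distance
# argument and parameter defaults are hypothetical and simply mirror
# sigmoid() and the -log(prob) + linear_slope * dist sum above.
def _connectivity_edge_score_example(dist, slope=1.0, theta=0.0,
                                     plateau=0.0000000001,
                                     linear_slope=0.015):
    argvalue = (dist - theta) / slope
    prob = 1.0 - (1.0 - plateau) / (1.0 + math.exp(-argvalue))
    return -numpy.log(prob) + linear_slope * dist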
class FuzzyBoolean(object):
'''
Fully Ambiguous Restraint that can be built using boolean logic
R. Pellarin. Pasteur Institute.
'''
def __init__(self, p1, operator=None, p2=None):
        '''
        Take either a single truth value (leaf node) or two FuzzyBoolean
        operands plus an operator, so that expressions can be composed
        with the overloaded & and | operators.
        '''
if isinstance(p1, FuzzyBoolean) and isinstance(p2, FuzzyBoolean):
self.operations = [p1, operator, p2]
self.value = None
else:
self.operations = []
self.value = p1
def __or__(self, FuzzyBoolean2):
return FuzzyBoolean(self, self.or_, FuzzyBoolean2)
def __and__(self, FuzzyBoolean2):
return FuzzyBoolean(self, self.and_, FuzzyBoolean2)
def and_(self, a, b):
return a * b
def or_(self, a, b):
return 1.0 - (1.0 - a) * (1.0 - b)
def evaluate(self):
if len(self.operations) == 0:
return self.value
FuzzyBoolean1, op, FuzzyBoolean2 = self.operations
return op(FuzzyBoolean1.evaluate(), FuzzyBoolean2.evaluate())
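# Illustrative sketch (not part of the original module): composing
# FuzzyBoolean leaves with the overloaded & and | operators. The membership
# values 0.7 and 0.5 are hypothetical.
def _fuzzy_boolean_example():
    a = FuzzyBoolean(0.7)
    b = FuzzyBoolean(0.5)
    both = (a & b).evaluate()    # product t-norm: 0.35
    either = (a | b).evaluate()  # probabilistic sum: 0.85
    return both, either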
class FuzzyRestraint(IMP.Restraint):
'''
Fully Ambiguous Restraint that can be built using boolean logic
R. Pellarin. Pasteur Institute.
'''
plateau = 0.00000000000001
theta = 5.0
slope = 2.0
innerslope = 0.01
def __init__(self, m, p1, p2, operator=None):
        '''
        Take either a pair of particles (leaf restraint) or one or two
        FuzzyRestraint operands plus an operator; theta is the cutoff
        distance for a protein-protein contact in the sigmoid potential.
        '''
IMP.Restraint.__init__(self, m, "FuzzyRestraint %1%")
self.m = m
self.min = sys.float_info.min
if isinstance(p1, FuzzyRestraint) and isinstance(p2, FuzzyRestraint):
self.operations = [p1, operator, p2]
self.particle_pair = None
elif isinstance(p1, FuzzyRestraint) and p2 is None:
self.operations = [p1, operator, None]
self.particle_pair = None
else:
self.operations = []
self.particle_pair = (p1, p2)
def __or__(self, FuzzyRestraint2):
return FuzzyRestraint(self.m, self, FuzzyRestraint2, self.or_)
def __and__(self, FuzzyRestraint2):
return FuzzyRestraint(self.m, self, FuzzyRestraint2, self.and_)
def __invert__(self):
return FuzzyRestraint(self.m, self, None, self.invert_)
def and_(self, a, b):
c = a + b
return c
def or_(self, a, b):
c = math.exp(-a) + math.exp(-b) - math.exp(-a - b)
return -math.log(c)
def invert_(self, a):
c = 1.0 - math.exp(-a)
return -math.log(c)
def evaluate(self):
if len(self.operations) == 0:
return self.distance()
FuzzyRestraint1, op, FuzzyRestraint2 = self.operations
if FuzzyRestraint2 is not None:
return op(FuzzyRestraint1.evaluate(), FuzzyRestraint2.evaluate())
else:
return op(FuzzyRestraint1.evaluate())
def distance(self):
d1 = IMP.core.XYZ(self.particle_pair[0])
d2 = IMP.core.XYZ(self.particle_pair[1])
d = IMP.core.get_distance(d1, d2)
argvalue = (d-self.theta)/self.slope
return (-math.log(1.0 - (1.0-self.plateau) / (1.0+math.exp(-argvalue)))
+ self.innerslope*d)
def add_to_model(self):
IMP.pmi.tools.add_restraint_to_model(self.m, self)
def unprotected_evaluate(self, da):
return self.evaluate()
def __str__(self):
if len(self.operations) == 0:
return str(self.particle_pair)
FuzzyRestraint1, op, FuzzyRestraint2 = self.operations
if FuzzyRestraint2 is not None:
return str(FuzzyRestraint1) + str(op) + str(FuzzyRestraint2)
else:
return str(FuzzyRestraint1) + str(op)
def do_get_inputs(self):
if len(self.operations) == 0:
return list(self.particle_pair)
FuzzyRestraint1, op, FuzzyRestraint2 = self.operations
if FuzzyRestraint2 is not None:
return list(set(FuzzyRestraint1.do_get_inputs()
+ FuzzyRestraint2.do_get_inputs()))
else:
return list(set(FuzzyRestraint1.do_get_inputs()))
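# Illustrative sketch (not part of the original module): building two
# FuzzyRestraint leaves on hypothetical coordinated particles and combining
# them with the overloaded | operator, so the composite scores well when
# either contact is satisfied. All coordinates are made up for the example.
def _fuzzy_restraint_example():
    m = IMP.Model()
    p1 = IMP.core.XYZ.setup_particle(IMP.Particle(m),
                                     IMP.algebra.Vector3D(0, 0, 0))
    p2 = IMP.core.XYZ.setup_particle(IMP.Particle(m),
                                     IMP.algebra.Vector3D(4, 0, 0))
    p3 = IMP.core.XYZ.setup_particle(IMP.Particle(m),
                                     IMP.algebra.Vector3D(30, 0, 0))
    near = FuzzyRestraint(m, p1.get_particle(), p2.get_particle())
    far = FuzzyRestraint(m, p1.get_particle(), p3.get_particle())
    combined = near | far
    return combined.evaluate()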
| salilab/pmi | pyext/src/restraints/proteomics.py | proteomics.py | py | 23,746 | python | en | code | 12 | github-code | 6 |