| seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
32413683762
|
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import LogoutView
from django.shortcuts import redirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.views.generic import View
from django.views.generic import FormView
from django.db import IntegrityError
from users import forms
from users.models import UserFollows, CustomUser
class LoginView(View):
"""
View to manage the user login functionality.
'LoginView' handles both GET and POST requests related to the login page.
During a GET request, an empty login form is presented. During a POST
request, the submitted credentials are authenticated. If they are valid,
the user is logged in and redirected to the feed; otherwise, an error
message is displayed.
"""
form_class = forms.LoginForm
template_name = 'users/login.html'
def get(self, request):
"""
Handle GET requests to the login page.
Renders the login page with an unpopulated login form.
"""
form = self.form_class()
message = ''
return render(request, self.template_name,
{"form": form, 'message': message})
@method_decorator(require_POST)
def post(self, request):
"""
Handle POST requests to the login page.
Authenticates the user's credentials. If they are valid, the user
is logged in and redirected to the feed. If they are invalid, an
error message is displayed.
"""
form = self.form_class(request.POST)
message = ''
if form.is_valid():
user = authenticate(
username=form.cleaned_data["username"],
password=form.cleaned_data["password"]
)
if user is not None:
login(request, user)
return redirect("feed")
else:
message = "Invalid credentials."
return render(request, self.template_name,
{"form": form, 'message': message})
class LogoutUserView(LogoutView):
"""
View to handle user logout functionality with automatic redirection.
`LogoutUserView` inherits from Django's `LogoutView` and provides a
straightforward user logout action, followed by a redirection to a
specified page - in this case, the login page.
"""
next_page = reverse_lazy('login')
class SignupView(FormView):
"""
View to manage the user signup functionality.
`SignupView` facilitates the creation of a new user account through
a signup form. Upon receiving a GET request, it renders the signup page
with the form. When handling a POST request, it attempts to create a new user
and log them in. Upon successful account creation and login, the user is
redirected to the URL specified as the successful login destination.
"""
form_class = forms.SignupForm
template_name = "users/signup.html"
# success_url = settings.LOGIN_REDIRECT_URL
success_url = reverse_lazy("feed")
def form_valid(self, form):
"""
Handle POST requests with valid form data.
Creates a user, logs them in, and redirects to 'success_url'.
"""
# Create a new user instance and save it to the database.
user = form.save()
# Log the user in.
login(self.request, user)
# Redirect to the URL specified as the login destination in settings.
return super().form_valid(form)
class FollowedUsersView(LoginRequiredMixin, View):
"""
FollowedUsersView is a class-based view that renders a list of users that the
currently authenticated user is following.
This view ensures that only authenticated users can access the page to see their
followed users by using the LoginRequiredMixin.
Methods
-------
get(self, request, *args, **kwargs):
Handles GET requests. Retrieves and renders a list of followed users for the
currently authenticated user.
"""
def get(self, request, *args, **kwargs):
followed_users = UserFollows.objects.filter(user=request.user)
return render(request,
'users/followed_users.html',
{'followed_users': followed_users})
class FollowUserView(LoginRequiredMixin, View):
"""
View to handle user-following actions.
This view is designed to handle POST requests that contain the username
of the person to be followed. It has mechanisms to handle scenarios such as
trying to follow oneself, trying to follow a user that doesn’t exist, and
trying to follow a user that one is already following.
Methods
-------
post(request, *args, **kwargs):
Processes POST requests, attempting to create a following relationship
and providing user feedback via messages.
"""
@method_decorator(require_POST)
def post(self, request, *args, **kwargs):
# Retrieve the username to follow from the POST data.
username_to_follow = request.POST.get('username_to_follow')
# Check if the user is trying to follow themselves.
if username_to_follow == request.user.username:
messages.error(request, "You cannot follow yourself.")
return redirect('abonnements')
try:
# Retrieve the user to follow from the database.
user_to_follow = CustomUser.objects.get(username=username_to_follow)
# Create a new follow relationship.
UserFollows.objects.create(user=request.user, followed_user=user_to_follow)
# Send a success message to the user.
messages.success(request, f"You are now following {user_to_follow.username}!")
except CustomUser.DoesNotExist:
# Send an error message if the user to follow does not exist.
messages.error(request, f"The user {username_to_follow} does not exist.")
except IntegrityError:
# Send an error message if the following relationship already exists.
messages.error(request, f"You are already following {username_to_follow}!")
# Redirect the user back to the 'abonnements' page.
return redirect('abonnements')
class UnfollowUserView(LoginRequiredMixin, View):
"""
View to handle the action of unfollowing a user.
The view expects to receive a 'pk' (primary key) of the user to unfollow
as part of the URL. This 'pk' is used to identify the followed user
and delete the corresponding follow relationship.
"""
@method_decorator(require_POST)
def post(self, request, pk, *args, **kwargs):
follow = UserFollows.objects.filter(user=request.user, followed_user_id=pk).first()
# Check if the following relationship is found.
if follow:
# Save the followed user's username for use in the message.
followed_username = follow.followed_user.username
# Delete the following relationship.
follow.delete()
# Send a success message to the user.
messages.success(request, f"You have unfollowed {followed_username}.")
else:
# If the relationship is not found, send an error message to the user.
messages.error(request, "User not found.")
# Redirect the user back to the 'abonnements' page.
return redirect('abonnements')
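# A minimal urls.py sketch (not from the repository) showing how these views
# could be wired up. The route names 'login' and 'abonnements' are assumed
# from the redirect() / reverse_lazy() calls above; 'feed' is assumed to be
# defined elsewhere, and the URL paths themselves are hypothetical.
from django.urls import path
from users import views

urlpatterns = [
    path('login/', views.LoginView.as_view(), name='login'),
    path('logout/', views.LogoutUserView.as_view(), name='logout'),
    path('signup/', views.SignupView.as_view(), name='signup'),
    path('abonnements/', views.FollowedUsersView.as_view(), name='abonnements'),
    path('follow/', views.FollowUserView.as_view(), name='follow'),
    path('unfollow/<int:pk>/', views.UnfollowUserView.as_view(), name='unfollow'),
]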
|
ErnestoAquino/LITRevu
|
litrevu/users/views.py
|
views.py
|
py
| 7,709 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30754602045
|
from collections import Counter
for _ in range(int(input())):
n = int(input())
if n < 3:
input()
print(-1)
else:
nb = list(map(int, input().split(' ')))
cnt = Counter(nb)
flag = True
for k, v in cnt.items():
if v >= 3:
print(k)
flag = False
break
if flag:
print(-1)
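# The same counting idea restated as a small self-contained function with a
# quick sanity check (a sketch, not part of the original submission): any
# value occurring at least three times is a valid answer, otherwise -1.
from collections import Counter

def first_value_with_three_copies(nums):
    for value, count in Counter(nums).items():
        if count >= 3:
            return value
    return -1

assert first_value_with_three_copies([1, 2, 2, 2]) == 2
assert first_value_with_three_copies([1, 2, 3]) == -1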
|
Tanguyvans/Codeforces
|
784/B.py
|
B.py
|
py
| 412 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4713040273
|
import os
import re
import json
import numpy as np
from tqdm import tqdm_notebook
from collections import Counter
base_path = 'LongSumm-data/extractive_summaries/'
path_to_jsons = base_path + 'papers-jsons/'
p_jsons = os.listdir(path_to_jsons)
p_unread = []
section_1 = ['abstract']
section_2 = ['introduction', 'problem formulation', 'overview', 'problem definition']
section_3 = ['related work', 'background', 'preliminaries', 'related works', 'previous work', 'baseline models']
section_4 = ['conclusion', 'conclusions', 'discussion', 'conclusion and future work', 'analysis', 'inference', 'discussion and conclusion', 'future work', 'theoretical analysis', 'concluding remarks']
section_5 = ['experiments', 'experimental setup', 'experiment', 'setup', 'training details', 'implementation', 'hyperparameters', ]
section_6 = ['model', 'approach', 'method', 'methods', 'methodology', 'models', 'our approach', 'proposed method', 'model architecture', 'algorithm']
section_7 = ['experimental results', 'results', 'evaluation', 'error analysis', 'main results', 'results and analysis', 'human evaluation', 'experimental evaluation', 'empirical results', 'experiments and results']
section_8 = ['data', 'datasets', 'dataset', 'evaluation metrics']
remove_sections = ['acknowledgements', 'acknowledgments', 'acknowledgement', 'acknowledgment', 'appendix', 'appendices', 'a appendix', 'notation']
section_names = []
for p in tqdm_notebook(p_jsons):
with open(path_to_jsons+p) as json_file:
try:
p_data = json.load(json_file)
except UnicodeDecodeError:
p_unread.append(p)
continue
p_sections = {}
p_sections['name_of_paper'] = p_data['name'][:-4]
if p_data['metadata']['sections'] is not None:
for s in p_data['metadata']['sections']:
if s['heading'] is None:
s['heading'] = 'abstract'
s_name = re.sub(' +', ' ', re.sub(r'[^a-z\s]', '', s['heading'].lower())).lstrip()
if s_name in remove_sections:
continue
else:
section_names.append(s_name)
if s_name in section_1:
p_sections['abstract'] = s['text']
elif s_name in section_2:
p_sections['introduction'] = s['text']
elif s_name in section_3:
p_sections['related_work'] = s['text']
elif s_name in section_4:
p_sections['conclusion'] = s['text']
elif s_name in section_5:
p_sections['experiments'] = s['text']
elif s_name in section_6:
p_sections['model'] = s['text']
elif s_name in section_7:
p_sections['results'] = s['text']
elif s_name in section_8:
p_sections['data'] = s['text']
else:
if 'other' in p_sections.keys():
p_sections['other'] = ' '.join([p_sections['other'], s['text']])
p_sections['other_section_titles'].append(s_name)
else:
p_sections['other'] = s['text']
p_sections['other_section_titles'] = []
p_sections['other_section_titles'].append(s_name)
with open('LongSumm-data/extractive_summaries/combined_sections/'+p_sections['name_of_paper']+'.json', 'w') as file:
json.dump(p_sections, file)
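# An illustrative sketch of the per-paper JSON shape this script expects,
# inferred from the accesses above (field values are made up):
example_p_data = {
    "name": "some_paper.pdf",  # the trailing ".pdf" is stripped via [:-4]
    "metadata": {
        "sections": [
            {"heading": None, "text": "..."},            # None headings become 'abstract'
            {"heading": "Related Work", "text": "..."},  # routed into section_3
        ]
    },
}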
|
dchandak99/LongSumm
|
.ipynb_checkpoints/join_sections_manual-checkpoint.py
|
join_sections_manual-checkpoint.py
|
py
| 3,727 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6807988061
|
import setuptools
import os
import codecs
from setuptools import setup
# https://packaging.python.org/guides/single-sourcing-package-version/
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else: # for-else: reached only if no __version__ line was found
raise RuntimeError("Unable to find version string.")
setup(
name="oo-tools",
version=get_version("oo_tools/__init__.py"),
url="",
author="Wesley Uykimpang",
description="Some object-oriented classes + utilities for python",
packages=setuptools.find_packages(),
install_requires=['pyyaml', 'requests'],
python_requires = ">=3.6",
setup_requires = ['pytest-runner'],
tests_require = ['pytest'],
package_data={'oo_tools': ['*.py']}
)
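# get_version() above expects oo_tools/__init__.py to contain a line of this
# form (the version number here is illustrative):
__version__ = "0.1.0"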
|
wesuuu/oo-tools
|
setup.py
|
setup.py
|
py
| 1,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35707696287
|
import bcrypt
import time
from flask import Flask, jsonify, request
from flask_cors import CORS
# * ============ (Core functions) ============ *#
from utils.save_results_in_db import save_results_in_db
from utils.scan_for_vulns import scan_for_vulns
from utils.data_adapter import data_adapter
from utils.save_results_as_json import save_results_as_json
from utils.obtain_cve_info_from_api import obtain_cve_info_from_api
from utils.get_default_gateway import get_default_gateway
from utils.db_connection import db_connection
from utils.get_db_results import get_db_results
from utils.get_db_results_filter import get_db_results_filter
from utils.obtain_isp_info_from_api import obtain_isp_info_from_api
from utils.obtain_user_collection import obtain_user_collection
from utils.get_db_reports import get_db_reports
# * ========= API ========= *#
from api.reports.get_top_cve import get_top_cve
from api.reports.get_top_isp import get_top_isp
from api.reports.get_top_vendor import get_top_vendor
from api.reports.get_top_vendor_cve import get_top_vendor_cve
from api.reports.get_top_ip import get_top_ip
from api.reports.get_top_isp_cve import get_top_isp_cve
from api.reports.get_top_port_cve import get_top_port_cve
from api.reports.get_top_ip_scanning_time import get_top_ip_scanning_time
app = Flask(__name__)
CORS(app)
@app.route("/")
def index():
return "Hello World!"
@app.route("/scan", methods=["POST"])
def scan():
userId = request.get_json()["userId"]
gateway = get_default_gateway()
start_time = time.time()
scan_results = scan_for_vulns(gateway, "nmap -sV --script vulners")
save_results_as_json(scan_results, "1-scan_results.json")
scan_results_adapted = data_adapter(scan_results, gateway, userId)
scan_results_adapted = obtain_isp_info_from_api(scan_results_adapted)
collection = db_connection()
if len(scan_results_adapted["vulnerabilities"]) == 0:
# save_results_in_db(collection, scan_results_adapted)
end_time = time.time()
elapsed_time = end_time - start_time
scan_results_adapted["scanningTime"] = elapsed_time
save_results_as_json(scan_results_adapted, "2-scan_results_adapted.json")
save_results_in_db(collection, scan_results_adapted)
return jsonify(scan_results_adapted)
scan_results_adapted_cve_info = obtain_cve_info_from_api(scan_results_adapted)
end_time = time.time()
elapsed_time = end_time - start_time
scan_results_adapted_cve_info["scanningTime"] = elapsed_time
save_results_as_json(
scan_results_adapted_cve_info, "3-scan_results_adapted_cve_info.json"
)
save_results_in_db(collection, scan_results_adapted_cve_info)
return jsonify(scan_results_adapted_cve_info)
@app.route("/scan/all")
def getAllScans():
collection = db_connection()
results = get_db_results(collection)
return results
@app.route("/scan/filter", methods=["POST"])
def getScanByFilter():
collection = db_connection()
results = get_db_results_filter(collection)
return results
@app.route("/register", methods=["POST"])
def register_user():
try:
users_collection = obtain_user_collection()
user_data = request.get_json()
existent_user = users_collection.find_one({"email": user_data["email"]})
if existent_user:
return jsonify({"error": "El Usuario ya existe"}), 400
hashed_password = bcrypt.hashpw(
user_data["password"].encode("utf-8"), bcrypt.gensalt()
)
users_collection.insert_one(
{
"name": user_data["name"],
"email": user_data["email"],
"role": user_data["role"] if "role" in user_data else "USER",
"asn": user_data["asn"] if "asn" in user_data else None,
"password": hashed_password,
}
)
return jsonify({"message": "Usuario creado exitosamente"}), 201
except Exception as e:
print(e)
return jsonify({"error": "Error al crear el usuario"}), 500
@app.route("/login", methods=["POST"])
def login():
try:
# Obtiene los datos de inicio de sesión del cuerpo de la solicitud
login_data = request.get_json()
users_collection = obtain_user_collection()
# Busca el usuario en la base de datos por su correo electrónico
user = users_collection.find_one({"email": login_data["email"]})
if user:
# Compara la contraseña proporcionada con la contraseña almacenada en la base de datos
if bcrypt.checkpw(login_data["password"].encode("utf-8"), user["password"]):
return (
jsonify(
{
"message": "Inicio de sesión exitoso",
"user": {
"_id": str(user["_id"]),
"name": user["name"],
"email": user["email"],
"role": user["role"],
"asn": user["asn"],
},
}
),
200,
)
else:
return jsonify({"error": "Credenciales incorrectas"}), 401
else:
return jsonify({"error": "Usuario no encontrado"}), 404
except Exception as e:
print(e)
return jsonify({"error": "Error al iniciar sesión"}), 500
# Reports
@app.route("/reports")
def reports():
collection = db_connection()
results = get_db_reports(collection)
return results
@app.route("/reports/cve")
def api_get_top_cve():
return get_top_cve()
@app.route("/reports/ip")
def api_get_top_ip():
return get_top_ip()
@app.route("/reports/ip/scanning_time")
def api_get_top_ip_scanning_time():
return get_top_ip_scanning_time()
@app.route("/reports/isp")
def api_get_top_isp():
return get_top_isp()
@app.route("/reports/isp/cve")
def api_get_top_isp_cve():
return get_top_isp_cve()
@app.route("/reports/port/cve")
def api_get_top_port_cve():
return get_top_port_cve()
@app.route("/reports/vendor")
def api_get_top_vendor():
return get_top_vendor()
@app.route("/reports/vendor/cve")
def api_get_top_vendor_cve():
return get_top_vendor_cve()
if __name__ == "__main__":
app.run(debug=True, port=3000)
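# A hedged example of exercising the /scan endpoint once this server is
# running locally on port 3000 (the userId value is a placeholder):
import requests

resp = requests.post("http://localhost:3000/scan", json={"userId": "demo-user"})
print(resp.json())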
|
JorgeAVargasC/router-scan-backend
|
app.py
|
app.py
|
py
| 6,478 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19815525990
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^regprocess$', views.user),
url(r'^jobs/new$', views.registration),
url(r'^loginprocess$', views.login_process),
url(r'^login$', views.login),
url(r'^logout$', views.logout),
url(r'^jobprocess$', views.job_process),
url(r'^dashboard$', views.jobs),
url(r'^job/(?P<jobid>\w+)/delete$', views.remove_job),
url(r'^job/update/(?P<jobid>\w+)$', views.update),
url(r'^jobs/edit/(?P<jobid>\w+)$', views.edit_job),
url(r'^add/(?P<jobid>\w+)$', views.add),
url(r'^giveup/(?P<jobid>\w+)$', views.giveup),
url(r'^jobs/(?P<jobid>\w+)$', views.details)
]
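# Note: django.conf.urls.url() was deprecated in Django 2.0 and removed in
# Django 4.0. An equivalent sketch of the first few patterns using the modern
# re_path() API:
from django.urls import re_path
from . import views

urlpatterns = [
    re_path(r'^$', views.index),
    re_path(r'^regprocess$', views.user),
    re_path(r'^jobs/(?P<jobid>\w+)$', views.details),
]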
|
aidapira/handyhelper
|
apps/job_manager_app/urls.py
|
urls.py
|
py
| 724 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74183020029
|
# -*- coding: utf-8 -*-
from django.conf import urls
from django.contrib.auth import decorators
from .views import HistoriaCreateView
from .views import HistoriaDetailView
from .views import HistoriaPacienteListView
from .views import HistoriaUpdateView
HISTORIA_CREATE_URL_NAME = 'historia_create'
HISTORIA_UPDATE_URL_NAME = 'historia_update'
HISTORIA_DETAIL_URL_NAME = 'historia_detail'
HISTORIA_LIST_URL_NAME = 'historia_list'
urlpatterns = urls.patterns("",
urls.url(
regex=r'^nueva/$',
view=decorators.login_required(HistoriaCreateView.as_view()),
name=HISTORIA_CREATE_URL_NAME
),
urls.url(
regex=r'^editar/(?P<pk>\d+)$',
view=decorators.login_required(HistoriaUpdateView.as_view()),
name=HISTORIA_UPDATE_URL_NAME
),
urls.url(
regex=r'^(?P<pk>\d+)/$',
view=decorators.login_required(HistoriaDetailView.as_view()),
name=HISTORIA_DETAIL_URL_NAME
),
urls.url(
regex=r'^paciente/(?P<paciente_id>\d+)/$',
view=decorators.login_required(HistoriaPacienteListView.as_view()),
name=HISTORIA_LIST_URL_NAME
)
)
|
gustavoatt/consultas
|
consultas_proyecto/historias_app/urls.py
|
urls.py
|
py
| 1,138 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1705824671
|
import sys
import json
import h5py
import numpy as np
import matplotlib.pyplot as plt
import sys_id_utils
for i, data_file in enumerate(sys.argv[1:]):
data = h5py.File(data_file, 'r')
run_param = json.loads(data.attrs['jsonparam'])
print(run_param)
t = data['t'][()]
v_stimu = data['v_stimulus'][()]
v_plant = data['v_plant'][()]
v_error = data['v_error'][()]
is_trial = data['is_trial'][()]
stimu_count = data['stimulus_count'][()]
stimu_event = data['stimulus_event'][()]
# Mask of trial region
mask = is_trial > 0
t = t[mask]
v_stimu = v_stimu[mask]
v_plant = v_plant[mask]
v_error = v_error[mask]
stimu_count = stimu_count[mask]
stimu_event = stimu_event[mask]
# Remove last few points
k = 3
t = t[:-k]
v_stimu = v_stimu[:-k]
v_plant = v_plant[:-k]
v_error = v_error[:-k]
stimu_count = stimu_count[:-k]
stimu_event = stimu_event[:-k]
num_pts = t.shape[0]
nperseg = num_pts // 12 # integer number of points per segment
f_sample = 1.0/(t[1] - t[0])
f_cutoff = 0.7
# Compute gain and phase as a function of frequency
f, gain_db, phase_deg = sys_id_utils.freq_response(v_stimu[:,0], v_plant[:,0], f_sample, f_cutoff, nperseg)
if i==0:
fig0, ax0 = plt.subplots(3,1,sharex=True)
ax0[0].plot(t, v_stimu[:,0],'b')
ax0[0].plot(t, v_plant[:,0],'r')
ax0[0].set_ylabel('vel (pix/sec)')
ax0[0].grid(True)
ax0[1].plot(t, v_error[:,0],'b')
ax0[1].grid(True)
ax0[1].set_ylabel('err (pix/sec)')
ax0[2].plot(t, stimu_count)
ax0[2].grid(True)
ax0[2].set_xlabel('t (sec)')
if i==0:
fig1, ax1 = plt.subplots(2,1,sharex=True)
fig1.suptitle('Frequency Response')
ax1[0].semilogx(f, gain_db,'or')
ax1[0].grid(True, which='both', axis='both')
ax1[0].set_ylabel('gain (dB)')
ax1[1].semilogx(f, phase_deg,'or')
ax1[1].grid(True, which='both', axis='both')
ax1[1].set_ylabel('phase lag (deg)')
ax1[1].set_xlabel('f (Hz)')
plt.show()
|
willdickson/imafly
|
python/imafly/examples/data_step_tmp/analyze_step_data.py
|
analyze_step_data.py
|
py
| 2,044 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36332873922
|
import os
import time
import subprocess
LIB = 'neural_style.py'
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
LIB_PATH = os.path.join(DIR_PATH, 'lib/neural-style-tf-master/')
for content_img in os.listdir(os.path.join(LIB_PATH, 'image_input')):
print(f'--------------- {content_img} ---------------')
for style_img in os.listdir(os.path.join(LIB_PATH, 'styles')):
print(f'\n{style_img}')
output_img = style_img[:-4] + '_' +content_img[:-4]
output_pixel_max = 512
# output_pixel_max = 1280
print(f'output pixel max: {output_pixel_max}')
tic = time.time()
subprocess.run(['python', os.path.join(LIB_PATH, LIB),
'--style_imgs', style_img,
'--content_img', content_img,
'--img_name', output_img,
'--max_size', str(output_pixel_max),
# '--original_colors',
'--device', '/gpu:0'],
capture_output=True, cwd=LIB_PATH)
toc = time.time()
print(f'Elapsed time is {round((toc - tic)/60, 2)} minutes')
|
alexhla/deep-learning-for-computer-vision
|
run_neural_style_tf.py
|
run_neural_style_tf.py
|
py
| 948 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1975281972
|
import math
pi = math.acos (-1)
def main ():
t = int (input ())
for i in range (0, t):
inp = input ().split (' ')
ans = 0
ans += pi * (int (inp [0]) ** 2)
new = 4
for j in range (1, int (inp [1])):
ans += new * ((int (inp [0]) / (2 ** j)) ** 2) * pi
new *= 3
print (ans)
if __name__ == '__main__':
main ()
|
joaoandreotti/competitive_programming
|
maps19_kattis/f.py
|
f.py
|
py
| 392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39688221214
|
# Time: 4^gold + size(grid)
# Space: size(grid)
from typing import List # needed for the type hints below
class Solution:
def getMaximumGold(self, grid: List[List[int]]) -> int:
max_gold = float('-inf')
for row in range(len(grid)):
for col in range(len(grid[0])):
if grid[row][col]:
seen = set()
max_gold = max(max_gold, self.dfs_util(grid, row, col, seen, 0))
return max_gold if max_gold!=float('-inf') else 0
def dfs_util(self, grid, row, col, seen, cur_sum):
if (row,col) in seen or row not in range(len(grid)) or col not in range(len(grid[0])) or not grid[row][col]:
return cur_sum
# print(row, col)
seen.add((row, col))
down = self.dfs_util(grid, row+1, col, seen, cur_sum+grid[row][col])
right = self.dfs_util(grid, row, col+1, seen, cur_sum+grid[row][col])
up = self.dfs_util(grid, row-1, col, seen, cur_sum+grid[row][col])
left = self.dfs_util(grid, row, col-1, seen, cur_sum+grid[row][col])
seen.remove((row, col))
return max(left, right, up, down)
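# Usage sketch with the standard LeetCode 1219 example; the best route
# 9 -> 8 -> 7 collects 24 gold:
grid = [[0, 6, 0],
        [5, 8, 7],
        [0, 9, 0]]
print(Solution().getMaximumGold(grid))  # 24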
|
cmattey/leetcode_problems
|
Python/lc_1219_path_with_maximum_gold.py
|
lc_1219_path_with_maximum_gold.py
|
py
| 1,093 |
python
|
en
|
code
| 4 |
github-code
|
6
|
6880332813
|
# -- Project information -----------------------------------------------------
project = "Test build"
copyright = "2018, Executable Books Project"
author = "Executable Books Project"
extensions = ["sphinx_comments", "myst_parser"]
comments_config = {
"hypothesis": True,
"utterances": {"repo": "executablebooks/sphinx-comments", "theme": "footheme",},
"dokieli": True,
}
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
|
yangxuan21/sphinx-comments
|
tests/config/conf.py
|
conf.py
|
py
| 1,271 |
python
|
en
|
code
| null |
github-code
|
6
|
18972684429
|
import pandas as pd
from dagster import asset, get_dagster_logger
from SSH_DEMO.resources import daily_partitions_def
# path for the directory as served from the SFTP server
GLOBAL_PREFIX = "upload"
DB_ZONE = "landing"
def _source_path_from_context(context):
return (
context.solid_def.output_defs[0].metadata["source_file_base_path"]
+ "/"
+ context.partition_key
+ "/"
+ context.solid_def.output_defs[0].metadata["source_file_name"]
)
def read_csv_sftp_direct(sftp, remotepath: str, partition_key: str, *args, **kwargs) -> pd.DataFrame:
"""
Read a file from a remote host using SFTP over SSH.
Args:
sftp: the already initialized paramikro SFTP session
remotepath: the file path on the remote to read
partition_key: the key of the processed partition
*args: positional arguments to pass to pd.read_csv
**kwargs: keyword arguments to pass to pd.read_csv
Returns:
a pandas DataFrame with data loaded from the remote host
"""
remote_file = sftp.open(remotepath)
dataframe = pd.read_csv(remote_file, *args, **kwargs)
dataframe['event_dt'] = partition_key
now_ts = pd.Timestamp.now()
dataframe['load_ts'] = now_ts
remote_file.close()
sftp.close()
return dataframe
@asset(
compute_kind="python",
partitions_def=daily_partitions_def,
metadata={"source_file_base_path": GLOBAL_PREFIX, "source_file_name": "foo.csv", "db_zone": DB_ZONE},
required_resource_keys={"credentials", "ssh"},
# io_manager_key="parquet_io_manager"
)
def foo_asset(context):
path = _source_path_from_context(context)
get_dagster_logger().info(f"Processing file '{path}'")
ssh = context.resources.ssh
sftp = ssh.open_sftp()
df = read_csv_sftp_direct(sftp, path, context.partition_key)
return df
@asset(
compute_kind="python",
partitions_def=daily_partitions_def,
metadata={"source_file_base_path": GLOBAL_PREFIX, "source_file_name": "bar.csv", "db_zone": DB_ZONE},
required_resource_keys={"credentials", "ssh"},
# io_manager_key="parquet_io_manager"
)
def bar_asset(context):
return _shared_helper(context)
@asset(
compute_kind="python",
partitions_def=daily_partitions_def,
metadata={"source_file_base_path": GLOBAL_PREFIX, "source_file_name": "baz.csv", "db_zone": DB_ZONE},
required_resource_keys={"credentials", "ssh"},
# io_manager_key="parquet_io_manager"
)
def baz_asset(context):
return _shared_helper(context)
def _shared_helper(context):
path = _source_path_from_context(context)
get_dagster_logger().info(f"Shared processing file '{path}'")
ssh = context.resources.ssh
sftp = ssh.open_sftp()
df = read_csv_sftp_direct(sftp, path, context.partition_key)
return df
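# A minimal sketch of what SSH_DEMO.resources might define (an assumption;
# the start date is a placeholder, and the 'credentials'/'ssh' resources these
# assets require are omitted here):
from dagster import DailyPartitionsDefinition

daily_partitions_def = DailyPartitionsDefinition(start_date="2022-01-01")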
|
geoHeil/dagster-ssh-demo
|
SSH_DEMO/assets/ingest_assets.py
|
ingest_assets.py
|
py
| 2,839 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10854990799
|
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import Dataset, DataLoader
from utils import Language
SRC_LANG = Language('src')
TRG_LANG = Language('trg')
class SentenceDataset(Dataset):
"""
This class loads the desired data split for the Occupation Classification dataset
"""
def __init__(self, task, num_train, batch_size, data_path, dataset, debug=False):
"""
Args:
"""
self.batch_size = batch_size
self.src_file = data_path + dataset + "." + task + '.src'
self.trg_file = data_path + dataset + "." + task + '.trg'
src_sentences = open(self.src_file).readlines()
trg_sentences = open(self.trg_file).readlines()
self.alignment_file = data_path + dataset + "." + task + ".align"
alignment_sentences = open(self.alignment_file).readlines()
if debug: # small scale
src_sentences = src_sentences[:int(1e5)]
trg_sentences = trg_sentences[:int(1e5)]
alignment_sentences = alignment_sentences[: int(1e5)]
if dataset == 'train':
src_sentences = src_sentences[:num_train]
trg_sentences = trg_sentences[:num_train]
alignment_sentences = alignment_sentences[:num_train]
# parallel corpora must have equal length
assert (len(src_sentences) == len(trg_sentences))
self.samples = []
self.src_samples = []
self.trg_samples = []
self.aligned_outputs = []
# represent all sentences
for idx in range(0, len(src_sentences)):
# get the slice
src_sample = SRC_LANG.get_sent_rep(src_sentences[idx])
trg_sample = TRG_LANG.get_sent_rep(trg_sentences[idx])
align_sample = alignment_sentences[idx]
self.src_samples.append(src_sample)
self.trg_samples.append(trg_sample)
self.aligned_outputs.append(align_sample)
# represent them
# src_sample = [SRC_LANG.get_sent_rep(s) for s in src_sample]
# trg_sample = [TRG_LANG.get_sent_rep(s) for s in trg_sample]
# sort by decreasing source len
sorted_ids = sorted(enumerate(self.src_samples), reverse=True, key=lambda x: len(x[1]))
src_sample = [self.src_samples[i] for i, v in sorted_ids]
trg_sample = [self.trg_samples[i] for i, v in sorted_ids]
align_sample = [self.aligned_outputs[i] for i, v in sorted_ids]
src_len = [len(s) for s in src_sample]
trg_len = [len(t) for t in trg_sample]
# longest source/target sequence lengths
max_src_len = max(src_len)
max_trg_len = max(trg_len)
# pad the extra indices
src_sample = SRC_LANG.pad_sequences(src_sample, max_src_len)
trg_sample = TRG_LANG.pad_sequences(trg_sample, max_trg_len)
# generated masks
aligned_outputs = []
for alignment in align_sample:
# print (alignment)
current_alignment = np.zeros([max_trg_len, max_src_len])
for pair in alignment.strip().split():
src_i, trg_j = pair.split("-")
src_i = min(int(src_i) + 1, max_src_len - 1)
trg_j = min(int(trg_j) + 1, max_trg_len - 1)
current_alignment[trg_j][src_i] = 1
aligned_outputs.append(current_alignment)
# numpy them
self.src_samples = np.array(src_sample, dtype=np.int64)
self.trg_samples = np.array(trg_sample, dtype=np.int64)
self.aligned_outputs = np.array(aligned_outputs)
# align output is batch_size x max target_len x max_src_len
assert (self.src_samples.shape[1] == max_src_len)
assert (self.trg_samples.shape[1] == max_trg_len)
# craft samples out of prepared data
for idx in range(0, len(self.src_samples)):
src_sample = self.src_samples[idx]
trg_sample = self.trg_samples[idx]
self.samples.append([src_sample, len(src_sample), trg_sample, len(trg_sample), self.aligned_outputs[idx]])
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return self.samples[idx]
class SentenceDataModule(pl.LightningDataModule):
"""
This Lightning module takes a "task" argument and produces DataLoaders for that task
using predefined task-Dataset instances.
"""
def __init__(self, task, batch_size, num_train, data_path, debug=False):
super().__init__()
self.task = task
self.batch_size = batch_size
self.num_train = num_train
self.debug = debug
self.data_path = data_path
# noinspection PyAttributeOutsideInit
def setup(self, stage=None):
self.train = SentenceDataset(self.task, self.num_train, self.batch_size, self.data_path, 'train', debug=self.debug)
# don't accept new words from validation and test set
SRC_LANG.stop_accepting_new_words()
TRG_LANG.stop_accepting_new_words()
self.val = SentenceDataset(self.task, self.num_train, self.batch_size, self.data_path, 'dev', debug=self.debug)
self.test = SentenceDataset(self.task, self.num_train, self.batch_size, self.data_path, 'test', debug=self.debug)
def train_dataloader(self):
return DataLoader(self.train, batch_size=self.batch_size, num_workers=4)
def val_dataloader(self):
return DataLoader(self.val, batch_size=self.batch_size, num_workers=4)
def test_dataloader(self, batch_size=None):
if batch_size is None:
batch_size = self.batch_size
# pin_memory=True
return DataLoader(self.test, batch_size=batch_size, num_workers=4)
def prepare_data(self, *args, **kwargs):
# download or similar ...
pass
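# A usage sketch (the task name and data_path are placeholders; the loaders
# assume <data_path><split>.<task>.src/.trg/.align files exist):
dm = SentenceDataModule(task="occupation", batch_size=32, num_train=10_000,
                        data_path="data/", debug=True)
dm.setup()
train_loader = dm.train_dataloader()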
|
matprst/deceptive-attention-reproduced
|
deceptive-attention/src/seq2seq/lightning/data_utils.py
|
data_utils.py
|
py
| 5,858 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22050926816
|
from flask import Flask, request, render_template, session, redirect, url_for, jsonify
from models.user import User
from models.rawpicture import Rawpicture
from models.savepicture import Savepicture
from models.comment import Comment
from random import choice
import mlab
import base64
import requests
mlab.connect()
def base64encode(url):
link1 = base64.b64encode(requests.get(url).content)
link2 = str(link1)
link = link2.replace("b'","data:image/jpeg;base64,").replace("'","")
return link
def func_top100pics():
# Find all the finished pictures:
finished_list = Savepicture.objects(picstatus='finished', piclikes__ne=0)
# Find the 100 pictures with the most likes and store those like counts in a list:
likes_list = []
for pic in finished_list:
likes_list.append(pic.piclikes)
likes_list.sort(reverse=True) # sort in descending order
if len(likes_list) > 100:
likes_list = likes_list[:101]
likes_list = list(dict.fromkeys(likes_list)) # drop duplicate values
# Build the Top 100 by looking those like counts back up in the picture database:
top100pics = []
for i, v in enumerate(likes_list):
for pic in finished_list:
if pic.piclikes == v:
Savepicture.objects(id=pic.id).first().update(set__picpositionintop100=i+1)
artist = User.objects(username=pic.picartist).first()
toppic = {
'picpositionintop100': pic.picpositionintop100,
'picname': pic.picname,
'piclink': pic.piclink,
'piclikes': pic.piclikes,
'picartist': artist.fullname,
'username': artist.username,
'picid': pic.id
}
top100pics.append(toppic)
return top100pics
# Variables used for display in the HTML:
# 1. Picture name: picname
# 2. Image link for display: piclink
# 3. Number of likes: piclikes
# 4. Artist: picartist
def func_top100artists():
# Find all the artists:
artist_list = User.objects(totallikes__ne=0)
# Find the 100 artists with the most likes and store those like counts in a list:
likes_list = []
for artist in artist_list:
likes_list.append(artist.totallikes)
likes_list.sort(reverse=True) # sort in descending order
if len(likes_list) > 100:
likes_list = likes_list[:101]
likes_list = list(dict.fromkeys(likes_list)) # drop duplicate values
# Build the top 100 artists by looking those like counts back up in the user database:
top100artists = []
for i, v in enumerate(likes_list):
for artist in artist_list:
if artist.totallikes == v:
# Update the position in the top 100:
User.objects(username=artist.username).first().update(set__positionintop100=i+1)
# Number of finished pictures:
finished_list = Savepicture.objects(picartist=artist.username, picstatus='finished')
# # Number of pictures in the top 100 pics:
# picsintop100 = 0
# top100pics = func_top100pics()
# for pic in top100pics:
# if pic['picartist'] == artist.username:
# picsintop100 += 1
# User.objects(username=artist.username).first().update(set__picsintop100=picsintop100)
# Find that artist's most-liked picture:
likes = []
for pic in finished_list:
likes.append(pic.piclikes)
bestpic = Savepicture.objects(picartist=artist.username, picstatus='finished', piclikes=max(likes)).first()
# Add that artist's information to the top-100-artists list:
topartist = {
'positionintop100': artist.positionintop100,
'fullname': artist.fullname,
'username': artist.username,
# 'picsintop100': picsintop100,
'totallikes': artist.totallikes,
# 'finishedarts': len(finished_list),
'bestpic': bestpic.piclink,
'bestpicid': bestpic.id
}
top100artists.append(topartist)
return top100artists
# Variables used for display in the HTML:
# 1. The artist's rank: positionintop100
# 2. The artist's full name: fullname
# 3. Number of pics in top100pics: picsintop100
# 4. Total likes: totallikes
# 5. Number of finished drawings: finishedarts
# 6. Link of the most-liked drawing, for display: bestpic
def func_artist_infor(artist):
# The artist's full name:
artist_fullname = User.objects(username=artist).first().fullname
# Number of finished pictures:
finished_list = Savepicture.objects(picartist=artist, picstatus='finished')
finished_arts = len(finished_list)
# Number of pictures still in progress:
working_list = Savepicture.objects(picartist=artist, picstatus='working')
working_arts = len(working_list)
# Compute the artist's total likes:
##### Is there a way to join the user data with the picture data automatically so the total likes compute themselves?
totallikes = 0
for art in finished_list:
totallikes += art.piclikes
# # Total number of pictures in the top 100 pics:
# picsintop100 = 0
# top100pics = func_top100pics()
# for pic in top100pics:
# if pic['picartist'] == artist:
# picsintop100 += 1
# User.objects(username=artist).first().update(set__picsintop100=picsintop100)
# # Find the rank in the top 100:
# positionintop100 = 0
# top100artists = func_top100artists()
# for a in top100artists:
# if a['username'] == artist:
# positionintop100 = a['positionintop100']
# Build a dictionary holding the artist's information:
artist_infor = {
'fullname': artist_fullname,
'username': artist,
'finished_arts': finished_arts,
'working_arts': working_arts,
'totallikes': totallikes,
# 'picsintop100': picsintop100,
# 'positionintop100': positionintop100
}
return artist_infor
# Artist information:
# - The artist's full name: fullname
# - Number of finished pictures: finished_arts
# - Number of pictures being drawn: working_arts
# - That artist's total likes: totallikes
# - Dropped: number of pictures in the top 100: picsintop100 (0 means none)
# - Dropped: the artist's rank: positionintop100 (0 means not in the top)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'teamcolorpictures'
@app.route('/') # Show the homepage
def home():
return render_template('homepage.html')
@app.route('/signup', methods=['GET', 'POST']) # Register an account
def signup():
if 'token' in session:
return render_template('homepage.html')
if request.method == 'GET':
return render_template("signup.html")
else:
form = request.form
f = form['fullname']
u = form['username']
p = form['password']
# e = form['email']
new_user = User(fullname=f, username=u, password=p) #, email=e)
user_check = User.objects(username=u).first()
# email_check = User.objects(email=e).first()
warning = ''
if f == '' or u == '' or p == '': #or e == '':
warning = 'Vui lòng nhập đầy đủ thông tin!'
elif ' ' in u or ' ' in p:
warning = 'Username hoặc password không được chứa dấu cách!'
# Check whether that username or email already exists:
elif user_check is not None:
warning = 'Username đã tồn tại!'
# elif email_check is not None:
# warning = 'Email đã tồn tại'
if warning != '':
return render_template('signup.html', warning=warning)
else:
new_user.save()
session['token'] = u
# After signup, return the Welcome page
return render_template('welcome.html', fullname=f, u=u)
@app.route('/login', methods=['GET', 'POST']) # Log in
def login():
if 'token' in session:
return render_template('homepage.html')
if request.method == 'GET':
return render_template('login.html')
else:
form = request.form
u = form['username']
p = form['password']
user_check = User.objects(username=u).first()
# Check whether the username and password were entered and are correct:
warning = ''
if u == '':
warning = 'Bạn chưa nhập username!'
elif user_check is None:
warning = 'Username không tồn tại!'
else:
if p == '':
warning = 'Vui lòng nhập password!'
elif p != user_check.password:
warning = 'Password sai!'
if warning != '':
return render_template('login.html', warning=warning)
else:
session['token'] = u
# On a successful login, return the Welcome page
return render_template('welcome.html', fullname=User.objects(username=u).first().fullname, u=u)
@app.route('/logout') # Log out
def logout():
if 'token' in session:
del session['token']
return redirect(url_for('home'))
@app.route('/top100pics') # Show the 100 most-liked pics
def top100pics():
top100pics = func_top100pics()
return render_template('top100pics.html', top100pics=top100pics)
@app.route('/top100artists') # Show the 100 most-liked artists
def top100artists():
top100artists = func_top100artists()
return render_template('top100artists.html', top100artists=top100artists)
@app.route('/profile/<artist>') # Show a profile
def profile(artist):
# Run func_artist_infor and get back that artist's information
artist_infor = func_artist_infor(artist)
# Finished pictures sorted by number of likes:
# Build a list of the like counts of that artist's pictures
likes_list = []
finished_list = Savepicture.objects(picartist=artist, picstatus='finished')
for pic in finished_list:
likes_list.append(pic.piclikes)
likes_list.sort(reverse=True)
likes_list = list(dict.fromkeys(likes_list)) # drop duplicate values
# Build a list of the pictures sorted by like count, for display on the artist's profile page
artist_finised_arts = []
for i in likes_list:
for pic in finished_list:
if pic.piclikes == i:
# # Find that pic's rank in the top 100 pics, if any:
# top100pics = func_top100pics()
# positionintop100 = 0
# for toppic in top100pics:
# if toppic['picid'] == pic.id:
# positionintop100 = toppic['picpositionintop100']
# Count the comments on that picture:
comments = len(Comment.objects(picid=pic.id))
# Add each drawing's information to the list of that artist's drawings
toppic = {
# 'positionintop100': positionintop100,
'picname': pic.picname,
'piclink': pic.piclink,
'piclikes': pic.piclikes,
'picid': pic.id,
'piccomments': comments
}
artist_finised_arts.append(toppic)
# List of works in progress (only visible on your own profile when logged in)
working_list = []
if 'token' in session:
if session['token'] == artist:
working_list = Savepicture.objects(picartist=artist, picstatus='working')
return render_template('profile.html', artist_infor=artist_infor, artist_finised_arts=artist_finised_arts, working_list=working_list)
# Variables used for display in the HTML:
# 1. Artist information:
# - The artist's full name: artist_fullname
# - Number of finished pictures: finished_arts
# - Number of pictures being drawn: working_arts. (Only shown on your own profile page; other people's profiles show finished_arts only)
# - Dropped: number of pictures in the top 100: picsintop100 (0 means none)
# - Dropped: rank among the 100 artists: positionintop100 (0 means not on the list)
# 2. Information for each finished drawing, including:
# - Dropped: rank in the top 100 pics if it made the list: positionintop100
# - Picture name: picname
# - Image link for display: piclink
# - Number of likes: piclikes
# - Number of comments: piccomments
# Get the link of one random pic:
pic_list = Rawpicture.objects()
random_picid = choice(pic_list).id
@app.route('/category') # Show the overall Category page
def full_category():
# category_list = Rawpicture.objects() # Later: display all categories in the HTML with a for loop
return render_template('category.html', random_picid=random_picid)
@app.route('/category/<category>') # Show one specific category page
def one_category(category):
pic_list = Rawpicture.objects(category__icontains=category)
cap_category = category.title()
return render_template('one_category.html', pic_list=pic_list, category=cap_category)
@app.route('/new_picture/<picid>') # Show the drawing page for one picture
def new_picture(picid):
pic = Rawpicture.objects(id=picid).first()
piclinkb64 = base64encode(pic.piclink)
return render_template('new_picture.html', piclinkb64=piclinkb64)
@app.route('/view/<picid>', methods=['GET', 'POST']) # Show one finished picture for liking and commenting:
def view(picid):
pic = Savepicture.objects(id=picid).first()
artist = User.objects(username=pic.picartist).first()
comment_list = Comment.objects(picid=picid)
if request.method == 'GET':
return render_template("view.html", pic=pic, artist=artist,comment_list=comment_list)
else:
form = request.form
comment = form['comment']
warning = ''
if 'token' in session:
user = User.objects(username=session['token']).first()
new_comment = Comment(comment=comment, who_fullname=user.fullname, who_username=user.username, picid=picid)
if comment == '':
warning = 'Bạn chưa viết gì nên không có gì để đăng!'
else:
new_comment.save()
else:
warning = 'Vui lòng đăng nhập để like & comment!'
return render_template('view.html', pic=pic, artist=artist, comment_list=comment_list, warning=warning)
@app.route('/like')
def index():
return render_template('like_test.html')
@app.route('/_get_data/', methods=['POST'])
def _get_data():
piclikes = 1
return jsonify({'data': piclikes})
if __name__ == '__main__':
app.run(debug=True)
|
hoangcuong9x/test
|
app.py
|
app.py
|
py
| 16,081 |
python
|
vi
|
code
| 0 |
github-code
|
6
|
72532274109
|
from abc import ABC, abstractmethod
from models_library.api_schemas_directorv2.dynamic_services import (
DynamicServiceCreate,
RetrieveDataOutEnveloped,
RunningDynamicServiceDetails,
)
from models_library.basic_types import PortInt
from models_library.projects import ProjectID
from models_library.projects_networks import DockerNetworkAlias
from models_library.projects_nodes_io import NodeID
from models_library.service_settings_labels import SimcoreServiceLabels
from models_library.users import UserID
from servicelib.fastapi.long_running_tasks.client import ProgressCallback
from servicelib.fastapi.long_running_tasks.server import TaskProgress
class SchedulerInternalsInterface(ABC):
@abstractmethod
async def start(self) -> None:
"""initialize scheduler internals"""
@abstractmethod
async def shutdown(self):
"""finalize scheduler internals"""
class SchedulerPublicInterface(ABC):
@abstractmethod
def toggle_observation(self, node_uuid: NodeID, disable: bool) -> bool:
"""
Enables/disables the observation of the service temporarily.
NOTE: Used by director-v2 cli.
"""
@abstractmethod
async def push_service_outputs(
self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None
) -> None:
"""
Push service outputs.
NOTE: Used by director-v2 cli.
"""
@abstractmethod
async def remove_service_containers(
self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None
) -> None:
"""
Removes all started service containers.
NOTE: Used by director-v2 cli.
"""
@abstractmethod
async def remove_service_sidecar_proxy_docker_networks_and_volumes(
self, task_progress: TaskProgress, node_uuid: NodeID
) -> None:
"""
Cleans up all started resources for the service.
NOTE: Used by director-v2 cli.
"""
@abstractmethod
async def save_service_state(
self, node_uuid: NodeID, progress_callback: ProgressCallback | None = None
) -> None:
"""
Saves the state of the service.
NOTE: Used by director-v2 cli.
"""
@abstractmethod
async def add_service(
self,
service: DynamicServiceCreate,
simcore_service_labels: SimcoreServiceLabels,
port: PortInt,
request_dns: str,
request_scheme: str,
request_simcore_user_agent: str,
can_save: bool,
) -> None:
"""
Adds a new service.
"""
@abstractmethod
def is_service_tracked(self, node_uuid: NodeID) -> bool:
"""returns True if service is being actively observed"""
@abstractmethod
def list_services(
self,
*,
user_id: UserID | None = None,
project_id: ProjectID | None = None,
) -> list[NodeID]:
"""Returns the list of tracked service UUIDs"""
@abstractmethod
async def mark_service_for_removal(
self,
node_uuid: NodeID,
can_save: bool | None,
skip_observation_recreation: bool = False,
) -> None:
"""The service will be removed as soon as possible"""
@abstractmethod
async def is_service_awaiting_manual_intervention(self, node_uuid: NodeID) -> bool:
"""
returns True if services is waiting for manual intervention
A service will wait for manual intervention if there was an issue while saving
it's state or it's outputs.
"""
@abstractmethod
async def get_stack_status(self, node_uuid: NodeID) -> RunningDynamicServiceDetails:
"""Polled by the frontend for the status of the service"""
@abstractmethod
async def retrieve_service_inputs(
self, node_uuid: NodeID, port_keys: list[str]
) -> RetrieveDataOutEnveloped:
"""Pulls data from input ports for the service"""
@abstractmethod
async def attach_project_network(
self, node_id: NodeID, project_network: str, network_alias: DockerNetworkAlias
) -> None:
"""Attach project network to service"""
@abstractmethod
async def detach_project_network(
self, node_id: NodeID, project_network: str
) -> None:
"""Detach project network from service"""
@abstractmethod
async def restart_containers(self, node_uuid: NodeID) -> None:
"""Restarts containers without saving or restoring the state or I/O ports"""
|
ITISFoundation/osparc-simcore
|
services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/_abc.py
|
_abc.py
|
py
| 4,481 |
python
|
en
|
code
| 35 |
github-code
|
6
|
27998557212
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 2 14:50:05 2021
@author: mizo_
"""
import os
from PIL import Image
import numpy as np
import csv
from impreproc5 import processImg
# image =Image.open('test/test.png')
# z='test/resize/testresize.png'
# c=processImg(image,z)
c=0
directory = 'test/done'
z='test/resize/testresize.png'
result = []
with open('testcsv3.csv', 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
for filename in os.listdir(directory):
fn = os.path.join(directory, filename)
# checking if it is a file
if os.path.isfile(fn):
print(c, fn)
image=Image.open(fn)
image=processImg(image,z)
a=np.array(image).astype(np.uint8)
a= a.flatten()
#print(a)
a=a/255
print(a.shape)
#a=np.transpose(a, axes=None)
writer.writerow(a)
result.append(fn)
c+=1
print(result)
|
moataz-abbas/NeuralNetworks
|
createTestCSV.py
|
createTestCSV.py
|
py
| 1,101 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70829236348
|
from aip import AipFace
""" 你的 APPID AK SK """
APP_ID = '10777848'
API_KEY = 'ifcHAWfOSsOQQTuhI1wbinyP'
SECRET_KEY = 'OCoPqGVZOMeVPlrEAkC15AdIZqXOsuYh'
client = AipFace(APP_ID, API_KEY, SECRET_KEY)
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
image = get_file_content(r'C:\Users\yukizzc\Pictures\小妹.JPG')
""" 调用人脸检测 """
client.detect(image);
""" 如果有可选参数 """
options = {}
options["max_face_num"] = 2
options["face_fields"] = "age,beauty"
""" 带参数调用人脸检测 """
out = client.detect(image, options)
print(out['result'][0]['beauty'])
|
marcellinamichie291/Code_Store
|
baidu_api/face_demo.py
|
face_demo.py
|
py
| 631 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33078595311
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import time
from threading import Thread
import requests
class DownloadHandler(Thread):
def __init__(self, url):
super().__init__()
self.url = url
def run(self):
filename = self.url[self.url.rfind('/') + 1:]
resp = requests.get(self.url)
file_path = '/local/path/' + filename
with open(file_path, 'wb') as f:
f.write(resp.content)
def main():
api_url = 'https://example.com/api'
resp = requests.get(api_url)
data_model = resp.json()
for mm_dict in data_model['newslist']:
url = mm_dict['picUrl']
DownloadHandler(url).start()
if __name__ == '__main__':
main()
|
letterli/py-cookbook
|
books/python-100-days/Day14/requests_demo.py
|
requests_demo.py
|
py
| 733 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1047110963
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
class ticketChatForm(forms.Form):
def __init__(self, *args, **kwargs):
super(ticketChatForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
# self.helper.form_id = 'id-exampleForm'
# self.helper.form_class = 'blueForms'
self.helper.form_method = 'post'
self.helper.form_action = ''
self.helper.add_input(Submit('отправить', 'Отправить'))
post = forms.CharField(widget=forms.HiddenInput(), )
name = forms.CharField(widget=forms.HiddenInput())
body = forms.CharField(label='Сообщения')
file = forms.FileField(label='Файл', max_length=100, required=False)
def setF(self, post, name):
self.fields['post'].initial = str(post)
self.fields['name'].initial = str(name)
return True
|
hewimetall/django_Help_Desk
|
label_ListPage/form.py
|
form.py
|
py
| 931 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2018421498
|
import unittest
import sys
import os
import tempfile
import shutil
from appliapps.examples.a_pyecho import PythonEcho
from appliapps.examples.b_extecho import ExternalEcho
from appliapps.examples.cp import CpApp
from appliapps.examples.template import TemplateApp
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tdir = tempfile.mkdtemp(dir=".")
os.chdir(cls.tdir)
with open("testfile", "w") as f:
f.write("testcontent")
def test1_pyecho(self):
sys.argv = ['--COMMENT', 'comment']
PythonEcho.main()
def test2_extecho(self):
sys.argv = ['--COMMENT', 'comment']
ExternalEcho.main()
def test3_cp(self):
sys.argv = ["--FILE", "testfile"]
CpApp.main()
os.chmod("testfile", 000)
self.assertRaises(SystemExit, CpApp.main)
os.chmod("testfile", 644)
def test4_tpl(self):
sys.argv = ['--COMMENT', 'comment', '--WORKDIR', '.']
TemplateApp.main()
assert os.path.exists("template_out.tpl")
@classmethod
def tearDownClass(cls):
os.chdir("..")
shutil.rmtree(cls.tdir)
|
lcb/applicake
|
tests/test_examples.py
|
test_examples.py
|
py
| 1,160 |
python
|
en
|
code
| 1 |
github-code
|
6
|
15024595640
|
def field(items, *args):
assert len(args) > 0
result = []
for item in items:
if len(args) == 1:
if args[0] in item.keys():
result.append(item[args[0]])
else:
res = dict()
for key in args:
if key in item.keys():
res[key] = item[key]
result.append(res)
return result
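# Behavior sketch: with a single key the result is a flat list of values;
# with several keys it is a list of per-item dicts restricted to the keys
# actually present.
items = [{"a": 1, "b": 2}, {"a": 3}]
assert field(items, "a") == [1, 3]
assert field(items, "a", "b") == [{"a": 1, "b": 2}, {"a": 3}]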
|
blackfox2001/bmstu
|
RIP/labs/laba3/field.py
|
field.py
|
py
| 409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27147516534
|
import pytest
from ..common_imports import PdfXmp, PdfResource
class TestPdfXmp:
@pytest.fixture
def resource(self, test_params):
return PdfResource(test_params.resources_path + "XmpAndOtherSample.pdf", "XmpAndOtherSample.pdf")
@pytest.fixture
def text(self, resource, test_params, get_endpoint):
text = PdfXmp(resource)
return get_endpoint(text, test_params)
def test_pdf_xmp(self, text, test_params):
res = text.process()
if res.is_successful:
with open(test_params.output_path + "pdf_xmp.xml", "wb") as out_stream:
out_stream.write(res.content)
assert res.is_successful
|
dynamicpdf-api/python-client
|
test/PdfXmpEndpoint/test_pdf_xmp.py
|
test_pdf_xmp.py
|
py
| 688 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15910442299
|
import unittest
from mock import Mock, call
from six import StringIO
from trashcli.restore.file_system import RestoreReadFileSystem, \
RestoreWriteFileSystem, FakeReadCwd
from trashcli.restore.restore_cmd import RestoreCmd
from trashcli.restore.trashed_file import TrashedFile, TrashedFiles
def last_line_of(io): # type: (StringIO) -> str
return io.getvalue().splitlines()[-1]
class TestTrashRestoreCmd(unittest.TestCase):
def setUp(self):
self.stdout = StringIO()
self.stderr = StringIO()
self.trashed_files = Mock(spec=TrashedFiles)
self.trashed_files.all_trashed_files.return_value = []
self.read_fs = Mock(spec=RestoreReadFileSystem)
self.write_fs = Mock(spec=RestoreWriteFileSystem)
self.read_cwd = FakeReadCwd("cwd")
self.cmd = RestoreCmd.make(stdout=self.stdout,
stderr=self.stderr,
exit=self.capture_exit_status,
input=lambda x: self.user_reply,
version='1.2.3',
trashed_files=self.trashed_files,
read_fs=self.read_fs,
write_fs=self.write_fs,
read_cwd=self.read_cwd)
def capture_exit_status(self, exit_status):
self.exit_status = exit_status
def test_should_print_version(self):
self.cmd.run(['trash-restore', '--version'])
assert 'trash-restore 1.2.3\n' == self.stdout.getvalue()
def test_with_no_args_and_no_files_in_trashcan(self):
self.cmd.curdir = lambda: "cwd"
self.cmd.run(['trash-restore'])
assert ("No files trashed from current dir ('cwd')\n" ==
self.stdout.getvalue())
def test_until_the_restore_unit(self):
self.read_fs.path_exists.return_value = False
self.set_trashed_files_to([a_trashed_file_in('cwd/parent/path')])
self.user_reply = '0'
self.cmd.run(['trash-restore'])
assert '' == self.stderr.getvalue()
assert [call.path_exists('cwd/parent/path')] == self.read_fs.mock_calls
assert [call.mkdirs('cwd/parent'),
call.move('orig_file', 'cwd/parent/path'),
call.remove_file('info_file')] == self.write_fs.mock_calls
def test_when_user_reply_with_empty_string(self):
self.set_trashed_files_to([a_trashed_file])
self.user_reply = ''
self.cmd.run(['trash-restore'])
assert last_line_of(self.stdout) == 'Exiting'
def test_when_user_reply_with_not_number(self):
self.set_trashed_files_to([a_trashed_file])
self.user_reply = 'non numeric'
self.cmd.run(['trash-restore'])
assert last_line_of(self.stderr) == \
'Invalid entry: not an index: non numeric'
assert 1 == self.exit_status
def set_trashed_files_to(self, trashed_files):
self.trashed_files.all_trashed_files.return_value = trashed_files
a_trashed_file = TrashedFile("cwd/a_path", None, "info_file", "orig_file")
def a_trashed_file_in(path):
return TrashedFile(path, None, 'info_file', 'orig_file')
|
cloudlylooudy/trash-cli
|
tests/test_restore/restore_cmd/test_trash_restore_cmd.py
|
test_trash_restore_cmd.py
|
py
| 3,233 |
python
|
en
|
code
| null |
github-code
|
6
|
26024158970
|
# Build a COO sparse matrix
from scipy.sparse import coo_matrix  # import the required constructor
row = [0, 1, 2, 2]
col = [0, 1, 2, 3]
data = [1, 2, 3, 4]  # coordinates and values of the non-zero entries
c = coo_matrix((data, (row, col)), shape=(4, 4))  # build a 4x4 sparse matrix
print(c)
d = c.todense()  # convert the sparse matrix to a dense matrix
print(d)
e = coo_matrix(d)  # convert a mostly-zero dense matrix back to sparse form
print(e)
f = e.tocsr()  # convert the COO sparse matrix to CSR format
print(f)
print("\n")
g = e.tocsc()  # convert the COO sparse matrix to CSC format
print(g)
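# Follow-up sketch: CSR favours fast row access, CSC fast column access.
print(f.getrow(2))  # row 2 of the CSR matrix
print(g.getcol(3))  # column 3 of the CSC matrix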
|
suanhaitech/pythonstudy2023
|
july/11.py
|
11.py
|
py
| 584 |
python
|
en
|
code
| 2 |
github-code
|
6
|
71276865469
|
# internal imports
from typing import Dict, Optional
# external imports
import gspread
def add_new_row(sheet, data):
sheet.append_row(data)
def update_row(sheet, cell, data):
    for idx, d in enumerate(data):
        sheet.update_cell(cell.row, cell.col + idx, d)
def upload_results(sheet_name: str, exp_name: str, results: Dict[str, int], worksheet_name: Optional[str] = None) -> None:
"""
Upload the results to googlesheets. If no row with the exp_name
exists, then a new row will be added. If the experiment does
exist, the row will simply be updated.
"""
gc = gspread.service_account()
sh = gc.open(sheet_name)
if worksheet_name is None:
worksheet_name = sh.sheet1.title
ws = sh.worksheet(worksheet_name)
data = [exp_name] + [v for v in results.values()]
try:
cell = ws.find(exp_name)
update_row(ws, cell, data)
except gspread.CellNotFound:
add_new_row(ws, data)
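# Usage sketch (hypothetical sheet and experiment names; requires a configured
# gspread service-account credential):
#   upload_results("my-results-sheet", "exp-001", {"acc": 93, "loss": 7})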
|
jaypmorgan/labscribe
|
labscribe/googlesheets.py
|
googlesheets.py
|
py
| 968 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31513841146
|
#!/usr/bin/env python3
"""Convolutional Neural Networks"""
import numpy as np
def conv_backward(dZ, A_prev, W, b, padding="same", stride=(1, 1)):
    """Backward pass for a convolution over 3D (RGB) images.
    Arg:
       dZ: partial derivatives of the cost w.r.t. the conv output (m, h_new, w_new, c_new)
       A_prev: output of the previous layer (m, h_prev, w_prev, c_prev)
       W: filters for the convolution (kh, kw, c_prev, c_new)
       b: biases (1, 1, 1, c_new)
       padding: string 'same' or 'valid'
       stride: tuple (sh, sw)
    Returns: partial derivatives w.r.t. the previous layer (dA_prev),
             the kernels (dW) and the biases (db)
    """
k_h, k_w, c_prev, c_new = W.shape
_, h_new, w_new, c_new = dZ.shape
m, h_x, w_x, c_prev = A_prev.shape
s_h, s_w = stride
x = A_prev
if padding == 'valid':
p_h = 0
p_w = 0
if padding == 'same':
p_h = np.ceil(((s_h*h_x) - s_h + k_h - h_x) / 2)
p_h = int(p_h)
p_w = np.ceil(((s_w*w_x) - s_w + k_w - w_x) / 2)
p_w = int(p_w)
db = np.sum(dZ, axis=(0, 1, 2), keepdims=True)
x_padded = np.pad(x, [(0, 0), (p_h, p_h), (p_w, p_w), (0, 0)],
mode='constant', constant_values=0)
dW = np.zeros_like(W)
dx = np.zeros(x_padded.shape)
for i in range(m):
for h in range(h_new):
for w in range(w_new):
for f in range(c_new):
dx[i,
h*(stride[0]):(h*(stride[0]))+k_h,
w*(stride[1]):(w*(stride[1]))+k_w,
:] += dZ[i, h, w, f] * W[:, :, :, f]
dW[:, :,
:, f] += x_padded[i,
h*(stride[0]):(h*(stride[0]))+k_h,
w*(stride[1]):(w*(stride[1]))+k_w,
:] * dZ[i, h, w, f]
    if padding == 'same':
        # strip the padding added above; guard against zero-width padding,
        # where a [0:-0] slice would empty the array
        if p_h > 0:
            dx = dx[:, p_h:-p_h, :, :]
        if p_w > 0:
            dx = dx[:, :, p_w:-p_w, :]
return dx, dW, db
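# Minimal smoke test (hypothetical sizes): with 'same' padding and unit stride,
# dA_prev should match A_prev's shape.
if __name__ == '__main__':
    np.random.seed(0)
    A_prev = np.random.randn(2, 8, 8, 3)
    W = np.random.randn(3, 3, 3, 4)
    b = np.random.randn(1, 1, 1, 4)
    dZ = np.random.randn(2, 8, 8, 4)
    dA, dW, db = conv_backward(dZ, A_prev, W, b, padding="same")
    print(dA.shape, dW.shape, db.shape)  # (2, 8, 8, 3) (3, 3, 3, 4) (1, 1, 1, 4)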
|
macoyulloa/holbertonschool-machine_learning
|
supervised_learning/0x07-cnn/2-conv_backward.py
|
2-conv_backward.py
|
py
| 2,015 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71601274107
|
#!/usr/bin/env python3
from jinja2 import Template
import numpy as np
min_x = -20
max_x = 20
min_z = 0.0
max_z = 20.0
with open('nonapod_input.jinja') as template_file:
templ = Template(template_file.read())
# Grid sampling cases. 50 and 500 are not perfect squares, so an approximate
# (square) number of samples per axis is used instead.
x_values = np.linspace(min_x, max_x, 100)
z_values = np.linspace(min_z, max_z, 100)
with open('nonapod_inputs_grid_many/input_list', 'w') as fh:
    for i, x in enumerate(x_values):
        for j, z in enumerate(z_values):
            name = 'x_%i_z_%i' % (i, j)
            with open('nonapod_inputs_grid_many/%s' % name, 'w') as result:
                result.write(templ.render(x=x, z=z))
            fh.write('%i %i %s\n' % (i, j, name))
|
gridley/truss_optimization
|
write_big_grid.py
|
write_big_grid.py
|
py
| 730 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73477700027
|
from tinygrad.densetensor import DenseTensor
import numpy as np
class BatchNorm2D:
def __init__(self, sz, eps=1e-5, track_running_stats=False, training=False, momentum=0.1):
self.eps, self.track_running_stats, self.training, self.momentum = eps, track_running_stats, training, momentum
self.weight, self.bias = DenseTensor.ones(sz), DenseTensor.zeros(sz)
self.running_mean, self.running_var = DenseTensor.zeros(sz, requires_grad=False), DenseTensor.ones(sz, requires_grad=False)
self.num_batches_tracked = DenseTensor.zeros(1, requires_grad=False)
    def __call__(self, x):
        if self.track_running_stats or self.training:
            # per-channel batch statistics over the N, H, W axes
            batch_mean = x.mean(axis=(0,2,3))
            y = (x - batch_mean.reshape(shape=[1, -1, 1, 1]))
            batch_var = (y*y).mean(axis=(0,2,3))
            if self.track_running_stats:
                # exponential moving average of the batch statistics
                self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * batch_mean
                self.running_var = (1 - self.momentum) * self.running_var + self.momentum * batch_var
                if self.num_batches_tracked is None: self.num_batches_tracked = DenseTensor.zeros(1, requires_grad=False)
                self.num_batches_tracked += 1
        if self.training:
            return self.normalize(x, batch_mean, batch_var)
        return self.normalize(x, self.running_mean, self.running_var)
def normalize(self, x, mean, var):
x = (x - mean.reshape(shape=[1, -1, 1, 1])) * self.weight.reshape(shape=[1, -1, 1, 1])
return x.div(var.add(self.eps).reshape(shape=[1, -1, 1, 1])**0.5) + self.bias.reshape(shape=[1, -1, 1, 1])
class Linear:
def __init__(self, in_dim, out_dim, bias=True):
self.in_dim = in_dim
self.out_dim = out_dim
self.use_bias = bias
self.weight = DenseTensor.uniform(in_dim, out_dim)
if self.use_bias:
self.bias = DenseTensor.zeros(out_dim)
def __call__(self, x):
B, *dims, D = x.shape
x = x.reshape(shape=(B * np.prod(dims).astype(np.int32), D))
x = x.dot(self.weight)
if self.use_bias:
x = x.add(self.bias.reshape(shape=[1, -1]))
x = x.reshape(shape=(B, *dims, -1))
return x
class Dropout:
def __init__(self, p=0.5):
self.p = p
def __call__(self, x):
return x.dropout(p=self.p)
class Identity:
def __call__(self, x):
return x
class Conv2d:
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
self.out_channels = out_channels
self.kernel_size = (kernel_size, kernel_size) if isinstance(kernel_size, int) else (kernel_size[0], kernel_size[1])
self.stride = (stride, stride) if isinstance(stride, int) else (stride[0], stride[1])
self.padding = (padding, ) * 4 if isinstance(padding, int) else (padding[0], padding[0], padding[1], padding[1])
self.use_bias = bias
self.weight = DenseTensor.uniform(out_channels, in_channels, self.kernel_size[0], self.kernel_size[1])
if self.use_bias:
self.bias = DenseTensor.uniform(out_channels)
def __call__(self, x):
if self.padding[0] > 0:
x = x.pad2d(padding=self.padding)
x = x.conv2d(self.weight, stride=self.stride)
if self.use_bias:
x = x.add(self.bias.reshape(shape=(1, -1, 1, 1)))
return x
class Sequential:
def __init__(self, *layers):
self.layers = layers
def __call__(self, x):
for l in self.layers:
x = l(x)
return x
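# A minimal wiring sketch (hypothetical shapes; assumes DenseTensor implements
# the tensor ops used above, as in tinygrad forks):
#   x = DenseTensor.uniform(1, 3, 16, 16)
#   net = Sequential(Conv2d(3, 8, kernel_size=3, padding=1),
#                    BatchNorm2D(8, training=True),
#                    Dropout(0.1))
#   y = net(x)  # expected shape: (1, 8, 16, 16)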
|
fpaboim/tinysparse
|
tinygrad/nn.py
|
nn.py
|
py
| 3,311 |
python
|
en
|
code
| 9 |
github-code
|
6
|
18760758159
|
import time
import aiohttp
import discord
import importlib
import os
import sys
import requests
import asyncio
from io import BytesIO
from discord.ext import commands
from my_utils import permissions, default, dataIO
from my_utils.guildstate import state_instance
class admin(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = default.get("config.json")
self._last_result = None
@commands.command()
@commands.check(permissions.is_owner)
async def load(self, ctx, name: str):
""" Loads an extension. """
try:
self.bot.load_extension(f"cogs.{name}")
except Exception as e:
return await ctx.send(default.traceback_maker(e))
await ctx.send(f"Loaded extension **{name}.py**")
@commands.command()
@commands.check(permissions.is_owner)
async def unload(self, ctx, name: str):
""" Unloads an extension. """
try:
self.bot.unload_extension(f"cogs.{name}")
except Exception as e:
return await ctx.send(default.traceback_maker(e))
await ctx.send(f"Unloaded extension **{name}.py**")
@commands.command()
@commands.check(permissions.is_owner)
async def reload(self, ctx, name: str):
""" Reloads an extension. """
try:
self.bot.reload_extension(f"cogs.{name}")
except Exception as e:
return await ctx.send(default.traceback_maker(e))
await ctx.send(f"Reloaded extension **{name}.py**")
@commands.command()
@commands.check(permissions.is_owner)
async def reloadall(self, ctx):
""" Reloads all extensions. """
error_collection = []
for file in os.listdir("cogs"):
if file.endswith(".py"):
name = file[:-3]
try:
self.bot.reload_extension(f"cogs.{name}")
except Exception as e:
error_collection.append(
[file, default.traceback_maker(e, advance=False)]
)
if error_collection:
output = "\n".join([f"**{g[0]}** ```diff\n- {g[1]}```" for g in error_collection])
return await ctx.send(
f"Attempted to reload all extensions, was able to reload, "
f"however the following failed: \n\n{output}"
)
await ctx.send("Successfully reloaded all extensions")
@commands.command()
@commands.check(permissions.is_owner)
async def reloadutils(self, ctx, name: str):
""" Reloads a utils module. """
name_maker = f"utils_folder/{name}.py"
try:
module_name = importlib.import_module(f"utils_folder.{name}")
importlib.reload(module_name)
except ModuleNotFoundError:
return await ctx.send(f"Couldn't find module named **{name_maker}**")
except Exception as e:
error = default.traceback_maker(e)
return await ctx.send(f"Module **{name_maker}** returned error and was not reloaded...\n{error}")
await ctx.send(f"Reloaded module **{name_maker}**")
@commands.command()
@commands.check(permissions.is_owner)
async def reboot(self, ctx):
""" Reboot the bot """
await ctx.send('Rebooting now...')
time.sleep(1)
dataIO.backup_states(state_instance)
await self.bot.close()
sys.exit()
@commands.command()
@commands.check(permissions.is_owner)
async def dm(self, ctx, user_id: int, *, message: str):
""" DM the user of your choice """
user = self.bot.get_user(user_id)
if not user:
return await ctx.send(f"Could not find any UserID matching **{user_id}**")
try:
await user.send(message)
await ctx.send(f"✉️ Sent a DM to **{user_id}**")
except discord.Forbidden:
await ctx.send("This user might be having DMs blocked or it's a bot account...")
@commands.group()
@commands.check(permissions.is_owner)
async def change(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send_help(str(ctx.command))
@change.command(name="playing")
@commands.check(permissions.is_owner)
async def change_playing(self, ctx, *, playing: str):
""" Change playing status. """
if self.config.status_type == "idle":
status_type = discord.Status.idle
elif self.config.status_type == "dnd":
status_type = discord.Status.dnd
else:
status_type = discord.Status.online
if self.config.playing_type == "listening":
playing_type = 2
elif self.config.playing_type == "watching":
playing_type = 3
else:
playing_type = 0
try:
await self.bot.change_presence(
activity=discord.Activity(type=playing_type, name=playing),
status=status_type
)
dataIO.change_value("config.json", "playing", playing)
await ctx.send(f"Successfully changed playing status to **{playing}**")
except discord.InvalidArgument as err:
await ctx.send(err)
except Exception as e:
await ctx.send(e)
@change.command(name="username")
@commands.check(permissions.is_owner)
async def change_username(self, ctx, *, name: str):
""" Change username. """
try:
await self.bot.user.edit(username=name)
await ctx.send(f"Successfully changed username to **{name}**")
except discord.HTTPException as err:
await ctx.send(err)
@change.command(name="nickname")
@commands.check(permissions.is_owner)
async def change_nickname(self, ctx, *, name: str = None):
""" Change nickname. """
try:
await ctx.guild.me.edit(nick=name)
if name:
await ctx.send(f"Successfully changed nickname to **{name}**")
else:
await ctx.send("Successfully removed nickname")
except Exception as err:
await ctx.send(err)
@change.command(name="avatar")
@commands.check(permissions.is_owner)
async def change_avatar(self, ctx, url: str = None):
""" Change avatar. """
if url is None and len(ctx.message.attachments) == 1:
url = ctx.message.attachments[0].url
else:
url = url.strip('<>') if url else None
try:
bio = requests.get(url).content
await self.bot.user.edit(avatar=bio)
await ctx.send(f"Successfully changed the avatar. Currently using:\n{url}")
except aiohttp.InvalidURL:
await ctx.send("The URL is invalid...")
except discord.InvalidArgument:
await ctx.send("This URL does not contain a useable image")
except discord.HTTPException as err:
await ctx.send(err)
except TypeError:
await ctx.send("You need to either provide an image URL or upload one with the command")
@change.command(name="def_prefix")
@commands.check(permissions.is_owner)
async def change_default_prefix(self, ctx, prefix):
"""Changes the default premanent prefix"""
dataIO.change_value("config.json", "prefix", prefix)
await ctx.send(f"Successfully changed default prefix to **{prefix}**")
@commands.command(aliases = ["api_for", "api"])
@commands.check(permissions.is_owner)
    async def search_api(self, ctx, category = ""):
        """ Search for some apis """
        # check the "categories" keyword first; otherwise the generic
        # non-empty branch below would swallow it and it could never run
        if category.lower() == "categories":
            your_api = requests.get("https://api.publicapis.org/categories").json()
        elif category != "":
            your_api = requests.get(f"https://api.publicapis.org/entries?category={category.lower()}&https=true").json()
        else:
            your_api = requests.get("https://api.publicapis.org/random?auth=null").json()
        if your_api['count'] == 0:
            return await ctx.send("No APIs found")
        apis = f"{your_api['entries'][0]['Category']} apis\n"
        def auth(index):
            # use the index argument, not the loop variable captured from the enclosing scope
            if your_api['entries'][index]['Auth'] is not None:
                return your_api['entries'][index]['Auth']
            return "None"
        for i in range(your_api["count"]):
            apis += f"**{i+1}**. {your_api['entries'][i]['API']} - {your_api['entries'][i]['Description']} | Auth: {auth(i)} | Cors: {your_api['entries'][i]['Cors']} | Link: {your_api['entries'][i]['Link']}\n"
        if len(str(apis)) > 1999:
            # Discord caps messages at 2000 characters: truncate, then trim back
            # to the end of the last complete entry (the final '.' before the cut)
            apis = apis[:2000][::-1]
            arr = apis.index(".")
            apis = apis[arr:][::-1]
        return await ctx.send(apis)
@commands.group(aliases = ["file"])
@commands.check(permissions.is_owner)
async def fil(self, ctx):
if ctx.invoked_subcommand is None:
await ctx.send_help(str(ctx.command))
@fil.group()
@commands.check(permissions.is_owner)
    async def add(self, ctx, location = ""):
        if len(ctx.message.attachments) == 1 and location != "":
            # os.path.join keeps the save path portable (the original embedded a
            # Windows-style backslash in the f-string)
            save_path = os.path.join(location, ctx.message.attachments[0].filename)
            try:
                await ctx.message.attachments[0].save(save_path)
            except FileNotFoundError:
                await ctx.send("Directory not found. Creating directory...")
                os.makedirs(location)
                await ctx.message.attachments[0].save(save_path)
        elif len(ctx.message.attachments) == 1 and location == "":
            await ctx.message.attachments[0].save(ctx.message.attachments[0].filename)
else:
return await ctx.send("Provide a file as an attachment")
await ctx.message.delete(delay=1)
return await ctx.send(f"The {ctx.message.attachments[0].filename} has been added")
@fil.group()
@commands.check(permissions.is_owner)
async def remove(self, ctx, file_name_with_path):
        await ctx.send("Are you sure you want to remove the file? Please remember to unload it first if the file is an existing cog.\n(y/n)")
def mcheck(message):
if message.author == ctx.author:
return True
return False
try:
answer = await self.bot.wait_for('message', timeout=20, check=mcheck)
except asyncio.TimeoutError:
return await ctx.send("You didn't respond in time")
if answer.content == "y":
pass
else:
return await ctx.send("As you wish, the file will not be removed")
try:
default.delete(file_name_with_path)
await ctx.send(f"Removed {file_name_with_path}")
except Exception as e:
await ctx.send(e)
await ctx.message.delete(delay=1)
def setup(bot):
bot.add_cog(admin(bot))
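# Loading sketch (assumes this cog lives at cogs/admin.py under a discord.py 1.x bot):
#   bot = commands.Bot(command_prefix="!")
#   bot.load_extension("cogs.admin")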
|
Albedo-Discord/ext
|
cogs/admin.py
|
admin.py
|
py
| 10,872 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41191258670
|
#? pip install flask flask-pymongo
from flask import Flask, render_template
from flask_pymongo import PyMongo
app = Flask(__name__)
app.config['MONGO_URI'] = "mongodb://localhost:27017/myDatabase"
mongo = PyMongo(app)
@app.route('/')
def hello_world():
mongo.db.inventory.insert_one({"b":31})
a = mongo.db.inventory.find({})
return render_template('index.html',data=a)
@app.route('/mydata')
def mydata():
info = ['Vedant', 'Age: 19', 'Programmer', 'Music Lover']
return render_template('mydata.html', personal=info)
if __name__ == '__main__':
    app.run(debug=True, port=3000)
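#? Quick check once running (assumes the default localhost binding):
#?   curl http://127.0.0.1:3000/mydata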
|
Vedant817/Flask-and-MongoDB
|
main.py
|
main.py
|
py
| 569 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24615532465
|
from tiles import AnimatableTile
import pygame
class Coin(AnimatableTile):
def __init__(self, size, position, frames, data):
super().__init__(size, position, frames, data)
for i in range(len(self.frames)):
self.frames[i] = pygame.transform.scale(self.frames[i], (8, 8))
self.position.x += size / 2
self.position.y += size / 2
def live(self, dt, surface):
self.animate(dt)
self.draw(surface)
|
ysbrandB/M6FinalProject
|
code/coin.py
|
coin.py
|
py
| 462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75167070268
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import math
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size/2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size))
return window
def SSIM1(img1, img2):
(_, channel, _, _) = img1.size()
    window_size = 11
    pad = window_size // 11  # == 1 for the 11-tap window, roughly preserving spatial size
window = create_window(window_size, channel).to(img1.device)
mu1 = F.conv2d(img1, window, padding = pad, groups = channel)
mu2 = F.conv2d(img2, window, padding = pad, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = pad, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = pad, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = pad, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def SSIM(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
else:
L = val_range
padd = 0
(_, channel, height, width) = img1.size()
if window is None:
real_size = min(window_size, height, width)
window = create_window(real_size, channel=channel).to(img1.device)
mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
if full:
return ret, cs
return ret
def PSNR(img1, img2):
mse = np.mean( (img1/255. - img2/255.) ** 2 )
if mse == 0:
return 100
PIXEL_MAX = 1
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def CIEDE2000(Lab_1, Lab_2):
'''Calculates CIEDE2000 color distance between two CIE L*a*b* colors'''
C_25_7 = 6103515625 # 25**7
L1, a1, b1 = Lab_1[0], Lab_1[1], Lab_1[2]
L2, a2, b2 = Lab_2[0], Lab_2[1], Lab_2[2]
C1 = math.sqrt(a1 ** 2 + b1 ** 2)
C2 = math.sqrt(a2 ** 2 + b2 ** 2)
C_ave = (C1 + C2) / 2
G = 0.5 * (1 - math.sqrt(C_ave ** 7 / (C_ave ** 7 + C_25_7)))
L1_, L2_ = L1, L2
a1_, a2_ = (1 + G) * a1, (1 + G) * a2
b1_, b2_ = b1, b2
C1_ = math.sqrt(a1_ ** 2 + b1_ ** 2)
C2_ = math.sqrt(a2_ ** 2 + b2_ ** 2)
if b1_ == 0 and a1_ == 0:
h1_ = 0
elif a1_ >= 0:
h1_ = math.atan2(b1_, a1_)
else:
h1_ = math.atan2(b1_, a1_) + 2 * math.pi
if b2_ == 0 and a2_ == 0:
h2_ = 0
elif a2_ >= 0:
h2_ = math.atan2(b2_, a2_)
else:
h2_ = math.atan2(b2_, a2_) + 2 * math.pi
dL_ = L2_ - L1_
dC_ = C2_ - C1_
dh_ = h2_ - h1_
if C1_ * C2_ == 0:
dh_ = 0
elif dh_ > math.pi:
dh_ -= 2 * math.pi
elif dh_ < -math.pi:
dh_ += 2 * math.pi
dH_ = 2 * math.sqrt(C1_ * C2_) * math.sin(dh_ / 2)
L_ave = (L1_ + L2_) / 2
C_ave = (C1_ + C2_) / 2
_dh = abs(h1_ - h2_)
_sh = h1_ + h2_
C1C2 = C1_ * C2_
if _dh <= math.pi and C1C2 != 0:
h_ave = (h1_ + h2_) / 2
elif _dh > math.pi and _sh < 2 * math.pi and C1C2 != 0:
h_ave = (h1_ + h2_) / 2 + math.pi
elif _dh > math.pi and _sh >= 2 * math.pi and C1C2 != 0:
h_ave = (h1_ + h2_) / 2 - math.pi
else:
h_ave = h1_ + h2_
T = 1 - 0.17 * math.cos(h_ave - math.pi / 6) + 0.24 * math.cos(2 * h_ave) + 0.32 * math.cos(
3 * h_ave + math.pi / 30) - 0.2 * math.cos(4 * h_ave - 63 * math.pi / 180)
h_ave_deg = h_ave * 180 / math.pi
if h_ave_deg < 0:
h_ave_deg += 360
elif h_ave_deg > 360:
h_ave_deg -= 360
dTheta = 30 * math.exp(-(((h_ave_deg - 275) / 25) ** 2))
R_C = 2 * math.sqrt(C_ave ** 7 / (C_ave ** 7 + C_25_7))
S_C = 1 + 0.045 * C_ave
S_H = 1 + 0.015 * C_ave * T
Lm50s = (L_ave - 50) ** 2
S_L = 1 + 0.015 * Lm50s / math.sqrt(20 + Lm50s)
R_T = -math.sin(dTheta * math.pi / 90) * R_C
k_L, k_C, k_H = 1, 1, 1
f_L = dL_ / k_L / S_L
f_C = dC_ / k_C / S_C
f_H = dH_ / k_H / S_H
dE_00 = math.sqrt(f_L ** 2 + f_C ** 2 + f_H ** 2 + R_T * f_C * f_H)
return dE_00
def rgb2xyz(rgb):
def format(c):
c = c / 255.
if c > 0.04045: c = ((c + 0.055) / 1.055) ** 2.4
else: c = c / 12.92
return c * 100
rgb = list(map(format, rgb))
xyz = [None, None, None]
xyz[0] = rgb[0] * 0.4124 + rgb[1] * 0.3576 + rgb[2] * 0.1805
xyz[1] = rgb[0] * 0.2126 + rgb[1] * 0.7152 + rgb[2] * 0.0722
xyz[2] = rgb[0] * 0.0193 + rgb[1] * 0.1192 + rgb[2] * 0.9505
return xyz
def xyz2lab(xyz):
def format(c):
if c > 0.008856: c = c ** (1. / 3.)
else: c = (7.787 * c) + (16. / 116.)
return c
xyz[0] = xyz[0] / 95.047
xyz[1] = xyz[1] / 100.00
xyz[2] = xyz[2] / 108.883
xyz = list(map(format, xyz))
lab = [None, None, None]
lab[0] = (116. * xyz[1]) - 16.
lab[1] = 500. * (xyz[0] - xyz[1])
lab[2] = 200. * (xyz[1] - xyz[2])
return lab
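# End-to-end check (illustrative colour values): RGB -> XYZ -> L*a*b*, then the
# CIEDE2000 distance between two similar reds.
if __name__ == '__main__':
    lab_a = xyz2lab(rgb2xyz([255, 0, 0]))
    lab_b = xyz2lab(rgb2xyz([200, 0, 0]))
    print('dE00 =', CIEDE2000(lab_a, lab_b))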
|
chenkhan/haze-synthesizing
|
util/metrics.py
|
metrics.py
|
py
| 5,878 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27318923223
|
# @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import cv2
# This class is inspired by https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/TFLite_detection_webcam.py
class GenerateCarView:
def __init__(self):
self.frameRateCalc = 1
self.freq = cv2.getTickFrequency()
self.t1 = cv2.getTickCount()
    def getFrame(self, image, pylons=None):
# self.addFrameRate(image)
if pylons is not None:
self.addBoxesToImage(image, pylons)
return image
def addFrameRate(self,image):
cv2.putText(image,'FPS: {0:.2f}'.format(self.frameRateCalc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
self.t2 = cv2.getTickCount()
self.frameRateCalc = self.freq/(self.t2-self.t1)
self.t1 = cv2.getTickCount()
def addBoxesToImage(self, image, pylons):
for pylone in pylons:
xmin = pylone['xmin']
ymin = pylone['ymin']
xmax = pylone['xmax']
ymax = pylone['ymax']
cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
# Draw label
label = pylone['label'] + ' %d%%' % (int(pylone['score']*100)) + ' ' + str(round(pylone['distanceAbsolut'],2)) + ' m'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(image, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(image, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
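# Usage sketch (hypothetical detection values; the dict keys are exactly the
# ones addBoxesToImage reads):
#   view = GenerateCarView()
#   frame = view.getFrame(image, pylons=[{
#       'xmin': 10, 'ymin': 20, 'xmax': 60, 'ymax': 90,
#       'label': 'pylon', 'score': 0.87, 'distanceAbsolut': 1.25}])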
|
iisys-hof/autonomous-driving
|
car-controller/src/mainController/View/Render/GenerateCarView.py
|
GenerateCarView.py
|
py
| 2,531 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12608079869
|
'''
Load embedding, create dictionary, convert text to index
'''
import io
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
import argparse
#import json
import os
import numpy as np
import pickle
import pdb
def text2index(text, vocab, analyzer):
# 1 is unk
doc_toks = [vocab[y] if y in vocab else 1 for y in analyzer(text) ]
return doc_toks
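# Quick illustration (hypothetical vocabulary; index 1 is unk, 0 is padding):
#   text2index('the cat sat', {'cat': 2, 'sat': 3}, str.split)  -> [1, 2, 3]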
def load_vectors(fname):
fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
data = {}
for line in fin:
tokens = line.rstrip().split(' ')
        data[tokens[0]] = np.fromiter(map(float, tokens[1:]), dtype=float)
return data
def build_vocab(text, emb, emb_dim=300, max_df=.7, max_features=20000, stop_words= 'english'):
    '''
    Fit the vocabulary and build the embedding matrix
    :param text: list of documents for creating the vocabulary
    :return: (vectorizer, embedding matrix); indices 0 and 1 are reserved for padding and unk
    '''
vect = CountVectorizer(stop_words=stop_words, max_df=max_df, max_features=max_features,
token_pattern=r"(?u)[!\"#\$\%&\'()\*\+,-./:;<=>\?@\[\\\]\^_`{|}~\w]+")
vect.fit(text)
no_embedding = [k for k in vect.vocabulary_.keys() if k not in emb]
print("No Embeddings for: ")
print(len(no_embedding))
vocab = [k for i, k in enumerate(vect.vocabulary_.keys()) if k in emb]
new_vocab = dict([(k, i + 2) for i, k in enumerate(vocab)])
# Set 0 to be the padding index, 1 to be unk
vect.vocabulary_ = new_vocab
print('Vocabulary size: ', len(new_vocab))
embedding = np.zeros(shape=(len(new_vocab) + 2, emb_dim))
for k,i in new_vocab.items():
embedding[i] = emb[k]
return vect, embedding
def df2List(df, vocab, analyzer, label_dict, ismnli = False):
out = []
for i, row in df.iterrows():
set1 = text2index(row['sentence1'], vocab, analyzer)
set2 = text2index(row['sentence2'], vocab, analyzer)
label = label_dict[row['label']]
if ismnli:
genre = row['genre']
else:
genre = 'snli'
out.append([set1, set2, label, i, genre])
return out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--inputPath", default = '../hw2_data/') # Should have train/val in this directory
parser.add_argument("--embPath", default='../hw2_data/wiki-news-300d-1M.vec') # embedding vector path
parser.add_argument("--emb_dim", type=int, default = 300)
parser.add_argument("--outPath") # Output Path
parser.add_argument("--max_df", type=float, default = 0.7)
parser.add_argument("--max_features", type=int, default=20000)
parser.add_argument("--stop_words", default = 'english')
args = parser.parse_args()
if not os.path.isdir(args.outPath):
os.mkdir(args.outPath)
print("Data processing parameters: ", args)
print("Loading Data")
train = pd.read_csv(args.inputPath + 'snli_train.tsv', header = 0, sep = '\t')
test = pd.read_csv(args.inputPath + 'snli_val.tsv', header=0, sep='\t')
train_mnli = pd.read_csv(args.inputPath + 'mnli_train.tsv', header=0, sep='\t')
test_mnli = pd.read_csv(args.inputPath + 'mnli_val.tsv', header=0, sep='\t')
emb = load_vectors(args.embPath)
print("Fitting Vocabulary")
vect, embedding = build_vocab(train['sentence1'] + ' ' + train['sentence2'], emb, emb_dim = args.emb_dim,
max_df = args.max_df, max_features = args.max_features, stop_words=args.stop_words)
#vect = pickle.load(open(args.outPath + 'vect.p', 'rb'))
vocab = vect.vocabulary_
analyzer = vect.build_analyzer()
print('Transform data frame')
label_dict = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
train2 = df2List(train, vocab, analyzer, label_dict)
test2 = df2List(test, vocab, analyzer, label_dict)
train_mnli2 = df2List(train_mnli, vocab, analyzer, label_dict, ismnli = True)
test_mnli2 = df2List(test_mnli, vocab, analyzer, label_dict, ismnli = True)
pickle.dump(train2, open(args.outPath + 'train.p', 'wb'))
pickle.dump(test2, open(args.outPath + 'test.p', 'wb'))
pickle.dump(train_mnli2, open(args.outPath + 'train_mnli.p', 'wb'))
pickle.dump(test_mnli2, open(args.outPath + 'test_mnli.p', 'wb'))
pickle.dump(vect, open(args.outPath + 'vect.p', 'wb'))
pickle.dump(embedding, open(args.outPath + 'embedding.p', 'wb'))
# Document length:
lsLen = [max(len(x[0]), len(x[1])) for x in train2]
print('Median doc size: ', np.percentile(lsLen, 50))
print('95 percentile: ', np.percentile(lsLen, 95))
print('Max: ', max(lsLen))
lsLen = [max(len(x[0]), len(x[1])) for x in train_mnli2]
print('Median mnli_doc size: ', np.percentile(lsLen, 50))
print('95 percentile: ', np.percentile(lsLen, 95))
print('Max: ', max(lsLen))
|
jingsliu/NLP_HW
|
HW2/code/dataPrep.py
|
dataPrep.py
|
py
| 4,852 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11221497921
|
import os
import pandas as pd
import time
import data_prep
import freq_analysis
from features_extract import numeric_extract
from features_extract import price_extract
from features_extract import feature_extract
from features_extract import ID_extract
from features_extract import mfrID_extract
from features_extract import factorize
from features_extract import replace_blank
from list_flatten import list_flatten
from parameters_by_retailer import param_retailer as param
from rating_extract_newegg import rating_extract_newegg
######################### change these parameters ############################
path = 'C:\\Users\\roy79\\Desktop\\Research\\product-analysis'
working_dir = path + '\\cleaning'
retailer_name = 'newegg'
data_path = path+'\\raw_data\\'+retailer_name+'_hdphone.csv'
products = pd.read_csv(data_path)
# do not change colnames and features_re
colnames = param[retailer_name]['colnames']
features_re = param[retailer_name]['features_re']
# used as arguments in factorize(), None by default
factorize_conn_col = param[retailer_name]['factorize_conn_col']
factorize_type_col = param[retailer_name]['factorize_type_col']
# extract integer/float (ID, price, etc.) from these columns
numeric_columns = [
        # For Walmart:
        # colnames['COLNAME_RATING'],
        # colnames['COLNAME_NUM_RATING'],
        # colnames['COLNAME_RETAILER_ID'],
        '_UPC_'  # (previously plain 'UPC')
        ]
# replace np.nan in these columns with 0
# if bhpv, then '' because it doesn't have a colname_about
feat_replace = ''
if (colnames['COLNAME_ABOUT'] != ''):
feat_replace = ['_connection_', '_microphone_']
##############################################################################
# print most frequent words related to feature
# returns word frequency count for later use to avoid expensive frequency count
# ngram = 1 or 3
def word_freq_analysis(products, ngram, feature, word_freq_df=None):
# Exploratory Analysis:
# find most frequently associated word for each feature
# eg. features: 'noise'-> noise cancelling; noise reduction; ...
# update features_re if necessary
is_return = False
if (word_freq_df is None):
is_return = True
if (ngram == 1):
word_freq_df = freq_analysis.unigram_freq(products)
if (ngram == 3):
word_freq_df = freq_analysis.trigram_freq(products)
most_freq = freq_analysis.most_freq_word_feat(word_freq_df,feature)
print(most_freq)
if (is_return == True):
return word_freq_df
def execute():
# set working directory
os.chdir(working_dir)
products = pd.read_csv(data_path)
print('Successfully loaded dataset')
# run next line to print all colnames after loading the dataset
# products.columns
# This helps remove empty rows that accidentally gets scraped
# DEBUG
#print(sum(products['name'] == np.nan))
products = data_prep.remove_blank_row(products,colnames['COLNAME_TITLE'])
# DEBUGs
#print(sum(products['name'] == np.nan))
# clean the about / description text and put them in column: 'about_text_clean'
if (colnames['COLNAME_ABOUT'] != ''):
print('Start about/description preparation')
data_prep.about_prep(products,colnames['COLNAME_ABOUT'])
# =============================================================================
# # This is useful for determining what keywords to search for each feature
# # exploratory analysis
# print('Start Word Frequency Analysis')
# word_freq = word_freq_analysis(products, 1, 'noise')
# trigram_freq = word_freq_analysis(products, 3, 'frequency')
# word_freq_analysis(products, 3, 'noise', trigram_freq)
# =============================================================================
# Extract features from about/description
print('Start Feature Extraction')
feat_ext_df = feature_extract(products, features_re)
products = pd.concat([products, feat_ext_df], axis=1)
# Flatten any lists
if (colnames['COLNAME_FEAT_LABELS'] != ''):
print('Start List Flattening')
data_prep.list_clean(products,
colnames['COLNAME_FEAT_LABELS'],
colnames['COLNAME_FEAT_VALUES'])
flattened_feat = list_flatten(products)
# Combine the extracted features and the original dataset
products = pd.concat([products, flattened_feat], axis=1)
# Remove used products
print('Remove used products')
products = data_prep.remove_used(products, colnames['COLNAME_TITLE'])
# Extract price
print('Extract price')
price_extract(products,colnames['COLNAME_PRICE_CUR'], colnames['COLNAME_PRICE_ORIG'])
# Extract numbers from select columns
print('Extract numerics from columns')
numeric_extract(products, numeric_columns)
mfrID_extract(products, '_manufacturerID_')
# Extract IDs
if (colnames['COLNAME_MODEL'] != ''):
print('Extract semi-numeric IDs')
ID_extract(products, colnames['COLNAME_MODEL'])
# This is for Newegg because its rating is embedded in unstructured text
if (retailer_name == 'newegg'):
print('Newegg specific cleanup functions')
rating_extract_newegg(products,colnames['COLNAME_RATING'])
products[colnames['COLNAME_NUM_RATING']] = products[colnames['COLNAME_NUM_RATING']]*(-1)
# Categorize these columns
print('Factorize columns')
if (colnames['COLNAME_ABOUT'] != ''):
factorize(products,
mic_colname='_microphone_',
noise_colname='_noise_',
water_colname='_water_',
# This line is specific to walmart, change column names to fit your dataset / or comment out before run
wireless_colname=factorize_conn_col,
# This line is specific to walmart, change column names to fit your dataset / or comment out before run
type_colname=factorize_type_col)
# Replace blank cells with 0 in these columns
if (feat_replace != ''):
print('Replace empty cells with 0 in select columns')
replace_blank(products, feat_replace)
print('Save cleaned csv to: ' + working_dir)
    products.to_csv(retailer_name+'_hdphone_cleaned.csv', index=False)
execute()
# todo:
#
# combine columns
|
renyc432/headphone-product-analysis
|
cleaning/execute_cleaning.py
|
execute_cleaning.py
|
py
| 6,517 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30157505435
|
from find_dir import cmd_folder
import pandas as pd
import os
import json
import numpy as np
buyer_history = pd.read_csv(cmd_folder+"data/processed/buyer_history.csv")
sorted_history = buyer_history[["buyer_id","visit_id","timestamp","event"]].sort_values(["buyer_id","visit_id","timestamp","event"],ascending=True)
sorted_history["regroup"] = False
total_pageview_chat = sorted_history["visit_id"][sorted_history["event"]=="pageview"].index.values.tolist()
total_pageview_chat.extend(sorted_history["visit_id"][sorted_history["event"]=="chat"].index.values.tolist())
unique = sorted_history["visit_id"][sorted_history["event"]=="pageview"].drop_duplicates().index.values.tolist()
unique.extend( sorted_history["visit_id"][sorted_history["event"]=="chat"].drop_duplicates().index.values.tolist())
duplicate_pageview_chat = list((set(total_pageview_chat) - set(unique)))
index_without_duplicates = list(set(sorted_history.index.values.tolist()) - set(duplicate_pageview_chat))
regroup_history= sorted_history[["buyer_id","timestamp","event"]].loc[index_without_duplicates]
with open(cmd_folder+"data/processed/trace_regroup.json","w") as buf:
buf.write("[")
    buyers = regroup_history["buyer_id"].unique()
    for buyer in buyers[:-1]:
        buf.write("{\"id\":\""+buyer+"\",\"trace\":")
        regroup_history[regroup_history["buyer_id"]==buyer][["event"]].to_json(path_or_buf=buf,orient="records",force_ascii=False)
        buf.write("},\n")
    # write the final buyer explicitly: after the loop the loop variable still
    # points at the second-to-last buyer, so reusing it would drop the last one
    last_buyer = buyers[-1]
    buf.write("{\"id\":\""+last_buyer+"\",\"trace\":")
    regroup_history[regroup_history["buyer_id"]==last_buyer][["event"]].to_json(path_or_buf=buf,orient="records",force_ascii=False)
    buf.write("}]")
trace = json.load(open(cmd_folder+"data/processed/trace_regroup.json","r"))
|
pierrenodet/PFE
|
src/make_trace_bis.py
|
make_trace_bis.py
|
py
| 1,746 |
python
|
en
|
code
| 2 |
github-code
|
6
|
28002035268
|
import os
import torch
import numpy as np
from PIL import Image
# This dataset comes form paper:
# [2D and 3D Segmentation of Uncertain Local Collagen Fiber Orientations in SHG Microscopy]
# https://github.com/Emprime/uncertain-fiber-segmentation
def collagen3d_dataset(dataloader_config, label_type='mask'):
# Ref -- https://blog.csdn.net/Teeyohuang/article/details/79587125
# label_type: 'classlabel' or 'mask'
dataset_path = dataloader_config['dataset_path']
train_batch_size = dataloader_config['train_batch_size']
val_batch_size = dataloader_config['val_batch_size']
num_workers = os.cpu_count()
# num_workers = 1
train_data = _DatasetLD(data_path=dataset_path, dataset_return=label_type, read_in_ram_mode=True)
test_data = _DatasetLD(data_path=dataset_path, dataset_return=label_type, read_in_ram_mode=False)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=train_batch_size, shuffle=True,
num_workers=num_workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(test_data, batch_size=val_batch_size, shuffle=False,
num_workers=num_workers, pin_memory=True)
# return train_data, test_data
return train_loader, val_loader
class _DatasetLD(torch.utils.data.Dataset):
def __init__(self, data_path, dataset_return, transform=None, target_transform=None, read_in_ram_mode=False):
super().__init__()
self.dataset_path = data_path
self.dataset_return = dataset_return
self.read_in_ram_mode = read_in_ram_mode
self.img_name = []
self.num_label = []
for subfolder in os.listdir(data_path):
subfolder_path = os.path.join(self.dataset_path, subfolder)
# Sub-folders:
# shg-ce-de: SHG image
# shg-masks: SHG mask
if subfolder == 'shg-ce-de':
for root, dirs, files in os.walk(subfolder_path):
if not len(dirs) and len(files):
# print(root, dirs, files)
self.img_name.append(root)
# Read all images in RAM, which requires a large RAM
if self.read_in_ram_mode:
img_all_list, mask_all_list = [], []
for i, index in enumerate(range(len(self.img_name))):
print(f'Reading image [{i+1}]/[{len(self.img_name)}]')
image_folder = self.img_name[index]
mask_folder = self.img_name[index].replace('shg-ce-de', 'shg-masks')
img_list, mask_list = [], []
img_file_list, mask_file_list = list(os.listdir(image_folder)), list(os.listdir(mask_folder))
img_file_list.sort(key=self._sort_num)
for img_name in img_file_list:
img = np.array(Image.open(os.path.join(image_folder, img_name)).convert('L')) # [H, W]
img = np.reshape(img, img.shape + (1,)) # Convert gray image into [H, W, C] mode
# img = np.array(Image.open(os.path.join(image_folder, img_name)))
img_list.append(img)
mask_file_list.sort(key=self._sort_num)
for mask_name in mask_file_list:
mask_list.append(np.array(Image.open(os.path.join(mask_folder, mask_name)))) # [H, W, C]
img_all_list.append(img_list)
mask_all_list.append(mask_list)
self.img_name, self.num_label = img_all_list, mask_all_list
self.transform = transform
self.target_transform = target_transform
@staticmethod
def _inner_rand_cut(img_in, cut_start):
        h, w = img_in.shape[:2]  # works for [H, W] and [H, W, C] inputs
if h > w:
return img_in[cut_start:cut_start+w, :, :]
else:
return img_in[:, cut_start:cut_start+h, :]
@staticmethod
def _sort_num(name_string):
'''
Separate numbers in a name, in order to sort.
Extract the first number in string
'''
        import re
        num = re.findall(r'\d+\.?\d*', name_string)  # raw string avoids invalid-escape warnings
        try:
            num = float(num[0])
        except (IndexError, ValueError):  # no number found, or not parseable
            num = -1.0
return num
def __getitem__(self, index): # Read data once
if self.dataset_return == 'mask':
return self._getitem_mask(index)
elif self.dataset_return == 'classlabel':
return self._getitem_label(index)
else:
return
def _getitem_label(self, index): # Read data once
# Todo !!!!!!!!! Not written
file_name = self.img_name[index]
label = self.num_label[index]
img = Image.open(os.path.join(self.dataset_path, 'image', file_name))
img = np.array(img)
# Random cut
h, w = img.shape
cut_start = np.random.randint(0, abs(h-w))
img = self._inner_rand_cut(img, cut_start)
if self.transform is not None:
img = self.transform(img)
return img, label
def _getitem_mask(self, index): # Read data once
if self.read_in_ram_mode:
img_list = self.img_name[index]
mask_list = self.num_label[index]
else:
image_folder = self.img_name[index]
mask_folder = self.img_name[index].replace('shg-ce-de', 'shg-masks')
img_list, mask_list = [], []
img_file_list, mask_file_list = list(os.listdir(image_folder)), list(os.listdir(mask_folder))
img_file_list.sort(key=self._sort_num)
for img_name in img_file_list:
img = np.array(Image.open(os.path.join(image_folder, img_name)).convert('L')) # [H, W]
img = np.reshape(img, img.shape + (1,)) # Convert gray image into [H, W, C] mode
# img = np.array(Image.open(os.path.join(image_folder, img_name)))
img_list.append(img)
mask_file_list.sort(key=self._sort_num)
for mask_name in mask_file_list:
mask_list.append(np.array(Image.open(os.path.join(mask_folder, mask_name)))) # [H, W, C]
img = np.array(img_list).transpose([3, 1, 2, 0]) # Convert from [D, H, W, C] into [C, H, W, D] mode
mask = np.array(mask_list)
mask = np.max(mask, axis=3) # Convert mask to label
mask = np.transpose(mask, [1, 2, 0]) # Convert from [D, H, W] into [H, W, D] mode
# ToDo Temp
_, h, w, d = img.shape
new_size = 64
new_depth = 32
h_random, w_random, d_random = np.random.randint(0, h-new_size), np.random.randint(0, w-new_size), np.random.randint(0, d-new_depth)
img = img[:, h_random:h_random+new_size, w_random:w_random+new_size, d_random:d_random+new_depth]
mask = mask[h_random:h_random+new_size, w_random:w_random+new_size, d_random:d_random+new_depth]
######################################
# Should cut with mask here
######################################
# # Random cut
# h, w = img.shape
# if np.abs(h - w):
# cut_start = np.random.randint(0, abs(h - w))
# img = self._inner_rand_cut(img, cut_start)
# mask = self._inner_rand_cut(mask, cut_start)
if self.transform is not None:
img = self.transform(img)
return np.array(img, dtype=np.float32), np.array(mask/255., dtype=np.int64)
def __len__(self):
return len(self.img_name)
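# Usage sketch (hypothetical paths and batch sizes; the keys match what
# collagen3d_dataset reads from dataloader_config):
#   cfg = {'dataset_path': '/data/uncertain-fiber-segmentation',
#          'train_batch_size': 2, 'val_batch_size': 1}
#   train_loader, val_loader = collagen3d_dataset(cfg, label_type='mask')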
|
Surtol-Sun/TrainFramework_torch
|
components/dataset_loader/dataset_loader_3dcollagen.py
|
dataset_loader_3dcollagen.py
|
py
| 7,648 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30192254789
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
# sigmoid function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
# define the regression model
def model(X, theta):
    return sigmoid(np.dot(X, theta.T))
# compute the gradient
def gradient(X, y, theta):
    grad = np.zeros(theta.shape)  # initialise the gradient with the same shape as the parameter vector
    error = (model(X, theta) - y).ravel()  # compute the error
    for j in range(len(theta.ravel())):  # compute the n partial derivatives (the gradient)
        term = np.multiply(error, X[:, j])
        grad[0, j] = np.sum(term) / len(X)
    return grad
# define the cost function
def cost(X, y, theta):
    return np.sum((np.multiply(-y, np.log(model(X, theta)))) - (np.multiply(1 - y, np.log(1 - model(X, theta)))))/(len(X))
path = 'datas' + os.sep + 'iris.csv'
irisData = pd.read_csv(path, header=None, names=['petal_len', 'petal_width', 'sepal_len', 'sepal_width', 'class'],
                       dtype={'petal_len': float, 'petal_width': float, 'sepal_len': float, 'sepal_width': float,
                              'class': str})
irisData.loc[irisData['class'] == 'setosa', 'class'] = 0  # map setosa to 0
irisData.loc[irisData['class'] == 'versicolor', 'class'] = 1  # map versicolor to 1
irisData.loc[irisData['class'] == 'virginica', 'class'] = 2  # map virginica to 2
print("--------------- dataset overview ------------------ #")
print(irisData.head())  # print the first five rows
print(irisData.shape)  # print the dataset dimensions
print(irisData.describe())  # print summary statistics
print()
# plot the data distribution
positive = irisData[irisData['class'] == 0]  # positive class
negative = irisData[irisData['class'] == 1]  # negative class
# fig, ax = plt.subplots(figsize=(8, 6))
fig, figer1 = plt.subplots(figsize=(10, 5))  # set the figure size
figer1.scatter(positive['sepal_len'], positive['sepal_width'], s=30, c='b', marker='o', label='setosa')  # scatter plot of setosa
figer1.scatter(negative['sepal_len'], negative['sepal_width'], s=30, c='r', marker='x',
               label='versicolor')  # scatter plot of versicolor
figer1.legend(loc=2)  # legend in the upper-left corner
figer1.set_xlabel('sepal_len')  # x-axis label
figer1.set_ylabel('sepal_width')  # y-axis label
plt.show()  # show the initial figure
irisData.insert(2, 'Ones', 1)  # insert a column of ones as the third column
print("---------- first five rows of the prepared data ------------ ")
print(irisData.head())
orig_data = irisData.to_numpy()  # build a matrix (DataFrame.as_matrix was removed in newer pandas)
print(orig_data.dtype)
print("---------------- initial matrix -----------------")
print(orig_data[:5, :])
cols = orig_data.shape[1]  # number of columns in the matrix
orig_data = orig_data[:100, :]  # keep the first 100 rows
scaled_data1 = orig_data[:50, 2:cols]  # matrix for the first class (petal attribute columns selected)
scaled_data2 = orig_data[50:100, 2:cols]  # matrix for the second class
np.random.shuffle(scaled_data1)  # shuffle the first class
np.random.shuffle(scaled_data2)  # shuffle the second class
np.random.shuffle(orig_data)
# take a fixed number of rows from each class matrix as the test set
# scaled_data = orig_data[4:100, 2:cols]
#scaled_data = np.vstack((scaled_data1[:25, :], scaled_data2[:25, :]))  # 50%
#scaled_data = np.vstack((scaled_data1[:15, :], scaled_data2[:15, :]))  # 30%
scaled_data = np.vstack((scaled_data1[:5, :], scaled_data2[:5, :]))  # 10%
np.random.shuffle(scaled_data)  # shuffle the test set
print("------- test set -------")
print(scaled_data)
print("------ test set shape -------")
print(scaled_data.shape)
# take the same number of remaining rows from each class matrix as the training set
# orig_data = orig_data[:4, 2:cols]
#orig_data = np.vstack((scaled_data1[25:50, :], scaled_data2[25:50, :]))  # 50%
#orig_data = np.vstack((scaled_data1[15:50, :], scaled_data2[15:50, :]))  # 70%
orig_data = np.vstack((scaled_data1[5:50, :], scaled_data2[5:50, :]))  # 90%
np.random.shuffle(orig_data)  # shuffle the training set
print("--------- training set --------")
print(orig_data)
cols = orig_data.shape[1]  # recompute: the stacked training matrix now has only 4 columns
X = orig_data[:100, 0:cols - 1]  # the three feature columns (Ones, sepal_len, sepal_width)
y = orig_data[:100, cols - 1:cols]  # the last column holds the labels
print("------- X values -------")
print(X)
print("--------- y values ----------")
print(y)
# build the parameter vector
theta = np.zeros([1, 3])
# print the matrix dimensions
print("---------- training data info ----------")
print("parameter values:")
print(theta)
print("shape of X:")
print(X.shape)
print("shape of y:")
print(y.shape)
print("shape of theta:")
print(theta.shape)
c = cost(X, y, theta)  # initial value of the cost function
print("-------- initial cost --------")
print(X.dtype)
print(c)
# reshuffle the data
def shuffleData(data):
    np.random.shuffle(data)
    cols = data.shape[1]
    X = data[:100, 0:cols - 1]
    y = data[:100, cols - 1:]
    return X, y
import time
# gradient-descent solver
def descent(data, theta, batchSize, threshold, alpha):
    init_time = time.time()  # record the start time
    i = 0  # iteration counter
    k = 0  # batch offset
    X, y = shuffleData(data)  # shuffle the data
    grad = np.zeros(theta.shape)  # initial gradient
    costs = [cost(X, y, theta)]  # initial cost value
    # start iterating
    while True:
        grad = gradient(X[k:k + batchSize], y[k:k + batchSize], theta)  # compute the gradient
        k += batchSize  # consume one batch of data
        if k >= n:  # all data consumed
            k = 0
            X, y = shuffleData(data)  # reshuffle the data
        theta = theta - alpha * grad  # update the parameters
        print(theta)
        cost_new = cost(X, y, theta)  # compute the new cost
        print(cost_new)
        costs.append(cost_new)  # append the new cost to the list
        i += 1  # update the loop counter
        value = costs  # cost history
        if abs(value[-1] - value[-2]) < threshold:
            break
    return theta, i - 1, costs, grad, time.time() - init_time
# plot the training curve
def Run(data, theta, batchSize, thresh, alpha):
    theta, iter, costs, grad, dur = descent(data, theta, batchSize, thresh, alpha)  # run gradient descent
    name = "Original" if (data[:, 1] > 2).sum() > 1 else "Scaled"
    name += " data - learning rate: {} -".format(alpha)
    # pick the descent-strategy label and the stopping criterion
    if batchSize == n:
        strDescType = "Gradient"
    elif batchSize == 1:
        strDescType = "Stochastic"
    else:
        strDescType = "Mini-batch({})".format(batchSize)
    name += strDescType + " descent - stop: "
    strStop = "costs change < {}".format(thresh)
    name += strStop
    print("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(name, theta, iter, costs[-1],
                                                                                           dur))
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.plot(np.arange(len(costs)), costs, 'r')
    ax.set_xlabel('Iterations')
    ax.set_ylabel('Cost')
    ax.set_title(name)
    plt.show()
    return theta
# start training the model
n = 100  # read in 100 samples at a time for training
print("training matrix:")
print(orig_data)
theta = Run(orig_data, theta, n, thresh=0.000001, alpha=0.1)  # stop when the cost change between two iterations is very small (1e-6)
# evaluate the result on the test set
# set the threshold: predict 1 if >= 0.5, otherwise 0
def predict(X, theta):
    return [1 if x >= 0.5 else 0 for x in model(X, theta)]
scaled_X = scaled_data[:, :3]  # test-set inputs
y = scaled_data[:, 3]  # ground-truth labels
print("-------- test inputs ---------")
print(scaled_X)
print("---------- theta -----------")
print(theta)
predictions = predict(scaled_X, theta)
print("----------- predictions -----------")
print(predictions)
print("------------- ground truth -----------")
print(y)
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
accuracy = (sum(map(int, correct)) / len(correct)) * 100
print('accuracy = {0}%'.format(accuracy))
# decision-boundary function
def y1(x2, theta):
    # y = theta[0] + theta[1] * x1 + theta[2] * x2
    x1 = (-(theta[0, 0] + theta[0, 2] * x2)) / theta[0, 1]
    return x1
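# Boundary recap: model(x) = sigmoid(theta0 + theta1*x1 + theta2*x2) equals 0.5
# exactly where theta0 + theta1*x1 + theta2*x2 = 0; y1() solves that line for x1.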
x2 = np.linspace(0, 5, 1000)
x1 = y1(x2, theta)
fig, figer1 = plt.subplots(figsize=(10, 5))  # set the figure size
figer1.scatter(positive['sepal_len'], positive['sepal_width'], s=30, c='b', marker='o', label='setosa')  # scatter plot of setosa
figer1.scatter(negative['sepal_len'], negative['sepal_width'], s=30, c='r', marker='x',
               label='versicolor')  # scatter plot of versicolor
figer1.legend(loc=2)  # legend in the upper-left corner
figer1.set_xlabel('sepal_len')  # x-axis label
figer1.set_ylabel('sepal_width')  # y-axis label
plt.plot(x1, x2, 'r-', linewidth=1)
plt.show()  # show the result figure
|
TJPU-ML/Homework-for-the-fall-semester-of-2018
|
iris classification/张家源/iris4.py
|
iris4.py
|
py
| 9,017 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22904436913
|
import webbrowser
class Movie():
    ''' This class provides a way to store movie-related information '''
    '''This is a class constant; the Google Python Style Guide says this type of variable should be spelled out in all caps'''
    VALID_RATINGS = ["G", "PG", "PG-13", "R"]
    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube, date, numb_of_times_watched):
        # NOTE: the double leading/trailing underscores mark a name with special meaning in Python
        '''Constructs an instance of the Movie class for each movie on the website. Arguments are assigned to the corresponding instance variables below.
        Arguments:
        movie_title(str): Name of the movie
        movie_storyline(str): Brief description of the movie and its plot
        poster_image(str): URL of the movie poster from Wikipedia (if available)
        trailer_youtube(str): URL of the movie trailer from YouTube (if available)
        date(number): Year in which the movie was released
        numb_of_times_watched (number): Total number of times I've seen the movie'''
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
self.launch_date = date
self.times_watched = numb_of_times_watched
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url) # opens browser to show movie trailer
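# Usage sketch (illustrative values and placeholder URLs only):
#   movie = Movie("Example Movie", "A short storyline",
#                 "https://example.com/poster.jpg",
#                 "https://example.com/trailer", 1999, 2)
#   movie.show_trailer()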
|
OdeclasV/movie_website
|
media.py
|
media.py
|
py
| 1,363 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72757362749
|
"""
Roll adjusted and multiple prices for a given contract, after checking that we do not have positions
NOTE: this does not update the roll calendar .csv files stored elsewhere. Under DRY the sole source of production
roll info is the multiple prices series
"""
from dataclasses import dataclass
import numpy as np
from syscore.interactive import print_menu_of_values_and_get_response, get_and_convert
from syscore.objects import success, failure, status, named_object
from syscore.text import landing_strip, print_with_landing_strips_around
from sysdata.data_blob import dataBlob
from sysobjects.contracts import futuresContract
from sysobjects.production.roll_state import (
default_state,
roll_adj_state,
explain_roll_state_str,
allowable_roll_state_from_current_and_position,
RollState,
no_roll_state,
)
from sysproduction.reporting.report_configs import roll_report_config
from sysproduction.reporting.reporting_functions import run_report_with_data_blob
from sysproduction.data.positions import diagPositions, updatePositions
from sysproduction.data.contracts import dataContracts
from sysproduction.data.prices import diagPrices, get_valid_instrument_code_from_user
from sysproduction.reporting.data.rolls import (
rollingAdjustedAndMultiplePrices,
relative_volume_in_forward_contract_versus_price,
)
no_change_required = named_object("No roll required")
EXIT_CODE = "EXIT"
def interactive_update_roll_status():
with dataBlob(log_name="Interactive_Update-Roll-Status") as data:
function_to_call = get_rolling_master_function()
function_to_call(data)
def get_rolling_master_function():
MANUAL_INPUT = "Manually input instrument codes and manually decide when to roll"
MENU_OPTIONS = [
MANUAL_INPUT,
"Cycle through instrument codes automatically, but manually decide when to roll",
"Cycle through instrument codes automatically, auto decide when to roll, manually confirm rolls",
"Cycle through instrument codes automatically, auto decide when to roll, automatically roll",
]
function_list = [
update_roll_status_manual_cycle,
update_roll_status_auto_cycle_manual_decide,
update_roll_status_auto_cycle_manual_confirm,
update_roll_status_full_auto,
]
print("How do you want to do your rolls today?")
selection = print_menu_of_values_and_get_response(
MENU_OPTIONS, default_str=MANUAL_INPUT
)
selection_idx = MENU_OPTIONS.index(selection)
function_to_call = function_list[selection_idx]
return function_to_call
@dataclass
class RollDataWithStateReporting(object):
instrument_code: str
original_roll_status: RollState
position_priced_contract: int
allowable_roll_states_as_list_of_str: list
days_until_roll: int
relative_volume: float
@property
def original_roll_status_as_string(self):
return self.original_roll_status.name
def display_roll_query_banner(self):
print(landing_strip(80))
print("Current State: %s" % self.original_roll_status)
print(
"Current position in priced contract %d (if zero can Roll Adjusted prices)"
% self.position_priced_contract
)
print("")
print("These are your options:")
print("")
        for state in self.allowable_roll_states_as_list_of_str:
            print("%s: %s" % (state, explain_roll_state_str(state)))
print("")
def update_roll_status_manual_cycle(data: dataBlob):
do_another = True
while do_another:
instrument_code = get_valid_instrument_code_from_user(
data=data, allow_exit=True, exit_code=EXIT_CODE
)
if instrument_code is EXIT_CODE:
# belt and braces
do_another = False
else:
manually_report_and_update_roll_state_for_code(data, instrument_code)
return success
def update_roll_status_auto_cycle_manual_decide(data: dataBlob):
days_ahead = get_days_ahead_to_consider_when_auto_cycling()
instrument_list = get_list_of_instruments_to_auto_cycle(data, days_ahead=days_ahead)
for instrument_code in instrument_list:
manually_report_and_update_roll_state_for_code(
data=data, instrument_code=instrument_code
)
return success
def update_roll_status_auto_cycle_manual_confirm(data: dataBlob):
days_ahead = get_days_ahead_to_consider_when_auto_cycling()
auto_parameters = get_auto_roll_parameters()
instrument_list = get_list_of_instruments_to_auto_cycle(data, days_ahead=days_ahead)
for instrument_code in instrument_list:
roll_data = setup_roll_data_with_state_reporting(data, instrument_code)
roll_state_required = auto_selected_roll_state_instrument(
data=data, roll_data=roll_data, auto_parameters=auto_parameters
)
if roll_state_required is no_change_required:
warn_not_rolling(instrument_code, auto_parameters)
else:
modify_roll_state(
data=data,
instrument_code=instrument_code,
original_roll_state=roll_data.original_roll_status,
roll_state_required=roll_state_required,
confirm_adjusted_price_change=True,
)
def update_roll_status_full_auto(data: dataBlob):
days_ahead = get_days_ahead_to_consider_when_auto_cycling()
instrument_list = get_list_of_instruments_to_auto_cycle(data, days_ahead=days_ahead)
auto_parameters = get_auto_roll_parameters()
for instrument_code in instrument_list:
roll_data = setup_roll_data_with_state_reporting(data, instrument_code)
roll_state_required = auto_selected_roll_state_instrument(
data=data, roll_data=roll_data, auto_parameters=auto_parameters
)
if roll_state_required is no_change_required:
warn_not_rolling(instrument_code, auto_parameters)
else:
modify_roll_state(
data=data,
instrument_code=instrument_code,
original_roll_state=roll_data.original_roll_status,
roll_state_required=roll_state_required,
confirm_adjusted_price_change=False,
)
def get_days_ahead_to_consider_when_auto_cycling() -> int:
days_ahead = get_and_convert(
"How many days ahead should I look for expiries?",
type_expected=int,
allow_default=True,
default_value=10,
)
return days_ahead
def get_list_of_instruments_to_auto_cycle(data: dataBlob, days_ahead: int = 10) -> list:
diag_prices = diagPrices()
list_of_potential_instruments = (
diag_prices.get_list_of_instruments_in_multiple_prices()
)
instrument_list = [
instrument_code
for instrument_code in list_of_potential_instruments
if include_instrument_in_auto_cycle(
data=data, instrument_code=instrument_code, days_ahead=days_ahead
)
]
    print_with_landing_strips_around(
        "Identified the following instruments that are near expiry: %s"
        % str(instrument_list)
    )
return instrument_list
def include_instrument_in_auto_cycle(
data: dataBlob, instrument_code: str, days_ahead: int = 10
) -> bool:
days_until_expiry = days_until_earliest_expiry(data, instrument_code)
return days_until_expiry <= days_ahead
def days_until_earliest_expiry(data: dataBlob, instrument_code: str) -> int:
data_contracts = dataContracts(data)
carry_days = data_contracts.days_until_carry_expiry(instrument_code)
roll_days = data_contracts.days_until_roll(instrument_code)
price_days = data_contracts.days_until_price_expiry(instrument_code)
return min([carry_days, roll_days, price_days])
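# Illustrative numbers (reviewer addition, not from the source): with carry_days=40,
# roll_days=12 and price_days=55 this returns 12, so the instrument is picked up by
# include_instrument_in_auto_cycle once days_ahead >= 12.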
@dataclass
class autoRollParameters:
min_volume: float
manual_prompt_for_position: bool
state_when_position_held: RollState
def get_auto_roll_parameters() -> autoRollParameters:
min_volume = get_and_convert(
"Minimum relative volume before rolling",
type_expected=float,
allow_default=True,
default_value=0.1,
)
    manual_prompt_for_position_str = input(
        "Manually prompt for state if we have a position? (n / *anything else* for yes)"
    )
    manual_prompt_for_position = manual_prompt_for_position_str != "n"
if manual_prompt_for_position:
state_when_position_held = no_change_required
else:
state_when_position_held = get_state_to_use_for_held_position()
auto_parameters = autoRollParameters(
min_volume=min_volume,
manual_prompt_for_position=manual_prompt_for_position,
state_when_position_held=state_when_position_held,
)
return auto_parameters
STATE_OPTIONS = [RollState.Passive, RollState.Force, RollState.Force_Outright]
STATE_OPTIONS_AS_STR = [str(state) for state in STATE_OPTIONS]
def get_state_to_use_for_held_position() -> RollState:
print(
"Choose state to automatically assume if we have a position in priced contract AND roll state is currently NO ROLL"
)
select_state_for_position_held = print_menu_of_values_and_get_response(
STATE_OPTIONS_AS_STR, default_str=STATE_OPTIONS_AS_STR[0]
)
state_when_position_held = STATE_OPTIONS[
STATE_OPTIONS_AS_STR.index(select_state_for_position_held)
]
return state_when_position_held
def auto_selected_roll_state_instrument(
data: dataBlob,
roll_data: RollDataWithStateReporting,
auto_parameters: autoRollParameters,
) -> RollState:
if roll_data.relative_volume < auto_parameters.min_volume:
print_with_landing_strips_around(
"For %s relative volume of %f is less than minimum of %s : NOT AUTO ROLLING"
% (
roll_data.instrument_code,
roll_data.relative_volume,
auto_parameters.min_volume,
)
)
return no_change_required
no_position_held = roll_data.position_priced_contract == 0
if no_position_held:
print_with_landing_strips_around(
"No position held, auto rolling adjusted price for %s"
% roll_data.instrument_code
)
return roll_adj_state
if auto_parameters.manual_prompt_for_position:
run_roll_report(data, roll_data.instrument_code)
roll_state_required = get_roll_state_required(roll_data)
return roll_state_required
original_roll_status = roll_data.original_roll_status
if original_roll_status is no_roll_state:
roll_state_required = auto_parameters.state_when_position_held
print_with_landing_strips_around(
"Automatically changing state from %s to %s for %s"
% (original_roll_status, roll_state_required, roll_data.instrument_code)
)
else:
print_with_landing_strips_around(
"Roll status already set to %s for %s: not changing"
% (original_roll_status, roll_data.instrument_code)
)
return no_change_required
return roll_state_required
def warn_not_rolling(instrument_code: str, auto_parameters: autoRollParameters):
    print_with_landing_strips_around(
        "\n NOT rolling %s as it doesn't meet the auto parameters %s\n"
        % (instrument_code, str(auto_parameters))
    )
def manually_report_and_update_roll_state_for_code(
data: dataBlob, instrument_code: str
):
run_roll_report(data, instrument_code)
manually_update_roll_state_for_code(data, instrument_code)
def manually_update_roll_state_for_code(data: dataBlob, instrument_code: str):
    # First get the roll info; this also prints the report to the console
data.log.setup(instrument_code=instrument_code)
roll_data = setup_roll_data_with_state_reporting(data, instrument_code)
roll_state_required = get_roll_state_required(roll_data)
modify_roll_state(
data=data,
instrument_code=instrument_code,
original_roll_state=roll_data.original_roll_status,
roll_state_required=roll_state_required,
confirm_adjusted_price_change=True,
)
return success
def run_roll_report(data: dataBlob, instrument_code: str):
config = roll_report_config.new_config_with_modified_output("console")
config.modify_kwargs(instrument_code=instrument_code)
report_results = run_report_with_data_blob(config, data)
if report_results is failure:
raise Exception("Can't run roll report, so can't change status")
def get_roll_state_required(roll_data: RollDataWithStateReporting) -> RollState:
invalid_input = True
while invalid_input:
roll_data.display_roll_query_banner()
roll_state_required_as_str = print_menu_of_values_and_get_response(
roll_data.allowable_roll_states_as_list_of_str
)
if roll_state_required_as_str != roll_data.original_roll_status_as_string:
# check if changing
print("")
            check = input(
                "Changing roll state for %s from %s to %s: are you sure? (y = yes / n = choose again / <RETURN> = exit): "
                % (
                    roll_data.instrument_code,
                    roll_data.original_roll_status_as_string,
                    roll_state_required_as_str,
                )
            )
print("")
if check == "y":
# happy
return RollState[roll_state_required_as_str]
elif check == "":
print("Okay, we're done")
return no_change_required
else:
print("OK. Choose again.")
# back to top of loop
continue
else:
print("No change")
return no_change_required
def setup_roll_data_with_state_reporting(
data: dataBlob, instrument_code: str
) -> RollDataWithStateReporting:
diag_positions = diagPositions(data)
diag_contracts = dataContracts(data)
original_roll_status = diag_positions.get_roll_state(instrument_code)
priced_contract_date = diag_contracts.get_priced_contract_id(instrument_code)
contract = futuresContract(instrument_code, priced_contract_date)
position_priced_contract = int(diag_positions.get_position_for_contract(contract))
allowable_roll_states = allowable_roll_state_from_current_and_position(
original_roll_status, position_priced_contract
)
days_until_roll = diag_contracts.days_until_roll(instrument_code)
relative_volume = relative_volume_in_forward_contract_versus_price(
data=data, instrument_code=instrument_code
)
if np.isnan(relative_volume):
relative_volume = 0.0
roll_data_with_state = RollDataWithStateReporting(
instrument_code=instrument_code,
original_roll_status=original_roll_status,
position_priced_contract=position_priced_contract,
allowable_roll_states_as_list_of_str=allowable_roll_states,
days_until_roll=days_until_roll,
relative_volume=relative_volume,
)
return roll_data_with_state
def modify_roll_state(
data: dataBlob,
instrument_code: str,
original_roll_state: RollState,
roll_state_required: RollState,
confirm_adjusted_price_change: bool = True,
):
if roll_state_required is no_change_required:
return
if roll_state_required is original_roll_state:
return
update_positions = updatePositions(data)
update_positions.set_roll_state(instrument_code, roll_state_required)
if roll_state_required is roll_adj_state:
state_change_to_roll_adjusted_prices(
data=data,
instrument_code=instrument_code,
original_roll_state=original_roll_state,
confirm_adjusted_price_change=confirm_adjusted_price_change,
)
def state_change_to_roll_adjusted_prices(
data: dataBlob,
instrument_code: str,
original_roll_state: RollState,
confirm_adjusted_price_change: bool = True,
):
# Going to roll adjusted prices
update_positions = updatePositions(data)
roll_result = roll_adjusted_and_multiple_prices(
data=data,
instrument_code=instrument_code,
confirm_adjusted_price_change=confirm_adjusted_price_change,
)
if roll_result is success:
# Return the state back to default (no roll) state
data.log.msg(
"Successful roll! Returning roll state of %s to %s"
% (instrument_code, default_state)
)
update_positions.set_roll_state(instrument_code, default_state)
else:
data.log.msg(
"Something has gone wrong with rolling adjusted of %s! Returning roll state to previous state of %s"
% (instrument_code, original_roll_state)
)
update_positions.set_roll_state(instrument_code, original_roll_state)
def roll_adjusted_and_multiple_prices(
data: dataBlob, instrument_code: str, confirm_adjusted_price_change: bool = True
) -> status:
"""
Roll multiple and adjusted prices
THE POSITION MUST BE ZERO IN THE PRICED CONTRACT! WE DON'T CHECK THIS HERE
:param data: dataBlob
:param instrument_code: str
:return:
"""
print(landing_strip(80))
print("")
print("Rolling adjusted prices!")
print("")
try:
rolling_adj_and_mult_object = rollingAdjustedAndMultiplePrices(
data, instrument_code
)
# this will also do the roll calculations
rolling_adj_and_mult_object.compare_old_and_new_prices()
except Exception as e:
print("Error %s when trying to calculate roll prices" % str(e))
return failure
if confirm_adjusted_price_change:
confirm_roll = input(
"Confirm roll adjusted prices for %s are you sure y/n:" % instrument_code
)
if confirm_roll != "y":
print(
"\nUSER DID NOT WANT TO ROLL: Setting roll status back to previous state"
)
return failure
else:
print_with_landing_strips_around("AUTO ROLLING - NO USER CONFIRMATION REQUIRED")
try:
rolling_adj_and_mult_object.write_new_rolled_data()
except Exception as e:
data.log.warn(
"%s went wrong when rolling: Going to roll-back to original multiple/adjusted prices"
% e
)
rolling_adj_and_mult_object.rollback()
return failure
return success
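# --- Illustrative usage (reviewer addition, not part of the original module) ---
# A minimal sketch, assuming a configured pysystemtrade production environment:
#
# if __name__ == "__main__":
#     interactive_update_roll_status()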
|
ahalsall/pysystrade
|
sysproduction/interactive_update_roll_status.py
|
interactive_update_roll_status.py
|
py
| 18,575 |
python
|
en
|
code
| 4 |
github-code
|
6
|
74667821627
|
from typing import List, Optional
from fastapi import Depends
from ..service import Service, get_service
from app.utils import AppModel
from . import router
class InsideObjectResponse(AppModel):
    _id: str
    address: str
    type: str
    price: int
    area: float
    rooms_count: int
    location: dict
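    # NOTE (reviewer assumption): plain pydantic treats a leading-underscore field
    # like `_id` as private and may omit it from responses; this relies on AppModel
    # handling it (e.g. via an alias for Mongo's `_id`).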
class GenResponse(AppModel):
    total: int
    objects: List[InsideObjectResponse]
@router.get("/shanyraks")
def get_shanyraks(
    limit: int,
    offset: int,
    type: Optional[str] = None,
    rooms_count: Optional[int] = None,
    price_from: Optional[int] = None,
    price_until: Optional[int] = None,
    svc: Service = Depends(get_service),
):
val = svc.repository.pagination(limit, offset, type, rooms_count, price_from, price_until)
return GenResponse(**val)
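# --- Illustrative usage (reviewer addition, not part of the original file) -----
# A minimal sketch with FastAPI's TestClient; the `app` import path is hypothetical:
#
# from fastapi.testclient import TestClient
# from app.main import app
#
# client = TestClient(app)
# resp = client.get("/shanyraks", params={"limit": 10, "offset": 0, "type": "sale"})
# assert resp.status_code == 200
# print(resp.json()["total"])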
|
MamushevArup/code-climb-ai-back
|
app/shanyrak/router/router_get_pagination.py
|
router_get_pagination.py
|
py
| 770 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38308536356
|
from src import create_app
from src import db
from src.models.wifi import Wifi
from src.models.device import Device
from src.models.threshold import Threshold
from src.models.measurement import Measurement
from .network_setup import NetworkSetUp
from .default_data import threshold_data, wifi_data, measurement_data
class DatabaseSetUp():
"""Set up the initial boot up of the Raspberry Pi Sensor."""
    def __init__(self, app_env='development', *args, **kwargs):
        self.db = db
        self.app = create_app(app_env)
        self.app_context = self.app.app_context()
        self.app_context.push()
        # NOTE: setup_rpi() is no longer called here; it queries tables that only
        # exist after init_db(), so the __main__ block below runs the steps in order.
def init_db(self):
self.db.create_all()
def get_or_create_threshold(self):
threshold = Threshold.query.first()
if not threshold:
threshold = Threshold(threshold_data)
threshold.save()
return threshold
def get_or_create_wifi(self):
wifi = Wifi.query.first()
if not wifi:
wifi = Wifi(wifi_data)
wifi.save()
return wifi
def get_or_create_measurement(self):
measurement = Measurement.query.first()
if not measurement:
measurement = Measurement(measurement_data)
measurement.save()
return measurement
def get_or_create_device(self):
device = Device.query.first()
if not device:
            # get network data; if no interface name is specified, get_network_info() uses the first wlan
network = NetworkSetUp()
ip_addr, netmask, mac_addr = network.get_network_info()
data = {'mac_addr':mac_addr, 'netmask':netmask, 'ip_addr':ip_addr}
# create the device
device = Device(data)
device.save()
return device
    # ONCE THE DATABASE IS READY TO ACCEPT CONNECTIONS
    def setup_rpi(self):
        data = {}
        # 1 - set the Raspberry Pi device information: create a device in db
        device = self.get_or_create_device()
        # 2 - check or create the threshold
        threshold = self.get_or_create_threshold()
        data.update(threshold=threshold)
        # 3 - check or create the wifi
        wifi = self.get_or_create_wifi()
        data.update(wifi=wifi)
        # 4 - get or create the first measurement (for testing purposes)
        measurement = self.get_or_create_measurement()
        # update device with the new threshold, wifi and measurement
device.measurements.extend([measurement])
device.update(data)
# Testing
print(
device.ip_addr, device.mac_addr, device.netmask, device.threshold, device.wifi,
device.threshold.soil_ph_min, device.threshold.device_id,
device.wifi.ssid, device.wifi.password, device.wifi.device_id,
device.measurements,
device.measurements[0].air_temp,
)
if __name__ == '__main__':
    # 0 db - instantiate obj
    db_setup = DatabaseSetUp()
# 1 db - setup and installation (init, migrate, upgrade)
db_setup.init_db()
# 2 db - add default data (device, threshold, wifi, measurement)
db_setup.setup_rpi()
|
Fantaso/multi-raspberry-flask-apis-server
|
boot_setup/db_setup.py
|
db_setup.py
|
py
| 3,284 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42624248567
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'plusMinus' function below.
#
# The function accepts INTEGER_ARRAY arr as parameter.
#
n = int(input())  # consumes the first input line (the element count)
def plusMinus(arr):
    # Count positive, negative and zero entries, then print each ratio
    n = len(arr)
    p = m = z = 0
    for x in arr:
        if x > 0:
            p += 1
        elif x < 0:
            m += 1
        else:
            z += 1
    print(p / n)
    print(m / n)
    print(z / n)
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
plusMinus(arr)
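# Example (HackerRank sample): for the input
#   6
#   -4 3 -9 0 4 1
# the three printed ratios are 0.5, 0.3333333333333333 and 0.16666666666666666.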
|
sarmistha1619/HackerRank---Algorithm
|
Warmup/6. HRSa - Plus Minus.py
|
6. HRSa - Plus Minus.py
|
py
| 564 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9512551614
|
# Return the k-th node from the end of the list
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Time complexity O(n), space complexity O(1)
class Solution1:
    def FindKthToTail(self, pHead: ListNode, k: int) -> ListNode:
        # write code here
        l = self.size(pHead)
        if k > l:
            return None
        t = l - k
        while t > 0:
            pHead = pHead.next
            t = t - 1
        return pHead
    def size(self, pHead):
        l = 0
        while pHead:
            l = l + 1
            pHead = pHead.next
        return l
# Fast/slow pointers: advance the fast pointer k steps first, then move both together; when the fast pointer reaches the end, the slow pointer is the answer
class Solution2:
def FindKthToTail(self, pHead: ListNode, k: int) -> ListNode:
fast = slow = pHead
while k > 0:
if fast:
fast = fast.next
k = k - 1
else:
return None
while fast:
fast = fast.next
slow = slow.next
return slow
# Stack: push all the nodes of the list onto a stack; the k-th node from the end is then the k-th element from the top (stack[-k])
class Solution3:
def FindKthToTail(self, pHead: ListNode, k: int) -> ListNode:
        stack = []
        while pHead:
            stack.append(pHead)
            pHead = pHead.next
        if k > len(stack) or not k:
            return None
return stack[-k]
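# --- Illustrative check (reviewer addition, not part of the original file) -----
# Build 1 -> 2 -> 3 -> 4 -> 5 and ask each solution for the 2nd node from the end.
if __name__ == '__main__':
    head = ListNode(1)
    node = head
    for v in (2, 3, 4, 5):
        node.next = ListNode(v)
        node = node.next
    for solution in (Solution1(), Solution2(), Solution3()):
        assert solution.FindKthToTail(head, 2).val == 4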
|
guozhiyan1/data-structure
|
linklist/six.py
|
six.py
|
py
| 1,519 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41636163192
|
"""Insert Noop: insert a statement that doesn't affect any other variables."""
from refactorings.base import BaseTransformation, JoernTransformation, SrcMLTransformation
from refactorings.random_word import get_random_word, get_random_typename_value
import string
from srcml import E
from lxml import etree
import logging
logger = logging.getLogger(__name__)
type_to_literaltype = {
"int": 'number',
"char": 'char',
"char *": 'string',
}
tagnames = ['expr_stmt', 'decl_stmt', 'for', 'do', 'while', 'if_stmt', 'switch', 'label']
class InsertNoop(SrcMLTransformation):
def get_targets(self, **kwargs):
targets = []
for tagname in tagnames:
targets += self.srcml.xp(f'//src:{tagname}')
return targets
def _apply(self, target):
new_name = get_random_word()
typename, value = get_random_typename_value()
literaltype = type_to_literaltype[typename]
new_decl_stmt = E.decl_stmt(
E.decl(
E.type(
E.name(typename, ' '),
E.name(new_name, ' '),
E.init(
'= ',
E.expr(
E.literal(value, {"type": literaltype})
)
),
),
';'
),
target.tail
)
logger.debug(etree.tostring(new_decl_stmt))
try:
target_idx = target.getparent().index(target)
target.getparent().insert(target_idx+1, new_decl_stmt)
self.srcml.apply_changes()
except Exception:
self.srcml.revert_changes()
raise
new_text = self.srcml.load_c_code()
return new_text.splitlines(keepends=True)
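# Illustrative effect (reviewer sketch, inferred from the builders above): given a
# target statement such as `x = compute();`, _apply inserts a fresh declaration like
#   int qzvnb = 42;
# right after it, with the name, typename and literal drawn from the random_word helpers.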
|
bstee615/cfactor
|
refactorings/insert_noop.py
|
insert_noop.py
|
py
| 1,800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8797452881
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("C:/Users/Admin/OneDrive/Desktop/decision tree/Iris.csv")
df.head()
df.isnull().sum()
df.shape
df.info()
df.describe()
df.drop('Id',axis=1, inplace=True)
df.shape
df['Species'].value_counts().plot(kind='pie', autopct="%.1f%%")
# 'Species' is non-numeric, so restrict the correlation matrix to numeric columns
# (newer pandas versions raise instead of dropping it silently)
corr = df.select_dtypes(include='number').corr()
sns.heatmap(corr, annot=True)
x = df.iloc[:,:4].values
y = df.iloc[:,4:5]
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.20,random_state=0)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print(y_pred)
print("Accuracy: ", metrics.accuracy_score(y_test,y_pred))
new_data = [[3.5, 3.0, 1.2, 1.7]]
y_pred = model.predict(new_data)
print(y_pred)
from sklearn import tree
import matplotlib.pyplot as plt
plt.figure(figsize = (20,10))
tree.plot_tree(model, filled=True, rounded=True)
plt.show()
|
ShreyasiDesai/LGMVIP-DataScience
|
decition tree.py
|
decition tree.py
|
py
| 1,182 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5893119020
|
weight = float(input("what is your weight in kg? "))
height = float(input("what is your height in m? "))
BMI = weight / (height ** 2)
if BMI < 18.5:
    print("you're underweight")
elif BMI < 25:
    print("you have a normal weight")
elif BMI < 30:
    print("you're overweight")
elif BMI < 35:
    print("you're obese")
else:
    print("you're clinically obese")
|
wandexdev/ProjectsInPython
|
Day-3/task3of3.py
|
task3of3.py
|
py
| 362 |
python
|
en
|
code
| 2 |
github-code
|
6
|
21396845249
|
from typing import Dict
from starlette.types import ASGIApp, Receive, Scope, Send
class AsgiDispatcher:
def __init__(self, patterns: Dict[str, ASGIApp], default: ASGIApp):
self.patterns = patterns
self.default_app = default
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
app = None
request_path = scope['path']
for pattern_prefix, pattern_app in self.patterns.items():
if request_path.startswith(pattern_prefix):
if scope['type'] in {'http', 'websocket'}:
app = pattern_app
break
if app is None:
app = self.default_app
await app(scope, receive, send)
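# --- Illustrative usage (reviewer addition, not part of the original file) -----
# A minimal sketch: route paths under '/metrics' to one ASGI app and everything
# else to a default app; the two FastAPI apps here are hypothetical.
#
# from fastapi import FastAPI
#
# metrics_app = FastAPI()
# main_app = FastAPI()
# dispatcher = AsgiDispatcher(patterns={'/metrics': metrics_app}, default=main_app)
# # then e.g.: uvicorn.run(dispatcher, host='0.0.0.0', port=8000)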
|
TheRacetrack/racetrack
|
racetrack_commons/racetrack_commons/api/asgi/dispatcher.py
|
dispatcher.py
|
py
| 730 |
python
|
en
|
code
| 27 |
github-code
|
6
|
38354134434
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch.nn as nn
import torch
from transformers.models.dpr.modeling_dpr import DPRReaderOutput
from transformers.modeling_outputs import QuestionAnsweringModelOutput, ModelOutput, SequenceClassifierOutput
from transformers.models.vilt.modeling_vilt import ViltForImagesAndTextClassificationOutput
from transformers import VisualBertForQuestionAnswering, VisualBertForVisualReasoning, LxmertForQuestionAnswering
from transformers import ViltProcessor, ViltForImagesAndTextClassification
from transformers import BertForQuestionAnswering
from meerqat.train.losses import _calc_mml
class Trainee(nn.Module):
"""Base class for all Trainee models (to be trained by a Trainer)
Should implement a forward function that returns loss between output and target (as a tuple, dict or ModelOutput)
The actual forward pass should be done using the model attribute
"""
def __init__(self, model):
super().__init__()
self.model = model
@dataclass
class DPRReaderForQuestionAnsweringOutput(DPRReaderOutput):
"""Same as DPRReaderOutput with an extra loss attribute (or as QuestionAnsweringModelOutput with relevance_logits)
N. B. unfortunately we have to redefine everything so that loss is the first attribute
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
relevance_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MultiPassageBERTOutput(QuestionAnsweringModelOutput):
"""
Same as QuestionAnsweringModelOutput but with start and end log-probabilities
(equivalent to softmax(start_logits) when there is only one passage per question)
"""
start_log_probs: torch.FloatTensor = None
end_log_probs: torch.FloatTensor = None
@dataclass
class BERTRankerOutput(QuestionAnsweringModelOutput):
"""
Same as MultiPassageBERTOutput but with relevance_logits important for ranking
"""
loss: Optional[torch.FloatTensor] = None
relevance_logits: torch.FloatTensor = None
@dataclass
class DPRBiEncoderOutput(ModelOutput):
"""
Outputs from the question and context encoders
(same as DPRQuestionEncoderOutput, DPRContextEncoderOutput with prefixes)
"""
question_pooler_output: Optional[torch.FloatTensor] = None
question_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
question_attentions: Optional[Tuple[torch.FloatTensor]] = None
context_pooler_output: Optional[torch.FloatTensor] = None
context_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
context_attentions: Optional[Tuple[torch.FloatTensor]] = None
class DPRBiEncoder(nn.Module):
"""Adapted from https://github.com/facebookresearch/DPR/blob/main/dpr/models/biencoder.py"""
def __init__(self, question_model, context_model):
"""
Parameters
----------
question_model: transformers.DPRQuestionEncoder
Encoder based on BERT used to encode the question/query
context_model: transformers.DPRContextEncoder
Encoder based on BERT used to encode the context/evidence/passage
('context' is confusing IMO but I keep it for consistency with DPR and transformers)
"""
super().__init__()
self.question_model = question_model
self.context_model = context_model
def forward(self, question_inputs, context_inputs, return_dict=None):
"""
Embeds questions and contexts with their respective model and returns the embeddings.
N - number of questions in a batch
M - number of passages per questions
L - sequence length
d - dimension of the model/embeddings
Parameters
----------
question_inputs: dict[torch.LongTensor]
input_ids: torch.LongTensor
shape (N, L)
usual BERT inputs, see transformers.DPRQuestionEncoder
context_inputs: dict[torch.LongTensor]
input_ids: torch.LongTensor
shape (N*M, L)
usual BERT inputs, see transformers.DPRContextEncoder
return_dict: bool, optional
"""
return_dict = return_dict if return_dict is not None else self.question_model.config.use_return_dict
# embed questions and contexts
question_outputs = self.question_model(**question_inputs)
context_outputs = self.context_model(**context_inputs)
return DPRBiEncoderOutput(
question_pooler_output=question_outputs.pooler_output,
question_hidden_states=question_outputs.hidden_states,
question_attentions=question_outputs.attentions,
context_pooler_output=context_outputs.pooler_output,
context_hidden_states=context_outputs.hidden_states,
context_attentions=context_outputs.attentions)
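# --- Illustrative sketch (reviewer addition, not part of the original file) ----
# Downstream, DPR-style retrieval typically scores question/passage pairs with a
# dot product of the two pooler outputs; given a DPRBiEncoderOutput `out`:
#
#   scores = out.question_pooler_output @ out.context_pooler_output.T  # (N, N*M)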
class DPRReaderForQuestionAnswering(Trainee):
def forward(self,
input_ids, attention_mask,
start_positions=None, end_positions=None, answer_mask=None,
return_dict=None, **kwargs):
"""Based on transformers.BertForQuestionAnswering and dpr.models.Reader"""
return_dict = return_dict if return_dict is not None else self.model.config.use_return_dict
# notations: N - number of questions in a batch, M - number of passages per questions, L - sequence length
N, M, L = input_ids.size()
outputs = self.model(input_ids, attention_mask, return_dict=True, **kwargs)
# compute loss
total_loss = None
if start_positions is not None and end_positions is not None:
start_positions = start_positions.view(N * M, -1)
end_positions = end_positions.view(N * M, -1)
answer_mask = answer_mask.view(N * M, -1)
start_logits, end_logits, relevance_logits = outputs[:3]
start_logits = start_logits.view(N * M, -1)
end_logits = end_logits.view(N * M, -1)
relevance_logits = relevance_logits.view(N * M)
answer_mask = answer_mask.to(device=relevance_logits.device, dtype=torch.float32)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = nn.CrossEntropyLoss(reduction='none', ignore_index=ignored_index)
# compute switch loss
relevance_logits = relevance_logits.view(N, M)
switch_labels = torch.zeros(N, dtype=torch.long, device=relevance_logits.device)
switch_loss = torch.sum(loss_fct(relevance_logits, switch_labels))
# compute span loss
start_losses = [(loss_fct(start_logits, _start_positions) * _span_mask)
for (_start_positions, _span_mask)
in zip(torch.unbind(start_positions, dim=1), torch.unbind(answer_mask, dim=1))]
end_losses = [(loss_fct(end_logits, _end_positions) * _span_mask)
for (_end_positions, _span_mask)
in zip(torch.unbind(end_positions, dim=1), torch.unbind(answer_mask, dim=1))]
loss_tensor = torch.cat([t.unsqueeze(1) for t in start_losses], dim=1) + \
torch.cat([t.unsqueeze(1) for t in end_losses], dim=1)
loss_tensor = loss_tensor.view(N, M, -1).max(dim=1)[0]
span_loss = _calc_mml(loss_tensor)
total_loss = span_loss + switch_loss
if not return_dict:
outputs = outputs.to_tuple()
return ((total_loss,) + outputs) if total_loss is not None else outputs
return DPRReaderForQuestionAnsweringOutput(loss=total_loss, **outputs)
class MultiPassageBERT(BertForQuestionAnswering):
"""
    PyTorch/Transformers implementation of Multi-passage BERT by Wang et al. (based on the global normalization by Clark et al.)
i.e. groups passages per question before computing the softmax (and the NLL loss)
so that spans scores are comparable across passages
Code based on transformers.BertForQuestionAnswering, dpr.models.Reader
and https://github.com/allenai/document-qa/blob/master/docqa/nn/span_prediction.py
N. B. differences with DPRReaderForQuestionAnswering:
* no projection layer between BERT and QA-extraction
* no re-ranking (TODO implement MultiPassageDPRReader?)
* global normalization
References
----------
@inproceedings{wang_multi-passage_2019,
address = {Hong Kong, China},
title = {Multi-passage {BERT}: {A} {Globally} {Normalized} {BERT} {Model} for {Open}-domain {Question} {Answering}},
shorttitle = {Multi-passage {BERT}},
url = {https://www.aclweb.org/anthology/D19-1599},
doi = {10.18653/v1/D19-1599},
urldate = {2021-06-14},
booktitle = {Proceedings of the 2019 {Conference} on {Empirical} {Methods} in {Natural} {Language} {Processing} and the 9th {International} {Joint} {Conference} on {Natural} {Language} {Processing} ({EMNLP}-{IJCNLP})},
publisher = {Association for Computational Linguistics},
author = {Wang, Zhiguo and Ng, Patrick and Ma, Xiaofei and Nallapati, Ramesh and Xiang, Bing},
month = nov,
year = {2019},
pages = {5878--5882}
}
@inproceedings{clark_simple_2018,
address = {Melbourne, Australia},
title = {Simple and {Effective} {Multi}-{Paragraph} {Reading} {Comprehension}},
url = {https://aclanthology.org/P18-1078},
doi = {10.18653/v1/P18-1078},
urldate = {2021-07-08},
booktitle = {Proceedings of the 56th {Annual} {Meeting} of the {Association} for {Computational} {Linguistics} ({Volume} 1: {Long} {Papers})},
publisher = {Association for Computational Linguistics},
author = {Clark, Christopher and Gardner, Matt},
month = jul,
year = {2018},
pages = {845--855},
}
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.log_softmax = nn.LogSoftmax(1)
def forward(self,
input_ids,
start_positions=None, end_positions=None, answer_mask=None,
return_dict=None, **kwargs):
"""
notations:
N - number of distinct questions
M - number of passages per question in a batch
L - sequence length
Parameters
----------
input_ids: Tensor[int]
shape (N * M, L)
There should always be a constant number of passages (relevant or not) per question
start_positions, end_positions: Tensor[int], optional
shape (N, M, max_n_answers)
The answer might be found several time in the same passage, maximum `max_n_answers` times
Defaults to None (i.e. don’t compute the loss)
answer_mask: Tensor[int], optional
shape (N, M, max_n_answers)
Used to mask the loss for answers that are not `max_n_answers` times in the passage
Required if start_positions and end_positions are specified
        **kwargs: additional arguments are passed through to BERT (expected to be shaped like input_ids)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(input_ids, return_dict=True, **kwargs)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
# compute loss
total_loss, start_log_probs, end_log_probs = None, None, None
if start_positions is not None and end_positions is not None:
n_times_m, L = input_ids.size()
M = start_positions.size(1)
assert n_times_m % M == 0
N = n_times_m//M
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = L
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = nn.NLLLoss(reduction='none', ignore_index=ignored_index)
# reshape from (N * M, L) to (N, M * L) so that all M passages related to the same question
# will share the same softmax normalization
start_logits, end_logits = start_logits.view(N, M*L), end_logits.view(N, M*L)
start_log_probs, end_log_probs = self.log_softmax(start_logits), self.log_softmax(end_logits)
# after computing the softmax, reshape back to (N * M, L)
# because the last dimension, L, must match the position indices (i.e. class label) in start_positions, end_positions
start_log_probs, end_log_probs = start_log_probs.view(N*M, L), end_log_probs.view(N*M, L)
start_logits, end_logits = start_logits.view(N*M, L), end_logits.view(N*M, L)
# reshape to match model output
start_positions, end_positions = start_positions.view(N*M, -1), end_positions.view(N*M, -1)
answer_mask = answer_mask.to(device=input_ids.device, dtype=torch.float32).view(N*M, -1)
# compute span loss for each answer position in passage (in range `max_n_answers`)
start_losses = [(loss_fct(start_log_probs, _start_positions) * _span_mask)
for (_start_positions, _span_mask)
in zip(torch.unbind(start_positions, dim=1), torch.unbind(answer_mask, dim=1))]
end_losses = [(loss_fct(end_log_probs, _end_positions) * _span_mask)
for (_end_positions, _span_mask)
in zip(torch.unbind(end_positions, dim=1), torch.unbind(answer_mask, dim=1))]
loss_tensor = torch.cat([t.unsqueeze(1) for t in start_losses], dim=1) + \
torch.cat([t.unsqueeze(1) for t in end_losses], dim=1)
# keep the maximum per passage for each question
loss_tensor = loss_tensor.view(N, M, -1).max(dim=1)[0]
total_loss = _calc_mml(loss_tensor)
if not return_dict:
output = (start_logits, end_logits, start_log_probs, end_log_probs) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return MultiPassageBERTOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
start_log_probs=start_log_probs,
end_log_probs=end_log_probs,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
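# --- Illustrative sketch (reviewer addition, not part of the original file) ----
# The global normalization above, in isolation, with N=2 questions, M=3 passages
# and L=4 tokens:
#
#   start_logits = torch.randn(2 * 3, 4)                       # (N*M, L)
#   log_probs = torch.log_softmax(start_logits.view(2, 3 * 4), dim=1)
#   log_probs = log_probs.view(2 * 3, 4)                       # back to (N*M, L)
#
# each question's 3*4 = 12 positions now share a single softmax normalization, so
# span scores are comparable across that question's passages.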
class BERTRanker(BertForQuestionAnswering):
"""
BERT-based Ranker Based on transformers.BertForQuestionAnswering
and https://github.com/allenai/document-qa/blob/master/docqa/nn/span_prediction.py
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.qa_classifier = nn.Linear(self.config.hidden_size, 1)
def forward(self,
input_ids,
switch_labels=None,
N=None, M=None,
indices=None, relevants=None,
return_dict=None, **kwargs):
"""
notations:
N - number of distinct questions
M - number of passages per question in a batch
L - sequence length
Parameters
----------
input_ids: Tensor[int]
shape (N * M, L)
There should always be a constant number of passages (relevant or not) per question
        **kwargs: additional arguments are passed through to BERT (expected to be shaped like input_ids)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(input_ids, return_dict=True, **kwargs)
sequence_output = outputs[0]
relevance_logits = self.qa_classifier(sequence_output[:, 0, :])
switch_loss = None
if len(switch_labels) > 0:
loss_fct = nn.CrossEntropyLoss(reduction='mean')
# compute switch loss
relevance_logits = relevance_logits.view(N, M)
switch_loss = loss_fct(relevance_logits, switch_labels)
if not return_dict:
            output = (relevance_logits,) + outputs[2:]  # note the comma: a one-element tuple, not bare parentheses
return ((switch_loss,) + output) if switch_loss is not None else output
return BERTRankerOutput(
loss=switch_loss,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
relevance_logits=relevance_logits,
)
class ViLTRanker(ViltForImagesAndTextClassification):
"""
ViLT-based Ranker Based on transformers.ViltForImagesAndTextClassification
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Classifier head
num_images = self.config.num_images
self.qa_classifier = nn.Sequential(
nn.Linear(self.config.hidden_size * num_images, self.config.hidden_size * num_images),
nn.LayerNorm(self.config.hidden_size * num_images),
nn.GELU(),
nn.Linear(self.config.hidden_size * num_images, 1),
)
def forward(self,
input_ids, pixel_values, pixel_mask,
output_attentions=None,
output_hidden_states=None,
switch_labels=None,
N=None, M=None,
indices=None, relevants=None,
return_dict=None, **kwargs):
"""
notations:
N - number of distinct questions
M - number of passages per question in a batch
L - sequence length
Parameters
----------
input_ids: Tensor[int]
shape (N * M, L)
There should always be a constant number of passages (relevant or not) per question
        **kwargs: additional arguments are passed through to ViLT (expected to be shaped like input_ids)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is not None and pixel_values.ndim == 4:
# add dummy num_images dimension
pixel_values = pixel_values.unsqueeze(1)
num_images = pixel_values.shape[1]
if num_images != self.config.num_images:
raise ValueError(
"Make sure to match the number of images in the model with the number of images in the input."
)
pooler_outputs = []
hidden_states = [] if output_hidden_states else None
attentions = [] if output_attentions else None
for i in range(num_images):
# forward every image through the model
outputs = self.vilt(
input_ids,
pixel_values=pixel_values[:, i, :, :, :],
pixel_mask=pixel_mask[:, i, :, :] if pixel_mask is not None else None,
image_token_type_idx=i + 1,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs
)
pooler_output = outputs.pooler_output if return_dict else outputs[1]
pooler_outputs.append(pooler_output)
if output_hidden_states:
hidden_states.append(outputs.hidden_states)
if output_attentions:
attentions.append(outputs.attentions)
pooled_output = torch.cat(pooler_outputs, dim=-1)
relevance_logits = self.qa_classifier(pooled_output)
switch_loss = None
if len(switch_labels) > 0:
loss_fct = nn.CrossEntropyLoss(reduction='mean')
# compute switch loss
relevance_logits = relevance_logits.view(N, M)
switch_loss = loss_fct(relevance_logits, switch_labels)
if not return_dict:
output = (relevance_logits, hidden_states, attentions)
return ((switch_loss,) + output) if switch_loss is not None else output
return ViltForImagesAndTextClassificationOutput(
loss=switch_loss,
logits=relevance_logits,
hidden_states=hidden_states,
attentions=attentions,
)
|
mdsalem17/reranking
|
meerqat/train/trainee.py
|
trainee.py
|
py
| 21,260 |
python
|
en
|
code
| null |
github-code
|
6
|
27477751575
|
import re
def parse(html):
# define the regex pattern for the url
url_pattern = r"https?://(?:www\.)?youtube\.com/embed/(\w+)"
# use re.search to find the first matching url in the HTML
match = re.search(url_pattern, html, re.IGNORECASE)
if match:
# extract the video ID from the matched url
video_id = match.group(1)
# Generate the youtu.be url
url = f"https://youtu.be/{video_id}"
return url
def main():
# get user input (HTML snippet)
html = input("HTML: ").strip()
if html.startswith("https://"):
return None
else:
# call the parse function for extraction and print output
result = parse(html)
print(result)
if __name__ == "__main__":
main()
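# Example: given the snippet
#   <iframe src="https://www.youtube.com/embed/xvFZjo5PgG0"></iframe>
# parse() returns "https://youtu.be/xvFZjo5PgG0".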
|
iZusi/CS50P-Portfolio
|
problem_sets/problem_set7/watch/watch.py
|
watch.py
|
py
| 766 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74165330428
|
from tkinter import *
from tkinter import ttk
from tkinter import Tk
from tkinter import messagebox  # needed for the exit confirmation dialog
from PIL import Image, ImageTk
from student import student
import os
import tkinter
from train import Train
from facereco import Face_Reco
from attendance import atendance
from developer import developer
from help import help
class facerecognitionsystem:
def __init__(self, root):
self.root = root
self.root.geometry("1530x790+0+0")
self.root.title("Face Recogn")
img = Image.open(r"C:\Users\Dell\Desktop\tiet.jfif")
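        # NOTE (reviewer): Image.ANTIALIAS was removed in Pillow 10; on newer
        # Pillow replace it with Image.LANCZOS throughout this file.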
img = img.resize((500,130),Image.ANTIALIAS)
self.photoimg = ImageTk.PhotoImage(img)
first_label = Label(self.root,image = self.photoimg)
first_label.place(x=0,y=0,width=500,height=160)
img1 = Image.open(r"C:\Users\Dell\Desktop\ss.jpg")
img1 = img1.resize((500,130),Image.ANTIALIAS)
self.photoimg1 = ImageTk.PhotoImage(img1)
first_label = Label(self.root,image = self.photoimg1)
first_label.place(x=500,y=0,width=500,height=160)
img2 = Image.open(r"C:\Users\Dell\Desktop\sjd.jfif")
img2 = img2.resize((500,130),Image.ANTIALIAS)
self.photoimg2 = ImageTk.PhotoImage(img2)
first_label = Label(self.root,image = self.photoimg2)
first_label.place(x=1000,y=0,width=500,height=160)
img3 = Image.open(r"C:\Users\Dell\Desktop\bg.jpg")
img3 = img3.resize((1530,630),Image.ANTIALIAS)
self.photoimg3 = ImageTk.PhotoImage(img3)
bg_label = Label(self.root,image = self.photoimg3)
bg_label.place(x=0,y=160,width=1530,height=630)
title_lbl = Label(bg_label, text ="FACE RECOGNITION SYSYTEM ", font=("times new roman", 35, "bold"), bg = "white", fg = "green")
title_lbl.place(x=0,y=0,width=1530,height=100)
#student button
img4 = Image.open(r"C:\Users\Dell\Desktop\student details.jfif")
img4 = img4.resize((160,160),Image.ANTIALIAS)
self.photoimg4 = ImageTk.PhotoImage(img4)
b1 = Button(bg_label, image = self.photoimg4, command = self.student_details, cursor ="hand2")
b1.place(x=150,y=80,width=160,height=160)
b1_1 = Button(bg_label, text = "Student Details",command = self.student_details , cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
b1_1.place(x=150,y=240,width=160,height=40)
#detect faces
img5 = Image.open(r"C:\Users\Dell\Desktop\fr.jfif")
img5 = img5.resize((160,160),Image.ANTIALIAS)
self.photoimg5 = ImageTk.PhotoImage(img5)
b2 = Button(bg_label, image = self.photoimg5, cursor ="hand2",command=self.face_data)
b2.place(x=400,y=80,width=160,height=160)
b2_1 = Button(bg_label, text = "Face Detector",command=self.face_data, cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
b2_1.place(x=400,y=240,width=160,height=40)
img6 = Image.open(r"C:\Users\Dell\Desktop\attendance.jfif")
img6 = img6.resize((160,160),Image.ANTIALIAS)
self.photoimg6 = ImageTk.PhotoImage(img6)
b3 = Button(bg_label, image = self.photoimg6, cursor ="hand2",command=self.attendance_data,)
b3.place(x=700,y=80,width=160,height=160)
b3_1 = Button(bg_label, text = "Attendance", cursor ="hand2",command=self.attendance_data, font=("times new roman", 15, "bold"), bg = "white", fg = "green")
b3_1.place(x=700,y=240,width=160,height=40)
img7 = Image.open(r"C:\Users\Dell\Desktop\help desk.png")
img7 = img7.resize((160,160),Image.ANTIALIAS)
self.photoimg7 = ImageTk.PhotoImage(img7)
b4 = Button(bg_label, image = self.photoimg7,command = self.help1, cursor ="hand2")
b4.place(x=1000,y=80,width=160,height=160)
b4_1 = Button(bg_label, text = "Help Desk",command = self.help1, cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
b4_1.place(x=1000,y=240,width=160,height=40)
img8 = Image.open(r"C:\Users\Dell\Pictures\training data.png")
img8 = img8.resize((160,160),Image.ANTIALIAS)
self.photoimg8 = ImageTk.PhotoImage(img8)
b5 = Button(bg_label, image = self.photoimg8, cursor ="hand2", command =self.train_data)
b5.place(x=150,y=350,width=160,height=160)
b5_1 = Button(bg_label, text = "Train Data", cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green",command=self.train_data)
b5_1.place(x=150,y=510,width=160,height=40)
#detect faces
img9 = Image.open(r"C:\Users\Dell\Desktop\photos.jfif")
img9 = img9.resize((160,160),Image.ANTIALIAS)
self.photoimg9 = ImageTk.PhotoImage(img9)
b6 = Button(bg_label, image = self.photoimg9, cursor ="hand2",command =self.open_image)
b6.place(x=400,y=350,width=160,height=160)
b6_1 = Button(bg_label, text = "Photos", cursor ="hand2",command =self.open_image ,font=("times new roman", 15, "bold"), bg = "white", fg = "green")
b6_1.place(x=400,y=510,width=160,height=40)
img10 = Image.open(r"C:\Users\Dell\Pictures\dev.png")
img10 = img10.resize((160,160),Image.ANTIALIAS)
self.photoimg10 = ImageTk.PhotoImage(img10)
b7 = Button(bg_label, image = self.photoimg10, command = self.developer,cursor ="hand2")
b7.place(x=700,y=350,width=160,height=160)
b7_1 = Button(bg_label, text = "Developer",command = self.developer, cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
b7_1.place(x=700,y=510,width=160,height=40)
img11 = Image.open(r"C:\Users\Dell\Desktop\exit.jfif")
img11 = img11.resize((160,160),Image.ANTIALIAS)
self.photoimg11 = ImageTk.PhotoImage(img11)
b8 = Button(bg_label, image = self.photoimg11,command = self.exitf, cursor ="hand2")
b8.place(x=1000,y=350,width=160,height=160)
b8_1 = Button(bg_label, text = "Exit",command = self.exitf, cursor ="hand2", font=("times new roman", 15, "bold"), bg = "white", fg = "green")
b8_1.place(x=1000,y=510,width=160,height=40)
def open_image(self):
os.startfile("data")
#function buttons
def student_details(self):
self.new_window = Toplevel(self.root)
self.app = student(self.new_window)
def train_data(self):
self.new_window = Toplevel(self.root)
self.app = Train(self.new_window)
def face_data(self):
self.new_window = Toplevel(self.root)
self.app = Face_Reco(self.new_window)
def attendance_data(self):
self.new_window = Toplevel(self.root)
self.app = atendance(self.new_window)
def developer(self):
self.new_window = Toplevel(self.root)
self.app = developer(self.new_window)
def help1(self):
self.new_window = Toplevel(self.root)
self.app = help(self.new_window)
    def exitf(self):
        # use a local variable: assigning to self.exitf would shadow this method
        answer = messagebox.askyesno("Face Recognition", "Are you sure you want to exit?", parent=self.root)
        if answer:
            self.root.destroy()
if __name__ == "__main__":
root = Tk()
obj = facerecognitionsystem(root)
root.mainloop()
|
kg300902/Smart-Attendance-System
|
main.py
|
main.py
|
py
| 7,693 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12509808903
|
import requests
city = input('enter the city... ')
api_address = 'https://samples.openweathermap.org/data/2.5/weather?q={},uk&appid=b6907d289e10d714a6e88b30761fae22'.format(
city)
# the city is already formatted into api_address above; appending it again would corrupt the query string
url = api_address
data = requests.get(url).json()
# print(data)
weather = data['weather']
print(weather[0]['description'])
|
Riyam224/techcampus---projects
|
04/testApi.py
|
testApi.py
|
py
| 319 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71735044668
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
# Change the display options
pd.options.display.max_columns = None
pd.options.display.max_rows = None
species_df = pd.read_csv('species_info.csv')
observations_df = pd.read_csv('observations.csv')
# print(species_df.head())
# print(observations_df.head())
# Describe data of species
# print(species_df.dtypes)
species_df = species_df.astype({'category': 'string',
                                'scientific_name': 'string',
                                'common_names': 'string',
                                'conservation_status': 'string'})  # Change our column types (was assigned to an unused variable)
# print(species_df.info())
print(species_df.describe())
print(species_df.category.value_counts())
# print(species_df.conservation_status.value_counts(normalize=True))
# Pie and bar of category
"""
sub_category = species_df.category.value_counts()
plt.figure(figsize=(10, 8))
plt.pie(species_df.category.value_counts().values, labels=species_df.category.value_counts().index, autopct='%1.1f%%')
plt.suptitle('Category of species', fontweight='bold')
plt.savefig('pie_category.png')
plt.show()
"""
# Describe data of observations
# print(observations_df.dtypes)
observations_df = observations_df.astype({'scientific_name': 'string',
'park_name': 'string'})
# print(observations_df.info())
print(observations_df.describe())
# print(observations_df.observations.median())
# print(observations_df.observations.mode())
# print(observations_df.observations.mad())
# The distribution of conservation_status for animals
"""
status_counts = species_df.conservation_status.value_counts()
plt.figure(figsize=(10, 8))
plt.subplot(1, 2, 1)
sns.countplot(x='conservation_status', data=species_df)
plt.xlabel('Conservation status')
plt.ylabel('Count of status')
plt.xticks(rotation=15)
plt.subplot(1, 2, 2)
plt.pie(status_counts, labels=status_counts.index, autopct='%1.1f%%')
plt.axis('equal')
plt.suptitle('Distribution of conservation status for animals', fontweight='bold')
plt.subplots_adjust(wspace=0.5)
plt.savefig('dis_con_status.png')
plt.show()
plt.clf()
"""
# Certain types of species more likely to be endangered
influence = pd.crosstab(species_df.category, species_df.conservation_status)
influence_prop = influence / len(species_df)
print(influence)
print(influence_prop)
influence_marginals = influence_prop.sum(axis=0)
influence_marginals_1 = influence_prop.sum(axis=1)
print(influence_marginals)
print(influence_marginals_1)
chi2, pval, dof, expected = stats.chi2_contingency(influence)
print(expected)
print(chi2)
# Species were spotted the most at each park
"""
merged_df = species_df.merge(observations_df)
grouped_df = merged_df.groupby('category')['observations'].count()
print(grouped_df)
plt.figure(figsize=(15, 8))
plt.subplot(1, 2, 1)
sns.boxplot(x='category', y='observations', data=merged_df)
plt.xlabel('Species')
plt.ylabel('Number of observations')
plt.xticks(rotation=15)
plt.subplot(1, 2, 2)
plt.pie(grouped_df, labels=grouped_df.index, autopct='%1.1f%%')
plt.suptitle('Species were spotted the most at each park', fontweight='bold')
plt.savefig('species_observ.png')
plt.show()
plt.clf()
"""
# sns.histplot(x='observations', data=observations_df)
# plt.show()
print(species_df.scientific_name.mode())
|
Pavich-3/-Biodiversity-in-National-Parks
|
project.py
|
project.py
|
py
| 3,509 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30186234566
|
from openzwave.network import ZWaveNetwork
# Initialize the Z-Wave network
network = ZWaveNetwork()
# Wait for the network to be ready
network.start()
print("Z-Wave server started")
# Main server loop
try:
    while True:
        # Check for Z-Wave events
        network.update()
        # Process the received events
        for node in network.nodes:
            for value in network.nodes[node].get_changed_values():
                print(f"Node ID: {node} - Value ID: {value.value_id} - New value: {value.data}")
except KeyboardInterrupt:
    # Stop the Z-Wave network (this line was unreachable after the infinite loop)
    network.stop()
|
ronisflamme/Iot-project
|
protocole Z-wave/serveur Z-wave.py
|
serveur Z-wave.py
|
py
| 607 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
33585060395
|
__author__ = 'Vivek'
#Given a sorted array and a target value, return the index if the target is found.
# If not, return the index where it would be if it were inserted in order.
#You may assume no duplicates in the array.
def searchInsert(A, B):
"""
:param: A List of integers , B integer to be inserted
:return: return index if B is already present in A , otherwise index at which B will be inserted
"""
    low = 0
    high = len(A) - 1
    while low <= high:
        mid = (low + high) // 2  # integer division (the original '/' breaks on Python 3)
        if A[mid] == B:
            return mid
        if mid + 1 != len(A) and A[mid] < B < A[mid + 1]:
            return mid + 1
        elif mid == len(A) - 1 and A[mid] < B:
            return mid + 1
        elif mid != 0 and A[mid - 1] < B < A[mid]:
            return mid
        elif mid == 0 and A[mid] > B:
            return mid
        if B < A[mid]:
            high = mid - 1
        elif B > A[mid]:
            low = mid + 1
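# Examples: searchInsert([1, 3, 5, 6], 5) == 2
#           searchInsert([1, 3, 5, 6], 2) == 1
#           searchInsert([1, 3, 5, 6], 7) == 4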
|
viveksyngh/InterviewBit
|
Binary Search/INSERTPOS.py
|
INSERTPOS.py
|
py
| 957 |
python
|
en
|
code
| 3 |
github-code
|
6
|
7262256391
|
from http import HTTPStatus
from flask import current_app, jsonify, request
from app.models.vacine_model import Vacine
from sqlalchemy.exc import IntegrityError
from app.exc.errors import CpfInvalid
from app.services.verif_data import verify_data
from app.services.generate_data import data_generate
def get_vacines():
vacines = Vacine.query.all()
serialized = [
{
"cpf": vacine.cpf,
"name": vacine.name,
"vaccine_name": vacine.vaccine_name,
"health_unit_name": vacine.health_unit_name,
"first_shot_date": vacine.first_shot_date,
"second_shot_date": vacine.second_shot_date
}
for vacine in vacines
]
return jsonify(serialized), 200
def create_vacine():
data = request.get_json()
verify_data(data)
    for key in data.keys():
        if type(data[key]) != str:
            return {"error": f"The key {key} has an invalid format."}, HTTPStatus.BAD_REQUEST
try:
new_vaccine = Vacine(
cpf=data["cpf"],
name=data["name"],
vaccine_name=data["vaccine_name"],
health_unit_name=data["health_unit_name"],
first_shot_date=data_generate(),
second_shot_date=data_generate()
)
session = current_app.db.session
session.add(new_vaccine)
session.commit()
return jsonify(new_vaccine), 201
    except IntegrityError:
        return {"message": "CPF already registered."}, HTTPStatus.CONFLICT
    except CpfInvalid:
        return {"message": "The CPF is not in the correct format."}, HTTPStatus.BAD_REQUEST
    except KeyError as err:
        return {"message": f"Missing key {str(err)}."}, HTTPStatus.BAD_REQUEST
|
Kenzie-Academy-Brasil-Developers/q3-sprint5-vacinacao-theogandara
|
app/controllers/vacine_controller.py
|
vacine_controller.py
|
py
| 1,750 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5820650421
|
import hashlib
import json
import urllib.parse
from typing import Union, Dict
import asks
from asks.response_objects import Response
from spins_halp_line.constants import Credentials
from spins_halp_line.util import get_logger, SynchedCache
_cred_key = "resource_space"
_field_ids = {
"adventure_name": 86,
"player": 87
}
_base_url = "base_url"
_user = "user"
_secret = "secret"
_l = get_logger()
# search:
# [
# {
# "score":"0",
# "ref":"1001",
# "resource_type":"4",
# "has_image":"0",
# "is_transcoding":"0",
# "creation_date":"2020-11-18 19:13:51",
# "rating":"",
# "user_rating":"",
# "user_rating_count":"",
# "user_rating_total":"",
# "file_extension":"mp3",
# "preview_extension":"jpg",
# "image_red":"",
# "image_green":"",
# "image_blue":"",
# "thumb_width":"",
# "thumb_height":"",
# "archive":"0",
# "access":"0",
# "colour_key":"",
# "created_by":"1",
# "file_modified":"2020-11-18 19:13:51",
# "file_checksum":"",
# "request_count":"0",
# "new_hit_count":"8",
# "expiry_notification_sent":"0",
# "preview_tweaks":"0|1",
# "file_path":"",
# "modified":"2020-11-19 03:58:07",
# "group_access":"",
# "user_access":"",
# "field12":"2020-11-18 19:13",
# "field8":"Shipwreck Front Yard",
# "field3":"",
# "order_by":"",
# "total_hit_count":"8"
# }
# ]
# get_resource_data
# {
# "ref":"1001", // both
# "title":"", // not the title field in the ui lol
# "resource_type":"4", // both
# "has_image":"0", // both
# "is_transcoding":"0", // both
# "hit_count":"8",
# "new_hit_count":"8", // both
# "creation_date":"2020-11-18 19:13:51", // both
# "rating":"", // both
# "user_rating":"", // both
# "user_rating_count":"", // both
# "user_rating_total":"", // both
# "country":"",
# "file_extension":"mp3", // both
# "preview_extension":"jpg", // both
# "image_red":"", // both
# "image_green":"", // both
# "image_blue":"", // both
# "thumb_width":"", // both
# "thumb_height":"", // both
# "archive":"0", // both
# "access":"0", // both
# "colour_key":"", // both
# "created_by":"1", // both
# "file_path":"", // both
# "file_modified":"2020-11-18 19:13:51", // both
# "file_checksum":"", // both
# "request_count":"0", // both
# "expiry_notification_sent":"0", // both
# "preview_tweaks":"0|1", // both
# "geo_lat":"",
# "geo_long":"",
# "mapzoom":"",
# "disk_usage":"623803",
# "disk_usage_last_updated":"2020-11-18 19:13:52",
# "file_size":"623803",
# "preview_attempts":"1",
# "field12":"2020-11-18 19:13", // both (?)
# "field8":"Shipwreck Front Yard", // both (title)
# "field3":"", // both (?)
# "modified":"2020-11-19 03:58:07",
# "last_verified":"",
# "integrity_fail":"0",
# "google_vision_processed":"",
# "lock_user":""
# }
# Community boards are useful: https://groups.google.com/g/resourcespace?pli=1
# static cache
_global_cache = SynchedCache()
class RSResource(object):
@classmethod
async def for_room(cls, room_name):
# https://www.resourcespace.com/knowledge-base/user/special-search-terms
# todo: The caching logic here could use improvement. We cache the data from a particular room
# todo: so if we load the same room again we won't repeat those requests...but it seems wrong to
# todo: cache searches? This caching model is based on a read-only assumption - that the server
# todo: will be restarted if we make changes in the CMS. Maybe we should cache search results?
# todo: In any case, since these calls should mostly be made once, it's possible that any caching
# todo: is properly viewed as premature optimization.
files = await cls._get(
'do_search',
{
'search': f'room:{room_name}'
}
)
files = [f for f in files if f['field8'] == room_name]
return await cls._from_list(files)
@classmethod
async def _from_list(cls, resources):
result = []
for r in resources:
obj = RSResource(r)
await obj.load()
result.append(obj)
return result
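    # Illustrative usage (reviewer assumption, not from the original file): inside
    # an async (trio) context one could write
    #   resources = await RSResource.for_room("Shipwreck Front Yard")
    #   urls = [r.url for r in resources]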
_k_id = 'ref'
_k_ext = 'file_extension'
_k_ui_title = 'field8'
_k_d_url = 'data_url'
_k_adventure = 'adventure_name'
_k_player = 'player'
_k_room = 'room'
_k_date = 'date'
_k_duration = 'duration'
_k_path = 'path'
_extended_fields = [
_k_adventure,
_k_player,
_k_room,
_k_date,
_k_duration,
_k_path
]
_resource_types = {
'1': 'photo',
'2': 'document',
'3': 'video',
'4': 'audio'
}
def __init__(self, data: Union[Dict[str, str], str, int]):
global _global_cache
self._cache = _global_cache
self._data = {}
self._loaded = False
self._id = None
if isinstance(data, dict):
self._data = data
self._loaded_basic = True
# self._id = data.get(self._k_id)
elif isinstance(data, int) or isinstance(data, str):
self._id = int(data)
def _throw_if_not_loaded(self):
if not self._loaded:
raise ValueError(f'{self} has not had its fields loaded!')
async def load(self):
cache_key = self.id
# support caching results
# todo: this cache doesn't work
data = await self._cache.get(cache_key)
print(f'loading resource {self.id}')
if data is None:
data = await self.get_info()
data = await self.load_extended_fields(data)
self._data = data
# do this last so the extension is loaded
data[self._k_d_url] = await self.get_data_url()
await self._cache.set(cache_key, data)
self._data = data
self._loaded = True
async def load_extended_fields(self, data):
for field in (await self.get_all_fields()):
name = field['name']
if name in self._extended_fields:
data[name] = field['value']
return data
@property
def id(self):
return self._data.get(self._k_id, self._id)
@property
def ext(self):
return self._data.get(self._k_ext)
@property
def title(self):
return self._data.get(self._k_ui_title)
@property
def url(self):
return self._data.get(self._k_d_url)
@property
def adventure(self):
return self._data.get(self._k_adventure)
@property
def player(self):
return self._data.get(self._k_player)
@property
def room(self):
return self._data.get(self._k_room)
@property
def date(self):
return self._data.get(self._k_date)
@property
def duration(self):
return self._data.get(self._k_duration)
@property
def path(self):
return self._data.get(self._k_path)
async def get_data_url(self):
return await self._get(
'get_resource_path',
{
'ref': self.id,
'getfilepath': 0,
'extension': self.ext,
# 'generate': True,
# 'alternative': -1,
# 'size': ''
}
)
async def get_info(self):
return await self._get(
'get_resource_data',
{
'resource': self.id
}
)
# Example response JSON:
# [
# {"value": "Shipwreck Adventure", "resource_type_field": "86", "ref": "86", "name": "adventure_name",
# "title": "Adventure Name", "field_constraint": "0", "type": "3", "order_by": "0", "keywords_index": "1",
# "partial_index": "0", "resource_type": "0", "resource_column": "", "display_field": "1",
# "use_for_similar": "1", "iptc_equiv": "", "display_template": "", "tab_name": "", "required": "0",
# "smart_theme_name": "", "exiftool_field": "", "advanced_search": "1", "simple_search": "0", "help_text": "",
# "display_as_dropdown": "0", "external_user_access": "1", "autocomplete_macro": "", "hide_when_uploading": "0",
# "hide_when_restricted": "0", "value_filter": "", "exiftool_filter": "", "omit_when_copying": "0",
# "tooltip_text": "", "regexp_filter": "", "sync_field": "", "display_condition": "", "onchange_macro": "",
# "linked_data_field": "", "automatic_nodes_ordering": "0", "fits_field": "", "personal_data": "0",
# "include_in_csv_export": "1", "browse_bar": "1", "read_only": "0", "active": "1", "full_width": "0",
# "frequired": "0", "fref": "86"},
# {"value": "", "resource_type_field": "87", "ref": "87", "name": "player", "title": "Player",
# "field_constraint": "0", "type": "0", "order_by": "0", "keywords_index": "1", "partial_index": "0",
# "resource_type": "0", "resource_column": "", "display_field": "1", "use_for_similar": "1", "iptc_equiv": "",
# "display_template": "", "tab_name": "", "required": "0", "smart_theme_name": "", "exiftool_field": "",
# "advanced_search": "1", "simple_search": "0", "help_text": "", "display_as_dropdown": "0",
# "external_user_access": "1", "autocomplete_macro": "", "hide_when_uploading": "0", "hide_when_restricted": "0",
# "value_filter": "", "exiftool_filter": "", "omit_when_copying": "0", "tooltip_text": "", "regexp_filter": "",
# "sync_field": "", "display_condition": "", "onchange_macro": "", "linked_data_field": "",
# "automatic_nodes_ordering": "0", "fits_field": "", "personal_data": "0", "include_in_csv_export": "1",
# "browse_bar": "1", "read_only": "0", "active": "1", "full_width": "0", "frequired": "0", "fref": "87"},
# {"value": "Shipwreck Yard Front", "resource_type_field": "88", "ref": "88", "name": "room", "title": "Room",
# "field_constraint": "", "type": "3", "order_by": "0", "keywords_index": "1", "partial_index": "0",
# "resource_type": "0", "resource_column": "", "display_field": "1", "use_for_similar": "1", "iptc_equiv": "",
# "display_template": "", "tab_name": "", "required": "0", "smart_theme_name": "", "exiftool_field": "",
# "advanced_search": "1", "simple_search": "0", "help_text": "", "display_as_dropdown": "0",
# "external_user_access": "1", "autocomplete_macro": "", "hide_when_uploading": "0", "hide_when_restricted": "0",
# "value_filter": "", "exiftool_filter": "", "omit_when_copying": "0", "tooltip_text": "", "regexp_filter": "",
# "sync_field": "", "display_condition": "", "onchange_macro": "", "linked_data_field": "",
# "automatic_nodes_ordering": "0", "fits_field": "", "personal_data": "0", "include_in_csv_export": "1",
# "browse_bar": "1", "read_only": "0", "active": "1", "full_width": "0", "frequired": "0", "fref": "88"},
# {"value": "Description", "resource_type_field": "8", "ref": "8", "name": "title", "title": "Title",
# "field_constraint": "", "type": "0", "order_by": "10", "keywords_index": "1", "partial_index": "0",
# "resource_type": "0", "resource_column": "title", "display_field": "0", "use_for_similar": "1",
# "iptc_equiv": "2#005", "display_template": "", "tab_name": "", "required": "1", "smart_theme_name": "",
# "exiftool_field": "Title", "advanced_search": "1", "simple_search": "0", "help_text": "",
# "display_as_dropdown": "0", "external_user_access": "1", "autocomplete_macro": "", "hide_when_uploading": "0",
# "hide_when_restricted": "0", "value_filter": "", "exiftool_filter": "", "omit_when_copying": "",
# "tooltip_text": "", "regexp_filter": "", "sync_field": "", "display_condition": "", "onchange_macro": "",
# "linked_data_field": "", "automatic_nodes_ordering": "0", "fits_field": "", "personal_data": "0",
# "include_in_csv_export": "1", "browse_bar": "1", "read_only": "0", "active": "1", "full_width": "0",
# "frequired": "1", "fref": "8"},
# ...
# ]
async def get_all_fields(self):
return await self._get(
'get_resource_field_data',
{
'resource': self.id
}
)
    def _add_extended_field(self, field):
        self._data[field['name']] = field['value']
@staticmethod
async def _get(function, params, unwrap=True) -> dict:
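        # ResourceSpace authenticates each call by appending sign=sha256(private_key + query_string),
        # which is what the signer below computes.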
base_url = Credentials[_cred_key][_base_url]
params['function'] = function
params['user'] = Credentials[_cred_key][_user]
qstring = urllib.parse.urlencode(params)
secret = Credentials[_cred_key][_secret]
signer = hashlib.sha256()
signer.update(f'{secret}{qstring}'.encode("utf-8"))
request = f'{base_url}?{qstring}&sign={signer.hexdigest()}'
result: Response = await asks.get(request)
# print("-" * 60)
# print(request)
# print(">" * 5)
# print(result)
# print("\\/" * 5)
# print(result.content.decode("utf-8"))
# print("-" * 60)
# if unwrap and result.status_code >= 200 and result.status_code < 300:
result: dict = json.loads(result.content.decode("utf-8"))
return result
def __str__(self):
return f'RSR[{self.id}] {self.url}'
def __repr__(self):
return str(self)
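# Usage sketch (hypothetical call site; assumes Credentials is configured and an
# async event loop is running):
#
#   resources = await RSResource.for_room('Shipwreck Front Yard')
#   for r in resources:
#       print(r.title, r.ext, r.url)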
|
aeturnum/spins_halp_line
|
spins_halp_line/media/resource_space.py
|
resource_space.py
|
py
| 13,353 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2063946987
|
from loader import dp
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Text
from loguru import logger
from datetime import datetime
@dp.message_handler(commands='reload', state='*')
@dp.message_handler(Text(equals='reload',
ignore_case=True), state='*')
async def cmd_reload(message: types.Message,
                     state: FSMContext) -> None:
    """
    Resets the finite-state machine.
    :param message: Message - the user's message
    :param state: FSMContext - the state machine
    """
cur_state = await state.get_state()
    logger.info(
        f'\nUser: {message.from_user.full_name}, '
        f'id: {message.from_user.id}, performed a reload; '
        f'the user was in state {cur_state}, '
        f'date: {datetime.now()}'
    )
    await message.answer('Reloading')
await state.reset_state()
|
Taiven396/tickets_bot
|
handlers/reload.py
|
reload.py
|
py
| 1,059 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
29956253019
|
import sys
sys.path.append("..") # Adds higher directory to python modules path.
import to_bip as tb
import blocks as bl
import shapegen as sh
def main():
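    # Build a 5-level pyramid, connect its layers into 3D blocks, print a
    # visual check of each block and layer, then export everything to BIP.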
print(" ---- Pyramid ----")
py = sh.Pyramid(5)
py.generate()
py_blocks = bl.init_blocks_3D(py.matlist())
py_conn = bl.connect_blocks_3D(py_blocks)
for block in py_conn.values():
bl.visual_print_2D(block)
print("\n")
print("LAYERS:")
for mat in py.matlist():
bl.print_matrix(mat)
print("\n")
print("\n")
tb.write_BIP("pyramid.bip", py_conn)
if __name__ == "__main__":
main()
|
ninocapipoca/ModularRobots
|
tests/test_pyramid.py
|
test_pyramid.py
|
py
| 613 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32195861005
|
from pyplasm import *
import os,sys
sys.path.insert(0, 'lib/py/')
from lar2psm import *
from larcc import *
from sysml import *
# Utility functions
DRAW = COMP([VIEW,STRUCT,MKPOLS])
DRAW2 = COMP([STRUCT,MKPOLS])
def rgbToPlasmColor(color):
return [color[0]/255., color[1]/255., color[2]/255.]
def creaFinestre(x,z):
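    # Build a window x wide and z tall: a light-blue glass pane framed by
    # brown wooden slats.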
finestra0=CUBOID([x,0.1,z])
anta1=CUBOID([0.1,0.1,z-0.1])
anta1=T([2,3])([-0.1,0.1])(anta1)
anta2=CUBOID([0.1,0.1,z-0.1])
anta2=T([1,2,3])([x-0.1,-0.1,0.1])(anta2)
anta3=CUBOID([x,0.1,0.1])
anta3=T(2)(-0.1)(anta3)
anta4=CUBOID([x,0.1,0.1])
anta4=T([2,3])([-0.1,z-0.1])(anta4)
anta5=CUBOID([0.1,0.1,z-0.1])
anta5=T([1,2,3])([(x-0.1)/2,-0.1,0.1])(anta5)
ante=STRUCT([anta1,anta2,anta3,anta4,anta5])
ante=COLOR(rgbToPlasmColor([153,51,0]))(ante)
finestra0=COLOR(rgbToPlasmColor([153,203,255]))(finestra0)
finestra=STRUCT([finestra0,ante])
return finestra
def creaPorta(x,z):
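    # Build a door x wide and z tall with a cylindrical handle.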
porta0=CUBOID([x,0.1,z])
porta0=COLOR(rgbToPlasmColor([192,64,0]))(porta0)
cilind_T = CYLINDER([0.025, (10.0/12)*0.1])(50)
cilind_T=ROTATE([2,3])(PI/2)(cilind_T)
cilind_T=T([1,3])([x-0.1,z/2])(cilind_T)
cilind_T=COLOR(rgbToPlasmColor([205,133,63]))(cilind_T)
porta=STRUCT([porta0,cilind_T])
return porta
# Bedroom 1
master = assemblyDiagramInit([5,3,2])([[.1,1.5,1,1.5,.1],[.1,3,.1],[.1,2.7]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc= cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Door
toMerge = 23
diagram0 = assemblyDiagramInit([3,1,2])([[1.5,2,1.5],[.1],[2.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Window
toMerge = 13
diagram0 = assemblyDiagramInit([1,1,3])([[3],[.1],[1,1.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [30,35,9,14,20]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
camera=DRAW2(master)
camera=COLOR(rgbToPlasmColor([255 ,204,153]))(camera)
finestra1=creaFinestre(1,1.2)
finestra1=T([1,3])([1.6,1.1])(finestra1)
porta0=creaPorta(0.6,2.2)
porta0=ROTATE([1,2])(PI)(porta0)
porta0=T([1,2,3])([3.65,3.2,0.1])(porta0)
camera1=STRUCT([camera,finestra1,porta0])
# Bedroom 2
master = assemblyDiagramInit([5,3,2])([[.1,1.5,1,1.5,.1],[.1,3,.1],[.1,2.7]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc= cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Door
toMerge = 19
diagram0 = assemblyDiagramInit([3,1,2])([[1.5,2,1.5],[.1],[2.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Window
toMerge = 17
diagram0 = assemblyDiagramInit([1,1,3])([[3],[.1],[1,1.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [30,35,9,15,19]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
camera2Temp=DRAW2(master)
camera2Temp=COLOR(rgbToPlasmColor([255 ,204,153]))(camera2Temp)
finestra0=creaFinestre(1.2,1.2)
finestra0=ROTATE([1,2])(PI)(finestra0)
finestra0=T([1,2,3])([2.6,3.2,1.1])(finestra0)
porta0=creaPorta(0.6,2.2)
porta0=T([1,3])([3.05,0.1])(porta0)
camera2=STRUCT([camera2Temp,finestra0,porta0])
camera2=T(2)(5)(camera2)
# Bathroom 2
master = assemblyDiagramInit([3,5,2])([[.1,2.2,.1],[.1,.4,0.9,.4,.1],[.1,2.7]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Door
toMerge = 25
diagram2 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram2,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Window
toMerge = 5
diagram0 = assemblyDiagramInit([1,1,3])([[3],[.1],[1,1.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,1)
# Removal
toRemove = [14,16,12,28,31]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
bagnoTemp=DRAW2(master)
bagnoTemp=COLOR(rgbToPlasmColor([255 ,204,153]))(bagnoTemp)
finestra0=creaFinestre(1,1.2)
finestra0=ROTATE([1,2])(-PI/2)(finestra0)
finestra0=T([2,3])([1.5,1.1])(finestra0)
porta0=creaPorta(0.9,2.2)
porta0=ROTATE([1,2])(PI/2)(porta0)
porta0=T([1,2,3])([2.4,0.5,0.1])(porta0)
bagno=STRUCT([bagnoTemp,finestra0,porta0])
bagno=T(2)(3.1)(bagno)
# Laundry room
master = assemblyDiagramInit([5,3,2])([[.1,1.4,1,.5,.1],[.1,2.1,.1],[.1,2.7]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Door
toMerge = 13
diagram3 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram3,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Window
toMerge = 16
diagram0 = assemblyDiagramInit([3,1,3])([[3,3,3],[.1],[1,1.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [34,28,9,14,19,31,37]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
stireriaTemp=DRAW2(master)
stireriaTemp=COLOR(rgbToPlasmColor([255 ,204,153]))(stireriaTemp)
finestra0=creaFinestre(1,1.2)
finestra0=ROTATE([1,2])(-PI)(finestra0)
finestra0=T([1,2,3])([2.5,2.3,1.1])(finestra0)
porta0=creaPorta(1,2.2)
porta0=T([1,3])([1.5,0.1])(porta0)
stireria=STRUCT([stireriaTemp,finestra0,porta0])
stireria=T([1,2])([10.8,8.2])(stireria)
# Kitchen
master = assemblyDiagramInit([5,6,2])([[.1,1,1,2,.1],[.1,1.5,1,1.2,0.3,.1],[.1,2.7]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
#hpc4=T([1,2])([13.2,4.1])(hpc4)
# Window
toMerge = 53
diagram = assemblyDiagramInit([1,1,3])([[3],[.1],[1,1.2,.5]])
master = diagram2cell(diagram,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Door
toMerge = 35
diagram = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Door
toMerge = 25
diagram = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Door
toMerge = 7
diagram = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [61,63,57,31,59,14,25,36,16,27,38,18,29,40,20,31,42]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
cucinaTemp=DRAW2(master)
cucinaTemp=COLOR(rgbToPlasmColor([255 ,204,153]))(cucinaTemp)
finestra0=creaFinestre(1,1.2)
finestra0=ROTATE([1,2])(PI/2)(finestra0)
finestra0=T([1,2,3])([4.2,1.6,1.1])(finestra0)
porta0=creaPorta(1,2.2)
porta0=ROTATE([1,2])(PI)(porta0)
porta0=T([1,2,3])([2.1,4.2,0.1])(porta0)
porta1=creaPorta(1,2.2)
porta1=T([1,3])([1.1,0.1])(porta1)
porta2=creaPorta(1.2,2.2)
porta2=ROTATE([1,2])(-PI/2)(porta2)
porta2=T([2,3])([3.8,0.1])(porta2)
cucina=STRUCT([cucinaTemp,finestra0,porta0,porta1,porta2])
cucina=T([1,2])([13.2,4.1])(cucina)
# Bathroom 2
master = assemblyDiagramInit([5,3,2])([[.1,1.4,1,.4,.1],[.1,1.2,.1],[.1,2.7]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
#hpc=T([1,2])([10.6,5.3])(hpc)
# Door
toMerge = 17
diagram = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [29,15,9,20]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
bagnoTemp2=DRAW2(master)
bagnoTemp2=COLOR(rgbToPlasmColor([255 ,204,153]))(bagnoTemp2)
porta0=creaPorta(1,2.2)
porta0=ROTATE([1,2])(PI)(porta0)
porta0=T([1,2,3])([2.5,1.4,0.1])(porta0)
bagno2=STRUCT([porta0,bagnoTemp2])
bagno2=T([1,2])([10.2,5.3])(bagno2)
# Stairs
master = assemblyDiagramInit([3,5,2])([[.1,3.4,.1],[.1,.2,1,.2,.1],[.1,2.7]])
V,CV= master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Door
toMerge = 25
diagram2 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram2,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Window
toMerge = 19
diagram0 = assemblyDiagramInit([3,1,3])([[3,3,3],[.1],[1,1.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [28,15,34,13,17,14]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
scaleTemp=DRAW2(master)
scaleTemp=COLOR(rgbToPlasmColor([255 ,204,153]))(scaleTemp)
finestra0=creaFinestre(1.2,1.2)
finestra0=ROTATE([1,2])(PI)(finestra0)
finestra0=T([1,2,3])([2.4,1.6,1.1])(finestra0)
porta0=creaPorta(1,2.2)
porta0=ROTATE([1,2])(PI/2)(porta0)
porta0=T([1,2,3])([3.6,0.3,0.1])(porta0)
scale=STRUCT([scaleTemp,finestra0,porta0])
scale=T([1,2])([4,6.6])(scale)
#Remove
master = assemblyDiagramInit([3,3,2])([[.1,1.3,.1],[.1,1.4,.1],[.1,2.7]])
V,CV= master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
#Remove
toRemove = [5,11,17,3,9,15]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
remove=DRAW2(master)
remove=T([1,2])([7.6,6.6])(remove)
remove=COLOR(rgbToPlasmColor([255 ,204,153]))(remove)
finestra0=creaFinestre(1.2,1.2)
finestra0=ROTATE([1,2])(PI)(finestra0)
finestra0=T([1,2,3])([2.4,1.6,1.1])(finestra0)
# Living room 2
master = assemblyDiagramInit([5,3,2])([[.15,0.8,1.6,0.8,.15],[.1,6.0,.1],[.1,2.7]])
V,CV= master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Door
toMerge = 13
diagram0 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [29,3,9,14,20,26,11,16,22]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
soggiornoTemp2=DRAW2(master)
soggiornoTemp2=COLOR(rgbToPlasmColor([255 ,204,153]))(soggiornoTemp2)
finestra0=creaFinestre(1.6,2.3)
finestra0=T([1])(0.95)(finestra0)
soggiorno2=STRUCT([soggiornoTemp2,finestra0])
soggiorno2=T([1,2])([4.1,0.5])(soggiorno2)
# Living room 3
master = assemblyDiagramInit([7,4,2])([[.4,1,1,0.9,1,1,.3],[.1,.4,4.8,.1],[.1,2.7]])
V,CV= master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc= cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Door
toMerge = 41
diagram0 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Door
toMerge = 33
diagram0 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Door
toMerge = 17
diagram0 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Door
toMerge = 9
diagram0 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [58,56,54,52,5,12,19,27,34,41,49,7,14,21,29,36,43,51,10,17,32,39]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
soggiornoTemp3=DRAW2(master)
soggiornoTemp3=COLOR(rgbToPlasmColor([255 ,204,153]))(soggiornoTemp3)
finestra0=creaFinestre(2,2.3)
finestra0=T(1)(0.4)(finestra0)
finestra1=creaFinestre(2,2.3)
finestra1=T(1)(3.3)(finestra1)
soggiorno3=STRUCT([soggiornoTemp3,finestra0,finestra1])
soggiorno3=T([1])([7.6])(soggiorno3)
# Dining room
master = assemblyDiagramInit([5,3,2])([[.1,1,2,1,.1],[.1,3.5,.1],[.1,2.7]])
V,CV= master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Window
toMerge = 13
diagram0 = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [29,9,14,20,3,28,5,11,16,22,23]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
soggiornoTemp4=DRAW2(master)
soggiornoTemp4=COLOR(rgbToPlasmColor([255 ,204,153]))(soggiornoTemp4)
finestra0=creaFinestre(2,2.3)
finestra0=T(1)(1.1)(finestra0)
soggiorno4=STRUCT([soggiornoTemp4,finestra0])
soggiorno4=T([1,2])([13.2,0.5])(soggiorno4)
# Fireplace
master = assemblyDiagramInit([3,3,2])([[.1,1,.1],[.1,2,.1],[.1,2.7]])
V,CV= master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
master = master[0], [cell for k,cell in enumerate(master[1]) ]
camino=DRAW2(master)
camino=T([1,2])([6.5,2])(camino)
#Remove2
master = assemblyDiagramInit([4,5,2])([[.1,3.6,2.6,.1],[.1,2.8,1.1,1.1,.1],[.1,2.7]])
V,CV= master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Window
toMerge = 19
diagram0 = assemblyDiagramInit([3,1,3])([[3,3,3],[.1],[1,1.2,.5]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Window
toMerge = 7
diagram = assemblyDiagramInit([1,1,2])([[2],[.1],[2.2,.5]])
master = diagram2cell(diagram,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [47,42,12,21,14,23,16,25,3,10,1,19,31,29,33,35,37,27]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
remove2Temp=DRAW2(master)
remove2Temp=COLOR(rgbToPlasmColor([255 ,204,153]))(remove2Temp)
porta0=creaPorta(1.1,2.2)
porta0=ROTATE([1,2])(-PI/2)(porta0)
porta0=T([2,3])([5.1,0.1])(porta0)
finestra0=creaFinestre(1.2,1.2)
finestra0=ROTATE([1,2])(PI)(finestra0)
finestra0=T([1,2,3])([2.5,5.2,1.1])(finestra0)
remove2=STRUCT([remove2Temp,finestra0,porta0])
remove2=T([1,2])([7.5,5.3])(remove2)
#Remove3
master = assemblyDiagramInit([3,3,2])([[.1,2.1,.1],[.1,2,.1],[.1,2.7]])
V,CV= master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
toRemove = [1,3,5,7,9,11,13,15,17]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
remove3=DRAW2(master)
remove3=T([1,2])([2.4,3.1])(remove3)
# Stairs
# One step profile, extruded to 3D and repeated 14 times with a 0.2 rise and run per step
gradino2_vertici = [ [0,0], [0,0.3], [1.3,0], [1.3,0.3] ]
gradino2_num_lati = [range(1,5)]
gradino2_2D = MKPOL([gradino2_vertici, gradino2_num_lati, None])
gradini = []
for i in range(14):
    gradino = PROD([gradino2_2D, Q(0.2)])
    gradino = T([2,3])([0.2*i,0.2*i])(gradino)
    gradini.append(gradino)
# Assemble the staircase
scalinata=STRUCT(gradini)
scalinata=ROTATE([1,2])(PI/2)(scalinata)
# Translate it along the axes to the center of the front section
scalinata=T([1,2])([7.3,5.3])(scalinata)
# Build the second staircase
scalinata2=scalinata
# Translate the second staircase
scalinata2=T([1,2,3])([-11.9,-13.4,-2.7])(scalinata2)
scalinata2=ROTATE([1,2])(-PI)(scalinata2)
# Exterior
est1= CUBOID([4,6,0.2])
est1=T([1,2,3])([13.2,-5.5,-0.1])(est1)
est2= CUBOID([14.7,2.8,0.2])
est2=T([1,2,3])([-1.5,-2.3,-0.1])(est2)
est3=CUBOID([14.5,0.2,0.2])
est3=T([1,2,3])([-1.5,-2.5,-0.1])(est3)
est3=COLOR(rgbToPlasmColor([147,147,147]))(est3)
est4=CUBOID([0.2,3.2,0.2])
est4=T([1,2,3])([13,-5.5,-0.1])(est4)
est4=COLOR(rgbToPlasmColor([147,147,147]))(est4)
est5=CUBOID([4,0.2,0.2])
est5=T([1,2,3])([13,-5.7,-0.1])(est5)
est5=COLOR(rgbToPlasmColor([147,147,147]))(est5)
est6=CUBOID([0.5,1.7,0.5])
est6=T([1,2,3])([16.9,-7,-0.1])(est6)
est6=COLOR(rgbToPlasmColor([255 ,204,153]))(est6)
colonna1=CUBOID([0.5,0.5,2.8])
colonna1=T([1,2])([3,-1])(colonna1)
colonna2=CUBOID([0.5,0.5,2.8])
colonna2=T([1,2])([7,-1])(colonna2)
colonna3=CUBOID([0.5,0.5,2.8])
colonna3=T([1,2])([10.5,-1])(colonna3)
colonna4=CUBOID([0.5,0.5,2.8])
colonna4=T([1,2])([14,-1])(colonna4)
colonna5=CUBOID([0.5,0.5,2.8])
colonna5=T([1,2])([3.9,10])(colonna5)
colonne=STRUCT([colonna1,colonna2,colonna3,colonna4,colonna5])
ext4_vertici = [ [0,0], [0,2], [0.5,0], [0.5,2] ];
ext4_num_lati = [range(1,5)]
ext4_2D = MKPOL([ext4_vertici, ext4_num_lati, None])
ext4 = PROD([ext4_2D, Q(2.8)])
ext4=T(2)(-2)(ext4)
ext5_vertici = [ [0,0], [0,6], [2.8,0],[2.8,2.5],[0.3,6] ];
ext5_num_lati = [range(1,6)]
ext5_2D = MKPOL([ext5_vertici, ext5_num_lati, None])
ext5 = PROD([ext5_2D, Q(0.5)])
ext5=ROTATE([1,3])(PI/2)(ext5)
ext5=ROTATE([1,2])(PI)(ext5)
ext5=T([1,2])([16.9,0.5])(ext5)
baseEsterno=STRUCT([est1,est2])
esterno=STRUCT([ext4,ext5,colonne])
# Upper section
parteSup=CUBOID([17.4,10.3,0.7])
parteSup=T([2,3])([-2,2.8])(parteSup)
# Roof
tetto_vertici = [ [0,0], [10.3,0], [5.15,1.5], ];
tetto_num_lati = [range(1,4)]
tetto_2D = MKPOL([tetto_vertici, tetto_num_lati, None])
# Extrude to 2.5D
tetto = PROD([tetto_2D, Q(17.4)])
tetto=ROTATE([2,3])(PI/2)(tetto)
tetto=ROTATE([1,2])(PI/2)(tetto)
tetto=T([2,3])([-2,3.5])(tetto)
# Build the attic
# Bedroom 1
master = assemblyDiagramInit([7,6,2])([[.1,0.5,1.1,2,1,1,.1],[.1,0.5,1.3,2,1.3,.2],[.1,2.3]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc= cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Window
toMerge = 49
diagram0 = assemblyDiagramInit([1,1,3])([[3],[.1],[1,1.2,.2]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Door
toMerge = 35
diagram0 = assemblyDiagramInit([1,1,2])([[3],[.1],[2.2,.2]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
toMerge = 25
diagram0 = assemblyDiagramInit([1,1,2])([[3],[.1],[2.2,.2]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
toMerge = 9
diagram0 = assemblyDiagramInit([1,1,2])([[3],[.1],[2.2,.2]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
toMerge = 5
diagram0 = assemblyDiagramInit([1,1,2])([[3],[.1],[2.2,.2]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [80,88,84,86,24,26,28,30,35,37,39,41,46,48,50,52,58,60,62,64,13,15,17,19,82]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
cameraManTemp=DRAW2(master)
cameraManTemp=COLOR(rgbToPlasmColor([255 ,204,153]))(cameraManTemp)
finestra0=creaFinestre(1,1.2)
finestra0=T([1,3])([3.7,1])(finestra0)
porta0=creaPorta(1.1,2.1)
porta0=T([1,3])([0.6,0.1])(porta0)
porta1=creaPorta(1.1,2.1)
porta1=ROTATE([1,2])(PI)(porta1)
porta1=T([1,2,3])([1.7,5.3,0.1])(porta1)
porta2=creaPorta(1.3,2.1)
porta2=ROTATE([1,2])(-PI/2)(porta2)
porta2=T([2,3])([1.9,0.1])(porta2)
porta3=creaPorta(1.3,2.1)
porta3=ROTATE([1,2])(-PI/2)(porta3)
porta3=T([2,3])([5.2,0.1])(porta3)
cameraMan=STRUCT([cameraManTemp,finestra0,porta0,porta1,porta2,porta3])
cameraMan=T([1])([4.1])(cameraMan)
# Attic bathroom
master = assemblyDiagramInit([5,5,2])([[.1,0.5,1.5,2,.1],[.1,0.5,1.3,1.5,.1],[.1,2.3]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc= cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
toMerge = 21
diagram0 = assemblyDiagramInit([1,1,3])([[3],[.1],[1,1.2,.3]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [50,17,26,36,46,15,24,34,44,13,22,32,42]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
bagnoManTemp=DRAW2(master)
bagnoManTemp=COLOR(rgbToPlasmColor([255 ,204,153]))(bagnoManTemp)
finestra0=creaFinestre(1.6,1.2)
finestra0=T([1,3])([0.5,1])(finestra0)
bagnoMan=STRUCT([bagnoManTemp,finestra0])
# Balcony
master = assemblyDiagramInit([3,3,3])([[.1,9.8,.1],[.1,1.3,.1],[.1,1.5,.1]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc= cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
# Removal
toRemove = [13,16,17,14]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
balcone=DRAW2(master)
balcone=T(2)(-1.5)(balcone)
# Wall
muro=CUBOID([0.1,1.8,2.4])
muro=T(2)(3.5)(muro)
# Wall 2
muro2=CUBOID([4.5,0.1,2.4])
muro2=T(2)(5.3)(muro2)
#ParteSupTettoMansarda
ParteSupTettoMansarda=CUBOID([10,9.7,0.3])
ParteSupTettoMansarda=T([2,3])([-0.7,2.4])(ParteSupTettoMansarda)
#TettoMansarda
tettoM_vertici = [ [0,0], [9.7,0], [4.85,1.5], ];
tettoM_num_lati = [range(1,4)]
tettoM_2D = MKPOL([tettoM_vertici, tettoM_num_lati, None])
# Extrude to 2.5D
tettoM = PROD([tettoM_2D, Q(10)])
tettoM=ROTATE([2,3])(PI/2)(tettoM)
tettoM=ROTATE([1,2])(PI/2)(tettoM)
tettoM=T([2,3])([-0.7,2.7])(tettoM)
# Loft
master = assemblyDiagramInit([5,5,2])([[.1,3,.1,6.7,.1],[.1,1,1,1.4,.1],[.1,2.3]])
V,CV = master
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc= cellNumbering (master,hpc)(range(len(CV)),CYAN,2)
toMerge = 25
diagram0 = assemblyDiagramInit([1,1,2])([[3],[.1],[2.2,.2]])
master = diagram2cell(diagram0,master,toMerge)
hpc = SKEL_1(STRUCT(MKPOLS(master)))
hpc = cellNumbering (master,hpc)(range(len(master[1])),CYAN,2)
# Removal
toRemove = [11,21,30,49,13,15,17,36,34,32]
master = master[0], [cell for k,cell in enumerate(master[1]) if not (k in toRemove)]
sottotetto=DRAW2(master)
sottotetto=T([2])(5.4)(sottotetto)
tettoM=COLOR(rgbToPlasmColor([206,48,24]))(tettoM)
ParteSupTettoMansarda=COLOR(rgbToPlasmColor([123,27,2]))(ParteSupTettoMansarda)
cameraMan=COLOR(rgbToPlasmColor([255,204,153]))(cameraMan)
sottotetto=COLOR(rgbToPlasmColor([255,204,153]))(sottotetto)
bagnoMan=COLOR(rgbToPlasmColor([255,204,153]))(bagnoMan)
balcone=COLOR(rgbToPlasmColor([255,204,153]))(balcone)
muro=COLOR(rgbToPlasmColor([255,204,153]))(muro)
muro2=COLOR(rgbToPlasmColor([255,204,153]))(muro2)
Mansarda=STRUCT([cameraMan,bagnoMan,balcone,muro,muro2,sottotetto,ParteSupTettoMansarda,tettoM])
#ParteSupTettoMansarda,tettoM
Mansarda=T([1,2,3])([3.9,1.5,2.8])(Mansarda)
# Removal
rimozione=CUBOID([10.1,14,3.8])
rimozione=T([1,3])([3.9,2.8])(rimozione)
tetto=DIFFERENCE([tetto,rimozione])
parteSup=DIFFERENCE([parteSup,rimozione])
# Garden
domain1D = larDomain([32])
domain2D = larIntervals([32,48],'simplex')([1,1])
b1 = BEZIER(S1)([[-1.5,4], [0.5,-7], [6.5,-7], [7.5,0]])
b2=BEZIER(S1)([[-1.5,4], [0.5,5], [6.5,5], [7.5,0]])
controls = [b1,b2]
mapping = BEZIER(S2)(controls)
path = STRUCT(MKPOLS(larMap(mapping)(domain2D)))
giardino1 = PROD([path, Q(2.8)])
giardino1=T([2,3])([-14.3,-2.9])(giardino1)
b1 = BEZIER(S1)([[9.5,0], [10.5,-7], [16.5,-7], [17.3,4]])
b2=BEZIER(S1)([[9.5,0], [9.5,5], [16.5,5], [17.3,4]])
controls = [b1,b2]
mapping = BEZIER(S2)(controls)
path = STRUCT(MKPOLS(larMap(mapping)(domain2D)))
giardino2 = PROD([path, Q(2.8)])
giardino2=T([2,3])([-14.3,-2.9])(giardino2)
giardino3=CUBOID([18.8,6,2.8])
giardino3=T([1,2,3])([-1.5,-10.4,-2.9])(giardino3)
giardino4=CUBOID([13,6,2.8])
giardino4=T([1,2,3])([2,-14,-2.9])(giardino4)
# Assemble
giardino=STRUCT([giardino1,giardino2,
giardino4])
giardino=T([2])(3.4)(giardino)
giardino3=T([2])(3.4)(giardino3)
# Garden removal overlay
domain1D = larDomain([32])
domain2D = larIntervals([32,48],'simplex')([1,1])
b1 = BEZIER(S1)([[-1.3,4], [0.7,-7], [6.3,-7], [7.3,0]])
b2=BEZIER(S1)([[-1.3,4], [0.7,5], [6.3,5], [7.3,0]])
controls = [b1,b2]
mapping = BEZIER(S2)(controls)
path = STRUCT(MKPOLS(larMap(mapping)(domain2D)))
giardinoR1 = PROD([path, Q(0.1)])
giardinoR1=T([2,3])([-14.3,0.1])(giardinoR1)
b1 = BEZIER(S1)([[9.7,0], [10.7,-7], [16.3,-7], [17.1,4]])
b2=BEZIER(S1)([[9.7,0], [9.7,5], [16.3,5], [17.1,4]])
controls = [b1,b2]
mapping = BEZIER(S2)(controls)
path = STRUCT(MKPOLS(larMap(mapping)(domain2D)))
giardinoR2 = PROD([path, Q(0.1)])
giardinoR2=T([2,3])([-14.3,0.1])(giardinoR2)
giardinoR3=CUBOID([18.4,4.5,0.1])
giardinoR3=T([1,2,3])([-1.3,-10.5,0.1])(giardinoR3)
giardinoR4=CUBOID([12.8,6,0.1])
giardinoR4=T([1,2,3])([2.1,-14,0.1])(giardinoR4)
# Assemble
giardinoR=STRUCT([giardinoR1,giardinoR2,
giardinoR4])
giardinoR=T([2])(3.4)(giardinoR)
giardinoR3=T([2])(3.4)(giardinoR3)
giardinoRemove=STRUCT([giardinoR,giardinoR3])
giardinoRemove=T(3)(-0.2)(giardinoRemove)
giardinoRemove=COLOR(rgbToPlasmColor([34,139,34 ]))(giardinoRemove)
# Assemble
principale=STRUCT([camera1,camera2,bagno,stireria,scale,soggiorno2,soggiorno3,soggiorno4,cucina
,bagno2,remove,remove2,remove3])
# Apply colors
camino=COLOR(rgbToPlasmColor([240,248,255]))(camino)
scalinata=COLOR(rgbToPlasmColor([153,51,0]))(scalinata)
scalinata2=COLOR(rgbToPlasmColor([229,228,226]))(scalinata2)
parteSup=COLOR(rgbToPlasmColor([123 ,27 ,2 ]))(parteSup)
esterno=COLOR(rgbToPlasmColor([255 ,204,153]))(esterno)
baseEsterno=COLOR(rgbToPlasmColor([255 ,204,153]))(baseEsterno)
tetto=COLOR(rgbToPlasmColor([206,48,24]))(tetto)
giardino=COLOR(rgbToPlasmColor([184 ,115 ,51]))(giardino)
giardino3= COLOR(rgbToPlasmColor([255 ,204,153]))(giardino3)
est3=COLOR(rgbToPlasmColor([147,147,147]))(est3)
# Build the hedge
pianta= CUBOID([0.5,0.5,0.8])
Tp=T(2)(0.6)
piante1=STRUCT(NN(8)([Tp, pianta]))
piante1=T([1,2])([-1.3,-8.2])(piante1)
pianta2=T([1,2])([-1.1,-8.2])(pianta)
pianta3=T([1,2])([-1.0,-8.8])(pianta)
pianta4=T([1,2])([-0.8,-9.4])(pianta)
pianta5=T([1,2])([-0.6,-10])(pianta)
pianta6=T([1,2])([-0.4,-10.6])(pianta)
pianta7=T([1,2])([-0.2,-11.2])(pianta)
pianta8=T([1,2])([0,-11.8])(pianta)
pianta9=T([1,2])([0.2,-12.4])(pianta)
pianta10=T([1,2])([0.6,-13])(pianta)
pianta11=T([1,2])([0.9,-13.6])(pianta)
pianta12=T([1,2])([1.3,-14.2])(pianta)
pianta13=T([1,2])([1.7,-14.8])(pianta)
pianta14=T([1,2])([2.3,-15.4])(pianta)
pianta15=T([1,2])([2.8,-15.6])(pianta)
pianta16=T([1,2])([3.4,-15.8])(pianta)
pianta17=T([1,2])([4.0,-15.8])(pianta)
pianta18=T([1,2])([4.6,-15.6])(pianta)
pianta19=T([1,2])([5.1,-15.4])(pianta)
pianta20=T([1,2])([5.4,-14.9])(pianta)
pianta21=T([1,2])([5.8,-14.4])(pianta)
pianta22=T([1,2])([6.2,-13.8])(pianta)
pianta23=T([1,2])([6.4,-13.2])(pianta)
pianta24=T([1,2])([6.6,-12.6])(pianta)
pianta25=T([1,2])([6.8,-12])(pianta)
pianta26=T([1,2])([7,-11.4])(pianta)
pianta27=T([1,2])([7,-10.6])(pianta)
pianta28=T([1,2])([9.3,-10.6])(pianta)
siepe1=STRUCT([pianta18,pianta19,pianta20,pianta21,pianta22,pianta23,pianta24,pianta25])
siepe2=STRUCT([pianta18,pianta19,pianta20,pianta21,pianta22,pianta23,pianta24,pianta25])
siepe2=T(1)(9)(siepe2)
siepe3=STRUCT([pianta7,pianta8,pianta9,pianta10,pianta11,pianta12,pianta13,pianta14,pianta15,pianta16,pianta17])
siepe4=STRUCT([pianta7,pianta8,pianta9,pianta10,pianta11,pianta12,pianta13,pianta14,pianta15,pianta16,pianta17])
siepe4=T(1)(9.5)(siepe4)
siepe5=STRUCT([pianta5,pianta6,pianta7,pianta8,pianta9])
siepe6=STRUCT([pianta5,pianta6,pianta7,pianta8,pianta9])
siepe6=ROTATE([1,3])(PI)(siepe6)
siepe6=T([1,2,3])([16.6,1,0.8])(siepe6)
siepe7=STRUCT([pianta2,pianta3,pianta4])
siepe8=STRUCT([pianta2,pianta3])
siepe8=ROTATE([1,3])(PI)(siepe8)
siepe8=T([1,2,3])([16.2,0.5,0.8])(siepe8)
siepe=STRUCT([siepe1,siepe2,siepe3,siepe4,siepe5,siepe6,siepe7,siepe8,piante1,
pianta26,pianta27,pianta28])
siepe=COLOR(rgbToPlasmColor([128 ,128,0]))(siepe)
plan1 = STRUCT([principale,scalinata,scalinata2,esterno,baseEsterno,parteSup,giardino,camino,
Mansarda,tetto,giardino3,giardinoRemove,siepe,est3,est4,est5,est6])
#tetto,Mansarda
# Display
VIEW(plan1)
|
cvdlab-alumni/433043
|
2014-05-16/python/exercise1.py
|
exercise1.py
|
py
| 29,897 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13941876090
|
from argparse import ArgumentParser
from sudoku_solver import SudokuSolver
from sudoku import Sudoku
def get_args():
parser = ArgumentParser()
parser.add_argument('--sudoku', required=True)
return parser.parse_args()
def main():
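    # Parse the puzzle file path, solve the puzzle, and print the solved grid.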
args = get_args()
sudoku = Sudoku.from_file(args.sudoku)
solver = SudokuSolver(sudoku)
solved_sudoku = solver.solve()
solved_sudoku.print_sudoku()
if __name__ == '__main__':
main()
|
delabania/sudoku-solver
|
solve.py
|
solve.py
|
py
| 450 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11473045132
|
'''
Terminal
!pip install dash==0.26.5 # The core dash backend
!pip install dash-html-components==0.12.0 # HTML components
!pip install dash-core-components==0.28.0 # Supercharged components
!pip install dash_bootstrap_components==0.13.1
'''
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
from dash import Dash, dcc, html
import plotly.express as px
import pandas as pd
import dash_bootstrap_components as dbc
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# create dash
app = Dash(__name__)
colors = {
'background': '#FFFFFF',
'text': '#288CC2'
}
### bar chart example
df = pd.DataFrame({
"Fruit": ["Apples", "Oranges", "Bananas", "Apples", "Oranges", "Bananas"],
"Amount": [4, 1, 2, 2, 4, 5],
"City": ["SF", "SF", "SF", "Montreal", "Montreal", "Montreal"]
})
fig = px.bar(df, x="Fruit", y="Amount", color="City", barmode="group")
fig.update_layout(
#plot_bgcolor=colors['background'],
#paper_bgcolor=colors['background'],
font_color=colors['text']
)
### scatter plot example
df2 = pd.read_csv('https://gist.githubusercontent.com/chriddyp/5d1ea79569ed194d432e56108a04d188/raw/a9f9e8076b837d541398e999dcbac2b2826a81f8/gdp-life-exp-2007.csv')
fig2 = px.scatter(df2, x="gdp per capita", y="life expectancy",
size="population", color="continent", hover_name="country",
log_x=True, size_max=60)
fig2.update_layout(
font_color=colors['text'])
### violin plot example 1
df3 = pd.DataFrame(
{'x':np.tile(['no', 'yes'], 80000),
'y':np.random.normal(0, 1, 160000),
'cl':np.repeat([0, 1], 80000)
}
)
fig3 = px.violin(df3, x="x", y="y", color='cl', box=True, hover_data=df3.columns)
fig4 = px.violin(df3, y="y", color='cl',
violinmode='overlay', # draw violins on top of each other
# default violinmode is 'group' as in example above
hover_data=df3.columns)
### violin plot example 2
df4 = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/violin_data.csv")
fig5 = go.Figure()
fig5.add_trace(go.Violin(x=df4['day'][ df4['smoker'] == 'Yes' ],
y=df4['total_bill'][ df4['smoker'] == 'Yes' ],
legendgroup='Yes', scalegroup='Yes', name='Yes',
side='negative',
line_color='blue')
)
fig5.add_trace(go.Violin(x=df4['day'][ df4['smoker'] == 'No' ],
y=df4['total_bill'][ df4['smoker'] == 'No' ],
legendgroup='No', scalegroup='No', name='No',
side='positive',
line_color='orange')
)
fig5.update_traces(meanline_visible=True) # orientation='h' -> horizontal
fig5.update_layout(violingap=0, violinmode='overlay')
### subplot example
df5 = px.data.iris()
fig6 = make_subplots(rows=1,
cols=2,
subplot_titles=[
'Fruit', # 1. subplot title
'City' # 2. subplot title
])
fig6.add_trace(go.Bar(x=df['Fruit'], y=df['Amount']),row=1, col=1)
fig6.add_trace(go.Bar(x=df['City'], y=df['Amount'], text=df['Amount'],
textposition='auto',), row=1, col=2)
fig6.update_layout(title='Count', title_x=0.5)
# set the web layout
app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[
html.H1(
children='Hello Dash',
style={
'textAlign': 'center',
'color': colors['text']
}
),
html.Div(children='Dash: A web application framework for your data.', style={
'textAlign': 'center',
'color': colors['text']
}),
dcc.Graph(
id='example-graph-1',
figure=fig
),
dcc.Graph(
id='example-graph-2',
figure=fig2
),
dcc.Graph(
id='example-graph-3',
figure=fig3
),
dcc.Graph(
id='example-graph-5',
figure=fig5
),
dcc.Graph(
id='example-graph-6',
figure=fig6
),
])
if __name__ == '__main__':
app.run_server(debug=True)
|
hsyho11/python-plotly-dash
|
plotly_example.py
|
plotly_example.py
|
py
| 4,180 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12170535231
|
# Created on 24 September 2019
from square import Square, getScaledFont
from random import randint
from math import cos, sin, pi, atan, copysign
from pygame.mixer import *
from pygame.draw import rect
from pygame.locals import *
from pygame.time import Clock
from pygame.display import update
from pygame.mouse import get_pos
class GameDriver:
def __init__(self, dim, w):
self.dim = dim
self.w = w
self.squares = []
self.vals = []
        # Maps (x, y) -> ((dx, dy) slide vector in board cells, tile surface)
self.slides = {}
self.slide_duration = 300
self.v = (0, 0)
self.score = 0
self.prev_score = 0
for y in range(dim[1]):
row = []
val = []
for x in range(dim[0]):
row.append(None)
val.append(-1)
self.squares.append(row)
self.vals.append(val)
def drawBoard(self, display):
for y, row in enumerate(self.squares):
for x, s in enumerate(row):
if s != None:
display.blit(s.surface, (x * self.w, y * self.w))
else:
rect(display, (0, 0, 0), (x * self.w, y * self.w, self.w, self.w))
rect(display, (255, 255, 255), (x * self.w, y * self.w, self.w, self.w), 2)
update()
def move(self, display, undo):
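        # Animate every queued slide over self.slide_duration ms; when undo is
        # True, play the slides in reverse and restore the values saved in self.vals.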
if len(self.slides) == 0:
return
if undo:
self.score = self.prev_score
for y, (row1, row2) in enumerate(zip(self.squares, self.vals)):
for x, (s, val) in enumerate(zip(row1, row2)):
if val == -1 and s != None:
self.squares[y][x] = None
elif val != -1 and s == None:
self.squares[y][x] = Square(val, self.w)
elif val != -1 and s != None:
self.squares[y][x].changeVal(val)
updates = 20
for i in range(updates):
for x, y in self.slides.keys():
v, surface = self.slides[(x, y)]
xf, yf = x + v[0], y + v[1]
if undo:
v = (-v[0], -v[1])
x, xf = xf, x
y, yf = yf, y
v = (v[0] * self.w, v[1] * self.w)
x1, y1 = x * self.w, y * self.w
dx, dy = v[0] * i / updates, v[1] * i / updates
rect(display, (0, 0, 0), (x1 + dx, y1 + dy, self.w, self.w))
dx, dy = v[0] * (i + 1) / updates, v[1] * (i + 1) / updates
display.blit(surface, (x1 + dx, y1 + dy))
update()
Clock().tick(updates * 1000 / self.slide_duration)
self.drawBoard(display)
def addSquares(self, display):
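        # Spawn up to two new "2" tiles on randomly chosen empty cells.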
nones = []
for y, row in enumerate(self.squares):
for x, s in enumerate(row):
if s == None:
nones.append((x, y))
for i in range(min(len(nones), 2)):
idx = randint(0, len(nones) - 1)
x, y = nones[idx]
s = Square(2, self.w)
self.squares[y][x] = s
display.blit(s.surface, (x * self.w, y * self.w))
nones.pop(idx)
update()
def lost(self):
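        # The game is lost when the board is full and no tile has an
        # equal-valued orthogonal neighbor to merge with.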
for y, row in enumerate(self.squares):
for x, s in enumerate(row):
if s == None:
return False
else:
adjacent = []
for delta in ((-1, 0), (1, 0), (0, -1), (0, 1)):
x1, y1 = x + delta[0], y + delta[1]
in_range = 0 <= x1 < self.dim[0] and 0 <= y1 < self.dim[1]
if in_range and self.squares[y1][x1] != None:
adjacent.append(self.squares[y1][x1].val)
if s.val in adjacent:
return False
return True
def updateScore(self, display, score_rect):
font = getScaledFont("Times New Roman", (score_rect.w, score_rect.h), str(self.score))
text = font.render(str(self.score), 1, (255, 255, 255))
text_rect = text.get_rect(center=(score_rect.centerx, score_rect.centery))
rect(display, (0, 0, 0), text_rect)
display.blit(text, text_rect)
def run(self, display, events, undo_rect, score_rect):
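        # Process one batch of events: an undo click replays the last move in
        # reverse; an arrow key slides and merges tiles, spawns new squares,
        # and updates the score. Returns True when the game is lost.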
for e in events:
if e.type == MOUSEBUTTONUP and e.button == BUTTON_LEFT and \
undo_rect.collidepoint(get_pos()):
self.move(display, True)
self.slides.clear()
elif e.type == KEYUP:
if e.key == K_LEFT:
self.v = (-1, 0)
elif e.key == K_RIGHT:
self.v = (1, 0)
elif e.key == K_UP:
self.v = (0, -1)
elif e.key == K_DOWN:
self.v = (0, 1)
else:
continue
self.slides.clear()
self.prev_score = self.score
move_x = self.v[0] != 0
is_neg = -1 in self.v
idx = 0 if move_x else 1
lb = 0 if is_neg else -abs(self.dim[idx] * self.v[idx]) + 1
ub = abs(self.dim[idx] * self.v[idx]) if is_neg else 1
blanks = []
merges = []
prev = []
for v1 in range(lb, ub):
for v2 in range(self.dim[1 - idx]):
if len(blanks) <= v2:
blanks.append(0)
prev.append(0)
merges.append(0)
x = abs(v1) if move_x else v2
y = v2 if move_x else abs(v1)
s = self.squares[y][x]
self.vals[y][x] = -1 if s == None else s.val
if s == None:
blanks[v2] += 1
else:
offset = blanks[v2] + merges[v2]
dx, dy = self.v[0] * offset, self.v[1] * offset
last_val = prev[v2]
prev[v2] = s.val
if last_val == s.val:
offset += 1
dx1, dy1 = self.v[0] * offset, self.v[1] * offset
self.slides[(x, y)] = ((dx1, dy1), s.surface)
self.squares[y + dy1][x + dx1].upgrade()
self.squares[y][x] = None
prev[v2] = 0
merges[v2] += 1
self.score += s.val * 2
elif offset != 0:
self.slides[(x, y)] = ((dx, dy), s.surface)
self.squares[y + dy][x + dx] = s
self.squares[y][x] = None
self.move(display, False)
self.addSquares(display)
if sum(merges) >= 3:
music.load("bomb.mp3")
music.play()
self.updateScore(display, score_rect)
return self.lost()
|
AaronOrenstein210/2048
|
gameDriver.py
|
gameDriver.py
|
py
| 7,331 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33040338091
|
import sys
# the grid can be large, so raise the recursion limit for the recursive DFS
sys.setrecursionlimit(10**6)

n, m = map(int, input().split())
graph = []
arr = []
cnt = 0
for _ in range(n):
    graph.append(list(map(int, input())))

# flood fill: zero out the connected region of 1s containing (x, y),
# accumulating its size in the global cnt
def dfs(x, y):
    global cnt
    if x < 0 or y < 0 or x >= n or y >= m:
        return False
    if graph[x][y] == 1:
        cnt += 1
        graph[x][y] = 0
        dfs(x-1, y)
        dfs(x, y-1)
        dfs(x+1, y)
        dfs(x, y+1)
        return True
    return False

result = 0
for i in range(n):
    for j in range(m):
        cnt = 0  # reset before exploring a new region
        if dfs(i, j) == True:
            arr.append(cnt)
            result += 1

# BOJ 1926 prints the number of paintings first, then the largest area (0 if none)
print(result)
print(max(arr) if arr else 0)
|
ParanMoA/SelfSoftware
|
ShinTIL/2023.01.19/1926.py
|
1926.py
|
py
| 584 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7209449045
|
import pandas as pd
def distance_in_yards(object_size_actual,object_size_mils):
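    # Mil-ranging: a 1-mil angle subtends 1/1000 of the range, and 1 yard is 36 in,
    # so distance (yd) = size (in) * (1000/36 ~= 27.8) / size (mils).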
try:
float(object_size_actual) and float(object_size_mils)
except ValueError:
return "Please enter a valid number."
object_distance_yards = (float(object_size_actual)*27.8)/float(object_size_mils)
return round(object_distance_yards,2)
def correction_moa(correction_inches_seen,known_distance):
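    # Uses the shooter's approximation 1 MOA ~= 1 inch per 100 yards,
    # so required MOA = observed correction (in) / (distance / 100).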
try:
float(correction_inches_seen) and float(known_distance)
except ValueError:
return "Please enter a valid number."
correction_required = float(correction_inches_seen)/(float(known_distance)/100)
return round(correction_required,2)
def wind_correction(range_to_target,windspeed):
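    # Wind hold: MOA = (range in hundreds of yards * wind speed in mph) / constant,
    # where the constant shrinks with range (15 out to 500 yd, down to 11 at 1000 yd).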
try:
float(range_to_target) and float(windspeed)
except ValueError:
return "Please enter a valid number."
if float(range_to_target) <= 500:
wind_correction_factor = ((float(range_to_target)/100)*float(windspeed))/15
return wind_correction_factor
if 500 < float(range_to_target) <= 600:
wind_correction_factor = ((float(range_to_target)/100)*float(windspeed))/14
return wind_correction_factor
if 600 < float(range_to_target) <= 800:
wind_correction_factor = ((float(range_to_target)/100)*float(windspeed))/13
return wind_correction_factor
if 800 < float(range_to_target) <= 900:
wind_correction_factor = ((float(range_to_target)/100)*float(windspeed))/12
return wind_correction_factor
if 900 < float(range_to_target) <= 1000:
wind_correction_factor = ((float(range_to_target)/100)*float(windspeed))/11
return wind_correction_factor
else:
return "Shot is too far for windage rule"
def hw_tw_correction (range_to_target_hw_tw,windspeed_hw_tw,hw_or_tw):
try:
float(range_to_target_hw_tw) and float(windspeed_hw_tw)
except ValueError:
return "Please enter a valid number."
hw_tw_correction_final = ((float(range_to_target_hw_tw)/100)*float(windspeed_hw_tw))/4
return round(hw_tw_correction_final,2)
def new_range_zero(sight_height,known_zero_range,desired_zero_range,bullet_type):
try: float(sight_height)
except ValueError:
return "Please enter a valid number"
df_ballistics_chart = pd.read_excel(r"C:\Users\bnofi\OneDrive\Desktop\Long_range_shooting\hornady_excel_document.xlsx")
bullet_drop_known_zero = df_ballistics_chart.query('BULLET == @bullet_type')[known_zero_range]
bullet_drop_desired_zero = df_ballistics_chart.query('BULLET == @bullet_type' )[desired_zero_range]
sight_adjustment = (float(bullet_drop_known_zero) - float(sight_height)) + (float(known_zero_range)/float(desired_zero_range)) * (float(sight_height) - (float(bullet_drop_desired_zero)))
return round(sight_adjustment,2)
def new_range_zero_moa(sight_height_moa,known_zero_range_moa,desired_zero_range_moa,bullet_type_moa):
try: float(sight_height_moa)
except ValueError:
return "Please enter a valid number"
df_ballistics_chart = pd.read_excel(r"C:\Users\bnofi\OneDrive\Desktop\Long_range_shooting\hornady_excel_document.xlsx")
bullet_drop_known_zero_moa = df_ballistics_chart.query('BULLET == @bullet_type_moa')[known_zero_range_moa]
bullet_drop_desired_zero_moa = df_ballistics_chart.query('BULLET == @bullet_type_moa' )[desired_zero_range_moa]
a_one = 95.493*((float(sight_height_moa)-(float(bullet_drop_known_zero_moa)))/float(known_zero_range_moa))
a_two = 95.493*((float(sight_height_moa)-(float(bullet_drop_desired_zero_moa)))/float(desired_zero_range_moa))
final_moa_correction = float(a_two) - float(a_one)
return round(final_moa_correction,2)
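# Example (hypothetical numbers): a 36-inch target that measures 2.0 mils
# in the reticle ranges at 36 * 27.8 / 2 = 500.4 yards:
#   distance_in_yards(36, 2)  # -> 500.4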
|
brandon10135/sportshootingrules
|
shooter_calcs.py
|
shooter_calcs.py
|
py
| 3,793 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12830919470
|
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
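        # Head-insertion: pop each node off the input list and push it onto
        # the front of a dummy-headed result list, reversing the order.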
dummy = ListNode()
while head:
cur = head
head = head.next
cur.next = dummy.next
dummy.next = cur
return dummy.next
|
theRobertSan/LeetCode-Solutions-Python
|
206.py
|
206.py
|
py
| 419 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29310319456
|
from random import randint
print("Welcome to the Number Guessing Game!")
print("I'm thinking of a number between 1 and 100.")
# Select 'hard' for 5 guesses, 'easy' for 10.
def guessGame():
number2Guess = randint(1,100)
guessed = False
global lives
lives = setLives()
#game begins
while guessed == False:
        print(f"You have {lives} attempts remaining to guess the number.")
        guessed = howClose(int(input("Make a guess: ")), number2Guess)  # True once the number is guessed
        # end the round when the player runs out of attempts
        if not guessed and lives == 0:
            print(f"You've run out of guesses. The number was {number2Guess}.")
            break
again = input("Would you like to play again? 'y' or 'n': ")
if again == 'y':
guessGame()
else:
print("Game over")
def setLives():
difficulty = input("Choose a difficulty. Type 'easy' or 'hard': ")
if difficulty == 'easy':
lives = 10
return lives
elif difficulty == 'hard':
lives = 5
return lives
    else:
        return setLives()  # invalid input: prompt again rather than restarting the game
def howClose(guess,hiddenNumber):
global lives
if guess == hiddenNumber:
print(f"Congratulations the answer was {hiddenNumber}")
return True
elif guess < hiddenNumber:
print("Too low")
lives-=1
return False
elif guess > hiddenNumber:
print("Too high")
lives-=1
return False
guessGame()
|
RoccoPic/100-Days-of-Code
|
Day-12/numberGuessingGame.py
|
numberGuessingGame.py
|
py
| 1,337 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73924507386
|
import gzip
# Processing of the two-hop subgraph: first filter out entities that appear more than
# 20k times and relations that appear fewer than 50 times; then apply a 15-core-style
# sampling, again keeping only relations that appear more than 50 times.
if __name__ == "__main__":
item_dict = {}
rela_dict = {}
print('start statistics')
with gzip.open('../doc/origin_graph_step2.txt.gz', 'rb') as f:
for num, line in enumerate(f):
line = line.strip()
triplet = line.decode().split()
# print(triplet)
            # head entity
item_dict[triplet[0]] = 1 if triplet[0] not in item_dict else item_dict[triplet[0]] + 1
            # tail entity
item_dict[triplet[2]] = 1 if triplet[2] not in item_dict else item_dict[triplet[2]] + 1
            # relation
rela_dict[triplet[1]] = 1 if triplet[1] not in rela_dict else rela_dict[triplet[1]] + 1
if num % 100000 == 0:
print(num)
print('start filter')
filter_list = []
with gzip.open('../doc/origin_graph_step2.txt.gz', 'rb') as f:
for num, line in enumerate(f):
line = line.strip()
triplet = line.decode().split()
            # high-frequency filter: keep entities seen < 20k times and relations seen > 50 times
if item_dict[triplet[0]] < 20000 and item_dict[triplet[2]] < 20000 and rela_dict[triplet[1]] > 50:
filter_list.append(triplet)
if num % 100000 == 0:
print(num)
    # recount entities and relations after the first pass
item_dict.clear()
rela_dict.clear()
for triplet in filter_list:
        # head entity
item_dict[triplet[0]] = 1 if triplet[0] not in item_dict else item_dict[triplet[0]] + 1
        # tail entity
item_dict[triplet[2]] = 1 if triplet[2] not in item_dict else item_dict[triplet[2]] + 1
        # relation
rela_dict[triplet[1]] = 1 if triplet[1] not in rela_dict else rela_dict[triplet[1]] + 1
    # filter entities (keep those seen more than 15 times) and rare relations
    res_file = open('../doc/graph_step2.txt', 'w', encoding='utf-8')
    for triplet in filter_list:
        if item_dict[triplet[0]] > 15 and item_dict[triplet[2]] > 15 and rela_dict[triplet[1]] > 50:
            tri_str = triplet[0] + ' ' + triplet[1] + ' ' + triplet[2] + '\n'
            res_file.write(tri_str)
    res_file.close()
    # count relations
# rela_dict.clear()
# for triplet in filter2_list:
# rela_dict[triplet[1]] = 1 if triplet[1] not in rela_dict else rela_dict[triplet[1]] + 1
    # second relation-filtering pass
# res_file = open('../doc/graph_step2.txt', 'w', encoding='utf-8')
# for num, line in enumerate(filter2_list):
# # triplet = line.split()
# triplet = line
# if rela_dict[triplet[1]] > 50:
# # filter2_list.append(triplet)
# tri_str = triplet[0] + ' ' + triplet[1] + ' ' + triplet[2] + '\n'
# res_file.write(tri_str)
|
icecream-and-tea/labs_web
|
lab2/lab2_stage1/src/filter2.py
|
filter2.py
|
py
| 2,811 |
python
|
en
|
code
| 2 |
github-code
|
6
|
3822252154
|
import hp_items as hpi
import hp_classes as hpc
import random
import time
player_options = ['chaser', 'beater', 'keeper', 'seeker']
test_team_1 = {'chaser': [100, 150, 200],
'beater': [175, 125],
'keeper': [100, 150, 200],
'seeker': [13]}
test_team_2 = {'chaser': [100, 150, 200],
'beater': [135, 165],
'keeper': [100, 150, 200],
'seeker': [7]}
def quidditch_match(team1, team2):
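    # Simulate rounds of play until a seeker catches the snitch: each round
    # pairs a random position from each team; goals are worth 10 points and
    # the snitch 150.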
global player_options
in_progress = True
team1_score = 0
team1_snitch_score = 0
team2_score = 0
team2_snitch_score = 0
offense = team2
defense = team1
while in_progress:
player1 = random.choice(player_options)
player2 = random.choice(player_options)
# seekers in play
if player1 == 'seeker' or player2 == 'seeker':
seeker_match = random.randrange(0,31)
if team1['seeker'][0] == seeker_match:
print(f'''
***
Team 1's seeker has caught the snitch!
''')
team1_snitch_score += 150
in_progress = False
break
elif team2['seeker'][0] == seeker_match:
print(f'''
***
Team 2's seeker has caught the snitch!
''')
team2_snitch_score += 150
in_progress = False
break
else:
print(f'''
***
One of the seekers has seen the snitch!
The Team1 and Team2 seekers race towards a glint
of gold in the air, but it quickly disappears.
Play resumes.
''')
# chaser v. keeper
elif player1 == 'chaser' and player2 == 'keeper':
player1_value = random.choice(team1[player1])
player2_value = random.choice(team2[player2])
if player1_value >= player2_value:
team1_score += 10
print(f'''
***
Team1's chaser throws the quaffle past the keeper
and scores!
Team1's score: {team1_score}
''')
elif player2_value > player1_value:
print(f'''
***
Team1's chaser throws the quaffle, but the Team2
keeper makes an excellent save!
Play resumes.
''')
        elif player2 == 'chaser' and player1 == 'keeper':
            player1_value = random.choice(team1[player1])
            player2_value = random.choice(team2[player2])
            if player2_value >= player1_value:
                team2_score += 10
print(f'''
***
Team2's chaser throws the quaffle past the keeper
and scores!
Team2's score: {team2_score}
''')
elif player1_value > player2_value:
print(f'''
***
Team2's chaser throws the quaffle, but the Team1
keeper makes an excellent save!
Play resumes.
''')
# beaters.
if player1 == 'beater' or player2 == 'beater':
player1_value = random.choice(team1[player1])
player2_value = random.choice(team2[player2])
# team1 beater
if player1 == 'beater' and player1_value >= player2_value:
team1_score += 10
print(f'''
***
Team1's beater knocks a bludger toward Team2's
{player2}. The bludger knocks the {player2} off
track, allowing Team1's chaser to score a goal!
Team1's score: {team1_score}
''')
elif player1 == 'beater' and player2_value > player1_value:
team2_score += 10
print(f'''
***
Team1's beater tries to knock Team2 off course,
but the referee calls a foul on the play. During
the foul shot, Team2 scores!
Team2's score: {team2_score}
''')
elif player2 == 'beater' and player2_value >= player1_value:
team2_score += 10
print(f'''
***
Team2's beater knocks a bludger toward Team1's
                {player1}. The bludger knocks the {player1} off
track, allowing Team2's chaser to score a goal!
Team2's score: {team2_score}
''')
else:
team1_score += 10
print(f'''
***
Team2's beater tries to knock Team1 off course,
but the referee calls a foul on the play. During
the foul shot, Team1 scores!
Team1's score: {team1_score}
''')
else:
print(f'''
***
Player 1: {player1}
Player 2: {player2}
''')
    # determine the winner (snitch catches count toward the final total)
    team1_total = team1_score + team1_snitch_score
    team2_total = team2_score + team2_snitch_score
    if team1_total > team2_total:
        winner = 'Team1'
    elif team2_total > team1_total:
        winner = 'Team2'
    else:
        winner = 'Tied game!'
print(f'''
***
End result:
Team1: {team1_score + team1_snitch_score}
Team2: {team2_score + team2_snitch_score}
Winner: {winner}
''')
return [team1_score, team2_score]
game_1 = quidditch_match(test_team_1, test_team_2)
hpc.house_points['Gryffindor'] += game_1[0]
hpc.house_points['Slytherin'] += game_1[1]
print(hpc.display_points())
|
meganmonaghan/Harry-Potter-Emulator
|
quidditch_test.py
|
quidditch_test.py
|
py
| 4,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29924772061
|
"""
Author: JW
Date: 07/26/2023
Module Name: picture_capture_controls_uplink.py
Description:
This Python script is part of an image processing and classification application.
It provides various functions for interacting with images, databases, and user stacks.
The script includes functionalities such as simulating image classification, checking classification progress, and updating image labels in a database.
It is designed to work with Anvil, tkinter, multiprocessing, and PIL (Python Imaging Library) libraries.
Functions:
- `open_file_explorer`: Opens a file explorer dialog for selecting directories.
- `classify_images_simulate`: Simulates image classification and stores results in a database.
- `start_classifier_build`: Initiates the image classification process, handling new or existing image stacks.
- `check_classifier_progress`: Monitors the progress of image classification and retrieves completed labels and images.
- `submit_labels_to_db`: Handles the submission of labels to a database, updates labels, and moves files based on labels.
For detailed information on each function's purpose and usage, please refer to the function definitions and comments within the script.
"""
from time import sleep
import random
import json
import uuid
import multiprocessing
from PIL import Image
import anvil.media
import os
import io
import shutil
# Uplink imports:
try:
import utils.mySQL_utils as localSQL
from uplink_scripts.stack import Stack
# Local host imports
except (ModuleNotFoundError) as mod_err:
print("Trying local host imports in picture_capture_controls.py")
from ..utils import mySQL_utils as localSQL
from .stack import Stack
# NOTE: When running from a docker container, we will be unable to import tkinter:
try:
import tkinter as tk
from tkinter import filedialog
except(ImportError) as err:
print("Unable to import tkinter")
# Set up our stack:
image_stack = Stack()
def open_file_explorer():
"""
Opens a file explorer navigator for the user to select the source and / or destination directory.
Returns str(file_path)
*Depending on when function is called, file_path could be either the source or destination dir.
"""
try:
root = tk.Tk()
root.withdraw()
file_path = filedialog.askdirectory()
if not file_path:
file_path = "N/A"
root.destroy()
return file_path
except (Exception) as err:
print("tikinter not installed...returning empty path")
return ""
def classify_images_simulate(image_full_path, img_name_list, job_id):
""" Test function to simulate classify_images()
1. sleep 5 seconds
2. randomly pick a class
3. write result and job id to data-table
"""
labels = ["Cotton", "Plastic", "HID", "Tray", "Other"]
cnx = localSQL.sql_connect()
for index, img in enumerate(image_full_path):
# 1 sleep
sleep(10)
# 2 randomly select a label
rand = random.randint(0, 4)
label = labels[rand]
# write label & job_id to data-table:
img_name = img_name_list[index]
insert_query = f"INSERT INTO anvil_imgProcessor (job_id, img_name, img_label) VALUES ('{job_id}','{img_name}','{label}')"
localSQL.sql_insert(cnx, insert_query)
# Close db connection
localSQL.sql_closeConnection(cnx)
print("Finished classyfing")
def start_classifier_build(json_data):
"""
json_data: {image_path, num_images}
"""
# convert json dict to python dict
python_dict_classifier = json.loads(json_data)
# Unpack the dictionary:
page_num = python_dict_classifier.get("page_num")
user_id = python_dict_classifier.get("user_id")
num_images = python_dict_classifier.get("num_images")
file_path = python_dict_classifier.get("file_path_src")
# IF user wants to grab previous images (back_button press or jump_to_page) -> "pop" images from stack, ELSE get new images
try:
# Try getting images from the users stack using page_num as the list index.
labels, img_names, images, update_database = image_stack.pop(user_id, page_num)
# If the number of images retrieved == to number of images user currently wants to retrieve, return the images:
if (int(num_images) == len(img_names)):
print(f"Retrieved previous images for page {page_num}")
return images, labels, img_names, update_database
# If the user changed the number of images to display on each page -> reset stack and grab new images.
else:
print(f"Number of images changed... reseting users stack")
# TODO: If user changed the number of images to grab, reset the users stack:
image_stack.reset_stack(user_id)
# If we get a KeyError or IndexError -> grab new images from directory.
except (KeyError, IndexError) as err:
print(f"{err}: Grabbing new images for page {page_num}")
# Set up a job ID:
job_id = str(uuid.uuid4())
job_id = job_id.replace("-", "")
try:
        # NOTE: with large n we may want to read only a subset of all images
all_files_in_dir = os.listdir(file_path)
# Filter to select only image files:
all_images = [file for file in all_files_in_dir if file.endswith(".jpg") or file.endswith(".png")]
except (Exception) as e:
print("Could not access directory")
return None
num_images_found = len(all_images)
# Check to make sure images were found in the directory:
if(num_images_found == 0):
print("Dir does not contain any images")
return None, None, None, None
# If for whatever reason the directory has < 10 images -> grab all found images
if(num_images_found < int(num_images)):
# Randomly select n images:
rand_n_imgs = random.sample(all_images, int(num_images_found))
        # now that we've selected our images, let's move them to a separate folder so that they are not re-used
else:
# Randomly select n images:
rand_n_imgs = random.sample(all_images, int(num_images))
        # now that we've selected our images, let's move them to a separate folder so that they are not re-used
    # Establish connection to the database:
cnx = localSQL.sql_connect()
# Write job ID to anvil_img_Classifier data-table:
insert_query = f"INSERT INTO anvil_imgProcessor (job_id) VALUES ('{job_id}')"
localSQL.sql_insert(cnx, insert_query)
#Close connection to the database:
localSQL.sql_closeConnection(cnx)
imgs_full_path, img_name_list = [], []
# Loop accomplishes two things:
# 1) Creates the full image path for each randomly selected image
# 2) Reads in the image and converts to anvil.BlobMedia
for image in rand_n_imgs:
# Get the full image path
img_full_path = file_path + "/" + image
# Keep track of all the img paths
imgs_full_path.append(img_full_path)
img_name_list.append(image)
##############
# NOTE: SPAWN new process here:
#classify_images(imgs_full_path, job_id)
##############
process = multiprocessing.Process(target=classify_images_simulate, args=(imgs_full_path, img_name_list, job_id))
# Start the process
process.start()
return job_id
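# Hedged example (not part of the original file) of the JSON payload
# start_classifier_build() expects; key names are inferred from the .get()
# calls above, values are purely illustrative:
#
#   example_payload = json.dumps({
#       "page_num": 0,
#       "user_id": "demo-user",
#       "num_images": "10",
#       "file_path_src": "/path/to/unlabeled/images",
#   })
#   job_id = start_classifier_build(example_payload)  # spawns the classifier process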
def check_classifier_progress(json_data):
"""
This function will be called every n seconds once timer reaches 0...
1. Every n seconds go out and check database to see how many images / n are ready
        1a. if fewer than n images are done, return % finished and update progress bar.
        1b. if all n images are done, retrieve labels and set flag HIGH indicating we are ready to display the images to the user
"""
    MAX_STACK_HEIGHT = 50 # Starting with 50, could be increased... (MAX_STACK_HEIGHT*num_images) = # of elements in each stack
# convert json dict to python dict
python_dict_classifier = json.loads(json_data)
# Unpack the dictionary:
user_id = python_dict_classifier.get("user_id")
num_images = python_dict_classifier.get("num_images")
job_id = python_dict_classifier.get("job_id")
file_path = python_dict_classifier.get("file_path_src")
# Check database using job_id to see how many images are ready.
    # Establish connection to the database:
cnx = localSQL.sql_connect()
# Create a cursor
cursor = cnx.cursor()
search_query = f"SELECT * FROM anvil_imgProcessor WHERE job_id = ('{job_id}')"
cursor.execute(search_query)
rows = cursor.fetchall()
# Close the connection
cnx.close()
num_rows_ready = len(rows)
print(num_rows_ready)
img_labels_list, img_name_list, img_list = [], [], []
img_labels_dict = {}
    if(num_rows_ready == (int(num_images) + 1)):
done_classifying = True # Set our flag to true
        pct_ready = 100
# Once images are done get the assigned labels:
for row in rows:
# Get the assigned label for each image:
img_labels_list.append(row[-1])
img_name_list.append(row[-2])
# Delete the first element of each list (first element has NULL label and img name values)
del img_labels_list[0]
del img_name_list[0]
# Store key-value pair (img_name: label) in dict data-structure
for i in range(len(img_name_list)):
img_labels_dict[img_name_list[i]] = img_labels_list[i]
# Using the image name and file path, import the image to type anvil.BlobMedia
# Get the full image path
img_full_path = file_path + "/" + img_name_list[i]
# Retrieve our image using PIL
pil_img = Image.open(img_full_path)
# resize image to 1280 x 960
resized_image = pil_img.resize((960,720))
bs = io.BytesIO()
# Convert to bytes:
resized_image.save(bs, format="png")
            # Convert to type anvil.BlobMedia so that we can display it for the client
anvil_image = anvil.BlobMedia("image/png", bs.getvalue(), name="cotton")
img_list.append(anvil_image)
print(img_labels_list)
print(img_labels_dict)
# Set-up the "stack" here:
# Pythonic: If user does not have a stack created, create one
try:
print(f"Adding images for user {user_id} to stack...")
image_stack.push(user_id,
img_labels_dict,
img_name_list,
img_list)
except (KeyError) as ke:
print("No ID found!")
print(f"Creating stack for user: {user_id} ")
image_stack.init_user(user_id,
img_labels_dict,
img_name_list,
img_list)
#Check length of stack if stack is > max_len --> start removing elements
try:
stack_height = image_stack.size(user_id)
if(stack_height > MAX_STACK_HEIGHT):
print(f"Users stack reached max height of {MAX_STACK_HEIGHT}, Removing first element...")
# Delete first [0] from stack
image_stack.delete_element(user_id)
except (KeyError) as err:
print(f"Unable to get height of users stack: {err}")
return [done_classifying, pct_ready, img_labels_dict, img_name_list, img_list]
else:
done_classifying = False
        pct_ready = ((num_rows_ready - 1) / int(num_images)) * 100
return [done_classifying, pct_ready, False, False, False]
def submit_labels_to_db(json_data):
"""
Retrieves images from src directory, runs through classifier, adds images to users stack, and returns images and labels.
Function Outline:
1. Unpack JSON data
    2. Determine if retrieving previously used images, or grabbing new images from the directory.
        Using a try / except statement that raises an IndexError if the index (page_num) is not valid (i.e. grab new images then)
3. Access the source directory (file_path) and randomly selected num_images_to_get from directory.
4. Convert each image to type Anvil.BlobMedia so that we can display them in a Canvas component.
4a. TEMPORARY: assign image a "dummy" label of either HID or Cotton
4b. TODO: ADD in classifers to replace "dummy" labels
5. Check if user already has a stack made for them, if not create one using user_id
5a. Add images to already made or newly created user stack
6. Check if MAX_STACK_HEIGHT has been exceeded, if so remove first entry from stack.
    7. Return the images (img_list), img_labels (img_label_dict), img names (img_name_list), and update_database BOOLEAN indicator
"""
#Extract our json data into a python dict
python_dict = json.loads(json_data)
processed_dir = python_dict.get("file_path_dst")
#processed_dir = "/home/pi/Desktop/Jon_workspace/Anvil/processed_images" # NOTE: ONLY USED FOR TESTING (REMOVE FOR DEPLOYMENT)
# Create the destination directory if it doesn't exist
if not os.path.exists(processed_dir):
os.makedirs(processed_dir)
keys_list = []
# Unpack the dict:
classifier_labels = python_dict.get("original_labels")
#human modified labels
modified_labels = python_dict.get("modified_labels")
selected_folder = python_dict.get("selected_folder")
page_num = python_dict.get("page_num")
user_id = python_dict.get("user_id")
use_sub_folders = python_dict.get("proc_sub_folders")
#If user manually specified the path, enter:
if(selected_folder == "dir"):
# Add the user modified labels to their stack:
try:
print(f"Adding modified labels for user {user_id} to stack...")
image_stack.push(user_id,
user_labels=modified_labels)
except (KeyError) as ke:
print("No ID found!")
print(f"Creating modified labels stack for user: {user_id} ")
image_stack.init_user(user_id,
user_labels=modified_labels)
file_path = python_dict.get("file_path_src")
#file_path = "/home/pi/Desktop/Jon_workspace/Anvil/Cotton" # NOTE: ONLY USED FOR TESTING (REMOVE FOR DEPLOYMENT)
# Check if we need to set up sub-folders:
if(use_sub_folders):
print("Setting up sub folders")
# Set-up sub-folders for the processed images
proc_cotton_dir = processed_dir + "/cotton"
proc_tray_dir = processed_dir + "/tray"
proc_plastic_dir = processed_dir + "/plastic"
proc_hid_dir = processed_dir + "/HID"
proc_other_dir = processed_dir + "/other"
proc_mislabeled_dir = processed_dir + "/mislabeled"
# Create the destination directory if it doesn't exist
if not os.path.exists(proc_cotton_dir):
os.makedirs(proc_cotton_dir)
if not os.path.exists(proc_tray_dir):
os.makedirs(proc_tray_dir)
if not os.path.exists(proc_plastic_dir):
os.makedirs(proc_plastic_dir)
if not os.path.exists(proc_hid_dir):
os.makedirs(proc_hid_dir)
if not os.path.exists(proc_other_dir):
os.makedirs(proc_other_dir)
if not os.path.exists(proc_mislabeled_dir):
os.makedirs(proc_mislabeled_dir)
# get all the keys (image names)
for key in classifier_labels:
keys_list.append(key)
        # Next, establish connection to the database:
cnx = localSQL.sql_connect()
# Loop through each key(image name) and add to correct db column
for key in range(len(keys_list)):
image_name = keys_list[key]
orginal_label = classifier_labels[keys_list[key]]
corrected_label = modified_labels[keys_list[key]]
# Get our source path (used with moving the image):
source_path = os.path.join(file_path, image_name)
# Get our processed img path:
dest_path = os.path.join(processed_dir, image_name)
if(orginal_label == corrected_label):
correctP = True
                # Add to columns: Correct_column, JOINT, and Path
add_query = f"INSERT INTO anvil_imgClassification ({corrected_label}, JOINT, Path) VALUES ('{str(keys_list[key])}', '{str(orginal_label)}' ,'{str(source_path)}')"
localSQL.sql_insert(cnx, add_query)
else:
# If the classifier got the prediction wrong, add img file name to GotWrong column and correct column in database
correctP = False
                # Add to columns: GotWrong, Correct_column, JOINT, and Path
gotWrong_query = f"INSERT INTO anvil_imgClassification (GotWrong, {corrected_label}, JOINT, Path) VALUES ('{str(keys_list[key])}', '{str(keys_list[key])}', '{str(orginal_label)}' , '{str(source_path)}')"
localSQL.sql_insert(cnx, gotWrong_query)
#Lastly, move image to new processed directory:
try:
if(use_sub_folders):
if(corrected_label == "Cotton"):
shutil.copy(source_path, proc_cotton_dir)
elif(corrected_label == "Plastic" ):
shutil.copy(source_path, proc_plastic_dir)
elif(corrected_label == "HID" ):
shutil.copy(source_path, proc_hid_dir)
elif(corrected_label == "Tray" ):
shutil.copy(source_path, proc_tray_dir)
elif(corrected_label == "Other" ):
shutil.copy(source_path, proc_other_dir)
                    # Check if we also need to move the file to the GotWrong folder:
if(correctP):
# Delete the file from the src directory
if os.path.exists(source_path):
os.remove(source_path)
else:
# move file to the GotWrong folder:
shutil.move(source_path, proc_mislabeled_dir)
else:
shutil.move(source_path, dest_path)
except (FileNotFoundError) as e_file:
return
#Close connection to the database:
localSQL.sql_closeConnection(cnx)
return
elif(selected_folder == "update"):
# Need to update modified labels stack:
print(f"Updating modified labels from page {page_num} for user {user_id}")
image_stack.update_stack(user_id, page_num, user_labels=modified_labels)
# Names of table columns, will be iterated over
column_names = ["Cotton","Plastic", "HID", "Tray", "Other"]
print("Updating database...")
        # Search the database for the rows that need to be altered:
# get all the keys
for key in classifier_labels:
keys_list.append(key)
        # Next, establish connection to the database:
cnx = localSQL.sql_connect()
# Create a cursor
cursor = cnx.cursor()
for key in range(len(keys_list)):
image_name = keys_list[key]
corrected_label = modified_labels[keys_list[key]]
#Iterate over the possible column (labels) in the table:
for column in column_names:
#Search for img name in each column to get the row:
search_query = f"SELECT * FROM anvil_imgClassification WHERE {column} = ('{str(keys_list[key])}')"
cursor.execute(search_query)
result = cursor.fetchone()
try:
cnx.commit()
except (Exception) as err:
pass
# RESULT RETURNED FORMAT: (row_number(id), user_id, Cotton, Plastic, Tray, HID, Other, GotWrong, PATH, JOINT) of type tuple
if result:
if corrected_label == column:
print(f"No need to update img {image_name} found in {column} with label {corrected_label}, breaking out...")
break
# print(f"result value returned: {result}")
# print(f"Image name {str(keys_list[key])}")
# Get row number:
row_number = str(result[0])
# print(f"column value: {row_number}")
# Get JOINT value:
joint_value = str(result[-1])
# Set row value in previous column and GotWrong column to None:
update_query = "UPDATE anvil_imgClassification SET %s = NULL, GotWrong = NULL WHERE id = %s"%(column, row_number)
cursor.execute(update_query)
cnx.commit()
# check if joint == new_label
if(joint_value == corrected_label):
print("Joint == Correct!")
# Add img name to the corrected_label colum in row_number:
update_query = f"UPDATE anvil_imgClassification SET {str(corrected_label)} = '{str(keys_list[key])}' WHERE id = '{row_number}'"
cursor.execute(update_query)
cnx.commit()
else:
update_query =f"UPDATE anvil_imgClassification SET {str(corrected_label)} = '{str(keys_list[key])}', GotWrong = '{str(keys_list[key])}' WHERE id = '{row_number}'"
cursor.execute(update_query)
cnx.commit()
# print("breaking..")
break
else:
print(f"result not found in column {column}")
#Close connection to the database:
cnx.close()
return
|
JonWakefield/Anvil-Web-App
|
server_code/uplink_scripts/picture_capture_controls_uplink.py
|
picture_capture_controls_uplink.py
|
py
| 22,281 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71733863868
|
from logging import Logger
from extract.adapters.airtable.credentials import AirtableCredentials
from pyairtable import Table
class AirTableAdapter:
def __init__(self, logger: Logger, credentials: AirtableCredentials):
self.logger = logger
self.api_key = credentials.api_key
self.base_id = credentials.base_id
def extract(self, table_ids: list) -> dict:
data_fetched = {}
dict_of_data = {}
for table_id in table_ids:
try:
table = Table(self.api_key, self.base_id, table_id)
dict_of_data[table_id] = table.all()
data_fetched[table_id] = True
except RuntimeError:
self.logger.error(f"loading of airtable '{table_id}' data has not been successful")
for table_id in data_fetched:
if data_fetched[table_id] is True:
self.logger.info(f"loading of airtable '{table_id}' data has been successful")
return dict_of_data
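# Hedged usage sketch (not part of the original file; assumes
# AirtableCredentials accepts api_key/base_id arguments, and the table id is
# a placeholder):
#
#   import logging
#   creds = AirtableCredentials(api_key="key...", base_id="app...")
#   adapter = AirTableAdapter(logging.getLogger(__name__), creds)
#   data_by_table = adapter.extract(["tblContacts"])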
|
patrikbraborec/good-crm-analytics
|
src/extract/adapters/airtable/impl.py
|
impl.py
|
py
| 1,004 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24200260957
|
class Solution:
def strToInt(self, s: str) -> int:
s = s.lstrip()
if not s:
return 0
res = 0
i = 1
is_positive = True
max_int = 2 ** 31 - 1
if s[0] == "-":
is_positive = False
elif s[0] != "+":
i = 0
for c in s[i: ]:
if not "0" <= c <= "9":
break
res = 10 * res + ord(c) - ord("0")
if res > max_int:
return max_int if is_positive else -max_int - 1
return res if is_positive else -res
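# Example behaviour (illustrative, per the atoi-style rules implemented above):
#   Solution().strToInt("   -42abc")        # -> -42
#   Solution().strToInt("4193 with words")  # -> 4193
#   Solution().strToInt("91283472332")      # -> 2147483647 (clamped to INT_MAX)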
# class Solution:
# def strToInt(self, s: str) -> int:
# nums = {str(x): x for x in range(10)}
#         # skip the useless leading spaces
# i = 0
# while i < len(s) and s[i] == " ":
# i += 1
#         # the string is empty or contains only whitespace
# if i == len(s):
# return 0
# int_max = 2 ** 31 - 1
# int_min = -(2 ** 31)
# if s[i] == "+":
# res = 0
# i += 1
# while res < int_max and i < len(s) and s[i] in nums:
# res = res * 10 + nums[s[i]]
# i += 1
# return res if res < int_max else int_max
# elif s[i] == "-":
# res = 0
# i += 1
# while res < -int_min and i < len(s) and s[i] in nums:
# res = res * 10 + nums[s[i]]
# i += 1
# return -res if -res > int_min else int_min
# elif s[i] in nums:
# res = 0
# while res < int_max and i < len(s) and s[i] in nums:
# res = res * 10 + nums[s[i]]
# i += 1
# return res if res < int_max else int_max
# else:
# return 0
|
AiZhanghan/Leetcode
|
code/面试题67. 把字符串转换成整数.py
|
面试题67. 把字符串转换成整数.py
|
py
| 1,785 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72416331709
|
from socket import *
import time
import osascript
from multiprocessing import Process, Manager, Value
import os
#osascript -e 'display notification "{}" with title "{}"'
volume = 0
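# Control protocol (descriptive note inferred from the handlers below): the
# client sends a 4-byte ASCII integer; 1111 mutes output until a 2222 arrives,
# 3333 takes a screenshot, 4444 shuts the machine down, and any value below
# 300 is applied as the new output volume.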
def recieve_data(val):
serverSock = socket(AF_INET, SOCK_STREAM)
serverSock.bind(('', 7777))
serverSock.listen(1)
connectionSock, addr = serverSock.accept()
print("Client address : ", str(addr))
while True:
print("val : ", val.value)
try :
vol = int(connectionSock.recv(4).decode('utf-8'))
if vol == 1111:
print("mute")
osascript.osascript('set volume output muted TRUE')
val.value = 0
while True:
vol = int(connectionSock.recv(4).decode('utf-8'))
if vol == 2222:
osascript.osascript('set volume output muted FALSE')
break
if vol == 3333:
print("screenshot")
os.system("screencapture screen.png")
vol = 0
if vol == 4444:
print("fix volume")
osascript.osascript('tell app "System Events" to shut down')
time.sleep(5)
if vol < 300:
val.value = vol
except:
pass
def volume_control(val):
while True:
print("volume : ", val.value)
osascript.osascript("set volume output volume " + str(val.value))
time.sleep(0.1)
if __name__ == '__main__':
v = Value('i', 0)
p0 = Process(target = recieve_data, args = (v,))
p0.start()
p1 = Process(target = volume_control, args = (v,))
p1.start()
p0.join()
p1.join()
|
Arc1el/DeepLearning_Jetson_AI
|
server.py
|
server.py
|
py
| 1,802 |
python
|
en
|
code
| 4 |
github-code
|
6
|
1701461424
|
import argparse
import numpy as np
import cv2
import time
import math
from sympy.solvers import solve
from sympy import Symbol
X_POS = 0
Y_POS = 1
Thresh = 170
imageName = "picture.jpg"
def modImage(sceneName, img, kernel, erodeNum, dilateNum, invertion=False):
ret, result = cv2.threshold(img, Thresh, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
if(invertion):
result = cv2.bitwise_not(result)
result = cv2.erode(result, kernel, iterations=erodeNum)
result = cv2.dilate(result, kernel, iterations=dilateNum)
result = cv2.GaussianBlur(result, (5,5), 0)
return result
def searchBorder(img, numOfBorder):
result_point = []
myQ = []
height, width = img.shape[:2]
visited = [[False for rows in range(0, height)]for cols in range(0, width)]
#direction = [ [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1] ]
direction = [ [0, 1], [1, 1], [1, 0], [-1, 1], [-1, 0], [-1, 1], [0, -1], [1, -1]]
start_x = int(width / 2)
start_y = int(height / 2)
startBorder = False
borderCounter = 0
search_cursor_x = -1
search_cursor_y = -1
for y in range(start_y, 0, -1):
for x in range(start_x, 0, -1):
if(img[y][x] != 0 and not startBorder):
startBorder = True
search_cursor_x = x
search_cursor_y = y
borderCounter += 1
elif(img[y][x] != 0 and startBorder):
startBorder = False
borderCounter = 0
elif(img[y][x] == 0 and startBorder):
borderCounter += 1
if(startBorder and borderCounter > 10):
myQ.append([search_cursor_x, search_cursor_y])
while len(myQ) != 0:
point = myQ.pop(0)
try:
if(visited[point[Y_POS]][point[X_POS]]):
continue
except:
continue
visited[point[Y_POS]][point[X_POS]] = True
result_point.append(point)
if( len(result_point) >= numOfBorder ):
return result_point
test_border = False
temp_list = []
for dir in direction:
next_point = [ point[X_POS] + dir[X_POS], point[Y_POS] + dir[Y_POS] ]
try:
if(img[next_point[Y_POS]][next_point[X_POS]] == 0):
temp_list.append(next_point)
else:
test_border = True
except:
continue
if(test_border):
for temp_point in temp_list:
myQ.append(temp_point)
return result_point
def findCircleCenter(pointA, pointB, pointC):
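    # Circumcenter via perpendicular bisectors (descriptive note): for chords
    # AB and AC, build the bisector through each midpoint with slope -dx/dy
    # and solve the resulting 2x2 linear system with sympy; the radius is the
    # distance from the intersection to A. Points that share a y coordinate
    # cause a ZeroDivisionError here, which callers absorb with try/except.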
x = Symbol('x')
y = Symbol('y')
AB_center_x = (pointA[X_POS] + pointB[X_POS])/2
AB_center_y = (pointA[Y_POS] + pointB[Y_POS])/2
AB_incline = (pointA[X_POS] - pointB[X_POS]) / (pointA[Y_POS] - pointB[Y_POS])
equation1 = AB_incline * x + y - AB_incline*AB_center_x - AB_center_y
AC_center_x = (pointA[X_POS] + pointC[X_POS])/2
AC_center_y = (pointA[Y_POS] + pointC[Y_POS])/2
AC_incline = (pointA[X_POS] - pointC[X_POS]) / (pointA[Y_POS] - pointC[Y_POS])
equation2 = AC_incline * x + y - AC_incline*AC_center_x - AC_center_y
result = solve( (equation1, equation2), dict=True)
temp_total = math.pow(result[0][x] - pointA[X_POS], 2) + math.pow(result[0][y] - pointA[Y_POS], 2)
radius = math.sqrt(temp_total)
return int(result[0][x]), int(result[0][y]), int(radius)
def findResult(pointList, rate):
unit_length = int(len(pointList) / 3)
total_length = int(len(pointList) - unit_length*2)
result = {}
for i in range(0, rate):
        try:
            x,y,radius = findCircleCenter(pointList[i], pointList[i+unit_length], pointList[i+unit_length*2])
        except:
            continue
        # skip centers outside the image before recording them
        if(x < 0 or y < 0):
            continue
        if (x,y) in result:
            result[(x,y)].append(radius)
        else:
            result[(x,y)] = [ radius ]
if len(result) == 0:
return None, None, None
max_key = max(result, key=lambda p: len(result[p]))
max_value = result[max_key]
return int(max_key[0]), int(max_key[1]), int(sum(max_value) / float(len(max_value)))
def drawCircle(pointList, output_image, point_color, circle_color, rate):
unit_length = int(len(pointList) / 3)
total_length = int(len(pointList) - unit_length*2)
for i in range(0, rate):
try:
x,y,radius = findCircleCenter(pointList[i], pointList[i+unit_length], pointList[i+unit_length*2])
except:
continue
if(x < 0 or y < 0):
continue
cv2.circle(output_image, (x,y), radius, circle_color, 1)
cv2.rectangle(output_image, (x-2, y-2), (x+2, y+2), point_color, -1)
def getPupil(eye_img):
pupilImg = cv2.inRange(eye_img.copy(), (30,30,30), (80,80,80))
_, contours, __ = cv2.findContours(pupilImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
del pupilImg
pupilImg = eye_img.copy()
for cnt in contours:
moments = cv2.moments(cnt)
area = moments['m00']
if (area > 50):
pupilArea = area
x = moments['m10']/area
y = moments['m01']/area
pupil = contours
global centroid
centroid = (int(x),int(y))
cv2.drawContours(pupilImg, pupil, -1, (0,255,0), -1)
break
return (pupilImg)
def irisDetect_debug(output, image, scale, rate):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
processed_img = getPupil(image.copy())
hsv = cv2.cvtColor(processed_img, cv2.COLOR_BGR2HSV)
(channel_h, channel_s, channel_v) = cv2.split(hsv)
cv2.imshow("hue", channel_h)
cv2.imshow("saturation", channel_s)
cv2.imshow("value", channel_v)
pupil = modImage("pu_man", channel_h, kernel, 5, 5)
iris = modImage("ir_man", channel_v, kernel, 8, 8, True)
cv2.imshow("pupil", pupil)
cv2.imshow("iris", iris)
pupil_point_list = searchBorder(pupil, scale)
iris_point_list = searchBorder(iris, scale)
if not pupil_point_list is None:
drawCircle(pupil_point_list, output, (255, 255, 0), (0, 255, 0), rate)
if not iris_point_list is None:
drawCircle(iris_point_list, output, (0, 255, 255), (255, 0, 0), rate)
def irisDetect(output, image, scale, rate):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
processed_img = getPupil(image.copy())
hsv = cv2.cvtColor(processed_img, cv2.COLOR_BGR2HSV)
(channel_h, channel_s, channel_v) = cv2.split(hsv)
pupil = modImage("pu_man", channel_h, kernel, 5, 5)
iris = modImage("ir_man", channel_v, kernel, 8, 8, True)
pupil_point_list = searchBorder(pupil, scale)
iris_point_list = searchBorder(iris, scale)
if not pupil_point_list is None:
x,y,radius = findResult(pupil_point_list, rate)
if x is not None:
cv2.circle(output, (x,y), radius, (0, 255, 0), 1)
cv2.rectangle(output, (x-2, y-2), (x+2, y+2), (255, 255, 0), -1)
"""
if not iris_point_list is None:
x,y,radius = findResult(iris_point_list, rate)
if x is not None:
cv2.circle(output, (x,y), radius, (255, 0, 0), 1)
cv2.rectangle(output, (x-2, y-2), (x+2, y+2), (0, 255, 255), -1)
"""
if __name__ == "__main__":
image = cv2.imread(imageName)
output = image.copy()
irisDetect(output, image, 1500, 30)
cv2.imshow("display", output)
cv2.waitKey(0)
if cv2.waitKey(1)&0xFF == ord('q'):
cv2.destroyAllWindows()
|
Edwin222/CPL-20181-Team3
|
iris_detect_service/iris_detection.py
|
iris_detection.py
|
py
| 6,860 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11353211013
|
'''
Sudoku
https://www.acmicpc.net/problem/2580
'''
import sys
sudoku = [list(map(int,sys.stdin.readline().split())) for _ in range(9)]
zeros = [(i,j) for i in range(9) for j in range(9) if sudoku[i][j] == 0]
is_complete = [False]
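# Approach (descriptive note): classic backtracking over the empty cells in
# `zeros`, trying each digit 1-9 that passes the row / column / 3x3-box
# checks; is_complete is a one-element list so solve() can flip the flag
# without a `global` statement and cut off the remaining recursion early.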
def check_horizontal(x,val):
if val in sudoku[x]:
return False
return True
def check_vertical(y, val):
for index in range(9):
if val == sudoku[index][y]:
return False
return True
def check_sqaure(x,y,val):
_x = x//3 * 3
_y = y//3 * 3
for i in range(3):
for j in range(3):
if val == sudoku[_x+i][_y+j]:
return False
return True
def solve(x):
if is_complete[0]:
return
if len(zeros) == x:
for row in sudoku:
for val in row:
print(val, end=' ')
print()
is_complete[0] = True
else:
for i in range(1,10):
nx = zeros[x][0]
ny = zeros[x][1]
if check_horizontal(nx, i) and check_vertical(ny, i) and check_sqaure(nx,ny, i):
sudoku[nx][ny] = i
solve(x+1)
sudoku[nx][ny] = 0
solve(0)
|
jihoonyou/problem-solving
|
Baekjoon/boj2580.py
|
boj2580.py
|
py
| 1,176 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29435711236
|
import re
import os
import socket
from threading import Thread, Event
import subprocess
import time
from shutil import copyfile
from tiny_test_fw import Utility, DUT
import ttfw_idf
stop_sock_listener = Event()
stop_io_listener = Event()
sock = None
client_address = None
manual_test = False
def io_listener(dut1):
global sock
global client_address
data = b''
while not stop_io_listener.is_set():
try:
data = dut1.expect(re.compile(r"PacketOut:\[([a-fA-F0-9]+)\]"), timeout=5)
except DUT.ExpectTimeout:
continue
if data != () and data[0] != b'':
packet_data = data[0]
print("Packet_data>{}<".format(packet_data))
response = bytearray.fromhex(packet_data.decode())
print("Sending to socket:")
packet = ' '.join(format(x, '02x') for x in bytearray(response))
print("Packet>{}<".format(packet))
if client_address is not None:
sock.sendto(response, ('127.0.0.1', 7777))
def sock_listener(dut1):
global sock
global client_address
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
server_address = '0.0.0.0'
server_port = 7771
server = (server_address, server_port)
sock.bind(server)
try:
while not stop_sock_listener.is_set():
try:
payload, client_address = sock.recvfrom(1024)
packet = ' '.join(format(x, '02x') for x in bytearray(payload))
print("Received from address {}, data {}".format(client_address, packet))
dut1.write(str.encode(packet))
except socket.timeout:
pass
finally:
sock.close()
sock = None
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def lwip_test_suite(env, extra_data):
global stop_io_listener
global stop_sock_listener
"""
steps: |
1. Rebuilds test suite with esp32_netsuite.ttcn
2. Starts listeners on stdout and socket
3. Execute ttcn3 test suite
4. Collect result from ttcn3
"""
dut1 = env.get_dut("net_suite", "examples/system/network_tests", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "net_suite.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("net_suite", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("net_suite", bin_size // 1024, dut1.TARGET)
dut1.start_app()
thread1 = Thread(target=sock_listener, args=(dut1, ))
thread2 = Thread(target=io_listener, args=(dut1, ))
if not manual_test:
        # Variables referring to the esp32 ttcn test suite
TTCN_SRC = 'esp32_netsuite.ttcn'
TTCN_CFG = 'esp32_netsuite.cfg'
# System Paths
netsuite_path = os.getenv("NETSUITE_PATH")
netsuite_src_path = os.path.join(netsuite_path, "src")
test_dir = os.path.dirname(os.path.realpath(__file__))
# Building the suite
print("Rebuilding the test suite")
print("-------------------------")
# copy esp32 specific files to ttcn net-suite dir
copyfile(os.path.join(test_dir, TTCN_SRC), os.path.join(netsuite_src_path, TTCN_SRC))
copyfile(os.path.join(test_dir, TTCN_CFG), os.path.join(netsuite_src_path, TTCN_CFG))
proc = subprocess.Popen(['bash', '-c', 'cd ' + netsuite_src_path + ' && source make.sh'],
cwd=netsuite_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.stdout.read()
print("Note: First build step we expect failure (titan/net_suite build system not suitable for multijob make)")
print(output)
proc = subprocess.Popen(['bash', '-c', 'cd ' + netsuite_src_path + ' && make'],
cwd=netsuite_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print("Note: This time all dependencies shall be generated -- multijob make shall pass")
output = proc.stdout.read()
print(output)
# Executing the test suite
thread1.start()
thread2.start()
time.sleep(2)
print("Executing the test suite")
print("------------------------")
proc = subprocess.Popen(['ttcn3_start', os.path.join(netsuite_src_path,'test_suite'), os.path.join(netsuite_src_path, TTCN_CFG)],
stdout=subprocess.PIPE)
output = proc.stdout.read()
print(output)
print("Collecting results")
print("------------------")
verdict_stats = re.search('(Verdict statistics:.*)', output)
if verdict_stats:
verdict_stats = verdict_stats.group(1)
else:
verdict_stats = b""
verdict = re.search('Overall verdict: pass', output)
if verdict:
print("Test passed!")
Utility.console_log(verdict_stats, "green")
else:
Utility.console_log(verdict_stats, "red")
raise ValueError('Test failed with: {}'.format(verdict_stats))
else:
try:
# Executing the test suite
thread1.start()
thread2.start()
time.sleep(2)
while True:
time.sleep(0.5)
except KeyboardInterrupt:
pass
print("Executing done, waiting for tests to finish")
print("-------------------------------------------")
stop_io_listener.set()
stop_sock_listener.set()
thread1.join()
thread2.join()
if __name__ == '__main__':
print("Manual execution, please build and start ttcn in a separate console")
manual_test = True
lwip_test_suite()
|
espressif/ESP8266_RTOS_SDK
|
components/lwip/weekend_test/net_suite_test.py
|
net_suite_test.py
|
py
| 5,711 |
python
|
en
|
code
| 3,148 |
github-code
|
6
|
30124092991
|
import sys
#sys.stdin=open("A.txt","r")
#n,m=map(int,input().split()) # regular n-sided dice, m faces
#a=list(map(int,input().split()))
res=0
N=int(input())
#a=list(map(int,input().split())) # this gives a list like [1,2,3,4,5]
for i in range(N):
    tmp=input().split() # stored as strings like ['3','3','6']
tmp.sort()
a,b,c=map(int,tmp)
#print(a,b,c)
if a==b and b==c:
money=10000+a*1000
elif a==b or b==c or a==c:
if a==b or a==c:
money= 1000+a*100
elif b==c:
money= 1000+b*100
else:
        money=c*100 # sorted ascending, so c is the largest
if money>res:
res=money
print(res)
|
kimyoonseong/202207_08_PythonAlgorithm
|
코드구현력기르기Part/주사위게임.py
|
주사위게임.py
|
py
| 646 |
python
|
ja
|
code
| 0 |
github-code
|
6
|
11914708160
|
''' Write a program which accepts a
string as input to print "Yes" if the
string is "yes" or "YES" or "Yes",
otherwise print "No". '''
str_val= input("enter a string : ")
if str_val == 'yes' or str_val =='YES' or str_val =='Yes':
print("Yes")
else:
print("No")
|
mrudulamucherla/Python-Class
|
string_Yes_No.py
|
string_Yes_No.py
|
py
| 276 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20921250486
|
import networkx as nx
from sklearn.cluster import SpectralClustering
def spectral_clustering(G, n_clusters=2):
adj_mat = nx.to_numpy_matrix(G)
sc = SpectralClustering(n_clusters, affinity='precomputed', n_init=100)
sc.fit(adj_mat)
clusters = {}
for i in range(len(sc.labels_)):
if sc.labels_[i] not in clusters:
clusters[sc.labels_[i]] = []
clusters[sc.labels_[i]].append(i)
return clusters.values()
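# Hedged usage sketch (not part of the original file). Note that networkx 3.0
# removed nx.to_numpy_matrix; nx.to_numpy_array is the drop-in replacement
# inside spectral_clustering() there.
#
#   G = nx.karate_club_graph()
#   for cluster in spectral_clustering(G, n_clusters=2):
#       print(cluster)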
|
sharpenb/Multi-Scale-Modularity-Graph-Clustering
|
Scripts/clustering_algorithms/spectral_clustering.py
|
spectral_clustering.py
|
py
| 454 |
python
|
en
|
code
| 2 |
github-code
|
6
|
19400730749
|
###############################################################################
# Process to read Customer Updates #
#
# Pre-requisites: Kafka server should be running #
###############################################################################
import os
import sys
import logging
import json
import settings as SETTINGS
curpath = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join (curpath, "../")))
from app_messaging_utils import SimpleKafkaConsumer, SimpleKafkaMessage
from app_models import Customer, AppEventType
from app_utils import MongoRepository, DbEntity
###############################################################################
class MessageProcessor():
def __init__(self, process_func=None):
#Create and configure logger
logfile = os.path.abspath('{0}/{1}'.format(SETTINGS.Logging["LogFolder"],SETTINGS.Logging["LogFile"]))
os.makedirs(os.path.dirname(logfile), exist_ok=True)
logging.basicConfig(
filename=logfile,
format='%(asctime)s %(message)s',
filemode='a'
)
#Creating an object
self.logger=logging.getLogger()
#Setting the threshold of logger to DEBUG
self.logger.setLevel(SETTINGS.Logging["LogLevel"])
self.config = SETTINGS.KafkaService
self.topic = SETTINGS.MESSAGE_TOPIC
self.customer_repo = MongoRepository(
logger=self.logger,
server=SETTINGS.MongoDB["Url"],
port=SETTINGS.MongoDB["Port"],
database=SETTINGS.MongoDB["Db"],
collection=SETTINGS.MongoDB["Collection"],
session_id=1)
###########################################################################
def process_message(self, evt_msg: SimpleKafkaMessage):
'''
Function to process SimpleKafkaMessage
Deserialize the SimpleKafkaMessage,
extract and process relevant payload
'''
try:
evt = json.loads(evt_msg.message)
if evt["app_event_type"] == AppEventType.Insert:
entity = evt["after_change"]
customer = Customer(
id=entity["id"],
name=entity["name"],
phone=entity["phone"],
email=entity["email"]
)
msg="Processing INSERT message for customer id:{0}".format(customer.id)
print(msg)
eid = self.customer_repo.create(customer) # expect to get back an ObjectId
msg="Created customer id:{0}".format(customer.id)
print(msg)
self.logger.debug(msg)
elif evt["app_event_type"] == AppEventType.Update:
entity = evt["after_change"]
customer = Customer(
id=entity["id"],
name=entity["name"],
phone=entity["phone"],
email=entity["email"]
)
msg="Processing UPDATE message for customer id:{0}".format(customer.id)
print(msg)
self.customer_repo.update_by_id(customer.id, customer)
msg="Updated customer id:{0}".format(customer.id)
print(msg)
self.logger.debug(msg)
elif evt["app_event_type"] == AppEventType.Delete:
entity = evt["after_change"]
customer = Customer(
id=entity["id"],
name=entity["name"],
phone=entity["phone"],
email=entity["email"]
)
msg="Processing DELETE message for customer id:{0}".format(customer.id)
print(msg)
self.customer_repo.delete_by_id(customer.id)
msg="Deleted customer id:{0}".format(customer.id)
print(msg)
self.logger.debug(msg)
else:
pass
except Exception as e:
msg = "Error in process_message function: {0}".format(str(e))
print(msg)
self.logger.error(msg)
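    # Hedged example (not part of the original file) of the event payload
    # shape inferred from the lookups above; field values are illustrative:
    #   {"app_event_type": AppEventType.Insert,
    #    "after_change": {"id": 1, "name": "Ada Lovelace",
    #                     "phone": "555-0100", "email": "ada@example.com"}}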
###########################################################################
def read_messages(self):
'''
Function to read messages from kafka queue
'''
reader_id = self.config["group.id"]
counter=0
try:
msg = "Starting Process:{0} to read topic:{1} from Kafka Queue".format( reader_id , self.topic )
self.logger.info(msg)
print(msg)
consumer = SimpleKafkaConsumer(logger=self.logger)
consumer.configure(config=self.config)
print ("Starting Consumer")
for evt_msg in consumer.consume(topics=['MICROSERVICE-CUSTOMER-UPDATES']):
counter +=1
# msg = "Received msg: {0} # {1}".format(counter, evt_msg.message)
# print(msg)
# self.logger.debug(msg)
# Process the message
self.process_message(evt_msg)
except KeyboardInterrupt:
msg = "\n\n Exiting Process:'{0}'. {1} message(s) read on topic from Kafka Queue:'{2}'".format( reader_id, counter, self.topic )
print (msg)
self.logger.info(msg)
except Exception as e:
msg = "Error in {0} : {1}".format(reader_id, str(e))
print(msg)
self.logger.error(msg)
###############################################################################
if __name__ == "__main__":
MessageProcessor().read_messages()
###############################################################################
|
bbcCorp/py_microservices
|
src/app_services_replication/message_processor.py
|
message_processor.py
|
py
| 5,987 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14276598167
|
import sys
sys.path.append('D:/Users/Murph Strange/Jupyter Notebook/')
import menus
import random
import types
class Character:
def __init__(self, name):
self.name = name
self.strength = 16
self.intellect = 16
self.resilience = 16
#ability to run away (TODO: implement Escape action?)
self.agility = 16
self.max_hp = 16
self.hp = self.max_hp
self.max_mp = 16
self.mp = self.max_mp
self.attack_power = 0
self.defense_power = 0
self.equipment = {
"weapon": False,
"armor": False,
"shield": False
}
self.status = []
self.inventory = {}
self.spellbook = {}
def alive(self):
if "deceased" not in self.status:
return True
else:
print("%s is a corpse.\n" % self.name)
return False
def health(self, change = 0):
self.hp += change
if change == 0:
return self.hp
elif self.alive() and (change < 0):
print("%s takes %d damage.\n" % (self.name, abs(change)))
if self.hp < 1:
self.hp = 0
print("%s has died.\n" % self.name)
self.status = ["deceased"]
return True
elif self.alive() and (change > 0):
print("%s restores %d hp.\n" % (self.name, change))
if self.hp > self.max_hp:
self.hp = self.max_hp
print("%s's health is fully restored!\n" % self.name)
return True
else:
return False
def magic(self, change = 0):
if change == 0:
return self.mp
elif self.alive() and (change < 0):
if abs(change) > self.mp:
print("%s doesn't have enough magic!\n" % self.name)
return False
else:
self.mp += change
print("%s expends %d magic.\n" % (self.name, abs(change)))
if self.mp < 1:
self.mp = 0
print("%s has depleted their magic.\n" % self.name)
return True
elif self.alive() and (change > 0):
self.mp += change
print("%s restores %d mp.\n" % (self.name, change))
if self.mp > self.max_mp:
self.mp = self.max_mp
print("%s's magic has been fully restored!\n" % self.name)
return True
else:
return False
def drink(self, item):
if item in self.inventory.keys():
return self.inventory[item].drink(self)
else:
print("%s can't drink a %s.\n" % (self.name, item))
return False
def equip(self, item):
if item in self.inventory.keys():
return self.inventory[item].equip(self)
else:
print("%s can't equip a %s.\n" % (self.name, item))
return False
def attack(self, target):
if self.equipment["weapon"]:
return self.equipment["weapon"].attack(self, target)
else:
print("%s is unarmed.\n" % self.name)
return False
def defend(self):
return self.defense_power
def cast(self, spell, target):
if spell in self.spellbook.keys():
print("%s casts %s.\n" % (self.name, spell))
return self.spellbook[spell].cast(target, self)
else:
print("%s can't cast %s.\n" % (self.name, spell))
return False
def speak(self, target):
#Options to converse, trade, engage in combat
pass
class NPCharacter(Character):
def __init__(self, name):
super().__init__(name)
pass
def health(self, change = 0):
res = super().health(change)
if self.alive() and self.hp < int(self.max_hp/3):
self.drink("red potion")
return res
def magic(self, change = 0):
res = super().magic(change)
if self.alive() and self.mp < int(self.max_mp/3):
self.drink("blue potion")
return res
def speak(self, target):
#Options to converse, trade, or engage in combat
#overwrite this method with the dialog menus of
#your choice, using menus and types.MethodType()
print("%s says" % self.name)
dialog = menus.Menu('"Continue?"', ['y', 'n'])
res = dialog.display_prompt()
return res
class PlayerCharacter(Character):
def __init__(self, name):
super().__init__(name)
self.gold = 100
self.level = 1
self.xp = 0
self.quest_flags = {}
def speak(self, target):
if self.alive() and target.alive():
if type(target) == PlayerCharacter:
#player character options (trade? duel? invite to party?)
pass
elif type(target) == NPCharacter:
#invoke npc's menus for dialog,
#changes depending on what the
#npc is (foe, merchant, quest giver)
return target.speak(self)
else:
print("%s can't speak to that.\n" % self.name)
return False
else:
print("%s can't speak to %s.\n" % (self.name, target.name))
return False
class MonsterCharacter(NPCharacter):
def __init__(self, name, health, magic, attack_power, defense_power, agility, xp, gold, spell_list = []):
super().__init__(name)
self.max_hp = health
self.hp = self.max_hp
self.max_mp = magic
self.mp = self.max_mp
self.attack_power = attack_power
self.defense_power = defense_power
self.agility = agility
self.xp = xp
self.gold = gold
for item in spell_list:
self.spellbook[item] = Spell(item, spells[item][0], spells[item][1], spells[item][2])
def attack(self, target):
if self.equipment["weapon"]:
return self.equipment["weapon"].attack(self, target)
else:
if self.alive() and target.alive():
damage = random.randint(int((self.strength + self.attack_power)/3), self.strength + self.attack_power)
print("%s attacks %s!\n" % (self.name, target.name))
if random.randint(0, 7) in [3, 5]:
print("%s misses!\n" % self.name)
damage = 0
else:
print("%s deals %d damage.\n" % (self.name, damage))
target_defense = target.defend()
if target.equipment["shield"]:
target_defense += target.equipment["shield"].defend()
if target.equipment["armor"]:
target_defense += target.equipment["armor"].defend()
resist = random.randint(int((target.resilience + target_defense)/3), target.resilience + target_defense)
print("%s has a defense rating of %d.\n" % (target.name, target_defense))
print("%s resists %d damage.\n" % (target.name, resist))
damage -= resist
if damage <= 0:
damage = 0
print("%s blocks the attack!\n" % target.name)
target.health(-damage)
return True
else:
return False
class Effect():
def __init__(self, name, power):
self.name = name
self.power = power
class Potion(Effect):
def __init__(self, name, attribute, power, quantity = 0):
super().__init__(name, power)
self.attribute = attribute
self.quantity = quantity
def pay(self, caster):
if caster.inventory[self.name].quantity > 0:
caster.inventory[self.name].quantity -= 1
return True
else:
print("%s has no %s left.\n" % (caster.name, self.name))
return False
def drink(self, caster):
if caster.alive() and self.pay(caster):
print("%s drinks a %s.\n" % (caster.name, self.name))
if self.attribute == 'hp':
caster.health(self.power)
elif self.attribute == 'mp':
caster.magic(self.power)
return True
else:
return False
class Spell(Effect):
def __init__(self, name, cost, power, status = 'none'):
super().__init__(name, power)
self.cost = cost
self.status = status
def pay(self, caster):
        if caster.magic() >= self.cost:
caster.magic(-self.cost)
return True
else:
return False
def cast(self, target, caster):
if caster.alive() and target.alive() and self.pay(caster):
damage = random.randint(int((caster.intellect + self.power)/2), caster.intellect + self.power)
print("%s deals %d damage.\n" % (self.name.capitalize(), damage))
if self.status in target.status:
resist = random.randint(int((target.intellect + target.resilience)/2), target.intellect + target.resilience)
else:
resist = random.randint(0, int((target.intellect + target.resilience)/2))
if target.alive() and ((damage - resist) > 0) and not self.status == 'none' and self.status not in target.status:
target.status.append(self.status)
print("%s resists %d damage.\n" % (target.name, resist))
damage -= resist
if damage <= 0:
damage = 0
print("%s is ineffective!\n" % self.name.capitalize())
target.health(-damage)
return True
else:
return False
class Equipment(Effect):
def __init__(self, name, power, slot):
super().__init__(name, power)
self.slot = slot
def equip(self, caster):
if caster.alive() and self.name in caster.inventory.keys():
            if caster.equipment[self.slot]:
                # inventory is a dict keyed by item name, so re-add the old gear by name
                old_item = caster.equipment[self.slot]
                caster.inventory[old_item.name] = old_item
caster.equipment[self.slot] = self
caster.inventory.pop(self.name)
print("%s equips the %s.\n" % (caster.name, self.name))
return True
else:
return False
def defend(self):
return self.power
class Weapon(Equipment):
def __init__(self, name, power):
super().__init__(name, power, "weapon")
def attack(self, caster, target):
if caster.alive() and target.alive():
damage = random.randint(int((caster.strength + self.power)/3), caster.strength + self.power)
print("%s attacks %s with %s.\n" % (caster.name, target.name, self.name))
if random.randint(0, 7) == 3:
print("%s misses!\n" % caster.name)
damage = 0
else:
print("%s deals %d damage.\n" % (self.name, damage))
            target_defense = target.defend()  # include base defense, as MonsterCharacter.attack does
if target.equipment["shield"]:
target_defense += target.equipment["shield"].defend()
if target.equipment["armor"]:
target_defense += target.equipment["armor"].defend()
resist = random.randint(int((target.resilience + target_defense)/3), target.resilience + target_defense)
print("%s has a defense rating of %d.\n" % (target.name, target_defense))
print("%s resists %d damage.\n" % (target.name, resist))
damage -= resist
if damage <= 0:
damage = 0
print("%s blocks the attack!\n" % target.name)
target.health(-damage)
return True
else:
return False
weapons = {
"Bamboo Pole": 2, #10
"Club": 4, #60
"Copper Sword": 10, #180
"Hand Axe": 15, #560
"Broad Sword": 20, #1500
"Flame Sword": 28, #9800
"Erdrick's Sword": 40 #0
}
armor = {
"Clothes": 2, #50
"Leather Armor": 4, #40
"Chain Mail": 12, #300
"Half Plate": 16, #1000
"Full Plate": 24, #3000
"Magic Armor": 24, #7700
"Erdrick's Armor": 28 #0
}
shields = {
"Leather Shield": 4, #90
"Iron Shield": 10, #800
"Silver Shield": 24 #14800
}
spells = {
"zap": (1, 2, 'none'),
"fireball": (2, 4, 'burning'),
"blizzard": (4, 8, 'freezing'),
"lightning": (8, 12, 'shocked')
}
potions = {
"red potion": (8, 'hp'),
"blue potion": (8, 'mp')
}
monsters = {
"slime": (3, 0, 5, 3, 2, 1, 2, []),
"she-slime":(4, 0, 7, 3, 4, 2, 4, []),
"dracky": (6, 0, 9, 6, 5, 3, 6, []),
"ghost": (7, 0, 11, 8, 6, 4, 8, []),
"prestidigitator": (12, 8, 8, 12, 6, 8, 16, ["zap"]),
"drackolyte": (15, 8, 13, 13, 8, 12, 20, ["zap"]),
"scorpion": (20, 0, 18, 35, 4, 16, 25, []),
"skeleton": (30, 0, 28, 22, 17, 25, 42, []),
"lunatick": (22, 0, 22, 18, 11, 14, 21, []),
"fightgeist": (23, 10, 18, 20, 14, 15, 19,["fireball"]),
"drohl drone": (35, 0, 24, 6, 9, 18, 30, []),
"drackyma":(20, 10, 22, 26, 16, 20, 25, ["fireball"]),
"legerdeman": (28, 10, 26, 24, 15, 28, 50, ["fireball"]),
"bewarewolf": (34, 0, 40, 30, 21, 40, 60, []),
"iron scorpion": (22, 0, 36, 60, 25, 31, 48, []),
"skeleton scrapper": (36, 0, 44, 34, 23, 42, 62, []),
"scarewolf": (38, 6, 50, 36, 23, 52, 80, ["zap"]),
"gold golem": (99, 0, 48, 30, 26, 6, 650, []),
"chimaera": (42, 0, 56, 48, 31, 64, 150, []),
"spitegeist": (33, 14, 40, 38, 26, 47, 72, ["fireball"]),
"raving lunatick": (35, 30, 41, 40, 28, 58, 95, ["blizzard"]),
"drohl diabolist": (38, 10, 44, 16, 11, 58, 110, ["fireball"]),
"skeleton soldier": (46, 12, 62, 46, 36, 72, 120, ["fireball"]),
"death scorpion": (35, 0, 55, 90, 33, 70, 110, []),
"knight errant": (55, 6, 70, 71, 45, 78, 150, ["zap"]),
"dark skeleton": (43, 0, 79, 51, 40, 90, 148, []),
"hocus chimaera": (50, 12, 68, 62, 44, 83, 135, ["fireball"]),
"metal slime": (4, 6, 18, 255, 153, 775, 6, ["zap"]),
"tearwolf": (60, 0, 80, 65, 45, 95, 155, []),
"cosmic chimaera": (73, 15, 82, 65, 52, 105, 169, ["zap", "fireball"]),
"dragon": (67, 18, 88, 72, 47, 135, 160, ["zap", "fireball"]),
"green dragon": (166, 65, 88, 72, 47, 950, 250, ["zap", "fireball", "blizzard", "lightning"]),
"vis mager": (70, 16, 71, 60, 49, 120, 185, ["zap", "fireball"]),
"golem": (155, 0, 120, 60, 39, 2000, 10, []),
"knight aberrant": (79, 4, 94, 92, 53, 130, 165, ["zap"]),
"blue dragon": (98, 75, 98, 80, 52, 180, 150, ["zap", "fireball", "blizzard", "lightning"]),
"stone golem": (160, 0, 100, 40, 40, 155, 148, []),
"knight abhorrent": (98, 14, 105, 99, 57, 172, 152, ["zap", "fireball"]),
"red dragon": (105, 85, 115, 104, 62, 350, 143, ["zap", "fireball", "blizzard", "lightning"]),
"dragon mage": (240, 95, 107, 110, 55, 480, 500, ["zap", "fireball", "blizzard", "lightning"]),
"dragon lord": (361, 120, 130, 150, 90, 1000, 2500, ["zap", "fireball", "blizzard", "lightning"])
}
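# Hedged demo (not part of the original file; stat order follows
# MonsterCharacter.__init__ and values come from the tables above):
#
#   hero = PlayerCharacter("Hero")
#   hero.inventory["Copper Sword"] = Weapon("Copper Sword", weapons["Copper Sword"])
#   hero.equip("Copper Sword")
#   slime = MonsterCharacter("slime", *monsters["slime"])
#   hero.attack(slime)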
if __name__ == "__main__":
pass
#TODO: Implement escape action
#Create a vendor from the NPC class that takes gold for weapons and armor
|
drunkfurball/dragonquest
|
dragonquest.py
|
dragonquest.py
|
py
| 15,882 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5085250146
|
from copy import deepcopy
import json
import re
from flask import render_template
from maf_api_mock_data import EGFR_BLCA_BRCA as FAKE_MAF_DATA
from hotspots.seqpeek.tumor_types import tumor_types as ALL_TUMOR_TYPES
from app_logging import get_logger
log = get_logger()
try:
from hotspots.seqpeek.gene_list import gene_list as GENE_LIST
except ImportError:
log.error("Loading gene list failed, using static list.")
GENE_LIST = ['EGFR', 'TP53', 'PTEN']
from hotspots.seqpeek.uniprot_data import get_uniprot_data
from hotspots.seqpeek.interpro_data import get_protein_domain_data
from hotspots.seqpeek.cluster_data import get_cluster_data as get_cluster_data_remote
from hotspots.seqpeek.mutation_data import get_mutation_data as get_mutation_data_remote
from hotspots.seqpeek.mutation_data import get_mutation_data_summary_for_gene
SEQPEEK_VIEW_DEBUG_MODE = False
SEQPEEK_VIEW_MUTATION_DEBUG = False
SAMPLE_ID_FIELD_NAME = 'patient_barcode'
TUMOR_TYPE_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'amino_acid_position'
MUTATION_DATA_PROTEIN_FIELD = 'uniprot_id'
PROTEIN_DOMAIN_DB = 'PFAM'
ALPHA_FINDER = re.compile(r'[\W_]+', re.UNICODE)
TEMPLATE_NAME = 'hotspots/seqpeek/view.html'
def get_number_of_unique_samples(track):
sample_ids = set()
for mutation in track['mutations']:
sample_ids.add(mutation[SAMPLE_ID_FIELD_NAME])
return len(sample_ids)
# TODO remove if not needed
def clean_track_mutations(mutations_array):
retval = []
for mutation in mutations_array:
cleaned = deepcopy(mutation)
cleaned[COORDINATE_FIELD_NAME] = int(mutation[COORDINATE_FIELD_NAME])
retval.append(cleaned)
return retval
def sort_track_mutations(mutations_array):
return sorted(mutations_array, key=lambda k: k[COORDINATE_FIELD_NAME])
def get_track_statistics(track):
return {
'samples': {
'numberOf': get_number_of_unique_samples(track)
}
}
def filter_protein_domains(match_array):
return [m for m in match_array if m['dbname'] == PROTEIN_DOMAIN_DB]
def get_table_row_id(tumor_type):
return "seqpeek_row_{0}".format(tumor_type)
def build_seqpeek_regions(protein_data):
return [{
'type': 'exon',
'start': 0,
'end': protein_data['length']
}]
def build_summary_track(tracks, render_summary_only=False):
    all_mutations = []
    for track in tracks:
        all_mutations.extend(track["mutations"])
    return {
        'mutations': all_mutations,
        'label': 'COMBINED',
        'tumor': 'none-combined',
        'type': 'summary',
        'do_variant_layout': bool(render_summary_only)
}
def get_track_label(track):
return track[TUMOR_TYPE_FIELD]
def process_raw_domain_data(data):
result = []
for item in data:
database = item['database']
# Filter for PFAM
if not database.startswith('PF'):
continue
domain = {
'name': item['name'][:5] + '...',
'full_name': item['name'],
'locations': [{
'start': item['start'],
'end': item['end']
}],
'dbname': 'PFAM',
'ipr': {
'type': 'Domain',
'id': item['interpro_id'],
'name': item['name'][:2]
},
'id': database
}
result.append(domain)
log.debug("Found {total} domains, filtered down to {num}".format(total=len(data), num=len(result)))
return result
def get_protein_domains_remote(uniprot_id):
uniprot_data = get_uniprot_data(uniprot_id)
log.debug("UniProt entry: " + str(uniprot_data))
# Add protein domain data to the UniProt entry
raw_domain_data = get_protein_domain_data(uniprot_id)
domains = process_raw_domain_data(raw_domain_data)
uniprot_data['matches'] = domains
return uniprot_data
def get_protein_domains(uniprot_id):
return get_protein_domains_remote(uniprot_id)
def get_maf_data_remote(gene, tumor_type_list):
return get_mutation_data_remote(tumor_type_list, gene)
def get_mutation_data(gene, tumor_type_list):
if SEQPEEK_VIEW_MUTATION_DEBUG:
return deepcopy(FAKE_MAF_DATA['items'])
else:
return get_mutation_data_remote(tumor_type_list, gene)
def process_cluster_data_for_tumor(all_clusters, tumor_type):
    clusters = [c for c in all_clusters if c['tumor_type'] == tumor_type]
result = []
for index, cluster in enumerate(clusters):
item = {
'name': '',
'type': 'cluster',
'id': 'cluster_' + str(index),
'locations': [{
'start': cluster['start'],
'end': cluster['end']
}],
'mutation_stats': cluster['mutation_stats'],
'stats': cluster['stats']
}
result.append(item)
return result
def build_track_data(tumor_type_list, all_tumor_mutations, all_clusters):
    tracks = []
    for tumor_type in tumor_type_list:
        # filter() returns a lazy iterator in Python 3; build a list so that
        # len() below works and the mutations can be iterated more than once
        mutations = [m for m in all_tumor_mutations if m['tumor_type'] == tumor_type]
        track_obj = {
            TUMOR_TYPE_FIELD: tumor_type,
            'mutations': mutations,
            'clusters': process_cluster_data_for_tumor(all_clusters, tumor_type),
            'do_variant_layout': True,
            'render_in_seqpeek': len(mutations) > 0
        }
        tracks.append(track_obj)
    return tracks
def find_uniprot_id(mutations):
uniprot_id = None
for m in mutations:
if MUTATION_DATA_PROTEIN_FIELD in m:
uniprot_id = m[MUTATION_DATA_PROTEIN_FIELD]
break
return uniprot_id
def get_cluster_data(tumor_type_array, gene):
clusters = get_cluster_data_remote(tumor_type_array, gene)
return clusters
def sanitize_gene_input(gene_parameter):
return ALPHA_FINDER.sub('', gene_parameter)
def sanitize_normalize_tumor_type(tumor_type_list):
tumor_set = frozenset(ALL_TUMOR_TYPES)
sanitized = []
for tumor_param in tumor_type_list:
if tumor_param in tumor_set:
sanitized.append(tumor_param)
return sanitized
def format_tumor_type_list(tumor_type_array, selected_types=[]):
result = []
for tumor_type in tumor_type_array:
result.append({
'name': tumor_type,
'selected': tumor_type in selected_types
})
return result
def seqpeek(request_gene, request_tumor_list, summary_only=False):
gene = None
if request_gene is not None:
# Remove non-alphanumeric characters from parameters and uppercase all
gene = sanitize_gene_input(request_gene).upper()
parsed_tumor_list = sanitize_normalize_tumor_type(request_tumor_list)
log.debug("Valid tumors from request: {0}".format(str(parsed_tumor_list)))
tumor_types_for_tpl = format_tumor_type_list(ALL_TUMOR_TYPES, parsed_tumor_list)
context = {
'gene_select_widget': {
'action': '/seqpeek',
'tumor_type_select': True,
'all_tumor_types': tumor_types_for_tpl,
'button_label': 'Redraw'
},
'query_status': {
'no_mutations_found': False,
'uniprot_id_not_found': False,
'data_found': False,
'summary_only': False,
'insufficient_parameters': False,
'request_gene': request_gene
},
'gene_label': gene,
'is_gene_summary': summary_only,
'static_data': {
'gene_list': GENE_LIST,
'gene_label': gene,
'fill_in_gene': True
},
'all_tumor_types': tumor_types_for_tpl
}
if (len(parsed_tumor_list) == 0 and summary_only is False) or gene is None:
context['query_status']['insufficient_parameters'] = True
context['static_data']['fill_in_gene'] = False
context.update({
'static_data': json.dumps(context['static_data'])
})
return render_template(TEMPLATE_NAME, **context)
if summary_only is False:
cluster_data = get_cluster_data(parsed_tumor_list, gene)
maf_data = get_mutation_data(gene, parsed_tumor_list)
else:
maf_data = get_mutation_data_summary_for_gene(gene)
if len(maf_data) == 0:
context['query_status']['no_mutations_found'] = True
context['static_data']['fill_in_gene'] = False
context.update({
'static_data': json.dumps(context['static_data'])
})
return render_template(TEMPLATE_NAME, **context)
uniprot_id = find_uniprot_id(maf_data)
if uniprot_id is None:
context['query_status']['uniprot_id_not_found'] = True
context['static_data']['fill_in_gene'] = False
context.update({
'static_data': json.dumps(context['static_data'])
})
return render_template(TEMPLATE_NAME, **context)
log.debug("Found UniProt ID: " + repr(uniprot_id))
context['query_status']['data_found'] = True
protein_data = get_protein_domains(uniprot_id)
plot_data = {
'gene_label': gene,
'protein': protein_data
}
if summary_only is False:
track_data = build_track_data(parsed_tumor_list, maf_data, cluster_data)
plot_data['tracks'] = track_data
# Pre-processing
# - Sort mutations by chromosomal coordinate
for track in plot_data['tracks']:
track['mutations'] = sort_track_mutations(track['mutations'])
# Annotations
# - Add label, possibly human readable
# - Add type that indicates whether the track is driven by data from search or
# if the track is aggregate
for track in plot_data['tracks']:
track['type'] = 'tumor'
track['label'] = get_track_label(track)
plot_data['tracks'].append(build_summary_track(plot_data['tracks'], render_summary_only=False))
else:
summary_track = {
'mutations': sort_track_mutations(maf_data)
}
plot_data['tracks'] = [build_summary_track([summary_track], render_summary_only=True)]
for track in plot_data['tracks']:
# Calculate statistics
track['statistics'] = get_track_statistics(track)
# Unique ID for each row
track['render_info'] = {
'row_id': get_table_row_id(track[TUMOR_TYPE_FIELD])
}
plot_data['regions'] = build_seqpeek_regions(plot_data['protein'])
plot_data['protein']['matches'] = filter_protein_domains(plot_data['protein']['matches'])
# Filter the tracks-array for Seqpeek. Only leave tracks with at least one mutation.
seqpeek_data = {key: plot_data[key] for key in ['gene_label', 'protein', 'regions']}
seqpeek_tracks = []
for track in plot_data['tracks']:
if len(track['mutations']) > 0:
# Gene has to be passed to the track object, so that it can be used
# to construct the URI for the pathway association view
track['gene'] = gene
seqpeek_tracks.append(track)
else:
log.debug("{0}: 0 mutations, not rendering in SeqPeek.".format(track['label']))
seqpeek_data['tracks'] = seqpeek_tracks
tumor_list = ','.join(parsed_tumor_list)
context.update({
'search': {},
'plot_data': plot_data,
'data_bundle': json.dumps(seqpeek_data),
'gene': gene,
'tumor_list': tumor_list
})
context.update({
'static_data': json.dumps(context['static_data'])
})
return render_template(TEMPLATE_NAME, **context)
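# Usage sketch (editor's note; the route and app object below are assumptions,
# not part of this module): a Flask endpoint could delegate to seqpeek() like
#   from flask import Flask, request
#   app = Flask(__name__)
#
#   @app.route('/seqpeek')
#   def seqpeek_view():
#       return seqpeek(request.args.get('gene'), request.args.getlist('tumor'))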
|
cancerregulome/multiscale-mutation-hotspots
|
hotspots/seqpeek/view.py
|
view.py
|
py
| 11,643 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7642412610
|
def start():
n1 = input("n1: ")
control_input(n1)
def control_input(x):
try:
val = int(x)
print("Input is an integer number. Number = ", val)
result = "int_number"
except ValueError:
try:
val = float(x)
print("Input is a float number. Number = ", val)
result = "float_number"
except ValueError:
            print(x + " is a string")
result = "string"
return result
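# Example behaviour (editor's sketch): control_input("3") returns "int_number",
# control_input("3.5") returns "float_number", control_input("abc") returns "string".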
if __name__ == '__main__':
start()
|
Ruxuge/TAU
|
lab7/main.py
|
main.py
|
py
| 648 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26969758526
|
import os
import time
import numpy as np
import torch
from torchvision.utils import make_grid
from torchvision.transforms import ToPILImage
from base import BaseTrainer
from evaluate import get_fid_score, get_i3d_activations, init_i3d_model, evaluate_video_error
from utils.readers import save_frames_to_dir
from model.loss import AdversarialLoss
class Trainer(BaseTrainer):
"""
Trainer class
Note:
Inherited from BaseTrainer.
"""
def __init__(
self, model, losses, metrics,
optimizer_g, optimizer_d_s, optimizer_d_t, resume, config,
data_loader, valid_data_loader=None, lr_scheduler=None,
train_logger=None, learn_mask=True, test_data_loader=None,
pretrained_path=None
):
super().__init__(
model, losses, metrics, optimizer_g,
optimizer_d_s, optimizer_d_t, resume, config, train_logger,
pretrained_path
)
self.config = config
self.data_loader = data_loader
self.valid_data_loader = valid_data_loader
self.test_data_loader = test_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = self.config['visualization']['log_step']
self.loss_gan_s_w = config['gan_losses']['loss_gan_spatial_weight']
self.loss_gan_t_w = config['gan_losses']['loss_gan_temporal_weight']
self.adv_loss_fn = AdversarialLoss()
self.evaluate_score = config['trainer'].get('evaluate_score', True)
self.store_gated_values = False
self.printlog = False
if self.test_data_loader is not None:
self.toPILImage = ToPILImage()
self.evaluate_test_warp_error = config.get('evaluate_test_warp_error', False)
self.test_output_root_dir = os.path.join(self.checkpoint_dir, 'test_outputs')
init_i3d_model()
def _store_gated_values(self, out_dir):
from model.blocks import GatedConv, GatedDeconv
def save_target(child, out_subdir):
if not os.path.exists(out_subdir):
os.makedirs(out_subdir)
if isinstance(child, GatedConv):
target = child.gated_values[0]
elif isinstance(child, GatedDeconv):
target = child.conv.gated_values[0]
else:
raise ValueError('should be gated conv or gated deconv')
target = target.transpose(0, 1)
for t in range(target.shape[0]):
for c in range(target.shape[1]):
out_file = os.path.join(out_subdir, f'time{t:03d}_channel{c:04d}.png')
self.toPILImage(target[t, c: c + 1]).save(out_file)
for key, child in self.model.generator.coarse_net.upsample_module.named_children():
out_subdir = os.path.join(out_dir, f'upsample_{key}')
save_target(child, out_subdir)
for key, child in self.model.generator.coarse_net.downsample_module.named_children():
out_subdir = os.path.join(out_dir, f'downsample_{key}')
save_target(child, out_subdir)
def _evaluate_data_loader(self, epoch=None, output_root_dir=None, data_loader=None, name='test'):
total_length = 0
total_warp_error = 0 if self.evaluate_test_warp_error else None
total_error = 0
total_psnr = 0
total_ssim = 0
total_p_dist = 0
if output_root_dir is None:
output_root_dir = self.test_output_root_dir
if epoch is not None:
output_root_dir = os.path.join(output_root_dir, f"epoch_{epoch}")
output_root_dir = os.path.join(output_root_dir, name)
output_i3d_activations = []
real_i3d_activations = []
with torch.no_grad():
for batch_idx, data in enumerate(data_loader):
data_input, model_output = self._process_data(data)
inputs, outputs, targets, masks = self._unpack_data(data_input, model_output)
if self.store_gated_values:
out_dir = os.path.join(output_root_dir, 'gated_values', f'input_{batch_idx:04}')
self._store_gated_values(out_dir)
outputs = outputs.clamp(0, 1)
if self.evaluate_score:
# get i3d activation
output_i3d_activations.append(get_i3d_activations(outputs).cpu().numpy())
real_i3d_activations.append(get_i3d_activations(targets).cpu().numpy())
assert len(outputs) == 1 # Batch size = 1 for testing
inputs = inputs[0]
outputs = outputs[0].cpu()
targets = targets[0].cpu()
masks = masks[0].cpu()
if epoch is not None and epoch == 0:
# Save inputs to output_dir
output_dir = os.path.join(output_root_dir, 'inputs', f"input_{batch_idx:04}")
self.logger.debug(f"Saving batch {batch_idx} input to {output_dir}")
save_frames_to_dir([self.toPILImage(t) for t in inputs.cpu()], output_dir)
if epoch is not None and epoch % 5 == 0:
# Save test results to output_dir
output_dir = os.path.join(output_root_dir, f"result_{batch_idx:04}")
self.logger.debug(f"Saving batch {batch_idx} to {output_dir}")
save_frames_to_dir([self.toPILImage(t) for t in outputs], output_dir)
if self.evaluate_score:
# Evaluate scores
warp_error, error, psnr_value, ssim_value, p_dist, length = \
self._evaluate_test_video(outputs, targets, masks)
if self.evaluate_test_warp_error:
total_warp_error += warp_error
total_error += error
total_ssim += ssim_value
total_psnr += psnr_value
total_p_dist += p_dist
total_length += length
if self.evaluate_score:
output_i3d_activations = np.concatenate(output_i3d_activations, axis=0)
real_i3d_activations = np.concatenate(real_i3d_activations, axis=0)
fid_score = get_fid_score(real_i3d_activations, output_i3d_activations)
else:
fid_score = 0
total_p_dist = [0]
total_length = 1
total_p_dist = total_p_dist[0]
if epoch is not None:
self.writer.set_step(epoch, name)
self._write_images(
inputs, outputs, targets, masks,
model_output=model_output, data_input=data_input
)
if self.evaluate_test_warp_error:
self.writer.add_scalar('test_warp_error', total_warp_error / total_length)
self.writer.add_scalar('test_mse', total_error / total_length)
self.writer.add_scalar('test_ssim', total_ssim / total_length)
self.writer.add_scalar('test_psnr', total_psnr / total_length)
self.writer.add_scalar('test_p_dist', total_p_dist / total_length)
self.writer.add_scalar('test_fid_score', fid_score)
return total_warp_error, total_error, total_ssim, total_psnr, total_p_dist, total_length, fid_score
def _write_images(
self, inputs, outputs, targets, masks, output_edges=None,
target_edges=None, model_output=None, data_input=None
):
self.writer.add_image('input', make_grid(inputs.cpu(), nrow=3, normalize=False))
self.writer.add_image('loss_mask', make_grid(masks.cpu(), nrow=3, normalize=False))
self.writer.add_image(
'output', make_grid(outputs.clamp(0, 1).cpu(), nrow=3, normalize=False))
self.writer.add_image('gt', make_grid(targets.cpu(), nrow=3, normalize=False))
self.writer.add_image('diff', make_grid(targets.cpu() - outputs.cpu(), nrow=3, normalize=True))
self.writer.add_image('IO_diff', make_grid(inputs.cpu() - outputs.cpu(), nrow=3, normalize=True))
try:
output_edges = self.losses['loss_edge'][0].current_output_edges
target_edges = self.losses['loss_edge'][0].current_target_edges
self.writer.add_image('output_edge', make_grid(output_edges[0].cpu(), nrow=3, normalize=True))
self.writer.add_image('target_edge', make_grid(target_edges[0].cpu(), nrow=3, normalize=True))
except Exception:
pass
try:
guidances = data_input['guidances']
self.writer.add_image('guidances', make_grid(guidances[0].cpu(), nrow=3, normalize=True))
except Exception:
pass
if model_output is not None:
if 'imcomplete_video' in model_output.keys():
self.writer.add_image('imcomplete_video', make_grid(
model_output['imcomplete_video'][0].transpose(0, 1).cpu(), nrow=3, normalize=False))
def _evaluate_test_video(self, output, gt_frames, masks):
gt_images = [self.toPILImage(gt) for gt in gt_frames]
result_images = [self.toPILImage(result) for result in output]
mask_images = [self.toPILImage(mask / 255) for mask in masks]
return evaluate_video_error(
result_images, gt_images, mask_images,
flownet_checkpoint_path=None,
evaluate_warping_error=self.evaluate_test_warp_error,
printlog=self.printlog
)
def _eval_metrics(self, output, target):
acc_metrics = np.zeros(len(self.metrics))
for i, metric in enumerate(self.metrics):
acc_metrics[i] += metric(output, target)
self.writer.add_scalar(f'{metric.__name__}', acc_metrics[i])
return acc_metrics
def _get_gan_loss(self, outputs, target, masks, discriminator, w, guidances=None, is_disc=None):
if w <= 0:
return torch.Tensor([0]).to(self.device)
scores = self.model.forward(outputs, masks, guidances, model=discriminator)
gan_loss = self.adv_loss_fn(scores, target, is_disc)
return gan_loss
def _get_grad_mean_magnitude(self, output, optimizer):
"""
        Get mean magnitude (absolute value) of gradient of output w.r.t params in the optimizer.
This function is used to get a simple understanding over the impact of a loss.
:output: usually the loss you want to compute gradient w.r.t params
:optimizer: the optimizer who contains the parameters you care
Note:
            This function will reset the gradients stored in the parameters, so please
use it before <your loss>.backward()
Example:
> grad_magnitude = self._get_grad_mean_magnitude(
                  loss_recon * self.loss_recon_w, self.optimizer_g)
> print(grad_magnitude)
"""
optimizer.zero_grad()
output.backward(retain_graph=True)
all_grad = []
for group in optimizer.param_groups:
for p in group['params']:
all_grad.append(p.grad.view(-1))
value = torch.cat(all_grad).abs().mean().item()
optimizer.zero_grad()
return value
def _get_edge_guidances(self, tensors):
from utils.edge import get_edge
guidances = []
for batch_idx in range(tensors.size(0)):
batch_edges = []
for frame_idx in range(tensors.size(1)):
edge = get_edge(
tensors[batch_idx, frame_idx:frame_idx + 1]
)
batch_edges.append(edge)
guidances.append(torch.cat(batch_edges, dim=0))
guidances = torch.stack(guidances)
return guidances
def _process_data(self, data):
inputs = data["input_tensors"].to(self.device)
masks = data["mask_tensors"].to(self.device)
targets = data["gt_tensors"].to(self.device)
# guidances = self._get_edge_guidances(targets).to(self.device) if 'edge' in data['guidance'] else None
guidances = data["guidances"].to(self.device) if len(data["guidances"]) > 0 else None
data_input = {
"inputs": inputs,
"masks": masks,
"targets": targets,
"guidances": guidances
}
model_output = self.model(inputs, masks, guidances)
return data_input, model_output
def _unpack_data(self, data_input, model_output):
# inputs, outputs, targets, masks = self._unpack_data(data_input, model_output)
return (
data_input['inputs'],
model_output['outputs'] if 'refined_outputs' not in model_output.keys()
else model_output['refined_outputs'],
data_input['targets'],
data_input['masks']
)
def _get_non_gan_loss(self, data_input, model_output):
# Compute and write all non-GAN losses to tensorboard by for loop
losses = []
for loss_name, (loss_instance, loss_weight) in self.losses.items():
if loss_weight > 0.0:
loss = loss_instance(data_input, model_output)
self.writer.add_scalar(f'{loss_name}', loss.item())
loss *= loss_weight
losses.append(loss)
loss = sum(losses)
return loss
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current training epoch.
:return: A log that contains all information you want to save.
Note:
If you have additional information to record, for example:
> additional_log = {"x": x, "y": y}
merge it with log before return. i.e.
> log = {**log, **additional_log}
> return log
The metrics in log must have the key 'metrics'.
"""
self.model.train()
epoch_start_time = time.time()
total_loss = 0
total_metrics = np.zeros(len(self.metrics))
for batch_idx, data in enumerate(self.data_loader):
batch_start_time = time.time()
# Set writer
self.writer.set_step((epoch - 1) * len(self.data_loader) + batch_idx)
data_input, model_output = self._process_data(data)
inputs, outputs, targets, masks = self._unpack_data(data_input, model_output)
# Train G
non_gan_loss = self._get_non_gan_loss(data_input, model_output)
loss_gan_s = self._get_gan_loss(
outputs, 1, masks, discriminator='D_s', w=self.loss_gan_s_w, is_disc=False)
loss_gan_t = self._get_gan_loss(
outputs, 1, masks, discriminator='D_t', w=self.loss_gan_t_w, is_disc=False)
loss_total = (
non_gan_loss
+ loss_gan_s * self.loss_gan_s_w
+ loss_gan_t * self.loss_gan_t_w
)
self.optimizer_g.zero_grad()
# Uncomment these lines to see the gradient
# grad_recon = self._get_grad_mean_magnitude(loss_recon, self.optimizer_g)
# grad_vgg = self._get_grad_mean_magnitude(loss_vgg, self.optimizer_g)
# grad_gan_s = self._get_grad_mean_magnitude(loss_gan_s, self.optimizer_g)
# grad_gan_t = self._get_grad_mean_magnitude(loss_gan_t, self.optimizer_g)
# self.logger.info(f"Grad: recon {grad_recon} vgg {grad_vgg} gan_s {grad_gan_s} gan_t {grad_gan_t}")
loss_total.backward()
self.optimizer_g.step()
# Train spatial and temporal discriminators
for d in ['s', 't']:
weight = getattr(self, f'loss_gan_{d}_w')
optimizer = getattr(self, f'optimizer_d_{d}')
if weight > 0:
optimizer.zero_grad()
loss_d = (
self._get_gan_loss(
targets, 1, masks, discriminator=f'D_{d}', w=weight, is_disc=True)
+ self._get_gan_loss(
outputs.detach(), 0, masks, discriminator=f'D_{d}', w=weight, is_disc=True)
) / 2
loss_d.backward()
optimizer.step()
self.writer.add_scalar(f'loss_d_{d}', loss_d.item())
self.writer.add_scalar('loss_total', loss_total.item())
self.writer.add_scalar('loss_gan_s', loss_gan_s.item())
self.writer.add_scalar('loss_gan_t', loss_gan_t.item())
with torch.no_grad():
total_loss += loss_total.item()
total_metrics += self._eval_metrics(outputs, targets)
            if (self.verbosity >= 2 and batch_idx % self.log_step == 0 and epoch < 30) \
                    or batch_idx == 0:
self.logger.info(
f'Epoch: {epoch} [{batch_idx * self.data_loader.batch_size}/{self.data_loader.n_samples} '
f' ({100.0 * batch_idx / len(self.data_loader):.0f}%)] '
f'loss_total: {loss_total.item():.3f}, '
f'BT: {time.time() - batch_start_time:.2f}s'
)
self._write_images(inputs[0], outputs[0], targets[0], masks[0],
model_output=model_output, data_input=data_input)
log = {
'epoch_time': time.time() - epoch_start_time,
'loss_total': total_loss / len(self.data_loader),
'metrics': (total_metrics / len(self.data_loader)).tolist()
}
if self.do_validation:
val_log = self._valid_epoch(epoch)
log = {**log, **val_log}
if self.test_data_loader is not None:
log = self.evaluate_test_set(epoch=epoch, log=log)
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def evaluate_test_set(self, output_root_dir=None, epoch=None, log=None):
        # Drop into a breakpoint when evaluation fails (e.g. on NaN)
self.model.eval()
if isinstance(self.test_data_loader, list):
test_data_loaders = self.test_data_loader
else:
test_data_loaders = [self.test_data_loader]
try:
for i, data_loader in enumerate(test_data_loaders):
name = data_loader.name if data_loader.name is not None else f'test{i}'
total_warp_error, total_error, total_ssim, total_psnr, total_p_dist, total_length, fid_score = \
self._evaluate_data_loader(data_loader=data_loader, name=name,
output_root_dir=output_root_dir, epoch=epoch)
if log is not None:
log[f'{name}_p_dist'] = total_p_dist / total_length
log[f'{name}_fid_score'] = fid_score
if self.printlog:
self.logger.info(f'test set name: {name}')
if self.evaluate_test_warp_error:
self.logger.info(f'test_warp_error: {total_warp_error / total_length}')
self.logger.info(f'test_mse: {total_error / total_length}')
self.logger.info(f'test_ssim: {total_ssim / total_length}')
self.logger.info(f'test_psnr: {total_psnr / total_length}')
self.logger.info(f'test_p_dist: {total_p_dist / total_length}')
self.logger.info(f'test_fid_score: {fid_score}\n')
except Exception as err:
self.logger.error(err, exc_info=True)
breakpoint() # NOQA
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:return: A log that contains information about validation
Note:
The validation metrics in log must have the key 'val_metrics'.
"""
self.model.eval()
total_val_loss = 0
total_val_metrics = np.zeros(len(self.metrics))
self.logger.info(f"Doing {epoch} validation ..")
with torch.no_grad():
for batch_idx, data in enumerate(self.valid_data_loader):
if epoch == 1 and batch_idx > 5:
continue
self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + batch_idx, 'valid')
data_input, model_output = self._process_data(data)
inputs, outputs, targets, masks = self._unpack_data(data_input, model_output)
loss_total = self._get_non_gan_loss(data_input, model_output)
self.writer.add_scalar('loss_total', loss_total.item())
total_val_loss += loss_total.item()
total_val_metrics += self._eval_metrics(outputs, targets)
if batch_idx % self.log_step == 0:
self._write_images(
inputs[0], outputs[0], targets[0], masks[0],
model_output=model_output, data_input=data_input
)
return {
'val_loss': total_val_loss / len(self.valid_data_loader),
'val_metrics': (total_val_metrics / len(self.valid_data_loader)).tolist(),
}
|
amjltc295/Free-Form-Video-Inpainting
|
src/trainer/trainer.py
|
trainer.py
|
py
| 21,228 |
python
|
en
|
code
| 323 |
github-code
|
6
|
5898092758
|
permission_list = [
['fsdDecl', ['fLib', 'fsDecl', 'fsdLink', 'fvLib']],
['fLib', ['f']],
['fsDecl', ['fsDescr', 'fsConstraints', 'fDecl']],
['fvLib', ['binary', 'default', 'fs', 'numeric', 'string', 'symbol', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']],
['fDecl', ['fDescr', 'vRange', 'vDefault']],
['fsConstraints', ['bicond', 'cond']],
['bicond', ['f', 'fs', 'iff']],
['cond', ['f', 'fs', 'then']],
['vDefault', ['binary', 'default', 'fs', 'if', 'numeric', 'string', 'symbol', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']],
['if', ['binary', 'default', 'f', 'fs', 'numeric', 'string', 'symbol', 'then', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']],
['vRange', ['binary', 'default', 'fs', 'numeric', 'string', 'symbol', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']],
['fs', ['f']],
['f', ['binary', 'default', 'fs', 'numeric', 'string', 'symbol', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']],
['vAlt', ['binary', 'default', 'fs', 'numeric', 'string', 'symbol', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']],
['vColl', ['binary', 'default', 'fs', 'numeric', 'string', 'symbol', 'vAlt', 'vLabel']],
['vLabel', ['binary', 'default', 'fs', 'numeric', 'string', 'symbol', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']],
['vMerge', ['binary', 'default', 'fs', 'numeric', 'string', 'symbol', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']],
['vNot', ['binary', 'default', 'fs', 'numeric', 'string', 'symbol', 'vAlt', 'vColl', 'vLabel', 'vMerge', 'vNot']]
]
# empty elements do not need to be included
prohibition_list = [] # not sure if needed?
def allowed(test_predecessor, test_element, line_nr):
permitted = False
print(" checking: {} and {}".format(test_predecessor, test_element))
for rule in permission_list:
if rule[0] == test_predecessor:
#print("equal: ", rule, test_predecessor)
for rule_succ in rule[1]:
if rule_succ == test_element:
#print("equal: ", rule_succ, test_element)
permitted = True
if permitted:
print(" TEI Rules apply, moving on...")
else:
print("Line {}: {} is not allowed after {}".format(line_nr, test_element, test_predecessor))
        raise SystemExit(1)
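# Usage sketch (editor's note): validate a sequence of parent/child element
# pairs from a parsed TEI document; allowed() exits on the first violation.
if __name__ == '__main__':
    sample_pairs = [('fsdDecl', 'fLib'), ('fLib', 'f'), ('f', 'fs')]
    for line_nr, (pred, elem) in enumerate(sample_pairs, start=1):
        allowed(pred, elem, line_nr)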
|
Darboven/TEI-Feature-Structures
|
TEI-Checker/tei_rules.py
|
tei_rules.py
|
py
| 2,287 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17938250421
|
# from __future__ import absolute_import
import base64
import re
# import mimetypes
from config import media_types, static_files, static_ext, save_content
class ResponseParser(object):
"""docstring for ResponseParser"""
def __init__(self, f):
super(ResponseParser, self).__init__()
self.flow = f
# self.content_type = self.get_content_type()
# self.extension = self.get_extension()
# self.ispass = self.capture_pass()
def parser_data(self):
"""parser the capture response & request"""
result = dict()
# result['content_type'] = self.content_type
result['url'] = self.flow.request.url
result['path'] = '/{}'.format('/'.join(self.flow.request.path_components))
# result['extension'] = self.get_extension()
result['host'] = self.flow.request.host
result['port'] = self.flow.request.port
result['scheme'] = self.flow.request.scheme
result['method'] = self.flow.request.method
result['status_code'] = self.flow.response.status_code
# result['date_start'] = self.flow.response.timestamp_start
# result['date_end'] = self.flow.response.timestamp_end
result['content_length'] = int(self.flow.response.headers.get('Content-Length', 0))
# result['static_resource'] = self.ispass
# result['resp_header'] = self.parser_header(self.flow.response.headers)
result['request_header'] = self.parser_header(self.flow.request.headers)
# request resource is media file & static file, so pass
# if self.ispass:
# result['resp_content'] = None
# result['request_content'] = None
# return result
# result['resp_content'] = self.flow.response.content if save_content else ''
# result['request_content'] = self.get_request_content() if save_content else ''
result['request_content'] = self.flow.request.content
return result
# def get_content_type(self):
# if not self.flow.response.headers.get('Content-Type'):
# return ''
# return self.flow.response.headers.get('Content-Type').split(';')[:1][0]
# def get_content_length(self):
# if self.flow.response.headers.get('Content-Length'):
# return int(self.flow.response.headers.get('Content-Length'))
# else:
# return 0
# def capture_pass(self):
# """if content_type is media_types or static_files, then pass captrue"""
#
# if self.extension in static_ext:
# return True
#
# # can't catch the content_type
# if not self.content_type:
# return False
#
# if self.content_type in static_files:
# return True
#
# http_mime_type = self.content_type.split('/')[:1]
# if http_mime_type:
# return True if http_mime_type[0] in media_types else False
# else:
# return False
# def get_request_content(self):
# content = self.flow.request.content
# if 'multipart/form-data' in self.parser_header(self.flow.request.headers).get('Content-Type', ''):
# content = self.decode_response_text(content)
# return self.parser_multipart(content)
# else:
# return content
# def get_header(self):
# return self.parser_header(self.flow.response.headers)
# def get_content(self):
# return self.flow.response.content
# def get_request_header(self):
# return self.parser_header(self.flow.request.headers)
# def get_url(self):
# return self.flow.request.url
# def get_path(self):
# return '/{}'.format('/'.join(self.flow.request.path_components))
# def get_scheme(self):
# return self.flow.request.scheme
#
# def get_method(self):
# return self.flow.request.method
# def get_port(self):
# return self.flow.request.port
#
# def get_host(self):
# return self.flow.request.host
# def get_status_code(self):
# return self.flow.response.status_code
# def get_extension(self):
# if not self.flow.request.path_components:
# return ''
# else:
# end_path = self.flow.request.path_components[-1:][0]
# split_ext = end_path.split('.')
# if not split_ext or len(split_ext) == 1:
# return ''
# else:
# return split_ext[-1:][0][:32]
@staticmethod
def parser_multipart(content):
if isinstance(content, str):
res = re.findall(r'name=\"(\w+)\"\r\n\r\n(\w+)', content)
if res:
return "&".join([k + '=' + v for k, v in res])
else:
return ""
else:
return ""
@staticmethod
def parser_header(header):
headers = {}
for key, value in header.items():
headers[key] = value
return headers
@staticmethod
def decode_response_text(content):
for _ in ['UTF-8', 'GB2312', 'GBK', 'iso-8859-1', 'big5']:
try:
return content.decode(_)
            except (UnicodeDecodeError, LookupError):
continue
return content
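# Usage sketch (editor's note; `flow` is assumed to be a mitmproxy-style
# HTTPFlow object exposing the .request/.response attributes used above):
#   record = ResponseParser(flow).parser_data()
#   print(record['url'], record['status_code'], record['content_length'])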
|
jjf012/PassiveScanner
|
utils/parser.py
|
parser.py
|
py
| 5,258 |
python
|
en
|
code
| 112 |
github-code
|
6
|
74021781309
|
from typing import List
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
# constants
ENGLISH_ALPHABET_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ '
def get_string_size(string: str, encoding: str = 'utf-8') -> int:
    '''Returns the size of a string in bytes under the given encoding'''
    return len(string.encode(encoding))
def get_words_from_text(text: str, approved_chars=ENGLISH_ALPHABET_CHARS) -> List[str]:
'''Returns list of filtered words from a text'''
# filter unwanted characters from text
text = ''.join(char for char in text if char in approved_chars)
# split and format words into list
words = [word.lower() for word in text.split(' ') if len(word) > 0]
return words
# read in file
with open('input.txt', 'r') as input_file:
# iterate through file entries and extract words
words = []
for i, entry_text in enumerate(input_file):
# get words from text and append
entry_words = get_words_from_text(entry_text)
words.extend(entry_words)
# count and rank words
word_count_rank = dict(Counter(words).most_common())
n_unique_words = len(word_count_rank)
# plot
fig, ax = plt.subplots()
plot = ax.plot(range(n_unique_words), list(word_count_rank.values()))
ax.set_xticks(np.arange(1, n_unique_words+1, 25))
n_labels = 6
for i, word in enumerate(list(word_count_rank)[:n_labels]):
ax.text(i, word_count_rank[word], word, fontsize=8)
plt.show()
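# Editor's note: under Zipf's law the k-th most frequent word occurs roughly
# in proportion to 1/k, so the rank-frequency curve is often easier to read
# on log-log axes, e.g. ax.set_xscale('log'); ax.set_yscale('log').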
|
lucrae/zipf-score
|
side/score_old.py
|
score_old.py
|
py
| 1,510 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16542777837
|
import contextlib
from .Indentation import indented
class SourceCodeCollector(object):
def __init__(self):
self.codes = []
def __call__(self, code):
self.emit(code)
def emit(self, code):
for line in code.split("\n"):
self.codes.append(line)
def emitTo(self, emit, level):
for code in self.codes:
emit(indented(code, level))
self.codes = None
@contextlib.contextmanager
def withSubCollector(emit, context):
context.pushCleanupScope()
with context.variable_storage.withLocalStorage():
sub_emit = SourceCodeCollector()
# To use the collector and put code in it and C declarations on the context.
yield sub_emit
local_declarations = context.variable_storage.makeCLocalDeclarations()
if local_declarations:
emit("{")
for local_declaration in local_declarations:
emit(indented(local_declaration))
sub_emit.emitTo(emit, level=1)
emit("}")
else:
sub_emit.emitTo(emit, level=0)
context.popCleanupScope()
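# Usage sketch (editor's note, not part of the module): the collector buffers
# emitted lines so they can be re-emitted later at a chosen indentation level.
#   collector = SourceCodeCollector()
#   collector("a = 1\nb = 2")
#   collector.emitTo(print, level=1)   # prints both lines indented one level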
|
Nuitka/Nuitka
|
nuitka/code_generation/Emission.py
|
Emission.py
|
py
| 1,132 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
18654748060
|
from typing import Dict, List, Type
from src.domain.models.pets import Pets
from src.domain.use_cases import FindPet as FindPetInterface
from src.data.interfaces import PetRepositoryInterface
class FindPet(FindPetInterface):
"""Use case for Find pet"""
def __init__(self, pets_repository: Type[PetRepositoryInterface]):
self.pets_repository = pets_repository
def by_id(self, pet_id: int) -> Dict[bool, List[Pets]]:
"""Method By id"""
response = None
validate = isinstance(pet_id, int)
if validate:
response = self.pets_repository.select_pet(pet_id=pet_id)
return {"Success": validate, "Data": response}
def by_user_id(self, user_id: int) -> Dict[bool, List[Pets]]:
"""Get pet by name"""
response = None
validate = isinstance(user_id, int)
if validate:
response = self.pets_repository.select_pet(user_id=user_id)
return {"Success": validate, "Data": response}
def by_pet_id_and_user_id(
self, pet_id: int, user_id: int
) -> Dict[bool, List[Pets]]:
"""Get pet by name"""
response = None
validate = isinstance(user_id, int) and isinstance(pet_id, int)
if validate:
response = self.pets_repository.select_pet(user_id=user_id, pet_id=pet_id)
return {"Success": validate, "Data": response}
|
MatheusDev20/flask-application-clean-arch
|
src/data/find_pet/find.py
|
find.py
|
py
| 1,392 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41058442656
|
class Poly:
def __init__(self,*terms):
# __str__ uses the name self.terms for the dictionary of terms
# So __init__ should build this dictionary from terms
self.terms = {}
        for numbers in terms:
            assert isinstance(numbers[0], (int, float)), "Poly.__init__: illegal coefficient in: " + str(terms)
            assert isinstance(numbers[1], int) and numbers[1] >= 0, "Poly.__init__: illegal powers in: " + str(terms)
            assert numbers[1] not in self.terms, "Poly.__init__: duplicate powers in: " + str(terms)
            if numbers[0] != 0:
                self.terms[numbers[1]] = numbers[0]
# Fill in the rest of this method, using *terms to intialize self.terms
# I have written str(...) because it is used in the bsc.txt file and
# it is a bit subtle to get correct. Notice that it assumes that
# every Poly object stores a dict whose keys are powers and whose
# associated values are coefficients. This function does not depend
# on any other method in this class being written correctly.
def __str__(self):
def term(c,p,var):
return (str(c) if p == 0 or c != 1 else '') +\
('' if p == 0 else var+('^'+str(p) if p != 1 else ''))
if len(self.terms) == 0:
return '0'
else:
return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')
def __repr__(self):
answer = 'Poly('
for i in self.terms:
answer += '(' + str(self.terms[i]) + ', ' + str(i) + '), '
if self.terms != {}:
answer = answer[:-2]
answer += ')'
return answer
def __len__(self):
answer = 0
for i in self.terms:
if i > answer:
answer = i
return answer
    def __call__(self,arg):
        answer = 0
        for power in self.terms:
            answer += self.terms[power] * arg**power
        return answer
def __iter__(self):
answer = list(self.terms.items())
answer.sort(reverse = True)
return iter(answer)
    def __getitem__(self,index):
        if type(index) != int or index < 0:
            raise TypeError("Sorry, " + str(index) + " must be a non-negative integer.")
        if index not in self.terms:
            return 0
        return self.terms[index]
    def __setitem__(self,index,value):
        if type(index) != int or index < 0:
            raise TypeError("Sorry, " + str(index) + " must be a non-negative integer.")
        if value == 0:
            self.terms.pop(index, None)
        else:
            self.terms[index] = value
    def __delitem__(self,index):
        if type(index) != int or index < 0:
            raise TypeError("Sorry, " + str(index) + " must be a non-negative integer.")
        if index in self.terms:
            del self.terms[index]
    def _add_term(self,c, p):
        if not isinstance(c, (int, float)):
            raise TypeError("Sorry, " + str(c) + " must be an int or float")
        if type(p) != int or p < 0:
            raise TypeError("Sorry, " + str(p) + " must be a non-negative int")
        self.terms[p] = self.terms.get(p, 0) + c
        if self.terms[p] == 0:
            del self.terms[p]
    def __add__(self,right):
        if not isinstance(right, (Poly, int, float)):
            raise TypeError("Sorry " + str(right) + " must be a Polynomial or int or float")
        answer = Poly()
        for p, c in self.terms.items():
            answer._add_term(c, p)
        if isinstance(right, Poly):
            for p, c in right.terms.items():
                answer._add_term(c, p)
        else:
            answer._add_term(right, 0)
        return answer
    def __radd__(self,left):
        # addition is commutative, so delegate to __add__
        return self + left
    def __mul__(self,right):
        if not isinstance(right, (Poly, int, float)):
            raise TypeError("Sorry " + str(right) + " must be a Polynomial or int or float")
        answer = Poly()
        if isinstance(right, Poly):
            for p1, c1 in self.terms.items():
                for p2, c2 in right.terms.items():
                    answer._add_term(c1 * c2, p1 + p2)
        else:
            for p, c in self.terms.items():
                answer._add_term(c * right, p)
        return answer
    def __rmul__(self,left):
        # multiplication is commutative, so delegate to __mul__
        return self * left
    def __eq__(self,right):
        if isinstance(right, Poly):
            return self.terms == right.terms
        if isinstance(right, (int, float)):
            return self.terms == ({} if right == 0 else {0: right})
        raise TypeError("Sorry " + str(right) + " must be a Polynomial or int or float")
if __name__ == '__main__':
# Some simple tests; you can comment them out and/or add your own before
# the driver is called.
print('Start simple tests')
p = Poly((3,2),(-2,1), (4,0))
print(' For Polynomial: 3x^2 - 2x + 4')
print(' str(p):',p)
print(' repr(p):',repr(p))
print(' len(p):',len(p))
print(' p(2):',p(2))
print(' list collecting iterator results:',[t for t in p])
print(' p+p:',p+p)
print(' p+2:',p+2)
print(' p*p:',p*p)
print(' p*2:',p*2)
print('End simple tests\n')
import driver
#driver.default_show_exception=True
#driver.default_show_exception_message=True
#driver.default_show_traceback=True
driver.driver()
|
solomc1/python
|
ics 33/solutions/ile2 solutions/Lab 3/YeSiyuan/poly.py
|
poly.py
|
py
| 8,269 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18131053441
|
diceTop = 0
diceLeft = 0
diceRight = 0
diceFront = 0
diceBack = 0
diceBottom = 0
mapList = []
n,m,y,x,k = map(int,input().split())
for i in range(0,n):
mapList.append(input().split())
movingList = (input().split())
for i in range(0,len(movingList)):
direction = int(movingList[i])
if direction == 1:
if x+1 >= len(mapList[0]):
continue
x += 1
elif direction == 2:
if x-1 < 0 :
continue
x -= 1
elif direction == 3:
if y - 1 < 0:
continue
y -= 1
elif direction == 4:
if y + 1 >= len(mapList):
continue
y += 1
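    # Roll the die to match the move: east/west (1/2) cycle the
    # top-left-bottom-right faces, north/south (3/4) cycle top-front-bottom-back.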
temp = diceTop
if direction == 1:
diceTop = diceLeft
diceLeft = diceBottom
diceBottom = diceRight
diceRight = temp
elif direction == 2:
diceTop = diceRight
diceRight = diceBottom
diceBottom = diceLeft
diceLeft = temp
elif direction == 3:
diceTop = diceFront
diceFront = diceBottom
diceBottom = diceBack
diceBack = temp
elif direction == 4:
diceTop = diceBack
diceBack = diceBottom
diceBottom = diceFront
diceFront = temp
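    # If the map cell is 0, copy the die's bottom face onto it; otherwise copy
    # the cell's number onto the bottom face and reset the cell to 0.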
if(mapList[y][x] == "0"):
mapList[y][x] = str(diceBottom)
else :
diceBottom = int(mapList[y][x])
mapList[y][x] = "0"
print(diceTop)
|
Hyeneung-Kwon/Baekjoon_Python
|
14499.py
|
14499.py
|
py
| 1,387 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17759233501
|
from abc import ABC, abstractmethod
class Book(ABC):
def __init__(self, isbn, title, author, publisher, pages, price, copies):
self.isbn = isbn
self.title = title
self.author = author
self.publisher = publisher
self.pages = pages
self.price = price
self.copies = copies
@abstractmethod
def get_details(self):
pass
@abstractmethod
def in_stock(self):
pass
@abstractmethod
def sell(self):
pass
class PhysicalBook(Book):
def get_details(self):
book_dict = {
"isbn": self.isbn,
"title": self.title,
"author": self.author,
"publisher": self.publisher,
"pages": self.pages,
"price": self.price,
"copies": self.copies
}
return book_dict
def in_stock(self):
        return self.copies > 0
def sell(self):
if self.in_stock():
self.copies -= 1
else:
print('The book is out of stock')
book_list = []
while True:
print("\nMenu:")
print("1. Add Book")
print("2. Display Book Details")
print("3. Exit")
choice = int(input("Enter your choice: "))
if choice == 1:
isbn = input("Enter ISBN: ")
title = input("Enter title: ")
author = input("Enter author: ")
publisher = input("Enter publisher: ")
pages = int(input("Enter number of pages: "))
price = float(input("Enter price: "))
copies = int(input("Enter number of copies: "))
book = PhysicalBook(isbn, title, author, publisher, pages, price, copies)
book_list.append(book)
elif choice == 2:
for book in book_list:
print(book.get_details())
    elif choice == 3:
        if book_list:
            print(book_list[-1].in_stock())
        else:
            print("No books added yet.")
elif choice == 4:
break
else:
print("Invalid choice. Try again.")
|
APARNA01MOHANAN/pycharm-projects
|
book-bank/BOOK34.py
|
BOOK34.py
|
py
| 1,924 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4783789916
|
# Adapted from pytorch examples
from __future__ import print_function
from torch import nn, optim
from railrl.core import logger
import numpy as np
from railrl.pythonplusplus import identity
from railrl.torch.core import PyTorchModule
from railrl.torch.networks import Mlp
import railrl.torch.pytorch_util as ptu
class ReprojectionNetworkTrainer():
def __init__(
self,
train_dataset,
test_dataset,
model,
batch_size=128,
log_interval=0,
lr=1e-3,
**kwargs
):
self.log_interval = log_interval
self.batch_size = batch_size
if ptu.gpu_enabled():
model.cuda()
self.model = model
self.representation_size = model.representation_size
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
self.train_dataset, self.test_dataset = train_dataset, test_dataset
assert self.train_dataset['z'].dtype == np.float32
        assert self.test_dataset['z'].dtype == np.float32
assert self.train_dataset['z_proj'].dtype == np.float32
assert self.test_dataset['z_proj'].dtype == np.float32
self.mse = nn.MSELoss()
def get_batch(self, train=True):
dataset = self.train_dataset if train else self.test_dataset
ind = np.random.randint(0, len(dataset['z']), self.batch_size)
return {
'z': ptu.np_to_var(dataset['z'][ind, :]),
'z_proj': ptu.np_to_var(dataset['z_proj'][ind, :]),
}
def mse_loss(self, z_proj_hat, z_proj):
return self.mse(z_proj_hat, z_proj)
def train_epoch(self, epoch, batches=100):
self.model.train()
mses = []
losses = []
for batch_idx in range(batches):
data = self.get_batch()
z = data["z"]
z_proj = data['z_proj']
self.optimizer.zero_grad()
z_proj_hat = self.model(z)
mse = self.mse_loss(z_proj_hat, z_proj)
loss = mse
loss.backward()
mses.append(mse.data[0])
losses.append(loss.data[0])
self.optimizer.step()
logger.record_tabular("train/epoch", epoch)
logger.record_tabular("train/MSE", np.mean(mses))
logger.record_tabular("train/loss", np.mean(losses))
def test_epoch(self, epoch, save_network=True, batches=100):
self.model.eval()
mses = []
losses = []
for batch_idx in range(batches):
data = self.get_batch(train=False)
z = data["z"]
z_proj = data['z_proj']
z_proj_hat = self.model(z)
mse = self.mse_loss(z_proj_hat, z_proj)
loss = mse
mses.append(mse.data[0])
losses.append(loss.data[0])
logger.record_tabular("test/epoch", epoch)
logger.record_tabular("test/MSE", np.mean(mses))
logger.record_tabular("test/loss", np.mean(losses))
logger.dump_tabular()
if save_network:
logger.save_itr_params(epoch, self.model, prefix='reproj', save_anyway=True)
class ReprojectionNetwork(PyTorchModule):
def __init__(
self,
vae,
hidden_sizes=list([64, 128, 64]),
init_w=1e-3,
hidden_init=ptu.fanin_init,
output_activation=identity,
layer_norm=False,
**kwargs
):
self.save_init_params(locals())
super().__init__()
self.vae = vae
self.representation_size = self.vae.representation_size
self.hidden_init = hidden_init
self.output_activation = output_activation
# self.dist_mu = np.zeros(self.representation_size)
# self.dist_std = np.ones(self.representation_size)
self.dist_mu = self.vae.dist_mu
self.dist_std = self.vae.dist_std
self.relu = nn.ReLU()
self.init_w = init_w
hidden_sizes = list(hidden_sizes)
self.network=Mlp(hidden_sizes,
self.representation_size,
self.representation_size,
layer_norm=layer_norm,
hidden_init=hidden_init,
output_activation=output_activation,
init_w=init_w)
def forward(self, z):
z = z.view(-1, self.representation_size)
return self.network(z)
def __getstate__(self):
d = super().__getstate__()
# Add these explicitly in case they were modified
d["_dist_mu"] = self.dist_mu
d["_dist_std"] = self.dist_std
return d
def __setstate__(self, d):
super().__setstate__(d)
self.dist_mu = d["_dist_mu"]
self.dist_std = d["_dist_std"]
|
snasiriany/leap
|
railrl/torch/vae/reprojection_network.py
|
reprojection_network.py
|
py
| 4,777 |
python
|
en
|
code
| 45 |
github-code
|
6
|
44426734106
|
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import assert_equal
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import create_transaction, CScript, msg_tx, prepare_init_chain
from test_framework.script import OP_CHECKMULTISIG, OP_TRUE
# We create 100 high and 10 low sigops density transactions and make sure that low density transactions are mined too.
class MempoolHighSigopsDensity(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.genesisactivationheight = 50
self.extra_args = [['-whitelist=127.0.0.1', '-genesisactivationheight=%d' % self.genesisactivationheight]]
def run_test(self):
self.test.run()
def get_tests(self):
# shorthand for functions
block = self.chain.next_block
node = self.nodes[0]
self.chain.set_genesis_hash( int(node.getbestblockhash(), 16) )
block(0)
yield self.accepted()
test, out, _ = prepare_init_chain(self.chain, 300, 300)
yield test
# send 100 transactions with high sigops density
txsMultisigs = []
twoGB = 2147483647
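        # 2147483647 = 2^31 - 1: an enormous OP_CHECKMULTISIG key count, which
        # gives each transaction a very high sigops density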
for i in range(100):
txMultisig = create_transaction(out[i].tx, out[i].n, b'', 100000, CScript([twoGB, OP_CHECKMULTISIG]))
self.test.connections[0].send_message(msg_tx(txMultisig))
txsMultisigs.append(txMultisig)
# check that transactions are in mempool
self.check_mempool(self.test.connections[0].rpc, txsMultisigs)
# send 10 transactions with normal sigops density
txsBasics = []
for j in range(10):
txBasic = create_transaction(out[i+j+1].tx, out[i+j+1].n, b'', 100000, CScript([2, OP_CHECKMULTISIG]))
self.test.connections[0].send_message(msg_tx(txBasic))
txsBasics.append(txBasic)
# check that transactions are in mempool
self.check_mempool(self.test.connections[0].rpc, txsBasics)
mempool = node.getrawmempool()
for tx in txsMultisigs:
assert_equal(True, tx.hash in mempool)
for tx in txsBasics:
assert_equal(True, tx.hash in mempool)
node.generate(1)
blockTxs = node.getblock(node.getbestblockhash())['tx']
for tx in txsBasics:
assert_equal(True, tx.hash in blockTxs)
if __name__ == '__main__':
MempoolHighSigopsDensity().main()
|
bitcoin-sv/bitcoin-sv
|
test/functional/bsv-highsigopsdensitymempool.py
|
bsv-highsigopsdensitymempool.py
|
py
| 2,529 |
python
|
en
|
code
| 597 |
github-code
|
6
|
71844063869
|
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from simple_menu import MenuItem
submenu_items = [
MenuItem(
_("customers").capitalize(),
reverse("packs:sales_customer_list"),
weight=20,
icon="bx-right-arrow-alt",
),
MenuItem(
_("invoices").capitalize(),
reverse("packs:sales_invoice_list"),
weight=20,
icon="bx-right-arrow-alt",
),
]
sales_item = MenuItem(
_("sales").capitalize(), "#", icon="bxs-shopping-bag", children=submenu_items
)
|
dbsiavichay/faclab
|
apps/accounts/menus/sales.py
|
sales.py
|
py
| 563 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25995631588
|
from dataclasses import dataclass, field
from .. import docker
from .. import exceptions
from .. import utils
from ..runtime import register, RuntimePlugin
@register
@dataclass
class Docker(RuntimePlugin):
name: str = field(init=False, default="Docker")
def init(self, graph, outputs):
# Parse the users docker conf file
# and record a list of logins we know about
self.auths = set()
self.cfg = None
self.graph = graph
self.image_pull_secrets = {}
cfg = docker.parse_config()
if cfg:
            self.auths |= set(cfg.get("auths", {}).keys())
self.cfg = cfg
def image_secrets_for(self, image):
m = docker.parse_docker_tag(image)
if not m or m["domain"] not in self.auths:
return None
r = utils.AttrAccess(
auth=docker.auth_for(self.cfg, m["domain"]),
key=f"{self.graph.name}-{m['domain']}",
)
self.image_pull_secrets[r.key] = r
return r
|
parlaylabs/model
|
model/runtimes/docker.py
|
docker.py
|
py
| 1,011 |
python
|
en
|
code
| 2 |
github-code
|
6
|
75385540986
|
from collections import defaultdict
T = int(input())
for i in range(T):
N = int(input())
c = list(map(int, input().split(' ')))
g = defaultdict(list)
for _ in range(N - 1):
edge = list(map(int, input().split(' ')))
g[edge[0]].append(edge[1])
g[edge[1]].append(edge[0])
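    # dfs(u, pere) returns the best gold sum along a single downward path from u;
    # the answer joins the two best child paths through vertex 1 (visited once).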
def dfs(u, pere):
maxi = 0
for v in g[u]:
if v != pere:
maxi = max(maxi, dfs(v, u))
return maxi + c[u - 1]
L = []
for v in g[1]:
L.append(dfs(v, 1))
L.sort()
res = c[0]
if len(L) > 0:
res += L[-1]
if len(L) > 1:
res += L[-2]
print(f"Case #{i + 1}: {res}")
|
fortierq/competitions
|
fb_hacker_cup/2021/qualification/c1_gold_mine.py
|
c1_gold_mine.py
|
py
| 686 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6146581577
|
import datetime
import pyttsx3
import speech_recognition as sr
import wikipedia
import webbrowser
import pywhatkit
import time
import threading
from newsapi import NewsApiClient
import random
maquina = pyttsx3.init()
voz = maquina.getProperty('voices')
maquina.setProperty('voice', voz[1].id)
def executa_comando():
try:
with sr.Microphone() as source:
recognizer = sr.Recognizer()
voz = recognizer.listen(source)
comando = recognizer.recognize_google(voz, language='pt-BR')
comando = comando.lower()
return comando
except sr.UnknownValueError:
maquina.say('Não entendi o comando')
maquina.runAndWait()
except sr.RequestError as e:
maquina.say('Desculpe, houve um erro ao processar o comando')
maquina.runAndWait()
return ''
def comando_voz_usuario():
while True:
comando = executa_comando()
if 'horas' in comando:
tempo = datetime.datetime.now().strftime('%H:%M')
maquina.say('Agora são ' + tempo)
maquina.runAndWait()
elif 'procure por' in comando:
procurar = comando.replace('procure por', '')
wikipedia.set_lang('pt')
resultado = wikipedia.summary(procurar, 2)
maquina.say(resultado)
maquina.runAndWait()
elif 'abrir navegador' in comando:
webbrowser.open('https://www.google.com.br/')
elif 'pesquise por' in comando:
pesquisar = comando.replace('pesquise por', '')
webbrowser.open('https://www.google.com.br/search?q=' + pesquisar)
elif 'toque' in comando:
musica = comando.replace('toque', '')
pywhatkit.playonyt(musica)
maquina.say('Tocando Música ' + musica)
maquina.runAndWait()
elif 'clima' in comando:
obter_clima()
elif 'pare de escutar' in comando:
maquina.say('Por quantos minutos você quer que eu pare de escutar?')
maquina.runAndWait()
resposta = executa_comando()
try:
tempo = int(resposta)
maquina.say('Ok, vou parar de escutar por ' + str(tempo) + ' minutos')
maquina.runAndWait()
time.sleep(tempo * 60)
maquina.say('Voltei! O que posso fazer por você?')
maquina.runAndWait()
except ValueError:
maquina.say('Desculpe, não entendi o tempo que você informou')
maquina.runAndWait()
elif 'tchau' in comando:
maquina.say('Tchau!, foi bom te ver')
maquina.runAndWait()
break
elif 'definir alarme' in comando:
partes = comando.split(' ')
hora = partes[2]
mensagem = ' '.join(partes[3:])
definir_alarme(hora, mensagem)
maquina.say('Alarme definido para ' + hora + '.')
maquina.runAndWait()
elif 'definir lembrete' in comando:
partes = comando.split(' ')
tempo_espera = int(partes[2])
mensagem = ' '.join(partes[3:])
def alerta():
time.sleep(tempo_espera)
maquina.say(mensagem)
maquina.runAndWait()
thread = threading.Thread(target=alerta)
thread.start()
maquina.say('Lembrete definido para daqui a ' + str(tempo_espera) + ' segundos.')
maquina.runAndWait()
elif 'notícias' in comando:
obter_noticias()
elif 'piada' in comando:
contar_piada()
elif 'ajuda' in comando:
exibir_ajuda()
else:
maquina.say('Comando não reconhecido')
maquina.runAndWait()
def definir_alarme(hora, mensagem):
agora = datetime.datetime.now()
    horario_alarme = datetime.datetime.combine(agora.date(), datetime.datetime.strptime(hora, '%H:%M').time())
diferenca = horario_alarme - agora
segundos = diferenca.seconds
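    # Note: this assumes the alarm time is still ahead on the same day; a time
    # already past would yield a negative timedelta whose .seconds wraps around.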
def alerta():
time.sleep(segundos)
maquina.say(mensagem)
maquina.runAndWait()
thread = threading.Thread(target=alerta)
thread.start()
def obter_clima():
maquina.say('Desculpe, ainda não posso fornecer informações sobre o clima.')
maquina.runAndWait()
def obter_noticias():
    client = NewsApiClient(api_key='YOUR_NEWS_API_KEY')
    top_headlines = client.get_top_headlines(language='pt')
articles = top_headlines['articles']
maquina.say('Aqui estão as principais notícias:')
maquina.runAndWait()
for article in articles:
title = article['title']
maquina.say(title)
maquina.runAndWait()
def contar_piada():
piadas = [
"Por que a galinha atravessou a rua? Para chegar ao outro lado.",
"O que o pato disse para a pata? 'Vem Quá!'",
"Qual é o cúmulo da velocidade? Levantar a mão para pedir licença ao vento.",
"Por que o livro de matemática cometeu suicídio? Porque tinha muitos problemas.",
"Qual é o doce preferido do átomo? Pé de moléculas."
]
piada = random.choice(piadas)
maquina.say(piada)
maquina.runAndWait()
def exibir_ajuda():
ajuda = "Aqui estão alguns comandos que você pode usar:\n" \
"- Horas: para saber a hora atual.\n" \
"- Procure por [termo]: para pesquisar informações no Wikipedia.\n" \
"- Abrir navegador: para abrir o navegador padrão.\n" \
"- Pesquise por [termo]: para pesquisar no Google.\n" \
"- Toque [música]: para reproduzir uma música no YouTube.\n" \
"- Clima: para obter informações sobre o clima.\n" \
"- Pare de escutar: para pausar a escuta por um determinado tempo.\n" \
"- Tchau: para encerrar o programa.\n" \
"- Definir alarme [hora] [mensagem]: para definir um alarme.\n" \
"- Definir lembrete [tempo] [mensagem]: para definir um lembrete.\n" \
"- Notícias: para obter as principais notícias.\n" \
"- Piada: para ouvir uma piada.\n" \
"- Ajuda: para exibir esta mensagem de ajuda."
maquina.say(ajuda)
maquina.runAndWait()
comando_voz_usuario()
|
lucasss45/Fryday-IA
|
alfredv2.6.py
|
alfredv2.6.py
|
py
| 6,390 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
27884694892
|
from django.contrib import admin
from .models import Division, Farm
# Register your models here.
class DivisionAdmin(admin.ModelAdmin):
list_display = (
"division_name",
"division_code",
)
admin.site.register(Division, DivisionAdmin)
admin.site.register(Farm)
|
Wageesha95/dbapp-live
|
farms/admin.py
|
admin.py
|
py
| 289 |
python
|
en
|
code
| 0 |
github-code
|
6
|