seq_id (string, length 7-11) | text (string, length 156-1.7M) | repo_name (string, length 7-125) | sub_path (string, length 4-132) | file_name (string, length 4-77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
30293188969
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path("new", views.create, name='new'),
path('list', views.list, name='list'),
path('edit/<int:task_id>', views.edit, name='edit'),
]
|
drazisil/task-zero
|
tasks/urls.py
|
urls.py
|
py
| 252 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5308350680
|
# from math import sqrt
# def prime_list(n):
# sieve = [True] * n
# m = int(sqrt(n))
# for i in range(2, m+1):
# if sieve[i] == True:
# for j in range(2*i, n, i):
# sieve[j] = False
# return [i for i in range(2,n) if sieve[i] == True]
#
# def prime_num(n):
# li = prime_list(n)
# idx = max([i for i in range(len(li)) if li[i] <= n/2])
# for i in range(idx, -1, -1):
# for j in range(i, len(li)):
# if li[i] + li[j] == n:
# return [li[i], li[j]]
# for _ in range(int(input())):
# n = int(input())
# print(" ".join(map(str,prime_num(n))))
prime_list = [True for i in range(10001)]
for i in range(2, 10001):
if prime_list[i]:
for j in range(2*i, 10001, i):
prime_list[j] = False
T = int(input())
for _ in range(T):
n = int(input())
a = n // 2
b = a
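    # Goldbach partition: walk a down and b up from n // 2 until both are prime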
while a > 0:
if prime_list[a] and prime_list[b]:
print(a, b)
break
else:
a-=1
b+=1
# prime_num = [0 for i in range(10001)]
# prime_num[1] = 1
# for i in range(2, 98):
# for j in range(i*2, 10001, i):
# prime_num[j] = 1
# t = int(input())
#
# for _ in range(t):
# a = int(input())
# b = a // 2
# for j in range(b, 1, -1):
# if prime_num[a - j] == 0 and prime_num[j] == 0:
# print(j, a-j)
# break
# import math
# T = int(input())
# lis = list()
# for _ in range(T):
# lis.append(int(input()))
#
# def is_prime(num):
# if num == 1:
# return False
# for i in range(2, int(math.sqrt(num))+1):
# if num % i == 0:
# return False
# return True
#
# for i in lis:
# lis2 = []
# for j in range(2, i+1):
# if is_prime(j):
# lis2.append(j)
# for _ in range(len(lis)-1):
# for l in range(len(lis)-1):
|
louisuss/Algorithms-Code-Upload
|
Python/Baekjoon/Math/9020.py
|
9020.py
|
py
| 1,897 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43281391404
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 23 22:30:25 2023
@author: user
"""
import numpy as np
import sys
print("\n-------------GAUSS ELIMINATION--------------\n")
n = int(input("Enter number of unknowns : "))
# for storing augmented matrix
a = np.zeros((n,n+1))
# for storing solution vector
x = np.zeros(n)
# Reading augmented matrix coefficients
print('\nEnter Augmented Matrix Coefficients :')
for i in range(n):
for j in range(n+1):
a[i][j] = float(input( 'a['+str(i)+']['+ str(j)+']='))
# Applying Gauss Elimination
for i in range(n):
if a[i][i] == 0.0:
sys.exit('Divide by zero detected!')
for j in range(i+1, n):
ratio = a[j][i]/a[i][i]
for k in range(n+1):
a[j][k] = a[j][k] - ratio * a[i][k]
# Back Substitution
x[n-1] = a[n-1][n]/a[n-1][n-1]
for i in range(n-2,-1,-1):
x[i] = a[i][n]
for j in range(i+1,n):
x[i] = x[i] - a[i][j]*x[j]
x[i] = x[i]/a[i][i]
print('\nRequired solution is : \n')
for i in range(n):
print('X%d = %0.2f' %(i,x[i]), end = '\t')
|
AksA1210/Numerical-Methods-Lab
|
Final/Gauss_elimination.py
|
Gauss_elimination.py
|
py
| 1,142 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7007626301
|
def fib(n):
    # Naive recursive Fibonacci (not used below; main() iterates the sequence instead)
    if n <= 2:
        return 1
    return fib(n-1) + fib(n-2)
def main():
n = 0
m = 1
result = 0
while n < 4000000:
tmp = n
n = n + m
m = tmp
if n % 2 == 0:
result += n
# print(n, n % 2)
# print(n, result)
print("Problem 2:", result)
|
minuq/project-euler
|
problems/problem_2.py
|
problem_2.py
|
py
| 318 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73017766588
|
import json
from typing import Dict, List, Tuple
from flask import Flask, jsonify, request
from rb.complexity.complexity_index import compute_indices
from rb.complexity.index_category import IndexCategory
from rb.core.document import Document
from rb.core.lang import Lang
from rb.core.text_element import TextElement
from rb.core.word import Word
from rb.processings.keywords.keywords_extractor import KeywordExtractor
from rb.similarity.vector_model import (CorporaEnum, VectorModel,
VectorModelType)
from rb.similarity.vector_model_factory import VECTOR_MODELS, get_default_model
from rb.utils.utils import str_to_lang
from nltk.corpus import wordnet as wn
import networkx as nx
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
app = Flask(__name__)
def keywordsOption():
return ""
def transform_for_visualization(dataName, JsonName, textType, keywords: List[Tuple[int, Word]], keywordsWithmax: List[Tuple[int, Word]], lang: Lang) -> Dict:
log = logging.getLogger("my-logger")
vector_model: VectorModel = get_default_model(lang)
edge_list, node_list = [], []
edge_list2, node_list2 = [], []
#sort the keywords
G = nx.Graph()
edge_labels={}
from_node = []
to_node = []
value= []
node_size = []
for kw in keywords:
node_list.append({
"type": "Word",
"uri": kw[1],
"displayName": kw[1],
"active": True,
"degree": str(max(0, float(kw[0])))
})
for kw in keywordsWithmax:
node_list2.append({
"type": "Word",
"uri": kw[1],
"displayName": kw[1],
"active": True,
"degree": str(max(0, float(kw[0])))
})
G.add_node(kw[1],weight=max(0, float(kw[0])))
node_size.append(int(max(0, float(kw[0]))*1000))
for i, kw1 in enumerate(keywords):
for j, kw2 in enumerate(keywords):
try:
sim = vector_model.similarity(vector_model.get_vector(kw1[1]), vector_model.get_vector(kw2[1]))
if i != j and sim >= 0.3:
edge_list.append({
"edgeType": "SemanticDistance",
"score": str(max(sim, 0)),
"sourceUri": kw1[1],
"targetUri": kw2[1]
})
except:
print("Problem with " + kw1[1] + " or " + kw2[1])
for i, kw1 in enumerate(keywordsWithmax):
for j, kw2 in enumerate(keywordsWithmax):
try:
sim = vector_model.similarity(vector_model.get_vector(kw1[1]), vector_model.get_vector(kw2[1]))
if i != j and sim >= 0.3:
edge_list2.append({
"edgeType": "SemanticDistance",
"score": str(max(sim, 0)),
"sourceUri": kw1[1],
"targetUri": kw2[1]
})
print("Problem with ****************************************")
from_node.append(kw1[1])
to_node.append(kw2[1])
if not G.has_edge(str(kw1[1]), str(kw2[1])):
G.add_edge(str(kw1[1]), str(kw2[1]))
value.append(int(max(sim, 0)*10))
except:
print("Problem with " + kw1[1] + " or " + kw2[1])
#pos = nx.nx_agraph.graphviz_layout(G, prog="twopi")
#nx.draw(G, with_labels = True, node_size=1500, node_color="skyblue", pos=pos)
#nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
# Build a dataframe with your connections
#df = pd.DataFrame({ 'from':from_node, 'to':to_node, 'value':value})
# Build your graph
#G=nx.from_pandas_edgelist(df, 'from', 'to', create_using=nx.Graph() )
#G = nx.star_graph(30)
plt.clf()
pos = nx.spring_layout(G, k=1, iterations=20, scale=6)
options = {
"node_color": "#A0CBE2",
"edge_color": value,
"width": 2,
"edge_cmap": plt.cm.Blues,
"with_labels": True,
"node_size":node_size
}
plt.figure(figsize=(8, 5))
nx.draw(G, pos, **options)
# Custom the nodes:
#nx.draw(G, with_labels=True, node_color='skyblue', node_size=1500, edge_color=df['value'], width=10.0, edge_cmap=plt.cm.Blues)
plt.savefig('rb_api/pandoc_filters/images/'+ dataName +'.png', dpi=300)
plt.clf()
data = getJson('rb_api/pandoc_filters/'+JsonName+'.json')
data.update({textType : 'rb_api/pandoc_filters/images/'+dataName+'.png'})
data.update({dataName : node_list})
with open('rb_api/pandoc_filters/'+JsonName+'.json', 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
return {
"data": {
"edgeList": edge_list,
"nodeList": node_list
},
"success": True,
"errorMsg": ""
}
def getJson(url):
varData= {}
if os.path.isfile(url):
# checks if file exists
print ("File exists ")
with open(url, encoding='UTF-8') as f:
varData = json.load(f)
return varData
def keywordsPost():
"""TODO, not working"""
params = json.loads(request.get_data())
posTagging = params.get('pos-tagging')
bigrams = params.get('bigrams')
text = params.get('text')
languageString = params.get('language')
lang = str_to_lang(languageString)
threshold = params.get('threshold')
plotName = "wordnet"
#plotName = params.get('saveAs')
# if lang is Lang.RO:
# vector_model = VECTOR_MODELS[lang][CorporaEnum.README][VectorModelType.WORD2VEC](
# name=CorporaEnum.README.value, lang=lang)
# elif lang is Lang.EN:
# vector_model = VECTOR_MODELS[lang][CorporaEnum.COCA][VectorModelType.WORD2VEC](
# name=CorporaEnum.COCA.value, lang=lang)
# elif lang is Lang.ES:
# vector_model = VECTOR_MODELS[lang][CorporaEnum.JOSE_ANTONIO][VectorModelType.WORD2VEC](
# name=CorporaEnum.JOSE_ANTONIO.value, lang=lang)
# lsa = params.get('lsa')
# lda = params.get('lda')
# w2v = params.get('w2v')
# threshold = params.get('threshold')
# textElement = Document(lang=lang, text=text, vector_model=vector_model)
# print(textElement.keywords)
dataName = params.get('saveAs')
textType = params.get('type')
JsonName = params.get('topicName')
keywords = KeywordExtractor.extract_keywords(True, text=text, lang=lang)
keywordsWithmax = KeywordExtractor.extract_keywords(True, text=text, lang=lang, max_keywords=15)
return jsonify(transform_for_visualization(dataName, JsonName, textType, keywords=keywords, keywordsWithmax=keywordsWithmax, lang=lang))
|
rwth-acis/readerbenchpyapi
|
rb_api/keywords/keywords.py
|
keywords.py
|
py
| 6,881 |
python
|
en
|
code
| 1 |
github-code
|
6
|
44793179363
|
# Solution 179 Inorder using Loop and Recursion
class Node:
def __init__(self, value):
self.data = value
self.left = None
self.right = None
def __str__(self):
return str(self.data)
def inorderR(root):
if root is None:
return
inorderR(root.left)
print(root.data, end=' ')
inorderR(root.right)
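# Iterative inorder traversal using an explicit stack instead of recursion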
def inorderL(root):
st = []
current = root
while True:
if current is not None:
st.append(current)
current = current.left
elif st:
temp = st.pop()
print(temp.data, end=' ')
current = temp.right
del temp
else:
break
node1 = Node(1)
node2 = Node(2)
node3 = Node(3)
node4 = Node(4)
node5 = Node(5)
node1.left = node2
node1.right = node3
node2.left = node4
node2.right = node5
inorderL(node1)
inorderR(node1)
|
Shwaubh/LoveBabbarSolution
|
Binary Trees/Solution179InorderOrderTravesalLoop.py
|
Solution179InorderOrderTravesalLoop.py
|
py
| 911 |
python
|
en
|
code
| 2 |
github-code
|
6
|
4406135371
|
class TreeNode():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.parent = None
class BST():
def __init__(self, root=None):
self.root = root
def insert_recursive(self, val):
def recursive(node, val):
if not node:
return TreeNode(val)
if val < node.val:
node.left = recursive(node.left, val)
node.left.parent = node
elif val > node.val:
node.right = recursive(node.right, val)
node.right.parent = node
return node
self.root = recursive(self.root, val)
def inorder_traversal_recursive(self):
def recursive(node):
if node:
recursive(node.left)
result.append(node.val)
recursive(node.right)
result = []
recursive(self.root)
return result
def preorder_traversal_recursive(self):
def recursive(node):
if node:
result.append(node.val)
recursive(node.left)
recursive(node.right)
result = []
recursive(self.root)
return result
def postorder_traversal_recursive(self):
def recursive(node):
if node:
recursive(node.left)
recursive(node.right)
result.append(node.val)
result = []
recursive(self.root)
return result
def get_left_most_node(self, node):
if node is None:
return node
while node.left is not None:
node = node.left
return node
def successor(self, node):
if node is None:
return node
if node.right is not None:
return self.get_left_most_node(node.right)
else:
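            # No right subtree: climb up until the current node is its parent's left child; that parent is the successor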
parent = node.parent
while parent is not None and parent.left != node:
node = parent
parent = node.parent
return parent
|
guzhoudiaoke/data_structure_and_algorithms
|
coding_interview_guide/3_binary_tree/17_find_successor/bst.py
|
bst.py
|
py
| 2,083 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8410504339
|
#! /usr/bin/python
import logging
import os
from pathlib import Path
import coloredlogs
from dotenv import load_dotenv
PROJECT_ROOT = Path(__file__).parent.resolve()
#####################
# CONFIGURE LOGGING #
#####################
LOG_PATH = str(PROJECT_ROOT / "worker.log")
logging.basicConfig(
filename=LOG_PATH,
filemode="a+",
format="%(asctime)s,%(msecs)d [%(name)s] %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
coloredlogs.install(fmt="%(asctime)s [%(programname)s] %(levelname)s %(message)s")
#################
# ENV VARIABLES #
#################
ENV_PATH = str(PROJECT_ROOT / ".env")
ENV_LOCAL_PATH = str(PROJECT_ROOT / ".env.local")
# load default variables
load_dotenv(ENV_PATH)
# override variables with .env.local
load_dotenv(ENV_LOCAL_PATH, override=True)
#######
# AWS #
#######
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = os.getenv("AWS_REGION_NAME")
AWS_BUCKET_NAME = os.getenv("AWS_BUCKET_NAME")
#########
# MYSQL #
#########
MYSQL_HOST = os.getenv("MYSQL_HOST")
MYSQL_USER = os.getenv("MYSQL_USER")
MYSQL_PASSWORD = os.getenv("MYSQL_PASSWORD")
MYSQL_DB = os.getenv("MYSQL_DB")
|
darwin403/translate-transcribe-videos
|
settings.py
|
settings.py
|
py
| 1,222 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25033983488
|
from django.urls import path
from . import views
urlpatterns = [
# UI & API hybrid routes
path("", views.index, name="index"),
path("posts/<int:page>", views.posts, name="posts"),
path("following/<int:page>", views.following, name="following"),
path("profile/<str:username>/<int:page>", views.profile, name="profile"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("register", views.register, name="register"),
# API routes
path("post-edit/<int:post_id>", views.post_edit, name="post_edit"),
path("toggle-like/<int:post_id>",
views.toggle_like, name="toggle_like"),
path("toggle-follow/<int:user_id>",
views.toggle_follow, name="toggle_follow"),
]
|
csloan29/HES-e-33a-web-django
|
network/network/urls.py
|
urls.py
|
py
| 773 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40299141620
|
import numpy as np
from typing import List, Optional, Tuple
from collections import defaultdict
from kaggle_environments.envs.halite.helpers import Ship
from .board import MyBoard, ALL_SHIP_ACTIONS
from .logger import logger
def ship_converts(board: MyBoard):
""" Convert our ships into shipyards """
if board.step == 0 or board.moves_left < 20:
return
if not board.num_my_shipyards:
is_final_part = board.moves_left <= 40
_create_shipyard(board, is_final_part)
for ship in board.free_ships:
# CHECK if in danger without escape, convert if h > 500
if ship.halite <= board.configuration.convert_cost:
continue
avoid_moves = board.avoid_moves(ship)
if ALL_SHIP_ACTIONS - avoid_moves:
continue
logger.warning(
f"Ship {ship.id} at {ship.position}: Can't run away, converting."
)
board.create_shipyard(ship)
# Generate a shipyard from the best ship
min_score = 1000
if board.num_my_shipyards < 2:
min_score = 400
if (
board.num_my_shipyards <= 3
and board.num_my_ships > 10 + board.num_my_shipyards * 5
and board.moves_left > 100
):
available_ships = [x for x in board.free_ships if _can_convert_ship(board, x)]
if available_ships:
ship, score = _choice_ship_to_convert(board, available_ships)
if ship is not None and score > min_score:
logger.info(
f"Ship {ship.id} at {ship.position}: Create a shipyard, cell score = {score}."
)
board.create_shipyard(ship)
def _can_convert_ship(board: MyBoard, ship: Ship) -> bool:
""" Is this the good place for a shipyard? """
pos = ship.position
if pos in board.position_to_shipyard:
return False
if (
ship.halite + board.my_halite < board.configuration.convert_cost
or board.is_danger_position(pos, ship)
):
return False
num_my_shipyards = sum(
1 for x in board.my_shipyards if board.distance(x.position, pos) <= 2
)
if num_my_shipyards > 0:
return False
num_my_ships = sum(
1 for x in board.my_ships if board.distance(x.position, pos) <= 1
)
if num_my_ships < 1:
return False
min_distance_to_enemy_ship = min(
board.distance(x.position, pos)
for x in board.ships.values()
if x.player_id != board.me.id
)
if min_distance_to_enemy_ship <= 2:
return False
return True
def _create_shipyard(board: MyBoard, is_final_part: bool = False):
""" What we do if we haven't shipyards """
if is_final_part:
# the end of the game, convert one ship if it makes sense
ship_to_halite = defaultdict(int)
available_ships = [
x
for x in board.my_ships
if x.halite + board.my_halite >= board.configuration.convert_cost
]
for ship in available_ships:
distance_to_enemy_ship = board.distance_to_enemy_ship(ship.position, board.me)
distance_to_enemy_ship = distance_to_enemy_ship or board.size
if distance_to_enemy_ship < 3:
# an enemy vessel nearby, can't convert
continue
max_my_ship_distance = min(distance_to_enemy_ship, board.moves_left)
for other_ship in board.my_ships:
if board.distance(ship.position, other_ship.position) < max_my_ship_distance:
ship_to_halite[ship] += other_ship.halite
if not ship_to_halite:
return
max_halite = max(ship_to_halite.values())
if max_halite > board.configuration.convert_cost:
# it makes sense to convert, choose one
ship = [s for s, h in ship_to_halite.items() if h == max_halite][0]
board.create_shipyard(ship)
else:
        # middle of the game, we have to create a shipyard
logger.warning("No shipyards! We must create at least one!")
available_ships = [
x
for x in board.my_ships
if x.halite + board.my_halite >= board.configuration.convert_cost
]
if not available_ships:
logger.warning("Can't create a shipyard, not enough halite! Keep mining.")
return
if (
len(available_ships) == 1
and board.my_halite + available_ships[0].halite
< board.configuration.convert_cost + board.configuration.spawn_cost
):
logger.warning("Can't create a shipyard, not enough halite! Keep mining.")
return
ship, _ = _choice_ship_to_convert(board, available_ships)
if ship:
board.create_shipyard(ship)
def _choice_ship_to_convert(
board: MyBoard, ships: List[Ship]
) -> Tuple[Optional[Ship], float]:
assert len(ships) > 0
ship, score = None, -np.inf
for _ship in ships:
pos = _ship.position
if pos in board.position_to_shipyard:
# shipyard here
continue
_score = board.environment_reserves(pos)
_score -= board.position_to_halite[pos]
if _score > score:
ship, score = _ship, _score
return ship, score
|
w9PcJLyb/HaliteIV-bot
|
halite/ship_converts.py
|
ship_converts.py
|
py
| 5,297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8630223604
|
# exceptions.py
#
# This module is part of linux_commands/commands module and is released under
# the GNU Public License: https://en.wikipedia.org/wiki/GNU_General_Public_License
""" Module containing all exceptions thrown throughout the cmd package, """
from commands.utils.cmd_utils import safe_decode
class QuietError():
""" Error class that will just be Quiet """
pass
class CmdError(Exception):
""" Base class for all package exceptions """
class NoSuchPathError(CmdError, OSError):
""" Thrown if a path could not be access by the system. """
class MultipleCommandError(CmdError):
""" Thrown if there are Multiple paths for a command """
def __init__(self, command, paths, status=None, stderr=None, stdout=None):
        if not isinstance(paths, (tuple, list)):
            paths = paths.split()
        self.paths = paths
def __str__(self):
return (self.msg + f"\n cmdline {self._cmd}")
class CommandError(CmdError):
"""
Base class for exceptions thrown at every stage of `Popen()` execution.
:param command:
A non-empty list of argv comprising the command-line.
"""
    #: A unicode print-format with 2 `%s` for `<cmdline>` and the rest,
#: e.g.
#: "'%s' failed%s"
_msg = "Cmd('%s') failed%s"
def __init__(self, command, status=None, stderr=None, stdout=None):
if not isinstance(command, (tuple, list)):
command = command.split()
self.command = command
self.status = status
if status:
if isinstance(status, Exception):
status = "%s('%s')" % (type(status).__name__, safe_decode(str(status)))
else:
try:
status = 'exit code(%s)' % int(status)
except (ValueError, TypeError):
s = safe_decode(str(status))
status = "'%s'" % s if isinstance(status, str) else s
self._cmd = safe_decode(command[0])
self._cmdline = ' '.join(safe_decode(i) for i in command)
self._cause = status and " due to: %s" % status or "!"
self.stdout = stdout and "\n stdout: '%s'" % safe_decode(stdout) or ''
self.stderr = stderr and "\n stderr: '%s'" % safe_decode(stderr) or ''
def __str__(self):
return (self._msg + "\n cmdline: %s%s%s") % (
self._cmd, self._cause, self._cmdline, self.stdout, self.stderr)
class CommandNotFound(CommandError):
"""Thrown if we cannot find the `cmd` executable in the PATH or at the path given by
the GIT_PYTHON_GIT_EXECUTABLE environment variable"""
def __init__(self, command, cause):
        super(CommandNotFound, self).__init__(command, cause)
self._msg = "Cmd('%s') not found%s"
class CmdCommandError(CommandError):
""" Thrown if execution of the cmd command fails with non-zero status code. """
def __init__(self, command, status, stderr=None, stdout=None):
super(CmdCommandError, self).__init__(command, status, stderr, stdout)
class CacheError(CmdError):
"""Base for all errors related to the cmd index, which is called cache internally"""
|
avitko001c/python_linux_command_module
|
exceptions.py
|
exceptions.py
|
py
| 3,139 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16543689747
|
import csv
import sys
from nuitka.__past__ import StringIO
from nuitka.Tracing import my_print
from nuitka.utils.Execution import check_output
def main():
# many cases, pylint: disable=too-many-branches
my_print("Querying openSUSE build service status of Nuitka packages.")
# spell-checker: ignore kayhayen
osc_cmd = ["osc", "pr", "-c", "home:kayhayen"]
stdout_osc = check_output(args=osc_cmd)
if str is not bytes:
stdout_osc = stdout_osc.decode("utf8")
# Response is really a CSV file, so use that for parsing.
csv_file = StringIO(stdout_osc)
osc_reader = csv.reader(csv_file, delimiter=";")
osc_reader = iter(osc_reader)
bad = ("failed", "unresolvable", "broken", "blocked")
titles = next(osc_reader)[1:]
# Nuitka (follow git main branch)
row1 = next(osc_reader)
# Nuitka-Unstable (follow git develop branch)
row2 = next(osc_reader)
# Nuitka-Experimental (follow git factory branch)
row3 = next(osc_reader)
problems = []
def decideConsideration(title, status):
# Ignore other arch builds, they might to not even boot at times.
# spell-checker: ignore aarch
if "ppc" in title or "aarch" in title or "arm" in title:
return False
# This fails for other reasons often, and is not critical to Nuitka.
if "openSUSE_Tumbleweed" in title:
return False
# Ignore old Fedora and RHEL6 32 bit being blocked.
if status == "blocked":
if (
"Fedora_2" in title
or "RedHat_RHEL-6/i586" in title
or "CentOS_CentOS-6/i586" in title
):
return False
# It makes building visible now, that's not an error of course.
if status == "building":
return False
# It makes need to build visible as well, that too is not an error
# really.
if status == "outdated":
return False
return True
for count, title in enumerate(titles):
status = row1[count + 1]
if not decideConsideration(title, status):
continue
if status in bad:
problems.append((row1[0], title, status))
for count, title in enumerate(titles):
status = row2[count + 1]
if not decideConsideration(title, status):
continue
if status in bad:
problems.append((row2[0], title, status))
for count, title in enumerate(titles):
status = row3[count + 1]
if not decideConsideration(title, status):
continue
if status in bad:
problems.append((row3[0], title, status))
if problems:
my_print("There are problems with:", style="yellow")
my_print(
"\n".join("%s: %s (%s)" % problem for problem in problems), style="yellow"
)
if any(problem[0] == "Nuitka" for problem in problems):
my_print(
"Check here: https://build.opensuse.org/package/show/home:kayhayen/Nuitka"
)
if any(problem[0] == "Nuitka-Unstable" for problem in problems):
my_print(
"Check here: https://build.opensuse.org/package/show/home:kayhayen/Nuitka-Unstable"
)
if any(problem[0] == "Nuitka-experimental" for problem in problems):
my_print(
"Check here: https://build.opensuse.org/package/show/home:kayhayen/Nuitka-experimental"
)
sys.exit(1)
else:
my_print("Looks good.", style="blue")
sys.exit(0)
if __name__ == "__main__":
main()
|
Nuitka/Nuitka
|
nuitka/tools/release/osc_check/__main__.py
|
__main__.py
|
py
| 3,651 |
python
|
en
|
code
| 10,019 |
github-code
|
6
|
14654879415
|
"""
OWASP Maryam!
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
class main:
def __init__(self, framework, q, count=30):
""" searchencrypt.com search engine
framework : core attribute
q : query for search
count : count of links
"""
self.framework = framework
self.q = q
self.count = count
self._pages = ''
self._json = {}
self._links = []
self._links_with_title = {}
self.searchencrypt = 'searchencrypt.com'
def run_crawl(self, policy='webpages'):
policies = {'webpages': 'web',
'images': 'web,image',
'news': 'news'}
policy = policy.lower()
if policy not in policies:
search_type = policies['webpages']
else:
search_type = policies[policy]
url = f"https://spapi.{self.searchencrypt}/api/search?q={self.q}&types={search_type}&limit={self.count}"
self.framework.verbose('Opening the searchencrypt.com domain...', end='\r')
try:
req = self.framework.request(url=url)
except:
self.framework.error('[SEARCHENCRYPT] ConnectionError')
self.framework.error('Searchencrypt is missed!')
return
        self._pages = req.text
self._json = req.json()
@property
def pages(self):
return self._pages
@property
def json(self):
return self._json
@property
def links(self):
        results = self.json.get('Results')
        if not results:
            return []
        self._links = [x.get('ClickUrl') for x in results]
        return self._links
@property
def links_with_title(self):
results = self.json.get('Results')
if not results:
return {}
self._links_with_title = {x.get('Title'): x.get('ClickUrl') for x in results}
return self._links_with_title
@property
def dns(self):
return self.framework.page_parse(self._pages).get_dns(self.q, self.links)
@property
def emails(self):
return self.framework.page_parse(self._pages).get_emails(self.q)
@property
def docs(self):
return self.framework.page_parse(self._pages).get_docs(self.q, self.links)
|
callforpapers-source/maryam-deb
|
core/util/searchencrypt.py
|
searchencrypt.py
|
py
| 2,452 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31963159871
|
from sys import stdin
import re
input = stdin.readline
pmon, q = map(int, input().split())
pmons = {}
for i in range(1, pmon+1):
pmons[i] = input().strip()
is_numb = re.compile('[0-9]')
reversed_pmons = {v: k for k, v in pmons.items()}
for _ in range(q):
res = input().strip()
res_int_valid = is_numb.search(res)
if res_int_valid:
print(pmons[int(res)])
else:
print(reversed_pmons[res])
|
yongwoo-jeong/Algorithm
|
백준/Silver/1620. 나는야 포켓몬 마스터 이다솜/나는야 포켓몬 마스터 이다솜.py
|
나는야 포켓몬 마스터 이다솜.py
|
py
| 441 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6148825082
|
from selenium import webdriver
from time import sleep
import selenium
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
import pandas as pd
if __name__ == '__main__':
# option=webdriver.ChromeOptions()
# option.add_argument("--user-data-dir="+r"C:\\Users\\20142266\\AppData\\Local\\Google\\Chrome\\User Data")
# driver = webdriver.Chrome(chrome_options=option)
# sleep(2)
driver = webdriver.Ie()
#driver.get('chrome-extension://hehijbfgiekmjfkfjpbkbammjbdenadd/nhc.htm#url=http://tec.cqccms.com.cn/')
driver.get('http://tec.cqccms.com.cn/')
sleep(1)
js = 'document.getElementById("submitButton").click()'
driver.execute_script(js)
sleep(1)
#driver.find_element_by_id("submitButton").send_keys(Keys.ENTER)
#testck.input_windows("核对数字证书口令","")
ratio = driver.find_elements_by_tag_name("input")
for a in ratio:
if a.get_attribute('value') == '4005919':
a.click()
if a.get_attribute("type") == "submit":
a.click()
sleep(2)
#str = driver.get_cookies()
#print(str)
#cookie1 = str[0]['value']
#driver.add_cookie({'name': 'JSESSIONID', 'value': cookie1})
URL = "http://tec.cqccms.com.cn/cocComplete!cocCompleteCreate.action?" \
"id=201709291432251U8205&carType=HFC5181XXYP3K1A57S2QV&carCellCode" \
"=2017011101011956&carTypeSeqCode=N36N&carCellId=5636338&collection" \
"=A013551N36NZM95ZZEZ420000901@null@20201105173042786997%3B1@@1@0@5254506@1"
driver.get(URL)
myselect=driver.find_elements_by_tag_name("select")
for i in myselect:
if i.get_property("name")=="f3":
try:
Select(i).select_by_visible_text("5700")
except selenium.common.exceptions.NoSuchElementException:
Select(i).select_by_index(1)
if i.get_property("name")=="f7":
try:
Select(i).select_by_visible_text("5700")
except selenium.common.exceptions.NoSuchElementException:
Select(i).select_by_index(1)
|
YN3359/runoob-git-test
|
PythonScripts/自动备案COC.py
|
自动备案COC.py
|
py
| 2,122 |
python
|
en
|
code
| 0 |
github-code
|
6
|
519381337
|
from openmdao.core.driver import Driver, RecordingDebugging
from openmdao.api import SimpleGADriver, Problem, LatinHypercubeGenerator, DOEDriver
from dataclasses import dataclass
from copy import deepcopy
import random
import numpy as np
from itertools import chain
from deap import algorithms, base, tools
from deap.benchmarks import rosenbrock
class DeapDriver(Driver):
def _declare_options(self):
self.options.declare("container_class")
def _get_name(self):
return "DeapDriver"
def _setup_driver(self, problem):
super()._setup_driver(problem)
self.container = self.options["container_class"](driver=self)
def run(self):
final_population = self.container.run_algorithm()
# Evaluates a point in the middle of the pareto front to have one of
# the optimal points as the final values in the model
# self.container.evaluate(pareto_front[len(pareto_front) // 2])
# print(pareto_front)
return False
class Individual(list):
def __init__(self, *args, fitness_class, **kwargs):
super().__init__(*args, **kwargs)
self.fitness = fitness_class()
def __repr__(self):
return f"Individual({super().__repr__()})"
@dataclass(frozen=True)
class DeapContainer:
"""
An abstract class for containing the algorithm-specific logic. This is
instantiated in the Driver's _setup_driver() function with the driver
itself passed in as an argument.
This object in itself should be fully stateless.
The motivation for having this in a dedicated object is mainly that the
Driver class is already heavily bloated.
"""
driver: DeapDriver
def __post_init__(self):
# FIXME: this API is inflexible
self.fitness_class = type(
"Fitness",
(base.Fitness,),
{"weights": (-1,) * len(self.problem.model.get_objectives())},
)
self.design_var_shapes = {
name: np.shape(value)
for (name, value) in self.driver.get_design_var_values().items()
}
self.objective_shapes = {
name: np.shape(value)
for (name, value) in self.driver.get_objective_values().items()
}
self.constraint_shapes = {
name: np.shape(value)
for (name, value) in self.driver.get_constraint_values().items()
}
self.individual_bounds = self._individual_bounds()
@property
def problem(self):
return self.driver._problem
def individual_factory(self, *args, **kwargs):
individual = self.individual_class(fitness_class=self.fitness_class, *args, **kwargs)
return individual
def _individual_bounds(self):
design_vars = self.problem.model.get_design_vars()
lower, upper = chain.from_iterable(
(design_vars[key]["lower"].flat, design_vars[key]["upper"].flat)
for key in self.design_var_shapes.keys()
)
return tuple(lower), tuple(upper)
def convert_design_vars_to_individual(self, design_vars):
"""
Converts a dict of OpenMDAO design variables into a DEAP individual.
"""
individual = Individual(
chain.from_iterable(
design_vars[key].flat for key in self.design_var_shapes.keys()
),
fitness_class=self.fitness_class,
)
return individual
def convert_individual_to_design_vars(self, individual):
"""
Converts a DEAP individual into a dict of OpenMDAO design variables.
"""
ind = deepcopy(individual)
design_vars = {}
for name, shape in self.design_var_shapes.items():
            ind_items = int(np.prod(shape))
design_vars[name] = np.reshape(ind[:ind_items], shape)
ind = ind[ind_items:]
return design_vars
def get_population_generator(self, count):
return LatinHypercubeGenerator(
samples=count, criterion="correlation", iterations=count // 10
)
def init_population(self, count):
return [
self.convert_design_vars_to_individual(dict(case))
for case in self.get_population_generator(count)(
self.problem.model.get_design_vars()
)
]
def evaluate(self, individual):
pre = id(individual.fitness)
for (name, value) in self.convert_individual_to_design_vars(individual).items():
self.driver.set_design_var(name, value)
assert id(individual.fitness) == pre
with RecordingDebugging(
self.driver._get_name(), self.driver.iter_count, self.driver
):
failure_flag, abs_error, rel_error = self.problem.model._solve_nonlinear()
self.driver.iter_count += 1
# print(tuple(float(x) for x in self.driver.get_objective_values().values()))
return tuple(
chain.from_iterable(
x.flat for x in self.driver.get_objective_values().values()
)
)
def run_algorithm(self):
        raise NotImplementedError("run_algorithm() method not implemented.")
|
ovidner/openmdao-deap
|
openmdao_deap/__init__.py
|
__init__.py
|
py
| 5,155 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2690012282
|
import boto3
import os
class WasabiUploader:
def __init__(self, directory):
self.directory = directory
self.session = boto3.Session(profile_name="default")
self.credentials = self.session.get_credentials()
self.aws_access_key_id = self.credentials.access_key
self.aws_secret_access_key = self.credentials.secret_key
self.s3 = boto3.resource('s3',
endpoint_url='https://s3.US-central-1.wasabisys.com',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key
)
self.mpdb = self.s3.Bucket('mpdb')
def create_list_of_uploaded_parts(self, directory):
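        # Append to the local upload log any object keys found in the bucket that are not already recorded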
print("Working...")
UploadedPNsFileLocation = "/".join(
directory.split("/", )[:-1])
with open(f'{UploadedPNsFileLocation}/Wasabi_UploadedPNS.txt', 'a+') as f:
f.seek(0)
existing_contents = f.read()
for obj in self.mpdb.objects.all():
item = obj.key.split("/", 1)[1]
if item not in existing_contents:
f.write(f"{item}\n")
f.close()
print(f"Wasabi Data processed, PN file created. "
f"Available at: {UploadedPNsFileLocation}/Wasabi_UploadedPNS.txt'")
def upload_photos(self):
recordNumber = 0
recordsAdded = 0
for filename in os.listdir(self.directory):
recordNumber += 1
with open(f'{self.directory}/Wasabi_UploadedPNS.txt', 'a+') as f:
f.seek(0)
existing_contents = f.read()
file = os.path.join(self.directory, filename)
PN = filename.split(".")[0]
if os.path.isfile(file):
if PN not in existing_contents:
try:
self.mpdb.upload_file(file, f"productimages/{filename}")
f.write(f"{filename}\n")
recordsAdded += 1
if recordNumber % 20 == 0: # only printing every 20th record for confirmation of upload
print(f"{PN} successfully uploaded to Wasabi ({recordsAdded} images uploaded)")
except Exception as e:
print(f"failed to upload {PN} to Wasabi. Error: {e}")
f.close()
print(f"Complete! Records Added: {recordsAdded}")
def count_uploads(self):
counting_mpdb = self.s3.Bucket('mpdb')
count = 0
print("Counting...")
for _ in counting_mpdb.objects.all():
count += 1
print(f"{count} objects found in the library's bucket")
def count_items_in_part_list(self):
"""
Counts the number of items inside the part number upload log created by the Image Uploader.
Assumes the log file is located at 'Wasabi_UploadedPNS.txt' in the specified directory.
"""
directory_parts = self.directory.split("/")[:-1] # Remove the last part (file name) from the path
directory = "/".join(directory_parts)
with open(f'{directory}/Wasabi_UploadedPNS.txt', 'r') as f:
x = len(f.readlines())
print(f"{x} items in the part number log")
|
evanwmeeks/PersonalProjects
|
wasabi_interface/wasabi.py
|
wasabi.py
|
py
| 3,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11502963314
|
with open('./input_day_8.txt') as file:
input = file.read().splitlines()
input = [i.split(' ') for i in input]
unique_len = [2, 4, 3, 7]
count = 0
for i in input:
for j in i[-4:]:
if j != '|':
if len(j) in unique_len:
count += 1
print('part 1: ' + str(count))
sum = 0
for i in input:
map_list = {}
digits = i[:-5]
map_list[1] = [d for d in digits if len(d) == 2][0]
map_list[7] = [d for d in digits if len(d) == 3][0]
map_list[4] = [d for d in digits if len(d) == 4][0]
map_list[8] = [d for d in digits if len(d) == 7][0]
for v in map_list.values():
digits.remove(v)
five_bar = [d for d in digits if len(d) == 5]
six_bar = [d for d in digits if len(d) == 6]
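    # Deduce 9/0/6 from their overlap with 4 and 7, then 3/2/5 from their overlap with 1 and 9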
for d in six_bar:
if len(list(set(map_list[4]).intersection(d))) == 4:
map_list[9] = d
elif len(list(set(map_list[4]).intersection(d))) == 3 and len(list(set(map_list[7]).intersection(d))) == 3:
map_list[0] = d
else :
map_list[6] = d
for d in five_bar:
if len(list(set(map_list[1]).intersection(d))) == 2:
map_list[3] = d
elif len(list(set(map_list[9]).intersection(d))) == 4:
map_list[2] = d
else :
map_list[5] = d
inv_map = {''.join(sorted(v)): k for k, v in map_list.items()}
d = i[-4:]
digit = ''
for i in range(4):
digit += str(inv_map[''.join(sorted(d[i]))])
sum += int(digit)
print('part 2: ' + str(sum))
|
Camillemns/advent_of_code
|
day8.py
|
day8.py
|
py
| 1,522 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7796950374
|
# 8min, 239 ms 14.7 MB
class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
dict = {}
for num in nums:
if num in dict.keys():
dict[num] += 1
else:
dict[num] = 1
if dict[num] > len(nums) / 2:
return num
if __name__ == '__main__':
nums = [2, 2, 1, 1, 1, 2, 2]
sol = Solution()
print(sol.majorityElement(nums))
|
sky77764/Leetcode
|
Top 100 Liked Questions/easy/169. Majority Element.py
|
169. Majority Element.py
|
py
| 505 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70939657148
|
from selenium import webdriver
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
import time
import datetime
def get_page(url):
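    # Fetch the page with a desktop User-Agent header and force UTF-8 decoding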
header = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"
}
html = requests.get(url,headers=header)
html.encoding = 'utf-8'
return html
def parse_page(html,addr):
    # Extract the target fields from the page and store them in a dict
dict = {}
doc = BeautifulSoup(html,'lxml')
title = doc.select('h1')
if len(title)==0:
return
    articles = doc.select('#artibody p')  # select() returns a list of matched elements
content = ''
date = time.strftime('%Y.%m.%d',time.localtime(time.time()))
for article in articles:
content += article.get_text()
dict['date'] = date
dict['title'] = title[0].get_text().strip()
dict['content'] = content
dict['url'] = addr.get_attribute('href')
write_in_database(dict)
def write_in_database(dict):
    # Store the article only if it has not been saved before
client = MongoClient('mongodb://localhost:27017/')
database = client.xinlang
collection = database.articles
dict['Id'] = collection.find().count()
print(dict)
if collection.find_one({'title':dict['title']}) == None:
collection.insert(dict)
def main():
url = 'https://mobile.sina.com.cn/'
browser = webdriver.Chrome()
browser.get(url)
addrs = browser.find_elements_by_css_selector('#feedCard #feedCardContent .feed-card-item h2 a')
    # Get the URL of each article
for addr in addrs:
html=get_page(addr.get_attribute('href')).text
parse_page(html,addr)
if __name__ == '__main__':
    while True:  # Scheduled run at 09:00 and 21:00
now = datetime.datetime.now()
if (now.hour == 9 or now.hour == 21) and now.minute == 0 :
main()
time.sleep(60)
##ok
|
lzzandsx/lizhengzhao_python_homework
|
xinlang.py
|
xinlang.py
|
py
| 1,935 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29279761170
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Ambre chamber
"""
__author__ = "Dennis van Gils"
__authoremail__ = "[email protected]"
__url__ = "https://github.com/Dennis-van-Gils/project-Ambre-chamber"
__date__ = "31-08-2020"
__version__ = "2.0"
# pylint: disable=bare-except, broad-except, try-except-raise
import os
import sys
import time
import numpy as np
import psutil
from PyQt5 import QtCore, QtGui
from PyQt5 import QtWidgets as QtWid
from PyQt5.QtCore import QDateTime
import pyqtgraph as pg
from dvg_debug_functions import tprint, dprint, print_fancy_traceback as pft
from dvg_pyqt_controls import (
create_LED_indicator,
create_Toggle_button,
SS_TEXTBOX_READ_ONLY,
SS_GROUP,
)
from dvg_pyqt_filelogger import FileLogger
from dvg_pyqtgraph_threadsafe import (
HistoryChartCurve,
LegendSelect,
PlotManager,
)
from dvg_devices.Arduino_protocol_serial import Arduino
from dvg_qdeviceio import QDeviceIO
TRY_USING_OPENGL = True
if TRY_USING_OPENGL:
try:
import OpenGL.GL as gl # pylint: disable=unused-import
except:
print("OpenGL acceleration: Disabled")
print("To install: `conda install pyopengl` or `pip install pyopengl`")
else:
print("OpenGL acceleration: Enabled")
pg.setConfigOptions(useOpenGL=True)
pg.setConfigOptions(antialias=True)
pg.setConfigOptions(enableExperimental=True)
# Global pyqtgraph configuration
# pg.setConfigOptions(leftButtonPan=False)
pg.setConfigOption("foreground", "#EEE")
# Constants
# fmt: off
DAQ_INTERVAL_MS = 1000 # [ms]
CHART_INTERVAL_MS = 500 # [ms]
CHART_HISTORY_TIME = 3600 # [s]
# fmt: on
# Show debug info in terminal? Warning: Slow! Do not leave on unintentionally.
DEBUG = False
def get_current_date_time():
cur_date_time = QDateTime.currentDateTime()
return (
cur_date_time.toString("dd-MM-yyyy"), # Date
cur_date_time.toString("HH:mm:ss"), # Time
cur_date_time.toString("yyMMdd_HHmmss"), # Reverse notation date-time
)
# ------------------------------------------------------------------------------
# Arduino state
# ------------------------------------------------------------------------------
class State(object):
"""Reflects the actual readings, parsed into separate variables, of the
Arduino. There should only be one instance of the State class.
"""
def __init__(self):
self.time = np.nan # [s]
self.ds18b20_temp = np.nan # ['C]
self.dht22_temp = np.nan # ['C]
self.dht22_humi = np.nan # [%]
self.is_valve_open = False
# Automatic valve control
self.humi_threshold = np.nan # [%]
self.open_valve_when_super_humi = np.nan
state = State()
# ------------------------------------------------------------------------------
# MainWindow
# ------------------------------------------------------------------------------
class MainWindow(QtWid.QWidget):
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
self.setWindowTitle("Ambre chamber")
self.setGeometry(350, 50, 960, 800)
self.setStyleSheet(SS_TEXTBOX_READ_ONLY + SS_GROUP)
# -------------------------
# Top frame
# -------------------------
# Left box
self.qlbl_update_counter = QtWid.QLabel("0")
self.qlbl_DAQ_rate = QtWid.QLabel("DAQ: nan Hz")
self.qlbl_DAQ_rate.setStyleSheet("QLabel {min-width: 7em}")
vbox_left = QtWid.QVBoxLayout()
vbox_left.addWidget(self.qlbl_update_counter, stretch=0)
vbox_left.addStretch(1)
vbox_left.addWidget(self.qlbl_DAQ_rate, stretch=0)
# Middle box
self.qlbl_title = QtWid.QLabel(
"Ambre chamber",
font=QtGui.QFont("Palatino", 14, weight=QtGui.QFont.Bold),
)
self.qlbl_title.setAlignment(QtCore.Qt.AlignCenter)
self.qlbl_cur_date_time = QtWid.QLabel("00-00-0000 00:00:00")
self.qlbl_cur_date_time.setAlignment(QtCore.Qt.AlignCenter)
self.qpbt_record = create_Toggle_button(
"Click to start recording to file", minimumWidth=300
)
# fmt: off
self.qpbt_record.clicked.connect(lambda state: log.record(state)) # pylint: disable=unnecessary-lambda
# fmt: on
vbox_middle = QtWid.QVBoxLayout()
vbox_middle.addWidget(self.qlbl_title)
vbox_middle.addWidget(self.qlbl_cur_date_time)
vbox_middle.addWidget(self.qpbt_record)
# Right box
self.qpbt_exit = QtWid.QPushButton("Exit")
self.qpbt_exit.clicked.connect(self.close)
self.qpbt_exit.setMinimumHeight(30)
self.qlbl_recording_time = QtWid.QLabel(alignment=QtCore.Qt.AlignRight)
vbox_right = QtWid.QVBoxLayout()
vbox_right.addWidget(self.qpbt_exit, stretch=0)
vbox_right.addStretch(1)
vbox_right.addWidget(self.qlbl_recording_time, stretch=0)
# Round up top frame
hbox_top = QtWid.QHBoxLayout()
hbox_top.addLayout(vbox_left, stretch=0)
hbox_top.addStretch(1)
hbox_top.addLayout(vbox_middle, stretch=0)
hbox_top.addStretch(1)
hbox_top.addLayout(vbox_right, stretch=0)
# -------------------------
# Bottom frame
# -------------------------
# Charts
# -------------------------
self.gw = pg.GraphicsLayoutWidget()
# Plot: Temperature: DS18B20
p = {"color": "#EEE", "font-size": "10pt"}
self.pi_ds18b20_temp = self.gw.addPlot(row=0, col=0)
self.pi_ds18b20_temp.setLabel("left", text="temperature (°C)", **p)
# Plot: Temperature: DHT 22
self.pi_dht22_temp = self.gw.addPlot(row=1, col=0)
self.pi_dht22_temp.setLabel("left", text="temperature (°C)", **p)
# Plot: Humidity: DHT22
self.pi_dht22_humi = self.gw.addPlot(row=2, col=0)
self.pi_dht22_humi.setLabel("left", text="humidity (%)", **p)
self.plots = [
self.pi_ds18b20_temp,
self.pi_dht22_humi,
self.pi_dht22_temp,
]
for plot in self.plots:
plot.setClipToView(True)
plot.showGrid(x=1, y=1)
plot.setLabel("bottom", text="history (s)", **p)
plot.setMenuEnabled(True)
plot.enableAutoRange(axis=pg.ViewBox.XAxis, enable=False)
plot.enableAutoRange(axis=pg.ViewBox.YAxis, enable=True)
plot.setAutoVisible(y=True)
plot.setRange(xRange=[-CHART_HISTORY_TIME, 0])
# Curves
capacity = round(CHART_HISTORY_TIME * 1e3 / DAQ_INTERVAL_MS)
PEN_01 = pg.mkPen(color=[255, 255, 0], width=3)
PEN_02 = pg.mkPen(color=[0, 255, 255], width=3)
self.tscurve_ds18b20_temp = HistoryChartCurve(
capacity=capacity,
linked_curve=self.pi_ds18b20_temp.plot(
pen=PEN_01, name="DS18B20 temp."
),
)
self.tscurve_dht22_temp = HistoryChartCurve(
capacity=capacity,
linked_curve=self.pi_dht22_temp.plot(
pen=PEN_01, name="DHT22 temp."
),
)
self.tscurve_dht22_humi = HistoryChartCurve(
capacity=capacity,
linked_curve=self.pi_dht22_humi.plot(
pen=PEN_02, name="DHT22 humi."
),
)
self.tscurves = [
self.tscurve_ds18b20_temp,
self.tscurve_dht22_temp,
self.tscurve_dht22_humi,
]
# Group `Readings`
# -------------------------
legend = LegendSelect(
linked_curves=self.tscurves, hide_toggle_button=True
)
p = {
"readOnly": True,
"alignment": QtCore.Qt.AlignRight,
"maximumWidth": 54,
}
self.qlin_ds18b20_temp = QtWid.QLineEdit(**p)
self.qlin_dht22_temp = QtWid.QLineEdit(**p)
self.qlin_dht22_humi = QtWid.QLineEdit(**p)
# fmt: off
legend.grid.setHorizontalSpacing(6)
legend.grid.addWidget(self.qlin_ds18b20_temp , 0, 2)
legend.grid.addWidget(QtWid.QLabel("± 0.5 °C"), 0, 3)
legend.grid.addWidget(self.qlin_dht22_temp , 1, 2)
legend.grid.addWidget(QtWid.QLabel("± 0.5 °C"), 1, 3)
legend.grid.addWidget(self.qlin_dht22_humi , 2, 2)
legend.grid.addWidget(QtWid.QLabel("± 3 %") , 2, 3)
# fmt: on
qgrp_readings = QtWid.QGroupBox("Readings")
qgrp_readings.setLayout(legend.grid)
# Group 'Log comments'
# -------------------------
self.qtxt_comments = QtWid.QTextEdit()
grid = QtWid.QGridLayout()
grid.addWidget(self.qtxt_comments, 0, 0)
qgrp_comments = QtWid.QGroupBox("Log comments")
qgrp_comments.setLayout(grid)
# Group 'Charts'
# -------------------------
self.plot_manager = PlotManager(parent=self)
self.plot_manager.add_autorange_buttons(linked_plots=self.plots)
self.plot_manager.add_preset_buttons(
linked_plots=self.plots,
linked_curves=self.tscurves,
presets=[
{
"button_label": "00:30",
"x_axis_label": "history (sec)",
"x_axis_divisor": 1,
"x_axis_range": (-30, 0),
},
{
"button_label": "01:00",
"x_axis_label": "history (sec)",
"x_axis_divisor": 1,
"x_axis_range": (-60, 0),
},
{
"button_label": "10:00",
"x_axis_label": "history (min)",
"x_axis_divisor": 60,
"x_axis_range": (-10, 0),
},
{
"button_label": "30:00",
"x_axis_label": "history (min)",
"x_axis_divisor": 60,
"x_axis_range": (-30, 0),
},
{
"button_label": "60:00",
"x_axis_label": "history (min)",
"x_axis_divisor": 60,
"x_axis_range": (-60, 0),
},
],
)
self.plot_manager.add_clear_button(linked_curves=self.tscurves)
self.plot_manager.perform_preset(1)
qgrp_chart = QtWid.QGroupBox("Charts")
qgrp_chart.setLayout(self.plot_manager.grid)
# Group 'Valve control'
# -------------------------
self.LED_is_valve_open = create_LED_indicator()
self.qlin_humi_threshold = QtWid.QLineEdit(
"%d" % state.humi_threshold,
alignment=QtCore.Qt.AlignRight,
maximumWidth=36,
)
self.qlin_humi_threshold.editingFinished.connect(
self.process_qlin_humi_threshold
)
self.qpbt_open_when_super_humi = QtWid.QPushButton(
(
"humidity > threshold"
if state.open_valve_when_super_humi
else "humidity < threshold"
),
checkable=True,
checked=state.open_valve_when_super_humi,
)
self.qpbt_open_when_super_humi.clicked.connect(
self.process_qpbt_open_when_super_humi
)
# fmt: off
grid = QtWid.QGridLayout()
grid.addWidget(QtWid.QLabel("Is valve open?") , 0, 0)
grid.addWidget(self.LED_is_valve_open , 0, 1)
grid.addWidget(QtWid.QLabel("Humidity threshold"), 1, 0)
grid.addWidget(self.qlin_humi_threshold , 1, 1)
grid.addWidget(QtWid.QLabel("%") , 1, 2)
grid.addWidget(QtWid.QLabel("Open valve when") , 2, 0)
grid.addWidget(self.qpbt_open_when_super_humi , 2, 1, 1, 2)
grid.setAlignment(QtCore.Qt.AlignTop)
# fmt: on
qgrp_valve = QtWid.QGroupBox("Valve control")
qgrp_valve.setLayout(grid)
# Round up right frame
vbox = QtWid.QVBoxLayout()
vbox.addWidget(qgrp_readings)
vbox.addWidget(qgrp_comments)
vbox.addWidget(qgrp_valve) # , alignment=QtCore.Qt.AlignLeft)
vbox.addWidget(qgrp_chart, alignment=QtCore.Qt.AlignLeft)
vbox.addStretch()
# Round up bottom frame
hbox_bot = QtWid.QHBoxLayout()
hbox_bot.addWidget(self.gw, 1)
hbox_bot.addLayout(vbox, 0)
# -------------------------
# Round up full window
# -------------------------
vbox = QtWid.QVBoxLayout(self)
vbox.addLayout(hbox_top, stretch=0)
vbox.addSpacerItem(QtWid.QSpacerItem(0, 10))
vbox.addLayout(hbox_bot, stretch=1)
# --------------------------------------------------------------------------
# Handle controls
# --------------------------------------------------------------------------
@QtCore.pyqtSlot()
def process_qlin_humi_threshold(self):
try:
humi_threshold = float(self.qlin_humi_threshold.text())
except (TypeError, ValueError):
humi_threshold = 50
except:
raise
state.humi_threshold = np.clip(humi_threshold, 0, 100)
self.qlin_humi_threshold.setText("%.0f" % state.humi_threshold)
qdev_ard.send(ard.write, "th%.0f" % state.humi_threshold)
@QtCore.pyqtSlot()
def process_qpbt_open_when_super_humi(self):
if self.qpbt_open_when_super_humi.isChecked():
state.open_valve_when_super_humi = True
self.qpbt_open_when_super_humi.setText("humidity > threshold")
qdev_ard.send(ard.write, "open when super humi")
else:
state.open_valve_when_super_humi = False
self.qpbt_open_when_super_humi.setText("humidity < threshold")
qdev_ard.send(ard.write, "open when sub humi")
@QtCore.pyqtSlot()
def update_GUI(self):
str_cur_date, str_cur_time, _ = get_current_date_time()
self.qlbl_cur_date_time.setText(
"%s %s" % (str_cur_date, str_cur_time)
)
self.qlbl_update_counter.setText("%i" % qdev_ard.update_counter_DAQ)
self.qlbl_DAQ_rate.setText(
"DAQ: %.1f Hz" % qdev_ard.obtained_DAQ_rate_Hz
)
if log.is_recording():
self.qlbl_recording_time.setText(log.pretty_elapsed())
self.qlin_ds18b20_temp.setText("%.1f" % state.ds18b20_temp)
self.qlin_dht22_temp.setText("%.1f" % state.dht22_temp)
self.qlin_dht22_humi.setText("%.1f" % state.dht22_humi)
self.qlbl_title.setText(
"Interior: %.1f °C, %.1f %%"
% (state.dht22_temp, state.dht22_humi)
)
if state.is_valve_open:
self.LED_is_valve_open.setText("1")
self.LED_is_valve_open.setChecked(True)
else:
self.LED_is_valve_open.setText("0")
self.LED_is_valve_open.setChecked(False)
@QtCore.pyqtSlot()
def update_chart(self):
if DEBUG:
tprint("update_chart")
for tscurve in self.tscurves:
tscurve.update()
# ------------------------------------------------------------------------------
# Program termination routines
# ------------------------------------------------------------------------------
def stop_running():
app.processEvents()
qdev_ard.quit()
log.close()
print("Stopping timers................ ", end="")
timer_GUI.stop()
timer_charts.stop()
print("done.")
@QtCore.pyqtSlot()
def notify_connection_lost():
stop_running()
window.qlbl_title.setText("! ! ! LOST CONNECTION ! ! !")
str_cur_date, str_cur_time, _ = get_current_date_time()
str_msg = "%s %s\nLost connection to Arduino." % (
str_cur_date,
str_cur_time,
)
print("\nCRITICAL ERROR @ %s" % str_msg)
reply_ = QtWid.QMessageBox.warning(
window, "CRITICAL ERROR", str_msg, QtWid.QMessageBox.Ok
)
if reply_ == QtWid.QMessageBox.Ok:
pass # Leave the GUI open for read-only inspection by the user
@QtCore.pyqtSlot()
def about_to_quit():
print("\nAbout to quit")
stop_running()
ard.close()
# ------------------------------------------------------------------------------
# Your Arduino update function
# ------------------------------------------------------------------------------
def DAQ_function():
# Date-time keeping
str_cur_date, str_cur_time, str_cur_datetime = get_current_date_time()
# Query the Arduino for its state
success_, tmp_state = ard.query_ascii_values("?", delimiter="\t")
if not (success_):
dprint(
"'%s' reports IOError @ %s %s"
% (ard.name, str_cur_date, str_cur_time)
)
return False
# Parse readings into separate state variables
try:
(
state.time,
state.ds18b20_temp,
state.dht22_temp,
state.dht22_humi,
state.is_valve_open,
) = tmp_state
state.time /= 1000 # Arduino time, [msec] to [s]
state.is_valve_open = bool(state.is_valve_open)
except Exception as err:
pft(err, 3)
dprint(
"'%s' reports IOError @ %s %s"
% (ard.name, str_cur_date, str_cur_time)
)
return False
# We will use PC time instead
state.time = time.perf_counter()
# Add readings to chart histories
window.tscurve_ds18b20_temp.appendData(state.time, state.ds18b20_temp)
window.tscurve_dht22_temp.appendData(state.time, state.dht22_temp)
window.tscurve_dht22_humi.appendData(state.time, state.dht22_humi)
# Logging to file
log.update(filepath=str_cur_datetime + ".txt", mode="w")
# Return success
return True
def write_header_to_log():
log.write("[HEADER]\n")
log.write(window.qtxt_comments.toPlainText())
log.write("\n\n[DATA]\n")
log.write("time\tDS18B20 temp.\tDHT22 temp.\tDHT22 humi.\tvalve\n")
log.write("[s]\t[±0.5 °C]\t[±0.5 °C]\t[±3 pct]\t[0/1]\n")
def write_data_to_log():
log.write(
"%.1f\t%.1f\t%.1f\t%.1f\t%i\n"
% (
log.elapsed(),
state.ds18b20_temp,
state.dht22_temp,
state.dht22_humi,
state.is_valve_open,
)
)
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set priority of this process to maximum in the operating system
print("PID: %s\n" % os.getpid())
try:
proc = psutil.Process(os.getpid())
if os.name == "nt":
proc.nice(psutil.REALTIME_PRIORITY_CLASS) # Windows
else:
proc.nice(-20) # Other
except:
print("Warning: Could not set process to maximum priority.\n")
# --------------------------------------------------------------------------
# Connect to Arduino
# --------------------------------------------------------------------------
ard = Arduino(name="Ard", connect_to_specific_ID="Ambre chamber")
ard.serial_settings["baudrate"] = 115200
ard.auto_connect()
if not (ard.is_alive):
print("\nCheck connection and try resetting the Arduino.")
print("Exiting...\n")
sys.exit(0)
# Get the initial state of the valve control
success, reply = ard.query("th?")
if success:
state.humi_threshold = float(reply)
success, reply = ard.query("open when super humi?")
if success:
state.open_valve_when_super_humi = bool(int(reply))
# --------------------------------------------------------------------------
# Create application and main window
# --------------------------------------------------------------------------
QtCore.QThread.currentThread().setObjectName("MAIN") # For DEBUG info
app = QtWid.QApplication(sys.argv)
app.aboutToQuit.connect(about_to_quit)
window = MainWindow()
# --------------------------------------------------------------------------
# File logger
# --------------------------------------------------------------------------
log = FileLogger(
write_header_function=write_header_to_log,
write_data_function=write_data_to_log,
)
log.signal_recording_started.connect(
lambda filepath: window.qpbt_record.setText(
"Recording to file: %s" % filepath
)
)
log.signal_recording_stopped.connect(
lambda: window.qpbt_record.setText("Click to start recording to file")
)
# --------------------------------------------------------------------------
# Set up multithreaded communication with the Arduino
# --------------------------------------------------------------------------
# Create QDeviceIO
qdev_ard = QDeviceIO(ard)
# Create workers
# fmt: off
qdev_ard.create_worker_DAQ(
DAQ_function = DAQ_function,
DAQ_interval_ms = DAQ_INTERVAL_MS,
critical_not_alive_count = 1,
debug = DEBUG,
)
# fmt: on
qdev_ard.create_worker_jobs()
# Connect signals to slots
qdev_ard.signal_DAQ_updated.connect(window.update_GUI)
qdev_ard.signal_connection_lost.connect(notify_connection_lost)
# Start workers
qdev_ard.start(DAQ_priority=QtCore.QThread.TimeCriticalPriority)
# --------------------------------------------------------------------------
# Timers
# --------------------------------------------------------------------------
timer_GUI = QtCore.QTimer()
timer_GUI.timeout.connect(window.update_GUI)
timer_GUI.start(100)
timer_charts = QtCore.QTimer()
timer_charts.timeout.connect(window.update_chart)
timer_charts.start(CHART_INTERVAL_MS)
# --------------------------------------------------------------------------
# Start the main GUI event loop
# --------------------------------------------------------------------------
window.show()
sys.exit(app.exec_())
| Dennis-van-Gils/project-Ambre-chamber | src_python/main.py | main.py | py | 22,276 | python | en | code | 0 | github-code | 6 |
13538653206 |
import pygame
import numpy as np
from util.helpers import *
from physics.colliding_object import Colliding
class EyeBeam(Colliding):
def __init__(self, start, end):
self.start = np.array(start)
super(EyeBeam, self).__init__(self.start)
self.end = np.array(end)
self.collide_type = 'line'
def unobstructed(self, list_of_game_objects):
walls_vector = walls_vector_from_game_objects(list_of_game_objects)
edge_vector = np.array((self.start, self.end))
return unobstructed_edges(edge_vector, walls_vector)[0]
class Eyes:
def __init__(self, view_distance=200):
self.view_distance = view_distance
self.look_ahead = 10
self.list_of_game_objects = []
def update(self, list_of_game_objects):
self.list_of_game_objects = list_of_game_objects
def direct_path_to_goal(self, current_position, goal, exclude=[]):
obstructions = [i for i in self.list_of_game_objects if i not in exclude]
walls_vector = walls_vector_from_game_objects(obstructions)
if len(walls_vector) == 0:
return True
goal_edge = np.array([[current_position[0], current_position[1], goal[0], goal[1]]])
return unobstructed_edges(goal_edge, walls_vector)
def get_mouse_position(self):
return np.array(pygame.mouse.get_pos()).astype(float)
def look_for_collisions(self, coords, vector, radius):
for sign in [1.0, 0.0, -1.0]:
adjustment = normalise_vector(perpendicular_vector(vector)) * (sign * radius)
adjusted_coords = coords + adjustment
ahead_end = adjusted_coords + (vector * self.look_ahead)
ahead = EyeBeam(adjusted_coords, ahead_end)
collision = ahead.get_closest_collision(self.list_of_game_objects)
if collision is not None:
return collision
return None
def look_at_object(self, coords, screen_object):
if self.direct_path_to_goal(coords, screen_object.coords(), exclude=[screen_object]):
return screen_object
else:
return None
def visible_objects(self, coords):
visibles = []
for screen_object in self.list_of_game_objects:
eyes_see = self.look_at_object(coords,
screen_object)
if eyes_see is not None:
visibles.append(eyes_see)
return visibles
def look_for_object(self,
coords,
object_description):
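# keep only non-wall objects within view_distance whose image dict contains every key/value pair of object_description (Python 2 dict-view subset test)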
matching_objects_in_range = [screen_object for screen_object in \
self.list_of_game_objects \
if screen_object.image['kind'] != 'wall'
and distance_between_points(coords, screen_object.coords()) < self.view_distance \
and object_description.viewitems() <= screen_object.image.viewitems()]
if len(matching_objects_in_range) > 0:
closest_index = find_closest_point_index(coords, [screen_object.coords() for screen_object in matching_objects_in_range])
target_object = matching_objects_in_range[closest_index]
return self.look_at_object(coords, target_object)
return None
| SimonCarryer/video_game_ai | brains/eyes.py | eyes.py | py | 3,313 | python | en | code | 2 | github-code | 6 |
74451964986 |
from core.visualization import PlotGraphics
from core.relation_extraction import SpacyRelationExtraction
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
import numpy as np
import joblib
import os
import sys
import warnings
warnings.filterwarnings('ignore')
class TextPipeline:
def __init__(self, predictor_name, data_tuple, classifier_name='decision_tree', resources_path=None):
# root path
self.resources_folder = os.path.join(os.path.dirname(sys.path[0]), 'resources') \
if resources_path is None else resources_path
# initializes the classifier dict
classifiers = {'decision_tree': DecisionTreeClassifier(random_state=0),
'k_neighbors': KNeighborsClassifier(n_neighbors=15)}
# save the predictor name
self.predictor_name = predictor_name
# receives the data
self.x_train, self.x_test, self.y_train, self.y_test = data_tuple
# text extraction pipelines
self.text_extraction_pipes = {}
# prediction model to use
self.prediction_model = classifiers[classifier_name]
# init the visualizer
self.plt_graphics = PlotGraphics(data_tuple, self.text_extraction_pipes)
def create_features_pipeline(self, n_features=150, pipeline_obj=None):
# checks if the transformer
if pipeline_obj is None:
# features vectorization
transformer_obj = TfidfVectorizer(strip_accents='unicode',
stop_words='english',
lowercase=True,
max_features=n_features,
ngram_range=(1, 2),
min_df=0.1, max_df=0.7)
# creates the pipeline obj
pipeline_obj = Pipeline([('vectorizer', transformer_obj)])
# pipeline mapping
self.text_extraction_pipes['feature'] = pipeline_obj
# returns the pipeline obj
return self.text_extraction_pipes['feature']
def create_label_pipeline(self, relation_extraction=False, n_targets=20, n_jobs=8):
# target vectorization
if relation_extraction:
# uses the spacy relation extraction
vectorizer = SpacyRelationExtraction(n_relation=n_targets, n_jobs=n_jobs)
else:
# otherwise uses a normal vectorizer
vectorizer = CountVectorizer(strip_accents='unicode',
stop_words='english',
lowercase=True,
max_features=n_targets,
ngram_range=(1, 2),
min_df=0.1, max_df=0.7)
# pipeline creation
self.text_extraction_pipes['target'] = Pipeline([('vectorizer', vectorizer)])
def pickle_predictor(self):
# save the labels pipeline
labels_extractor = self.text_extraction_pipes['target']['vectorizer']
obj_name = os.path.join(self.resources_folder, '_'.join([self.predictor_name, 'labels', 'vectorizer']))
joblib.dump(labels_extractor, obj_name + '.pkl')
# saves the model
obj_name = os.path.join(self.resources_folder, '_'.join([self.predictor_name, 'predictor']))
joblib.dump(self.prediction_model, obj_name + '.pkl')
def unpickle_predictor(self):
# loads the object
obj_name = os.path.join(self.resources_folder, '_'.join([self.predictor_name, 'labels', 'vectorizer']))
labels_extractor = joblib.load(obj_name + '.pkl')
self.text_extraction_pipes['target'] = Pipeline([('vectorizer', labels_extractor)])
# unpickle the model
obj_name = os.path.join(self.resources_folder, '_'.join([self.predictor_name, 'predictor']))
self.prediction_model = joblib.load(obj_name + '.pkl')
def fit(self, x_vector):
# fit the feature data
y_vector = self.text_extraction_pipes['target'].fit_transform(self.y_train).toarray()
# convert the y_train
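# clip term counts to a binary multi-label indicator matrix (any count above 1 becomes 1)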
y_vector[y_vector > 1] = 1
# print some information data
print('\ninput array, shape:', x_vector.shape)
print('output array, shape:', y_vector.shape, '\n')
# fit the model
self.prediction_model.fit(x_vector, y_vector)
def predict(self, x_test):
# convert using the pipeline
x_test_vector = self.text_extraction_pipes['feature'].transform(x_test)
# convert the y_test
predictions = self.prediction_model.predict(x_test_vector)
# returns the predictions
return predictions
def score(self):
# add the exception treatment
y_test_vector = self.text_extraction_pipes['target'].transform(self.y_test).toarray()
# predict the output for the test set
predictions = self.predict(self.x_test)
# print some metrics
class_labels = self.text_extraction_pipes['target']['vectorizer'].get_feature_names()
class_report = self.calculate_metrics(y_test_vector, predictions, class_labels)
# plot the data
self.plt_graphics.plot_bag_words(class_report)
# return the classification report
return class_report
@staticmethod
def calculate_metrics(y_test, predictions, class_labels):
# print the results
y_test[y_test > 1] = 1
class_report = classification_report(y_test, predictions, target_names=class_labels, output_dict=True)
print("Classification report: \n", classification_report(y_test, predictions, target_names=class_labels))
# print("F1 micro averaging:", f1_score(y_test, predictions, average='micro', labels=np.unique(predictions)))
print("ROC: ", roc_auc_score(y_test, predictions), '\n')
# return the classification results
return class_report
| eliseu31/MSDS-Analyser | core/text_pipeline.py | text_pipeline.py | py | 6,284 | python | en | code | 8 | github-code | 6 |
41550338634 |
from . animation import Animation
from .. layout import circle
from .. util import deprecated
class Circle(Animation):
LAYOUT_CLASS = circle.Circle
LAYOUT_ARGS = 'rings',
def __init__(self, layout, **kwds):
super().__init__(layout, **kwds)
self.rings = layout.rings
self.ringCount = layout.ringCount
if deprecated.allowed(): # pragma: no cover
self.lastRing = layout.lastRing
self.ringSteps = layout.ringSteps
if deprecated.allowed(): # pragma: no cover
BaseCircleAnim = Circle
| ManiacalLabs/BiblioPixel | bibliopixel/animation/circle.py | circle.py | py | 553 | python | en | code | 263 | github-code | 6 |
32644614877 |
"""
Given a universal mesh, record the placement of each guide node relative to
that mesh, and then reposition the guides to the same relative position should
the universal mesh change from character to character.
from mgear.shifter import relativeGuidePlacement
reload(relativeGuidePlacement)
Execute the following chunk to record initial placement ----------------------
relativeGuidePlacement.exportGuidePlacement(filepath="Y:/tmp/exampleFile.json",
skip_strings=["hair"])
Load new universal guide mesh with new proportions
Execute the following lines to move the guides to their new position ---------
relativeGuidePlacement.importGuidePlacement(filepath="Y:/tmp/exampleFile.json")
Attributes:
GUIDE_ROOT (str): name of the root guide node
SKIP_CONTAINS (list): nodes to skip if they contain the string
SKIP_CRAWL_NODES (list): nodes to skip crawling hierarchy
SKIP_NODETYPES (list): skip the query of certain node types
SKIP_PLACEMENT_NODES (TYPE): nodes to skip updating their positions
SKIP_SUFFIX (list): skip if node ends with
UNIVERSAL_MESH_NAME (str): default name of the universal mesh
"""
# python
import json
import math
# dcc
import maya.cmds as mc
import pymel.core as pm
import maya.OpenMaya as om
# mgear
from mgear.core import utils
from mgear.core import vector
from mgear.core import transform
from mgear.core import meshNavigation
# constants -------------------------------------------------------------------
# Designate the root of the hierarchy to crawl
GUIDE_ROOT = "guide"
# Nodes to avoid checking the hierarchy
DEFAULT_SKIP_CRAWL_NODES = ("controllers_org",
"spineUI_C0_root",
"faceUI_C0_root",
"legUI_R0_root",
"armUI_L0_root",
"legUI_L0_root",
"armUI_R0_root")
# nodes that will not have their positions updated
DEFAULT_SKIP_PLACEMENT_NODES = ("controllers_org",
"global_C0_root",
"spineUI_C0_root",
"faceUI_C0_root",
"legUI_R0_root",
"armUI_L0_root",
"legUI_L0_root",
"armUI_R0_root")
try:
SKIP_CRAWL_NODES
SKIP_PLACEMENT_NODES
except NameError:
SKIP_CRAWL_NODES = list(DEFAULT_SKIP_CRAWL_NODES)
SKIP_PLACEMENT_NODES = list(DEFAULT_SKIP_PLACEMENT_NODES)
# skip the node if it even contains the characters in the list
# eg SKIP_CONTAINS = ["hair"]
SKIP_CONTAINS = []
# Avoid nodes of a specified suffix
SKIP_SUFFIX = ["sizeRef", "crv", "crvRef", "blade"]
# Types of nodes to avoid
SKIP_NODETYPES = ["aimConstraint", "pointConstraint", "parentConstraint"]
UNIVERSAL_MESH_NAME = "skin_geo_setup"
# general functions -----------------------------------------------------------
def crawlHierarchy(parentNode,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=None):
"""recursive function to crawl a hierarchy of nodes to return decendents
Args:
parentNode (str): node to query
ordered_hierarchy (list): list that accumulates descendants across recursive calls
skip_crawl_nodes (list): nodes to skip while crawling
"""
if not skip_strings:
skip_strings = []
for node in mc.listRelatives(parentNode, type="transform") or []:
if node in skip_crawl_nodes or node in ordered_hierarchy:
continue
if node.endswith(tuple(SKIP_SUFFIX)):
continue
if mc.objectType(node) in SKIP_NODETYPES:
continue
if [True for skip_str in skip_strings
if skip_str.lower() in node.lower()]:
continue
ordered_hierarchy.append(node)
crawlHierarchy(node,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=skip_strings)
def getPostionFromLoop(vertList):
"""Get the center position from the list of edge ids provided
Args:
vertList (list): list of edge ids
Returns:
list: of translate XYZ, world space
"""
bb = mc.exactWorldBoundingBox(vertList)
pos = ((bb[0] + bb[3]) / 2, (bb[1] + bb[4]) / 2, (bb[2] + bb[5]) / 2)
return pos
def getVertMatrix(closestVert):
"""create a matrix from the closestVert and the normals of the surrounding
faces for later comparison
Args:
closestVert (str): closest vert to the guide
Returns:
pm.dt.TransformationMatrix: reference matrix built from the face centre and the averaged face normal
"""
closestVert = pm.PyNode(closestVert)
faces = closestVert.connectedFaces()
normalVector = faces.getNormal("world")
pm.select(faces)
faces_str = mc.ls(sl=True, fl=True)
pm.select(cl=True)
face_pos = pm.dt.Vector(getPostionFromLoop(faces_str))
normal_rot = getOrient([normalVector.x, normalVector.y, normalVector.z],
[0, 1, 0],
ro=0)
orig_ref_matrix = pm.dt.TransformationMatrix()
orig_ref_matrix.setTranslation(face_pos, pm.dt.Space.kWorld)
orig_ref_matrix.setRotation(normal_rot)
return orig_ref_matrix
def getOrient(normal, tangent, ro=0):
"""convert normal direction into euler rotations
Args:
normal (list): normal vector values
tangent (list): tangent vector values
ro (int, optional): rotate order
Returns:
list: of euler rotations
"""
kRotateOrders = [om.MEulerRotation.kXYZ, om.MEulerRotation.kYZX,
om.MEulerRotation.kZXY, om.MEulerRotation.kXZY,
om.MEulerRotation.kYXZ, om.MEulerRotation.kZYX, ]
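# cross product (normal x tangent) completes the orthonormal basis used to build the rotation matrix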
cross = [normal[1] * tangent[2] - normal[2] * tangent[1],
normal[2] * tangent[0] - normal[0] * tangent[2],
normal[0] * tangent[1] - normal[1] * tangent[0]]
tMatrix = normal + [0] + tangent + [0] + cross + [0, 0, 0, 0, 1]
mMatrix = om.MMatrix()
om.MScriptUtil.createMatrixFromList(tMatrix, mMatrix)
tmMatrix = om.MTransformationMatrix(mMatrix)
rotate = tmMatrix.eulerRotation().reorder(kRotateOrders[ro])
RAD_to_DEG = (180 / math.pi)
return [rotate[0] * RAD_to_DEG,
rotate[1] * RAD_to_DEG,
rotate[2] * RAD_to_DEG]
def getRepositionMatrix(node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix,
closestVerts):
"""Get the delta matrix from the original position and multiply by the
new vert position. Add the rotations from the face normals.
Args:
node_matrix (pm.dt.Matrix): matrix of the guide
orig_ref_matrix (pm.dt.Matrix): matrix from the original vert position
closestVerts (str): name of the closest vert
Returns:
mmatrix: matrix of the new offset position, worldSpace
"""
current_vert = pm.PyNode(closestVerts[0])
mr_current_vert = pm.PyNode(closestVerts[1])
current_length = vector.getDistance(current_vert.getPosition("world"),
mr_current_vert.getPosition("world"))
orig_length = vector.getDistance(orig_ref_matrix.translate,
mr_orig_ref_matrix.translate)
orig_center = vector.linearlyInterpolate(orig_ref_matrix.translate,
mr_orig_ref_matrix.translate)
orig_center_matrix = pm.dt.Matrix()
# orig_center_matrix.setTranslation(orig_center, pm.dt.Space.kWorld)
orig_center_matrix = transform.setMatrixPosition(
orig_center_matrix, orig_center)
current_center = vector.linearlyInterpolate(
current_vert.getPosition("world"),
mr_current_vert.getPosition("world"))
length_percentage = 1
if orig_length != 0:  # guard against division by zero
length_percentage = current_length / orig_length
# refPosition_matrix = pm.dt.TransformationMatrix()
refPosition_matrix = pm.dt.Matrix()
# refPosition_matrix.setTranslation(current_center, pm.dt.Space.kWorld)
refPosition_matrix = transform.setMatrixPosition(
refPosition_matrix, current_center)
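# express the guide matrix relative to the original reference centre, scale that offset by the
# change in distance between the two reference verts, then re-apply it at the new centre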
deltaMatrix = node_matrix * orig_center_matrix.inverse()
deltaMatrix = deltaMatrix * length_percentage
deltaMatrix = transform.setMatrixScale(deltaMatrix)
refPosition_matrix = deltaMatrix * refPosition_matrix
return refPosition_matrix
def getRepositionMatrixSingleRef(node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix,
closestVerts):
"""Get the delta matrix from the original position and multiply by the
new vert position. Add the rotations from the face normals.
Args:
node_matrix (pm.dt.Matrix): matrix of the guide
orig_ref_matrix (pm.dt.Matrix): matrix from the original vert position
closestVerts (str): name of the closest vert
Returns:
mmatrix: matrix of the new offset position, worldSpace
"""
closestVerts = pm.PyNode(closestVerts[0])
faces = closestVerts.connectedFaces()
normalVector = faces.getNormal("world")
pm.select(faces)
faces_str = mc.ls(sl=True, fl=True)
pm.select(cl=True)
face_pos = pm.dt.Vector(getPostionFromLoop(faces_str))
normal_rot = getOrient([normalVector.x, normalVector.y, normalVector.z],
[0, 1, 0],
ro=0)
refPosition_matrix = pm.dt.TransformationMatrix()
refPosition_matrix.setTranslation(face_pos, pm.dt.Space.kWorld)
refPosition_matrix.setRotation(normal_rot)
deltaMatrix = node_matrix * orig_ref_matrix.inverse()
refPosition_matrix = deltaMatrix * refPosition_matrix
return refPosition_matrix
@utils.viewport_off
@utils.one_undo
def getGuideRelativeDictionaryLegacy(mesh, guideOrder):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
dictionary: create a dictionary of guide:[[edgeIDs], relativeMatrix]
"""
relativeGuide_dict = {}
mesh = pm.PyNode(mesh)
for guide in guideOrder:
guide = pm.PyNode(guide)
# slow function A
clst_vert = meshNavigation.getClosestVertexFromTransform(mesh, guide)
vertexIds = [clst_vert.name()]
# slow function B
orig_ref_matrix = getVertMatrix(clst_vert.name())
# --------------------------------------------------------------------
a_mat = guide.getMatrix(worldSpace=True)
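# reflect the reference matrix through the guide's own matrix to sample a second (mirrored) reference vert on the opposite side of the guide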
mm = ((orig_ref_matrix - a_mat) * -1) + a_mat
pos = mm[3][:3]
mr_vert = meshNavigation.getClosestVertexFromTransform(mesh, pos)
mr_orig_ref_matrix = getVertMatrix(mr_vert.name())
vertexIds.append(mr_vert.name())
node_matrix = guide.getMatrix(worldSpace=True)
relativeGuide_dict[guide.name()] = [vertexIds,
node_matrix.get(),
orig_ref_matrix.get(),
mr_orig_ref_matrix.get()]
mc.select(cl=True)
return relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def yieldGuideRelativeDictionary(mesh, guideOrder, relativeGuide_dict):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
dictionary: create a dictionary of guide:[[edgeIDs], relativeMatrix]
"""
for guide in guideOrder:
guide = pm.PyNode(guide)
# slow function A
clst_vert = meshNavigation.getClosestVertexFromTransform(mesh, guide)
vertexIds = [clst_vert.name()]
# slow function B
orig_ref_matrix = getVertMatrix(clst_vert.name())
# --------------------------------------------------------------------
a_mat = guide.getMatrix(worldSpace=True)
mm = ((orig_ref_matrix - a_mat) * -1) + a_mat
pos = mm[3][:3]
mr_vert = meshNavigation.getClosestVertexFromTransform(mesh, pos)
mr_orig_ref_matrix = getVertMatrix(mr_vert.name())
vertexIds.append(mr_vert.name())
node_matrix = guide.getMatrix(worldSpace=True)
relativeGuide_dict[guide.name()] = [vertexIds,
node_matrix.get(),
orig_ref_matrix.get(),
mr_orig_ref_matrix.get()]
yield relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def getGuideRelativeDictionary(mesh, guideOrder):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
dictionary: create a dictionary of guide:[[edgeIDs], relativeMatrix]
"""
relativeGuide_dict = {}
mesh = pm.PyNode(mesh)
for result in yieldGuideRelativeDictionary(
mesh, guideOrder, relativeGuide_dict):
pass
return relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def updateGuidePlacementLegacy(guideOrder, guideDictionary):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
(vertexIds,
node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix) = guideDictionary[guide]
guideNode = pm.PyNode(guide)
repoMatrix = getRepositionMatrix(pm.dt.Matrix(node_matrix),
pm.dt.Matrix(orig_ref_matrix),
pm.dt.Matrix(mr_orig_ref_matrix),
vertexIds)
guideNode.setMatrix(repoMatrix, worldSpace=True, preserve=True)
@utils.viewport_off
@utils.one_undo
def yieldUpdateGuidePlacement(guideOrder, guideDictionary):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
(vertexIds,
node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix) = guideDictionary[guide]
repoMatrix = getRepositionMatrix(pm.dt.Matrix(node_matrix),
pm.dt.Matrix(orig_ref_matrix),
pm.dt.Matrix(mr_orig_ref_matrix),
vertexIds)
yield repoMatrix
@utils.viewport_off
@utils.one_undo
def updateGuidePlacement(guideOrder, guideDictionary, reset_scale=False):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
updateGen = yieldUpdateGuidePlacement(guideOrder, guideDictionary)
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
guideNode = pm.PyNode(guide)
scl = guideNode.getScale()
repoMatrix = next(updateGen)
guideNode.setMatrix(repoMatrix, worldSpace=True, preserve=True)
if reset_scale:
guideNode.setScale([1, 1, 1])
else:
guideNode.setScale(scl)
yield True
# ==============================================================================
# Data export, still testing
# ==============================================================================
def _importData(filepath):
try:
with open(filepath, 'r') as f:
data = json.load(f)
return data
except Exception as e:
print(e)
def _exportData(data, filepath):
try:
with open(filepath, 'w') as f:
json.dump(data, f, sort_keys=False, indent=4)
except Exception as e:
print(e)
def exportGuidePlacement(filepath=None,
reference_mesh=UNIVERSAL_MESH_NAME,
root_node=GUIDE_ROOT,
skip_crawl_nodes=SKIP_CRAWL_NODES,
skip_strings=[]):
"""Export the position of the supplied root node to a file.
Args:
filepath (str, optional): path to export too
reference_mesh (str, optional): mesh to query verts
root_node (str, optional): name of node to query against
skip_crawl_nodes (list, optional): of nodes not to crawl
skip_strings (list, optional): strings to check to skip node
Returns:
list: dict, list, str
"""
if filepath is None:
filepath = pm.fileDialog2(fileMode=0,
startingDirectory="/",
fileFilter="Export position(*.json)")
if filepath:
filepath = filepath[0]
(relativeGuide_dict,
ordered_hierarchy) = recordInitialGuidePlacement(
reference_mesh=reference_mesh,
root_node=root_node,
skip_crawl_nodes=skip_crawl_nodes,
skip_strings=skip_strings)
data = {}
data["relativeGuide_dict"] = relativeGuide_dict
data["ordered_hierarchy"] = ordered_hierarchy
_exportData(data, filepath)
print("Guide position exported: {}".format(filepath))
return relativeGuide_dict, ordered_hierarchy, filepath
@utils.one_undo
def importGuidePlacement(filepath):
"""import the position from the provided file
Args:
filepath (str): file to the json
referenceMesh (str, optional): name of mesh to compare against
"""
data = _importData(filepath)
updateGuidePlacement(data["ordered_hierarchy"], data["relativeGuide_dict"])
return data["relativeGuide_dict"], data["ordered_hierarchy"]
def recordInitialGuidePlacement(reference_mesh=UNIVERSAL_MESH_NAME,
root_node=GUIDE_ROOT,
skip_crawl_nodes=SKIP_CRAWL_NODES,
skip_strings=None):
"""convenience function for retrieving a dict of position
Args:
reference_mesh (str, optional): the mesh to query against
root_node (str, optional): root node to crawl
skip_crawl_nodes (list, optional): of nodes to avoid
skip_strings (list, optional): of strings to check if skip
Returns:
dict, list: dict of positions, list of ordered nodes
"""
ordered_hierarchy = []
relativeGuide_dict = {}
crawlHierarchy(root_node,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=skip_strings)
relativeGuide_dict = getGuideRelativeDictionary(reference_mesh,
ordered_hierarchy)
return relativeGuide_dict, ordered_hierarchy
| mgear-dev/mgear4 | release/scripts/mgear/shifter/relative_guide_placement.py | relative_guide_placement.py | py | 19,592 | python | en | code | 209 | github-code | 6 |
33914485796 |
from django.conf.urls import patterns, url
from ventas import viewsInforme,viewsPedido
urlpatterns = patterns('',
url(r'^$', viewsPedido.venta_desktop, name='venta_desktop'),
url(r'^fac/', viewsPedido.venta_desktop, name='venta_desktop1'),
url(r'^mobile/$', viewsPedido.venta_mobile, name='venta_mobile'),
url(r'^listar/$', viewsInforme.listar, name='listar'),
url(r'^clientes/', viewsPedido.clientes, name='clientes'),
url(r'^vendedores/', viewsPedido.vendedores, name='vendedores'),
url(r'^codproducto/', viewsPedido.codproducto, name='codproducto'),
url(r'^nomproducto/', viewsPedido.nomproducto, name='nomproducto'),
url(r'^save/$', viewsPedido.savePedido, name='save'),
url(r'^save/(?P<anyway>\w+)/$', viewsPedido.savePedido, name='save'),
url(r'^saveDetalle/$', viewsPedido.saveDetalle, name='saveDetalle'),
url(r'^deleteDetalle/(?P<id>\d+)/$', viewsPedido.deleteDetalle, name='deleteDetalle'),
url(r'^pagar/$', viewsPedido.pagarPedido, name='pagarPedido'),
)
| wilmandx/ipos | ventas/urls.py | urls.py | py | 1,020 | python | es | code | 0 | github-code | 6 |
33105484438 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging; logger = logging.getLogger("main")
FORMAT = '%(asctime)s - %(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING)
import time
from flask import Flask, escape, url_for,render_template, g, request, redirect, jsonify, session
from werkzeug.utils import secure_filename
import sys, os
from jinja2 import Environment, PackageLoader
import json
app = Flask(__name__, static_folder='static')
maze = []
width = 0
height = 0
STARTPOS=[1,1]
MAXLIFE = 10
MIN_TIME_BETWEEN_INTERACTIONS=0.2 #seconds
robots = {}
def store_map(rawmaze):
global maze, width, height
if is_map_loaded():
logger.warning("Map already loaded. Ignoring it. Restart the backend if you want to update the map.")
return
width = rawmaze["width"]
height = rawmaze["height"]
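# True marks a walkable cell: only the listed tile IDs in the map data are treated as floor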
maze = [True if x in [399,431,463,492,493,494,495] else False for x in rawmaze["data"]]
for j in range(height):
for i in range(width):
idx = i + j * width
if maze[idx]:
sys.stdout.write('.')
else:
sys.stdout.write(' ')
sys.stdout.write('\n')
logger.info("Maze successfully loaded!")
def is_map_loaded():
return width and height
def get_obstacles(x,y):
# obstacle at centre, north, south, east, west?
obstacles = [True, True, True, True, True]
if x >= 0 and y >= 0 and x < width and y < height and maze[x + y * width]:
obstacles[0] = False
if x >= 0 and y-1 >= 0 and x < width and y-1 < height and maze[x + (y-1) * width]:
obstacles[1] = False
if x >= 0 and y+1 >= 0 and x < width and y+1 < height and maze[x + (y+1) * width]:
obstacles[2] = False
if x+1 >= 0 and y >= 0 and x+1 < width and y < height and maze[x+1 + y * width]:
obstacles[3] = False
if x-1 >= 0 and y >= 0 and x-1 < width and y < height and maze[x-1 + y * width]:
obstacles[4] = False
logger.info(str(obstacles))
return obstacles
@app.route("/set/<name>/<x>/<y>")
def set_robot(name, x, y):
logger.info("Placing robot %s to %s,%s" % (name,x,y))
x = int(x)
y=int(y)
c,_,_,_,_ = get_obstacles(x,y)
if c:
logger.info("Can not place robot there!")
return json.dumps(False)
robots[name]["pos"] = [x,y]
return json.dumps(True)
def get_robot(name):
if name not in robots:
return json.dumps([-1,-1])
return json.dumps(robots[name]["pos"])
@app.route("/")
def main():
return render_template('index.html')
@app.route("/live")
def map():
return render_template('map.html')
@app.route("/get_robots")
def get_robots():
now = time.time()
complete_robots = dict(robots)
for k in list(robots.keys()):
if robots[k]["life"] <= 0:
logger.warning("Robot %s has no life left! killing it!" % k)
del robots[k]
del complete_robots[k]
continue
if now - robots[k]["lastinteraction"] > 60 * 10:
logger.warning("Robot %s has not being used for 10 min. Removing it." % k)
del robots[k]
del complete_robots[k]
continue
complete_robots[k]["age"] = now - robots[k]["created"]
return json.dumps(complete_robots)
def create_new_robot(name):
logger.info("Placing new robot %s at start position" % name)
robots[name] = {"pos": STARTPOS,
"created": time.time(),
"lastinteraction": 0,
"life": MAXLIFE
}
@app.route('/move/<name>/<direction>')
def move(name, direction):
if not is_map_loaded():
logger.error("Map not loaded yet! Reload webpage.")
return json.dumps([False,[]])
if name not in robots:
create_new_robot(name)
logger.info("Moving robot %s to %s" % (name,direction))
now = time.time()
if now - robots[name]["lastinteraction"] < MIN_TIME_BETWEEN_INTERACTIONS:
logger.error("Too many interactions with %s. Wait a bit." % name)
return json.dumps([False,[]])
robots[name]["lastinteraction"] = now
x, y = robots[name]["pos"]
nx, ny = x, y  # default: stay in place if an unknown direction is given
if direction == 'N':
nx, ny = x, y-1
if direction == 'S':
nx, ny = x, y+1
if direction == 'E':
nx, ny = x+1, y
if direction == 'W':
nx, ny = x-1, y
c,n,s,e,w = get_obstacles(nx,ny)
if c:
logger.info("...can not move there!")
robots[name]["life"] -= 1
return json.dumps([False,[]])
else:
robots[name]["pos"] = [nx,ny]
return json.dumps([True,[n,s,e,w]])
@app.route("/map", methods=['POST'])
def load_map():
logger.info("Retrieving the map data...")
store_map(json.loads([k for k in request.form.keys()][0]))
return ""
@app.route("/life/<name>")
def life(name):
return json.dumps(robots[name]["life"] if name in robots else 0)
| severin-lemaignan/robomaze | backend/backend.py | backend.py | py | 4,956 | python | en | code | 1 | github-code | 6 |
27264160200 |
"""
GenT2MF_Trapezoidal.py
Created 3/1/2022
"""
from __future__ import annotations
from typing import List
from juzzyPython.generalType2zSlices.sets.GenT2MF_Prototype import GenT2MF_Prototype
from juzzyPython.intervalType2.sets.IntervalT2MF_Trapezoidal import IntervalT2MF_Trapezoidal
from juzzyPython.type1.sets.T1MF_Trapezoidal import T1MF_Trapezoidal
class GenT2MF_Trapezoidal(GenT2MF_Prototype):
"""
Class GenT2MF_Trapezoidal
Creates a new instance of GenT2zMF_Trapezoidal
Parameters:
primer
primer0
primer1
primers
numberOfzLevels
Functions:
getZSlice
"""
def __init__(self, name: str,primer: IntervalT2MF_Trapezoidal = None,primer0: IntervalT2MF_Trapezoidal = None, primer1: IntervalT2MF_Trapezoidal = None,primers: List[IntervalT2MF_Trapezoidal] = None, numberOfzLevels = None) -> None:
super().__init__(name)
self.DEBUG = False
if primer != None:
stepsize = [0] * 4
self.numberOfzLevels = numberOfzLevels
self.support = primer.getSupport()
self.primer = primer
slices_fs = [0] * numberOfzLevels
self.slices_zValues = [0] * numberOfzLevels
z_stepSize = 1.0/numberOfzLevels
self.zSlices = [0] * numberOfzLevels
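# per-parameter half-steps by which the UMF shrinks and the LMF expands for each successive zSlice, narrowing the FOU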
stepsize[0] = (primer.getLMF().getA() - primer.getUMF().getA())/(numberOfzLevels-1)/2.0
stepsize[1] = (primer.getLMF().getB() - primer.getUMF().getB())/(numberOfzLevels-1)/2.0
stepsize[2] = (primer.getUMF().getC() - primer.getLMF().getC())/(numberOfzLevels-1)/2.0
stepsize[3] = (primer.getUMF().getD() - primer.getLMF().getD())/(numberOfzLevels-1)/2.0
inner = primer.getLMF().getParameters().copy()
outer = primer.getUMF().getParameters().copy()
self.zSlices[0] = IntervalT2MF_Trapezoidal("Slice 0",primer.getUMF(),primer.getLMF())
self.slices_zValues[0] = z_stepSize
if self.DEBUG:
print(self.zSlices[0].toString()+" Z-Value = "+str(self.slices_zValues[0]))
for i in range(1,numberOfzLevels):
self.slices_zValues[i] = self.slices_zValues[i-1]+z_stepSize
inner[0]-=stepsize[0]
inner[1]-=stepsize[1]
inner[2]+=stepsize[2]
inner[3]+=stepsize[3]
outer[0]+=stepsize[0]
outer[1]+=stepsize[1]
outer[2]-=stepsize[2]
outer[3]-=stepsize[3]
if(inner[0]<outer[0]):
inner[0] = outer[0]
if(inner[1]<outer[1]):
inner[1] = outer[1]
if(inner[2]>outer[2]):
inner[2] = outer[2]
if(inner[3]>outer[3]):
inner[3] = outer[3]
self.zSlices[i] = IntervalT2MF_Trapezoidal("Slice "+str(i), T1MF_Trapezoidal("upper_slice "+str(i),outer),T1MF_Trapezoidal("lower_slice "+str(i),inner))
if self.DEBUG:
print(self.zSlices[i].toString()+" Z-Value = "+str(self.slices_zValues[i]))
elif primer0 != None and primer1 != None:
if self.DEBUG:
print("Number of zLevels: "+str(numberOfzLevels))
self.numberOfzLevels = numberOfzLevels
self.support = primer0.getSupport()
slices_fs = [0] * numberOfzLevels
self.slices_zValues = [0] * numberOfzLevels
self.zSlices = [0] * numberOfzLevels
self.zSlices[0] = primer0
self.zSlices[0].setName(self.getName()+"_Slice_0")
self.zSlices[-1] = primer1
z_stepSize = 1.0/(numberOfzLevels)
self.slices_zValues[0] = z_stepSize
self.slices_zValues[-1] = 1.0
lsu = (primer1.getUMF().getParameters()[0]-primer0.getUMF().getParameters()[0])/(numberOfzLevels-1)
lsl = (primer0.getLMF().getParameters()[0]-primer1.getLMF().getParameters()[0])/(numberOfzLevels-1)
rsu = (primer0.getUMF().getParameters()[3]-primer1.getUMF().getParameters()[3])/(numberOfzLevels-1)
rsl = (primer1.getLMF().getParameters()[3]-primer0.getLMF().getParameters()[3])/(numberOfzLevels-1)
if self.DEBUG:
print("lsu = "+str(lsu)+" lsl = "+str(lsl)+" rsu = "+str(rsu)+" rsl = "+str(rsl))
inner = primer0.getLMF().getParameters().copy()
outer = primer0.getUMF().getParameters().copy()
for i in range(1,numberOfzLevels-1):
self.slices_zValues[i] = self.slices_zValues[i-1]+z_stepSize
inner[0]-=lsl
inner[3]+=rsl
outer[0]+=lsu
outer[3]-=rsu
if self.DEBUG:
print("Slice "+str(i)+" , inner: "+str(inner[0])+" "+str(inner[1])+" "+str(inner[2])+" outer: "+str(outer[0])+" "+str(outer[1])+" "+str(outer[2]))
self.zSlices[i] = IntervalT2MF_Trapezoidal(self.getName()+"_Slice_"+str(i),T1MF_Trapezoidal("upper_slice "+str(i),outer),T1MF_Trapezoidal("lower_slice "+str(i),inner))
if self.DEBUG:
print(self.zSlices[i].toString()+" Z-Value = "+str(self.slices_zValues[i]))
elif primers != None:
self.numberOfzLevels = len(primers)
self.support = primers[0].getSupport()
slices_fs = [0] * self.numberOfzLevels
self.slices_zValues = [0] * self.numberOfzLevels
z_stepSize = 1.0/self.numberOfzLevels
self.slices_zValues[0] = z_stepSize
self.zSlices = primers.copy()
for i in range(self.numberOfzLevels):
self.slices_zValues[i] = z_stepSize*(i+1)
if self.DEBUG:
print(self.zSlices[i].toString()+" Z-Value = "+str(self.slices_zValues[i]))
def clone(self) -> GenT2MF_Trapezoidal:
"""Not implemented"""
print("Not implemented")
return None
def getZSlice(self, slice_number: int) -> IntervalT2MF_Trapezoidal:
"""Return the slice number"""
return self.zSlices[slice_number]
def getLeftShoulderStart(self) -> float:
"""Not implemented"""
print("Not implemented")
return float("Nan")
def getRightShoulderStart(self) -> float:
"""Not implemented"""
print("Not implemented")
return float("Nan")
| LUCIDresearch/JuzzyPython | juzzyPython/generalType2zSlices/sets/GenT2MF_Trapezoidal.py | GenT2MF_Trapezoidal.py | py | 6,556 | python | en | code | 4 | github-code | 6 |
71066844029 |
from examples.example_imports import *
scene = EagerModeScene()
fixed_point = Sphere(radius=0.08).move_to(ORIGIN).set_color(GREEN_D)
scene.add(fixed_point)
start_rod = Vec3(*UP*3)
end_rod = Vec3(-3, 3, 0)
L = (end_rod - start_rod).norm()
# rod = Line3D(start_rod, end_rod, width=0.08).set_color(RED_D)
fine_line = Line3D(start_rod.to_array(), end_rod.to_array(), width=0.02).set_color(RED_D)
scene.add(fine_line)
massive_bob = Sphere(radius=0.12).move_to(end_rod.to_array()).set_color(GREY_D)
m = 1. #kg
g = 9.8
G = Vec3(0, -g, 0)# m/s^2
scene.add(massive_bob)
f = Vec3(0, 0, 0)
tension = Vec3(0, 0, 0)
f_arrow = Arrow(end_rod.to_array(), f.to_array(), buff=0)
mg_arrow = Arrow(end_rod.to_array(), m*G.to_array(), buff=0)
tension_arrow = Arrow(end_rod.to_array(), tension.to_array(), buff=0)
scene.add(f_arrow, mg_arrow, tension_arrow)
v = Vec3(0, 0, 0)
def update_func(obj, dt):
global v, end_rod, fine_line, f, tension
rob_vec = end_rod - start_rod
theta = rob_vec.angle_between(DOWN)
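# rod tension for a pendulum: T = m*g*cos(theta) + m*v^2/L (gravity component along the rod plus the centripetal term)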
tension_scalar = m*g*np.cos(theta) + m * (v.norm()**2)/L
tension = tension_scalar * (-1*rob_vec.normalise())
f = tension + m*G
a = f * (1/m)
v += a * dt
end_rod += v*dt
obj.move_to(end_rod.to_array())
def update_line(obj):
obj.put_start_and_end_on(start_rod.to_array(), end_rod.to_array())
def update_f_arrow(obj):
obj.put_start_and_end_on(end_rod.to_array(), (end_rod+f*0.1).to_array())
def update_mg_arrow(obj):
obj.put_start_and_end_on(end_rod.to_array(), (end_rod+m*G*0.1).to_array())
def update_tension_arrow(obj):
obj.put_start_and_end_on(end_rod.to_array(), (end_rod+tension*0.1).to_array())
fine_line.add_updater(update_line)
massive_bob.add_updater(update_func)
f_arrow.add_updater(update_f_arrow)
mg_arrow.add_updater(update_mg_arrow)
tension_arrow.add_updater(update_tension_arrow)
scene.wait(50)
scene.hold_on()
| beidongjiedeguang/manim-express | examples/animate/单摆.py | 单摆.py | py | 1,885 | python | en | code | 13 | github-code | 6 |
22768172274 |
from backend import credential
import urllib.parse
from google.cloud import storage
import streamlit as st
import os
import json
import fnmatch
import file_io
import utils
import traceback
import io
def init():
creds_str = credential.google_creds()
if not os.path.exists('temp'):
os.makedirs('temp')
with open('temp/google-credentials.json', 'w') as f:
json.dump(creds_str, f)
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'temp/google-credentials.json'
storage_client = storage.Client()
st.session_state['storage_client'] = storage_client
def upload_to_bucket(root_dir, file, uid, name, metadata=None, compress=None):
dir = f"{root_dir}/{uid}"
try:
# get file extension
extension = os.path.splitext(file.name)[1]
filename = name + extension
compressed_file_path = None
if compress:
# Compress file
if compress == 'gzip':
compressed_file_path = file_io.compress_to_gzip(file)
filename += '.gz' # Add '.gz' extension to the filename
elif compress == 'xz':
compressed_file_path = file_io.compress_to_xz(file)
filename += '.xz' # Add '.xz' extension to the filename
else:
raise ValueError(f'Unsupported compression type: {compress}. Supported types are "gzip" and "xz".'
f'if you do not want to compress the file, set compress=None')
storage_client = st.session_state['storage_client']
bucket = storage_client.get_bucket(st.secrets['gcp']['bucket_name'])
blob = bucket.blob(f"{dir}/{filename}")
if compress:
# Open the compressed file in read-binary mode for upload
with open(compressed_file_path, 'rb') as file_obj:
file_content = file_obj.read() # read file content once
default_meta = {
'md5_hash': utils.calculate_md5(file_content),
'size': utils.calculate_size(file_content),
'owner': st.session_state['student_number'],
'time': utils.get_current_time()
}
# Merge the default metadata with the given metadata
meta = {**default_meta, **metadata} if metadata else default_meta
# Set the blob metadata
blob.metadata = meta
blob.upload_from_file(io.BytesIO(file_content))
# Delete the compressed file
os.remove(compressed_file_path)
else:
# If compress is None or False, upload the file as is
# Convert file_content to a BytesIO object and upload
file_content = file.read()
default_meta = {
'md5_hash': utils.calculate_md5(file_content),
'size': utils.calculate_size(file_content),
'owner': st.session_state['student_number'],
'time': utils.get_current_time()
}
# Merge the default metadata with the given metadata
meta = {**default_meta, **metadata} if metadata else default_meta
# Set the blob metadata
blob.metadata = meta
blob.upload_from_file(io.BytesIO(file_content))
except Exception as e:
tb = traceback.format_exc()
st.error(f'❌Failed to upload to the bucket: **{e}** \n\n **Traceback**:\n ```{tb}```')
st.stop()
def delete_from_bucket(root_dir, filenames, uid):
for filename in filenames:
# Decode the filename to ensure spaces are handled correctly
decoded_filename = urllib.parse.unquote(filename)
try:
storage_client = st.session_state['storage_client']
bucket = storage_client.get_bucket(st.secrets['gcp']['bucket_name'])
blob = bucket.blob(f"{root_dir}/{uid}/{decoded_filename}")
blob.delete()
except Exception as e:
st.error(f'failed to delete file ({root_dir}/{uid}/{decoded_filename}) from bucket. **{e}**')
st.stop()
def download_from_bucket(root_dir, filename, uid):
try:
storage_client = st.session_state['storage_client']
bucket = storage_client.get_bucket(st.secrets['gcp']['bucket_name'])
blob = bucket.blob(f"{root_dir}/{uid}/{filename}")
if not os.path.exists('temp'):
os.makedirs('temp')
with open(f"temp/{filename}", 'wb') as f:
storage_client.download_blob_to_file(blob, f)
return f"temp/{filename}"
except Exception as e:
st.error(f'failed to download file from bucket. **{e}**')
st.stop()
def get_blobs(bucket, dir, name_pattern, extensions):
blobs = []
if '*' in name_pattern:
# If wildcard is present in name_pattern, process as pattern.
prefix, pattern = name_pattern.split('*', 1)
# List blobs whose names start with the given prefix
for blob in bucket.list_blobs(prefix=f"{dir}/{prefix}"):
for extension in extensions:
if blob.name.endswith(extension) and fnmatch.fnmatch(blob.name, f"{dir}/{name_pattern}"):
blobs.append(blob)
# Once a match is found, no need to check other extensions
break
else:
# If no wildcard is present, process name_pattern as exact file name.
for extension in extensions:
blob = bucket.blob(f"{dir}/{name_pattern}{extension}")
if blob.exists():
blobs.append(blob)
return blobs
def get_public_urls_from_blobs(blobs):
return [blob.public_url for blob in blobs]
def get_blob_md5(blobs):
return [blob.md5_hash for blob in blobs]
def get_blob_metadata(blobs):
return [blob.metadata for blob in blobs]
def get_blob_info(root_dir, uid, name_pattern, extensions, infos):
storage_client = st.session_state['storage_client']
bucket = storage_client.get_bucket(st.secrets['gcp']['bucket_name'])
dir = f"{root_dir}/{uid}"
blobs = get_blobs(bucket, dir, name_pattern, extensions)
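# note: the loop below returns on its first iteration, so only the first entry of infos is honoured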
for info in infos:
if info == 'url':
return get_public_urls_from_blobs(blobs)
else:
metas = get_blob_metadata(blobs)
return [meta[info] for meta in metas]
| sean1832/Mongrel-Assemblies-DB | src/backend/gcp_handler.py | gcp_handler.py | py | 6,361 | python | en | code | 0 | github-code | 6 |
22329941730 |
from discord.ext import commands, tasks
import discord
import asyncio
import os
import json
import sqlite3
from dotenv import load_dotenv
import requests
from datetime import datetime,time
load_dotenv()
class Birthday(commands.Cog):
"""Birthday commands."""
def __init__(self, client):
self.client = client
self.birthday_announcments.start()
@commands.command(hidden = True)
@commands.is_owner()
async def force_add_user(self, ctx, user: discord.Member, day: int, month: int):
"""Adds a user to the birthday list."""
if day > 31 or day < 1 or month > 12 or month < 1:
await ctx.send("Invalid date.")
return
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
cur.execute("SELECT * FROM birthday WHERE user_id = ?", (user.id,))
if cur.fetchone() is not None:
await ctx.send("User already exists.")
return
cur.execute("INSERT INTO birthday VALUES (?, ?, ?)", (user.id, day, month))
con.commit()
con.close()
await ctx.send("Added user to birthday list.")
@commands.command(hidden=True)
@commands.is_owner()
async def makeservertablebirthday(self,ctx):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
cur.execute("CREATE TABLE server(ServerID int, Servertoggle, birthdaychannel int,birthdaymessage text)")
con.commit()
con.close()
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
cur.execute("CREATE TABLE birthday(UsersID int, birthday)")
con.commit()
con.close()
await ctx.send("Done")
#
#@commands.command(hidden = True)
#@commands.is_owner()
#async def setallbithday(self,ctx):
# for i in self.client.guilds:
# con = sqlite3.connect("databases/server_brithdays.db")
# cur = con.cursor()
# cur.execute("INSERT INTO server(ServerID, Servertoggle,birthdaychannel) VALUES(?, ?,?)", (i.id, False,None))
# await ctx.send(f"{i} has been set")
# con.commit()
# con.close()
@commands.Cog.listener()
async def on_guild_join(self, guild):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
cur.execute("INSERT INTO server(ServerID, Servertoggle) VALUES(?, ?)", (guild.id, False))
con.commit()
con.close()
@commands.command(help = " enable and disable Birthday")
@commands.has_permissions(administrator=True)
async def toggle_birthday(self,ctx):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
datas = cur.execute("SELECT * FROM server WHERE ServerID=?", (ctx.guild.id,))
datas = cur.fetchall()
toggle = datas[0][1]
if toggle == True:
cur.execute("UPDATE server SET Servertoggle = ? WHERE ServerID=?", (False, ctx.guild.id,))
con.commit()
con.close()
await ctx.send("Birthday reminders has been turned off")
if toggle == False:
cur.execute("UPDATE server SET Servertoggle = ? WHERE ServerID=?", (True, ctx.guild.id,))
con.commit()
con.close()
await ctx.send("Birthday reminders has been turrned on")
@commands.slash_command(name="toggle_birthday", description="enable and disable Birthday")
@commands.has_permissions(administrator=True)
async def _toggle_birthday(self,ctx):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
datas = cur.execute("SELECT * FROM server WHERE ServerID=?", (ctx.guild.id,))
datas = cur.fetchall()
toggle = datas[0][1]
if toggle == True:
cur.execute("UPDATE server SET Servertoggle = ? WHERE ServerID=?", (False, ctx.guild.id,))
con.commit()
con.close()
await ctx.respond("Birthday reminders has been turned off")
if toggle == False:
cur.execute("UPDATE server SET Servertoggle = ? WHERE ServerID=?", (True, ctx.guild.id,))
con.commit()
con.close()
await ctx.respond("Birthday reminders has been turrned on")
await ctx.followup.send("If you like the bot, please consider voting for it at https://top.gg/bot/902240397273743361 \n It helps a lot! :D", ephemeral=True)
@commands.slash_command(name="setbirthday", description="Set your birthday use day then month")
async def setbirthday__slash(self, ctx, day: int, month: int):
tocken = os.getenv("TOPGG_TOKEN")
api = requests.get(f"https://top.gg/api/bots/902240397273743361/check?userId={ctx.author.id}", headers={"Authorization": tocken, "Content-Type": "application/json"})
data = api.json()
print(api)
print(data)
voted = data["voted"]
#if the api does not return a 200 status code
if api.status_code != 200:
voted = 1
print("api error")
if voted == 0:
await ctx.respond("You need to have voted for simplex in the last 24 hours to set your birthday. Please vote and then try again, you can vote here: https://top.gg/bot/902240397273743361/vote",ephemeral=True)
return
else:
if day > 31 or day < 1 or month > 12 or month < 1:
await ctx.respond("Invalid date.")
else:
#force 2 digit date
if day < 10:
day = f"0{day}"
if month < 10:
month = f"0{month}"
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM birthday WHERE UsersID=?", (ctx.author.id,))
data = cur.fetchall()
if data == []:
cur.execute("INSERT INTO birthday(UsersID, birthday) VALUES(?, ?)", (ctx.author.id, f"{day}/{month}"))
con.commit()
con.close()
await ctx.respond("Your birthday has been set")
else:
cur.execute("UPDATE birthday SET birthday = ? WHERE UsersID=?", (f"{day}/{month}", ctx.author.id,))
con.commit()
con.close()
await ctx.respond("Your birthday has been updated")
@commands.command(name="setbirthday", help = "Set your birthday use day then month")
async def setbirthday_commands(self, ctx, day: int, month: int):
if day > 31 or day < 1 or month > 12 or month < 1:
await ctx.send("Invalid date.")
else:
# format day/month as two digits
if len(str(day)) == 1:
day = f"0{day}"
if len(str(month)) == 1:
month = f"0{month}"
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM birthday WHERE UsersID=?", (ctx.author.id,))
data = cur.fetchall()
if data == []:
cur.execute("INSERT INTO birthday(UsersID, birthday) VALUES(?, ?)", (ctx.author.id, f"{day}/{month}"))
con.commit()
con.close()
await ctx.send("Your birthday has been set")
else:
cur.execute("UPDATE birthday SET birthday = ? WHERE UsersID=?", (f"{day}/{month}", ctx.author.id,))
con.commit()
con.close()
await ctx.send("Your birthday has been updated")
@commands.command(name="set_birthday_channel",help = "Set the birthday channel")
@commands.has_permissions(administrator=True)
async def set_birthday_channel_command(self,ctx, channel: commands.TextChannelConverter):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
cur.execute("UPDATE server SET birthdaychannel = ? WHERE ServerID=?", (channel.id, ctx.guild.id,))
con.commit()
con.close()
await ctx.send(f"Birthday channel has been set to {channel} \n To enable birthday reminders use the command `/toggle_birthday` \n To set a custom message use the command `/birthday_message`")
@commands.slash_command(name="set_birthday_channel",help = "Set the birthday channel")
@commands.has_permissions(administrator=True)
async def set_birthday_channel__slash(self,ctx, channel: commands.TextChannelConverter):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
cur.execute("UPDATE server SET birthdaychannel = ? WHERE ServerID=?", (channel.id, ctx.guild.id,))
con.commit()
con.close()
await ctx.respond(f"Birthday channel has been set to {channel}")
@commands.slash_command(name="findbirthday", description="Find a users birthday")
async def findbirthday__slash(self, ctx, user: discord.Member):
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM birthday WHERE UsersID=?", (user.id,))
data = cur.fetchall()
if data == []:
await ctx.respond(f"{user} has not set their birthday")
else:
await ctx.respond(f"{user} birthday is {data[0][1]}")
await ctx.followup.send("If you like the bot, please consider voting for it at https://top.gg/bot/902240397273743361 \n It helps a lot! :D", ephemeral=True)
@tasks.loop(time=time(7,00))
async def birthday_announcments(self):
print("Birthday announcments")
for server in self.client.guilds:
print(server)
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
datas = cur.execute("SELECT * FROM server WHERE ServerID=?", (server.id,))
datas = cur.fetchall()
if datas == []:
cur.execute("INSERT INTO server(ServerID, Servertoggle, birthdaychannel) VALUES(?, ?, ?)", (server.id, False, None))
con.commit()
con.close()
else:
pass
con = sqlite3.connect("databases/user_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM birthday")
data = cur.fetchall()
if data == []:
print("No birthday")
#does not work below here
else:
for x in data:
if datas[0][1] == True:
if datas[0][2] == None:
pass
else:
user = await self.client.fetch_user(x[0])
if user in server.members:
channel = await self.client.fetch_channel(datas[0][2])
message = datas[0][3]
if message == None:
message = ":tada:"
print(channel)
print(x[1])
print(datetime.now().strftime("%d/%m"))
if x[1] == datetime.now().strftime("%d/%m"):
print("Birthday")
print(x[0])
await channel.send(f"Happy birthday <@{x[0]}>! \n {message}")
else:
username = await self.client.fetch_user(x[0])
print(f"User {username} not in server {x[0]} {server}")
else:
pass
#@commands.command()
#@commands.is_owner()
#async def foramt_all_birthdays(self,ctx):
# con = sqlite3.connect("databases/user_brithdays.db")
# cur = con.cursor()
# data = cur.execute("SELECT * FROM birthday")
# data = cur.fetchall()
# for i in data:
# day = i[1].split("/")[0]
# month = i[1].split("/")[1]
# if len(day) == 1:
# day = "0" + day
# if len(month) == 1:
# month = "0" + month
# cur.execute("UPDATE birthday SET Birthday = ? WHERE UsersID=?", (f"{day}/{month}", i[0],))
# con.commit()
# con.close()
#
@commands.command()
@commands.is_owner()
async def add_message_to_birthday(self,ctx,*,message):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
# create a new column
cur.execute("ALTER TABLE server ADD COLUMN birthdaymessage TEXT")
#set the message
cur.execute("UPDATE server SET birthdaymessage = ?", (message,))
con.commit()
con.close()
await ctx.send("Done")
@commands.slash_command(name="birthday_message", description="Add a message to the birthday announcment")
@commands.has_permissions(administrator=True)
async def add_message_to_birthday__slash(self,ctx,*,message):
con = sqlite3.connect("databases/server_brithdays.db")
cur = con.cursor()
data = cur.execute("SELECT * FROM server WHERE ServerID=?", (ctx.guild.id,))
data = cur.fetchall()
if data == []:
await ctx.respond("You have not set a birthday channel")
else:
cur.execute("UPDATE server SET birthdaymessage = ? WHERE ServerID=?", (message, ctx.guild.id,))
con.commit()
con.close()
await ctx.respond("Done")
await ctx.followup.send("If you like the bot, please consider voting for it at https://top.gg/bot/902240397273743361 \n It helps a lot! :D", ephemeral=True)
def setup(bot):
bot.add_cog(Birthday(bot))
| micfun123/Simplex_bot | cogs/birthday.py | birthday.py | py | 14,104 | python | en | code | 24 | github-code | 6 |
21932276295 |
from discord.ext import commands
class ErrorHandeler(commands.Cog):
"""A cog for global error handling"""
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error: commands.CommandError):
if isinstance(error, commands.MemberNotFound):
await ctx.send("Please input a valid user")
if isinstance(error, commands.UnexpectedQuoteError):
await ctx.send("Your message must be surrounded by quotes.")
def setup(bot: commands.Bot):
bot.add_cog(ErrorHandeler(bot))
| Jarkyc/Franklin-The-Undying | errorhandler.py | errorhandler.py | py | 599 | python | en | code | 0 | github-code | 6 |
27801635646 |
import socket
import time
#traceroute.py 172.217.23.78 udp -p 53 -n 3 -d
class Tracerouter:
def __init__(self, ip,port,timeout,request,sendwait,debug,data,size):
self.ip = ip
self.request = request
self.timeout = timeout
self.port = port
self.sendwait = sendwait
self.debug = debug
self.data=data
self.size=size
def log(self,message):
if self.debug:
with open('logs.txt', "a") as file:
file.write(message + "\n")
def send_and_recive(self, send_socket, recv_socket):
self.log(f'Sending to {self.ip}')
send_socket.sendto(b"0" * self.size ,(self.ip, self.port))
adr = ''
try:
_, adr = recv_socket.recvfrom(512)
adr = adr[0]
self.log(f'Received data from {adr}')
except socket.timeout as e:
self.log('Timed out waiting for a reply')
pass
return adr
def ping(self, ttl, send_socket, recv_socket):
recv_socket.settimeout(self.timeout)
send_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
current = None
start = time.time()
adr = self.send_and_recive(send_socket,recv_socket)
if adr != '':
current = adr
times = round((time.time() - start) * 1000)
self.log(f'Response time {times} ms')
return current,times
def run(self):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.getprotobyname("udp")) as send_socket:
with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname("icmp")) as recv_socket:
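# classic UDP traceroute: probes are sent with an increasing TTL; every router that drops a probe
# replies with an ICMP Time Exceeded message, revealing its address, until the destination answers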
for ttl in range(1, self.request + 1):
self.log(f'Sending packet to {self.ip} with ttl {ttl}')
current,times= self.ping(ttl, send_socket, recv_socket)
if current is None:
continue
else:
self.data.append([str(ttl), current, f'{times} ms'])
if current == self.ip:
break
time.sleep(self.sendwait)
return self.data
| belutkautka/Traceroute | UDP_traceroute.py | UDP_traceroute.py | py | 2,259 | python | en | code | 0 | github-code | 6 |
86625823283 |
#! /usr/bin/env python
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Linearly normalize intensity to between 0 and 255')
parser.add_argument("input_spec", type=str, help="Input specification")
parser.add_argument("out_version", type=str, help="Output image version")
args = parser.parse_args()
import sys
import os
sys.path.append(os.environ['REPO_DIR'] + '/utilities')
from utilities2015 import *
from data_manager import *
from metadata import *
from distributed_utilities import *
from learning_utilities import *
input_spec = load_ini(args.input_spec)
image_name_list = input_spec['image_name_list']
stack = input_spec['stack']
prep_id = input_spec['prep_id']
if prep_id == 'None':
prep_id = None
resol = input_spec['resol']
version = input_spec['version']
if version == 'None':
version = None
from scipy.ndimage.interpolation import map_coordinates
from skimage.exposure import rescale_intensity, adjust_gamma
from skimage.transform import rotate
# for section in set(metadata_cache['valid_sections_all'][stack]) - set(metadata_cache['valid_sections'][stack]):
# for section in metadata_cache['valid_sections'][stack]:
for image_name in image_name_list:
# print "Section", section
t = time.time()
img = DataManager.load_image_v2(stack=stack, prep_id=prep_id, fn=image_name, version=version, resol=resol)
sys.stderr.write('Load image: %.2f seconds.\n' % (time.time() - t))
t = time.time()
tb_mask = DataManager.load_thumbnail_mask_v3(stack=stack, prep_id=None, fn=image_name)
# raw_mask = rescale_by_resampling(tb_mask, new_shape=(img.shape[1], img.shape[0]))
raw_mask = resize(tb_mask, img.shape) > .5
save_data(raw_mask,
DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, fn=image_name, version='mask', resol=resol, ext='bp'),
upload_s3=False)
sys.stderr.write('Rescale mask: %.2f seconds.\n' % (time.time() - t))
t = time.time()
mean_std_all_regions = []
cx_cy_all_regions = []
region_size = 5000
region_spacing = 3000
# for cx in range(region_size/2, img.shape[1]-region_size/2+1, region_spacing):
# for cy in range(region_size/2, img.shape[0]-region_size/2+1, region_spacing):
for cx in range(0, img.shape[1], region_spacing):
for cy in range(0, img.shape[0], region_spacing):
region = img[max(cy-region_size/2, 0):min(cy+region_size/2+1, img.shape[0]-1),
max(cx-region_size/2, 0):min(cx+region_size/2+1, img.shape[1]-1)]
region_mask = raw_mask[max(cy-region_size/2, 0):min(cy+region_size/2+1, img.shape[0]-1),
max(cx-region_size/2, 0):min(cx+region_size/2+1, img.shape[1]-1)]
if np.count_nonzero(region_mask) == 0:
continue
mean_std_all_regions.append((region[region_mask].mean(), region[region_mask].std()))
cx_cy_all_regions.append((cx, cy))
sys.stderr.write('Compute mean/std for sample regions: %.2f seconds.\n' % (time.time() - t))
t = time.time()
mean_map = resample_scoremap(sparse_scores=np.array(mean_std_all_regions)[:,0],
sample_locations=cx_cy_all_regions,
gridspec=(region_size, region_spacing, img.shape[1], img.shape[0], (0,0)),
downscale=4,
interpolation_order=2)
sys.stderr.write('Interpolate mean map: %.2f seconds.\n' % (time.time() - t)) #10s
t = time.time()
mean_map = rescale_by_resampling(mean_map, new_shape=(img.shape[1], img.shape[0]))
sys.stderr.write('Scale up mean map: %.2f seconds.\n' % (time.time() - t)) #30s
t = time.time()
std_map = resample_scoremap(sparse_scores=np.array(mean_std_all_regions)[:,1],
sample_locations=cx_cy_all_regions,
gridspec=(region_size, region_spacing, img.shape[1], img.shape[0], (0,0)),
downscale=4,
interpolation_order=2)
sys.stderr.write('Interpolate std map: %.2f seconds.\n' % (time.time() - t)) #10s
t = time.time()
std_map = rescale_by_resampling(std_map, new_shape=(img.shape[1], img.shape[0]))
sys.stderr.write('Scale up std map: %.2f seconds.\n' % (time.time() - t)) #30s
# Save mean/std results.
fp = DataManager.get_intensity_normalization_result_filepath(what='region_centers', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
np.savetxt(fp, cx_cy_all_regions)
fp = DataManager.get_intensity_normalization_result_filepath(what='mean_std_all_regions', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
np.savetxt(fp, mean_std_all_regions)
fp = DataManager.get_intensity_normalization_result_filepath(what='mean_map', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
bp.pack_ndarray_file(mean_map.astype(np.float16), fp)
fp = DataManager.get_intensity_normalization_result_filepath(what='std_map', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
bp.pack_ndarray_file(std_map.astype(np.float16), fp)
# Export normalized image.
t = time.time()
raw_mask = raw_mask & (std_map > 0)
img_normalized = np.zeros(img.shape, np.float32)
img_normalized[raw_mask] = (img[raw_mask] - mean_map[raw_mask]) / std_map[raw_mask]
sys.stderr.write('Normalize: %.2f seconds.\n' % (time.time() - t)) #30s
t = time.time()
# FIX THIS! THIS only save uint16, not float16. Need to save as bp instead.
# img_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=None, version='NtbNormalizedFloat', resol='down8', section=section, )
# create_parent_dir_if_not_exists(img_fp)
# imsave(img_fp, img_normalized[::8, ::8].astype(np.float16))
save_data(img_normalized.astype(np.float16),
DataManager.get_intensity_normalization_result_filepath(what='normalized_float_map', stack=stack, fn=image_name),
upload_s3=False)
sys.stderr.write('Save float version: %.2f seconds.\n' % (time.time() - t)) #30s
# t = time.time()
# img_normalized_uint8 = rescale_intensity_v2(img_normalized, -1, 6)
# sys.stderr.write('Rescale to uint8: %.2f seconds.\n' % (time.time() - t)) #30s
# t = time.time()
# img_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=None, version='NtbNormalized', resol='raw', section=section)
# create_parent_dir_if_not_exists(img_fp)
# imsave(img_fp, img_normalized_uint8)
# sys.stderr.write('Save uint8 version: %.2f seconds.\n' % (time.time() - t)) #30s
# Export histogram.
plt.hist(img_normalized[raw_mask].flatten(), bins=100, log=True);
fp = DataManager.get_intensity_normalization_result_filepath(what='float_histogram_png', stack=stack, fn=image_name)
create_parent_dir_if_not_exists(fp)
plt.savefig(fp)
plt.close();
# hist_fp = DataManager.get_intensity_normalization_result_filepath(what='float_histogram', stack=stack, section=section)
# create_parent_dir_if_not_exists(hist_fp)
# hist, bin_edges = np.histogram(img_normalized[valid_mask].flatten(), bins=np.arange(0,201,5));
# plt.bar(bin_edges[:-1], np.log(hist));
# plt.xticks(np.arange(0, 200, 20), np.arange(0, 200, 20));
# plt.xlabel('Normalized pixel value (float)');
# plt.title(metadata_cache['sections_to_filenames'][stack][section])
# plt.savefig(hist_fp)
# plt.close();
gamma_map = img_as_ubyte(adjust_gamma(np.arange(0, 256, 1) / 255., 8.))
low = -2.
high = 50.
for image_name in image_name_list:
img_normalized = load_data(
DataManager.get_intensity_normalization_result_filepath(what='normalized_float_map', stack=stack, fn=image_name),
download_s3=False)
t = time.time()
img_normalized_uint8 = rescale_intensity_v2(img_normalized, low, high)
sys.stderr.write('Rescale to uint8: %.2f seconds.\n' % (time.time() - t))
t = time.time()
raw_mask = load_data(DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, fn=image_name, version='mask', resol=resol, ext='bp'),
download_s3=False)
img_normalized_uint8[~raw_mask] = 0
sys.stderr.write('Load mask: %.2f seconds.\n' % (time.time() - t))
img = 255 - img_normalized_uint8
save_data(gamma_map[img],
DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, fn=image_name, version=args.out_version, resol=resol),
upload_s3=False)
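# --- Illustrative sketch (not part of the original pipeline) ---
# The script above depends on project-specific helpers (DataManager,
# resample_scoremap, rescale_by_resampling). The small NumPy/SciPy sketch
# below only illustrates the underlying idea: estimate a smooth local
# mean/std and use them to normalize the image.
import numpy as np
from scipy.ndimage import uniform_filter

def local_normalize(img, size=201):
    img = img.astype(np.float32)
    local_mean = uniform_filter(img, size=size)
    local_sq_mean = uniform_filter(img * img, size=size)
    local_std = np.sqrt(np.maximum(local_sq_mean - local_mean ** 2, 1e-6))
    return (img - local_mean) / local_std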
|
mistycheney/MouseBrainAtlas
|
preprocess/normalize_intensity_adaptive.py
|
normalize_intensity_adaptive.py
|
py
| 8,733 |
python
|
en
|
code
| 3 |
github-code
|
6
|
29284611127
|
# -*- coding: utf-8 -*-
import sys
import re
import pdb
def main(args):
#pdb.set_trace()
lines = args[1].decode("gb18030").encode("utf8").split("|||")
for line in lines:
if re.search(r"^(\S+)",line):
s = re.search(r"^(\S+)",line)
ss = s.group(1)
print(ss.decode("utf8").encode("gb18030"))
break
if __name__=="__main__":
main(sys.argv)
|
Tubao/xkx
|
pkuxkx/xx/getRoomName.py
|
getRoomName.py
|
py
| 443 |
python
|
en
|
code
| 2 |
github-code
|
6
|
20503848569
|
# Parse the page with fresh articles (this one) and pick the articles that contain at least one of the keywords (defined at the start of the script). Search across all of the available preview information (the information available directly on the current page). Print to the console the list of matching articles in the format: <date> - <title> - <link>.
# define the list of keywords
KEYWORDS = ['дизайн', 'фото', 'web', 'python']
import requests
from bs4 import BeautifulSoup
# from pprint import pprint
# import string
import re
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7,sv;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': '_ym_uid=1661790138398573269; _ym_d=1661790138; habr_web_home_feed=/all/; hl=ru; fl=ru; _ym_isad=1; _ga=GA1.2.1864422457.1661790139; _gid=GA1.2.2059705457.1661790139; _gat_gtag_UA_726094_1=1',
'DNT': '1',
'Host': 'habr.com',
'Referer': 'https://yandex.ru/',
'sec-ch-ua': '"Chromium";v="104", " Not A;Brand";v="99", "Google Chrome";v="104"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-User': '?1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36'
}
url = 'https://habr.com'
responce = requests.get(url+'/ru/all', headers=headers)
text = responce.text
soup = BeautifulSoup(text, 'html.parser')
articles = soup.find_all(class_='tm-articles-list__item')
for article in articles:
preview = article.find(class_=['article-formatted-body article-formatted-body article-formatted-body_version-2', 'article-formatted-body article-formatted-body article-formatted-body_version-1']).text
    # Variant using set comparison
# for p in string.punctuation:
# if p in preview:
# preview = preview.replace(p, '')
# preview = set(preview.split())
# if preview & set(KEYWORDS):
# data_1 = article.find(class_='tm-article-snippet__datetime-published')
# data_2 = data_1.find('time')
# data = data_2.attrs['title']
    #     print(f'Article date: {data}')
# title = article.find(class_='tm-article-snippet__title-link').text.strip()
    #     print(f'Article title: {title}')
# link = article.find(class_='tm-article-snippet__title tm-article-snippet__title_h2')
# link = link.find('a')
# href = link.attrs['href']
    #     print(f'Article link: {url + href}')
# print()
    # Variant using a regex
for i in KEYWORDS:
if re.search(i, preview):
data = article.find(class_='tm-article-snippet__datetime-published').find('time').attrs['title']
            print(f'Date: {data}')
title = article.find(class_='tm-article-snippet__title-link').text.strip()
            print(f'Title: {title}')
link = article.find(class_='tm-article-snippet__title tm-article-snippet__title_h2').find('a').attrs['href']
            print(f'Link: {url + link}')
print()
|
Dimasuz/HW_4.3
|
HW_4.3.py
|
HW_4.3.py
|
py
| 3,750 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
36697027175
|
from jinja2 import Environment, PackageLoader
import os
from typing import Dict
import re
class SQLTemplate:
_templatePath = os.path.join(
os.path.dirname(os.path.dirname(os.path.relpath(__file__))), "templates"
)
_templatePath = os.path.join("templates")
# raise ValueError(f'templatePath = {_templatePath}')
def getTemplate(self, sqlAction: str, parameters: Dict, **kwargs) -> str:
templateName = f"{sqlAction.lower().strip()}.j2"
templateEnv = Environment(
loader=PackageLoader(
package_name="tips", package_path="framework/templates"
),
trim_blocks=True
# loader=FileSystemLoader(self._templatePath), trim_blocks=True
)
cmd = (
templateEnv.get_template(templateName)
.render(parameters=parameters, kwargs=kwargs)
.strip()
.replace("\n", " ")
)
return re.sub(" +", " ", cmd)
|
ProjectiveGroupUK/tips-snowpark
|
tips/framework/utils/sql_template.py
|
sql_template.py
|
py
| 1,001 |
python
|
en
|
code
| 2 |
github-code
|
6
|
9837393322
|
import networkx as nx
# import pulp
G = nx.DiGraph()
G.add_nodes_from(['A', 'B', 'C', 'D', 'E', 'F'])
G.add_edges_from([('A', 'B'), ('A', 'D'), ('B', 'C'), ('B', 'E'), ('C', 'F'), ('D', 'C'), ('E', 'C'), ('E', 'D'), ('E', 'F')])
capacities = [4,5,5,4,4,3,2,2,1]
costs = [1,7,7,2,3,2,1,1,4]
for i, edge in enumerate(G.edges()):
G.edges[edge]['capacity'] = capacities[i]
G.edges[edge]['cost'] = costs[i]
demands = [-2,-5,-1,3,2,3]
for i, node in enumerate(G.nodes()):
G.nodes[node]['demand'] = demands[i]
myflow = nx.min_cost_flow(G, weight='cost')
mycost = nx.cost_of_flow(G, myflow, weight='cost')
print(mycost, myflow)
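# --- Illustrative check (not part of the original snippet) ---
# min_cost_flow returns a dict of dicts (flow per edge); the assertions below
# just show how that structure relates to the capacities and demands above.
for u, v in G.edges():
    assert myflow[u][v] <= G.edges[u, v]['capacity']
for node in G.nodes():
    inflow = sum(myflow[u][node] for u in G.predecessors(node))
    outflow = sum(myflow[node][v] for v in G.successors(node))
    assert inflow - outflow == G.nodes[node]['demand']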
|
havarpan/verkkomallit-k21-glitchtest
|
python/luentoesim.py
|
luentoesim.py
|
py
| 639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21347456845
|
class IP():
def __init__(self,ipaddress):
url='http://m.ip138.com/ip.asp?ip='
self.IP=ipaddress
self.site=url+self.IP
self.header={'User-Agent' :'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'}
def get_phy(self):
import requests as RQ
import re
try:
r=RQ.get(self.site)
r.raise_for_status()
r.encoding=r.apparent_encoding
html=r.text[-1000:]
#print(html)
answer=re.findall('本站主数据:(.*?)</p><p',html,re.S)
answer=answer[0]
            return 'The physical location of the IP you queried (%s) should be: %s ' % (self.IP, answer)
except:
return 'sth wrong'
# sample of the scraped HTML used by the regex above, kept for reference
'''<h1 class="query">您查询的IP:1.1.1.1</h1><p class="result">
本站主数据:澳大利亚 </p><p class="result">
参考数据一:澳大利亚</p>
'''
'''
while True:
point='.'
for I in range(7,100,7):
for j in range(1,100,7):
for k in range(1,100,70):
for L in range(1,100,20):
add=str(I)+point+str(j)+point+str(k)+point+str(L)
print(add)
#ip=input()
i=IP(add)
ans=i.get_phy()
print(ans)
'''
# the first thing written using this API
'''num=input()
num_list=list(num)
num_list.remove(' ')
num_list.remove(' ')
new_num_list=[]
print(num_list)
for i in range(6):
if num_list[i]=='-':
new_num_list.append(int(num_list[i]+num_list[i+1]))
'''
'''
sentinel ='' # stop reading input when this is entered
lines = []
for line in iter(input, sentinel):
lines.append(line)
init=list(str(input()))
try:
init.remove(' ')
except:
pass
print(int(init[0])+int(init[1]))
string=input()
str_list=list(string)
str_list=str_list.reverse()
new_str=str(str_list)
print(new_str)
'''
|
Alex-Beng/CubingQQBot
|
IP.py
|
IP.py
|
py
| 1,869 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5312412579
|
import pygame
from flame import Flame
class Firework:
def __init__(self):
self.rect = pygame.Rect(640, 720, 25, 50)
self.image = pygame.Surface( (25, 50) )
self.image.fill( (255, 255, 255) )
self.exploded = False
self.flames = []
def update(self):
if not self.exploded:
self.rect.y -= 2
if self.rect.y <= 200:
self.explode()
else:
for flame in self.flames:
flame.update()
def draw(self, screen):
if not self.exploded:
screen.blit(self.image, self.rect)
else:
for flame in self.flames:
flame.draw(screen)
def explode(self):
self.exploded = True
for i in range(1000):
self.flames.append(Flame())
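# --- Illustrative main loop sketch (not part of the original file) ---
# Flame comes from a separate module that is not shown here, so this loop only
# demonstrates how Firework is meant to be driven by a pygame event loop.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((1280, 720))
    clock = pygame.time.Clock()
    firework = Firework()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        screen.fill((0, 0, 0))
        firework.update()
        firework.draw(screen)
        pygame.display.flip()
        clock.tick(60)
    pygame.quit()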
|
jbedu1024/fajerwerki
|
firework.py
|
firework.py
|
py
| 822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21725310389
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printLL(self):
temp = self.head
while (temp):
print(temp.data)
temp = temp.next
def deleteNode(self, key):
temp = self.head
        if temp is not None:
if temp.data == key:
self.head = temp.next
temp = None
return
while temp is not None:
if temp.data == key:
break
prev = temp
temp = temp.next
# if key is not present in the linkedlist then return
if temp is None:
return
# unlink the previous node
prev.next = temp.next
temp = None
def deleteSpecificPosition(self, position):
if self.head is None:
return
temp = self.head
for i in range(position - 1):
temp = temp.next
if temp is None:
return
        # if position is more than the number of nodes
        if temp is None:
            return
        if temp.next is None:
            return
        # Unlink the node from the linked list
        temp.next = temp.next.next
def deleteAll(self):
current = self.head
        while current:
            nextRef = current.next
            del current.data
            current = nextRef
        # reset head so the list is actually empty afterwards
        self.head = None
if __name__ == "__main__":
llist = LinkedList()
llist.push(7)
llist.push(1)
llist.push(3)
llist.push(2)
llist.push(5)
llist.push(9)
llist.printLL()
llist.deleteNode(3)
print("Linked list after deleting 3")
llist.printLL()
llist.deleteSpecificPosition(2)
print("Linkedlist after deleting node at 2nd position")
llist.printLL()
print("Delete everything")
llist.deleteAll()
|
ItsSamarth/ds-python
|
DataStructures/linkedlist/basic.py
|
basic.py
|
py
| 2,099 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32757721309
|
############### Start stopBackupMaintSched() ###############
def stopBackupMaintSched():
message ="""
##################################################################
# Start stopBackupMaintSched #
##################################################################
"""
printLog(message)
    # check if maintenance activities are running
    printBoth("check if maintenance activities are running")
output = cmdOut("sudo -u admin status.dpn | egrep -A 2 checkpoint")
lines = output.split('\n')
if lines[0][-3:-1] == 'OK':
printBoth("checkpoint 'OK'")
if lines[1][-3:-1] == 'OK':
printBoth("GC 'OK'")
    if lines[2][-3:-1] == 'OK':
printBoth("hfscheck 'OK'")
if (lines[0][-3:-1] == 'OK') and (lines[1][-3:-1] == 'OK') and (lines[2][-3:-1] == 'OK'):
printBoth("Stopping Maintenence Scheduler")
cmdOut("sudo -u admin dpnctl stop maint")
output = cmdOut("sudo -u admin dpnctl status maint 2>&1")
while output.split('\n')[-2].split()[-1]!= 'suspended.':
question = """couldn't stop the Maintenece scheduler
please try manually and when done Press yes to continue or press no to quit"""
if not query_yes_no(question): sys.exit()
output = cmdOut("sudo -u admin dpnctl status 2>&1")
printBoth("Stopping Backup Scheduler")
cmdOut("sudo -u admin dpnctl stop sched")
output = cmdOut("sudo -u admin dpnctl status sched 2>&1")
while output.split('\n')[-2].split()[-1]!= 'down.':
question = """couldn't stop the Backup scheduler
please try manually and when done Press yes to continue or press no to quit"""
if not query_yes_no(question): sys.exit()
output = cmdOut("sudo -u admin dpnctl status sched 2>&1")
printBoth("Backup and Maintenence schedulers are down")
message ="""
##################################################################
# End stopBackupMaintSched #
##################################################################
"""
printLog(message)
############### End stopBackupMaintSched() ###############
|
abodup/Avamar-Upgrade-Tasks
|
upgrade_tasks/stopBackupMaintSched.py
|
stopBackupMaintSched.py
|
py
| 2,022 |
python
|
en
|
code
| 1 |
github-code
|
6
|
29510374823
|
import re
import pandas as pd
import fool
from copy import copy
from starter_code1.NER.ner01 import *
test_data = pd.read_csv('../data/info_extract/test_data.csv', encoding='gb2312', header=0)
# print(test_data.head())
test_data['ner'] = None
ner_id = 1001
ner_dict_new = {} # stores all entities (name -> id)
ner_dict_reverse_new = {} # stores all entities (id -> name)
for i in range(len(test_data)):
sentence = copy(test_data.iloc[i, 1])
    # TODO: call fool to run entity recognition and get the words and ners results
words, ners = fool.analysis(sentence)
# print(words)
# print(ners)
ners[0].sort(key=lambda x: x[0], reverse=True)
for start, end, ner_type, ner_name in ners[0]:
if ner_type == 'company' or ner_type == 'person':
# ner_dict_new
lst = main_extract(ner_name, stop_word, d_4_delete, d_city_province)
            company_main_name = ''.join(lst) # extract the main part of the company name, so companies sharing the same main part are unified into one entity
if company_main_name not in ner_dict_new:
ner_dict_new[company_main_name] = ner_id
ner_dict_reverse_new[ner_id] = company_main_name
ner_id += 1
sentence = sentence[:start] + ' ner_' + str(ner_dict_new[company_main_name]) + '_ ' + sentence[end:]
test_data.iloc[i, -1] = sentence
X_test = test_data[['ner']]
# process the train data: run entity recognition with the open-source tool and store entities via the entity-unification function
train_data = pd.read_csv('../data/info_extract/train_data.csv', encoding='gb2312', header=0)
train_data['ner'] = None
for i in range(len(train_data)):
    # decide whether this is a positive or negative sample
if train_data.iloc[i, :]['member1'] == '0' and train_data.iloc[i, :]['member2'] == '0':
sentence = copy(train_data.iloc[i, 1])
        # TODO: call fool for entity recognition to get the words and ners results
words, ners = fool.analysis(sentence)
ners[0].sort(key=lambda x: x[0], reverse=True)
for start, end, ner_type, ner_name in ners[0]:
            # TODO: call the entity-unification function and store the unified entity
            # then increment ner_id
if ner_type == 'company' or ner_type == 'person':
company_main_name = ''.join(
                    main_extract(ner_name, stop_word, d_4_delete, d_city_province)) # extract the company's main name
if company_main_name not in ner_dict_new:
ner_dict_new[company_main_name] = ner_id
ner_dict_reverse_new[ner_id] = company_main_name
ner_id += 1
                # replace the entity name in the sentence with its id
sentence = sentence[:start] + ' ner_' + str(ner_dict_new[company_main_name]) + '_ ' + sentence[end:]
train_data.iloc[i, -1] = sentence
else:
        # also replace the already-labelled entities of positive training samples with their ids
sentence = copy(train_data.iloc[i, :])['sentence']
for company_main_name in [train_data.iloc[i, :]['member1'], train_data.iloc[i, :]['member2']]:
            # TODO: call the entity-unification function and store the unified entity
            # then increment ner_id
company_main_name = ''.join(
                main_extract(company_main_name, stop_word, d_4_delete, d_city_province)) # extract the company's main name
if company_main_name not in ner_dict_new:
ner_dict_new[company_main_name] = ner_id
ner_dict_reverse_new[ner_id] = company_main_name
ner_id += 1
            # replace the entity name in the sentence with its id
sentence = re.sub(company_main_name, ' ner_%s_ ' % (str(ner_dict_new[company_main_name])), sentence)
train_data.iloc[i, -1] = sentence
y = train_data.loc[:, ['tag']]
train_num = len(train_data)
X_train = train_data[['ner']]
# concatenate train and test to extract features together
# X = pd.concat([X_train, X_test])
# X.to_csv('./x.csv', index=False)
# print(X)
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
import numpy as np
# TODO: define the parameters to search over
paramaeters = {'C': np.logspace(-3, 3, 7)}
# TODO: choose the model
lr = LogisticRegression()
# TODO: use GridSearchCV
clf = GridSearchCV(lr, paramaeters, cv=5)
clf.fit(X_train, y)
# TODO: classify the test data
predict =clf.predict(X_test)
predict_prob = clf.predict_proba(X_test)
print(predict)
print(predict_prob)
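# --- Illustrative sketch (not part of the original script) ---
# LogisticRegression cannot be fitted on raw sentence strings; a typical next
# step is to turn the "ner" column into bag-of-words / TF-IDF features first.
# The pipeline below is one possible way to do that (names are made up).
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline

def build_relation_classifier():
    return Pipeline([
        ("tfidf", TfidfVectorizer(ngram_range=(1, 2))),
        ("clf", LogisticRegression(max_iter=1000)),
    ])
# example (hypothetical): build_relation_classifier().fit(X_train["ner"], y["tag"])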
|
jiangq195/tanxin
|
starter_code1/NER/ner02.py
|
ner02.py
|
py
| 4,477 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25549551579
|
import logging
from core.connect_db import connect_db
from logger.logger import configLogger
from settings.settings import load_settings
logger = logging.getLogger()
class BaseFetcher(object):
def __init__(self):
super(BaseFetcher, self).__init__()
configLogger()
self._connect_to_db()
def run(self):
running = True
while running:
try:
self._run()
except Exception as e:
logger.error('Got error while running : %r' % e)
running = False
raise
def _run(self):
pass
def _connect_to_db(self):
settings = load_settings()
mongo_config = settings['dbs']['mongo']
con = connect_db(**mongo_config)
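# --- Illustrative subclass sketch (not part of the original file) ---
# BaseFetcher leaves _run() empty and run() keeps calling it until it raises,
# so concrete fetchers are expected to override _run(). DummyFetcher below is
# a made-up example of that pattern.
class DummyFetcher(BaseFetcher):
    def _run(self):
        logger.info("fetching one batch of articles")
        # a real fetcher would query an API or RSS feed and store results here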
|
cipriantruica/news_diffusion
|
news-spreading-master/fetchers/base_fetcher.py
|
base_fetcher.py
|
py
| 774 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72478034429
|
"""HartPro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.core.paginator import Paginator
from django.shortcuts import render
from art.models import Tag,Art
import json
from user import helper
import xadmin as admin
def toIndex(request):
tags1 = Tag.objects.all()
    # locals() turns the current function's local variables into a dict of key-value pairs
#{'request':request,'tags':tags}
tags = []
for tag in tags1:
        # check whether this tag has any articles; if so, add it to tags
if Art.objects.filter(tag=tag):
tags.append(tag)
    # annotate adds a field to every tag object (Count('art') counts the articles under each tag)
    #
    # read the tag id from the query string
tag_id = request.GET.get('tag')
if tag_id:
tag_id = int(tag_id)
        arts = Art.objects.filter(tag_id=tag_id) # exclude() would drop the rows matching the condition
else:
arts = Art.objects.all()
    # # load all articles
# arts = Art.objects.all()
    # paginate the articles
    paginator = Paginator(arts,8) # paginator
page = request.GET.get('page')
    page = int(page) if page else 1 # read the page parameter from the request; default to 1 if missing
    pager = paginator.page(page) # get the data for the current page
    # get the logged-in user's info
login_user= helper.getLoginInfo(request)
return render(request,'index.html',locals())
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^ueditor/', include('DjangoUeditor.urls')),
url(r'^user/',include('user.urls')),
url(r'^art/',include('art.urls')),
url(r'^$', toIndex),
]
|
cjxxu/A_Fiction_web
|
HartPro/urls.py
|
urls.py
|
py
| 2,203 |
python
|
en
|
code
| 1 |
github-code
|
6
|
22034953643
|
import pandas as pd
import s3fs
def main(event = None, context = None):
print("Start running LinkedInScraper")
values = [['Atreish Ramlakhan',
'New York, New York, United States',
'Katz School at Yeshiva University',
'Graduate Teaching Assistant',
'https://www.linkedin.com/company/16181365/'],
['Yuxiao (Henry) Shen',
'New York, New York, United States',
'The AAT Project (America’s Amazing Teens, LLC)',
'Full Stack PHP Web Developer',
'https://www.linkedin.com/search/results/all/?keywords=The+AAT+Project+%28America%E2%80%99s+Amazing+Teens%2C+LLC%29'],
['Shichao Zhou',
'New York, New York, United States',
'S&P Global Market Intelligence · Internship',
'Data Analyst',
'https://www.linkedin.com/company/162892/'],
['Mahlet Melese', 'New York, New York, United States', None, None, None]]
df = pd.DataFrame(values,columns = [["Full Name", "Location", "Most Recent Company", 'Job Title', 'Company Url']])
###LOAD THE FILE INTO S3####
# prepare csv file name
pathname = 'ia-final2022-csv/'#specify location of s3:/{my-bucket}/
filenames = f"{pathname}linkedIn_info.csv" #name of the filepath and csv file
#encoding must be adjusted to accommodate abnormal characters. Use s3fs to write to S3 bucket
print("Start adding LinkedIn data to csv")
byte_encoded_df = df.to_csv(None, index=False).encode() #encodes file as binary
s3 = s3fs.S3FileSystem(anon=False)
with s3.open(filenames, 'wb') as file:
file.write(byte_encoded_df) #writes byte-encoded file to s3 location
#print success message
print("Successfull uploaded file to location:"+str(filenames))
print("Complete running LinkedInScraper")
|
sczhou0705/IA-FinalProject-YUconnect
|
LambdaDeployment/Code/LinkedInScraper.py
|
LinkedInScraper.py
|
py
| 1,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39612347075
|
##
## EPITECH PROJECT, 2019
## 108trigo_2019
## File description:
## utils.py
##
def printhelp():
print("USAGE\n"
"\t./108trigo fun a0 a1 a2....\n"
"\n"
"DESCRIPTION\n"
"\tfun\tfunction to be applied,"
' among at least "EXP", "COS", "SIN", "COSH" and "SINH"\n'
          '\tai\tcoefficients of the matrix')
exit(0)
def print_matrix(matrix):
for i in range(len(matrix)):
for j in range(len(matrix[i])):
print("%.2f%c" % (matrix[i][j], '\t' if (j != len(matrix[i]) - 1) else '\n'), end="")
|
clementfleur/Epitech_Project
|
tek1/Mathématique/108trigo_2019/utils.py
|
utils.py
|
py
| 571 |
python
|
en
|
code
| 2 |
github-code
|
6
|
42307014223
|
import os
import sys
import time
from acbbs.drivers.ate.ClimCham import ClimCham
from acbbs.drivers.ate.DCPwr import DCPwr
from acbbs.drivers.ate.PwrMeter import PwrMeter
from acbbs.drivers.ate.RFSigGen import RFSigGen
from acbbs.drivers.ate.RFSigGenV import RFSigGenV
from acbbs.drivers.ate.SpecAn import SpecAn
from acbbs.drivers.ate.Swtch import Swtch
from acbbs.tools.log import get_logger
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError, DuplicateKeyError
import configuration
from .drivers.PwrMeterCal import PowerMeterCal
from .drivers.RFSigGenCal import RFSigGenCal
logger = get_logger('calib')
CHANNELS = configuration.CHANNELS
INPUTS = configuration.INPUTS
OUTPUTS = configuration.OUTPUTS
CONF_PATH = configuration.CONF_PATH
LIST_PATH = configuration.LIST_PATH
class NetworkEquipment(object):
def __init__(self, simu):
logger.info('class Ping init')
self.PwrMeter = PwrMeter(simulate=simu)
self.SpecAn = SpecAn(simulate=simu)
self.RFSigGen = RFSigGen(simulate=simu)
self.RFSigGenV = RFSigGenV(simulate=simu)
self.Swtch = Swtch(simulate=simu)
self.ClimCham = ClimCham(simulate=simu)
self.DCPwr = DCPwr(simulate=simu)
self.PwrMeterCal = PowerMeterCal(simulate=simu)
self.RFSigGenCal = RFSigGenCal(simulate=simu)
self.get_ip()
def get_ip(self):
ip_specAn = self.SpecAn.SpecAnConf['ip']
ip_sigGen = self.RFSigGen.sigGenConf['ip']
ip_pwMeter = self.PwrMeter.PwrMeterConf['ip']
ip_sigGenV = self.RFSigGenV.sigGenConf['ip']
ip_ClimCham = self.ClimCham.dcConf['ip']
ip_dc1 = self.DCPwr.dcConf['powerDevice1-ip']
ip_dc2 = self.DCPwr.dcConf['powerDevice2-ip']
self.listIP = {'rx': {'RFSigGen': ip_sigGen, 'RFSigGenV': ip_sigGenV},
'tx': {'PwrMeter': ip_pwMeter, 'SpecAn': ip_specAn},
'DC': {'DC1': ip_dc1, 'DC2': ip_dc2},
'Chamber': {'climCham': ip_ClimCham},
}
def ping_one(self, IP):
response = os.system("ping -c 1 " + IP)
if response == 0:
logger.info("Network Equipement Active at adresse:{0}".format(IP))
return 0
else:
logger.error('Network Equipement Error : {0}'.format(IP))
return 1
def check_one_instrument(self, instrum):
global result
for mode, instrums in self.listIP.items():
if instrum in instrums.keys():
result = self.ping_one(self.listIP[mode][instrum])
break
return result
def ping_all(self):
list_pingReturn = self.listIP
for mode, instrums in self.listIP.items():
for instrum, ip in instrums.items():
list_pingReturn[mode][instrum] = self.ping_one(ip)
return list_pingReturn
def check_all_instruments(self):
listPing = self.ping_all()
        # ping_all returns a nested dict, so flatten the per-instrument result codes
        if all(code == 0 for instrums in listPing.values() for code in instrums.values()):
            return 0
        else:
            return 1 # TODO: return a table indicating which instrument is disconnected
class database(object):
def __init__(self):
self.__openDataBase()
def __openDataBase(self):
# get server, port and database from json configuration file
server = configuration.DATABASE_IP
port = configuration.DATABASE_PORT
database = configuration.DATABASE_NAME_CALIB
maxSevSelDelay = configuration.DATABASE_MAXDELAY
try:
# open MongoDB server
self.client = MongoClient(server, int(port), serverSelectionTimeoutMS=maxSevSelDelay)
# check if connection is well
self.client.server_info()
except ServerSelectionTimeoutError as err:
print("{0}".format(err))
exit(0)
# open MongoDB database
self.db = self.client[database]
def get_available_collection(self):
return self.db.list_collection_names()
def get_collection(self, collection):
if collection not in self.get_available_collection():
print("Error: conf {0} does not exist. You can list available collection with --list".format(collection))
return self.db[collection].find({})
def writeDataBase(self, document, collection):
if collection in self.get_available_collection():
print("Error: conf {0} exist. You can delete it with --delete {0}".format(collection))
self.db_collection = self.db[collection]
try:
self.db_collection.insert_one(document).inserted_id
except DuplicateKeyError as err:
print("{0}".format(err))
def delete_collection(self, collection):
if collection not in self.get_available_collection():
print("Error: conf {0} does not exist. You can list available collection with --list".format(collection))
self.db.drop_collection(collection)
class MatrixCal(object):
def __init__(self):
self.calibFile = {"date": "", "loss": {}}
self.db = database()
def get_cal(self, date):
for doc in self.db.get_collection(date):
calibFile = doc
return calibFile
def getlossPath(self, port_in, port_out, date):
cal = self.get_cal(date)
data = cal[port_in][port_out]
return data
def write_cal(self, data):
self.calibFile["loss"] = data
self.calibFile["date"] = time.strftime("%Y-%m-%d %H:%M:%S")
self.db.writeDataBase(self.calibFile["loss"], self.calibFile["date"])
def readPath_loss(self, port_in, port_out):
return self.data["loss"][port_in][port_out]
def del_cal(self, cal_name):
self.db.delete_collection(cal_name)
def history(self):
return self.db.get_available_collection()
class Calibration(object):
def __init__(self, simu):
self.equipement = NetworkEquipment(simu=simu)
self.channels = CHANNELS
self.simu = simu
self.iteration = 0
self.totalProgress = 0
self.paths = LIST_PATH
self.message = ""
self.response = 0
self.matrixCal = MatrixCal()
self.loss = {INPUTS[4]: {}, INPUTS[2]: {}, INPUTS[3]: {}, INPUTS[0]: {}, INPUTS[1]: {}, INPUTS[5]: {}}
self.delta = {}
self.pathlist = list()
for i in self.paths.keys():
self.pathlist.append(i)
def calibrate(self, tab_freq, pwr):
self.tab_freq = tab_freq
self.OUTPUT_POWER_CALIBRATION = int(pwr)
self.totalProgress = (len(INPUTS) - 2 + len(OUTPUTS)) * len(tab_freq)
print('calibration start')
self.SMBCal()
self.SMBVCal()
self.PwrMeterCal()
self.FSWCal()
self.NoiseCal()
self.makeDelta()
self.makeMatrixCal()
self.matrixCal.write_cal(self.loss)
def SMBCal(self):
loss = configuration.PORT_SMB
pathJ4Jx = self.pathlist[1]
# calibration of J4_20dB - J9
print("calibration of SMB, plug the power meter cal to J9")
while self.response == 0:
self.message = " calibration of SMB, plug the power meter cal to J9 "
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ4Jx]["sw3"], sw4=self.paths[pathJ4Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.RFSigGen.freq = freq
self.equipement.RFSigGen.power = self.OUTPUT_POWER_CALIBRATION
self.equipement.RFSigGen.status = 1
time.sleep(1)
loss["J4_20dB"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - self.equipement.PwrMeterCal.power(nbr_mes=1)
self.equipement.RFSigGen.status = 0
self.iteration += 1
self.loss["J4_20dB"]["J9"] = loss["J4_20dB"]
# calibration of J4 - Jx
for channel in self.channels:
print(" plug the power meter cal to J{0}".format(channel + 8))
while self.response == 0:
self.message = " plug the power meter cal to {0}".format(channel + 8)
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
port = pathJ4Jx.replace("Jx", "J" + str(channel + 8))
self.equipement.Swtch.setSwitch(sw1=channel, sw3=self.paths[pathJ4Jx]["sw3"],sw4=self.paths[pathJ4Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.RFSigGen.freq = freq
self.equipement.RFSigGen.power = self.OUTPUT_POWER_CALIBRATION
self.equipement.RFSigGen.status = 1
time.sleep(1)
loss["J4"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - self.equipement.PwrMeterCal.power(nbr_mes=1)
self.equipement.RFSigGen.status = 0
self.iteration += 1
self.loss["J4"]["J" + str(channel + 8)] = loss["J4"]
def SMBVCal(self):
loss = configuration.PORT_SMBV
pathJ3Jx = self.pathlist[3]
print(" calibration of SMBV, plug the power meter of the cal to J9")
while self.response == 0:
self.message = "plug the power meter cal to J9 "
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
# calibration of J3 - J9
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ3Jx]["sw3"], sw4=self.paths[pathJ3Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.RFSigGenV.freq = freq
self.equipement.RFSigGenV.power = self.OUTPUT_POWER_CALIBRATION
# self.equipement.PowerMeterCal = freq
self.equipement.RFSigGenV.status = 1
time.sleep(1)
loss["J3"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - self.equipement.PwrMeterCal.power(nbr_mes=1)
self.equipement.RFSigGenV.status = 0
self.iteration += 1
self.loss["J3"]["J9"] = loss["J3"]
def PwrMeterCal(self):
loss = configuration.PORT_PowerMeter
pathJ2Jx = self.pathlist[5]
print(" calibration of Power Meter, plug the RF generator cal to J9")
while self.response == 0:
self.message = "plug the RF generator cal to J9"
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
# calibration of J2 - J9
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ2Jx]["sw3"], sw4=self.paths[pathJ2Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.PwrMeter.freq = freq
time.sleep(1)
loss["J2"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - self.equipement.PwrMeter.power
self.iteration += 1
self.loss["J2"]["J9"] = loss["J2"]
def FSWCal(self):
loss = configuration.PORT_FSW
pathJ2Jx = self.pathlist[4]
print(" calibration of FSW, plug the RF generator cal to J9")
while self.response == 0:
self.message = "plug the RF generator cal to J9"
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
# calibration of J5 - J9
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ2Jx]["sw3"], sw4=self.paths[pathJ2Jx]["sw4"])
for freq in self.tab_freq:
self.equipement.SpecAn.freqSpan = 10000000
pic = self.equipement.SpecAn.markerPeakSearch()
time.sleep(1)
loss["J5"][str(freq)] = self.OUTPUT_POWER_CALIBRATION - pic[1]
self.iteration += 1
self.loss["J5"]["J9"] = loss["J5"]
######### NON CODE ################
def NoiseCal(self):
loss = configuration.PORT_NOISE
pathJ18Jx = self.pathlist[0]
print(" calibration of Noise, plug the RF generator cal to J18 and the power meter to J9")
while self.response == 0:
self.message = "plug the RF generator cal to J18 and the power meter to J9"
time.sleep(0.8)
print('wait')
self.message = ""
self.response = 0
# calibration of J5 - J9
self.equipement.Swtch.setSwitch(sw1=1, sw3=self.paths[pathJ18Jx]["sw3"], sw4=self.paths[pathJ18Jx]["sw4"])
for freq in self.tab_freq:
loss["J18"][str(freq)] = self.OUTPUT_POWER_CALIBRATION
self.iteration += 1
self.loss["J18"]["J9"] = loss["J18"]
def makeDelta(self):
for channel in self.channels:
Jout = "J" + str(channel + 8)
delta_freq = {}
self.delta[Jout] = {}
for freq in self.tab_freq:
delta_freq[str(freq)] = self.loss["J4"][Jout][str(freq)] - self.loss["J4"]["J9"][str(freq)]
self.delta[Jout] = delta_freq
def makeMatrixCal(self):
for Jin in self.loss.keys():
for channel in self.channels[1:]:
Jout = "J" + str(channel + 8)
self.loss[Jin][Jout] = {}
estimate_loss = {}
for freq in self.tab_freq:
estimate_loss[str(freq)] = self.loss[Jin]["J9"][str(freq)] + self.delta[Jout][str(freq)]
self.loss[Jin][Jout] = estimate_loss
|
Wonters/IHMweb
|
calib/tasks.py
|
tasks.py
|
py
| 13,334 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23541886221
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 15:37:26 2022
@author: jeros
Hu moments analysis
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import PercentFormatter
def plotter(huN = 1, bananas = None,oranges = None,lemons = None):
# if bananas is not None:
# plt.hist(bananas[0,:],bins, alpha=0.5, label='b',weights = weights_b )
# if oranges is not None:
# plt.hist(oranges[0,:],bins, alpha=0.5, label='o',weights = weights_o)
'''Hu moment number histogram'''
if huN == 0:
bins = np.linspace(2.85,3.22,100)
if huN == 1:
bins = np.linspace(5.5,12.5,100)
if huN == 2:
bins = np.linspace(10,16,100)
if huN == 3:
bins = np.linspace(9.8,19,100)
if huN == 4:
bins = np.linspace(-35,35,100)
if huN == 5:
bins = np.linspace(-25,25,100)
if huN == 6:
bins = np.linspace(-35,35,100)
#plt.hist([bananas[huN,:], oranges[huN,:],lemons[huN,:]],label=['B', 'O','L'])
plt.hist([bananas[huN,:], oranges[huN,:],lemons[huN,:]], bins,label=['B', 'O','L'],density = True)
plt.title('Hu'+str(huN))
'''Hu moment number 2 histogram'''
bins = np.linspace(10,16,100)
plt.legend(loc='upper right')
plt.autoscale(enable=True, axis='x', tight=True)
#plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
plt.show()
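# --- Illustrative usage sketch (not part of the original file) ---
# plotter() expects arrays shaped (7, n_samples), one row per Hu moment; the
# random data below only exists to make the call runnable.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    fake_bananas = rng.normal(9, 1, size=(7, 50))
    fake_oranges = rng.normal(10, 1, size=(7, 50))
    fake_lemons = rng.normal(11, 1, size=(7, 50))
    plotter(huN=1, bananas=fake_bananas, oranges=fake_oranges, lemons=fake_lemons)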
|
jeroserpa/FruitClassifier
|
histogram_analisys.py
|
histogram_analisys.py
|
py
| 1,459 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7681527858
|
#/usr/bin/python3
from pwn import *
context.arch = 'i386'
if args.REMOTE:
con = remote ('chals.damctf.xyz', 31312)
else:
con = process('./cookie-monster')
# Exploit format string vulnerability to leak stack canary.
def leak_canary():
con.recvuntil(b'Enter your name:')
con.sendline(b'%15$p')
con.recvuntil(b'Hello ')
canary = con.recvline().strip()
con.recvuntil(b'What would you like to purchase?')
return int(canary, 16)
def main():
# ASLR is enabled on the target system. Leak address of system
# in stage 1 to calculate the base address of libc and perform
# a classical ret2libc attack in stage 2.
# Stage 1
main = 0x08048669
plt_puts = 0x08048430
got_system = 0x0804a020
canary = leak_canary()
# Exploit buffer overflow vulnerability to call puts(&system)
# and return to main to set up stage 2.
payload = flat(
b'A' * 32,
canary,
b'B' * 12,
plt_puts,
main,
got_system
)
con.sendline(payload)
con.recvuntil(b'Have a nice day!\n')
libc_system = u32(con.recvline().strip()[:4].ljust(4, b'\x00'))
# Stage 2
libc_offset_system = 0x03ce10
libc_offset_str_binsh = 0x17b88f
libc_base = libc_system - libc_offset_system
libc_str_binsh = libc_base + libc_offset_str_binsh
canary = leak_canary()
# Exploit buffer overflow vulnerability again to call system("/bin/sh").
payload = flat(
b'A' * 32,
canary,
b'B' * 12,
libc_system,
0xdeadbabe,
libc_str_binsh
)
con.sendline(payload)
con.interactive()
if __name__ == '__main__':
main()
|
dystobic/writeups
|
2021/DAMCTF/cookie-monster/exploit.py
|
exploit.py
|
py
| 1,728 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5308409860
|
N = int(input())
map_list = [[0]*101 for _ in range(101)]
dirs = {0:(1,0), 1:(0,-1), 2: (-1,0), 3: (0,1)}
# d = starting direction / g = generation
for _ in range(N):
x, y, d, g = map(int, input().split())
curve_list = [d]
for _ in range(g):
curve_list += [(i+1)%4 for i in curve_list[::-1]]
map_list[y][x] = 1
for curve in curve_list:
x = x+dirs[curve][0]
y = y+dirs[curve][1]
map_list[y][x] = 1
cnt = 0
for i in range(100):
for j in range(100):
if map_list[i][j] and map_list[i][j+1] and map_list[i+1][j] and map_list[i+1][j+1]:
cnt += 1
print(cnt)
|
louisuss/Algorithms-Code-Upload
|
Python/Baekjoon/Simulation/15685.py
|
15685.py
|
py
| 620 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29010500134
|
import functools
import os
import sys
from typing import Any, Callable, Iterable, Optional, TextIO, Tuple
import click
from click import Command
from click_option_group import MutuallyExclusiveOptionGroup
from . import __version__
from .core import (
CheckHashLineError,
HashFileReader,
HashFileWriter,
ParseHashLineError,
check_hash_line,
generate_hash_line,
)
from .hasher import HashContext, Hasher
from .utils.click import CommandX, PathWithSuffix
from .utils.glob import glob_filters, sorted_path
class ParseHashFileError(ValueError):
def __init__(self, hash_line: str, lineno: int) -> None:
super().__init__(hash_line, lineno)
self.hash_line = hash_line
self.lineno = lineno
class Output:
"""Determine the output mode and provide the output interface."""
def __init__(
self, agg: Optional[str] = None, sep: Optional[bool] = None, null: Optional[bool] = None, sync: bool = False
) -> None:
if (agg and sep) or (agg and null) or (sep and null):
raise ValueError("require exactly one argument")
# Use the null mode by default.
if not (agg or sep or null):
null = True
# Determine the output mode and dump method.
if agg:
self.agg_file = HashFileWriter(agg)
self._dump = self.output_agg
elif sep:
self._dump = self.output_sep
elif null:
self._dump = self.output_null
self.sync = sync
self.maxmtime = 0.0
def close(self) -> None:
try:
agg_file = self.agg_file
except AttributeError:
pass
else:
agg_file.close()
if self.sync:
os.utime(agg_file.name, (self.maxmtime, self.maxmtime))
def dump(self, hash_line: str, hash_path: str, path: str) -> None:
self._dump(hash_line, hash_path, path)
def output_agg(self, hash_line: str, hash_path: str, path: str) -> None:
self.agg_file.write_hash_line(hash_line)
if self.sync:
mtime = os.path.getmtime(path)
self.maxmtime = max(self.maxmtime, mtime)
def output_sep(self, hash_line: str, hash_path: str, path: str) -> None:
with HashFileWriter(hash_path) as f:
f.write_hash_line(hash_line)
if self.sync:
mtime = os.path.getmtime(path)
os.utime(hash_path, (mtime, mtime))
def output_null(self, hash_line: str, hash_path: str, path: str) -> None:
pass
class Gethash:
"""Provide uniform interface for CLI scripts."""
stdout: TextIO
stderr: TextIO
glob_mode: int
glob_type: str
inplace: bool
root: Optional[str]
start: Optional[int]
stop: Optional[int]
dir_ok: bool
def __init__(self, ctx: HashContext, **kwargs: Any) -> None:
self.ctx = ctx
self.sync = kwargs.pop("sync", False)
self.suffix = kwargs.pop("suffix", ".sha")
self.stdout = kwargs.pop("stdout", sys.stdout)
self.stderr = kwargs.pop("stderr", sys.stderr)
self.glob_mode = kwargs.pop("glob", 1)
self.glob_type = kwargs.pop("type", "a")
# Determine the path format.
self.inplace = kwargs.pop("inplace", False)
self.root = kwargs.pop("root", None)
# Determine the output mode.
agg = kwargs.pop("agg", None)
sep = kwargs.pop("sep", None)
null = kwargs.pop("null", None)
self.output = Output(agg, sep, null, sync=self.sync)
# Prepare arguments and construct the hash function.
self.start = kwargs.pop("start", None)
self.stop = kwargs.pop("stop", None)
self.dir_ok = kwargs.pop("dir", False)
tqdm_args = {
"file": self.stderr,
"ascii": kwargs.pop("tqdm_ascii", False),
"disable": kwargs.pop("tqdm_disable", False),
"leave": kwargs.pop("tqdm_leave", False),
}
self.hasher = Hasher(ctx, tqdm_args=tqdm_args)
def __call__(self, files: Iterable[str], *, check: bool) -> None:
if check:
self.check_hash(files)
else:
self.generate_hash(files)
def __enter__(self) -> "Gethash":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def close(self) -> None:
self.output.close()
def generate_hash(self, patterns: Iterable[str]) -> None:
for path in self.glob_function(patterns):
try:
root = self.check_root(path)
hash_line = generate_hash_line(path, self.hash_function, root=root)
hash_path = path + self.suffix
self.output.dump(hash_line, hash_path, path)
except Exception as e:
self.echo_exception(path, e)
else:
# The hash line already has a newline.
self.echo(hash_line, nl=False)
def check_hash(self, patterns: Iterable[str]) -> None:
for hash_path in self.glob_function(patterns):
try:
self._check_hash(hash_path)
except ParseHashFileError as e:
# Strip newline for pretty printing.
hash_line = e.hash_line.rstrip("\n")
msg = f"[ERROR] invalid hash '{hash_line}' in '{hash_path}' at line {e.lineno}"
self.echo_error(msg, fg="white", bg="red")
except Exception as e:
self.echo_exception(hash_path, e)
def _check_hash(self, hash_path: str) -> None:
maxmtime = 0.0
for i, hash_line in enumerate(HashFileReader(hash_path)):
try:
root = self.check_root(hash_path)
path = check_hash_line(hash_line, self.hash_function, root=root)
maxmtime = max(maxmtime, os.path.getmtime(path))
except ParseHashLineError as e:
raise ParseHashFileError(e.hash_line, i)
except CheckHashLineError as e:
self.echo(f"[FAILURE] {e.path}", fg="red")
else:
self.echo(f"[SUCCESS] {path}", fg="green")
if self.sync:
os.utime(hash_path, (maxmtime, maxmtime))
def check_root(self, path: str) -> Optional[str]:
if self.inplace:
return os.path.dirname(path)
return self.root
def glob_function(self, paths: Iterable[str]) -> Iterable[str]:
return sorted_path(
glob_filters(paths, mode=self.glob_mode, type=self.glob_type, recursive=True, user=True, vars=True)
)
def hash_function(self, path: str) -> bytes:
return self.hasher(path, self.start, self.stop, dir_ok=self.dir_ok)
def echo(self, msg: str, **kwargs: Any) -> None:
click.secho(msg, file=self.stdout, **kwargs)
def echo_error(self, msg: str, **kwargs: Any) -> None:
click.secho(msg, file=self.stderr, **kwargs)
def echo_exception(self, path: str, exc: Exception) -> None:
msg = f"[ERROR] {path}\n\t{type(exc).__name__}: {exc}"
click.secho(msg, file=self.stderr, fg="red")
def script_main(ctx: HashContext, files: Tuple[str, ...], **options: Any) -> None:
"""Execute the body for the main function."""
no_stdout = options.pop("no_stdout", False)
no_stderr = options.pop("no_stderr", False)
stdout = open(os.devnull, "w") if no_stdout else sys.stdout # noqa
stderr = open(os.devnull, "w") if no_stderr else sys.stderr # noqa
check = options.pop("check", False)
with Gethash(ctx, stdout=stdout, stderr=stderr, **options) as gethash:
gethash(files, check=check)
def gethashcli(command_name: str, display_name: str, **extras: Any) -> Callable[[Callable], Command]:
"""Apply click decorators to the main function."""
suffix = extras.pop("suffix", "." + command_name.replace("-", "_"))
doc = extras.pop("doc", None)
def decorator(func: Callable) -> Command:
if doc is not None:
func.__doc__ = doc
context_settings = {"help_option_names": ["-h", "--help"], "max_content_width": 120}
path_format = MutuallyExclusiveOptionGroup("Path Format")
output_mode = MutuallyExclusiveOptionGroup("Output Mode")
@click.command(command_name, cls=CommandX, context_settings=context_settings, no_args_is_help=True)
@click.argument("files", nargs=-1)
@click.option(
"-c",
"--check",
is_flag=True,
help=f"Read {display_name} from FILES and check them.",
)
@click.option(
"-y",
"--sync",
is_flag=True,
help="Update mtime of hash files to the same as data files.",
)
@click.option(
"-g",
"--glob",
type=click.IntRange(0, 2),
metavar="[0|1|2]",
default=1,
show_default=True,
help="Set glob mode. If ``0``, disable glob pathname pattern; if ``1``, "
"resolve ``*`` and ``?``; if ``2``, resolve ``*``, ``?`` and ``[]``.",
)
@click.option(
"-t",
"--type",
type=click.Choice(["a", "d", "f"]),
default="a",
show_default=True,
help="Set file type. If ``a``, include all types; if ``d``, include "
"directories; if ``f``, include files.",
)
@path_format.option("-i", "--inplace", is_flag=True, help="Use basename in checksum files.")
@path_format.option(
"-z",
"--root",
type=click.Path(exists=True, file_okay=False),
help="The path field in checksum files is relative to the root directory.",
)
@output_mode.option(
"-o",
"--agg",
type=PathWithSuffix(suffix=suffix, dir_okay=False),
help="Set the aggregate output file.",
)
@output_mode.option("-s", "--sep", is_flag=True, help="Separate output files.")
@output_mode.option(
"-n",
"--null",
is_flag=True,
help="Do not output to files. This is the default output mode.",
)
@click.option("--start", type=click.IntRange(min=0), help="The start offset of files.")
@click.option("--stop", type=click.IntRange(min=0), help="The stop offset of files.")
@click.option(
"-d",
"--dir",
is_flag=True,
help="Allow checksum for directories. Just xor each checksum of files in a given directory.",
)
@click.option("--no-stdout", is_flag=True, help="Do not output to stdout.")
@click.option("--no-stderr", is_flag=True, help="Do not output to stderr.")
@click.option("--tqdm-ascii", type=click.BOOL, default=False, show_default=True)
@click.option("--tqdm-disable", type=click.BOOL, default=False, show_default=True)
@click.option("--tqdm-leave", type=click.BOOL, default=False, show_default=True)
@click.version_option(__version__, "-V", "--version", prog_name=command_name)
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
kwargs.setdefault("suffix", suffix)
return func(*args, **kwargs)
return wrapper
return decorator
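# --- Illustrative usage sketch (not part of the original module) ---
# A concrete CLI script would pair gethashcli() with script_main() roughly as
# below; hashlib.sha256 stands in for whichever HashContext the real scripts
# pass, so treat the details as an assumption.
if __name__ == "__main__":
    import hashlib

    @gethashcli("sha256", "SHA256", doc="Generate or check SHA256 checksums.")
    def _main(files: Tuple[str, ...], **options: Any) -> None:
        script_main(hashlib.sha256, files, **options)

    _main()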
|
xymy/gethash
|
src/gethash/script.py
|
script.py
|
py
| 11,381 |
python
|
en
|
code
| 2 |
github-code
|
6
|
44075659516
|
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
import pickle
import os
#put url here
#example_url= "https://archive.thehated3.workers.dev/0:/Station%20X%20-%20The%20Complete%20Cyber%20Security%20Course!/"
durl= "https://archive.thehated3.workers.dev/0:/Station%20X%20-%20The%20Complete%20Cyber%20Security%20Course!/"
#put local path to download here, leave '.' to download in current directory
#example_path="./Station_X_The_Complete_Cyber_Security_Course"
dpath="."
count=0
rcount=0
failed_links=[]
failed_paths=[]
def download(url,path):
global count, failed_links, failed_paths
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.add_argument("--headless")
# brower = webdriver.Firefox(firefox_options=fireFoxOptions)
driver = webdriver.Firefox(executable_path="./geckodriver.exe",options=fireFoxOptions)
driver.get(url)
time.sleep(3)
previous_height=driver.execute_script('return document.body.scrollHeight')
while True:
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
time.sleep(3)
new_height=driver.execute_script('return document.body.scrollHeight')
if new_height==previous_height:
break
previous_height=new_height
try:
element = WebDriverWait(driver,100).until(EC.presence_of_element_located((By.CLASS_NAME, "list-group-item")))
except:
count+=1
print(f"FILE NOT DOWNLOADED:\npath: {path}\n count:{count}")
print("TIMEOUT not LOADING ELEMENTS BY CLASS NAME list-grout-items EXCEPTION")
return
tuna=driver.find_elements_by_class_name("list-group-item")
dlinks=[]
for i in tuna:
folder=i.get_attribute('href')
if folder==None:
target_urls=i.find_elements_by_css_selector('a')
furl=target_urls[1].get_attribute('href')
dlinks.append(furl)
else:
fname=i.text
formated_folder_name=fname.replace(" ","-")
new_path=path+"/"+formated_folder_name
download(folder,new_path)
for x in dlinks:
# print(x)
# cmd=f'wget -c -P '+'"'+f'{path}'+'" '+'"'+ f'{x}'+'"'
print(f"****DOWNLOADING IN PATH****: {path}\nfiles_skipped_till_now={count} \n\n")
failure=os.system(f"""wget -c -P "{path}" "{x}" """)
if failure != 0:
count+=1
failed_links.append(x)
failed_paths.append(path)
print(f"FILE NOT DOWNLOADED:\npath: {path}\nfile: {x}\n count:{count}")
driver.close()
def direct_download(dd_url,dd_path):
rfc=os.system(f"""wget -c -P "{dd_path}" "{dd_url}" """)
return rfc
def retry():
global rcount
new_links=[]
new_paths=[]
rcount=0
try:
failed_file_open=open("failed_links_info.pickle","rb")
except:
print('failed_links_info NOT Available, ABORTING...')
return
get_failed=pickle.load(failed_file_open)
fetch_links=get_failed[0]
fetch_paths=get_failed[1]
failed_file_open.close()
link_size=len(fetch_links)
for k in range(link_size):
l=fetch_links[k]
p=fetch_paths[k]
status=direct_download(l,p)
if status!=0:
rcount+=1
new_links.append(l)
new_paths.append(p)
print(f"FILE NOT DOWNLOADED:\npath: {p}\nfile: {l}\n count:{rcount}")
print(f"Number of files not downloaded: {rcount}")
nf=len(new_paths)
o_again=open("failed_links_info.pickle","wb")
m_list=[new_links,new_paths]
pickle.dump(m_list,o_again)
o_again.close()
for e in range(nf):
ww=new_paths[e]
tt=new_links[e]
print(f"{ww}\n{tt}\n\n")
if __name__=='__main__':
ui=input("Choose:\n1.Retry failed downloads\n2.Download from new link provided\nChoose either '1' or ('2') :")
if ui==1 or ui=='1':
retry()
else:
download(durl,dpath)
print(f"Number of files not downloaded: {count}")
number_failed=len(failed_paths)
fo=open("failed_links_info.pickle","wb")
combined_list=[failed_links,failed_paths]
pickle.dump(combined_list,fo)
fo.close()
for i in range(number_failed):
a=failed_paths[i]
b=failed_links[i]
print(f"{a}\n{b}\n\n")
user_input=input("Do you want to retry {count} failed downloads? (Y)/N : ")
if user_input.lower()=='n':
pass
else:
retry()
# print(turl)
|
aniket328/workers-dev-download-folders
|
fx.py
|
fx.py
|
py
| 4,837 |
python
|
en
|
code
| 0 |
github-code
|
6
|
386960757
|
import os
from flask import Response,Flask, request
from flask_cors import CORS
from insgraph import util, instagram
def create_app(test_config=None):
"""Create and configure an instance of the Flask application."""
app = Flask(__name__, instance_relative_config=True)
print("zhuangjb flask start.....:"+__name__)
CORS(app)
app.config.from_mapping(
# a default secret that should be overridden by instance config
SECRET_KEY='dev',
# store the database in the instance folder
DATABASE=os.path.join(app.instance_path, 'insgraph.sqlite'),
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.update(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route('/hello')
def hello():
return 'Hello, World!'
@app.before_request
def option_replay():
if request.method =='OPTIONS':
resp = Response('')
print('xxx')
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['Access-Control-Allow-Headers'] = '*'
resp.headers['Access-Control-Request-Method'] = request.headers['Access-Control-Request-Method']
return resp
# @app.after_request
# def set_allow_origin(resp):
# h = resp.headers
# if request.method != 'OPTIONS' and 'Origin' in request.headers:
# h['Access-Control-Allow-Origin'] = request.headers['Origin']
# register the database commands
from insgraph import db
db.init_app(app)
# apply the blueprints to the app
from insgraph import auth, user,case
app.register_blueprint(auth.bp)
app.register_blueprint(user.bp)
app.register_blueprint(case.bp)
app.register_blueprint(instagram.bp)
    # expose the application root under the endpoint name 'index',
    # so url_for('index') resolves to '/'
app.add_url_rule('/', endpoint='index')
return app
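# Usage sketch (assumption: the package is importable as `insgraph`; not part of the original module):
# from insgraph import create_app
# app = create_app()
# app.run(host='0.0.0.0', port=5000)
# Alternatively, running `FLASK_APP=insgraph flask run` lets the Flask CLI pick up create_app() itself.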
|
jiebinzhuang/insgraph-flask
|
insgraph/__init__.py
|
__init__.py
|
py
| 2,301 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30409488540
|
import os
import pytest
import logging
import cocotb
from cocotb.clock import Clock, Timer
from cocotb.binary import BinaryValue
from cocotb.runner import get_runner
from cocotb.triggers import FallingEdge
from cocotbext.uart import UartSource, UartSink
src_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
tests_dir = os.path.dirname(os.path.abspath(__file__))
sim_build = os.path.join(os.path.dirname(os.path.abspath(__file__)), "sim_build", "soc")
@cocotb.test()
async def check_uart_recv(dut):
""" Test that UART is working """
    clock = Clock(dut.clk, 10, units="ns") # Create a 10 ns period clock on port clk
cocotb.start_soon(clock.start()) # Start the clock
log = logging.getLogger(f"check_uart_recv")
dut.RESET.value = BinaryValue('1')
await FallingEdge(dut.clk)
dut.RESET.value = BinaryValue('0')
await FallingEdge(dut.clk)
rxd = UartSource(dut.RXD, baud=115200, bits=8)
txd = UartSink(dut.TXD, baud=115200, bits=8)
await rxd.write(b'ABCDE')
for i in range(int(1e9/115200/10) * 10):
await FallingEdge(dut.clk)
val = await txd.read()
assert val == b'E'
"""
LI(gp, 32'h0200_0000);
ADD(x12,x0,x0);
ADDI(x2,x0,65);
Label(L0_);
LW(x12, gp, 8);
BNE(x12, x2, LabelRef(L0_));
SW(x12, gp, 8);
EBREAK();
"""
@pytest.mark.skip(reason="no way of currently testing this")
def test_runner():
verilog_sources = [os.path.join(src_dir, "main", "soc.sv")]
sim = os.getenv("SIM", "icarus")
runner = get_runner(sim)()
os.makedirs(os.path.abspath(sim_build), exist_ok=True)
with open(os.path.abspath(os.path.join(sim_build, "cmd.f")), 'w') as cmd:
cmd.write('+timescale+1ns/1ps')
runner.build(
verilog_sources=verilog_sources,
toplevel="soc",
defines=["DEFINE=4", "BENCH=1"],
includes=[os.path.join(src_dir, "main")],
extra_args=[
'-s', 'soc',
'-f', os.path.abspath(os.path.join(sim_build, "cmd.f"))
],
build_dir=sim_build
)
runner.test(
python_search=[tests_dir],
toplevel="soc",
py_module="test_soc",
)
|
ryarnyah/zenika-fpga-pres
|
demo/fpga-risc-cpu/src/test/test_soc.py
|
test_soc.py
|
py
| 2,170 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27044024051
|
from tkinter import *
main = Tk()
main.resizable(width=False, height=False)
main.config(bg="#2d3436")
main.title("Disappearing text App")
stop_writing_id = 'after' # store id of the scheduled call to wipe_text
main_text = Label(main, text="Start typing and text will disappear after a few seconds...", fg="#dfe6e9", font=("Helvetica", 18), pady=20, padx=20, bg="#2d3436")
main_text.grid(row=0, column=0)
entry = Text(main, wrap=WORD, relief=FLAT, padx=14, pady=15, fg="#dfe6e9", font=("Helvetica", 14))
entry.grid(row=1, column=0)
entry.configure({"background": "#2d3436"})
entry.focus()
def wipe_text():
entry.delete('1.0', END)
def stop_writing(event):
global stop_writing_id
    main.after_cancel(stop_writing_id) # cancel previous scheduling of wipe_text
    stop_writing_id = main.after(6000, wipe_text) # wait 6 s and then execute wipe_text
entry.bind('<KeyRelease>', stop_writing)
main.mainloop()
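# The <KeyRelease> binding above is a simple debounce: every keystroke cancels the pending wipe
# and schedules a new one 6 seconds out. A generic sketch of the same pattern (hypothetical helper,
# not used by this app):
# def debounce(widget, delay_ms, callback):
#     pending = {'id': None}
#     def handler(event):
#         if pending['id'] is not None:
#             widget.after_cancel(pending['id'])
#         pending['id'] = widget.after(delay_ms, callback)
#     return handler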
|
Tabinka/disappearingTextApp
|
main.py
|
main.py
|
py
| 915 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19637375362
|
import serial
import datetime as dt
import sys
class gps:
def __init__(self, port = "/dev/serial0"):
# Initializes serial connection for gps communication
try:
self.__ser = serial.Serial(port)
except Exception as e:
sys.exit("Can not connect with GPS using uart: " + str(e))
def get_record(self):
        # Tries up to 50 times to read a GPRMC record (as a string) from the GPS
got_record = False
for _ in range(50):
gps_record = self.__ser.readline().decode('UTF-8')
if gps_record[0:6] == "$GPRMC":
got_record = True
break
if got_record == True:
data = gps_record.split(",")
if data[2] == 'A':
self._status = "Correct"
# GMT time
if is_number(data[1][0:2]) and is_number(data[1][2:4]) and is_number(data[1][4:6]):
self._time = data[1][0:2] + ":" + data[1][2:4] + ":" + data[1][4:6]
else:
self._time = dt.datetime.now().strftime('[%H:%M:%S]')
self._status = "Corrupted data"
# Latitude
if (is_number(data[3])):
self._latitude = data[3]
else:
self._status = "Corrupted data"
# Latitude direction N/S
self._hemisphere_NS = data[4]
# Longitude
if (is_number(data[5])):
self._longitude = data[5]
else:
self._status = "Corrupted data"
# Longitude direction W/E
self._hemisphere_WE = data[6]
# Velocity in knots
if (is_number(data[7])):
self._velocity = data[7]
else:
self._status = "Corrupted data"
# True course
if (is_number(data[8])):
self._course = data[8]
elif data[8] == '':
self._course = 0;
else:
self._status = "Corrupted data"
# Date
if is_number(data[9][4:6]) and is_number(data[9][2:4]) and is_number(data[9][0:2]):
self._date = data[9][4:6] + "-" + data[9][2:4] + "-" + data[9][0:2]
else:
self._status = "Corrupted data"
if self._status == "Correct":
return 0
else:
return 1
else:
self._status = "Signal lost"
self._time = dt.datetime.now().strftime('%H:%M:%S')
self._date = dt.datetime.now().strftime('%Y-%m-%d')
return 1
else:
self._status = "Connection error"
self._time = dt.datetime.now().strftime('%H:%M:%S')
self._date = dt.datetime.now().strftime('%Y-%m-%d')
return 1
def _decode(self, coord):
#Converts DDDMM.MMMMM to DD deg MM.MMMMM min
tmp = coord.split(".")
deg = tmp[0][0:-2]
mins = tmp[0][-2:]
return deg + " deg " + mins + "." + tmp[1] + " min"
def get_gps_time(self):
# Returns date and time or 1 if fails to obtain them
if (self.get_record()):
return 1
else:
return self._date + " " + self._time
def get_decimal_degrees_record(self):
# Read from GPS and get current location parameters dictionary in decimal_degrees
if (self.get_record() == 0):
hemi_NE_sign = "+" if self._hemisphere_NS == "N" else "-"
hemi_WE_sign = "+" if self._hemisphere_WE == "E" else "-"
pos = self._latitude.find('.')
lat_deg = self._latitude[:pos-2]
lat_mins = self._latitude[pos-2:pos] + self._latitude[pos+1:]
lat_mins = str(round(float(lat_mins) / 60.0))
pos = self._longitude.find('.')
lng_deg = self._longitude[:pos-2]
lng_mins = self._longitude[pos-2:pos] + self._longitude[pos+1:]
lng_mins = str(round(float(lng_mins) / 60.0))
return {
'timestamp' : self.get_gps_time(),
'status' : self._status,
'latitude' : float(hemi_NE_sign + lat_deg + "." + lat_mins),
'longitude' : float(hemi_WE_sign + lng_deg + "." + lng_mins),
'velocity' : float(self._velocity),
'course' : float(self._course) }
else:
return {
'timestamp' : self._date + " " + self._time,
'status' : self._status,
'latitude' : 0,
'longitude' : 0,
'velocity' : 0,
'course' : 0 }
def get_location_message(self):
# Read from GPS and get current location in a easily readible string
self.get_record()
time_stamp = dt.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
return "%s latitude: %s(%s), longitude: %s(%s), velocity: %s, True Course: %s" % (
time_stamp,
self._decode(self._latitude),
self._hemisphere_NS,
self._decode(self._longitude),
            self._hemisphere_WE,
self._velocity,
self._course)
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
|
maciejzj/pi-observer
|
scripts/gps.py
|
gps.py
|
py
| 5,664 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42992886102
|
import gspread
import numpy as np
import pandas as pd
from datetime import date
from datetime import datetime
import csv
import pytz
from oauth2client.service_account import ServiceAccountCredentials
import requests
#authorization
service_account = gspread.service_account(filename = 'capstone-362722-f3745d9260b7.json' )
worksheet = service_account.open('TeamLiftCyberPhysical').sheet1
rows = worksheet.row_count
scope = ["https://www.googleapis.com/auth/drive", "https://www.googleapis.com/auth/spreadsheets"]
credentials = ServiceAccountCredentials.from_json_keyfile_name('capstone-362722-f3745d9260b7.json', scope)
gc = gspread.authorize(credentials)
wb = gc.open_by_url('https://docs.google.com/spreadsheets/d/10g0fkjjrK0k9sa_ynw3O0Stdfp3leNJiJWS0MOM_b94/edit#gid=0')
#this function gets the last time the spreadsheet was updated
def getLastTimeModified():
revisions_uri = f'https://www.googleapis.com/drive/v3/files/{wb.id}/revisions'
headers = {'Authorization': f'Bearer {credentials.get_access_token().access_token}'}
response = requests.get(revisions_uri, headers=headers).json()
return response['revisions'][-1]['modifiedTime']
#this function adds data row to spreadsheets with given params
def addData(rowEntry):
worksheet.append_row(rowEntry)
#sends the rows of a csv file to the spreadsheet file on the cloud (all rows appended in one call)
def sendFile(filename):
#mod_time_before = getLastTimeModified()
sent_data = np.loadtxt(filename,delimiter=",",dtype = str, ndmin = 2)
#lines= data_file.readlines()
#for iter in range(len(lines)):
#lines[iter] = lines[iter].replace('\n' , '')
#lines[iter] = lines[iter].split(',')
    worksheet.append_rows(sent_data.tolist())
    print("sent to spreadsheet")
def replaceNewline(str):
return str.replace("\n","")
#this function gets acknowledgement from google spreadsheets, by retrieving the last n rows that were previously populated on the spreadsheet
# and doing an elementwise comparison with the numpy array that was just sent
def getSpreadsheetAck(filename):
ackSuccess = False
agg_array= np.loadtxt(filename,delimiter=",",dtype=str, ndmin = 2)
print(agg_array)
rowsSent = np.shape(agg_array)[0]
colsSent = np.shape(agg_array)[1]
#rowsSent = np.shape(agg_array)[0]
#colsSent = 3
#if(len(np.shape(agg_array)) == 2):
#colsSent = np.shape(agg_array)[1]
#else:
#colsSent = len(agg_array)
all_data = np.array(worksheet.get_all_values())
all_data_rows = np.shape(all_data)[0]
numRemoteFields = np.shape(all_data)[1]
print("rowsSent = ",rowsSent,"colsSent = ",colsSent,"rows in database= ", all_data_rows)
if((numRemoteFields - 1) == rowsSent):
print("The Number of Fields match between the local and remote database")
remote_array = all_data[all_data_rows -rowsSent :all_data_rows:1 , 0:colsSent]
print(remote_array)
correctDataSent = np.array_equal(agg_array,remote_array)
if(correctDataSent == True):
print("The Correct Data was sent to the Database\n")
ackSuccess = True
if(correctDataSent == False):
print("The Wrong Data was Sent\n")
print("Attempting to send data again")
print(agg_array == remote_array)
ackSuccess = False
return ackSuccess
# timezone_oregon = pytz.timezone('US/Pacific')
# time_now = (datetime.now(timezone_oregon)).strftime('%Y-%m-%d %H:%M:%S')
# print("Data Was Updated at " + str(time_now) )
#this function updates a row in the spreadsheets file, by looking up the value of a column
#parameter columtype is the column of the data we are updating
#column val is the value of the column to look for
#rowdata is the new data that we are updating it to
def updateData(columntype,columnval,rowdata):
mod_time_before = getLastTimeModified()
#gets all the tabulated data is a 2D array
full_data = worksheet.get_all_values()
# print(full_data)
num_rows = len(full_data)
index = 0
#depending on the columntype, we assign an index,
#this index tells us which column to look inside of
if(columntype == 'pumpvelocity'):
index = 0
if(columntype == 'pressure'):
index = 1
if(columntype == 'timestamp'):
index = 2
#iterates through data
for k in range(0,num_rows):
# print((worksheet.row_values(k))[index])
#finds the row with the target value
#updates that row's data with new values
if((full_data[k])[index] == columnval):
# print("yes")
worksheet.update_cell(k+1,1,rowdata[0])
worksheet.update_cell(k+1,2,rowdata[1])
worksheet.update_cell(k+1,3,rowdata[2])
break
mod_time_after = getLastTimeModified()
print("mod time before update",mod_time_before)
print("mod time after update",mod_time_after)
if(mod_time_before != mod_time_after):
print("Modified at ",mod_time_after )
#this method fetches a data point given the value of a certain column
#for example it might search the data point where flow is equal to 55
def getRecord(columntype,columnval):
full_data = worksheet.get_all_values()
# print(full_data)
num_rows = len(full_data)
index = 0
if(columntype == 'pumpvelocity'):
index = 0
if(columntype == 'pressure'):
index = 1
if(columntype == 'timestamp'):
index = 2
#iterates through data and returns data point that has certain value
for k in range(0,num_rows):
# print((worksheet.row_values(k))[index])
if((full_data[k])[index] == columnval):
# print("yes")
print(full_data[k])
record = full_data[k]
printed_record = {"pumpvelocity":record[0],"pressure":record[1],"timestamp":record[2] }
print(printed_record)
return printed_record
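# Usage sketch (values are hypothetical; column layout assumed from the functions above:
# pumpvelocity, pressure, timestamp):
# addData([55.0, 101.3, '2022-10-01 12:00:00'])
# getRecord('pumpvelocity', '55.0')
# updateData('timestamp', '2022-10-01 12:00:00', [60.0, 99.8, '2022-10-01 12:30:00'])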
|
mcenek/TeamLiftCSWaterProject
|
CloudUpload/datapusher.py
|
datapusher.py
|
py
| 5,965 |
python
|
en
|
code
| 5 |
github-code
|
6
|
20281135464
|
def notas(* n, sit = False):
'''
    -> Function for grades and status of several students.
    :param n: one or more student grades (accepts several).
    :param sit: optional value, indicating whether or not to add the status.
    :return: dictionary with various pieces of information about the class.
'''
media = maior = menor = c = 0
tot = len(n)
for c in range(0, tot, 1):
media += n[c]
if c == 0:
maior = n[c]
menor = n[c]
if n[c] > maior:
maior = n[c]
if n[c] < menor:
menor = n[c]
media /= tot
list = {'total': tot, 'maior': maior, 'menor': menor, 'media': media}
if sit:
if media < 5:
list['situacao'] = 'ruim'
elif media < 7:
list['situacao'] = 'razoavel'
else:
list['situacao'] = 'boa'
return list
print(notas(4, 8, 7, 2.5))
print(notas(4, 8, 9, sit = True))
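# Expected output (worked example): the first call returns
# {'total': 4, 'maior': 8, 'menor': 2.5, 'media': 5.375}
# and the second returns {'total': 3, 'maior': 9, 'menor': 4, 'media': 7.0, 'situacao': 'boa'},
# since the mean 7.0 is not below 7.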
print(help(notas))
'''def notas(* n, sit = False):
//
    -> Function for grades and status of several students.
    :param n: one or more student grades (accepts several).
    :param sit: optional value, indicating whether or not to add the status.
    :return: dictionary with various pieces of information about the class.
//
dic = {'total': len(n), 'maior': max(n), 'menor': min(n), 'media': sum(n) / len(n)}
if sit:
if dic['media'] < 5:
dic['situacao'] = 'ruim'
elif dic['media'] < 7:
dic['situacao'] = 'razoavel'
else:
dic['situacao'] = 'boa'
return dic
print(notas(4, 8, 7, 2.5))
print(notas(4, 8, 9, sit = True))
print(help(notas))
'''
|
JoooNatan/CursoPython
|
Mundo03/Exs/Ex105.py
|
Ex105.py
|
py
| 1,636 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
34959494378
|
# 220728
# SWEA_D1
# 14553. Game Money
# N people play the game; each round is played by two people
# each of the two players must pay 1 won of game money per round
# compute the maximum number of games that can be played with the given game money
# N is at most 20, the initial game money is at most 100
T = int(input())
for t in range(1, T+1):
N = int(input())
    N_ls = sorted(map(int, input().split())) # read the numbers and sort them
    game_n = 0
    while N_ls[-1] > 0 and N_ls[-2] > 0: # the two largest amounts play first; both players must still have money
        N_ls[-1] -= 1 # each player spends 1 won
        N_ls[-2] -= 1
        game_n += 1 # one more game once both have paid 1 won
        N_ls.sort() # re-sort so the largest amounts are updated
print(f'#{t} {game_n}')
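# Worked example: with money [1, 2, 3], three games can be played; the two richest players pay
# each round, leaving [1, 1, 2] -> [0, 1, 1] -> [0, 0, 0].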
|
pearl313/Alorithm_study
|
알스_220728.py
|
알스_220728.py
|
py
| 918 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
33198762995
|
import ConfigParser
import io
import sys
import os
import numpy as np
from scipy.stats import cumfreq
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.basemap import Basemap
from matplotlib.backends.backend_pdf import PdfPages
import pickle
configFile = sys.argv[1]
def readConfigFile(configFileName):
global config
with open(configFileName) as f:
sample_config = f.read()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(sample_config))
return config
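# The config file is expected to provide at least the sections/keys read further below
# (sketch with made-up values, inferred from the config.get calls in this script):
# [Main options]
# RunName = run_5min
# [Reference options]
# RunName = run_30min
# [Output options]
# outputFile = validation_plots.pdf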
def stackedPlotHistogram(metric, catchmentSize, title, legendLoc = 2, ymax=3500):
plotData = []
lims = [0,10**4,25000,50000,10**5,25*10**4,25*10**10]
for lim in range(1,len(lims)):
sel1 = catchmentSize/10**6 < lims[lim]
sel2 = catchmentSize/10**6 > lims[lim-1]
sel = [x and y for x, y in zip(sel1, sel2)]
plotData.append(metric[sel])
ax1 = plt.hist(plotData, bins=np.arange(-1,1.01,0.1), width = 0.1, stacked=True, color=plt.get_cmap("Blues")(np.linspace(0, 1, 6)), label = ["$<10*10^3$","$<25*10^3$","$<50*10^3$","$<100*10^3$","$<250*10^3$","$\geq250*10^3$"], edgecolor = "none")
ax1 = plt.legend(prop={'size': 10}, title="Catchment size ($km^2$)", loc = legendLoc)
ax1 = plt.title(title)
ax1 = plt.xlabel("Value")
ax1 = plt.ylabel("Frequency")
ax1 = plt.xlim(-1, 1)
ax1 = plt.ylim(0, ymax)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotHistogram(metric, title):
ax1 = plt.hist(metric, bins=np.arange(-1,1.01,0.1))
ax1 = plt.title(title)
ax1 = plt.xlabel("Value")
ax1 = plt.ylabel("Frequency")
ax1 = plt.xlim(-1, 1)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotCDF(forecast, validation, title, xlims = [-1,1]):
forecast[forecast < -1.01] = -1.01
vals, x1, x2, x3 = cumfreq(forecast, len(forecast))
ax1 = plt.plot(np.linspace(np.min(forecast), np.max(forecast), len(forecast)), vals/len(forecast), label=str(config.get('Main options', 'RunName')))
validation[validation < -1.01] = -1.01
vals, x1, x2, x3 = cumfreq(validation, len(validation))
ax2 = plt.plot(np.linspace(np.min(validation), np.max(validation), len(validation)), vals/len(validation), label=str(config.get('Reference options', 'RunName')))
ax2 = plt.legend(prop={'size': 10}, loc=2)
ax1 = plt.title(title)
ax1 = plt.xlabel("Value")
ax1 = plt.ylabel("ECDF")
ax1 = plt.xlim(xlims[0], xlims[1])
ax1 = plt.ylim(0, 1)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotScatter(forecast, validation, title):
ax1 = plt.plot(validation, forecast, "ro", markersize=8)
ax1 = plt.plot([-100,100], [-100,100])
ax1 = plt.title(title)
ax1 = plt.xlabel(str(config.get('Reference options', 'RunName')))
ax1 = plt.ylabel(str(config.get('Main options', 'RunName')))
ax1 = plt.xlim(-1, 1)
ax1 = plt.ylim(-1, 1)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotHexBin(forecast, validation, title):
forecast[forecast < -1.1] = -1.1
validation[validation < -1.1] = -1.1
ax1 = plt.hexbin(validation, forecast, gridsize=20, vmin=1, vmax=20, cmap="OrRd")
ax1 = plt.plot([-100,100], [-100,100])
ax1 = plt.title(title)
ax1 = plt.xlabel(str(config.get('Reference options', 'RunName')))
ax1 = plt.ylabel(str(config.get('Main options', 'RunName')))
ax1 = plt.xlim(-1, 1)
ax1 = plt.ylim(-1, 1)
ax1 = plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
def plotWorldMap(data, lons, lats, title, vmin = -1., vmax = 1., s=5):
plt.figure(figsize=(8, 4))
m = Basemap(projection='mill',lon_0=0, llcrnrlon=-20., llcrnrlat=20.,
urcrnrlon=50., urcrnrlat=75.)
x,y = m(lons, lats)
m.drawcountries(zorder=0, color="white")
#m.drawcoastlines(zorder=0, color="black")
m.fillcontinents(color = 'black',zorder=-1)
m.scatter(x,y, c=data, cmap='RdBu', vmin=vmin, vmax=vmax, s=s, edgecolors='none')
m.colorbar()
plt.title(title)
plt.gcf().set_tight_layout(True)
pdf.savefig()
plt.clf()
plt.figure(figsize=(8, 6))
config = readConfigFile(configFile)
runName = str(config.get('Main options', 'RunName'))
refName = str(config.get('Reference options', 'RunName'))
output, output2 = pickle.load(open('validationResultsPool_%s_%s.obj' %(runName, refName), 'rb') )
sel1 = (np.isnan(output[:,3]+output[:,2]+output[:,4]+output2[:,2]+output2[:,3]+output2[:,4]) == False)
sel2 = np.sum(output[:,3:], axis=1) != 0.0
sel3 = np.sum(output2[:,3:], axis=1) != 0.0
sel = [x and y and z for x, y, z in zip(sel1, sel2, sel3)]
sel5Min = sel
pdf = PdfPages(str(config.get('Output options', 'outputFile')))
matplotlib.rcParams.update({'font.size': 12})
plotWorldMap(output[sel5Min,3], output[sel5Min,0], output[sel5Min,1], 'Correlation with observations (%s)' %(str(config.get('Main options', 'RunName'))))
plotWorldMap(output2[sel,3], output2[sel,0], output2[sel,1], 'Correlation with observations (%s)' %(str(config.get('Reference options', 'RunName'))))
plotWorldMap(output[sel,3]-output2[sel,3], output[sel,0], output[sel,1], 'Correlation difference 5min - 30min', vmin=-0.5, vmax=0.5)
plotWorldMap(output[sel5Min,4], output[sel5Min,0], output[sel5Min,1], 'Anomaly Correlation (%s)' %(str(config.get('Main options', 'RunName'))))
plotWorldMap(output2[sel,4], output2[sel,0], output2[sel,1], 'Anomaly Correlation (%s)' %(str(config.get('Reference options', 'RunName'))))
plotWorldMap(output[sel,4]-output2[sel,4], output[sel,0], output[sel,1], 'Anomaly Correlation difference', vmin=-0.5, vmax=0.5)
plotWorldMap(output[sel5Min,4]-output[sel5Min,3], output[sel5Min,0], output[sel5Min,1], 'Anomaly Correlation - Correlation (%s)' %(str(config.get('Main options', 'RunName'))))
stackedPlotHistogram(output[sel5Min,3], output[sel5Min,2], "Correlation with observations (%s)" %(str(config.get('Main options', 'RunName'))), ymax=750)
stackedPlotHistogram(output2[sel,3], output2[sel,2], "Correlation with observations (%s)" %(str(config.get('Reference options', 'RunName'))), ymax=750)
stackedPlotHistogram(output[sel5Min,4], output[sel5Min,2], "Anomaly Correlation with observations (%s)" %(str(config.get('Main options', 'RunName'))), ymax=750)
stackedPlotHistogram(output2[sel,4], output2[sel,2], "Anomaly Correlation with observations (%s)" %(str(config.get('Reference options', 'RunName'))), ymax=750)
stackedPlotHistogram(output[sel5Min,5], output[sel5Min,2], "Kling-Gupta Efficiency (%s)" %(str(config.get('Main options', 'RunName'))), ymax=500)
stackedPlotHistogram(output2[sel,5], output2[sel,2], "Kling-Gupta Efficiency (%s)" %(str(config.get('Reference options', 'RunName'))), ymax=500)
stackedPlotHistogram(output[sel5Min,4]-output[sel5Min,3], output[sel5Min,2], "AC - R (%s)" %(str(config.get('Main options', 'RunName'))), ymax=550)
plotCDF(output[sel,3], output2[sel,3], "R")
plotCDF(output[sel,4], output2[sel,4], "AC")
plotCDF(output[sel,5], output2[sel,5], "KGE")
plotHexBin(output[sel,3], output2[sel,3], "R")
plotHexBin(output[sel,4], output2[sel,4], "AC")
plotHexBin(output[sel,5], output2[sel,5], "KGE")
pdf.close()
|
edwinkost/PCR-GLOBWB_validation
|
niko_validation_scripts/standAlone/plotValidation.py
|
plotValidation.py
|
py
| 7,155 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35379919905
|
from flask import Flask
from flask_apscheduler import APScheduler
# config scheduling class
from statuschecker import get_health_status
class Config(object):
JOBS = [
{
'id': 'check_health',
'func': 'app:check_health',
'trigger': 'interval',
'seconds': 1800
}
]
SCHEDULER_API_ENABLED = True
# function triggered every 30 minutes
def check_health():
return get_health_status();
# flask startup
app = Flask(__name__)
app.config.from_object(Config())
# initiate scheduler
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
tynorantoni/HealthCheckService
|
app.py
|
app.py
|
py
| 687 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40077076852
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import re
import logging
from .pushbullet import *
class Messenger(object):
"""docstring for Message"""
def __init__(self): #, arg):
# super(Message, self).__init__()
# self.arg = arg
self.ready = False
self.message = ''
self.error_bag = ''
self.result_bag = ''
self.pattern_time=re.compile(r'\d{1,2}h:\d{1,2}m:\d{1,2}s:\d{1,4}ms')
self.pattern_process = re.compile(r'\([0-9\ ]{0,2}/\w{2}\)')
self.pattern_stream = re.compile(r'stream\s+\d{1,2}')
self.pattern_long_space = re.compile(r'\s+')
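        # Illustrative matches for the patterns above (examples only):
        #   pattern_time    -> "0h:12m:34s:567ms"
        #   pattern_process -> "( 3/24)"
        #   pattern_stream  -> "stream  5"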
def _message_chopper(self,line):
if 'Finished' in line:
return line
else:
return
def bb_message_chopper(self,line):
if 'Load test' in line:
if 'finished' in line:
try:
time = re.search(self.pattern_time,line)[0]
except:
                    logging.info('re search for time failed')
                    time = 'TIME SEARCH FAILED'
message = 'Load finished. Time: '+time
return message
elif 'Power test' in line:
if 'finished' in line:
try:
time = re.search(self.pattern_time,line)[0]
except:
                    time = ' TIME SEARCH FAILED '
try:
process = re.search(self.pattern_process,line)[0]
except:
if time:
process = ' finished.'
else:
process = ' PROCESS SEARCH FAILED '
# logging.info('re seach time failed')
message = 'Power'+process+' Time: '+time
if process == ' finished.':
return message
elif 'throughput' in line:
if 'finished' in line:
try:
self.pattern_long_space = re.compile(r'\s+')
stream_buff = re.search(self.pattern_stream,line)[0]
stream = self.pattern_long_space.sub(' ',stream_buff)
except :
stream =''
try:
time = re.search(self.pattern_time,line)[0]
except:
                    time = ' TIME SEARCH FAILED '
try:
process = re.search(self.pattern_process,line)[0]
except:
if time:
process = ' finished.'
else:
process = ' PROCESS SEARCH FAILED '
# logging.info('re seach time failed')
message = 'Throughput '+ stream +process+' Time: '+time
# if stream == 'stream 0':
if process == ' finished.':
return message
elif 'benchmark: Stop' in line:
if 'finished' in line:
try:
time = re.search(self.pattern_time,line)[0]
except:
                    time = ' TIME SEARCH FAILED '
message = 'Benchmark Stop. '+'Time: '+time
return message
elif 'VALID BBQpm' in line:
self.result_bag+=line
message = line[:-1]
return message
elif 'Benchmark run terminated' in line:
self.error_bag+=line
elif 'Reason:' in line:
self.error_bag+=line
elif 'No final result available.' in line:
self.error_bag+=line
message=self.error_bag
return message
def message_buffer(self, line):
if line[-1] == '\n':
line_tmp=line[:-1]
else:
line_tmp=line
print(line_tmp)
sys.stdout.flush()
logging.info(line_tmp)
if line!='':
message2push=self.bb_message_chopper(line)
if message2push:
self.message+=message2push
self.ready=True
if self.ready == True:
logging.info('Pushing message...(%s)'%self.message)
self.send()
def test(self,message='Your network seems good.'):
p = PushBullet(USER_API_KEY)
try:
# Get a list of devices
devices = p.getDevices()
# print_devices(devices)
except:
            print('You may have a network connection problem connecting to pushbullet.com.')
            sys.stdout.flush()
            logging.info('You may have a network connection problem connecting to pushbullet.com.')
else:
if len(devices)!=0:
print_devices(devices)
sys.stdout.flush()
# Send a note
p.pushNote(USER_DEVICE_IDEN, 'Alfred is with you.', message)
def send(self):
p = PushBullet(USER_API_KEY)
try:
# Get a list of devices
devices = p.getDevices()
# devices = 'pass'
# print_devices(devices)
except:
            print('You may have a network connection problem connecting to pushbullet.com.')
            sys.stdout.flush()
            logging.info('You may have a network connection problem connecting to pushbullet.com.')
else:
if len(devices)!=0:
# Send a note
p.pushNote(USER_DEVICE_IDEN, 'News from Alfred', self.message)
# print('PUSHING NEWS:%s'%self.message)
self.message=''
self.ready=False
def send_all(self,retry=20):
while retry>0 and self.message!='':
            self.send()
retry-=1
logging.info('Remaining Attempts:%d'%retry)
|
lucy9215/jobNotification
|
pushbullet/messenger.py
|
messenger.py
|
py
| 5,743 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19645294724
|
from Math import mathObject
import random
class areaObject(mathObject):
def __init__(self):
self.type = "Area"
self.areaType = None
self.pieces = []
self.content = []
def include(self, x):
pass
class areaPiece(areaObject):
def __init__(self, content, tp):
areaObject.__init__(self)
self.areaType = tp
if isinstance(content, list):
self.content = content
else:
self.content = [content]
def include(self, x):
if isinstance(x, list):
if self.areaType == "discrete":
for item in x:
if not item in self.content:
return False
return True
elif self.areaType == "continous":
for item in x:
if not self.content[0] <= item <= self.content[1]:
return False
                return True
else:
return False
elif isinstance(x, int) or isinstance(x, float):
if self.areaType == "discrete":
if not x in self.content:
return False
else:
return True
elif self.areaType == "continous":
if not self.content[0] <= x <= self.content[1]:
return False
else:
return True
else:
return False
elif isinstance(x, mathObject) or isinstance(x, object):
if self.areaType != "discrete" and self.areaType != "continous":
return False
if isinstance(x, areaObject):
if isinstance(x, areaPiece):
if x.areaType == "continous" and self.areaType == "discrete":
return False
else:
return self.include(x.content)
else:
rlt = True
for item in x.pieces:
rlt = rlt and self.include(item)
return rlt
else:
if self.areaType != "discrete":
return False
else:
if x in self.content:
return True
else:
return False
else:
return False
class Area(areaObject):
def __init__(self, tp, thetaList):
areaObject.__init__(self)
if isinstance(thetaList, list):
tmp = []
for item in thetaList:
if isinstance(item, list):
self.pieces.append(areaPiece(item, tp))
else:
self.pieces.append(areaPiece(thetaList, tp))
self.areaType = tp
def include(self, x):
for piece in self.pieces:
if piece.include(x):
return True
return False
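# Usage sketch (illustration only, not part of the original module):
# a = Area("continous", [[0, 1], [5, 10]])   # union of two closed intervals
# a.include(0.5)          # -> True
# a.include([6, 7, 11])   # -> False (11 lies outside both pieces)
# d = Area("discrete", [1, 2, 3])
# d.include(2)            # -> True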
|
Anon-LeoH/UncleDaLearn
|
UD/Area/__init__.py
|
__init__.py
|
py
| 2,974 |
python
|
en
|
code
| 2 |
github-code
|
6
|
33147997203
|
from covid_constants_and_util import *
import geopandas as gpd
import statsmodels.api as sm
import json
import copy
from fbprophet import Prophet
from collections import Counter
import re
import h5py
import ast
from shapely import wkt
from scipy.stats import pearsonr
import fiona
import geopandas
import csv
import os
from geopandas.tools import sjoin
import time
try:
cast_to_datetime = [datetime.datetime.strptime(s, '%Y-%m-%d') for s in ALL_WEEKLY_STRINGS]
except:
print(ALL_WEEKLY_STRINGS)
raise Exception("At least one weekly string is badly formatted.")
def load_social_distancing_metrics(datetimes, version='v2'):
"""
Given a list of datetimes, load social distancing metrics for those days.
load_social_distancing_metrics(helper.list_datetimes_in_range(datetime.datetime(2020, 3, 1),
datetime.datetime(2020, 3, 7)))
"""
print("Loading social distancing metrics for %i datetimes; using version %s" % (len(datetimes), version))
t0 = time.time()
daily_cols = ['device_count', 'distance_traveled_from_home',
'completely_home_device_count', 'full_time_work_behavior_devices']
concatenated_d = None
for dt in datetimes:
if version == 'v1':
path = os.path.join(PATH_TO_SDM_V1, dt.strftime('%Y/%m/%d/%Y-%m-%d-social-distancing.csv.gz'))
elif version == 'v2':
path = os.path.join(PATH_TO_SDM_V2, dt.strftime('%Y/%m/%d/%Y-%m-%d-social-distancing.csv.gz'))
else:
raise Exception("Version should be v1 or v2")
if os.path.exists(path):
social_distancing_d = pd.read_csv(path, usecols=['origin_census_block_group'] + daily_cols)[['origin_census_block_group'] + daily_cols]
social_distancing_d.columns = ['census_block_group'] + ['%i.%i.%i_%s' %
(dt.year, dt.month, dt.day, a) for a in daily_cols]
old_len = len(social_distancing_d)
social_distancing_d = social_distancing_d.drop_duplicates(keep=False)
n_dropped_rows = old_len - len(social_distancing_d)
assert len(set(social_distancing_d['census_block_group'])) == len(social_distancing_d)
assert(1.*n_dropped_rows/old_len < 0.002) # make sure not very many rows are duplicates.
if version == 'v2':
assert n_dropped_rows == 0 # they fixed the problem in v2.
elif version == 'v1':
assert n_dropped_rows > 0 # this seemed to be a problem in v1.
if concatenated_d is None:
concatenated_d = social_distancing_d
else:
concatenated_d = pd.merge(concatenated_d,
social_distancing_d,
how='outer',
validate='one_to_one',
on='census_block_group')
else:
raise Exception('Missing Social Distancing Metrics for %s' % dt.strftime('%Y/%m/%d'))
if concatenated_d is None: # could not find any of the dates
return concatenated_d
print("Total time to load social distancing metrics: %2.3f seconds; total rows %i" %
(time.time() - t0, len(concatenated_d)))
return concatenated_d
def annotate_with_demographic_info_and_write_out_in_chunks(full_df, just_testing=False):
"""
Annotate the Safegraph POI data with Census data and other useful POI data.
"""
full_df['safegraph_place_id'] = full_df.index
full_df.index = range(len(full_df))
# merge with areas.
safegraph_areas = pd.read_csv(PATH_TO_SAFEGRAPH_AREAS)
print("Prior to merging with safegraph areas, %i rows" % len(full_df))
safegraph_areas = safegraph_areas[['safegraph_place_id', 'area_square_feet']].dropna()
safegraph_areas.columns = ['safegraph_place_id', 'safegraph_computed_area_in_square_feet']
full_df = pd.merge(full_df, safegraph_areas, how='inner', on='safegraph_place_id', validate='one_to_one')
print("After merging with areas, %i rows" % len(full_df))
# map to demo info. The basic class we use here is CensusBlockGroups, which processes the Census data.
print("Mapping SafeGraph POIs to demographic info, including race and income.")
gdb_files = ['ACS_2017_5YR_BG_51_VIRGINIA.gdb'] if just_testing else None
cbg_mapper = CensusBlockGroups(base_directory=PATH_FOR_CBG_MAPPER, gdb_files=gdb_files)
pop_df = load_dataframe_to_correct_for_population_size()
chunksize = 100000
annotated_df = []
for chunk_number in range(len(full_df) // chunksize + 1):
print("******************Annotating chunk %i" % chunk_number)
start, end = chunk_number * chunksize, min((chunk_number + 1) * chunksize, len(full_df))
d = full_df.iloc[start:end].copy()
# Now annotate each POI on the basis of its location.
mapped_pois = cbg_mapper.get_demographic_stats_of_points(d['latitude'].values,
d['longitude'].values,
desired_cols=['p_white', 'p_asian', 'p_black', 'median_household_income', 'people_per_mile'])
mapped_pois['county_fips_code'] = mapped_pois['county_fips_code'].map(lambda x:int(x) if x is not None else x)
mapped_pois.columns = ['poi_lat_lon_%s' % a for a in mapped_pois.columns]
for c in mapped_pois.columns:
d[c] = mapped_pois[c].values
# Then annotate with demographic data based on where visitors come from (visitor_home_cbgs).
d = aggregate_visitor_home_cbgs_over_months(d, population_df=pop_df)
block_group_d = cbg_mapper.block_group_d.copy()
block_group_d['id_to_match_to_safegraph_data'] = block_group_d['GEOID'].map(lambda x:x.split("US")[1]).astype(int)
block_group_d = block_group_d[['id_to_match_to_safegraph_data', 'p_black', 'p_white', 'p_asian', 'median_household_income']]
block_group_d = block_group_d.dropna()
for col in block_group_d:
if col == 'id_to_match_to_safegraph_data':
continue
cbg_dict = dict(zip(block_group_d['id_to_match_to_safegraph_data'].values, block_group_d[col].values))
d['cbg_visitor_weighted_%s' % col] = d['aggregated_cbg_population_adjusted_visitor_home_cbgs'].map(lambda x:compute_weighted_mean_of_cbg_visitors(x, cbg_dict))
# see how well we did.
for c in [a for a in d.columns if 'poi_lat_lon_' in a or 'cbg_visitor_weighted' in a]:
print("Have data for %s for fraction %2.3f of people" % (c, 1 - pd.isnull(d[c]).mean()))
d.to_hdf(os.path.join(ANNOTATED_H5_DATA_DIR, CHUNK_FILENAME) ,f'chunk_{chunk_number}', mode='a', complevel=2)
annotated_df.append(d)
annotated_df = pd.concat(annotated_df)
annotated_df.index = range(len(annotated_df))
return annotated_df
def load_date_col_as_date(x):
# we allow this to return None because sometimes we want to filter for cols which are dates.
try:
year, month, day = x.split('.') # e.g., '2020.3.1'
return datetime.datetime(int(year), int(month), int(day))
except:
return None
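# Example (illustration): load_date_col_as_date('2020.3.1') -> datetime.datetime(2020, 3, 1),
# while a non-date column name such as 'safegraph_place_id' returns None.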
def get_h5_filepath(load_backup):
backup_string = 'BACKUP_' if load_backup else ''
filepath = os.path.join(ANNOTATED_H5_DATA_DIR, backup_string + CHUNK_FILENAME)
return filepath
def load_chunk(chunk, load_backup=False):
"""
Load a single 100k chunk from the h5 file; chunks are randomized and so should be reasonably representative.
"""
filepath = get_h5_filepath(load_backup=load_backup)
print("Reading chunk %i from %s" % (chunk, filepath))
d = pd.read_hdf(filepath, key=f'chunk_{chunk}')
date_cols = [load_date_col_as_date(a) for a in d.columns]
date_cols = [a for a in date_cols if a is not None]
print("Dates range from %s to %s" % (min(date_cols), max(date_cols)))
return d
def load_multiple_chunks(chunks, load_backup=False, cols=None):
"""
Loads multiple chunks from the h5 file. Currently quite slow; quicker if only a subset of columns are kept.
Use the parameters cols to specify which columns to keep; if None then all are kept.
"""
dfs = []
for i in chunks:
t0 = time.time()
chunk = load_chunk(i, load_backup=load_backup)
print("Loaded chunk %i in %2.3f seconds" % (i, time.time() - t0))
if cols is not None:
chunk = chunk[cols]
dfs.append(chunk)
t0 = time.time()
df = pd.concat(dfs)
print("Concatenated %d chunks in %2.3f seconds" % (len(chunks), time.time() - t0))
return df
def load_all_chunks(cols=None, load_backup=False):
"""
Load all 100k chunks from the h5 file. This currently takes a while.
"""
filepath = get_h5_filepath(load_backup=load_backup)
f = h5py.File(filepath, 'r')
chunks = sorted([int(a.replace('chunk_', '')) for a in list(f.keys())])
f.close()
assert chunks == list(range(max(chunks) + 1))
print("Loading all chunks: %s" % (','.join([str(a) for a in chunks])))
return load_multiple_chunks(chunks, cols=cols, load_backup=load_backup)
def load_patterns_data(month=None, year=None, week_string=None, extra_cols=[], just_testing=False):
"""
Load in Patterns data for a single month and year, or for a single week. (These options are mutually exclusive).
Use extra_cols to define non-default columns to load.
just_testing is a flag to allow quicker prototyping; it will load only a subset of the data.
"""
change_by_date = ['visitor_home_cbgs', 'visitor_country_of_origin',
'distance_from_home', 'median_dwell', 'bucketed_dwell_times'] # fields that are time-varying
if month is not None and year is not None:
month_and_year = True
assert week_string is None
assert month in range(1, 13)
assert year in [2017, 2018, 2019, 2020]
if (year == 2019 and month == 12) or (year == 2020 and month in [1, 2]):
upload_date_string = '2020-03-16' # we originally downloaded files in two groups; load them in the same way.
else:
upload_date_string = '2019-12-12'
month_and_year_string = '%i_%02d-%s' % (year, month, upload_date_string)
base_dir = os.path.join(UNZIPPED_DATA_DIR, 'SearchofAllRecords-CORE_POI-GEOMETRY-PATTERNS-%s' % month_and_year_string)
print("Loading all files from %s" % base_dir)
filenames = [a for a in os.listdir(base_dir) if
(a.startswith('core_poi-geometry-patterns-part') and a.endswith('.csv.gz'))]
# make sure we're not ignoring any files we don't expect to ignore.
assert all([a in ['brand_info.csv', 'visit_panel_summary.csv', 'README.txt', 'home_panel_summary.csv']
for a in os.listdir(base_dir) if a not in filenames])
if just_testing:
filenames = filenames[:2]
print("Number of files to load: %i" % len(filenames))
full_paths = [os.path.join(base_dir, a) for a in filenames]
x = load_csv_possibly_with_dask(full_paths, use_dask=True, usecols=['safegraph_place_id',
'parent_safegraph_place_id',
'location_name',
'latitude',
'longitude',
'city',
'region',
'postal_code',
'top_category',
'sub_category',
'naics_code',
"polygon_wkt",
"polygon_class",
'visits_by_day',
'visitor_home_cbgs',
'visitor_country_of_origin',
'distance_from_home',
'median_dwell',
'bucketed_dwell_times'] +
extra_cols,
dtype={'naics_code': 'float64'})
print("Fraction %2.3f of NAICS codes are missing" % pd.isnull(x['naics_code']).mean())
x = x.rename(columns={k: f'{year}.{month}.{k}' for k in change_by_date})
else:
# weekly patterns data.
month_and_year = False
assert month is None and year is None
assert week_string in ALL_WEEKLY_STRINGS
filepath = os.path.join(PATH_TO_WEEKLY_PATTERNS, '%s-weekly-patterns.csv.gz' % week_string)
# Filename is misleading - it is really a zipped file.
# Also, we're missing some columns that we had before, so I think we're just going to have to join on SafeGraph ID.
x = pd.read_csv(filepath, escapechar='\\', compression='gzip', nrows=10000 if just_testing else None, usecols=['safegraph_place_id',
'visits_by_day',
'visitor_home_cbgs',
'visitor_country_of_origin',
'distance_from_home',
'median_dwell',
'bucketed_dwell_times',
'date_range_start',
'visits_by_each_hour'])
x['offset_from_gmt'] = x['date_range_start'].map(lambda x:x.split('-')[-1])
assert x['date_range_start'].map(lambda x:x.startswith(week_string + 'T' + '00:00:00')).all() # make sure date range starts where we expect for all rows.
print("Offset from GMT value counts")
print(x['offset_from_gmt'].value_counts())
del x['date_range_start']
x = x.rename(columns={k: f'{week_string}.{k}' for k in change_by_date})
print("Prior to dropping rows with no visits by day, %i rows" % len(x))
x = x.dropna(subset=['visits_by_day'])
x['visits_by_day'] = x['visits_by_day'].map(json.loads) # convert string lists to lists.
if month_and_year:
days = pd.DataFrame(x['visits_by_day'].values.tolist(),
columns=[f'{year}.{month}.{day}'
for day in range(1, len(x.iloc[0]['visits_by_day']) + 1)])
else:
year = int(week_string.split('-')[0])
month = int(week_string.split('-')[1])
start_day = int(week_string.split('-')[2])
start_datetime = datetime.datetime(year, month, start_day)
all_datetimes = [start_datetime + datetime.timedelta(days=i) for i in range(7)]
days = pd.DataFrame(x['visits_by_day'].values.tolist(),
columns=['%i.%i.%i' % (dt.year, dt.month, dt.day) for dt in all_datetimes])
# Load hourly data as well.
# Per SafeGraph documentation:
# Start time for measurement period in ISO 8601 format of YYYY-MM-DDTHH:mm:SS±hh:mm
# (local time with offset from GMT). The start time will be 12 a.m. Sunday in local time.
x['visits_by_each_hour'] = x['visits_by_each_hour'].map(json.loads) # convert string lists to lists.
assert all_datetimes[0].strftime('%A') == 'Sunday'
hours = pd.DataFrame(x['visits_by_each_hour'].values.tolist(),
columns=[f'hourly_visits_%i.%i.%i.%i' % (dt.year, dt.month, dt.day, hour)
for dt in all_datetimes
for hour in range(0, 24)])
days.index = x.index
x = pd.concat([x, days], axis=1)
if not month_and_year:
assert list(x.index) == list(range(len(x)))
assert (hours.index.values == x.index.values).all()
hours.index = x.index
old_len = len(x)
x = pd.concat([x, hours], axis=1)
assert len(x) == old_len
x = x.drop(columns=['visits_by_each_hour'])
# The hourly data has some spurious spikes
# related to the GMT-day boundary which we have to correct for.
date_cols = [load_date_col_as_date(a) for a in x.columns]
date_cols = [a for a in date_cols if a is not None]
assert len(date_cols) == 7
if week_string >= '2020-03-15': # think this is because of DST. Basically, these are the timezone strings we look for and correct; they shift at DST.
hourly_offsets = [4, 5, 6, 7]
else:
hourly_offsets = [5, 6, 7, 8]
hourly_offset_strings = ['0%i:00' % hourly_offset for hourly_offset in hourly_offsets]
percent_rows_being_corrected = (x['offset_from_gmt'].map(lambda a:a in hourly_offset_strings).mean() * 100)
print("%2.3f%% of rows have timezones that we spike-correct for." % percent_rows_being_corrected)
assert percent_rows_being_corrected > 99 # make sure we're correcting almost all rows
# have to correct for each timezone separately.
for hourly_offset in hourly_offsets:
idxs = x['offset_from_gmt'] == ('0%i:00' % hourly_offset)
for date_col in date_cols: # loop over days.
date_string = '%i.%i.%i' % (date_col.year, date_col.month, date_col.day)
# not totally clear which hours are messed up - it's mainly one hour, but the surrounding ones look weird too -
# or what the best way to interpolate is, but this yields plots which look reasonable.
for hour_to_correct in [24 - hourly_offset - 1,
24 - hourly_offset,
24 - hourly_offset + 1]:
# interpolate using hours fairly far from hour_to_correct to avoid pollution.
if hour_to_correct < 21:
cols_to_use = ['hourly_visits_%s.%i' % (date_string, a) for a in [hour_to_correct - 3, hour_to_correct + 3]]
else:
# Use smaller offset so we don't have hours >= 24. This technically overlaps with earlier hours,
# but I think it should be okay because they will already have been corrected.
cols_to_use = ['hourly_visits_%s.%i' % (date_string, a) for a in [hour_to_correct - 2, hour_to_correct + 2]]
assert all([col in x.columns for col in cols_to_use])
x.loc[idxs, 'hourly_visits_%s.%i' % (date_string, hour_to_correct)] = x.loc[idxs, cols_to_use].mean(axis=1)
del x['offset_from_gmt']
x = x.set_index('safegraph_place_id')
x = x.drop(columns=['visits_by_day'])
if month_and_year:
print("%i rows loaded for month and year %s" % (len(x), month_and_year_string))
else:
print("%i rows loaded for week %s" % (len(x), week_string))
return x
def load_weekly_patterns_v2_data(week_string, cols_to_keep, expand_hourly_visits=True):
"""
Load in Weekly Patterns V2 data for a single week.
If week_string <= '2020-06-15': we are using the earlier version of Weekly Pattern v2 in /weekly_20190101_20200615/, and
week_string denotes the first day of the week.
Else: we are using the later version of Weekly Patterns v2 in /weekly_20200615_20201005/, and week_string denotes
the day this update was released.
"""
ts = time.time()
elements = week_string.split('-')
assert len(elements) == 3
week_datetime = datetime.datetime(int(elements[0]), int(elements[1]), int(elements[2]))
cols_to_load = cols_to_keep.copy()
must_load_cols = ['date_range_start', 'visits_by_each_hour'] # required for later logic
for k in must_load_cols:
if k not in cols_to_load:
cols_to_load.append(k)
if week_string <= '2020-06-15':
path_to_csv = os.path.join(CURRENT_DATA_DIR, 'weekly_20190101_20200615/main-file/%s-weekly-patterns.csv.gz' % week_string)
assert os.path.isfile(path_to_csv)
print('Loading from %s' % path_to_csv)
df = load_csv_possibly_with_dask(path_to_csv, use_dask=True, usecols=cols_to_load, dtype={'poi_cbg':'float64'})
start_day_string = week_string
start_datetime = week_datetime
else:
path_to_weekly_dir = os.path.join(CURRENT_DATA_DIR, 'weekly_20200615_20201028/patterns/%s/' % week_datetime.strftime('%Y/%m/%d'))
inner_folder = os.listdir(path_to_weekly_dir)
assert len(inner_folder) == 1 # there is always a single folder inside the weekly folder
path_to_patterns_parts = os.path.join(path_to_weekly_dir, inner_folder[0])
dfs = []
for filename in sorted(os.listdir(path_to_patterns_parts)):
if filename.startswith('patterns-part'): # e.g., patterns-part1.csv.gz
path_to_csv = os.path.join(path_to_patterns_parts, filename)
assert os.path.isfile(path_to_csv)
print('Loading from %s' % path_to_csv)
df = load_csv_possibly_with_dask(path_to_csv, use_dask=True, usecols=cols_to_load, dtype={'poi_cbg':'float64'})
dfs.append(df)
df = pd.concat(dfs, axis=0)
start_day_string = df.iloc[0].date_range_start.split('T')[0]
elements = start_day_string.split('-')
assert len(elements) == 3
start_datetime = datetime.datetime(int(elements[0]), int(elements[1]), int(elements[2]))
assert df['date_range_start'].map(lambda x:x.startswith(start_day_string + 'T00:00:00')).all() # make sure date range starts where we expect for all rows.
if expand_hourly_visits: # expand single hourly visits column into one column per hour
df['visits_by_each_hour'] = df['visits_by_each_hour'].map(json.loads) # convert string lists to lists.
all_dates = [start_datetime + datetime.timedelta(days=i) for i in range(7)] # all days in the week
hours = pd.DataFrame(df['visits_by_each_hour'].values.tolist(),
columns=[f'hourly_visits_%i.%i.%i.%i' % (date.year, date.month, date.day, hour)
for date in all_dates
for hour in range(0, 24)])
assert len(hours) == len(df)
hours.index = df.index
df = pd.concat([df, hours], axis=1)
# The hourly data has some spurious spikes
# related to the GMT-day boundary which we have to correct for.
df['offset_from_gmt'] = df['date_range_start'].map(lambda x:x[len(start_day_string + 'T00:00:00'):])
print("Offset from GMT value counts")
offset_counts = df['offset_from_gmt'].value_counts()
print(offset_counts)
hourly_offset_strings = offset_counts[:4].index # four most common timezones across POIs
assert all(['-0%i:00' % x in hourly_offset_strings for x in [5, 6, 7]]) # should always include GMT-5, -6, -7
assert ('-04:00' in hourly_offset_strings) or ('-08:00' in hourly_offset_strings) # depends on DST
percent_rows_being_corrected = (df['offset_from_gmt'].map(lambda x:x in hourly_offset_strings).mean() * 100)
print("%2.3f%% of rows have timezones that we spike-correct for." % percent_rows_being_corrected)
assert percent_rows_being_corrected > 98 # almost all rows should fall in these timezones
end_datetime = datetime.datetime(all_dates[-1].year, all_dates[-1].month, all_dates[-1].day, 23)
# have to correct for each timezone separately.
for offset_string in sorted(hourly_offset_strings):
print('Correcting GMT%s...' % offset_string)
idxs = df['offset_from_gmt'] == offset_string
offset_int = int(offset_string.split(':')[0])
assert (-8 <= offset_int) and (offset_int <= -4)
for date in all_dates:
# not totally clear which hours are messed up - it's mainly one hour, but the surrounding ones
# look weird too - but this yields plots which look reasonable.
for hour_to_correct in [24 + offset_int - 1,
24 + offset_int,
24 + offset_int + 1]:
# interpolate using hours fairly far from hour_to_correct to avoid pollution.
dt_hour_to_correct = datetime.datetime(date.year, date.month, date.day, hour_to_correct)
start_hour = max(start_datetime, dt_hour_to_correct + datetime.timedelta(hours=-3))
end_hour = min(end_datetime, dt_hour_to_correct + datetime.timedelta(hours=3))
cols_to_use = [f'hourly_visits_%i.%i.%i.%i' % (dt.year, dt.month, dt.day, dt.hour) for dt in list_hours_in_range(start_hour, end_hour)]
assert all([col in df.columns for col in cols_to_use])
# this technically overlaps with earlier hours, but it should be okay because they will
# already have been corrected.
df.loc[idxs, 'hourly_visits_%i.%i.%i.%i' % (date.year, date.month, date.day, hour_to_correct)] = df.loc[idxs, cols_to_use].mean(axis=1)
non_required_cols = [col for col in df.columns if not(col in cols_to_keep or col.startswith('hourly_visits_'))]
df = df.drop(columns=non_required_cols)
df = df.set_index('safegraph_place_id')
te = time.time()
print("%i rows loaded for week %s [total time = %.2fs]" % (len(df), start_day_string, te-ts))
return df
def load_core_places_footprint_data(cols_to_keep):
area_csv = os.path.join(CURRENT_DATA_DIR, 'core_places_footprint/August2020Release/SafeGraphPlacesGeoSupplementSquareFeet.csv.gz')
print('Loading', area_csv)
df = load_csv_possibly_with_dask(area_csv, usecols=cols_to_keep, use_dask=True)
df = df.set_index('safegraph_place_id')
print('Loaded core places footprint data for %d POIs' % len(df))
return df
def load_core_places_data(cols_to_keep):
core_dir = os.path.join(CURRENT_DATA_DIR, 'core_places/2020/10/') # use the most recent core info
dfs = []
for filename in sorted(os.listdir(core_dir)):
if filename.startswith('core_poi-part'):
path_to_csv = os.path.join(core_dir, filename)
print('Loading', path_to_csv)
df = load_csv_possibly_with_dask(path_to_csv, usecols=cols_to_keep, use_dask=True)
dfs.append(df)
df = pd.concat(dfs, axis=0)
df = df.set_index('safegraph_place_id')
print('Loading core places info for %d POIs' % len(df))
return df
def load_google_mobility_data(only_US=True):
df = pd.read_csv(PATH_TO_GOOGLE_DATA)
if only_US:
df = df[df['country_region_code'] == 'US']
return df
def list_datetimes_in_range(min_day, max_day):
"""
Return a list of datetimes in a range from min_day to max_day, inclusive. Increment is one day.
"""
assert(min_day <= max_day)
days = []
while min_day <= max_day:
days.append(min_day)
min_day = min_day + datetime.timedelta(days=1)
return days
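# Example (illustration): list_datetimes_in_range(datetime.datetime(2020, 3, 1), datetime.datetime(2020, 3, 3))
# -> [datetime(2020, 3, 1), datetime(2020, 3, 2), datetime(2020, 3, 3)]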
def list_hours_in_range(min_hour, max_hour):
"""
Return a list of datetimes in a range from min_hour to max_hour, inclusive. Increment is one hour.
"""
assert(min_hour <= max_hour)
hours = []
while min_hour <= max_hour:
hours.append(min_hour)
min_hour = min_hour + datetime.timedelta(hours=1)
return hours
def normalize_dict_values_to_sum_to_one_and_cast_keys_to_ints(old_dict):
"""
Self-explanatory; used by aggregate_visitor_home_cbgs_over_months.
"""
new_dict = {}
value_sum = 1.*sum(old_dict.values())
if len(old_dict) > 0:
assert value_sum > 0
for k in old_dict:
new_dict[int(k)] = old_dict[k] / value_sum
return new_dict
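# Example (illustration): {'10': 2, '30': 6} -> {10: 0.25, 30: 0.75}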
def cast_keys_to_ints(old_dict):
new_dict = {}
for k in old_dict:
new_dict[int(k)] = old_dict[k]
return new_dict
def aggregate_visitor_home_cbgs_over_months(d, cutoff_year=2019, population_df=None, periods_to_include=None):
"""
Aggregate visitor_home_cbgs across months and produce a normalized aggregate field.
Usage: d = aggregate_visitor_home_cbgs_over_months(d).
cutoff = the earliest time (could be year or year.month) to aggregate data from
population_df = the DataFrame loaded by load_dataframe_to_correct_for_population_size
"""
t0 = time.time()
if periods_to_include is not None:
cols = ['%s.visitor_home_cbgs' % period for period in periods_to_include]
assert cutoff_year is None
else:
# Not using CBG data from weekly files for now because of concerns that it's inconsistently
# processed - they change how they do the privacy filtering.
assert cutoff_year is not None
weekly_cols_to_exclude = ['%s.visitor_home_cbgs' % a for a in ALL_WEEKLY_STRINGS]
cols = [a for a in d.columns if (a.endswith('.visitor_home_cbgs') and (a >= str(cutoff_year)) and (a not in weekly_cols_to_exclude))]
print('Aggregating data from: %s' % cols)
assert all([a in d.columns for a in cols])
# Helper variables to use if visitor_home_cbgs counts need adjusting for differential sampling across CBGs.
adjusted_cols = []
if population_df is not None:
int_cbgs = [int(cbg) for cbg in population_df.census_block_group]
for k in cols:
if type(d.iloc[0][k]) != Counter:
print('Filling %s with Counter objects' % k)
d[k] = d[k].fillna('{}').map(lambda x:Counter(cast_keys_to_ints(json.loads(x)))) # map strings to counters.
if population_df is not None:
sub_t0 = time.time()
new_col = '%s_adjusted' % k
assert new_col not in d.columns
total_population = population_df.total_cbg_population.to_numpy()
time_period = k.strip('.visitor_home_cbgs')
population_col = 'number_devices_residing_%s' % time_period
assert(population_col in population_df.columns)
num_devices = population_df[population_col].to_numpy()
assert np.isnan(num_devices).sum() == 0
assert np.isnan(total_population).sum() == 0
cbg_coverage = num_devices / total_population
median_coverage = np.nanmedian(cbg_coverage)
cbg_coverage = dict(zip(int_cbgs, cbg_coverage))
assert ~np.isnan(median_coverage)
assert ~np.isinf(median_coverage)
assert median_coverage > 0.001
# want to make sure we aren't missing data for too many CBGs, so a small hack - have
# adjust_home_cbg_counts_for_coverage return two arguments, where the second argument
# tells us if we had to clip or fill in the missing coverage number.
d[new_col] = d[k].map(lambda x:adjust_home_cbg_counts_for_coverage(x, cbg_coverage, median_coverage=median_coverage))
print('Finished adjusting home CBG counts for %s [time=%.3fs] had to fill in or clip coverage for %2.6f%% of rows; in those cases used median coverage %2.3f' %
(time_period, time.time() - sub_t0, 100 * d[new_col].map(lambda x:x[1]).mean(), median_coverage))
d[new_col] = d[new_col].map(lambda x:x[0]) # remove the second argument of adjust_home_cbg_counts_for_coverage, we don't need it anymore.
adjusted_cols.append(new_col)
# make sure there are no NAs anywhere.
assert d[k].map(lambda x:len([a for a in x.values() if np.isnan(a)])).sum() == 0
assert d[new_col].map(lambda x:len([a for a in x.values() if np.isnan(a)])).sum() == 0
# add counters together across months.
d['aggregated_visitor_home_cbgs'] = d[cols].aggregate(func=sum, axis=1)
# normalize each counter so its values sum to 1.
d['aggregated_visitor_home_cbgs'] = d['aggregated_visitor_home_cbgs'].map(normalize_dict_values_to_sum_to_one_and_cast_keys_to_ints)
if len(adjusted_cols) > 0:
d['aggregated_cbg_population_adjusted_visitor_home_cbgs'] = d[adjusted_cols].aggregate(func=sum, axis=1)
d['aggregated_cbg_population_adjusted_visitor_home_cbgs'] = d['aggregated_cbg_population_adjusted_visitor_home_cbgs'].map(normalize_dict_values_to_sum_to_one_and_cast_keys_to_ints)
d = d.drop(columns=adjusted_cols)
for k in ['aggregated_cbg_population_adjusted_visitor_home_cbgs',
'aggregated_visitor_home_cbgs']:
y = d.loc[d[k].map(lambda x:len(x) > 0), k]
y = y.map(lambda x:sum(x.values()))
assert np.allclose(y, 1)
print("Aggregating CBG visitors over %i time periods took %2.3f seconds" % (len(cols), time.time() - t0))
print("Fraction %2.3f of POIs have CBG visitor data" % (d['aggregated_visitor_home_cbgs'].map(lambda x:len(x) != 0).mean()))
return d
def adjust_home_cbg_counts_for_coverage(cbg_counter, cbg_coverage, median_coverage, max_upweighting_factor=100):
"""
Adjusts the POI-CBG counts from SafeGraph to estimate the true count, based on the
coverage that SafeGraph has for this CBG.
cbg_counter: a Counter object mapping CBG to the original count
cbg_coverage: a dictionary where keys are CBGs and each data point represents SafeGraph's coverage: num_devices / total_population
This should be between 0 and 1 for the vast majority of cases, although for some weird CBGs it may not be.
Returns the adjusted dictionary and a Bool flag had_to_guess_coverage_value which tells us whether we had to adjust the coverage value.
"""
had_to_guess_coverage_value = False
if len(cbg_counter) == 0:
return cbg_counter, had_to_guess_coverage_value
new_counter = Counter()
for cbg in cbg_counter:
# cover some special cases which should happen very rarely.
if cbg not in cbg_coverage:
upweighting_factor = 1 / median_coverage
had_to_guess_coverage_value = True
elif np.isnan(cbg_coverage[cbg]): # not sure this case ever actually happens, but just in case.
upweighting_factor = 1 / median_coverage
had_to_guess_coverage_value = True
else:
assert cbg_coverage[cbg] >= 0
upweighting_factor = 1 / cbg_coverage[cbg] # need to invert coverage
if upweighting_factor > max_upweighting_factor:
upweighting_factor = 1 / median_coverage
had_to_guess_coverage_value = True
new_counter[cbg] = cbg_counter[cbg] * upweighting_factor
return new_counter, had_to_guess_coverage_value
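# Added illustrative sketch (not part of the original pipeline; the CBG ids and coverage numbers below are
# hypothetical): a CBG observed at 50% coverage has its visit count doubled, and a CBG missing from
# cbg_coverage falls back to 1 / median_coverage and raises the had_to_guess_coverage_value flag.
def _example_adjust_home_cbg_counts_for_coverage():
    example_counts = Counter({10010201001: 5, 10010201002: 2})  # hypothetical CBG -> raw visitor count
    example_coverage = {10010201001: 0.5}                       # no coverage estimate for the second CBG
    adjusted, had_to_guess = adjust_home_cbg_counts_for_coverage(example_counts, example_coverage, median_coverage=0.25)
    assert adjusted[10010201001] == 10.0  # 5 / 0.5
    assert adjusted[10010201002] == 8.0   # 2 / 0.25, i.e. median coverage used as the fallback
    assert had_to_guess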
def compute_weighted_mean_of_cbg_visitors(cbg_visitor_fracs, cbg_values):
"""
Given a dictionary cbg_visitor_fracs which gives the fraction of people from a CBG which visit a POI
and a dictionary cbg_values which maps CBGs to values, compute the weighted mean for the POI.
"""
if len(cbg_visitor_fracs) == 0:
return None
else:
numerator = 0.
denominator = 0.
for cbg in cbg_visitor_fracs:
if cbg not in cbg_values:
continue
numerator += cbg_visitor_fracs[cbg] * cbg_values[cbg]
denominator += cbg_visitor_fracs[cbg]
if denominator == 0:
return None
return numerator/denominator
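# Added illustrative sketch (hypothetical numbers): a POI whose visitors come 75% from CBG A (value 10)
# and 25% from CBG B (value 20) gets a weighted mean of (0.75 * 10 + 0.25 * 20) / (0.75 + 0.25) = 12.5.
def _example_compute_weighted_mean_of_cbg_visitors():
    visitor_fracs = {10010201001: 0.75, 10010201002: 0.25}  # hypothetical CBG -> fraction of visitors
    cbg_values = {10010201001: 10.0, 10010201002: 20.0}     # hypothetical CBG -> value, e.g. median income
    assert compute_weighted_mean_of_cbg_visitors(visitor_fracs, cbg_values) == 12.5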
def load_dataframe_for_individual_msa(MSA_name, nrows=None):
"""
This loads all the POI info for a single MSA.
"""
t0 = time.time()
filename = os.path.join(STRATIFIED_BY_AREA_DIR, '%s.csv' % MSA_name)
d = pd.read_csv(filename, nrows=nrows)
for k in (['aggregated_cbg_population_adjusted_visitor_home_cbgs', 'aggregated_visitor_home_cbgs']):
d[k] = d[k].map(lambda x:cast_keys_to_ints(json.loads(x)))
for k in ['%s.visitor_home_cbgs' % a for a in ALL_WEEKLY_STRINGS]:
d[k] = d[k].fillna('{}')
d[k] = d[k].map(lambda x:cast_keys_to_ints(json.loads(x)))
print("Loaded %i rows for %s in %2.3f seconds" % (len(d), MSA_name, time.time() - t0))
return d
def load_dataframe_to_correct_for_population_size(just_load_census_data=False):
"""
Load in a dataframe with rows for the 2018 ACS Census population code in each CBG
and the SafeGraph population count in each CBG (from home-panel-summary.csv).
The correlation is not actually that good, likely because individual CBG counts are noisy.
    Definition of num_devices_residing: Number of distinct devices observed with a primary nighttime location in the specified census block group.
"""
acs_data = pd.read_csv(PATH_TO_ACS_1YR_DATA,
encoding='cp1252',
usecols=['STATEA', 'COUNTYA', 'TRACTA', 'BLKGRPA','AJWBE001'],
dtype={'STATEA':str,
'COUNTYA':str,
'BLKGRPA':str,
'TRACTA':str})
# https://www.census.gov/programs-surveys/geography/guidance/geo-identifiers.html
# FULL BLOCK GROUP CODE = STATE+COUNTY+TRACT+BLOCK GROUP
assert (acs_data['STATEA'].map(len) == 2).all()
assert (acs_data['COUNTYA'].map(len) == 3).all()
assert (acs_data['TRACTA'].map(len) == 6).all()
assert (acs_data['BLKGRPA'].map(len) == 1).all()
acs_data['census_block_group'] = (acs_data['STATEA'] +
acs_data['COUNTYA'] +
acs_data['TRACTA'] +
acs_data['BLKGRPA'])
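    # (added example) e.g. STATEA='01', COUNTYA='001', TRACTA='020100', BLKGRPA='1' concatenate to the
    # 12-character GEOID '010010201001'; the int cast below drops the leading zero, giving 10010201001.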
acs_data['census_block_group'] = acs_data['census_block_group'].astype(int)
assert len(set(acs_data['census_block_group'])) == len(acs_data)
acs_data['county_code'] = (acs_data['STATEA'] + acs_data['COUNTYA']).astype(int)
acs_data = acs_data[['census_block_group', 'AJWBE001', 'STATEA', 'county_code']]
acs_data = acs_data.rename(mapper={'AJWBE001':'total_cbg_population',
'STATEA':'state_code'}, axis=1)
print("%i rows of 2018 1-year ACS data read" % len(acs_data))
if just_load_census_data:
return acs_data
combined_data = acs_data
# now read in safegraph data to use as normalizer. Months and years first.
all_filenames = []
all_date_strings = []
for month, year in [(1, 2017),(2, 2017),(3, 2017),(4, 2017),(5, 2017),(6, 2017),(7, 2017),(8, 2017),(9, 2017),(10, 2017),(11, 2017),(12, 2017),
(1, 2018),(2, 2018),(3, 2018),(4, 2018),(5, 2018),(6, 2018),(7, 2018),(8, 2018),(9, 2018),(10, 2018),(11, 2018),(12, 2018),
(1, 2019),(2, 2019),(3, 2019),(4, 2019),(5, 2019),(6, 2019),(7, 2019),(8, 2019),(9, 2019),(10, 2019),(11, 2019),(12, 2019),
(1, 2020),(2, 2020)]:
if (year == 2019 and month == 12) or (year == 2020 and month in [1, 2]):
upload_date_string = '2020-03-16' # we downloaded files in two groups; load them in the same way.
else:
upload_date_string = '2019-12-12'
month_and_year_string = '%i_%02d-%s' % (year, month, upload_date_string)
filename = os.path.join(UNZIPPED_DATA_DIR,
'SearchofAllRecords-CORE_POI-GEOMETRY-PATTERNS-%s' % month_and_year_string,
'home_panel_summary.csv')
all_filenames.append(filename)
all_date_strings.append('%i.%i' % (year, month))
# now weeks
for date_string in ALL_WEEKLY_STRINGS:
all_filenames.append(os.path.join(PATH_TO_HOME_PANEL_SUMMARY, '%s-home-panel-summary.csv' % date_string))
all_date_strings.append(date_string)
cbgs_with_ratio_above_one = np.array([False for a in range(len(acs_data))])
for filename_idx, filename in enumerate(all_filenames):
date_string = all_date_strings[filename_idx]
print("\n*************")
safegraph_counts = pd.read_csv(filename, dtype={'census_block_group':str})
print("%s: %i devices read from %i rows" % (
date_string, safegraph_counts['number_devices_residing'].sum(), len(safegraph_counts)))
safegraph_counts = safegraph_counts[['census_block_group', 'number_devices_residing']]
col_name = 'number_devices_residing_%s' % date_string
safegraph_counts.columns = ['census_block_group', col_name]
safegraph_counts['census_block_group'] = safegraph_counts['census_block_group'].map(int)
assert len(safegraph_counts['census_block_group'].dropna()) == len(safegraph_counts)
print("Number of unique Census blocks: %i; unique blocks %i: WARNING: DROPPING NON-UNIQUE ROWS" %
(len(safegraph_counts['census_block_group'].drop_duplicates(keep=False)), len(safegraph_counts)))
safegraph_counts = safegraph_counts.drop_duplicates(subset=['census_block_group'], keep=False)
combined_data = pd.merge(combined_data,
safegraph_counts,
how='left',
validate='one_to_one',
on='census_block_group')
missing_data_idxs = pd.isnull(combined_data[col_name])
print("Missing data for %i rows; filling with zeros" % missing_data_idxs.sum())
combined_data.loc[missing_data_idxs, col_name] = 0
r, p = pearsonr(combined_data['total_cbg_population'], combined_data[col_name])
combined_data['ratio'] = combined_data[col_name]/combined_data['total_cbg_population']
cbgs_with_ratio_above_one = cbgs_with_ratio_above_one | (combined_data['ratio'].values > 1)
combined_data.loc[combined_data['total_cbg_population'] == 0, 'ratio'] = None
print("Ratio of SafeGraph count to Census count")
print(combined_data['ratio'].describe(percentiles=[.25, .5, .75, .9, .99, .999]))
print("Correlation between SafeGraph and Census counts: %2.3f" % (r))
print("Warning: %i CBGs with a ratio greater than 1 in at least one month" % cbgs_with_ratio_above_one.sum())
del combined_data['ratio']
combined_data.index = range(len(combined_data))
assert len(combined_data.dropna()) == len(combined_data)
return combined_data
def load_and_reconcile_multiple_acs_data():
"""
Because we use Census data from two data sources, load a single dataframe that combines both.
"""
acs_1_year_d = load_dataframe_to_correct_for_population_size(just_load_census_data=True)
column_rename = {'total_cbg_population':'total_cbg_population_2018_1YR'}
acs_1_year_d = acs_1_year_d.rename(mapper=column_rename, axis=1)
acs_1_year_d['state_name'] = acs_1_year_d['state_code'].map(lambda x:FIPS_CODES_FOR_50_STATES_PLUS_DC[str(x)] if str(x) in FIPS_CODES_FOR_50_STATES_PLUS_DC else np.nan)
acs_5_year_d = pd.read_csv(PATH_TO_ACS_5YR_DATA)
print('%i rows of 2017 5-year ACS data read' % len(acs_5_year_d))
acs_5_year_d['census_block_group'] = acs_5_year_d['GEOID'].map(lambda x:x.split("US")[1]).astype(int)
# rename dynamic attributes to indicate that they are from ACS 2017 5-year
dynamic_attributes = ['p_black', 'p_white', 'p_asian', 'median_household_income',
'block_group_area_in_square_miles', 'people_per_mile']
column_rename = {attr:'%s_2017_5YR' % attr for attr in dynamic_attributes}
acs_5_year_d = acs_5_year_d.rename(mapper=column_rename, axis=1)
# repetitive with 'state_code' and 'county_code' column from acs_1_year_d
acs_5_year_d = acs_5_year_d.drop(['Unnamed: 0', 'STATEFP', 'COUNTYFP'], axis=1)
combined_d = pd.merge(acs_1_year_d, acs_5_year_d, on='census_block_group', how='outer', validate='one_to_one')
combined_d['people_per_mile_hybrid'] = combined_d['total_cbg_population_2018_1YR'] / combined_d['block_group_area_in_square_miles_2017_5YR']
return combined_d
def compute_cbg_day_prop_out(sdm_of_interest, cbgs_of_interest=None):
'''
Computes the proportion of people leaving a CBG on each day.
It returns a new DataFrame, with one row per CBG representing proportions for each day in sdm_of_interest.
sdm_of_interest: a Social Distancing Metrics dataframe, data for the time period of interest
cbgs_of_interest: a list, the CBGs for which to compute reweighting; if None, then
reweighting is computed for all CBGs in sdm_of_interest
---------------------------------------
Sample usage:
sdm_sq = helper.load_social_distancing_metrics(status_quo_days)
days_of_interest = helper.list_datetimes_in_range(datetime.datetime(2020, 3, 1), datetime.datetime(2020, 4, 1))
sdm_of_interest = helper.load_social_distancing_metrics(days_of_interest)
reweightings_df = helper.compute_cbg_day_reweighting( sdm_of_interest)
'''
# Process SDM of interest dataframe
orig_len = len(sdm_of_interest)
interest_num_home_cols = [col for col in sdm_of_interest.columns if col.endswith('completely_home_device_count')]
interest_device_count_cols = [col for col in sdm_of_interest.columns if col.endswith('device_count') and col not in interest_num_home_cols]
sdm_of_interest = sdm_of_interest.dropna(subset=interest_device_count_cols + interest_num_home_cols)
assert sdm_of_interest['census_block_group'].duplicated().sum() == 0
sdm_of_interest.set_index(sdm_of_interest['census_block_group'].values, inplace=True)
print('Kept %i / %i CBGs with non-NaN SDM for days of interest' % (len(sdm_of_interest), orig_len))
if cbgs_of_interest is None:
cbgs_of_interest = sdm_of_interest.census_block_group.unique()
# Find CBGs in common between SDM dataframe and CBGs of interest
cbgs_with_data = set(cbgs_of_interest).intersection(sdm_of_interest.index)
print('Found SDM data for %i / %i CBGs of interest' % (len(cbgs_with_data), len(cbgs_of_interest)))
# Get proportion of population that goes out during days of interest
sub_sdm_int = sdm_of_interest[sdm_of_interest['census_block_group'].isin(cbgs_with_data)]
assert(len(sub_sdm_int) == len(cbgs_with_data))
sub_sdm_int = sub_sdm_int.sort_values(by='census_block_group')
assert list(sub_sdm_int['census_block_group']) == sorted(cbgs_with_data)
int_num_out = sub_sdm_int[interest_device_count_cols].values - sub_sdm_int[interest_num_home_cols].values
int_prop_out = int_num_out / sub_sdm_int[interest_device_count_cols].values
int_prop_out = np.clip(int_prop_out, 1e-10, None) # so that the reweighting is not zero
N, T = int_prop_out.shape
    dates = [col[:-len('_device_count')] for col in interest_device_count_cols] # slice off the suffix; str.strip would remove a set of characters, not the substring
    dates2 = [col[:-len('_completely_home_device_count')] for col in interest_num_home_cols]
assert dates == dates2
sorted_cbgs_with_data = sorted(cbgs_with_data)
prop_df = pd.DataFrame(int_prop_out, columns=dates)
prop_df['census_block_group'] = sorted_cbgs_with_data
# If we could not compute reweighting for a CBG, use median reweighting for that day
if len(cbgs_with_data) < len(cbgs_of_interest):
missing_cbgs = set(cbgs_of_interest) - cbgs_with_data
print('Filling %d CBGs with median props' % len(missing_cbgs))
median_prop = np.median(int_prop_out, axis=0)
missing_props = np.broadcast_to(median_prop, (len(missing_cbgs), T))
missing_props_df = pd.DataFrame(missing_props, columns=dates)
missing_props_df['census_block_group'] = list(missing_cbgs)
prop_df = pd.concat((prop_df, missing_props_df))
return prop_df
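# Added note: for each CBG and day, the proportion out is
# (device_count - completely_home_device_count) / device_count, clipped to at least 1e-10 so the
# reweighting is never exactly zero; CBGs without SDM data are filled with that day's median proportion.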
def write_out_acs_5_year_data():
cbg_mapper = CensusBlockGroups(base_directory=PATH_FOR_CBG_MAPPER, gdb_files=None)
geometry_cols = ['STATEFP',
'COUNTYFP',
'TRACTCE',
'Metropolitan/Micropolitan Statistical Area',
'CBSA Title',
'State Name']
block_group_cols = ['GEOID',
'p_black',
'p_white',
'p_asian',
'median_household_income',
'block_group_area_in_square_miles',
'people_per_mile']
for k in geometry_cols:
cbg_mapper.block_group_d[k] = cbg_mapper.geometry_d[k].values
df_to_write_out = cbg_mapper.block_group_d[block_group_cols + geometry_cols]
print("Total rows: %i" % len(df_to_write_out))
print("Missing data")
print(pd.isnull(df_to_write_out).mean())
df_to_write_out.to_csv(PATH_TO_ACS_5YR_DATA)
class CensusBlockGroups:
"""
A class for loading geographic and demographic data from the ACS.
A census block group is a relatively small area.
    Less fine-grained than individual households, but still fairly granular. https://en.wikipedia.org/wiki/Census_block_group
Data was downloaded from https://www.census.gov/geographies/mapping-files/time-series/geo/tiger-data.html
We use the most recent ACS 5-year estimates: 2013-2017, eg:
wget https://www2.census.gov/geo/tiger/TIGER_DP/2017ACS/ACS_2017_5YR_BG.gdb.zip
These files are convenient because they combine both geographic boundaries + demographic data, leading to a cleaner join.
The main method for data access is get_demographic_stats_of_point. Sample usage:
x = CensusBlockGroups(gdb_files=['ACS_2017_5YR_BG_51_VIRGINIA.gdb'])
x.get_demographic_stats_of_points(latitudes=[38.8816], longitudes=[-77.0910], desired_cols=['p_black', 'p_white', 'mean_household_income'])
"""
def __init__(self, base_directory=PATH_TO_CENSUS_BLOCK_GROUP_DATA,
gdb_files=None,
county_to_msa_mapping_filepath=PATH_TO_COUNTY_TO_MSA_MAPPING):
self.base_directory = base_directory
if gdb_files is None:
self.gdb_files = ['ACS_2017_5YR_BG.gdb']
else:
self.gdb_files = gdb_files
self.crs_to_use = WGS_84_CRS # https://epsg.io/4326, WGS84 - World Geodetic System 1984, used in GPS.
self.county_to_msa_mapping_filepath = county_to_msa_mapping_filepath
self.load_raw_dataframes() # Load in raw geometry and demographic dataframes.
# annotate demographic data with more useful columns.
self.annotate_with_race()
self.annotate_with_income()
self.annotate_with_counties_to_msa_mapping()
self.annotate_with_area_and_pop_density()
def annotate_with_area_and_pop_density(self):
# https://gis.stackexchange.com/questions/218450/getting-polygon-areas-using-geopandas.
# See comments about using cea projection.
gdf = self.geometry_d[['geometry']].copy().to_crs({'proj':'cea'})
area_in_square_meters = gdf['geometry'].area.values
self.block_group_d['block_group_area_in_square_miles'] = area_in_square_meters / (1609.34 ** 2)
self.block_group_d['people_per_mile'] = (self.block_group_d['B03002e1'] /
self.block_group_d['block_group_area_in_square_miles'])
print(self.block_group_d[['block_group_area_in_square_miles', 'people_per_mile']].describe())
def annotate_with_race(self):
"""
Analysis focuses on black and non-white population groups. Also annotate with p_asian because of possible anti-Asian discrimination.
B03002e1 HISPANIC OR LATINO ORIGIN BY RACE: Total: Total population -- (Estimate)
B03002e3 HISPANIC OR LATINO ORIGIN BY RACE: Not Hispanic or Latino: White alone: Total population -- (Estimate)
B03002e4 HISPANIC OR LATINO ORIGIN BY RACE: Not Hispanic or Latino: Black or African American alone: Total population -- (Estimate)
B03002e6 HISPANIC OR LATINO ORIGIN BY RACE: Not Hispanic or Latino: Asian alone: Total population -- (Estimate)
"""
print("annotating with race")
self.block_group_d['p_black'] = self.block_group_d['B03002e4'] / self.block_group_d['B03002e1']
self.block_group_d['p_white'] = self.block_group_d['B03002e3'] / self.block_group_d['B03002e1']
self.block_group_d['p_asian'] = self.block_group_d['B03002e6'] / self.block_group_d['B03002e1']
print(self.block_group_d[['p_black', 'p_white', 'p_asian']].describe())
def load_raw_dataframes(self):
"""
Read in the original demographic + geographic data.
"""
self.block_group_d = None
self.geometry_d = None
demographic_layer_names = ['X25_HOUSING_CHARACTERISTICS', 'X01_AGE_AND_SEX', 'X03_HISPANIC_OR_LATINO_ORIGIN', 'X19_INCOME']
for file in self.gdb_files:
# https://www.reddit.com/r/gis/comments/775imb/accessing_a_gdb_without_esri_arcgis/doj9zza
full_path = os.path.join(self.base_directory, file)
layer_list = fiona.listlayers(full_path)
print(file)
print(layer_list)
geographic_layer_name = [a for a in layer_list if a[:15] == 'ACS_2017_5YR_BG']
assert len(geographic_layer_name) == 1
geographic_layer_name = geographic_layer_name[0]
geographic_data = geopandas.read_file(full_path, layer=geographic_layer_name).to_crs(self.crs_to_use)
# by default when you use the read file command, the column containing spatial objects is named "geometry", and will be set as the active column.
print(geographic_data.columns)
geographic_data = geographic_data.sort_values(by='GEOID_Data')[['GEOID_Data', 'geometry', 'STATEFP', 'COUNTYFP', 'TRACTCE']]
for demographic_idx, demographic_layer_name in enumerate(demographic_layer_names):
assert demographic_layer_name in layer_list
if demographic_idx == 0:
demographic_data = geopandas.read_file(full_path, layer=demographic_layer_name)
else:
old_len = len(demographic_data)
new_df = geopandas.read_file(full_path, layer=demographic_layer_name)
assert sorted(new_df['GEOID']) == sorted(demographic_data['GEOID'])
demographic_data = demographic_data.merge(new_df, on='GEOID', how='inner')
assert old_len == len(demographic_data)
demographic_data = demographic_data.sort_values(by='GEOID')
shared_geoids = set(demographic_data['GEOID'].values).intersection(set(geographic_data['GEOID_Data'].values))
print("Length of demographic data: %i; geographic data %i; %i GEOIDs in both" % (len(demographic_data), len(geographic_data), len(shared_geoids)))
demographic_data = demographic_data.loc[demographic_data['GEOID'].map(lambda x:x in shared_geoids)]
geographic_data = geographic_data.loc[geographic_data['GEOID_Data'].map(lambda x:x in shared_geoids)]
demographic_data.index = range(len(demographic_data))
geographic_data.index = range(len(geographic_data))
assert (geographic_data['GEOID_Data'] == demographic_data['GEOID']).all()
assert len(geographic_data) == len(set(geographic_data['GEOID_Data']))
if self.block_group_d is None:
self.block_group_d = demographic_data
else:
self.block_group_d = pd.concat([self.block_group_d, demographic_data])
if self.geometry_d is None:
self.geometry_d = geographic_data
else:
self.geometry_d = pd.concat([self.geometry_d, geographic_data])
assert pd.isnull(self.geometry_d['STATEFP']).sum() == 0
good_idxs = self.geometry_d['STATEFP'].map(lambda x:x in FIPS_CODES_FOR_50_STATES_PLUS_DC).values
print("Warning: the following State FIPS codes are being filtered out")
print(self.geometry_d.loc[~good_idxs, 'STATEFP'].value_counts())
print("%i/%i Census Block Groups in total removed" % ((~good_idxs).sum(), len(good_idxs)))
self.geometry_d = self.geometry_d.loc[good_idxs]
self.block_group_d = self.block_group_d.loc[good_idxs]
self.geometry_d.index = self.geometry_d['GEOID_Data'].values
self.block_group_d.index = self.block_group_d['GEOID'].values
def annotate_with_income(self):
"""
We want a single income number for each block group. This method computes that.
"""
print("Computing household income")
# copy-pasted column definitions right out of the codebook.
codebook_string = """
B19001e2 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): Less than $10,000: Households -- (Estimate)
B19001e3 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $10,000 to $14,999: Households -- (Estimate)
B19001e4 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $15,000 to $19,999: Households -- (Estimate)
B19001e5 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $20,000 to $24,999: Households -- (Estimate)
B19001e6 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $25,000 to $29,999: Households -- (Estimate)
B19001e7 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $30,000 to $34,999: Households -- (Estimate)
B19001e8 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $35,000 to $39,999: Households -- (Estimate)
B19001e9 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $40,000 to $44,999: Households -- (Estimate)
B19001e10 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $45,000 to $49,999: Households -- (Estimate)
B19001e11 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $50,000 to $59,999: Households -- (Estimate)
B19001e12 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $60,000 to $74,999: Households -- (Estimate)
B19001e13 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $75,000 to $99,999: Households -- (Estimate)
B19001e14 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $100,000 to $124,999: Households -- (Estimate)
B19001e15 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $125,000 to $149,999: Households -- (Estimate)
B19001e16 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $150,000 to $199,999: Households -- (Estimate)
B19001e17 HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): $200,000 or more: Households -- (Estimate)
"""
self.income_bin_edges = [0] + list(range(10000, 50000, 5000)) + [50000, 60000, 75000, 100000, 125000, 150000, 200000]
income_column_names_to_vals = {}
column_codes = codebook_string.split('\n')
for f in column_codes:
if len(f.strip()) == 0:
continue
col_name = f.split('HOUSEHOLD INCOME')[0].strip()
if col_name == 'B19001e2':
val = 10000
elif col_name == 'B19001e17':
val = 200000
else:
lower_bound = float(f.split('$')[1].split()[0].replace(',', ''))
upper_bound = float(f.split('$')[2].split(':')[0].replace(',', ''))
val = (lower_bound + upper_bound) / 2
income_column_names_to_vals[col_name] = val
print("The value for column %s is %2.1f" % (col_name, val))
# each column gives the count of households with that income. So we need to take a weighted sum to compute the average income.
self.block_group_d['total_household_income'] = 0.
self.block_group_d['total_households'] = 0.
for col in income_column_names_to_vals:
self.block_group_d['total_household_income'] += self.block_group_d[col] * income_column_names_to_vals[col]
self.block_group_d['total_households'] += self.block_group_d[col]
self.block_group_d['mean_household_income'] = 1.*self.block_group_d['total_household_income'] / self.block_group_d['total_households']
self.block_group_d['median_household_income'] = self.block_group_d['B19013e1'] # MEDIAN HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2017 INFLATION-ADJUSTED DOLLARS): Median household income in the past 12 months (in 2017 inflation-adjusted dollars): Households -- (Estimate)
assert (self.block_group_d['total_households'] == self.block_group_d['B19001e1']).all() # sanity check: our count should agree with theirs.
assert (pd.isnull(self.block_group_d['mean_household_income']) == (self.block_group_d['B19001e1'] == 0)).all()
print("Warning: missing income data for %2.1f%% of census blocks with 0 households" % (pd.isnull(self.block_group_d['mean_household_income']).mean() * 100))
self.income_column_names_to_vals = income_column_names_to_vals
assert len(self.income_bin_edges) == len(self.income_column_names_to_vals)
print(self.block_group_d[['mean_household_income', 'total_households']].describe())
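    # Added illustrative sketch (hypothetical counts): 10 households in the $10,000-$14,999 bin
    # (midpoint 12,499.5) and 10 households in the $45,000-$49,999 bin (midpoint 47,499.5) give
    # mean_household_income = (10 * 12499.5 + 10 * 47499.5) / 20 = 29,999.5.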
def annotate_with_counties_to_msa_mapping(self):
"""
Annotate with metropolitan area info for consistency with Experienced Segregation paper.
# https://www2.census.gov/programs-surveys/metro-micro/geographies/reference-files/2017/delineation-files/list1.xls
"""
print("Loading county to MSA mapping")
self.counties_to_msa_df = pd.read_csv(self.county_to_msa_mapping_filepath, skiprows=2, dtype={'FIPS State Code':str, 'FIPS County Code':str})
print("%i rows read" % len(self.counties_to_msa_df))
self.counties_to_msa_df = self.counties_to_msa_df[['CBSA Title',
'Metropolitan/Micropolitan Statistical Area',
'State Name',
'FIPS State Code',
'FIPS County Code']]
self.counties_to_msa_df.columns = ['CBSA Title',
'Metropolitan/Micropolitan Statistical Area',
'State Name',
'STATEFP',
'COUNTYFP']
self.counties_to_msa_df = self.counties_to_msa_df.dropna(how='all') # remove a couple blank rows.
assert self.counties_to_msa_df['Metropolitan/Micropolitan Statistical Area'].map(lambda x:x in ['Metropolitan Statistical Area', 'Micropolitan Statistical Area']).all()
print("Number of unique Metropolitan statistical areas: %i" %
len(set(self.counties_to_msa_df.loc[self.counties_to_msa_df['Metropolitan/Micropolitan Statistical Area'] == 'Metropolitan Statistical Area', 'CBSA Title'])))
print("Number of unique Micropolitan statistical areas: %i" %
len(set(self.counties_to_msa_df.loc[self.counties_to_msa_df['Metropolitan/Micropolitan Statistical Area'] == 'Micropolitan Statistical Area', 'CBSA Title'])))
old_len = len(self.geometry_d)
assert len(self.counties_to_msa_df.drop_duplicates(['STATEFP', 'COUNTYFP'])) == len(self.counties_to_msa_df)
self.geometry_d = self.geometry_d.merge(self.counties_to_msa_df,
on=['STATEFP', 'COUNTYFP'],
how='left')
        # pd.merge on columns returns a fresh RangeIndex rather than preserving ours, so restore the GEOID_Data index.
self.geometry_d.index = self.geometry_d['GEOID_Data'].values
assert len(self.geometry_d) == old_len
assert (self.geometry_d.index == self.block_group_d.index).all()
def get_demographic_stats_of_points(self, latitudes, longitudes, desired_cols):
"""
Given a list or array of latitudes and longitudes, matches to Census Block Group.
Returns a dictionary which includes the state and county FIPS code, along with any columns in desired_cols.
This method assumes the latitudes and longitudes are in https://epsg.io/4326, which is what I think is used for Android/iOS -> SafeGraph coordinates.
"""
def dtype_pandas_series(obj):
return str(type(obj)) == "<class 'pandas.core.series.Series'>"
assert not dtype_pandas_series(latitudes)
assert not dtype_pandas_series(longitudes)
assert len(latitudes) == len(longitudes)
t0 = time.time()
        # we do the spatial join one million points at a time because doing it all at once triggers memory warnings.
start_idx = 0
end_idx = start_idx + int(1e6)
merged = []
while start_idx < len(longitudes):
print("Doing spatial join on points with indices from %i-%i" % (start_idx, min(end_idx, len(longitudes))))
points = geopandas.GeoDataFrame(pd.DataFrame({'placeholder':np.array(range(start_idx, min(end_idx, len(longitudes))))}), # this column doesn't matter. We just have to create a geo data frame.
geometry=geopandas.points_from_xy(longitudes[start_idx:end_idx], latitudes[start_idx:end_idx]),
crs=self.crs_to_use)
# see eg gdf = geopandas.GeoDataFrame(df, geometry=geopandas.points_from_xy(df.Longitude, df.Latitude)). http://geopandas.org/gallery/create_geopandas_from_pandas.html
merged.append(sjoin(points, self.geometry_d[['geometry']], how='left', op='within'))
assert len(merged[-1]) == len(points)
start_idx += int(1e6)
end_idx += int(1e6)
merged = pd.concat(merged)
merged.index = range(len(merged))
assert list(merged.index) == list(merged['placeholder'])
could_not_match = pd.isnull(merged['index_right']).values
print("Cannot match to a CBG for a fraction %2.3f of points" % could_not_match.mean())
results = {}
for k in desired_cols + ['state_fips_code', 'county_fips_code', 'Metropolitan/Micropolitan Statistical Area', 'CBSA Title', 'GEOID_Data', 'TRACTCE']:
results[k] = [None] * len(latitudes)
results = pd.DataFrame(results)
matched_geoids = merged['index_right'].values[~could_not_match]
for c in desired_cols:
results.loc[~could_not_match, c] = self.block_group_d.loc[matched_geoids, c].values
if c in ['p_white', 'p_black', 'mean_household_income', 'median_household_income', 'new_census_monthly_rent_to_annual_income_multiplier', 'new_census_median_monthly_rent_to_annual_income_multiplier']:
results[c] = results[c].astype('float')
results.loc[~could_not_match, 'state_fips_code'] = self.geometry_d.loc[matched_geoids, 'STATEFP'].values
results.loc[~could_not_match, 'county_fips_code'] = self.geometry_d.loc[matched_geoids, 'COUNTYFP'].values
results.loc[~could_not_match, 'Metropolitan/Micropolitan Statistical Area'] = self.geometry_d.loc[matched_geoids,'Metropolitan/Micropolitan Statistical Area'].values
results.loc[~could_not_match, 'CBSA Title'] = self.geometry_d.loc[matched_geoids, 'CBSA Title'].values
results.loc[~could_not_match, 'GEOID_Data'] = self.geometry_d.loc[matched_geoids, 'GEOID_Data'].values
results.loc[~could_not_match, 'TRACTCE'] = self.geometry_d.loc[matched_geoids, 'TRACTCE'].values
print("Total query time is %2.3f" % (time.time() - t0))
return results
|
snap-stanford/covid-mobility
|
helper_methods_for_aggregate_data_analysis.py
|
helper_methods_for_aggregate_data_analysis.py
|
py
| 68,047 |
python
|
en
|
code
| 146 |
github-code
|
6
|
8655705907
|
import errno
import os
import requests
from pathlib import Path
import sly_globals as g
import supervisely as sly
from supervisely.app.v1.widgets.progress_bar import ProgressBar
progress5 = ProgressBar(g.task_id, g.api, "data.progress5", "Download weights", is_size=True, min_report_percent=5)
local_weights_path = None
def get_models_list():
from train import model_list
res = []
for name, data in model_list.items():
res.append({
"model": name,
"description": data["description"]
})
return res
def get_table_columns():
return [
{"key": "model", "title": "Model", "subtitle": None},
{"key": "description", "title": "Description", "subtitle": None},
]
def get_model_info_by_name(name):
models = get_models_list()
for info in models:
if info["model"] == name:
return info
raise KeyError(f"Model {name} not found")
def init(data, state):
models = get_models_list()
data["models"] = models
data["modelColumns"] = get_table_columns()
state["selectedModel"] = models[0]["model"]
state["weightsInitialization"] = "random" # "custom"
state["collapsed5"] = True
state["disabled5"] = True
progress5.init_data(data)
state["weightsPath"] = ""
data["done5"] = False
def restart(data, state):
data["done5"] = False
@g.my_app.callback("download_weights")
@sly.timeit
@g.my_app.ignore_errors_and_show_dialog_window()
def download_weights(api: sly.Api, task_id, context, state, app_logger):
#"https://download.pytorch.org/models/vgg11-8a719046.pth" to /root/.cache/torch/hub/checkpoints/vgg11-8a719046.pth
from train import model_list
global local_weights_path
try:
if state["weightsInitialization"] == "custom":
weights_path_remote = state["weightsPath"]
if not weights_path_remote.endswith(".pth"):
raise ValueError(f"Weights file has unsupported extension {sly.fs.get_file_ext(weights_path_remote)}. "
f"Supported: '.pth'")
# get architecture type from previous UI state
prev_state_path_remote = os.path.join(str(Path(weights_path_remote).parents[1]), "info/ui_state.json")
prev_state_path = os.path.join(g.my_app.data_dir, "ui_state.json")
api.file.download(g.team_id, prev_state_path_remote, prev_state_path)
prev_state = sly.json.load_json_file(prev_state_path)
api.task.set_field(g.task_id, "state.selectedModel", prev_state["selectedModel"])
local_weights_path = os.path.join(g.my_app.data_dir, sly.fs.get_file_name_with_ext(weights_path_remote))
if sly.fs.file_exists(local_weights_path) is False:
file_info = g.api.file.get_info_by_path(g.team_id, weights_path_remote)
if file_info is None:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), weights_path_remote)
progress5.set_total(file_info.sizeb)
g.api.file.download(g.team_id, weights_path_remote, local_weights_path, g.my_app.cache, progress5.increment)
progress5.reset_and_update()
else:
weights_url = model_list[state["selectedModel"]].get("pretrained")
if weights_url is not None:
default_pytorch_dir = "/root/.cache/torch/hub/checkpoints/"
#local_weights_path = os.path.join(g.my_app.data_dir, sly.fs.get_file_name_with_ext(weights_url))
local_weights_path = os.path.join(default_pytorch_dir, sly.fs.get_file_name_with_ext(weights_url))
if sly.fs.file_exists(local_weights_path) is False:
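                    # (added note) HEAD request first: read Content-Length so the progress bar total is known before streaming the download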
response = requests.head(weights_url, allow_redirects=True)
sizeb = int(response.headers.get('content-length', 0))
progress5.set_total(sizeb)
os.makedirs(os.path.dirname(local_weights_path), exist_ok=True)
sly.fs.download(weights_url, local_weights_path, g.my_app.cache, progress5.increment)
progress5.reset_and_update()
sly.logger.info("Pretrained weights has been successfully downloaded",
extra={"weights": local_weights_path})
except Exception as e:
progress5.reset_and_update()
raise e
fields = [
{"field": "data.done5", "payload": True},
{"field": "state.collapsed6", "payload": False},
{"field": "state.disabled6", "payload": False},
{"field": "state.activeStep", "payload": 6},
]
g.api.app.set_fields(g.task_id, fields)
def restart(data, state):
data["done5"] = False
|
supervisely-ecosystem/unet
|
supervisely/train/src/ui/step05_models.py
|
step05_models.py
|
py
| 4,736 |
python
|
en
|
code
| 2 |
github-code
|
6
|
5475332432
|
class Empleado():
def __init__(self, nombre, cargo, salario):
self.nombre = nombre
self.cargo = cargo
self.salario = salario
def __str__(self):
return "{} que trabaja como {} tiene un salario de {} €".format(self.nombre, self.cargo, self.salario)
listaEmpleados=[
Empleado("juan", "director", 75000),
Empleado("ana", "presidente", 85000),
Empleado("antonio", "administrativo", 45000),
Empleado("sara", "analista", 25000),
Empleado("mario", "secratario", 15000)
]
salarios_altos=filter(lambda empleado:empleado.salario>50000, listaEmpleados)
for empleado_salario in salarios_altos:
    print(empleado_salario)
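# Note (added): the __str__ message reads "<name> who works as <role> has a salary of <amount> €".
# filter() returns a lazy iterator that can only be traversed once; an equivalent list comprehension is:
# salarios_altos = [empleado for empleado in listaEmpleados if empleado.salario > 50000]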
|
mivargas/ejercicios-de-python
|
funcion_filter2.py
|
funcion_filter2.py
|
py
| 626 |
python
|
es
|
code
| 0 |
github-code
|
6
|
108222953
|
'''
Created on Oct 31, 2010
@author: pekka
'''
from event import MapBuiltEvent, SectorsLitRequest, CharactorMoveEvent, CharactorTurnAndMoveRequest, \
DimAllSectorsRequest, CharactorPlaceEvent, CalculatePathRequest, OccupiedSectorAction, \
FreeSectorAction, ActiveCharactorChangeEvent, CharactorPlaceRequest
import constants
import math
from astar import a_star
#------------------------------------------------------------------------------
class Map:
"""..."""
STATE_PREPARING = 0
STATE_BUILT = 1
#----------------------------------------------------------------------
def __init__(self, event_manager, grid_size_x, grid_size_y, walls_up, walls_right, walls_left, walls_down):
self.event_manager = event_manager
self.event_manager.register_listener( self )
self.state = Map.STATE_PREPARING
self.grid_size_x = grid_size_x
self.grid_size_y = grid_size_y
self.sectors = None
self.free_start_sector_indices = [0, 1, 2, 3]
self.map_state = MapState(event_manager)
self.walls_up = walls_up
self.walls_right = walls_right
self.walls_left = walls_left
self.walls_down = walls_down
#----------------------------------------------------------------------
def build(self):
self.sectors = [Sector(x) for x in xrange(self.grid_size_x*self.grid_size_y)]
for i, sector in enumerate(self.sectors):
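            # (added note) sectors are stored row-major: sector i sits at row i // grid_size_x and column
            # i % grid_size_x, so its UP neighbour is i - grid_size_x, DOWN is i + grid_size_x, LEFT/RIGHT are i -/+ 1.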
if i > self.grid_size_x-1: #not first row
sector.neighbors[constants.DIRECTION_UP] = self.sectors[i-self.grid_size_x]
upleft = i-(self.grid_size_x+1)
if upleft > -1 and not (upleft+1) % self.grid_size_x == 0:
sector.corners[constants.DIRECTION_UP_LEFT] = self.sectors[upleft]
upright = i-(self.grid_size_x-1)
if not (upright) % self.grid_size_x == 0:
sector.corners[constants.DIRECTION_UP_RIGHT] = self.sectors[upright]
if i == 0 or not (i+1) % self.grid_size_x == 0: #not rightmost column
sector.neighbors[constants.DIRECTION_RIGHT] = self.sectors[i+1]
if i < self.grid_size_x*(self.grid_size_y-1): #not last row
sector.neighbors[constants.DIRECTION_DOWN] = self.sectors[i+self.grid_size_x]
downleft = i+(self.grid_size_x-1)
if not (downleft+1) % self.grid_size_x == 0 :
sector.corners[constants.DIRECTION_DOWN_LEFT] = self.sectors[downleft]
downright = i+self.grid_size_x+1
if downright < self.grid_size_x*self.grid_size_y and not (downright) % self.grid_size_x == 0:
sector.corners[constants.DIRECTION_DOWN_RIGHT] = self.sectors[downright]
if not i % self.grid_size_x == 0: #not leftmost column
sector.neighbors[constants.DIRECTION_LEFT] = self.sectors[i-1]
for i in self.walls_up:
self.sectors[i].neighbors[constants.DIRECTION_UP] = None
for i in self.walls_right:
self.sectors[i].neighbors[constants.DIRECTION_RIGHT] = None
for i in self.walls_down:
self.sectors[i].neighbors[constants.DIRECTION_DOWN] = None
for i in self.walls_left:
self.sectors[i].neighbors[constants.DIRECTION_LEFT] = None
for sector in self.sectors:
for corner in sector.corners:
if not self._is_open_corner_of(corner, sector):
sector.corners[sector.corners.index(corner)] = None
self.state = Map.STATE_BUILT
new_event = MapBuiltEvent(self)
self.event_manager.post(new_event)
def _is_open_corner_of(self, corner, sector):
for neighbor in sector.neighbors:
if not neighbor == None and corner in neighbor.neighbors:
return True
return False
def fov(self, charactor):
angle = 0
lit_sectors = set()
lit_sectors.add(charactor.sector)
while angle < 360:
delta_x = math.cos(angle*0.01745)
delta_y = math.sin(angle*0.01745)
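            # (added note) 0.01745 ~= pi / 180, converting the angle in degrees to radians for cos/sin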
lit_sectors = lit_sectors.union(self.determine_fov(charactor.sector, charactor.radius, delta_x, delta_y))
angle += 6 #magic number here
new_event = DimAllSectorsRequest()
self.event_manager.post(new_event)
new_event = SectorsLitRequest(lit_sectors)
self.event_manager.post(new_event)
#----------------------------------------------------------------------
def determine_fov(self, sector, radius, delta_x, delta_y):
i = 0
original_x = self.sector_x(sector)+0.5
original_y = self.sector_y(sector)+0.5
lit_sectors = []
while i < radius:
old_sector = self.sector_by_coordinates((original_x), (original_y))
original_x += delta_x
original_y += delta_y
new_sector = self.sector_by_coordinates((original_x), (original_y))
if not new_sector == None:
if not new_sector == old_sector and (new_sector in old_sector.neighbors or new_sector in old_sector.corners):
lit_sectors.append(new_sector)
else:
return lit_sectors
i += 1
return lit_sectors
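    # (added note) fov() sweeps a ray every 6 degrees around the charactor; determine_fov() walks each ray
    # one sector per step up to `radius`, lighting sectors while consecutive sectors stay connected
    # (neighbours or open corners) and stopping at the first wall it hits.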
def sector_x(self, sector):
return self.sectors.index(sector) % self.grid_size_x
def sector_y(self, sector):
return self.sectors.index(sector)/self.grid_size_x
def sector_by_coordinates(self, x_coordinate, y_coordinate):
if x_coordinate >= 0 and x_coordinate < self.grid_size_x and y_coordinate >= 0 and y_coordinate < self.grid_size_y:
            index = int(math.floor(y_coordinate)*self.grid_size_x + math.floor(x_coordinate)) # use the grid width instead of the hard-coded 11 flagged as a magic number
if index > -1:
return self.sectors[index]
def charactor_by_coordinates(self, x_coordinate, y_coordinate):
sector = self.sector_by_coordinates(x_coordinate/constants.GRID_SIZE, y_coordinate/constants.GRID_SIZE)
if sector == None or self.map_state.sector_is_free(sector):
return None
else:
return self.map_state.actors_by_sector_id.get(sector.sector_id, -1)
#----------------------------------------------------------------------
def notify(self, event):
if isinstance(event, CharactorMoveEvent) or isinstance(event, CharactorPlaceEvent) or isinstance(event, ActiveCharactorChangeEvent):
self.fov(event.charactor)
elif isinstance(event, CalculatePathRequest):
goal = self.sector_by_coordinates(event.pos[0]/constants.GRID_SIZE, event.pos[1]/constants.GRID_SIZE)
path = a_star(event.start_sector, goal, self)
if not path == None:
path.append(goal)
for index, node in enumerate(path):
if index < len(path)-1:
new_event = CharactorTurnAndMoveRequest(node.neighbors.index(path[index+1]))
self.event_manager.post(new_event)
elif isinstance(event, OccupiedSectorAction):
event.function(self.charactor_by_coordinates(event.pos[0], event.pos[1]))
elif isinstance(event, CharactorPlaceRequest):
if not len(self.free_start_sector_indices) == 0:
event.charactor.place(self.sectors[self.free_start_sector_indices.pop(0)])
#------------------------------------------------------------------------------
class Sector:
"""..."""
def __init__(self, sector_id=0):
self.sector_id = sector_id
self.neighbors = range(4)
self.corners = range(4)
self.neighbors[constants.DIRECTION_UP] = None
self.neighbors[constants.DIRECTION_DOWN] = None
self.neighbors[constants.DIRECTION_LEFT] = None
self.neighbors[constants.DIRECTION_RIGHT] = None
self.corners[constants.DIRECTION_UP_RIGHT] = None
self.corners[constants.DIRECTION_DOWN_RIGHT] = None
self.corners[constants.DIRECTION_DOWN_LEFT] = None
self.corners[constants.DIRECTION_UP_LEFT] = None
#----------------------------------------------------------------------
def move_possible(self, direction):
if self.neighbors[direction]:
return True
else:
return False
def __repr__(self):
result = "[Sector] "
result += "id: %s, " % (self.sector_id, )
result += "neighbors: %s, " % ([neighbor.sector_id for neighbor in self.neighbors if not neighbor == None], )
result += "open corners: %s" % ([open_corner.sector_id for open_corner in self.corners if not open_corner == None])
return result
class MapState:
"""Keeps record of occupied sectors and actors occupying them"""
def __init__(self, event_manager):
self.event_manager = event_manager
event_manager.register_listener(self)
self.occupied_sectors_by_actor_id = {}
self.actors_by_sector_id = {}
def sector_is_free(self, sector):
if sector not in self.occupied_sectors_by_actor_id.values():
return True
return False
def notify(self, event):
if isinstance(event, CharactorPlaceEvent) or isinstance(event, CharactorMoveEvent):
self.occupied_sectors_by_actor_id[event.charactor.charactor_id] = event.charactor.sector
self.actors_by_sector_id[event.charactor.sector.sector_id] = event.charactor
#print [(c,d) for c,d in enumerate(self.actors_by_sector_id)]
#print [(c, d) for c,d in enumerate(self.occupied_sectors_by_actor_id)]
elif isinstance(event, FreeSectorAction):
event.function(self.sector_is_free(event.sector))
|
speque/shallowspace
|
shallowspace/map.py
|
map.py
|
py
| 10,022 |
python
|
en
|
code
| 2 |
github-code
|
6
|
40260766080
|
import gvar as gv
import corrfitter as cf
import numpy as np
import collections
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import MultipleLocator
matplotlib.use('Agg')
plt.rc("font",**{"size":18})
import datetime
import os
import pickle
import copy
#from plotting import *
import lsqfit
lsqfit.nonlinear_fit.set(fitter='gsl_multifit',alg='subspace2D',scaler='more',solver='cholesky')#,solver='cholesky')
####################################
maxiter=5000
#######################################################################################################
def read_setup(setup):
#Reads in setups, and strips out currents, parents and daughters, as well as which is which
daughters = []
currents = []
parents = []
for element in setup:
lab = element.split('-')
daughters.append(lab[0])
currents.append(lab[1])
parents.append(lab[2])
return(daughters,currents,parents)
######################################################################################################
def strip_list(l): # joins the elements of list l into a single string
stripped = ''
for element in l:
stripped = '{0}{1}'.format(stripped,element)
return(stripped)
######################################################################################################
def make_params(Fit,FitMasses,FitTwists,FitTs,daughters,currents,parents):
    # Removes things we do not want to fit, as specified by FitMasses, FitTwists and FitTs; assumes parents have varying mass and daughters varying twist
j = 0
for i in range(len(Fit['masses'])):
if i not in FitMasses:
del Fit['masses'][i-j]
for element in set(parents):
del Fit['tmaxes{0}'.format(element)][i-j]
j += 1
j = 0
for i in range(len(Fit['twists'])):
if i not in FitTwists:
del Fit['twists'][i-j]
for element in set(daughters):
del Fit['tmaxes{0}'.format(element)][i-j]
j += 1
j = 0
for i in range(len(Fit['Ts'])):
if i not in FitTs:
del Fit['Ts'][i-j]
j += 1
return()
#######################################################################################################
def make_data(filename,binsize):
# Reads in filename.gpl, checks all keys have same configuration numbers, returns averaged data
print('Reading data, binsize = ', binsize)
dset = cf.read_dataset(filename,binsize=binsize)
sizes = []
for key in dset:
#print(key,np.shape(dset[key]))
sizes.append(np.shape(dset[key]))
if len(set(sizes)) != 1:
print('Not all elements of gpl the same size')
for key in dset:
print(key,np.shape(dset[key]))
return(gv.dataset.avg_data(dset))
######################################################################################################
def make_pdata(filename,models,binsize):
    # Reads in filename.gpl, checks all keys have the same configuration numbers, returns the dataset processed for the given models
print('Reading processed data, binsize = ', binsize)
dset = cf.read_dataset(filename,binsize=binsize)
sizes = []
for key in dset:
#print(key,np.shape(dset[key]))
sizes.append(np.shape(dset[key]))
if len(set(sizes)) != 1:
print('Not all elements of gpl the same size')
for key in dset:
print(key,np.shape(dset[key]))
return(cf.process_dataset(dset, models))
#######################################################################################################
def effective_mass_calc(tag,correlator,tp):
    # finds the effective mass of a two-point correlator
M_effs = []
for t in range(2,len(correlator)-2):
thing = (correlator[t-2] + correlator[t+2])/(2*correlator[t])
if thing >= 1:
M_effs.append(gv.arccosh(thing)/2)
    # M_effs now holds the positive mass estimates; take a rolling average of 4 and find where it changes least (the plateau)
rav = []
for i in range(len(M_effs)-4):
rav.append((M_effs[i] + M_effs[i+1] + M_effs[i+2] + M_effs[i+3])/4)
M_eff = rav[0]
diff = abs((rav[1] - rav[0]).mean)
for i in range(1,len(rav)-1):
if abs((rav[i+1]-rav[i]).mean) < diff:
diff = abs((rav[i+1]-rav[i]).mean)
M_eff = (rav[i] + rav[i+1])/2
return(M_eff)
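# Added note: for a single-state periodic correlator C(t) = A * (exp(-M*t) + exp(-M*(tp - t))),
# (C(t-2) + C(t+2)) / (2 * C(t)) = cosh(2M), so arccosh(...) / 2 above recovers M; the rolling
# average of four then picks out the plateau where the estimate varies least.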
######################################################################################################
def effective_amplitude_calc(tag,correlator,tp,M_eff,Fit,corr):
    # finds the effective amplitude of a two-point correlator, given its effective mass
tmin = Fit['tmin{0}'.format(corr)]
A_effs = []
if len(correlator) == tp:
tmin = 0
for t in range(tmin,tmin+len(correlator)):
numerator = correlator[t-tmin]
if numerator >= 0:
A_effs.append( gv.sqrt(numerator/(gv.exp(-M_eff*t)+gv.exp(-M_eff*(tp-t)))))
rav = []
for i in range(len(A_effs)-4):
rav.append((A_effs[i] + A_effs[i+1] + A_effs[i+2] + A_effs[i+3])/4)
A_eff = rav[0]
diff = abs((rav[1] - rav[0]).mean)
for i in range(1,len(rav)-1):
if abs((rav[i+1]-rav[i]).mean) < diff:
diff = abs((rav[i+1]-rav[i]).mean)
A_eff = (rav[i] + rav[i+1])/2
an = gv.gvar(Fit['an'])
if A_eff.sdev/A_eff.mean > 0.5:
print('Replaced A_eff for {0} {1} -> {2}'.format(tag,A_eff,an))
A_eff = an
return(A_eff)
########################################################################################
def effective_V_calc(corr,daughter,parent,correlator,dcorr,pcorr,Fit,mass,twist,pA_eff,dA_eff):
#finds the effective V_nn[0][0]
tp = Fit['tp']
T = Fit['Ts'][-1]
dtmin = Fit['tmin{0}'.format(daughter)]
ptmin = Fit['tmin{0}'.format(parent)]
Vtmin = Fit['{0}tmin'.format(corr)]
dcorr2 = []
pcorr2 = []
Vcorr2 = []
V_effs = []
#print(corr,daughter,parent,mass,twist)
if len(dcorr) == int(tp):
dcorr2 = dcorr
else:
for i in range(dtmin):
dcorr2.append(0)
dcorr2.extend(dcorr)
for i in range(int(tp/2)-len(dcorr2)+1):
dcorr2.append(0)
#print(dcorr2)
if len(pcorr) == int(tp):
pcorr2 = pcorr
else:
for i in range(ptmin):
pcorr2.append(0)
pcorr2.extend(pcorr)
for i in range(int(tp/2)-len(pcorr2)+1):
pcorr2.append(0)
#print(pcorr2)
if len(correlator) == int(tp):
Vcorr2 = correlator
else:
for i in range(Vtmin):
Vcorr2.append(0)
Vcorr2.extend(correlator)
for i in range(T-len(Vcorr2)+1):
Vcorr2.append(0)
#print(Vcorr2)
for t in range(T):
numerator = Vcorr2[t]*pA_eff*dA_eff
denominator = dcorr2[t]*pcorr2[T-t]
if numerator != 0 and denominator !=0:
V_effs.append(numerator/denominator)
rav = []
for i in range(len(V_effs)-4):
rav.append((V_effs[i] + V_effs[i+1] + V_effs[i+2] + V_effs[i+3])/4)
V_eff = rav[0]
diff = abs((rav[1] - rav[0]).mean)
for i in range(1,len(rav)-1):
if abs((rav[i+1]-rav[i]).mean) < diff:
diff = abs((rav[i+1]-rav[i]).mean)
if (rav[i] + rav[i+1]) > 0:
V_eff = (rav[i] + rav[i+1])/2
V = gv.gvar(Fit['{0}Vnn0'.format(corr)])
if abs((V_eff.mean-V).mean/(V_eff.mean-V).sdev) > 1:
print('Replaced V_eff for {0} m {1} tw {2}: {3} --> {4}'.format(corr,mass,twist,V_eff,V))
V_eff = V
return(V_eff)
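# Added note: the ratio used above is
# V_eff(t) ~ C_3pt(t, T) * A_parent * A_daughter / (C_daughter(t) * C_parent(T - t)),
# which tends to the ground-state matrix element Vnn[0][0] once both two-point functions are ground-state dominated.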
#######################################################################################################
def SVD_diagnosis(Fit,models,corrs,svdfac,currents,SepMass):
binsize = Fit['binsize']
#Feed models and corrs (list of corrs in this SVD cut)
if list(set(corrs).intersection(currents)) ==[]:
filename = 'SVD/{0}{1}{2}{3}{4}{5}{6}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(corrs),binsize,SepMass)
else:
filename = 'SVD/{0}{1}{2}{3}{4}{5}{6}{7}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(corrs),strip_list(Fit['Ts']),binsize,SepMass)
#print(filename)
for corr in corrs:
if 'tmin{0}'.format(corr) in Fit:
filename += '{0}'.format(Fit['tmin{0}'.format(corr)])
for element in Fit['tmaxes{0}'.format(corr)]:
filename += '{0}'.format(element)
if '{0}tmin'.format(corr) in Fit:
filename += '{0}'.format(Fit['{0}tmin'.format(corr)])
#print(filename)
if os.path.isfile(filename) and os.path.getsize(filename) > 0:
pickle_off = open(filename,"rb")
svd = pickle.load(pickle_off)
print('Loaded SVD for {0} : {1:.2g} x {2} = {3:.2g}'.format(corrs,svd,svdfac,svd*svdfac))
pickle_off.close()
else:
print('Calculating SVD for {0}'.format(corrs))
s = gv.dataset.svd_diagnosis(cf.read_dataset('{0}{1}.gpl'.format(Fit['file_location'],Fit['filename']),binsize=binsize), models=models, nbstrap=20)
svd = s.svdcut
######## save plot ##########################
plt.figure()
x = s.val / s.val[-1]
ratio = s.bsval / s.val
idx = x > s.mincut
ratio = ratio[idx]
x = x[idx]
y = gv.mean(ratio)
yerr = gv.sdev(ratio)
plt.errorbar(x=x, y=y, yerr=yerr, fmt='+', color='b')
sig = (2. / len(s.val)) ** 0.5
plt.plot([x[0], x[-1]], [1. - sig, 1. - sig], 'k:')
plt.axhline(1,ls='--',color='k')
plt.axvline(s.svdcut,ls=':',color='g')
#plt.axvline(0.013,ls='--',color='g')
plt.xscale('log')
plt.savefig('svd_plots/{0}.pdf'.format(filename.split('/')[1]))
###############################################
pickle_on = open(filename,"wb")
print('Calculated SVD for {0} : {1:.2g} x {2} = {3:.2g}'.format(corrs,svd,svdfac,svd*svdfac))
pickle.dump(svd,pickle_on)
return(svd*svdfac)
#######################################################################################################
def make_models(Fit,FitCorrs,notwist0,non_oscillating,daughters,currents,parents,svdfac,Chained,allcorrs,links,parrlinks,SepMass,NoSVD=False):
#several forms [(A,B,C,D)],[(A,B),(C),(D)],[(A,B),[(C),(D)]]
#First make all models and then stick them into the correct chain
models = collections.OrderedDict()
tp = Fit['tp']
for corr in set(parents):
if corr in allcorrs:
models['{0}'.format(corr)] = []
for i,mass in enumerate(Fit['masses']):
tag = Fit['{0}-Tag'.format(corr)].format(mass)
models['{0}'.format(corr)].append(cf.Corr2(datatag=tag, tp=tp, tmin=Fit['tmin{0}'.format(corr)], tmax=Fit['tmaxes{0}'.format(corr)][i], a=('{0}:a'.format(tag), 'o{0}:a'.format(tag)), b=('{0}:a'.format(tag), 'o{0}:a'.format(tag)), dE=('dE:{0}'.format(tag), 'dE:o{0}'.format(tag)),s=(1,-1)))
for corr in set(daughters):
if corr in allcorrs:
models['{0}'.format(corr)] = []
for i,twist in enumerate(Fit['twists']):
tag = Fit['{0}-Tag'.format(corr)].format(twist)
if twist == '0' and corr in notwist0:
pass
elif twist == '0' and corr in non_oscillating:
models['{0}'.format(corr)].append(cf.Corr2(datatag=tag, tp=tp, tmin=Fit['tmin{0}'.format(corr)], tmax=Fit['tmaxes{0}'.format(corr)][i], a=('{0}:a'.format(tag)), b=('{0}:a'.format(tag)), dE=('dE:{0}'.format(tag))))
else:
models['{0}'.format(corr)].append(cf.Corr2(datatag=tag, tp=tp, tmin=Fit['tmin{0}'.format(corr)], tmax=Fit['tmaxes{0}'.format(corr)][i], a=('{0}:a'.format(tag), 'o{0}:a'.format(tag)), b=('{0}:a'.format(tag), 'o{0}:a'.format(tag)), dE=('dE:{0}'.format(tag), 'dE:o{0}'.format(tag)),s=(1,-1)))
for i,corr in enumerate(currents):
if corr in allcorrs:
models['{0}'.format(corr)] = []
for mass in Fit['masses']:
for twist in Fit['twists']:
for T in Fit['Ts']:
tag = Fit['threePtTag{0}'.format(corr)].format(T,Fit['m_s'],mass,Fit['m_l'],twist)
ptag = Fit['{0}-Tag'.format(parents[i])].format(mass)
dtag = Fit['{0}-Tag'.format(daughters[i])].format(twist)
if twist == '0' and corr in notwist0:
pass
elif twist == '0' and daughters[i] in non_oscillating:
models['{0}'.format(corr)].append(cf.Corr3(datatag=tag, T=T, tmin=Fit['{0}tmin'.format(corr)], a=('{0}:a'.format(dtag)), dEa=('dE:{0}'.format(dtag)), b=('{0}:a'.format(ptag), 'o{0}:a'.format(ptag)), dEb=('dE:{0}'.format(ptag), 'dE:o{0}'.format(ptag)), sb=(1,-1), Vnn='{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist), Vno='{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)))
else:
models['{0}'.format(corr)].append(cf.Corr3(datatag=tag, T=T, tmin=Fit['{0}tmin'.format(corr)], a=('{0}:a'.format(dtag), 'o{0}:a'.format(dtag)), dEa=('dE:{0}'.format(dtag), 'dE:o{0}'.format(dtag)), sa=(1,-1), b=('{0}:a'.format(ptag), 'o{0}:a'.format(ptag)), dEb=('dE:{0}'.format(ptag), 'dE:o{0}'.format(ptag)), sb=(1,-1), Vnn='{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist), Vno='{0}Vno_m{1}_tw{2}'.format(corr,mass,twist),Von='{0}Von_m{1}_tw{2}'.format(corr,mass,twist),Voo='{0}Voo_m{1}_tw{2}'.format(corr,mass,twist)))
    # Now we make these models into our chain, calculating an SVD cut for each. We make them in two halves so we can sandwich a marginalisation term between them later if we like
if Chained:
finalmodelsA = []
finalmodelsB = []
intermediate = []
for key in links:
link = [] #link is models in link
for corr in links[key]:
link.extend(models['{0}'.format(corr)])
svd = SVD_diagnosis(Fit,link,links[key],svdfac,currents,SepMass)
finalmodelsA.append({'svdcut':svd})
finalmodelsA.append(tuple(link))
for key in parrlinks:
link = [] #link is models in link
for corr in parrlinks[key]:
link.extend(models['{0}'.format(corr)])
svd = SVD_diagnosis(Fit,link,parrlinks[key],svdfac,currents,SepMass)
intermediate.append({'svdcut':svd})
intermediate.append(tuple(link))
finalmodelsB.append(intermediate)
return(finalmodelsA,finalmodelsB)
else:
finalmodels = []
for corr in allcorrs:
finalmodels.extend(models['{0}'.format(corr)])
if NoSVD == False:
svd = SVD_diagnosis(Fit,finalmodels,allcorrs,svdfac,currents,SepMass)
return(tuple(finalmodels),svd)
else:
return(tuple(finalmodels))
#######################################################################################################
def elements_in_FitCorrs(a):
    # reads [A,[B,C],[[D,E],F]] and interprets which elements will be chained and how. Returns an alphabetical list of all elements, the links in the chain and the links in the parallel chain
allcorrs = []
links = collections.OrderedDict()
parrlinks = collections.OrderedDict()
for i in range(np.shape(a)[0]):
links[i] =[]
if len(np.shape(a[i])) == 0: #deals with one corr in chain
#print(a[i],i,'fit alone in chain')
allcorrs.append(a[i])
links[i].append(a[i])
elif len(np.shape(a[i][0])) == 0 : #deals with multiple elements in chain
for j in range(len(a[i])):
#print(a[i][j],i,'fit together in chain')
allcorrs.append(a[i][j])
links[i].append(a[i][j])
else:
            del links[i] # don't need this key if it is in parallel
for j in range(np.shape(a[i])[0]):
parrlinks[j] = []
if len(np.shape(a[i][j])) == 0: #deals with one corr in parr chain
allcorrs.append(a[i][j])
parrlinks[j].append(a[i][j])
            else: # deals with multiple elements in parallel chain
for k in range(len(a[i][j])):
allcorrs.append(a[i][j][k])
parrlinks[j].append(a[i][j][k])
return(sorted(allcorrs),links,parrlinks)
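# Added illustrative sketch of the intended behaviour: for FitCorrs = ['A', ['B', 'C'], [['D', 'E'], 'F']]
# this returns allcorrs = ['A', 'B', 'C', 'D', 'E', 'F'], links = {0: ['A'], 1: ['B', 'C']} (each key a link
# fitted sequentially in the chain) and parrlinks = {0: ['D', 'E'], 1: ['F']} (links fitted in parallel).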
######################################################################################################
def make_prior(Fit,N,allcorrs,currents,daughters,parents,loosener,data,notwist0,non_oscillating):
No = N # number of oscillating exponentials
prior = gv.BufferDict()
tw_corr = True
otw_corr = True
if len(daughters) != 0 and '0' in Fit['twists'] and tw_corr:
for corr in set(daughters).intersection(allcorrs):
prior['d2_{0}'.format(corr)] = gv.gvar('0.0(1.0)')
prior['c2_{0}'.format(corr)] = gv.gvar('0.0(1.0)')
print('Daughter twists correlated')
if len(daughters) != 0 and '0' in Fit['twists'] and otw_corr:
for corr in set(daughters).intersection(allcorrs):
prior['oc2_{0}'.format(corr)] = gv.gvar('0.0(1.0)')
print('Daughter oscillating twists correlated')
tp = Fit['tp']
    En = '{0}({1})'.format(0.5*Fit['a'],0.25*Fit['a']*loosener) #Lambda with an error of half its value
an = '{0}({1})'.format(gv.gvar(Fit['an']).mean,gv.gvar(Fit['an']).sdev*loosener)
aon = '{0}({1})'.format(gv.gvar(Fit['aon']).mean,gv.gvar(Fit['aon']).sdev*loosener)
for corr in allcorrs:
if corr in parents:
for mass in Fit['masses']:
tag = Fit['{0}-Tag'.format(corr)].format(mass)
M_eff = effective_mass_calc(tag,data[tag],tp)
a_eff = effective_amplitude_calc(tag,data[tag],tp,M_eff,Fit,corr)
# Parent
prior['log({0}:a)'.format(tag)] = gv.log(gv.gvar(N * [an]))
prior['log(dE:{0})'.format(tag)] = gv.log(gv.gvar(N * [En]))
prior['log({0}:a)'.format(tag)][0] = gv.log(gv.gvar(a_eff.mean,loosener*Fit['loosener']*a_eff.mean))
prior['log(dE:{0})'.format(tag)][0] = gv.log(gv.gvar(M_eff.mean,loosener*Fit['Mloosener']*M_eff.mean))
# Parent -- oscillating part
prior['log(o{0}:a)'.format(tag)] = gv.log(gv.gvar(No * [an]))
prior['log(dE:o{0})'.format(tag)] = gv.log(gv.gvar(No * [En]))
prior['log(dE:o{0})'.format(tag)][0] = gv.log(gv.gvar((M_eff+gv.gvar(En)*(4/5)).mean,loosener*Fit['oMloosener']*((M_eff+gv.gvar(En)*(4/5)).mean)))
if corr in daughters:
for twist in Fit['twists']:
if twist =='0' and corr in notwist0:
pass
else:
ap2 = 3*(np.pi*float(twist)/Fit['L'])**2
#print(twist,ap2)
tag0 = Fit['{0}-Tag'.format(corr)].format('0')
M_eff = np.sqrt(effective_mass_calc(tag0,data[tag0],tp)**2 + ap2) #from dispersion relation
tag = Fit['{0}-Tag'.format(corr)].format(twist)
a_eff = effective_amplitude_calc(tag,data[tag],tp,M_eff,Fit,corr)
# Daughter
prior['log({0}:a)'.format(tag)] = gv.log(gv.gvar(N * [an]))
prior['log(dE:{0})'.format(tag)] = gv.log(gv.gvar(N * [En]))
#prior['log(dE:{0})'.format(tag)][1] = gv.log(gv.gvar(gv.gvar(En).mean,0.01*gv.gvar(En).mean))
if twist !='0' and '0' in Fit['twists'] and 'log(dE:{0})'.format(tag0) in prior and tw_corr:
prior['log(dE:{0})'.format(tag)][0] = gv.log(gv.sqrt(prior['dE:{0}'.format(tag0)][0]**2 + ap2) * (1 + prior['c2_{0}'.format(corr)]*ap2/(np.pi)**2) )
prior['log({0}:a)'.format(tag)][0] = gv.log((prior['{0}:a'.format(tag0)][0]/gv.sqrt(gv.sqrt(1 + ap2/(prior['dE:{0}'.format(tag0)][0])**2))) * (1 + prior['d2_{0}'.format(corr)]*ap2/(np.pi)**2) )
else:
prior['log(dE:{0})'.format(tag)][0] = gv.log(gv.gvar(M_eff.mean,loosener*Fit['Mloosener']*M_eff.mean))
prior['log({0}:a)'.format(tag)][0] = gv.log(gv.gvar(a_eff.mean,loosener*Fit['loosener']*a_eff.mean))
# Daughter -- oscillating part
if twist =='0' and corr in non_oscillating:
pass
else:
newaon = aon
if twist == '0':
newaon = '{0}({1})'.format(gv.gvar(aon).mean/4,gv.gvar(aon).mean/2) #v small in the case of tw0
prior['log(o{0}:a)'.format(tag)] = gv.log(gv.gvar(No * [newaon]))
prior['log(dE:o{0})'.format(tag)] = gv.log(gv.gvar(No * [En]))
if twist !='0' and '0' in Fit['twists'] and 'log(dE:o{0})'.format(tag0) in prior and otw_corr:
prior['log(dE:o{0})'.format(tag)][0] = gv.log(gv.sqrt(prior['dE:o{0}'.format(tag0)][0]**2 + ap2) * (1 + prior['oc2_{0}'.format(corr)]*ap2/(np.pi)**2) )
#prior['log(o{0}:a)'.format(tag)][0] = gv.log((prior['o{0}:a'.format(tag0)][0]/gv.sqrt(1 + ap2/(prior['dE:o{0}'.format(tag0)][0])**2)) * (1 + prior['od2']*ap2/(np.pi)**2) )
prior['log(o{0}:a)'.format(tag)][0] = gv.log(gv.gvar(gv.gvar(newaon).mean,loosener*Fit['oloosener']*gv.gvar(newaon).mean))
else:
prior['log(dE:o{0})'.format(tag)][0] = gv.log(gv.gvar((M_eff+gv.gvar(En)/2).mean,loosener*Fit['oMloosener']*((M_eff+gv.gvar(En)/2).mean))) # kaon splitting
#prior['log(dE:o{0})'.format(tag)][0] = gv.log(prior['dE:{0}'.format(tag)][0] + gv.gvar(En))
prior['log(o{0}:a)'.format(tag)][0] = gv.log(gv.gvar(gv.gvar(newaon).mean,loosener*Fit['oloosener']*gv.gvar(newaon).mean))
if corr in currents:
for mass in Fit['masses']:
for twist in Fit['twists']:
if twist =='0' and corr in notwist0:
pass
else:
daughter=daughters[currents.index(corr)]
parent=parents[currents.index(corr)]
dcorr = data[Fit['{0}-Tag'.format(daughter)].format(twist)]
pcorr = data[Fit['{0}-Tag'.format(parent)].format(mass)]
correlator = data[Fit['threePtTag{0}'.format(corr)].format(Fit['Ts'][-1],Fit['m_s'],mass,Fit['m_l'],twist)]
ptag = Fit['{0}-Tag'.format(parent)].format(mass)
pM_eff = effective_mass_calc(ptag,data[ptag],tp)
pa_eff = effective_amplitude_calc(ptag,data[ptag],tp,pM_eff,Fit,parent)
dtag = Fit['{0}-Tag'.format(daughter)].format(twist)
dM_eff = effective_mass_calc(dtag,data[dtag],tp)
da_eff = effective_amplitude_calc(dtag,data[dtag],tp,dM_eff,Fit,daughter)
V_eff = effective_V_calc(corr,daughter,parent,correlator,dcorr,pcorr,Fit,mass,twist,da_eff,pa_eff)
if V_eff.mean != gv.gvar(Fit['{0}Vnn0'.format(corr)]).mean:
Vnn0 = '{0}({1})'.format(V_eff.mean,loosener*V_eff.mean*Fit['Vloosener'])
else:
Vnn0 = '{0}({1})'.format(V_eff.mean,loosener*V_eff.sdev)
Vn = '{0}({1})'.format(gv.gvar(Fit['{0}Vn'.format(corr)]).mean,loosener*gv.gvar(Fit['{0}Vn'.format(corr)]).sdev)
V0 = '{0}({1})'.format(gv.gvar(Fit['{0}V0'.format(corr)]).mean,loosener*gv.gvar(Fit['{0}V0'.format(corr)]).sdev)
if twist =='0' and corr in notwist0:
pass
elif twist =='0' and daughters[currents.index(corr)] in non_oscillating :
prior['{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(N * [N * [Vn]])
prior['{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(Vnn0)
prior['{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(N * [No* [Vn]])
prior['{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(V0)
else:
prior['{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(N * [N * [Vn]])
prior['{0}Vnn_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(Vnn0)
prior['{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(N * [No * [Vn]])
prior['{0}Vno_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(V0)
prior['{0}Voo_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(No * [No * [Vn]])
prior['{0}Voo_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(V0)
prior['{0}Von_m{1}_tw{2}'.format(corr,mass,twist)] = gv.gvar(No * [N * [Vn]])
prior['{0}Von_m{1}_tw{2}'.format(corr,mass,twist)][0][0] = gv.gvar(V0)
# for key in prior:
# if key[0] == corr:
# for i in range(1,N):
# for j in range(1,N):
# prior[key][i][j] = gv.gvar('0.0(5)')
return(prior)
######################################################################################################
def get_p0(Fit,fittype,Nexp,allcorrs,prior,FitCorrs):
# We want to take in several scenarios in this order, choosing the highest in preference.
# 1) This exact fit has been done before, modulo priors, svds t0s etc
# 2) Same but different type of fit, eg marginalised
# 3) This fit has been done before with Nexp+1
    # 4) This fit has been done before with Nexp-1
    # 5a) Some elements have been fitted to Nexp before,
# 5b) Some elements of the fit have been fitted in other combinations before
filename1 = 'p0/{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),FitCorrs,strip_list(Fit['Ts']),fittype,Nexp)
filename2 = 'p0/{0}{1}{2}{3}{4}{5}{6}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),strip_list(Fit['Ts']),Nexp)
filename3 = 'p0/{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),FitCorrs,strip_list(Fit['Ts']),fittype,Nexp+1)
filename4 = 'p0/{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),FitCorrs,strip_list(Fit['Ts']),fittype,Nexp-1)
filename5a = 'p0/{0}{1}{2}'.format(Fit['conf'],Fit['filename'],Nexp)
filename5b = 'p0/{0}{1}'.format(Fit['conf'],Fit['filename'])
#case 1
if os.path.isfile(filename1):
p0 = gv.load(filename1)
print('Loaded p0 from exact fit')
#case 2
elif os.path.isfile(filename2):
p0 = gv.load(filename2)
print('Loaded p0 from exact fit of different type')
#case 3
elif os.path.isfile(filename3):
p0 = gv.load(filename3)
print('Loaded p0 from exact fit Nexp+1')
#case 4
elif os.path.isfile(filename4):
p0 = gv.load(filename4)
print('Loaded p0 from exact fit Nexp-1')
#case 5
elif os.path.isfile(filename5b):
p0 = gv.load(filename5b)
print('Loaded global p0')
if os.path.isfile(filename5a):
pnexp = gv.load(filename5a)
for key in pnexp:
if key in prior:
if key not in p0:
print('Error: {0} in global Nexp but not in global fit'.format(key))
p0[key] = pnexp[key]
del p0[key]
p0[key] = pnexp[key]
print('Loaded {0} p0 from global Nexp'.format(key))
else:
p0 = None
return(p0)
######################################################################################################
def update_p0(p,finalp,Fit,fittype,Nexp,allcorrs,FitCorrs,Q,marg=False):
# We want to take in several scenarios in this order
# 1) This exact fit has been done before, modulo priors, svds t0s etc
# 2) Same but different type of fit, eg marginalised
# 3) Global Nexp
# 4) Global
    # 5) if marg is True, we only save filename1, since Nexp = nmarg there and is not comparable to the other fits
filename1 = 'p0/{0}{1}{2}{3}{4}{5}{6}{7}{8}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),FitCorrs,strip_list(Fit['Ts']),fittype,Nexp)
filename2 = 'p0/{0}{1}{2}{3}{4}{5}{6}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),strip_list(Fit['Ts']),Nexp)
filename3 = 'p0/{0}{1}{2}'.format(Fit['conf'],Fit['filename'],Nexp)
filename4 = 'p0/{0}{1}'.format(Fit['conf'],Fit['filename'])
#case 1
for element in ['c2','d2','oc2']:
for corr in allcorrs:
if '{0}_{1}'.format(element,corr) in p:
del p['{0}_{1}'.format(element,corr)]
for element in ['c2','d2','oc2']:
for corr in allcorrs:
if '{0}_{1}'.format(element,corr) in finalp:
del finalp['{0}_{1}'.format(element,corr)]
gv.dump(p,filename1)
if marg == False:
#case 2
gv.dump(finalp,filename2)
#case 3
if os.path.isfile(filename3) and Q > 0.05:
            p0 = gv.load(filename3) #load existing global Nexp
for key in finalp: # key in this output
                p0[key] = finalp[key] #Update existing and add new
gv.dump(p0,filename3)
else:
gv.dump(finalp,filename3)
if os.path.isfile(filename4) and Q > 0.05:
p0 = gv.load(filename4) # load existing, could be any length
for key in finalp: # key in new
                if key in p0: # if the key already exists, replace it only when its length does not exceed Nexp
if len(np.shape(p0[key])) == 1 and len(p0[key]) <= Nexp:
#print('shape p0[key]',np.shape(p0[key]),key)
del p0[key]
p0[key] = finalp[key]
print('Updated global p0 {0}'.format(key))
elif np.shape(p0[key])[0] <= Nexp:
#print('shape p0[key]',np.shape(p0[key]),key)
del p0[key]
p0[key] = finalp[key]
print('Updated global p0 {0}'.format(key))
else:
p0[key] = finalp[key]
print('Added new element to global p0 {0}'.format(key))
gv.dump(p0,filename4)
else:
gv.dump(finalp,filename4)
return()
######################################################################################################
def save_fit(fit,Fit,allcorrs,fittype,Nexp,SvdFactor,PriorLoosener,currents,smallsave):
filename = 'Fits/{0}{1}{2}{3}{4}{5}{6}_Nexp{7}_sfac{8}_pfac{9}_Q{10:.2f}_chi{11:.3f}_sm{12}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),strip_list(Fit['Ts']),fittype,Nexp,SvdFactor,PriorLoosener,fit.Q,fit.chi2/fit.dof,smallsave)
for corr in allcorrs:
if corr in currents:
filename += '_{0}tmin{1}'.format(corr,Fit['{0}tmin'.format(corr)])
savedict = gv.BufferDict()
if smallsave:
for key in fit.p:
if key[0] == 'l':
key2 = key.split('(')[1].split(')')[0]
if key2.split(':')[0] =='dE' and key2.split(':')[1][0] != 'o':
savedict[key] = [fit.p[key][0]] #was palt
elif key[2] =='n' and key[3] == 'n':
savedict[key] = [[fit.p[key][0][0]]] #was palt
elif smallsave == False:
savedict = fit.p
print('Started gv.gdump to {1}, smallsave = {0}'.format(smallsave,'{0}.pickle'.format(filename)),datetime.datetime.now())
gv.gdump(savedict,'{0}.pickle'.format(filename))
print('Finished gv.gdump fit, starting save fit output',datetime.datetime.now())
f = open('{0}.txt'.format(filename),'w')
f.write(fit.format(pstyle='v'))
f.close()
print('Finished save fit output',datetime.datetime.now())
return()
######################################################################################################
def do_chained_fit(data,prior,Nexp,modelsA,modelsB,Fit,noise,currents,allcorrs,SvdFactor,PriorLoosener,FitCorrs,save,smallsave,GBF):#if GBF = None doesn't pass GBF, else passed GBF
#do chained fit with no marginalisation Nexp = NMax
models = copy.deepcopy(modelsA)
if len(modelsB[0]) !=0:
models.extend(modelsB)
print('Models',models)
fitter = cf.CorrFitter(models=models, maxit=maxiter, fast=False, tol=(1e-6,0.0,0.0))
p0 = get_p0(Fit,'chained',Nexp,allcorrs,prior,FitCorrs)
print(30 * '=','Chained-Unmarginalised','Nexp =',Nexp,'Date',datetime.datetime.now())
fit = fitter.chained_lsqfit(data=data, prior=prior, p0=p0, noise=noise,debug=True)
update_p0([f.pmean for f in fit.chained_fits.values()],fit.pmean,Fit,'chained',Nexp,allcorrs,FitCorrs,fit.Q) #fittype=chained, for marg,includeN
if GBF == None:
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)
print_Z_V(fit.p,Fit,allcorrs)
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'chained',Nexp,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return()
elif fit.logGBF - GBF < 1 and fit.logGBF - GBF > 0:
print('log(GBF) went up by less than 1: {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
elif fit.logGBF - GBF < 0:
print('log(GBF) went down {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
else:
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)
print_Z_V(fit.p,Fit,allcorrs)
print('log(GBF) went up {0:.2f}'.format(fit.logGBF - GBF))
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'chained',Nexp,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return(fit.logGBF)
######################################################################################################
def do_chained_marginalised_fit(data,prior,Nexp,modelsA,modelsB,Fit,noise,currents,allcorrs,SvdFactor,PriorLoosener,FitCorrs,save,smallsave,GBF,Marginalised):#if GBF = None doesn't pass GBF, else passed GBF
#do chained fit with marginalisation nterm = nexp,nexp Nmarg=Marginalisation us in p0 bits
models = copy.deepcopy(modelsA)
if len(modelsB[0]) !=0:
models.append(dict(nterm=(Nexp,Nexp)))
models.extend(modelsB)
else:
        print('Marginalisation not applied as no parallelised models')
print('Models',models)
fitter = cf.CorrFitter(models=models, maxit=maxiter, fast=False, tol=(1e-6,0.0,0.0))
p0 = get_p0(Fit,'chained-marginalised_N{0}{0}'.format(Nexp),Marginalised,allcorrs,prior,FitCorrs)
print(30 * '=','Chained-marginalised','Nexp =',Marginalised,'nterm = ({0},{0})'.format(Nexp),'Date',datetime.datetime.now())
fit = fitter.chained_lsqfit(data=data, prior=prior, p0=p0, noise=noise,debug=True)
update_p0([f.pmean for f in fit.chained_fits.values()],fit.pmean,Fit,'chained-marginalised_N{0}{0}'.format(Nexp),Marginalised,allcorrs,FitCorrs,fit.Q,True) #fittype=chained, for marg,includeN
if GBF == None:
print(fit)#.format(pstyle='m'))
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)
print_Z_V(fit.p,Fit,allcorrs)
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'chained-marginalised_N{0}{0}'.format(Nexp),Marginalised,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return()
elif fit.logGBF - GBF < 1 and fit.logGBF - GBF > 0:
print('log(GBF) went up by less than 1: {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
elif fit.logGBF - GBF < 0:
print('log(GBF) went down {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
else:
print(fit)#.format(pstyle='m'))
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)
print_Z_V(fit.p,Fit,allcorrs)
print('log(GBF) went up {0:.2f}'.format(fit.logGBF - GBF))
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'chained-marginalised_N{0}{0}'.format(Nexp),Marginalised,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return(fit.logGBF)
######################################################################################################
def do_unchained_fit(data,prior,Nexp,models,svdcut,Fit,noise,currents,allcorrs,SvdFactor,PriorLoosener,save,smallsave,GBF):#if GBF = None doesn't pass GBF, else passed GBF
#do chained fit with no marginalisation Nexp = NMax
print('Models',models)
fitter = cf.CorrFitter(models=models, maxit=maxiter, fast=False, tol=(1e-6,0.0,0.0))
p0 = get_p0(Fit,'unchained',Nexp,allcorrs,prior,allcorrs) # FitCorrs = allcorrs
print(30 * '=','Unchained-Unmarginalised','Nexp =',Nexp,'Date',datetime.datetime.now())
fit = fitter.lsqfit(pdata=data, prior=prior, p0=p0, svdcut=svdcut, noise=noise,debug=True)
update_p0(fit.pmean,fit.pmean,Fit,'unchained',Nexp,allcorrs,allcorrs,fit.Q) #fittype=chained, for marg,includeN
if GBF == None:
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)#,Fit)
print_Z_V(fit.p,Fit,allcorrs)
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'unchained',Nexp,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return()
elif fit.logGBF - GBF < 1 and fit.logGBF - GBF > 0:
print('log(GBF) went up by less than 1: {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
elif fit.logGBF - GBF < 0:
print('log(GBF) went down: {0:.2f}'.format(fit.logGBF - GBF))
return(fit.logGBF)
else:
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)#,Fit)
print_Z_V(fit.p,Fit,allcorrs)
print('log(GBF) went up more than 1: {0:.2f}'.format(fit.logGBF - GBF))
if fit.Q > 0.05 and save: #threshold for a 'good' fit
save_fit(fit,Fit,allcorrs,'unchained',Nexp,SvdFactor,PriorLoosener,currents,smallsave)
#print_fit_results(fit) do this later
return(fit.logGBF)
#######################################################################################################
def do_sep_mass_fit(data,prior,Nexp,models,svdcut,Fit,noise,currents,allcorrs,SvdFactor,PriorLoosener,save,smallsave,GBF):
#if GBF = None doesn't pass GBF, else passed GBF
#do chained fit with no marginalisation Nexp = NMax
print('Models',models)
#print(data)
fitter = cf.CorrFitter(models=models, maxit=maxiter, fast=False, tol=(1e-6,0.0,0.0))
p0 = get_p0(Fit,'sepmass',Nexp,allcorrs,prior,allcorrs) # FitCorrs = allcorrs
    print(30 * '=','Separate Mass Fit','Nexp =',Nexp,'Date',datetime.datetime.now())
fit = fitter.lsqfit(pdata=data, prior=prior, p0=p0, svdcut=svdcut, noise=noise,debug=True)
update_p0(fit.pmean,fit.pmean,Fit,'sepmass',Nexp,allcorrs,allcorrs,fit.Q) #fittype=chained, for marg,includeN
print(fit)
print('chi^2/dof = {0:.3f} Q = {1:.3f} logGBF = {2:.0f}'.format(fit.chi2/fit.dof,fit.Q,fit.logGBF))
print_results(fit.p,prior)#,Fit)
return(fit)
######################################################################################################
def combine_sep_mass_fits(result,Fit,priors,allcorrs,Nexp,SvdFactor,PriorLoosener,currents,save,smallsave):
prior = gv.BufferDict()
combined = []
for mass in Fit['masses']:
smallresult = gv.BufferDict()
fit = result[mass].p
for key in fit:
if key[0] == 'l':
key2 = key.split('(')[1].split(')')[0]
if key2.split(':')[0] =='dE':
smallresult[key] = [fit[key][0]]
elif key[2] =='n' and key[3] == 'n':
smallresult[key] = [[fit[key][0][0]]]
combined.append(smallresult)
prior = copy.deepcopy(priors[Fit['masses'][0]])
for mass in Fit['masses']:
for key in priors[mass]:
if key not in prior:
prior[key] = copy.deepcopy(priors[mass][key])
#print(combined)
final = lsqfit.wavg(combined)
#print(gv.evalcorr([final['SVnn_m0.433_tw0.8563'][0][0],final['SVnn_m0.683_tw0.8563'][0][0]]))
chi = 0
Q = 0
GBF = 0
for mass in Fit['masses']:
chi += (result[mass].chi2/result[mass].dof)/len(Fit['masses'])
Q += (result[mass].Q)/len(Fit['masses'])
GBF += result[mass].logGBF
print('Mean chi^2/dof = {0:.3f} Q = {1:.3f}, total logGBF {2:.1f}'.format(chi,Q,GBF))
print_results(final,prior)#,Fit)
print_Z_V(final,Fit,allcorrs)
if save:
save_combined_fit(final,Fit,allcorrs,'sep_mass',Nexp,SvdFactor,PriorLoosener,currents,smallsave,chi,Q)
return()
######################################################################################################################
def save_combined_fit(fit,Fit,allcorrs,fittype,Nexp,SvdFactor,PriorLoosener,currents,smallsave,chi,Q):
filename = 'Fits/{0}{1}{2}{3}{4}{5}{6}_Nexp{7}_sfac{8}_pfac{9}_Q{10:.2f}_chi{11:.3f}_sm{12}'.format(Fit['conf'],Fit['filename'],strip_list(Fit['masses']),strip_list(Fit['twists']),strip_list(allcorrs),strip_list(Fit['Ts']),fittype,Nexp,SvdFactor,PriorLoosener,Q,chi,smallsave)
for corr in allcorrs:
if corr in currents:
filename += '_{0}tmin{1}'.format(corr,Fit['{0}tmin'.format(corr)])
savedict = gv.BufferDict()
if smallsave:
for key in fit:
if key[0] == 'l':
key2 = key.split('(')[1].split(')')[0]
if key2.split(':')[0] =='dE' and key2.split(':')[1][0] != 'o':
savedict[key] = [fit[key][0]]
elif key[2] =='n' and key[3] == 'n':
savedict[key] = [[fit[key][0][0]]]
elif smallsave == False:
print('Error, can only do small save with sep masses' )
#print(gv.evalcorr([savedict['SVnn_m0.433_tw0.8563'][0][0],savedict['SVnn_m0.683_tw0.8563'][0][0]]))
print('Started gv.gdump to {1}, smallsave = {0}'.format(smallsave,'{0}.pickle'.format(filename)),datetime.datetime.now())
gv.gdump(savedict,'{0}.pickle'.format(filename))
print('Finished gv.gdump fit',datetime.datetime.now())
return()
######################################################################################################
def print_p_p0(p,p0,prior):
print('{0:<30}{1:<20}{2:<40}{3:<20}'.format('key','p','p0','prior'))
for key in prior:
if len(np.shape(p[key])) ==1 :
for element in range(len(p[key])):
if element == 0:
print('{0:<30}{1:<20}{2:<40}{3:<20}'.format(key,p[key][element],p0[key][element],prior[key][element]))
else:
print('{0:>30}{1:<20}{2:<40}{3:<20}'.format('',p[key][element],p0[key][element],prior[key][element]))
return()
#####################################################################################################
def print_results(p,prior):#,Fit):
print(100*'-')
print('{0:<30}{1:<15}{2:<15}{3:<15}{4}'.format('key','p','p error','prior','prior error'))
print(100*'-')
print('Ground state energies')
print(100*'-')
for key in prior:
if key[0] == 'l':
key = key.split('(')[1].split(')')[0]
if key.split(':')[0] =='dE' and key.split(':')[1][0] != 'o':
print('{0:<30}{1:<15}{2:<15.3%}{3:<15}{4:.2%}'.format(key,p[key][0],p[key][0].sdev/p[key][0].mean,prior[key][0],prior[key][0].sdev/prior[key][0].mean))
#if '{0}'.format(key.split(':')[1]) == Fit['BG-Tag'].format(Fit['masses'][0]):
# print('split: ', p['dE:{0}'.format(Fit['BNG-Tag'].format(Fit['masses'][0]))][0]-p[key][0])
print('')
print('Oscillating ground state energies')
print(100*'-')
for key in prior:
if key[0] == 'l':
key = key.split('(')[1].split(')')[0]
if key.split(':')[0] =='dE' and key.split(':')[1][0] == 'o':
print('{0:<30}{1:<15}{2:<15.3%}{3:<15}{4:.2%}'.format(key,p[key][0],p[key][0].sdev/p[key][0].mean,prior[key][0],prior[key][0].sdev/prior[key][0].mean))
print('')
print('V_nn[0][0]')
print(100*'-')
for key in prior:
if key[1] != '2' and key[2] =='n' and key[3] == 'n':
print('{0:<30}{1:<15}{2:<15.3%}{3:<15}{4:.2%}'.format(key,p[key][0][0],p[key][0][0].sdev/p[key][0][0].mean,prior[key][0][0],prior[key][0][0].sdev/prior[key][0][0].mean))
print(100*'-')
return()
#####################################################################################################
def make_Z_V(m_h,m_s,M_parent,M_daughter,S,V):
Z_V = (m_h-m_s)/(M_parent-M_daughter) * S/V
return(Z_V)
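# Illustrative use (the gvar-valued arguments are fit outputs, as in print_Z_V below):
# Z_V = make_Z_V(float(mass), float(Fit['m_s']), M_parent, M_daughter, S, V)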
#####################################################################################################
# needs generalising
#####################################################################################################
def print_Z_V(p,Fit,allcorrs):
if 'S' in allcorrs and 'V' in allcorrs:
print(100*'-')
for mass in Fit['masses']:
M_parent = p['dE:{0}'.format(Fit['{0}-Tag'.format('BG')].format(mass))][0]
M_daughter = p['dE:{0}'.format(Fit['{0}-Tag'.format('KG')].format('0'))][0]
S = p['SVnn_m{0}_tw0'.format(mass)][0][0]
V = p['VVnn_m{0}_tw0'.format(mass)][0][0]
Z_V = make_Z_V(float(mass),float(Fit['m_s']),M_parent,M_daughter,S,V)
print("Mass = {0} Z_V = {1}".format(mass,Z_V))
print(100*'-')
return()
#####################################################################################################
|
WillParrott/New_bodiddley_fitter
|
functions.py
|
functions.py
|
py
| 47,341 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43067183900
|
# Modified by: Dr. Smruti Panigrahi
import numpy as np
def mean_nav_angle(Rover):
    # Add the wall offset (derived from the spread of the nav angles in decision_step) to the mean nav angle to make the Rover a left-wall-crawler
return np.clip( (np.mean(Rover.nav_angles) + Rover.wall_offset_angle) * 180/np.pi, -15, 15)
def is_moving(Rover):
# Checks if the Rover has moved a certain distance since the last frame.
distance_travelled = np.sqrt( (Rover.pos[0] - Rover.last_pos[0]) ** 2 +
(Rover.pos[1] - Rover.last_pos[1]) ** 2 )
return distance_travelled > Rover.stuck_dist
def is_near_home(Rover):
# Checks if the Rover is near home.
distance_from_home = np.sqrt( (Rover.pos[0] - Rover.home_pos[0]) ** 2 +
(Rover.pos[1] - Rover.home_pos[1]) ** 2 )
return distance_from_home < Rover.home_dist
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover):
# Implement conditionals to decide what to do given perception data
# Here you're all set up with some basic functionality but you'll need to
# improve on this decision tree to do a good job of navigating autonomously!
# offset in rad used to hug the left wall 15s after the start time to avoid donut mode
if Rover.total_time < 15:
        # Steering proportional to the (mean + standard deviation) results in
        # smaller offsets on a narrow vision map and larger offsets in turns and open areas
Rover.wall_offset_angle = 0 #-0.65 * np.std(Rover.nav_angles)
else:
Rover.wall_offset_angle = 0.75 * np.std(Rover.nav_angles)
if Rover.total_time == 0:
Rover.home_pos = Rover.pos
print("Rover Home Position (x, y): ", Rover.home_pos)
if Rover.nav_angles is not None:
# Check for Rover.mode status
        # If all the samples have been collected and enough terrain has been mapped then go home
if is_near_home(Rover) and Rover.total_time > 30:
print("Rover is close to Home!")
#Rover.mode = 'gohome'
if Rover.samples_collected == Rover.total_samples:
if Rover.mapped >= 50:
Rover.mode = 'gohome'
else:
Rover.mode = 'forward'
if (Rover.throttle >= Rover.throttle_set and np.abs(Rover.vel) <= 0.01 and not Rover.picking_up):
Rover.stuck_counter += 1
print("Stuck counter: ", Rover.stuck_counter)
if (Rover.stuck_counter >= 2*Rover.fps):
print("Rover is still stuck! Try turning in the opposite direction")
Rover.steer = -15
Rover.brake = 0
Rover.throttle = 0 #5*Rover.throttle_set
Rover.mode = 'stuck'
Rover.stuck_counter = 0
else:
Rover.mode = 'forward'
elif (Rover.throttle == Rover.throttle_set and Rover.steer == 15 and Rover.vel > 0.5):
Rover.donut_counter += 1
if (Rover.donut_counter >= 5*Rover.fps):
print("Rover eating donut!")
print("Donut counter: ", Rover.donut_counter)
Rover.throttle = 0
Rover.brake = 0
Rover.steer = -15
Rover.mode = 'donut'
elif Rover.picking_up == 1:
Rover.throttle = 0
Rover.steer = 0
Rover.brake = Rover.brake_set
Rover.mode = 'stop'
Rover.samples_collected += 1
elif Rover.near_sample == 1:
Rover.throttle = 0
Rover.steer = 0
Rover.brake = Rover.brake_set
Rover.mode = 'stop'
elif Rover.mode == 'pursuit':
print("Pickup counter: ", Rover.pickup_counter)
if Rover.pickup_counter <= 100:
Rover.wall_offset_angle = 0;
print("Rover Picking up Rock..........")
Rover.steer = mean_nav_angle(Rover)
Rover.brake = 0
Rover.throttle = 0
if Rover.vel <= 0.2:
Rover.throttle = Rover.throttle_set
else:
Rover.throttle = 0
Rover.samples_located += 1
                    Rover.mode = 'stop'
else:
Rover.steer = mean_nav_angle(Rover)
Rover.brake = 0
Rover.throttle = 0
Rover.mode = 'forward'
Rover.pickup_counter = 0
elif Rover.mode == 'forward':
# Check the extent of navigable terrain
if len(Rover.nav_angles) >= Rover.stop_forward:
# Rover.brake = 0
# If mode is forward, navigable terrain looks good
# and velocity is below max, then throttle
if Rover.vel < Rover.max_vel:
# Set throttle value to throttle setting
Rover.throttle = Rover.throttle_set
Rover.brake = 0
elif Rover.vel > Rover.max_vel:
Rover.throttle = 0
Rover.brake = 0.2*Rover.brake_set
else: # Else coast
Rover.throttle = 0
Rover.brake = 0
# Set steering to average angle clipped to the range +/- 15
Rover.steer = mean_nav_angle(Rover)
# If there's a lack of navigable terrain pixels then go to 'stop' mode
elif len(Rover.nav_angles) < Rover.stop_forward:
# Set mode to "stop" and hit the brakes!
Rover.throttle = 0
# Set brake to stored brake value
Rover.brake = 5*Rover.brake_set
Rover.steer = 0
Rover.mode = 'stop'
# If we're already in "stop" mode then make different decisions
elif Rover.mode == 'stop':
# If we're in stop mode but still moving keep braking
if Rover.vel > 0.2:
Rover.throttle = 0
Rover.brake = 5*Rover.brake_set
Rover.steer = 0
# If we're not moving (vel < 0.2) then do something else
elif Rover.vel <= 0.2:
# Now we're stopped and we have vision data to see if there's a path forward
if len(Rover.nav_angles) < Rover.go_forward:
Rover.throttle = 0
# Release the brake to allow turning
Rover.brake = 0
# Turn range is +/- 15 degrees, when stopped the next line will induce 4-wheel turning
Rover.steer = -15
# If we're stopped but see sufficient navigable terrain in front then go!
elif len(Rover.nav_angles) >= Rover.go_forward:
# Set throttle back to stored value
Rover.throttle = Rover.throttle_set
# Release the brake
Rover.brake = 0
# Set steer to mean angle
Rover.steer = mean_nav_angle(Rover)
Rover.mode = 'forward'
elif Rover.mode == 'stuck':
if (Rover.throttle == 0 and Rover.brake == 0 and Rover.steer != 0): #spinning in place
print("Rover is spinning in place")
Rover.throttle = 0
Rover.brake = Rover.brake_set
Rover.steer = 0
Rover.mode = 'forward'
elif (Rover.throttle >= Rover.throttle_set and Rover.vel <= 0.5):
print("Rover is still stuck")
Rover.steer = -15
Rover.throttle = 5*Rover.throttle_set
Rover.brake = 0
Rover.mode = 'stop'
elif Rover.vel < -0.2: #if rover moving backwards go to stop mode
Rover.throttle = 0
Rover.brake = Rover.brake_set
Rover.steer = -15
print("Rover out of stuck mode and going to stop mode")
Rover.mode = 'stop'
else:
Rover.stuck_counter = 0
Rover.throttle = 0
Rover.brake = Rover.brake_set
Rover.steer = -15
Rover.mode = 'stop'
elif Rover.mode == 'donut':
Rover.throttle = 0
Rover.brake = 0 #Rover.brake_set
Rover.wall_offset_angle *= -2
Rover.steer = mean_nav_angle(Rover)
if (Rover.donut_counter >= 5*Rover.fps + 5): # Wait for 6 frames to turn Rover by 90deg
Rover.donut_counter = 0
Rover.mode = 'stop'
else:
Rover.mode = 'forward'
elif Rover.mode == 'gohome':
if is_near_home(Rover):
Rover.throttle = 0
Rover.brake = Rover.brake_set
Rover.steer = 0
print("Rover is home")
else:
Rover.mode = 'forward'
# Just to make the rover do something even if no modifications have been made to the code
else:
Rover.throttle = Rover.throttle_set
Rover.steer = 0
Rover.brake = 0
# If in a state where want to pickup a rock send pickup command
if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:
Rover.send_pickup = True
return Rover
|
DrPanigrahi/RoboND-Rover-Project
|
code/decision.py
|
decision.py
|
py
| 9,602 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22791922218
|
#2022.08.15
#Q1. The following is the Calculator class.
class Calculator:
def __init__(self):
self.value = 0
def add(self, val):
self.value += val
#Inherit the class above to create UpgradeCalculator, and add a minus method that can subtract a value.
#That is, you need to make a class that behaves as follows.
class UpgradeCalculator(Calculator):
def minus(self, val):
self.value -= val
cal = UpgradeCalculator()
cal.add(10)
cal.minus(7)
print(cal.value) #prints 3, i.e. 10 minus 7
#Q2. Create a MaxLimitCalculator class that restricts the instance variable value so it can never exceed 100. It should behave as follows.
#Inherit the Calculator class and override the add method to build the class below.
class MaxLimitCalculator(Calculator):
    def add(self, val):
        self.value += val
        if self.value > 100:
            self.value = 100
cal = MaxLimitCalculator()
cal.add(50) #add 50
cal.add(50) #add another 50
print(cal.value) #prints 100
#Note: it must be built by inheriting the following Calculator class.
# class Calculator:
# def __init__(self):
# self.value = 0
# def add(self, val):
# self.value += val
#Q3. Predict the following results.
#1. all([1, 2, abs(-3)-3])
#False
#abs(-3) is the absolute value of -3, i.e. 3, so the expression becomes all([1,2,0]); since the list contains a 0, the built-in all returns False.
#2. chr(ord('a')) == 'a'
#True
#ord('a') is 97, so the expression becomes chr(97). chr(97) is 'a' again, so 'a' == 'a' returns True.
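#Quick check of both expressions (illustrative):
print(all([1, 2, abs(-3)-3])) #False
print(chr(ord('a')) == 'a') #True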
#Q4. Use filter and lambda to remove all the negative numbers from the list [1,-2,3,-5,8,-3].
#Build a lambda as the filter function that drops the negatives and run it as follows.
print(list(filter(lambda x:x>0,[1,-2,3,-5,8,-3])))
#Q5. The hexadecimal form of the decimal number 234 can be obtained as follows.
print(hex(234))
#Now do the reverse: convert the hexadecimal string 0xea to a decimal number.
print(int('0xea',16))
#Q6. Use map and lambda to turn the list [1,2,3,4] into [3,6,9,12] by multiplying each element by 3.
#Create a lambda that always multiplies its input by 3 and combine it with map as follows.
print(list(map(lambda x:x*3,[1,2,3,4])))
#Q7. Find the sum of the maximum and minimum values of the following list.
a = [-8,2,7,5,-3,5,0,1]
print(max(a) + min(a))
#Q8. The result of 17 / 3 is as follows.
print(17 / 3 )
#5.666666666666667
#Display the result 5.666666666666667 above rounded to 4 decimal places.
print(round(17/3,4))
#Q9. Write a script (C:/doit/myargv.py) that adds up all the input values and prints the total when run as follows.
#C:>cd doit
#C:/doit>python myargv.py 1 2 3 4 5 6 7 8 9 10
#55
#Use argv from the sys module as below to add up all the command-line inputs in turn.
import sys
numbers = sys.argv[1:] #all command-line inputs except the file name
result = 0
for number in numbers:
result += int(number)
print(result)
#Q10. Use the os module to write code that behaves as follows.
#1. Move to the C:/doit directory.
#Use chdir from the os module as below to move to the C:/doit directory.
import os
print(os.chdir("c:/doit"))
#2. Run the dir command and store its output in a variable.
#Then use popen from the os module as below to run the system command dir.
result = os.popen("dir")
#3. Print the output of the dir command.
#To print the popen result, do the following.
print(result.read())
#Q11. Use the glob module to write a program that prints only the files with the .py extension in the C:/doit directory.
#Use the glob module as follows.
import glob
print(glob.glob("c:/doit/*.py"))
#Q12. Use the time module to print the current date and time in the following format.
#2018/04/03 17:20:32
#Use strftime from the time module as follows.
import time
print(time.strftime("%Y/%m/%d %H:%M:%S")) #%Y: year, %m: month, %d: day, %H: hour, %M: minute, %S: second
#Q13. Use the random module to generate lotto numbers (6 numbers between 1 and 45).
#Use randint from the random module as follows.
import random
result = []
while len(result) < 6:
    num = random.randint(1, 45) #generate a random number from 1 to 45
if num not in result:
result.append(num)
print(result)
|
Yoon-kiyeong/Jump_Up_To_Python
|
Ch01/Part 04/Practice.py
|
Practice.py
|
py
| 4,802 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
8103602238
|
#
# @lc app=leetcode id=25 lang=python3
#
# [25] Reverse Nodes in k-Group
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
dummy = groupPrev = ListNode(0, head)
while True:
kth = self.getk(groupPrev, k)
if not kth:
break
groupPrev.next = kth
groupNext = kth.next
#reverse
curr, prev = head, groupNext
while curr != groupNext:
nxt = curr.next
curr.next = prev
prev = curr
curr = nxt
groupPrev = head
head = groupNext
return dummy.next
def getk(self, head, k):
while k > 0 and head:
head = head.next
k -= 1
return head
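# Illustrative trace: for head = 1->2->3->4->5 and k = 2, the groups (1,2) and (3,4)
# are each reversed in place, giving 2->1->4->3->5; the leftover node 5 is left untouched.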
# @lc code=end
|
HongyuZhu999/LeetCode
|
25.reverse-nodes-in-k-group.py
|
25.reverse-nodes-in-k-group.py
|
py
| 1,035 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74921836986
|
import tkinter as tt
from tkinter import *
from tkinter import Tk, Label, Button,Entry, StringVar
win = tt.Tk()# create the window object
# win.title('来自我的表白')#标题
# win.geometry('350x200+430+350')
# label = tt.Label(win,text='能做我女朋友吗?',font="微软雅黑",fg='#666',bg='red')
# label.pack()
# def mClick():
# label = tt.Label(win,text='爱你哦!',font="宋体",fg='#888888')
# label.place(x=70,y=100)
# def mClick1():
# label = tt.Label(win, text='再考虑一下不!我是认真的呢', font="宋体", fg='#888888')
# label.place(x=70,y=100)
# btn = Button(win,text='可以',command=mClick)
# btn1 = Button(win,text='不可以',command=mClick1)
# btn.place(x=70,y=50)
# btn1.place(x=230,y=50)
win.title('程序验证')# title
win.geometry('350x200+430+350')
txt1=StringVar() # declare as a StringVar object
txt2=StringVar()
label = Label(win, text="请输入密码!", font=('宋体','16'))
label.pack()
def mClick():
# L1 = Label(win, textvariable=txt2,font=('宋体', '16'))
# L1.pack()
    text = txt1.get()
    # txt2.set(text)
    if text == '123':
L2 = Label(win, text='恭喜你输入正确', font=('宋体','16'))
L2.pack()
else:
L3 = Label(win, text='输入错误', font=('宋体', '16'))
L3.pack()
txt1=Entry(win, textvariable=txt1, width=16, font=('宋体','16'))
txt1.pack()
btn=Button(win, text='确认', command=mClick)
btn.pack()
#It works!
win.mainloop()# event loop
|
git123hub121/Python-analysis
|
Tkinter/Tk.py
|
Tk.py
|
py
| 1,481 |
python
|
en
|
code
| 4 |
github-code
|
6
|
21396441749
|
import os
from django.conf import settings
from django.db import connection, close_old_connections
from django.db.utils import OperationalError
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from racetrack_client.utils.shell import shell, CommandError
from lifecycle.django.registry.database import db_access
from lifecycle.config import Config
def setup_health_endpoint(api: FastAPI, config: Config):
@api.get("/live", tags=['root'])
async def _live():
"""Report service liveness: whether it has started"""
return {
'service': 'lifecycle',
'live': True,
}
@api.get("/ready", tags=['root'])
async def _ready():
"""Report service readiness: whether it's available for accepting traffic"""
return {
'service': 'lifecycle',
'ready': True,
}
@api.get("/health", tags=['root'])
def _health():
"""Report current application status"""
db_connected = is_database_connected()
status_code = 200 if db_connected else 500
content = {
'service': 'lifecycle',
'live': True,
'ready': db_connected,
'database_connected': db_connected,
'git_version': os.environ.get('GIT_VERSION', 'dev'),
'docker_tag': os.environ.get('DOCKER_TAG', ''),
'auth_required': config.auth_required,
}
return JSONResponse(content=content, status_code=status_code)
@db_access
def is_database_connected() -> bool:
try:
django_db_type = os.environ.get('DJANGO_DB_TYPE', 'sqlite')
if django_db_type == 'postgres':
db_name = settings.DATABASES['default']['NAME']
user = settings.DATABASES['default']['USER']
host = settings.DATABASES['default']['HOST']
port = settings.DATABASES['default']['PORT']
shell(f'pg_isready -h {host} -p {port} -U {user} -d {db_name}', print_stdout=False)
close_old_connections()
with connection.cursor() as cursor:
cursor.execute('select 1')
cursor.fetchone()
cursor.close()
connection.close()
return True
except CommandError:
return False
except OperationalError:
return False
|
TheRacetrack/racetrack
|
lifecycle/lifecycle/endpoints/health.py
|
health.py
|
py
| 2,317 |
python
|
en
|
code
| 27 |
github-code
|
6
|
29128123138
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import embed_video.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tracks', '0006_auto_20150604_1856'),
]
operations = [
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(default=b'untitled', max_length=128, verbose_name='Title')),
('video', embed_video.fields.EmbedVideoField(help_text=b'Link to youtube or vimeo', verbose_name='Video Link')),
('user', models.ForeignKey(related_name='videos', to=settings.AUTH_USER_MODEL)),
],
),
]
|
TimBest/ComposersCouch
|
tracks/migrations/0007_video.py
|
0007_video.py
|
py
| 924 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32742347893
|
import requests,time
import traceback
import configparser
from bs4 import BeautifulSoup
import p_mysql,json
class jxy_all():
def xunhuan(self,gol_cookies):
wrong = 0
first_run = 0
jishu = 0
toufayu = False
multiple = [1, 3, 7, 15, 31, 63, 127, 34, 55, 89, 144, 1, 1]
maxwrong = 6
global moni
firstflag_vote = ''
current_period = ''
vote_retime = 0
endf = 1
wrongflag = False
vote_list = []
self.header = {"Accept": "text/html, application/xhtml+xml, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN",
"Connection": "Keep-Alive",
"Host": "www.juxiangyou.com",
"Referer": "http://www.juxiangyou.com/",
"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64;Trident/5.0)"}
post_head = {"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-cn",
"Cache-Control": "no-cache",
"Connection": "Keep-Alive",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Host": "www.juxiangyou.com",
"Referer": "http://www.juxiangyou.com/fun/play/crazy28/index",
"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
"X-Requested-With": "XMLHttpRequest"}
self.url = 'http://www.juxiangyou.com/fun/play/crazy28/index'
yinshu = 1
list_v = []
czlst = []
c_time = time.strftime('%m-%d %H:%M', time.localtime(time.time()))
try:
req = requests.get(self.url, cookies=gol_cookies, headers=self.header)
soup = BeautifulSoup(req.text, 'lxml')
            # query the current betting info
vote_info = soup.find('p', attrs={'class': 'time-static1'})
            # Step 1: find the current period. It must be found here because it is needed for placing bets.
if vote_info != None:
if (vote_info.text).find('正在开奖') > 0:
print('正在开奖,等待5秒')
time.sleep(5)
else:
                    # if the draw is not in progress, look up the current betting period
try:
vote_current = vote_info.find_all('span')
                        # look for the 'betting closed' marker
end_flag = (vote_info.text).find('截止投注')
if end_flag > 0:
                            # even if we already bet, the current period still needs to be recorded for the betting decision
print(vote_current[0].string + '期已经截止投注')
current_period = vote_current[0].string
else:
print('当前期' + vote_current[0].string + '剩余' + vote_current[1].string + '秒投注')
vote_retime = int(vote_current[1].string)
current_period = vote_current[0].string
except Exception as e:
print('搜索资料出错,列表错误')
print('traceback.format_exc():%s' % traceback.format_exc())
if current_period != '':
                # record the coin balance the first time through
try:
current_jinbi = (soup.find('span', attrs={'class': 'J_udou'}).string).replace(',', '')
except Exception as e:
print(repr(e))
if firstflag_vote == '':
firstflag_vote = current_period
firstflag_jinbi = current_jinbi
config = configparser.ConfigParser()
config.read("Config_jxyfk28.ini")
config_title = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
try:
config.add_section(config_title)
config.set(config_title, "starttime:", config_title)
config.set(config_title, "firstvote:", firstflag_vote)
config.set(config_title, "firstjinbi", firstflag_jinbi)
config.write(open("Config_jxyfk28.ini", "w"))
tempa = config.sections()
newa = []
findtime = time.strftime('%Y-%m-%d', time.localtime(time.time()))
# print(findtime)
for x in tempa:
# print(x.find(findtime))
if x.find(findtime) >= 0:
newa.append(x)
todayfirstjinbi = int(config.get(newa[0], 'firstjinbi'))
except configparser.DuplicateSectionError:
print("Section already exists")
                # scraping loop
mydb = p_mysql.MySQL()
                # query the latest period in the database, then display it
sql_text = "select period from jx_fk28 ORDER BY period DESC limit 1"
sql_re = mydb.query(sql_text)
if len(sql_re) <= 0:
endf = 44
else:
endf = int((int(current_period) - int(sql_re[0][0])) / 25) + 1
if endf >= 44:
endf = 44
self.up_dt_info.emit("需采集" + str(endf) + "页数")
w = 1
while w <= endf:
self.up_dt_info.emit("开始采集,第" + str(w) + "页---")
try:
base_time = int(time.time()) * 1000
x_sign = baseN(base_time, 36)
                        # add an X-Sign entry to the header dict: the millisecond timestamp in base 36
post_head['X-Sign'] = x_sign
                        # the server expects a string, so serialise the dict to JSON
a = json.dumps(
{"c": "quiz", "fun": "getEachList", "items": "crazy28", "pageSize": 23, "pageIndex": w})
b = json.dumps({"items": "crazy28"})
                        # the millisecond timestamp is also sent to the server as part of the post data
pst_data = {'jxy_parameter': a, 'timestamp': base_time, 'params': b,
'xtpl': 'fun/private/jc-index-tbl'}
url = 'http://www.juxiangyou.com/fun/play/interaction'
                        # POST the data to the server, submitting the merged login-page and captcha cookies
req_one = requests.post(url, data=pst_data, cookies=gol_cookies, headers=post_head,
allow_redirects=False)
vote_data = json.loads(req_one.text)
if vote_data['code'] == 10000:
for x in vote_data['itemList']:
period = x['num']
vote_time = x['date']
jcjg = x['jcjg2']
state = x['state']
if state == 1:
sql = "insert into jx_fk28 values ('" + period + "','" + vote_time + "','" + str(
jcjg) + "')"
mydb.query(sql)
w = w + 1
except Exception as e:
self.up_dt_info.emit("采集过程中,页面信息问题,重新采集该页")
print("错误:%s" % traceback.format_exc())
w = w - 1
if w <= 0:
w = 1
self.up_dt_info.emit("采集完成")
self.up_table_info.emit(req.text)
# if moni == 1 and first_run == 0:
# wrong = firstwrong
# print('当我更新wrong时,我的值还是',firstwrong)
if first_run == 0:
self.up_dt_info.emit('先搜索最近的一次错6')
remax = self.remaxwrong()
if int(current_period) - int(remax) <= 30:
moni = 0
first_run = 1
self.up_statusinfo.emit(
'第一次查询错六为: ' + str(remax) + " ,间隔期 : " + str(int(current_period) - int(remax)))
self.up_dt_info.emit('搜索结束')
                # only after each scrape completes do we pull data from the database to judge the last bet
                if vote_list: # if not empty, we bet last round; check whether it was correct.
try:
vote_period = str(vote_list[-1]).strip()
sql = "select * from jx_fk28 where period='" + vote_period + "' limit 1"
redata = mydb.query(sql)
last_vote = redata[0][2]
# print('返回列表', vote_list, '查找返回投注期的结果', last_vote[0])
self.up_dt_info.emit('上期投注列表' + str(vote_list))
if int(last_vote) in vote_list:
print('投注正确,倍率清空')
self.up_lastinfo.emit((vote_period, '', '', last_vote, '正确', ''))
wrong = 0
if wrongflag == True and moni == 1:
wrongflag = False
toufayu = True
jishu = 0
moni = 0
else:
self.up_lastinfo.emit((vote_period, '', '', last_vote, '错误', ''))
if int(last_vote) > 0:
# print('投注错误,次数加 1 ,错误次数:', wrong)
wrong = wrong + 1
if wrong >= maxwrong:
wrongflag = True
moni = 1
except Exception as e:
self.up_dt_info.emit("查询已投注的结果错误:%s" % traceback.format_exc())
# ---------------------------------------------------
s1 = int(current_period) - 1
s2 = str(int(current_period) - 2)
s3 = str(int(current_period) - 3)
s4 = str(int(current_period) - 4)
# sql = "select * from jx_fk28 where period='" + s1 + "' or period='" + s2 + "' or period='" + s3 + "' or period='" + s4 + "' order by period DESC"
sql = "select * from jx_fk28 where period <= %s order by period DESC LIMIT 20" % (s1)
# print(sql)
redata_1 = mydb.query(sql)
# print(redata_1)
last_1 = redata_1[0][2]
last_2 = redata_1[1][2]
last_3 = redata_1[2][2]
last_4 = redata_1[3][2]
print(last_1, last_2, last_3, last_4)
for x in redata_1:
czlst.append(int(x[2]))
print(czlst)
if vote_retime > 9:
if moni == 0:
if jishu >= 6 and wrong == 0:
toufayu = False
if toufayu == True:
yinshu = 20
jishu = jishu + 1
if jishu >= 250 and wrong <= 2:
moni = 1
jishu = 0
# print('lezhuan,最大错:', maxwrong, '当前错误', wrong, "金币:", '倍数', yinshu, '模拟', moni, '投注次数', jishu,
# '错标', wrongflag, '偷发育', toufayu)
# list_v = daxiao_1(last_1, last_2, last_3, last_4, multiple[wrong], yinshu)
list_v = daxiao_2(last_1, last_2, last_3, last_4, multiple[wrong], yinshu, czlst)
if list_v:
vote_list = vote_thing(current_period, list_v)
if int(vote_list[0]) < 10:
dd = '小'
else:
dd = '大'
self.up_curinfo.emit((current_period, multiple[wrong] * yinshu * 500, jishu, wrong,
int(current_jinbi) - todayfirstjinbi, moni, dd))
else:
vote_list = []
self.up_curinfo.emit((current_period, '', '', '', '', moni, ''))
del mydb
dealy_time = vote_retime + 28
self.up_dt_info.emit('延时%s刷新' % dealy_time)
for m in range(dealy_time, -1, -1):
self.up_lcd_num.emit(m)
time.sleep(1)
else:
self.up_dt_info.emit("当前期都没找到,继续延时30秒查找")
time.sleep(5)
except Exception as e:
print('traceback.format_exc():%s' % traceback.format_exc())
self.up_dt_info.emit("访问网站出错,等待10秒,重新访问" + repr(e))
time.sleep(5)
|
ssolsu/newproject
|
server_jxy.py
|
server_jxy.py
|
py
| 13,611 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40253497699
|
from django.urls import path
from . import views
app_name = 'chat'
urlpatterns = [
path('', views.index, name='index'),
path('create_room/', views.create_room, name='create_room'),
path('my_rooms/', views.rooms_list, name='rooms_list'),
path('<str:room_name>/', views.room, name='room'),
]
|
michalr45/django-chat
|
chat/urls.py
|
urls.py
|
py
| 308 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73790991549
|
from typing import List, Tuple
from abstract_puzzles import AbstractPuzzles
DATA_TYPE = List[Tuple[Tuple[int, int], Tuple[int, int]]]
class Puzzles(AbstractPuzzles):
def __init__(self, method_name):
super().__init__(
method_name,
day=4,
puzzle_1_example_answer=2,
puzzle_1_answer=487,
puzzle_2_example_answer=4,
puzzle_2_answer=849,
)
def read(self, file_path: str) -> Tuple[DATA_TYPE]:
data = []
with open(file_path, 'r') as f:
for line in f.read().splitlines():
elf1, elf2 = line.split(',')
elf1_start, elf1_end = elf1.split('-')
elf2_start, elf2_end = elf2.split('-')
data.append((
(int(elf1_start), int(elf1_end)),
(int(elf2_start), int(elf2_end)),
))
return data,
def puzzle_1(self, schedules: DATA_TYPE) -> int:
return len(list(filter(
lambda elfs: (elfs[0][0] >= elfs[1][0] and elfs[0][1] <= elfs[1][1]) or
(elfs[1][0] >= elfs[0][0] and elfs[1][1] <= elfs[0][1]),
schedules
)))
def puzzle_2(self, schedules: DATA_TYPE) -> int:
return len(list(filter(
lambda elfs: elfs[1][0] <= elfs[0][0] <= elfs[1][1] or
elfs[1][0] <= elfs[0][1] <= elfs[1][1] or
elfs[0][0] <= elfs[1][0] <= elfs[0][1] or
elfs[0][0] <= elfs[1][1] <= elfs[0][1],
schedules
)))
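    # Illustrative check against the AoC example (consistent with the example answers above):
    # a pair like ((2, 8), (3, 7)) has one range fully containing the other, so puzzle_1 counts it;
    # ((5, 7), (7, 9)) merely overlaps at 7, so only puzzle_2 counts it.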
|
Lynxens/AdventOfCode2022
|
advent_of_code/day4.py
|
day4.py
|
py
| 1,600 |
python
|
en
|
code
| 4 |
github-code
|
6
|
20236247442
|
# WAP count how many times the word India is repeated
# Get the data from the file
f = open('about_india.txt', "r")
data = f.read()
f.close()
#print(data)
words = data.split(" ")
#print(words)
c = 0
for word in words:
if word == "India":
#print(word)
c = c+1
print(c)
|
SreekanthChowdary19/PYCLS
|
class_examples/EVENING/example8.py
|
example8.py
|
py
| 293 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70097868029
|
import pygame as pg
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, screen, settings):
super(Ship, self).__init__()
self.screen = screen
self.settings = settings
self.sprite = pg.image.load('./assets/spaceship.png')
self.scale_factor = 10
self.sprite = pg.transform.scale(self.sprite, (self.sprite.get_width() // self.scale_factor , self.sprite.get_height() // self.scale_factor))
self.rect = self.sprite.get_rect()
self.screen_rect = self.screen.get_rect()
self.isMovingRight = False
self.isMovingLeft = False
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom - 5
def update(self):
if self.isMovingRight and (self.rect.right < self.screen_rect.right):
self.rect.centerx += self.settings.space_ship_speed
if self.isMovingLeft and (self.rect.left > self.screen_rect.left):
self.rect.centerx -= self.settings.space_ship_speed
def draw(self):
self.screen.blit(self.sprite, self.rect)
def center_ship(self):
self.rect.centerx = self.screen_rect.centerx
|
hoangdesu/Alien-Invasion-Pygame
|
ship.py
|
ship.py
|
py
| 1,239 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24199809037
|
class Solution:
def maxSubArray(self, nums):
"""
parameter:
nums: list[int]
return: int
"""
temp = nums[0]
max_ = temp
for i in range(1, len(nums)):
if temp > 0:
temp += nums[i]
max_ = max(temp, max_)
else:
temp = nums[i]
max_ = max(temp, max_)
return max_
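# Illustrative run: maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) returns 6,
# the sum of the contiguous subarray [4, -1, 2, 1] found by the Kadane-style running sum above.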
# class Solution:
# def maxSubArray(self, nums):
# """
# parameter:
# nums: list[int]
# return: int
# """
# if not nums:
# return 0
# self.nums = nums
# return self.divide_and_conquer(0, len(nums)-1)
# def divide_and_conquer(self, left, right):
# """
# parameter:
# left: int
# right: int
# return: int
# """
# if left == right:
# return self.nums[left]
# mid = (left + right) // 2
# left_max_sum = self.divide_and_conquer(left, mid)
# right_max_sum = self.divide_and_conquer(mid+1, right)
# left_board_sum = self.nums[mid]
# right_board_sum = self.nums[mid+1]
# max_left_board_sum = self.nums[mid]
# max_right_board_sum = self.nums[mid+1]
# # 向左扫描
# for i in range(mid-1, -1, -1):
# left_board_sum += self.nums[i]
# if left_board_sum > max_left_board_sum:
# max_left_board_sum = left_board_sum
# # 向右扫描
# for i in range(mid+2, right+1):
# right_board_sum += self.nums[i]
# if right_board_sum > max_right_board_sum:
# max_right_board_sum = right_board_sum
# return max(left_max_sum, right_max_sum, max_left_board_sum+max_right_board_sum)
|
AiZhanghan/Leetcode
|
code/53. 最大子序和.py
|
53. 最大子序和.py
|
py
| 1,851 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28857307321
|
import torch
import numpy as np
from six import string_types
from torch import optim
import inspect
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
import torch.nn.functional as F
from tqdm import tqdm
import copy
def get_function_args( fn ):
"""returns a list of all argumnts, dict of all the defualts , and list of all non default arguments
Args:
fn (function): [description]
Returns:
[type]: [description]
"""
args = inspect.getargspec( fn ).args
if inspect.getargspec( fn ).defaults is None:
n_defaults = 0
def_args = []
else:
n_defaults = len(inspect.getargspec( fn ).defaults )
def_args = list(inspect.getargspec( fn ).defaults )
if n_defaults > 0:
default_args = args[ -1*n_defaults : ]
else:
default_args = []
defaults = { a[0]:a[1] for a in zip(default_args , def_args ) }
non_defaults = args[: len( args) - n_defaults ]
return args , defaults , non_defaults
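# Illustrative use (hypothetical function f):
# def f(a, b, c=1, d=2): ...
# get_function_args(f) -> (['a', 'b', 'c', 'd'], {'c': 1, 'd': 2}, ['a', 'b'])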
# given a dictionary kwargs, this returns the subset of its entries that can be passed to the function fn_name
def filter_functions_kwargs(fn_name , kwargs ):
fn_args = inspect.getargspec( fn_name ).args
ret = {}
for k in kwargs:
if k in fn_args:
ret[ k ] = kwargs[k]
return ret
def str_to_auto_type(var):
#first test bools
if var == 'True' or var=='true':
return True
elif var == 'False' or var=='false':
return False
else:
#int
try:
return int(var)
except ValueError:
pass
#float
try:
return float(var)
except ValueError:
pass
    # homogeneous list
# todo
#string
try:
return str(var)
except ValueError:
raise NameError('Something Messed Up Autocasting var %s (%s)'
% (var, type(var)))
# returns a dictionary of named args from the cli
def get_cli_opts(argv):
opts = {} # Empty dictionary to store key-value pairs.
argv= copy.deepcopy(argv)
while argv: # While there are arguments left to parse...
if argv[0][0] == '-' and argv[0][1] == '-': # Found a "--name value" pair.
argv[0] = argv[0][2:] # remove '--'
            assert argv[0] != '' , "There is some issue with the cli args because a key cannot be empty"
assert not argv[0] in opts , "Repeated argument: "+argv[0]
opts[argv[0]] = str_to_auto_type( argv[1] ) # Add key and value to the dictionary.
argv = argv[1:] # Reduce the argument list by copying it starting from index 1.
return opts
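# Illustrative use (hypothetical argv):
# get_cli_opts(['--lr', '0.1', '--epochs', '5']) -> {'lr': 0.1, 'epochs': 5}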
def get_vars( data , cuda=False , numpy=False ):
# list( map( lambda x :Variable(torch.FloatTensor(x.float() )).cuda() , imgs ))
if type( data ) is tuple:
return tuple([ get_vars(d , cuda=cuda , numpy=numpy) for d in data ])
elif type( data ) is list:
return list([ get_vars(d , cuda=cuda , numpy=numpy) for d in data ])
elif type( data ) is dict:
return { k:get_vars(data[k] , cuda=cuda , numpy=numpy) for k in data }
else:
if numpy:
data = torch.from_numpy(data)
r = Variable( data )
if cuda:
r = r.cuda()
return r
def get_np_arrs( data ):
if type( data ) is tuple:
return tuple([ get_np_arrs(d ) for d in data ])
elif type( data ) is list:
return list([ get_np_arrs(d ) for d in data ])
elif type( data ) is dict:
return { k:get_np_arrs(data[k] ) for k in data }
else:
return data.cpu().detach().numpy()
class ProgressBar(tqdm):
def __init__( self , iterator ):
super(ProgressBar, self).__init__(iterator)
self.vals_history_dict = {}
def add( self , vals_dict ):
for k in vals_dict:
if not k in self.vals_history_dict:
self.vals_history_dict[k] = []
self.vals_history_dict[k].append( vals_dict[k])
self.bar_str = ""
for k in self.vals_history_dict:
self.bar_str += k+":"+ "%.3f"%(np.mean(self.vals_history_dict[k])) + " "
self.set_description(self.bar_str )
|
divamgupta/pytorch-propane
|
pytorch_propane/utils.py
|
utils.py
|
py
| 4,467 |
python
|
en
|
code
| 5 |
github-code
|
6
|
32872627279
|
from SofiPackage.enum_converter import ANSWERS_AND_QUESTIONS
from SofiPackage.db_choise import sample_of_values_to_enum
import random
def choose_from_random(options_dict):
rand = random.randint(0,100)
last_weight = 0
for option in options_dict:
if last_weight <= rand <= options_dict[option]:
return option
last_weight = options_dict[option]
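# Illustrative: with cumulative weights {'a': 10, 'b': 30, 'c': 60, 'd': 100}, a draw of 45
# falls in the (30, 60] band, so 'c' is returned (the dict values are cumulative, not per-option).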
def generate_oracle():
db_size = choose_from_random({'db_size_inf': 10, 'db_size_100_mb': 20, 'db_size_1_gb': 40, 'db_size_100_gb': 100})
db_flow_rate = choose_from_random({'db_flow_rate_100_mbd': 5, 'db_flow_rate_1_gbd': 20, 'db_flow_rate_100_gbd': 80, 'db_flow_rate_inf': 100})
store_time = choose_from_random({'store_time_1_month': 5, 'store_time_1_year': 20, 'store_time_2_years': 80, 'store_time_5_years': 100})
spatial_use = choose_from_random({'spatial_use': 30, 'non_spatial_use': 100})
complex_select = choose_from_random({'complex_select_10': 10, 'complex_select_25': 30, 'complex_select_60': 80, 'complex_select_100': 100})
select_by_user = choose_from_random({'select_by_user_10': 10, 'select_by_user_25': 30, 'select_by_user_50': 70, 'select_by_user_100': 100})
select_rate = choose_from_random({'select_rate_10_opm': 10, 'select_rate_100_opm': 30, 'select_rate_1000_opm': 80, 'select_rate_inf': 100})
schema_change = choose_from_random({'schema_change_1_year': 80, 'schema_change_5_year': 90, 'schema_change_10_year': 95, 'schema_change_dynamic': 100})
sample = ['oracle', 'schema', db_size, db_flow_rate, store_time, 'non_text_search', spatial_use, 'non_dynamic_schema',
complex_select, 'select_by_column', select_by_user, select_rate, schema_change, 'data_type_text', 'scale_up']
sample_string = ', '.join(str(elem) for elem in sample)
sample_string += '\n'
return sample_string
for i in range(100):
print(generate_oracle(), end='')
# oracle = ['schema', none, none, none, 'non_text_search', none, 'non_dynamic_schema',
# none, 'select_by_column', none, none, none, 'data_type_text', 'scale_up']
#
# mssql = ['schema', none, none, none, 'non_text_search', 'non_spatial_use', 'non_dynamic_schema',
# none, 'select_by_column', none, none, none, none, 'scale_up']
#
# hbase = ['non_schema', none, none, none, 'non_text_search', 'non_spatial_use', 'dynamic_schema',
# 'complex_select_10', 'select_by_key', 'select_by_user_10', none, none, none, 'scale_out']
#
# elastic = ['non_schema', none, none, none, 'text_search', none, 'dynamic_schema',
# none, none, none, none, none, 'data_type_text', 'scale_out']
#
# mongo = ['non_schema', none, none, none, 'non_text_search', none, 'dynamic_schema',
# none, none, none, none, none, none, 'scale_out']
# a = {'a': 10, 'b': 30, 'c': 60, 'd': 100}
# print(choose_from_random(a))
|
IdanM75/Sofi
|
generate_dataset.py
|
generate_dataset.py
|
py
| 3,039 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36651482906
|
from os import system
while True:
login = str(input("Informe o seu login: "))
senha = str(input("Informe a sua senha: "))
if senha == login:
print("Você não pode usar a mesma palavra em login e senha, pois não é seguro.")
print("Informe uma senha valida!")
else:
print("Você esta cadastrado, bem vindo(a)")
break
system("Cls")
while True:
login2 = str(input("Informe seu login: "))
    senha2 = str(input("Informe sua senha: "))
if login2 == login:
print("Nome de usuario não esta disponivel, tente outro")
elif login2 == senha2:
print("Você não pode usar a mesma palavra em login e senha, pois não é seguro.")
print("Informe uma senha válida!")
else:
        print("Você esta cadastrado, bem vindo(a)")
break
|
ellencamile/pythonEllen
|
Excercicios while/Questão1.py
|
Questão1.py
|
py
| 826 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
30569513843
|
from flask import Flask, render_template, flash, redirect, url_for, session, logging, request
from wtforms import Form, StringField, validators
import Project
import re
app = Flask(__name__)
@app.route("/search")
def search():
return render_template('search.html')
class WordPredictionForm(Form):
word = StringField('', [validators.Length(min=1, max=1000)])
# PROJECT NLP
@app.route('/', methods=['GET', 'POST'])
def index():
form = WordPredictionForm(request.form)
if request.method == 'POST' and form.validate():
word = form.word.data
print(word)
#Predict the Model
project = Project
word = re.sub(r'([^\s\w]|_)+', '', word)
seq = word[:40].lower()
# print(seq)
list = project.predict_completions(seq, 5)
chosen = list[0]
print(list)
flash("loading...")
# redirect(url_for('index', list=list))
return render_template('index.html', form=form, list=list, seq=seq, chosen=chosen, scroll='result')
return render_template('index.html', form=form)
if __name__ == "__main__":
app.secret_key = "secret123"
app.run(debug=True)
|
jmgang/wordpredictor
|
app.py
|
app.py
|
py
| 1,218 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17661433287
|
from itertools import islice
from collections import defaultdict
def distance(point):
return abs(point[0]) + abs(point[1])
def neighbours(point):
x, y = point
return ((x+1, y), (x-1, y), (x, y+1), (x, y-1),
(x+1, y+1), (x-1, y-1), (x+1, y-1), (x-1, y+1))
def spiral_seq():
yield 0, 0
x, y = 1, 0
inc_x, inc_y = 0, 1
while True:
yield x, y
if abs(x) == abs(y):
if x <= 0 and y <= 0:
inc_x, inc_y = 1, 0
elif x > 0 and y <= 0:
x += 1
y -= 1
inc_x, inc_y = 0, 1
elif x <= 0 and y > 0:
inc_x, inc_y = 0, -1
else:
inc_x, inc_y = -1, 0
x += inc_x
y += inc_y
def sequential_spiral(nth):
return next(islice(spiral_seq(), nth - 1, nth))
def neighbour_spiral(limit):
matrix = defaultdict(int)
matrix[(0, 0)] = 1
for point in islice(spiral_seq(), 1, None):
value = sum(matrix[neighbour] for neighbour in neighbours(point))
if value > limit:
return value
else:
matrix[point] = value
print(distance(sequential_spiral(368078)))
print(neighbour_spiral(368078))
|
pdhborges/advent-of-code
|
2017/3.py
|
3.py
|
py
| 1,231 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17035306194
|
def solve(input_str):
SIZE = 26
OFFSET = 97
a = list(input_str.strip().split()[1:])
result = [0] * SIZE
for char in a:
result[ord(char) - OFFSET] += 1
return " ".join(map(str, result))
print(solve(open(0).read()))
|
atsushi0919/paiza_workbook
|
data_structure/03-02_dict_step2.py
|
03-02_dict_step2.py
|
py
| 250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5487284095
|
import os
import subprocess
from typing import List # noqa: F401
from libqtile import bar, layout, widget, hook
from libqtile.config import Click, Drag, Group, Key, Match, Screen
from libqtile.lazy import lazy
from libqtile.utils import guess_terminal
mod = "mod4"
alt = "mod1"
terminal = guess_terminal()
qtile_path = os.path.expanduser('~/.config/qtile')
keys = [
# Switch between windows
Key([alt], "Tab",
lazy.layout.next(),
desc="Move window focus to other window"),
# Move windows
Key([mod, "shift"], "h",
lazy.layout.shuffle_left(),
desc="Move window to the left"),
Key([mod, "shift"], "l",
lazy.layout.shuffle_right(),
desc="Move window to the right"),
Key([mod, "shift"], "j",
lazy.layout.shuffle_down(),
desc="Move window down"),
Key([mod, "shift"], "k",
lazy.layout.shuffle_up(),
desc="Move window up"),
# Grow windows
Key([mod], "h",
lazy.layout.shrink_main(),
desc="Shrink Master"),
Key([mod], "l",
lazy.layout.grow_main(),
desc="Grow Master"),
Key([mod], "j",
lazy.layout.shrink(),
desc="Shrink secondary"),
Key([mod], "k",
lazy.layout.grow(),
desc="Grow Secondary"),
# Window keybindings
Key([mod, "control"], "space",
lazy.window.toggle_floating(),
desc="Toggle floating"),
Key([mod], "m",
lazy.window.toggle_maximize(),
desc="Toggle maximize"),
Key([mod], "n",
lazy.window.toggle_minimize(),
desc="Toggle minimize"),
Key([mod], "f",
lazy.window.toggle_fullscreen(),
desc="Toggle fullscreen"),
Key([mod], "q",
lazy.window.kill(),
desc="Kill focused window"),
# Toggle between different layouts as defined below
Key([mod], "space",
lazy.next_layout(),
desc="Next layouts"),
# Qtile control
Key([mod, "control"], "r",
lazy.restart(),
desc="Restart Qtile"),
Key([mod, "shift"], "q",
lazy.shutdown(),
desc="Shutdown Qtile"),
# Programs
Key([mod], "Return",
lazy.spawn(terminal),
desc="Launch terminal"),
Key([mod], "r",
lazy.spawn("dmenu_run_history -f -i -p 'Run: '"),
desc="Spawn a command using a prompt widget"),
Key([mod], "b",
lazy.spawn("brave-browser"),
desc="Open Default Browser"),
Key([mod], "c",
lazy.spawn("copyq toggle"),
desc="Open copyq prompt"),
Key([mod], "tab",
lazy.spawn("rofi -show"),
desc="rofi window"),
Key([mod], "s",
lazy.spawn("smplayer"),
desc="Open smplayer"),
Key([mod], "e",
lazy.spawn("nautilus"),
desc="Open nautilus"),
Key([mod, 'shift'], "e",
lazy.spawn("lf_fm"),
desc="Open lf"),
Key([mod], "comma",
lazy.spawn("codium " + qtile_path),
desc="Open qtile config"),
Key([mod, "shift"], "comma",
lazy.spawn("dmconf"),
desc="Open dmconf"),
# Media Keys
Key([mod, "control"], "Up",
lazy.spawn("pactl set-sink-volume @DEFAULT_SINK@ +10%"),
desc="Raise volume"),
Key([mod, "control"], "Down",
lazy.spawn("pactl set-sink-volume @DEFAULT_SINK@ -10%"),
desc="lower volume"),
Key([], "XF86AudioPlay",
lazy.spawn("playerctl play-pause"),
desc="Play/Pause"),
Key([], "XF86AudioNext",
lazy.spawn("playerctl next"),
desc="Next track"),
Key([], "XF86AudioPrev",
lazy.spawn("playerctl previous"),
desc="Previous track"),
]
groups = [Group(i) for i in "123456789"]
for i in groups:
keys.extend([
# mod1 + letter of group = switch to group
Key([mod], i.name, lazy.group[i.name].toscreen(),
desc="Switch to group {}".format(i.name)),
# mod1 + shift + letter of group = switch to & move focused window to group
Key([mod, "control"], i.name, lazy.window.togroup(i.name, switch_group=True),
desc="Switch to & move focused window to group {}".format(i.name)),
# Or, use below if you prefer not to switch to that group.
# mod1 + shift + letter of group = move focused window to group
Key([mod, "shift"], i.name, lazy.window.togroup(i.name),
desc="move focused window to group {}".format(i.name)),
])
layouts = [
layout.MonadTall(margin=5, ratio=.55, new_client_position='bottom'),
# layout.Max(),
# layout.Floating()
# layout.Columns(num_columns=2, insert_position=1, margin=5),
# Try more layouts by unleashing below layouts.
# layout.Columns(border_focus_stack='#d75f5f'),
# layout.Stack(num_stacks=2),
# layout.Bsp(),
# layout.Matrix(),
# layout.MonadWide(),
# layout.RatioTile(),
# layout.TreeTab(),
# layout.VerticalTile(),
# layout.Zoomy(),
]
widget_defaults = dict(
font='Monospace',
fontsize=12,
padding=3,
)
extension_defaults = widget_defaults.copy()
screens = [
Screen(
top=bar.Bar(
[
widget.GroupBox(),
widget.Prompt(),
# widget.WindowName(),
widget.WindowTabs(),
widget.Systray(),
widget.Clock(format='%Y-%m-%d %a %I:%M %p'),
widget.CurrentLayout(),
],
24,
),
),
]
# Drag floating layouts.
mouse = [
Drag([mod], "Button1", lazy.window.set_position(),
start=lazy.window.get_position()),
Drag([mod, "control"], "Button1", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
dgroups_key_binder = None
dgroups_app_rules = [] # type: List
main = None # WARNING: this is deprecated and will be removed soon
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(float_rules=[
# Run the utility of `xprop` to see the wm class and name of an X client.
*layout.Floating.default_float_rules,
Match(wm_class='confirmreset'), # gitk
Match(wm_class='makebranch'), # gitk
Match(wm_class='maketag'), # gitk
Match(wm_class='ssh-askpass'), # ssh-askpass
Match(title='branchdialog'), # gitk
Match(title='pinentry'), # GPG key password entry
Match(wm_class='Albert'),
Match(wm_class='copyq'),
])
auto_fullscreen = True
focus_on_window_activation = "smart"
@hook.subscribe.startup_once
def autostart():
autostart_script = os.path.expanduser('~/.config/qtile/autostart.sh')
subprocess.call([autostart_script])
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
|
AhmedHalim96/dotfiles
|
.config/qtile/config.py
|
config.py
|
py
| 7,199 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10561942242
|
# scrip for generation of charging points
#############################################################
import random
rng = random.Random()
import pandas as pd
import sys
import os
#############################################################
def eucl_dist(x1,y1,x2,y2):
return ( (x1-x2)**2 + (y1-y2)**2 )**0.5
#############################################################
def usage():
"""
Explain correct script call
"""
print("Please use the following command: <Python-Interpreter> instance_generator.py <Solomon instance folder> <number of service stations per quadrant> <charging time> <battery capacity> <Output folder>")
return None
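# Example invocation matching the usage string above (folder names and values are illustrative):
#   python instance_generator.py ./solomon/ 2 30 100 ./generated/
#   (2 stations per quadrant -> 8 stations total; charging time 30; battery capacity 100)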
def read_file(filename):
"""
Collect all data from a given file
with read_csv-method of pandas
Input:
path to Solomon file
Output:
list of depot and customer data
"""
result = []
data = pd.read_csv(filename, header = 5)
datalist = data.values.tolist()
for element in datalist:
result.append(element[0].split())
return result
def generate_coordinates_r(count):
"""
generate and return a given number (count)
of coordinates for service stations
randomly distributed in a 50 units radius
"""
result = []
while(len(result) < count):
x = rng.randint(0,100)
y = rng.randint(0,100)
if (eucl_dist(x,y,50,50) < 50):
check = True
for elem in result:
if (eucl_dist(x,y, elem[0], elem[1]) < 10):
check = False
if check:
result.append( (x,y) )
return result
def generate_coordinates_c(count):
"""
generate and return a given number (count)
of coordinates for service stations
distributed around congestion centers
NOTE: count should be divisible by the
length of centers
"""
result = []
centers = [(25,25), (25,75), (75,25), (75,75)]
partial_count = count//len(centers)
for center in centers:
subresult = []
while(len(subresult) < partial_count):
x = rng.randint(0,100)
y = rng.randint(0,100)
if (eucl_dist(x,y,center[0],center[1]) < 15):
subresult.append( (x,y) )
for elem in subresult:
result.append(elem)
return result
def generate_instance(name, solomon_data, service_stations, service_time, battery, capacity):
"""
compile the whole instance and
write it to name-file
NOTE: solomon data entry 0 is the depot!
"""
output = "ID\tType\tx\ty\tDemand\tReady\tDue\tService\n"
output += "D" + solomon_data[0][0] + "\td\t"
for index in range(1,7):
output += solomon_data[0][index] + "\t"
output += "\n"
output += "S" + solomon_data[0][0] + "\tf\t"
for index in range(1,6):
output += solomon_data[0][index] + "\t"
output += str(service_time) + "\n"
counter = 1
for element in service_stations:
output += "S" + str(counter) + "\tf\t" + str(element[0]) + "\t" + str(element[1]) + "\t"
for index in range(3,6):
output += solomon_data[0][index] + "\t"
output += str(service_time) + "\n"
counter += 1
for index_out in range(1,101):
output += "C" + solomon_data[index_out][0] + "\tc\t"
for index_in in range(1,7):
output += solomon_data[index_out][index_in] + "\t"
output += "\n"
output += "\n"
output += "Q battery capacity /" + str(battery) + "/ \n"
output += "C vehicle load /" + str(capacity) + "/ \n"
output += "R replenishment time /" + str(service_time) + "/ \n"
with open (name, 'w') as file_handle:
file_handle.write(output)
##############################################################
c1_num = 9
c2_num = 8
r1_num = 12
r2_num = 11
rc1_num = 8
rc2_num = 8
instance_numbers = [c1_num, c2_num, r1_num, r2_num, rc1_num, rc2_num]
c1_pre = 'c1'
c2_pre = 'c2'
r1_pre = 'r1'
r2_pre = 'r2'
rc1_pre = 'rc1'
rc2_pre = 'rc2'
prefixes = [c1_pre, c2_pre, r1_pre, r2_pre, rc1_pre, rc2_pre]
capacities = [200, 700, 200, 1000, 200, 1000]
suffix = '.txt'
##############################################################
if __name__ == '__main__':
if (len(sys.argv) != 6):
usage()
exit()
in_directory = sys.argv[1]
num_of_servicestations = 4 * int(sys.argv[2])
service_time = int(sys.argv[3])
battery = int(sys.argv[4])
out_directory = sys.argv[5]
if not os.path.exists(out_directory):
os.makedirs(out_directory)
service_stations = generate_coordinates_c(num_of_servicestations)
for index in range(6):
capacity = capacities[index]
for count in range(1,instance_numbers[index]+1):
#get input directory
input_file = in_directory + prefixes[index]
if count < 10:
input_file += "0" + str(count)
else:
input_file += str(count)
input_file += suffix
#write instance file
solomon_instance = read_file(input_file)
output_file = out_directory + prefixes[index]
if count < 10:
output_file += "0" + str(count)
else:
output_file += str(count)
output_file += "_" + str(num_of_servicestations) + suffix
generate_instance(output_file, solomon_instance, service_stations, service_time, battery, capacity)
|
SteffenPottel/td_vrptw_instancegenerator
|
src/instances/instance_generator.py
|
instance_generator.py
|
py
| 4,851 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21071659263
|
from enum import Enum
import ffmpeg
import numpy as np
import pandas as pd
import torch
from data_processing.custom_segmentation import CustomSegmentationStrategy
from data_processing.simple_segmentation import SimpleSegmentation
from data_processing.voice_activity_detection import VADSilero
class Method(Enum):
CUSTOM = "CUSTOM"
SILERO = "SILERO"
SIMPLE = "SIMPLE"
class AudioConvert:
def __init__(self, method: Method = Method.CUSTOM, use_gpu: bool = False):
self.method = method
if method == method.SILERO:
self.custom_speaker_activity_detection = VADSilero(use_gpu=use_gpu)
self.custom_segmentation = None
self.simple_segmentation = None
elif method == method.CUSTOM:
self.custom_segmentation = CustomSegmentationStrategy()
self.custom_speaker_activity_detection = None
self.simple_segmentation = None
elif method == method.SIMPLE:
self.custom_segmentation = None
self.custom_speaker_activity_detection = None
self.simple_segmentation = SimpleSegmentation()
@staticmethod
def read_file_to_np(audiofile_path: str):
out, err = (
ffmpeg
.input(audiofile_path)
.output('pipe:', format="wav", acodec="pcm_s16le", ar=16000, ac=1)
.run(capture_stdout=True)
)
numpy_array = np.frombuffer(out, dtype=np.int16)
return numpy_array
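    # Hedged sketch (the path is illustrative): the ffmpeg pipeline above decodes any input to
    # 16 kHz mono signed 16-bit PCM, so ten seconds of audio comes back as ~160_000 int16 samples:
    #   samples = AudioConvert.read_file_to_np("example.mp3")
    #   duration_seconds = len(samples) / 16000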
def convert_file_to_segments(self, audiofile_path: str):
audio = self.read_file_to_np(audiofile_path)
audio_tensor = torch.Tensor(audio)
if self.method == Method.CUSTOM:
vad_matrix = self.custom_speaker_activity_detection.get_VAD_matrix(audio_tensor)
self.custom_segmentation.plot_VAD(vad_matrix)
segments = self.custom_segmentation.segment(vad_matrix.numpy())
audio_segments = self.custom_speaker_activity_detection.audio_to_segments_from_stamps(audio, segments)
elif self.method == Method.SILERO:
timestamps = self.custom_speaker_activity_detection._get_speech_ts_adaptive(audio_tensor)
audio_segments = self.custom_speaker_activity_detection.audio_to_segments(audio, timestamps)
elif self.method == Method.SIMPLE:
audio_segments = self.simple_segmentation.segment(audio_tensor)
else:
raise RuntimeError()
return audio_segments
if __name__ == '__main__':
method = Method.SILERO
converter = AudioConvert(method=method, use_gpu=False)
audio_files = [
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/foelg_pengende/Foelg-pengene--Hvem-sk_5e5eee8c464747fdaab37a30a626df9b_192.mp3",
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/24_spørgsmål_til_professoren/Historier_fra_de_varme_lande.mp3",
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/danske_statsministre/Bang_Andr_f_rdigproduceret_med_intro_og_outro_online-audio-converter_com_.mp3",
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/den_agile_podcast/Podcast#3 - Agile kontra vandfald.mp3",
#"/media/rafje/danspeech/data_mining/unlabeled/podcasts/supertanker/Supertanker--USA-paa-r_2c271306def14480840af87150e5d636_192.mp3",
"/home/rafje/Downloads/Foelg-pengene--Apple--_823566a09c664d17aad77862d288473a_192.mp3"
]
audio_lenghts = []
for audio_file in audio_files:
lengths = map(lambda x: len(x[2]) / 16000, converter.convert_file_to_segments(audio_file))
audio_lenghts.append(lengths)
import matplotlib.pyplot as plt
all_lengths = []
lower_seconds = 4
upper_seconds = 15
under_seconds = []
between = []
over_seconds = []
for i in range(len(audio_lenghts)):
current_lengths = list(audio_lenghts[i])
all_lengths += current_lengths
df = pd.DataFrame(current_lengths, columns=['one'])
ax = df.plot.hist(bins=20, alpha=0.5)
plt.show()
for audio_length in current_lengths:
if audio_length < lower_seconds:
under_seconds.append(audio_length)
if audio_length > upper_seconds:
over_seconds.append(audio_length)
else:
between.append(audio_length)
df = pd.DataFrame(all_lengths, columns=['Audio lengths'])
ax = df.plot.hist(bins=20, alpha=0.5)
plt.show()
print(f"Length under: {len(under_seconds)}")
print(f"Length over: {len(over_seconds)}")
print(f"Length between: {len(between)}")
print(f"total length: {len(under_seconds) + len(over_seconds) + len(between)}")
print(f"Length under seconds: {sum(under_seconds)}")
print(f"Length over seconds: {sum(over_seconds)}")
print(f"Length between seconds: {sum(between)}")
print(f"total length seconds: {sum(under_seconds) + sum(over_seconds) + sum(between)}")
|
centre-for-humanities-computing/Gjallarhorn
|
data_processing/convert_audiofile_to_segments.py
|
convert_audiofile_to_segments.py
|
py
| 4,941 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18015910724
|
import os
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Import PyWavelets library
import pywt
import pywt.data
# Load an example image
path = os.path.dirname(__file__)
image_path = "image.jpg"
original_image = cv2.imread(os.path.join(path, image_path), cv2.IMREAD_GRAYSCALE)
# Perform 2D wavelet transform (MRA) on the original image
''' pywt.dwt2 returns a pair: the approximation LL and a tuple of detail bands (LH, HL, HH).
LL = Approximation, LH = Horizontal detail, HL = Vertical detail, HH = Diagonal detail.
"haar" is the name of the wavelet used. '''
coeffs2 = pywt.dwt2(original_image, 'haar')
LL, (LH, HL, HH) = coeffs2
# Define meta information (for example, a watermark)
'''Random meta-information is generated using NumPy's
np.random.randint function. The meta_info variable
contains random integer values between 0 and 127.
The goal is to embed this meta-information into the
approximation component (LL) of the wavelet-transformed image.'''
meta_info = np.random.randint(0, 128, size=LL.shape) # Ensure meta_info has the same dimensions as LL
# Resize meta_info to match the shape of LL
meta_info_resized = cv2.resize(meta_info, (LL.shape[1], LL.shape[0]))
# Exchange the LL (approximation) coefficients with meta information
LL_with_meta_info = LL + meta_info_resized
# Reconstruct the image using the modified coefficients
'''The modified coefficients, including LL_with_meta_info,
LH, HL, and HH, are used to reconstruct the modified image
using the inverse wavelet transform with the 'haar' wavelet.
The reconstructed image is stored in the modified_image variable.'''
modified_image = pywt.idwt2((LL_with_meta_info, (LH, HL, HH)), 'haar')
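# Hedged aside (not part of the original script): since the Haar transform gives perfect
# reconstruction for even-sized images, the embedded values could in principle be read back by
# transforming the modified image again and subtracting the original approximation band:
#   LL_check, _ = pywt.dwt2(modified_image, 'haar')
#   recovered_meta = LL_check - LL   # approximately meta_info_resized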
# Plot the original and modified images
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.imshow(original_image, cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(modified_image, cmap='gray')
plt.title('Modified Image with Meta Information')
plt.axis('off')
plt.tight_layout()
plt.show()
|
kio7/smart_tech
|
Submission 2/Task_4/wavelet_transform.py
|
wavelet_transform.py
|
py
| 1,989 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38265334911
|
"""
https://www.jianshu.com/p/892ebd063ad9
https://svn.python.org/projects/python/trunk/Objects/listsort.txt
https://hg.python.org/cpython/file/5c1bacba828d/Objects/listobject.c
https://www.infopulse.com/blog/timsort-sorting-algorithm/
https://github.com/RonTang/SimpleTimsort/blob/master/SimpleTimsort.py
Timsort is a combination of merge sort and insertion sort.
"""
class timsort:
def __init__(self,a):
        self.a = a
        a_len = len(a)
        self.minRun = 32
        self.minRun = self.dyMinRun(a_len)
def dyMinRun(self,n):
        # r becomes 1 if any of the bits shifted off the end are set
r=0
while n>=64:
r|=n&1
n>>=1
return n+r
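    # A few hand-computed sample values for this minrun calculation:
    #   dyMinRun(63) -> 63, dyMinRun(64) -> 32, dyMinRun(65) -> 33, dyMinRun(100) -> 50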
def rangeCheck(self,len,fromIdx,toIdx):
if fromIdx>toIdx:
            raise Exception("fromIdx>toIdx")
if fromIdx<0:
raise Exception("fromIdx<0")
if toIdx>len:
raise Exception("toIdx>len")
    def sort(self):
        self._sort(self.a, 0, len(self.a))
    def _sort(self, array, lo, hi):
self.rangeCheck(len(array),lo,hi)
nRemaining=hi-lo
if nRemaining<2:
return
        # arrays shorter than MIN_MERGE do not need merge sort
if nRemaining<self.minRun:
pass
|
wangbl11/yirobot
|
a7m/sort/timsort.py
|
timsort.py
|
py
| 1,266 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31963305131
|
import sys, math
number_list=list(range(0,2*123456+1))
root_number = int(math.sqrt(123456*2))
for i in range(2,root_number+1):
if number_list[i]==0:
continue
target = i+i
while target <= 2*123456:
number_list[target] = 0
target +=i
while True:
N = int(sys.stdin.readline())
if N == 0 :
break
cnt=0
new_number = number_list[N+1:2*N+1]
for i in new_number:
if i != 0:
            cnt += 1
print(cnt)
|
yongwoo-jeong/Algorithm
|
백준/Silver/4948. 베르트랑 공준/베르트랑 공준.py
|
베르트랑 공준.py
|
py
| 465 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7354238248
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from kouzi_crawler.items import KouziCrawlerItem
class QzkeySpider(CrawlSpider):
name = 'qzkey'
allowed_domains = ['qzkey.com']
start_urls = ['http://mimi1688.aly611.qzkey.com/']
rules = (
Rule(LinkExtractor(allow=r'Product.aspx\?typeid=\d+'), callback='parse_item', follow=True),
)
def parse_item(self, response):
app_list = response.xpath('//dl[@class="cpDl2"]/dd/ul//li')
kouzi_name = '有鱼汇'
kouzi_link = response.url
kouzi_type = 'web'
for item in app_list:
app_item = KouziCrawlerItem()
app_item['app_name'] = item.xpath('./a//dd//h3/text()').extract_first().strip()
app_item['app_link'] = item.xpath('./a/@href').extract_first()
app_item['kouzi_type'] = kouzi_type
app_item['kouzi_name'] = kouzi_name
app_item['kouzi_link'] = kouzi_link
yield app_item
|
largerbigsuper/kouzi_crawler
|
kouzi_crawler/spiders/qzkey.py
|
qzkey.py
|
py
| 1,054 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42543021370
|
# Initial code written by Stevo.
"""Wraps BGT's timer object in Python.
"""
import time
class TimerException(Exception):
"""Raised when an error occurs.
Currently does nothing when raised.
"""
pass
class Timer:
"""Timer object."""
def __init__(self):
"""Initializes the object."""
self.start_time: int = get_current_ms()
self.running: bool = True
self.current_time: int = 0
def restart(self):
"""Restarts the timer backed to 0."""
self.start_time = get_current_ms()
self.running = True
self.current_time = 0
def pause(self):
"""Pauses the timer, but does not reset it."""
self.running = False
self.current_time = get_current_ms() - self.start_time
def resume(self):
"""Resumes the timer (if it was paused)."""
self.running = True
self.start_time = get_current_ms() - self.current_time
self.current_time = 0
def get_elapsed(self):
"""Property function for elapsed."""
if not self.running:
return int(self.current_time)
else:
return int(get_current_ms() - self.start_time)
def set_elapsed(self, elapsed):
"""Property function to set elapsed."""
if elapsed < 0:
raise TimerException("This value must be at least 0")
        self.start_time = get_current_ms() - elapsed
if not self.running:
self.current_time = elapsed
elapsed: property = property(get_elapsed, set_elapsed)
def get_current_ms():
"""Returns the current time in the propper format for the timer."""
    c_time: float = time.time()
c_time *= 1000
return c_time
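# Hedged usage sketch (the sleep length is illustrative):
#   t = Timer()
#   time.sleep(0.5)
#   t.pause()            # elapsed freezes at roughly 500 ms
#   t.resume()
#   print(t.elapsed)     # milliseconds since start, excluding time spent paused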
|
trypolis464/ag_py
|
agpy/timer.py
|
timer.py
|
py
| 1,710 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70063372668
|
from pwn import *
from LibcSearcher import *
context.log_level = 'debug'
# p=process('./babyconact')
p=remote('t.ctf.qwq.cc',49512)
pause()
elf=ELF('./babyconact')
infos=0x4036E0
backdoor=0x0000000000401722
def show():
p.recvuntil(b'option> ')
p.sendline(b'1')
def create(name,val):
p.recvuntil(b'option> ')
p.sendline(b'2')
p.recvuntil(b'Input contact name:\n')
p.sendline(name)
p.recvuntil(b'Input contact phone-number:\n')
p.sendline(val)
def delete(index):
p.recvuntil(b'option> ')
p.sendline(b'3')
p.sendline(str(index))
def edit(index,name,val):
p.recvuntil(b'option> ')
p.sendline(b'4')
p.recvuntil(b'Input contact index:\n')
p.sendline(str(index))
p.recvuntil(b'Input contact name:\n')
p.sendline(name)
p.recvuntil(b'Input contact phone-number:\n')
p.sendline(val)
for i in range(10):
create(b'aaaa',b'bbbb')
delete(0)
payload1=b'\x56\x10\x40'
payload2=p64(backdoor)+p64(backdoor)
edit(-2,payload1,payload2)
p.interactive()
|
CookedMelon/mypwn
|
NPU/babyconact/exp.py
|
exp.py
|
py
| 1,060 |
python
|
fr
|
code
| 3 |
github-code
|
6
|
74280993467
|
import os
import sys
import threading
import asyncio
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import discord
client = None
channel = None
ready = False
def init():
global client
global channel
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)
# discord.utils.get(channels.guild.channels, name="")
@client.event
async def on_ready():
global ready
ready = True
print(f"We have logged in as {client.user}")
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('$hello'):
await message.channel.send('Hello!')
def start(token):
threading.Thread(target=client.run, args=(token,)).start()
def send_message(channel_id, text, files=[]):
channel = client.get_channel(channel_id)
    if channel is None:
print("no such channel")
return
client.loop.create_task(channel.send(text, files=[discord.File(p) for p in files]))
def stop():
client.loop.create_task(client.close())
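# Hedged usage sketch (the token and channel id below are placeholders, not real values):
#   init()
#   start("DISCORD_BOT_TOKEN")
#   # ... once `ready` flips to True:
#   send_message(123456789012345678, "cat spotted", files=["snapshot.jpg"])
#   stop()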
|
mojyack/rpi-cat-monitor
|
remote.py
|
remote.py
|
py
| 1,161 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11044907424
|
import tkinter
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
from UI import helper_functions as hf
from operations import globalVars
from UI.PCCLI import PCCli
class PCCanvasObject(object):
def __init__(self, canvas, block_name, icons, class_object, master, time_class, load=False):
self._x = None
self._y = None
self.canvas = canvas
self.block_name = block_name
self.class_object = class_object
self.class_object.set_canvas_object(self)
self.master = master
self.icons = icons
self.internal_clock = time_class
self.internal_clock.add_pc(self)
self.class_object.set_internal_clock(self.internal_clock)
# Cursor Location when object is created
x = self.canvas.canvasx(self.canvas.winfo_pointerx() - self.canvas.winfo_rootx())
y = self.canvas.canvasy(self.canvas.winfo_pointery() - self.canvas.winfo_rooty())
# Cursor Location when object is created
# Icon Stuff
self.icon = self.icons[0]
self.config_icon = self.icons[1]
self.terminal_icon = self.icons[2]
self.ethernet_del_icon = self.icons[3]
self.x_node_icon = self.icons[4]
# Assigned to canvas_object to allow to delete
self.canvas_object = self.canvas.create_image(x, y, image=self.icon, tags=(self.block_name, "PC", "Node"))
self.canvas.photo = self.icon
# Icon Stuff
# Hover menu Stuff
self.hover_area = self.canvas.create_polygon(x - 50, y - 50, x + 45, y - 50, x + 45, y - 75, x + 95, y - 75,
x + 95, y + 75, x + 45, y + 75, x + 45, y + 50, x - 50, y + 50,
fill="")
self.menu_buttons = self.canvas.create_polygon(x + 40, y + 0, x + 50, y - 5, x + 50, y - 72, x + 92, y - 72,
x + 92, y + 72, x + 50, y + 72, x + 50, y + 5,
outline="black", fill="NavajoWhite2", width=1,
tags=('Hover_Menus', ))
self.canvas.itemconfigure(self.menu_buttons, state='hidden')
self.config_button = tk.Button(self.canvas, width=25, height=25, image=self.config_icon)
self.terminal_button = tk.Button(self.canvas, width=25, height=25, image=self.terminal_icon)
self.disconnect_button = tk.Button(self.canvas, width=25, height=25, image=self.ethernet_del_icon)
self.delete_button = tk.Button(self.canvas, width=25, height=25, image=self.x_node_icon)
self.config_button.config(background='gray75', foreground="white", relief=tk.GROOVE)
self.terminal_button.config(background='gray75', foreground="white", relief=tk.GROOVE)
self.disconnect_button.config(background='gray75', foreground="white", relief=tk.GROOVE)
self.delete_button.config(background='gray75', foreground="white", relief=tk.GROOVE)
self.config_button_window = self.canvas.create_window(x + 57, y - 65, window=self.config_button, state='hidden', tag="Menu_Button")
self.terminal_button_window = self.canvas.create_window(x + 57, y - 31, window=self.terminal_button, state='hidden', tag="Menu_Button")
self.disconnect_button_window = self.canvas.create_window(x + 57, y + 3, window=self.disconnect_button, state='hidden', tag="Menu_Button")
self.delete_button_window = self.canvas.create_window(x + 57, y + 37, window=self.delete_button, state='hidden', tag="Menu_Button")
# Hover menu Stuff
# Button Bindings
if not load:
self.canvas.tag_bind(self.block_name, '<Motion>', self.motion) # When creating the object
self.canvas.tag_bind(self.block_name, '<Button-1>', self.motion) # When creating the object
self.canvas.tag_bind(self.block_name, '<B1-Motion>', self.motion) # When moving the object after it is created
self.canvas.tag_bind(self.block_name, '<ButtonRelease-1>',
self.button_release) # When moving the object after it is created
# Button Bindings
# Config Window Stuff
self.config_window = None
self.ipv4_field = None # To set focus
self.hostname_field = None # To set focus
self.gateway_field = None
self.prefix_field = None
self.ipv6_field = None
self.ipv6_link_local_field = None
self.ipv6_link_local_prefix_field = None
self.netmask_field = None
self.auto_config_nic = tk.BooleanVar() # DHCP
# Config Window Stuff
# CLI Stuff
self.cli_window = None
self.cli = None
self.cli_object = None
self.cli_busy = False
self.cli_text = "PC> "
self.created_terminal = False
self.command_history = []
self.command_history_index = -1
# CLI Stuff
# Light Stuff
self.line_connections = {}
self.tag_1 = ""
self.tag_2 = ""
self.interface_1 = None
self.interface_2 = None
self.l1 = None
self.l2 = None
# Light Stuff
def motion(self, event=None):
if not event:
event_x = self.canvas.coords(self.block_name)[0] + 0.000005
event_y = self.canvas.coords(self.block_name)[1] + 0.000005
else:
event_x = self.canvas.canvasx(event.x)
event_y = self.canvas.canvasy(event.y)
# Hide the menu
self.unbind_menu_temporarily()
# Move the object
self.canvas.coords(self.block_name, event_x, event_y)
# Move the hover area and menu buttons
self.canvas.coords(self.hover_area, event_x - 50, event_y - 50,
event_x + 45, event_y - 50,
event_x + 45, event_y - 75,
event_x + 95, event_y - 75,
event_x + 95, event_y + 75,
event_x + 45, event_y + 75,
event_x + 45, event_y + 50,
event_x - 50, event_y + 50)
self.canvas.coords(self.menu_buttons, event_x + 40, event_y,
event_x + 50, event_y - 5,
event_x + 50, event_y - 72,
event_x + 92, event_y - 72,
event_x + 92, event_y + 72,
event_x + 50, event_y + 72,
event_x + 50, event_y + 5)
try:
line = self.canvas.find_withtag(self.tag_1 + "_line_" + self.tag_2 + "_0")
light_1 = self.canvas.find_withtag(self.tag_1 + "_light_" + self.tag_2 + "_0")
light_2 = self.canvas.find_withtag(self.tag_2 + "_light_" + self.tag_1 + "_0")
if line:
self.canvas.delete(light_1)
self.l1 = hf.draw_circle(self.canvas.coords(line)[0], self.canvas.coords(line)[1],
self.canvas.coords(line)[2],
self.canvas.coords(line)[3], 4, self.canvas,
self.tag_1 + "_light_" + self.tag_2 + "_0")
self.canvas.delete(light_2)
self.l2 = hf.draw_circle(self.canvas.coords(line)[2], self.canvas.coords(line)[3],
self.canvas.coords(line)[0],
self.canvas.coords(line)[1], 4, self.canvas,
self.tag_2 + "_light_" + self.tag_1 + "_0")
self.canvas.tag_lower(self.l1, list(self.line_connections.keys())[0].get_obj_1().get_canvas_object())
self.canvas.tag_lower(self.l2, list(self.line_connections.keys())[0].get_obj_2().get_canvas_object())
[self.canvas.tag_raise(self.menu_buttons, light) for light in self.canvas.find_withtag('light')]
if ((globalVars.show_link_lights and globalVars.light_state) or
(not globalVars.show_link_lights and globalVars.light_state)):
self.canvas.itemconfig(self.l1, state='normal')
self.canvas.itemconfig(self.l2, state='normal')
elif ((globalVars.show_link_lights and not globalVars.light_state) or
(not globalVars.show_link_lights and not globalVars.light_state)):
self.canvas.itemconfig(self.l1, state='hidden')
self.canvas.itemconfig(self.l2, state='hidden')
if 0 <= abs(event_x - self.canvas.coords(line)[0]) <= 30 and 0 <= abs(
event_y - self.canvas.coords(line)[1]) <= 30:
self.canvas.coords(line, event_x, event_y,
self.canvas.coords(line)[2], self.canvas.coords(line)[3])
self.canvas.itemconfig(self.l1, fill=hf.get_color_from_op(self.interface_1.get_is_operational()))
self.canvas.itemconfig(self.l2, fill=hf.get_color_from_op(self.interface_2.get_is_operational()))
elif 0 <= abs(event_x - self.canvas.coords(line)[2]) <= 30 and 0 <= abs(
event_y - self.canvas.coords(line)[3]) <= 30:
self.canvas.coords(line, self.canvas.coords(line)[0], self.canvas.coords(line)[1],
event_x, event_y)
self.canvas.itemconfig(self.l2, fill=hf.get_color_from_op(self.interface_1.get_is_operational()))
self.canvas.itemconfig(self.l1, fill=hf.get_color_from_op(self.interface_2.get_is_operational()))
except StopIteration:
pass
self._x = event_x
self._y = event_y
globalVars.prompt_save = True
return
def button_release(self, event):
self.canvas.tag_unbind(self.block_name, "<Motion>")
self.canvas.tag_unbind(self.block_name, "<Button-1>")
# For the object menu
self.canvas.tag_bind(self.hover_area, '<Enter>', self.on_start_hover)
self.canvas.tag_bind(self.hover_area, '<Leave>', self.on_end_hover)
self.canvas.tag_bind(self.block_name, '<Enter>', self.on_start_hover)
self.canvas.tag_bind(self.block_name, '<Leave>', self.on_end_hover)
self.canvas.tag_bind(self.menu_buttons, '<Enter>', self.on_start_hover)
self.config_button.bind('<Enter>', self.config_button_bg_enter)
self.config_button.bind('<Leave>', self.config_button_bg_leave)
self.config_button.bind('<Button-1>', self.open_config_menu)
self.terminal_button.bind('<Enter>', self.terminal_button_bg_enter)
self.terminal_button.bind('<Leave>', self.terminal_button_bg_leave)
self.terminal_button.bind('<Button-1>', self.menu_pc_cli)
self.disconnect_button.bind('<Enter>', self.disconnect_button_bg_enter)
self.disconnect_button.bind('<Leave>', self.disconnect_button_bg_leave)
self.disconnect_button.bind('<Button-1>', self.disconnect_cable)
self.delete_button.bind('<Enter>', self.delete_button_bg_enter)
self.delete_button.bind('<Leave>', self.delete_button_bg_leave)
self.delete_button.bind('<Button-1>', lambda e, q=False: self.menu_delete(e, q))
if event:
self.on_start_hover(event)
def hide_menu(self, on_delete=False):
self.canvas.itemconfigure(self.menu_buttons, state='hidden')
self.canvas.itemconfigure(self.config_button_window, state='hidden')
self.canvas.itemconfigure(self.terminal_button_window, state='hidden')
self.canvas.itemconfigure(self.disconnect_button_window, state='hidden')
self.canvas.itemconfigure(self.delete_button_window, state='hidden')
self.config_button.place_forget()
self.terminal_button.place_forget()
self.disconnect_button.place_forget()
self.delete_button.place_forget()
if on_delete:
self.config_button.destroy()
self.terminal_button.destroy()
self.disconnect_button.destroy()
self.delete_button.destroy()
def unbind_menu_temporarily(self):
self.canvas.tag_unbind(self.hover_area, '<Enter>')
self.canvas.tag_unbind(self.hover_area, '<Leave>')
self.canvas.tag_unbind(self.block_name, '<Enter>')
self.canvas.tag_unbind(self.block_name, '<Leave>')
self.canvas.tag_unbind(self.menu_buttons, '<Enter>')
self.canvas.tag_unbind(self.menu_buttons, '<Leave>')
self.config_button.unbind('<Enter>')
self.terminal_button.unbind('<Enter>')
self.disconnect_button.unbind('<Enter>')
self.delete_button.unbind('<Enter>')
# Hide menu
self.hide_menu()
def open_config_menu(self, event):
def hide_window():
self.config_window.withdraw()
self.config_window = tk.Toplevel(self.canvas)
self.config_window.protocol('WM_DELETE_WINDOW', hide_window)
x = (globalVars.screen_width / 2) - (700 / 2)
y = (globalVars.screen_height / 2) - (375 / 2) - 100
self.config_window.geometry('%dx%d+%d+%d' % (700, 375, x, y))
self.config_window.wm_iconphoto(False, self.icons[1])
self.config_window.wm_title("Configure PC")
self.config_window.resizable(False, False)
configure_menu = ttk.Notebook(self.config_window)
general_tab = ttk.Frame(configure_menu)
interface_tab = ttk.Frame(configure_menu)
configure_menu.bind("<<NotebookTabChanged>>", lambda e=event: self.set_focus_on_tab_change(e))
configure_menu.add(general_tab, text='General Configuration')
configure_menu.add(interface_tab, text='Interface Configuration')
configure_menu.pack(expand=1, fill="both")
# General Tab
tk.Label(general_tab, text="Hostname:").place(x=50, y=75)
self.hostname_field = tk.Entry(general_tab, width=20)
self.hostname_field.insert(0, self.class_object.get_host_name())
self.hostname_field.place(x=150, y=75)
tk.Label(general_tab, text="MAC Address:").place(x=50, y=150)
mac_address = tk.Entry(general_tab, width=20)
mac_address.insert(0, self.class_object.get_mac_address())
mac_address.place(x=150, y=150)
# General Tab
# Interface Tab
ask_b4_quick_del_check = tk.Checkbutton(interface_tab, text='Auto-configure interface settings '
'(Requires a DHCP server)',
variable=self.auto_config_nic, onvalue=True, offvalue=False,
command=self.set_auto_configure)
ask_b4_quick_del_check.place(x=50, y=25)
tk.Label(interface_tab, text="IPv4 Address:").place(x=50, y=75)
self.ipv4_field = tk.Entry(interface_tab, width=20)
self.ipv4_field.insert(0, self.class_object.get_ipv4_address())
self.ipv4_field.place(x=150, y=75)
tk.Label(interface_tab, text="Subnet Mask:").place(x=335, y=75)
self.netmask_field = tk.Entry(interface_tab, width=20)
self.netmask_field.insert(0, self.class_object.get_netmask())
self.netmask_field.place(x=435, y=75)
tk.Label(interface_tab, text="Default Gateway:").place(x=50, y=125)
self.gateway_field = tk.Entry(interface_tab, width=20)
self.gateway_field.insert(0, self.class_object.get_default_gateway())
self.gateway_field.place(x=150, y=125)
tk.Label(interface_tab, text="IPv6 Address:").place(x=50, y=175)
self.ipv6_field = tk.Entry(interface_tab, width=54)
self.ipv6_field.insert(0, self.class_object.get_ipv6_address())
self.ipv6_field.place(x=53, y=195)
tk.Label(interface_tab, text="/").place(x=380, y=195)
self.prefix_field = tk.Entry(interface_tab, width=3)
self.prefix_field.insert(0, self.class_object.get_prefix())
self.prefix_field.place(x=390, y=195)
tk.Label(interface_tab, text="IPv6 Link Local Address:").place(x=50, y=245)
self.ipv6_link_local_field = tk.Entry(interface_tab, width=54)
self.ipv6_link_local_field.insert(0, self.class_object.get_ipv6_link_local_address())
self.ipv6_link_local_field.place(x=53, y=265)
tk.Label(interface_tab, text="/").place(x=380, y=265)
self.ipv6_link_local_prefix_field = tk.Entry(interface_tab, width=3)
self.ipv6_link_local_prefix_field.insert(0, self.class_object.get_link_local_prefix())
self.ipv6_link_local_prefix_field.place(x=390, y=265)
# Interface Tab
# Save Button
save_btn = tk.Button(configure_menu, width=10, height=1, text="Save", relief=tk.GROOVE,
command=lambda: self.save_general_parameters(self.hostname_field.get(), mac_address.get(),
self.ipv4_field.get(),
self.netmask_field.get(),
self.gateway_field.get(),
self.ipv6_field.get(),
self.prefix_field.get(),
self.ipv6_link_local_field.get(),
self.ipv6_link_local_prefix_field.get(),
self.config_window))
save_btn.place(x=590, y=325)
save_btn.bind('<Enter>', lambda e, btn=save_btn: hf.button_enter(e, btn))
save_btn.bind('<Leave>', lambda e, btn=save_btn: hf.button_leave(e, btn))
# Save Button
self.toggle_config_fields()
self.hide_menu()
def set_auto_configure(self):
self.class_object.set_auto_configure(self.auto_config_nic.get())
self.toggle_config_fields()
def set_fields_from_dhcp(self, ipv4_address, netmask, default_gateway):
if not ipv4_address:
ipv4_address = ''
if not netmask:
netmask = ''
if not default_gateway:
default_gateway = ''
try:
# Empty fields
self.ipv4_field.delete('0', tk.END)
self.netmask_field.delete('0', tk.END)
self.gateway_field.delete('0', tk.END)
self.ipv4_field.insert(tk.END, ipv4_address)
self.netmask_field.insert(tk.END, netmask)
self.gateway_field.insert(tk.END, default_gateway)
except (AttributeError, tk.TclError):
pass
def toggle_config_fields(self):
if self.auto_config_nic.get():
            self.ipv4_field.config(state="disabled")
            self.netmask_field.config(state="disabled")
            self.gateway_field.config(state="disabled")
# self.ipv6_field.config(state= "disabled")
# self.prefix_field.config(state= "disabled")
else:
self.ipv4_field.config(state="normal")
self.netmask_field.config(state="normal")
self.gateway_field.config(state="normal")
# self.ipv6_field.config(state="normal")
# self.prefix_field.config(state="normal")
def set_focus_on_tab_change(self, event):
if event.widget.select() == '.!canvas.!toplevel.!notebook.!frame':
self.hostname_field.focus_set()
else:
self.ipv4_field.focus_set()
def disconnect_cable(self, event):
try:
cable = self.class_object.get_interfaces()[0].get_canvas_cable()
cable.delete_canvas_cable()
except (tk.TclError, AttributeError):
pass
self.hide_menu()
globalVars.prompt_save = True
# Disable the hover area when disconnect cable is clicked because mouse lands on the hover area causing the menu
# to reappear instantly. It is re-enabled in self.on_end_hover()
self.canvas.itemconfigure(self.hover_area, state="hidden")
def menu_delete(self, event, is_quick_del, reset=False):
if ((not is_quick_del and globalVars.ask_before_delete) or (is_quick_del and globalVars.ask_before_quick_delete)
and not reset):
answer = messagebox.askokcancel("Delete PC", "Delete this PC?")
else:
answer = True
if answer:
self.disconnect_cable(event)
self.internal_clock.remove_pc(self)
if not is_quick_del:
globalVars.pc_objects.remove(self)
globalVars.objects.remove(self)
self.canvas.delete(self.canvas_object)
self.canvas.delete(self.hover_area)
self.canvas.delete(self.menu_buttons)
self.class_object = None
# Destroy windows when deleting node
if self.cli_window:
self.cli_window.destroy()
if self.config_window:
self.config_window.destroy()
# In case, remove all tooltips
[self.canvas.delete(i) for i in self.canvas.find_withtag("Config_Tooltip")]
[self.canvas.delete(i) for i in self.canvas.find_withtag("Terminal_Tooltip")]
[self.canvas.delete(i) for i in self.canvas.find_withtag("Disconnect_Tooltip")]
[self.canvas.delete(i) for i in self.canvas.find_withtag("Delete_Tooltip")]
self.hide_menu(on_delete=True)
else:
self.hide_menu()
globalVars.prompt_save = True
def save_general_parameters(self, hostname, mac_address, ipv4, netmask, default_route, ipv6, ipv6_prefix,
ipv6_ll, ipv6_ll_prefix, parent):
hostname_flag = True
ipv4_flag = True
netmask_flag = True
default_route_flag = True
ipv6_flag = True
ipv6_prefix_flag = True
ipv6_ll_flag = True
ipv6_ll_prefix_flag = True
# must have a hostname
if not hostname:
hostname_flag = False
# check mac address
mac_address_flag = hf.check_mac_address(mac_address)
# following fields are optional
if ipv4:
ipv4_flag = hf.check_ipv4(ipv4)
netmask_flag = hf.check_subnet_mask(netmask)
if default_route:
default_route_flag = hf.check_ipv4(default_route)
if ipv6:
ipv6_flag = hf.check_ipv6(ipv6)
ipv6_prefix_flag = hf.check_ipv6_prefix(ipv6_prefix)
if ipv6_ll:
ipv6_ll_flag = hf.check_ipv6(ipv6_ll)
ipv6_ll_prefix_flag = hf.check_ipv6_prefix(ipv6_ll_prefix)
if (hostname_flag and mac_address_flag and ipv4_flag and ipv6_flag and netmask_flag and default_route_flag and
ipv6_flag and ipv6_prefix_flag and ipv6_ll_flag and ipv6_ll_prefix_flag):
self.class_object.set_host_name(hostname)
self.class_object.set_mac_address(mac_address)
self.class_object.set_ipv4_address(ipv4)
self.class_object.set_netmask(netmask)
self.class_object.set_ipv6_address(ipv6)
self.class_object.set_prefix(ipv6_prefix)
self.class_object.set_default_gateway(default_route)
self.class_object.set_ipv6_link_local_address(ipv6_ll)
self.class_object.set_ipv6_link_local_prefix(ipv6_ll_prefix)
parent.withdraw()
globalVars.prompt_save = True
else:
if not hostname_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a Hostname', parent=parent)
elif not mac_address_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a valid MAC Address', parent=parent)
elif not ipv4_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a valid IPv4 Address', parent=parent)
elif not netmask_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a valid Subnet Mask', parent=parent)
elif not default_route_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a valid Default Gateway', parent=parent)
elif not ipv6_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a valid IPv6 Address', parent=parent)
elif not ipv6_prefix_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a valid IPv6 Prefix', parent=parent)
elif not ipv6_ll_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a valid IPv6 Link Local Address', parent=parent)
elif not ipv6_ll_prefix_flag:
messagebox.showerror('Invalid Parameter', 'Please Enter a valid IPv6 Link Local Prefix', parent=parent)
def menu_pc_cli(self, main_event):
def hide_window():
self.cli_window.withdraw()
if not self.created_terminal:
self.cli_window = tk.Toplevel(self.canvas)
x = (globalVars.screen_width / 2) - (700 / 2)
y = (globalVars.screen_height / 2) - (800 / 2) - 50
self.cli_window.geometry("%dx%d+%d+%d" % (700, 800, x, y))
self.cli_window.wm_iconphoto(False, self.icons[2])
self.cli_window.wm_title("Terminal")
self.cli_window.protocol('WM_DELETE_WINDOW', hide_window)
self.cli_window.focus_set()
self.cli_object = PCCli(self, self.class_object, self.cli_window, self.cli_text, "PC> ", 'white', 'white')
self.cli = self.cli_object.get_cli()
self.created_terminal = True
else:
self.cli_window.deiconify()
self.hide_menu()
def get_class_object(self):
return self.class_object
def get_canvas_object(self):
return self.canvas_object
def get_block_name(self):
return self.block_name
def get_info(self, info, linebreak, last):
if linebreak:
self.cli.insert(tk.END, info)
self.cli.insert(tk.END, "\n")
else:
self.cli.insert(tk.END, info)
def toggle_cli_busy(self):
if not self.cli_busy:
self.cli_busy = True
self.cli.bind("<Key>", lambda e: "break")
else:
self.cli_busy = False
self.cli.unbind("<Key>")
self.cli.insert(tk.END, "\n\n" + self.class_object.get_host_name() + "> ")
def add_line_connection(self, tag1, tag2, ignored_1, ignored_2, canvas_cable_object):
self.line_connections[canvas_cable_object] = [tag1, tag2]
self.tag_1 = tag1
self.tag_2 = tag2
def del_line_connection(self, cable):
self.line_connections.pop(cable)
def get_line_connection_count(self, ignored_1, ignored_2):
if len(self.line_connections) > 0:
return len(self.line_connections), self.line_connections
else:
return 0, None
def set_interfaces(self, ignored, int1, int2):
self.interface_1 = int1
self.interface_2 = int2
def on_start_hover(self, event):
for item in globalVars.objects:
item.hide_menu()
try:
if type(self.master.focus_displayof()) == tkinter.Tk: # If the root has focus
self.canvas.itemconfigure(self.menu_buttons, state='normal') # Add the frame to the canvas
self.canvas.itemconfigure(self.config_button_window, state='normal')
self.canvas.moveto(self.config_button_window, self._x + 57, self._y - 65)
self.canvas.itemconfigure(self.terminal_button_window, state='normal')
self.canvas.moveto(self.terminal_button_window, self._x + 57, self._y - 31)
self.canvas.itemconfigure(self.disconnect_button_window, state='normal')
self.canvas.moveto(self.disconnect_button_window, self._x + 57, self._y + 3)
self.canvas.itemconfigure(self.delete_button_window, state='normal')
self.canvas.moveto(self.delete_button_window, self._x + 57, self._y + 37)
self.config_button.place()
self.terminal_button.place()
self.disconnect_button.place()
self.delete_button.place()
return
except tkinter.TclError:
pass
self.master.update() # Program hangs without calling update
def on_end_hover(self, event):
self.config_button.place_forget()
self.terminal_button.place_forget()
self.disconnect_button.place_forget()
self.delete_button.place_forget()
self.canvas.itemconfigure(self.menu_buttons, state='hidden')
self.canvas.itemconfigure(self.config_button_window, state='hidden')
self.canvas.itemconfigure(self.terminal_button_window, state='hidden')
self.canvas.itemconfigure(self.disconnect_button_window, state='hidden')
self.canvas.itemconfigure(self.delete_button_window, state='hidden')
# The hover area is disabled when a cable is disconnected because the mouse will land in the hove area and
# make the menu reappear instantly. This line re-enables it.
self.canvas.itemconfigure(self.hover_area, state="normal")
self.master.update() # Program hangs without calling update
return
def get_lights(self, ignored):
return self.l1, self.l2
def config_button_bg_enter(self, event):
self.on_start_hover(event)
self.canvas.after(600, lambda c=self.canvas, b=self.config_button, text="Configure this PC",
tag="Config_Tooltip",
p=(self._x + 57, self._y - 65): hf.create_tooltip(c, b, text, tag, p))
self.config_button.config(background='gray89', foreground="white", relief=tk.SUNKEN)
def config_button_bg_leave(self, event):
self.config_button.config(background='gray75', foreground="white", relief=tk.GROOVE)
[self.canvas.delete(i) for i in self.canvas.find_withtag("Config_Tooltip")]
def terminal_button_bg_enter(self, event):
self.on_start_hover(event)
self.canvas.after(600, lambda c=self.canvas, b=self.terminal_button, text="Open the Terminal",
tag="Terminal_Tooltip", p=(self._x + 57, self._y - 31),
offset=(1, 0): hf.create_tooltip(c, b, text, tag, p, offset))
self.terminal_button.config(background='gray89', foreground="white", relief=tk.SUNKEN)
def terminal_button_bg_leave(self, event):
self.terminal_button.config(background='gray75', foreground="white", relief=tk.GROOVE)
[self.canvas.delete(i) for i in self.canvas.find_withtag("Terminal_Tooltip")]
def disconnect_button_bg_enter(self, event):
self.on_start_hover(event)
self.canvas.after(600, lambda c=self.canvas, b=self.disconnect_button, text="Disconnect Connections",
tag="Disconnect_Tooltip", p=(self._x + 57, self._y + 3),
offset=(20, 0): hf.create_tooltip(c, b, text, tag, p, offset))
self.disconnect_button.config(background='gray89', foreground="white", relief=tk.SUNKEN)
def disconnect_button_bg_leave(self, event):
self.disconnect_button.config(background='gray75', foreground="white", relief=tk.GROOVE)
[self.canvas.delete(i) for i in self.canvas.find_withtag("Disconnect_Tooltip")]
def delete_button_bg_enter(self, event):
self.on_start_hover(event)
self.canvas.after(600, lambda c=self.canvas, b=self.delete_button, text="Delete PC", tag="Delete_Tooltip",
p=(self._x + 57, self._y + 37), offset=(-20, 0): hf.create_tooltip(c, b, text,
tag, p,
offset))
self.delete_button.config(background='gray89', foreground="white", relief=tk.SUNKEN)
def delete_button_bg_leave(self, event):
self.delete_button.config(background='gray75', foreground="white", relief=tk.GROOVE)
[self.canvas.delete(i) for i in self.canvas.find_withtag("Delete_Tooltip")]
# -------------------------- Save & Load Methods -------------------------- #
def get_save_info(self):
return [self._x, self._y, self.block_name, self.cli_text, self.command_history, self.command_history_index,
self.tag_1, self.tag_2, self.l1, self.l2, self.class_object.get_save_info()]
def set_pos(self, x_pos, y_pos):
self._x = x_pos
self._y = y_pos
self.canvas.coords(self.canvas_object, x_pos, y_pos)
# Move the hover area and menu buttons
self.canvas.coords(self.hover_area, self._x - 50, self._y - 50,
self._x + 45, self._y - 50,
self._x + 45, self._y - 75,
self._x + 95, self._y - 75,
self._x + 95, self._y + 75,
self._x + 45, self._y + 75,
self._x + 45, self._y + 50,
self._x - 50, self._y + 50)
self.canvas.coords(self.menu_buttons, self._x + 40, self._y,
self._x + 50, self._y - 5,
self._x + 50, self._y - 72,
self._x + 92, self._y - 72,
self._x + 92, self._y + 72,
self._x + 50, self._y + 72,
self._x + 50, self._y + 5)
self.button_release(None)
def get_coords(self):
return [self._x, self._y]
# -------------------------- Save & Load Methods -------------------------- #
|
KarimKabbara00/Network-Simulator
|
UI/PCCanvasObject.py
|
PCCanvasObject.py
|
py
| 34,256 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26531202521
|
from simplivity.resources.resource import ResourceBase
URL = '/hosts'
DATA_FIELD = 'hosts'
class Hosts(ResourceBase):
"""Implements features available for SimpliVity Host resources."""
def __init__(self, connection):
super(Hosts, self).__init__(connection)
def get_all(self, pagination=False, page_size=0, limit=500, offset=0,
sort=None, order='descending', filters=None, fields=None,
case_sensitive=True, show_optional_fields=False):
"""Gets all hosts.
Args:
pagination: True if need pagination
page_size: Size of the page (Required when pagination is on)
limit: A positive integer that represents the maximum number of results to return
offset: A positive integer that directs the service to start returning
the <offset value> instance, up to the limit.
sort: The name of the field where the sort occurs.
order: The sort order preference. Valid values: ascending or descending.
filters: Dictionary with filter values. Example: {'name': 'name'}
id: The unique identifier (UID) of the host
Accepts: Single value, comma-separated list
name: The name of the host
Accepts: Single value, comma-separated list, pattern using one or more
asterisk characters as a wildcard
type: The type of host
Accepts: Single value, comma-separated list, pattern using one or more
asterisk characters as a wildcard
model: The model of the host
Accepts: Single value, comma-separated list, pattern using one or more
asterisk characters as a wildcard
version: The version of the host
Accepts: Single value, comma-separated list, pattern using one or more
asterisk characters as a wildcard
hypervisor_management_system: The IP address of the Hypervisor Management System (HMS)
associated with the host
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
hypervisor_management_system_name: The name of the Hypervisor Management System (HMS)
associated with the host
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
hypervisor_object_id: The unique identifier (UID) of the hypervisor associated
with the host
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
compute_cluster_name: The name of the compute cluster associated with the host
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
compute_cluster_hypervisor_object_id: The unique identifier (UID)
of the Hypervisor Management System (HMS) for the associated compute cluster
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
management_ip: The IP address of the HPE OmniStack management module that
runs on the host
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
storage_ip: The IP address of the HPE OmniStack storage module that runs on the host
Accepts: Single value, comma-separated list, pattern using one or more
asterisk characters as a wildcard
federation_ip: The IP address of the federation
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
virtual_controller_name: The name of the Virtual Controller that runs on the host
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
compute_cluster_parent_name: The name of the hypervisor that contains the omnistack
cluster that is associated with the instance
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
compute_cluster_parent_hypervisor_object_id: The unique identifier (UID) of the
hypervisor that contains the omnistack_cluster that is associated with the instance
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
policy_enabled: An indicator to show the status of the backup policy for the host
Valid values:
True: The backup policy for the host is enabled.
False: The backup policy for the host is disabled.
current_feature_level_min: The minimum current feature level of the HPE OmniStack
software running on the host
current_feature_level_max: The maximum current feature level of the HPE OmniStack
software running on the host
potential_feature_level_min: The minimum potential feature level of the HPE OmniStack
software running on the host
potential_feature_level_max: The maximum potential feature level of the HPE OmniStack
software running on the host
upgrade_state: The state of the most recent HPE OmniStack software upgrade for this
host (SUCCESS, FAIL, IN_PROGRESS, NOOP, UNKNOWN)
Accepts: Single value, comma-separated list, pattern using one or more asterisk
characters as a wildcard
can_rollback: An indicator to show if the current HPE OmniStack software running on
the host can roll back to the previous version
Valid values:
True: The current HPE OmniStack software for the host can roll back to the previous version.
False: The current HPE OmniStack software for the host cannot roll back to the previous version.
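            fields: Comma-separated list of fields to include in the returned objects
            case_sensitive: An indicator to show whether filter values are matched
                case-sensitively
            show_optional_fields: An indicator to show whether optional fields are
                included in the returned objects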
Returns:
list: list of Host objects
"""
return self._client.get_all(URL,
members_field=DATA_FIELD,
pagination=pagination,
page_size=page_size,
limit=limit,
offset=offset,
sort=sort,
order=order,
filters=filters,
fields=fields,
case_sensitive=case_sensitive,
show_optional_fields=show_optional_fields)
def get_by_data(self, data):
"""Gets Host object from host data.
Args:
data: host data
Returns:
object: Host object.
"""
return Host(self._connection, self._client, data)
class Host(object):
"""Implements features available for single Host resource."""
OBJECT_TYPE = 'host'
def __init__(self, connection, resource_client, data):
self.data = data
self._connection = connection
self._client = resource_client
self._hosts = Hosts(self._connection)
def __refresh(self):
"""Updates the host data."""
resource_uri = "{}/{}".format(URL, self.data["id"])
self.data = self._client.do_get(resource_uri)[self.OBJECT_TYPE]
def reload_data(self):
self.__refresh()
def remove(self, force=False, timeout=-1):
"""Removes the specified host from the federation.
Args:
force: An indicator that specifies if the host should be removed forcefully or not.
Valid values:
True: Forces the removal of the host even if active virtual machines are
present and if the host is not HA-compliant. This may cause data loss.
False: Returns an error if there are any virtual machines on the host or if the host is not HA-compliant.
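            timeout: Time out for the request in seconds.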
"""
http_headers = {"Content-type": 'application/vnd.simplivity.v1.9+json'}
method_url = "{}/{}/remove_from_federation".format(URL, self.data["id"])
data = {"force": force}
self._client.do_post(method_url, data, timeout, http_headers)
self.data = None
def get_hardware(self):
"""Retrieves the hardware information for the host"""
resource_uri = "{}/{}/hardware".format(URL, self.data["id"])
return self._client.do_get(resource_uri)
def get_virtual_controller_shutdown_status(self):
"""Retrieves the shutdown status of the Virtual Controller"""
resource_uri = "{}/{}/virtual_controller_shutdown_status".format(URL, self.data["id"])
status = self._client.do_get(resource_uri)
return status['shutdown_status']['status']
def shutdown_virtual_controller(self, ha_wait=True, timeout=-1):
"""Shuts down the Virtual Controller safely (by reaching HA compliance) or by force.
Args:
ha_wait: An indicator to show if the user wants to shut down the Virtual Controller safely or forcefully.
Valid values:
True: Virtual Controller waits for the virtual machines to reach HA compliance before shutting down.
False: Virtual Controller forced to shut down without waiting for HA compliance.
timeout: Time out for the request in seconds.
Returns:
status: Possible values are 'SUCCESS', 'FAILURE', 'UNKNOWN', 'IN_PROGRESS'.
"""
method_url = "{}/{}/shutdown_virtual_controller".format(URL, self.data["id"])
data = {"ha_wait": ha_wait}
status = self._client.do_post(method_url, data, timeout)
return status['shutdown_status']['status']
def cancel_virtual_controller_shutdown(self, timeout=-1):
"""Cancels the virtual controller shutdown.
Args:
timeout: Time out for the request in seconds.
Returns:
status: Possible values are 'SUCCESS', 'FAILURE', 'UNKNOWN', 'IN_PROGRESS'.
"""
resource_uri = "{}/{}/cancel_virtual_controller_shutdown".format(URL, self.data["id"])
status = self._client.do_post(resource_uri, None, timeout)
return status['cancellation_status']['status']
def get_capacity(self, fields=None, time_offset=0, range=43200, resolution="MINUTE"):
"""Gets host capacity.
Args:
fields: Comma-separated list of fields to include in the returned objects.
time_offset: A time offset in seconds (from now) or a datetime, expressed in ISO-8601 form,
based on Coordinated Universal Time (UTC).
range: A range in seconds (the duration from the specified point in time).
resolution: The resolution (SECOND, MINUTE, HOUR, or DAY).
Returns:
dict: Dictionary of the capacity details.
"""
resource_uri = "{}/{}/capacity".format(URL, self.data["id"])
filters = {'time_offset': time_offset, 'range': range, 'resolution': resolution}
if fields:
filters["fields"] = fields
return self._client.do_get(resource_uri, filters)
def get_metrics(self, time_offset=0, range=43200, resolution="MINUTE"):
"""Retrieves throughput, IOPS, and latency data for the host.
Args:
time_offset: A time offset in seconds (from now) or a datetime, expressed in ISO-8601 form,
based on Coordinated Universal Time (UTC).
range: A range in seconds (the duration from the specified point in time).
resolution: The resolution (SECOND, MINUTE, HOUR, or DAY).
Returns:
dict: Dictionary of the metrics details.
"""
resource_uri = "{}/{}/metrics".format(URL, self.data["id"])
filters = {'time_offset': time_offset, 'range': range, 'resolution': resolution}
return self._client.do_get(resource_uri, filters)
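# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). `connection` stands for an already-authenticated connection object
# of the kind this SDK hands to its resource classes; building one is outside
# the scope of this file, and the cluster name below is made up.
# ---------------------------------------------------------------------------
def _example_list_cluster_hosts(connection, cluster_name="Cluster1"):
    """Print a few details for the hosts of one compute cluster (sketch only)."""
    hosts = Hosts(connection)
    # Server-side filtering and sorting, as described in the get_all() docstring.
    matching = hosts.get_all(limit=10, sort="name", order="ascending",
                             filters={"compute_cluster_name": cluster_name})
    for host in matching:
        # host.data holds the raw REST payload; field availability depends on the API version.
        print(host.data.get("name"), host.data.get("state"))
        # Capacity for the last hour at one-minute resolution.
        print(host.get_capacity(range=3600, resolution="MINUTE"))
    return matching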
|
HewlettPackard/simplivity-python
|
simplivity/resources/hosts.py
|
hosts.py
|
py
| 12,467 |
python
|
en
|
code
| 7 |
github-code
|
6
|
18602034777
|
from django import forms
from bankapp.models import Person, City
GENDER_CHOICES = [
('Male', 'Male'),
('Female', 'Female')
]
MATERIALS_PROVIDE_CHOICE = [
('Debit Card', 'Debit Card'),
('Credit Card', 'Credit Card'),
('Check Book', 'Check Book'),
]
class PersonCreationForm(forms.ModelForm):
gender = forms.ChoiceField(choices=GENDER_CHOICES, widget=forms.RadioSelect)
materials = forms.MultipleChoiceField(label='Materials Provide', choices=MATERIALS_PROVIDE_CHOICE,
widget=forms.CheckboxSelectMultiple)
class Meta:
model = Person
fields = '__all__'
widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Your Name'}),
            'email': forms.EmailInput(attrs={'class': 'form-control', 'placeholder': 'Enter Your Email-ID'}),
            'address': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Your Address'}),
            'age': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Enter Your Age'}),
            'dob': forms.DateInput(attrs={'class': 'form-control', 'type': 'date'}),
            'account': forms.Select(attrs={'class': 'form-control'}),
            'district': forms.Select(attrs={'class': 'form-control'}),
            'city': forms.Select(attrs={'class': 'form-control'}),
            'mob': forms.NumberInput(attrs={'class': 'form-control', 'placeholder': 'Enter Your Mobile Number'}),
}
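    # The city dropdown depends on the selected district: start with an empty queryset,
    # then narrow it either from the submitted form data or from the bound instance.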
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['city'].queryset = City.objects.none()
if 'district' in self.data:
try:
district_id = int(self.data.get('district'))
self.fields['city'].queryset = City.objects.filter(district_id=district_id).order_by('name')
except (ValueError, TypeError):
pass # invalid input from the client; ignore and fallback to empty City queryset
elif self.instance.pk:
self.fields['city'].queryset = self.instance.district.city_set.order_by('name')
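# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original file). The
# district -> city dependent dropdown above is normally paired with a small
# AJAX view that returns the cities for the chosen district; the view name and
# response shape below are hypothetical, only the City model comes from this app.
# ---------------------------------------------------------------------------
from django.http import JsonResponse
def load_cities(request):
    """Return the cities of the requested district as JSON (sketch only)."""
    try:
        district_id = int(request.GET.get('district', ''))
    except (TypeError, ValueError):
        # No or invalid district selected yet: return an empty list.
        return JsonResponse({'cities': []})
    cities = City.objects.filter(district_id=district_id).order_by('name')
    return JsonResponse({'cities': list(cities.values('id', 'name'))})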
|
Manjith123/Easybankproject
|
bankapp/forms.py
|
forms.py
|
py
| 2,110 |
python
|
en
|
code
| 0 |
github-code
|
6
|