[Dataset viewer header: one record per entry with the following columns]
seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)
seq_id: 39956836499
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 19 14:11:57 2022
@author: Saskia Hustinx
"""
import re
import gpt_2_simple as gpt2
import tensorflow as tf
import json
import tweepy
import random
import time
### NLP SECTION ###
def words_in_string(word_list, a_string):
return set(word_list).intersection(a_string.split())
def generate_tweets(num):
texts = gpt2.generate(sess, run_name='run2', temperature=0.9, length=50, nsamples=num, return_as_list=True)
res = []
for text in texts:
        # escape the "|" characters: left unescaped they act as regex alternation
        match = re.findall(r'<\|startoftext\|>(.*?[.?!])', text)
        # keep samples with exactly one prompt token and a reasonably long sentence
        if len(match) == 1 and len(match[0]) > 40:
            if not words_in_string(filter_list, match[0]):
                res.append(match[0])
return res
### TWITTER ###
# Parse the credentials for the twitter bot
with open("cred.json", "r") as json_file:
twitter_creds = json.load(json_file)
# Set the credentials based on the credentials file
API_KEY = twitter_creds['api-key']
API_SECRET = twitter_creds['api-secret']
BEARER_TOKEN = twitter_creds['bearer-token']
ACCESS_KEY = twitter_creds['access-token']
ACCESS_SECRET = twitter_creds['access-secret']
def api():
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
return tweepy.API(auth)
def fetch_dms():
# fetching the direct messages
direct_messages = api.get_direct_messages()
print(len(direct_messages))
print(direct_messages)
return direct_messages
def respond_dm(dms):
recipients = []
for dm in dms:
if dm.message_create['sender_id'] != '1577695345865367553' and dm.message_create['sender_id'] not in recipients:
recipients.append(dm.message_create['sender_id'])
res_tweets = []
while len(res_tweets) < len(recipients):
res_tweets = generate_tweets(len(recipients) + 10)
for recipient in recipients:
api.send_direct_message(recipient_id = recipient, text = str("My message for you is: \n \n"+ random.choice(res_tweets) + " ✨"))
time.sleep(5)
# main
api = api()  # rebinds the name: "api" is now the tweepy.API instance, not the factory function
dms = fetch_dms()
if len(dms) > 0:
tf.config.set_visible_devices([], 'GPU')
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='run2')
# delete the outgoing (bot-sent) DMs
dms = fetch_dms()
for dm in list(dms):  # iterate over a copy: removing items while iterating skips elements
    if dm.message_create['sender_id'] == '1577695345865367553':
        api.delete_direct_message(dm.id)
        dms.remove(dm)
filter_list = ['victim', 'abuse', 'sex', 'planetary']
respond_dm(dms)
# delete responded DMs
for dm in dms:
api.delete_direct_message(dm.id)
repo_name: sHustinx/nlp-fortune-cookie-bot | sub_path: respond-dms.py | file_name: respond-dms.py | file_ext: py | file_size_in_byte: 2,664 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 21098037404
# -*- coding: utf-8 -*-
from flask import Blueprint, g, request, redirect, url_for, current_app
import os
from invenio.ext.template.context_processor import \
register_template_context_processor, template_args
from invenio.base.decorators import templated
from invenio.modules.formatter import format_record
from invenio.modules.search.models import Collection
from invenio.modules.search.forms import EasySearchForm
from invenio.modules.search.views.search import collection
blueprint = Blueprint('main', __name__, url_prefix="",
template_folder='templates',
static_url_path='', # static url path has to be empty
# if url_prefix is empty
static_folder='static')
@blueprint.route('/', methods=['GET', 'POST'])
@templated('index.html')
def index():
""" Renders homepage. """
# legacy app support
c = request.values.get('c')
if c == current_app.config['CFG_SITE_NAME']:
return redirect(url_for('.index', ln=g.ln))
elif c is not None:
return redirect(url_for('.collection', name=c, ln=g.ln))
collection = Collection.query.get_or_404(1)
from invenio.b2share.modules.b2deposit.latest_deposits import get_latest_deposits
latest_deposits = get_latest_deposits()
func = current_app.config.get("CFG_SITE_FUNCTION") or ""
@register_template_context_processor
def index_context():
return dict(
of=request.values.get('of', collection.formatoptions[0]['code']),
easy_search_form=EasySearchForm(csrf_enabled=False),
format_record=format_record,
)
    return dict(collection=collection, latest_deposits=latest_deposits,
                pagetitle="EUDAT B2SHARE", site_function=func)
# list all domain logos in this module's static assets folder
domain_logos = [ img for img in os.listdir(os.path.join(blueprint.static_folder, 'img'))
if img.startswith('domain-') ]
@template_args(collection)
def domain_collection_helpers():
"""Add helpers to the '/collection' templates"""
def get_domain_icon(collection_name):
"""Return the url to the given domain collection logo if it exists"""
        if not collection_name or not isinstance(collection_name, basestring):
            return None
logo_file_prefix = 'domain-' + collection_name.lower()
matching_logo = [ logo for logo in domain_logos if logo.startswith(logo_file_prefix)]
if len(matching_logo) == 1:
return url_for('static', filename=os.path.join('img',
matching_logo[0]))
elif len(matching_logo) > 0:
raise Exception('multiple logos matching domain collection ' +
collection_name)
return { 'get_domain_icon': get_domain_icon }
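# A minimal template-side sketch (hypothetical collection name; assumes the
# usual Jinja2 rendering of invenio collection templates): once the helper
# above is registered, a template can render the logo with
#     <img src="{{ get_domain_icon('Linguistics') }}" />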
repo_name: cjhak/b2share | sub_path: invenio/b2share/modules/main/views.py | file_name: views.py | file_ext: py | file_size_in_byte: 2,867 | program_lang: python | lang: en | doc_type: code | stars: null | dataset: github-code | pt: 6

seq_id: 39735748017
class BankAccount:
# Class attributes
number = 1
all_accounts = []
# Constructor
    def __init__(self, int_rate=0.05, balance=0, acc_id=None):
        self.int_rate = int_rate
        self.balance = balance
        # default arguments are evaluated once at definition time, so a
        # default of "number" would freeze every id at 1; assign at call time
        self.id = acc_id if acc_id is not None else BankAccount.number
        BankAccount.all_accounts.append(self)
        BankAccount.number += 1
# Deposit method to add to the instance/variable balance
def deposit(self, amount):
self.balance += amount
return self
    # Withdraw method to take money out, provided the funds are there;
    # otherwise a fee is charged
def withdraw(self, amount):
if self.balance - amount >= 0:
self.balance -= amount
return self
else:
print(f"Insufficient funds: Charging a $5 fee")
self.balance -= 5
return self
def display_account_info(self):
print(f"Balance: {self.balance}")
return self
def yield_interest(self):
if self.balance >= 0:
self.balance = self.balance + (self.balance * self.int_rate)
return self
@classmethod
def get_account_info(cls):
for inst in cls.all_accounts:
inst.display_account_info()
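# A minimal chaining sketch (hypothetical values): the instance methods return
# self precisely so that calls can be chained:
#     BankAccount(int_rate=0.01, balance=100).deposit(50).withdraw(25).display_account_info()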
class User:
    # Class attribute: this dict lives on the class, so every User shares one
    # registry of accounts (account names must be unique across users)
    users_accounts = {}
# Constructor
def __init__(self, name, email, account_name, deposit=0):
self.name = name
self.email = email
self.account = BankAccount(int_rate=0.02, balance=deposit)
User.users_accounts.update({f"{account_name}": self.account})
def make_deposit(self, account_name, amount):
self.users_accounts[account_name].deposit(amount)
def make_withdraw(self, account_name, amount):
self.users_accounts[account_name].withdraw(amount)
def display_user_balance(self, account_name):
self.users_accounts[account_name].display_account_info()
def transfer_money(self, account_name, amount, other_user, ou_account_name):
self.users_accounts[account_name].withdraw(amount)
other_user.make_deposit(f"{ou_account_name}", amount)
@classmethod
def add_account(cls, account_name, deposit=0):
User.users_accounts.update({f"{account_name}": BankAccount(int_rate=0.02, balance=deposit)})
SomeGuy = User("Jeffry","[email protected]","Account1")
Person2 = User("Jane", "[email protected]", "Monniiesss", 150)
SomeGuy.display_user_balance("Account1")
print("---------Depositing--------")
SomeGuy.make_deposit("Account1", 300)
print("---------Finished--------")
SomeGuy.display_user_balance("Account1")
print("---------Withdrawing--------")
SomeGuy.make_withdraw("Account1", 30)
SomeGuy.display_user_balance("Account1")
print("---------Before Transfer--------")
SomeGuy.display_user_balance("Account1")
Person2.display_user_balance("Monniiesss")
SomeGuy.transfer_money("Account1", 100, Person2, "Monniiesss")
print("---------After Transfer--------")
SomeGuy.display_user_balance("Account1")
Person2.display_user_balance("Monniiesss")
repo_name: r-lutrick/Coding-Dojo | sub_path: Python/Fundamentals/OOP/Users_with_Bank_Accounts/users_with_bank_accounts.py | file_name: users_with_bank_accounts.py | file_ext: py | file_size_in_byte: 3,006 | program_lang: python | lang: en | doc_type: code | stars: 1 | dataset: github-code | pt: 6

seq_id: 33626981361
from flask import Flask, request
import requests
import tkinter as tk
from tkinter import simpledialog
import pdfrw
import json
from flask_cors import CORS
import io
import base64
app = Flask(__name__)
CORS(app)
@app.route("/")
def hello_world():
ROOT = tk.Tk()
ROOT.withdraw()
# the input dialog
# USER_INP = simpledialog.askstring(title="Wallet Info",
# prompt="Please Enter your Wallet ID:")
APIKEY = '43044ac0170dc40fa60cfd249ef3307b64edbab8'
BASE = 'https://rest.cryptoapis.io/v2'
BLOCKCHAIN = 'bitcoin'
NETWORK = 'mainnet'
WALLETID = request.args.get('walletId')
print(WALLETID)
#used if we are looking for data on a particular transaction
# myTestNetWallet - 62a8e61a25a05500079dda90
# random MainnetWallet - 3R2UHDGKLQkPmAjBGbdzpET95xYV59hkyw
#TID = '4b66461bf88b61e1e4326356534c135129defb504c7acb2fd6c92697d79eb250'
#blockchain-data/bitcoin/testnet/addresses/mzYijhgmzZrmuB7wBDazRKirnChKyow4M3?
#get Bitcoin amount from wallet
with requests.Session() as session:
h = {'Content-Type': 'application/json',
'X-API-KEY': APIKEY}
r = session.get(f'https://rest.cryptoapis.io/v2/wallet-as-a-service/wallets/{WALLETID}/bitcoin/testnet',headers=h)
r.raise_for_status()
qdata = r.json()
bitCoinAmount = qdata['data']['item']['confirmedBalance']['amount']
#get Ethereum amount from wallet
with requests.Session() as session:
h1 = {'Content-Type': 'application/json',
'X-API-KEY': APIKEY}
r1 = session.get(f'https://rest.cryptoapis.io/v2/wallet-as-a-service/wallets/{WALLETID}/ethereum/ropsten',headers=h1)
r1.raise_for_status()
qdata1 = r1.json()
ethereumAmount = qdata1['data']['item']['confirmedBalance']['amount']
# #test for a wallet on the chain
# #blockchain-data/bitcoin/testnet/addresses/mzYijhgmzZrmuB7wBDazRKirnChKyow4M3?
# with requests.Session() as session:
# h = {'Content-Type': 'application/json',
# 'X-API-KEY': APIKEY}
# r = session.get(f'https://rest.cryptoapis.io/v2/blockchain-data/bitcoin/testnet/addresses/{WALLETID}', headers=h)
# r.raise_for_status()
# print(json.dumps(r.json(), indent=4, sort_keys=True))
import os
#directory = os.getcwd()
#print(os.path.abspath("AtomicTest.pdf"))
#pdf_template = "/Users/adityabora/Desktop/AtomicTest.pdf"
pdf_template = "./PortfolioAnalysisV2.pdf"
pdf_output = "output7.pdf"
#template_pdf = pdfrw.PdfReader(pdf_template) # create a pdfrw object from our template.pdf
print(os.path.exists(pdf_template))
template_pdf = pdfrw.PdfReader(pdf_template)
ANNOT_KEY = '/Annots'
ANNOT_FIELD_KEY = '/T'
ANNOT_VAL_KEY = '/V'
ANNOT_RECT_KEY = '/Rect'
SUBTYPE_KEY = '/Subtype'
WIDGET_SUBTYPE_KEY = '/Widget'
    # debug: walk the template once and print the form-field names it contains
    for page in template_pdf.pages:
annotations = page[ANNOT_KEY]
for annotation in annotations:
if annotation[SUBTYPE_KEY] == WIDGET_SUBTYPE_KEY:
if annotation[ANNOT_FIELD_KEY]:
key = annotation[ANNOT_FIELD_KEY][1:-1]
print(key)
from datetime import date
data_dict = {
'Risk': '3.8',
'BitcoinAmount': bitCoinAmount,
'EthAmount': ethereumAmount,
'USDCAmount': '30',
'RiskGPA': '3.7'
}
def fill_pdf(input_pdf_path, data_dict):
template_pdf = pdfrw.PdfReader(input_pdf_path)
for page in template_pdf.pages:
annotations = page[ANNOT_KEY]
for annotation in annotations:
if annotation[SUBTYPE_KEY] == WIDGET_SUBTYPE_KEY:
if annotation[ANNOT_FIELD_KEY]:
key = annotation[ANNOT_FIELD_KEY][1:-1]
if key in data_dict.keys():
                            if isinstance(data_dict[key], bool):
                                if data_dict[key] is True:
annotation.update(pdfrw.PdfDict(
AS=pdfrw.PdfName('Yes')))
else:
annotation.update(
pdfrw.PdfDict(V='{}'.format(data_dict[key]))
)
annotation.update(pdfrw.PdfDict(AP=''))
        # pdfrw.PdfWriter().write(output_pdf_path, template_pdf)
        # set NeedAppearances before serializing so that PDF viewers
        # regenerate field appearances and the filled-in values display
        if template_pdf.Root.AcroForm:
            template_pdf.Root.AcroForm.update(
                pdfrw.PdfDict(NeedAppearances=pdfrw.PdfObject('true')))
        buf = io.BytesIO()
        pdfrw.PdfWriter().write(buf, template_pdf)
        buf.seek(0)
        return base64.encodebytes(buf.read()).decode()
    data = fill_pdf(pdf_template, data_dict)
    return data
repo_name: MHSiles/yoloco-be | sub_path: other/main-2.py | file_name: main-2.py | file_ext: py | file_size_in_byte: 4,824 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 16907993758
import numpy as np
from .sigmoid import sigmoid
def predict(Theta1, Theta2, X):
    '''Hidden-layer forward pass: return the predicted label index for each row of X'''
m = X.shape[0]
num_labels = Theta2.shape[0]
a1 = np.vstack((np.ones(m), X.T)).T
a2 = sigmoid(np.dot(a1, Theta1.T))
a2 = np.vstack((np.ones(m), a2.T)).T
a3 = sigmoid(np.dot(a2, Theta2.T))
return np.argmax(a3, axis=1)
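# A minimal usage sketch (illustrative shapes only, matching the classic
# 400-input / 25-hidden / 10-label exercise; the +1 columns hold bias terms):
#     Theta1 = np.random.rand(25, 401)
#     Theta2 = np.random.rand(10, 26)
#     X = np.random.rand(5000, 400)
#     labels = predict(Theta1, Theta2, X)  # array of 5000 class indexes in [0, 9]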
repo_name: 2332256766/python_test | sub_path: MachineL_The_4_week_practise/predict.py | file_name: predict.py | file_ext: py | file_size_in_byte: 348 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 73268001788
# URL patterns for the "productes" app
from django.conf.urls import patterns, include, url
from productes import views
urlpatterns = patterns('',
url(r'^$', views.llistarProductes, name='llistarProductes'),
url(r'^llistarCategories/$', views.llistarCategories, name='llistarCategories'),
url(r'^llistarCategoriesAjax/$', views.llistarCategoriesAjax, name='llistarCategoriesAjax'),
url(r'^llistarProductes/$', views.llistarProductes, name='llistarProductes'),
url(r'^afegirProducte/(?P<categoria>\w+)/$', views.afegirProducte, name='afegirProducte'),
url(r'^afegirProducte/$', views.afegirProducte, name='afegirProducte'),
url(r'^afegirCategoria/$', views.afegirCategoria, name='afegirCategoria'),
url(r'^editarProducte/(?P<idProducte>\d+)/$', views.editarProducte, name='editarProducte'),
url(r'^dadesProducte/(?P<idProducte>\d+)/$', views.dadesProducte, name='dadesProducte'),
)
repo_name: kimpa2007/restoGestio | sub_path: tpv/productes/urls.py | file_name: urls.py | file_ext: py | file_size_in_byte: 938 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 2088985509
#!/usr/bin/env python
"""@namespace IMP.pmi.tools
Miscellaneous utilities.
"""
from __future__ import print_function, division
import IMP
import IMP.algebra
import IMP.isd
import IMP.pmi
import IMP.pmi.topology
try:
from collections.abc import MutableSet # needs Python 3.3 or later
except ImportError:
from collections import MutableSet
import itertools
import math
import sys
import ast
try:
from time import process_time # needs python 3.3 or later
except ImportError:
from time import clock as process_time
import RMF
import IMP.rmf
from collections import defaultdict, OrderedDict
import warnings
import numpy
def _get_system_for_hier(hier):
"""Given a hierarchy, return the System that created it, or None"""
# If we are given the raw particle, get the corresponding Hierarchy
# decorator if available
if hier and not hasattr(hier, 'get_parent'):
if IMP.atom.Hierarchy.get_is_setup(hier):
hier = IMP.atom.Hierarchy(hier)
else:
return None
while hier:
# See if we labeled the Python object directly with the System
if hasattr(hier, '_pmi2_system'):
h = hier._pmi2_system()
if h:
return h
# Otherwise (maybe we got a new Python wrapper around the same C++
# object), try all extant systems
for s in IMP.pmi.topology.System._all_systems:
if s.hier == hier:
return s
# Try the next level up in the hierarchy
hier = hier.get_parent()
def _all_protocol_outputs(hier):
"""Iterate over all (ProtocolOutput, State) pairs for the
given hierarchy"""
system = _get_system_for_hier(hier)
if system:
for state in system.states:
for p in state._protocol_output:
yield p
def _add_pmi_provenance(p):
"""Tag the given particle as being created by the current version
of PMI."""
IMP.core.add_imp_provenance(p)
IMP.core.add_software_provenance(
p, name="IMP PMI module", version=IMP.pmi.get_module_version(),
location="https://integrativemodeling.org")
IMP.core.add_script_provenance(p)
def _get_restraint_set_keys():
if not hasattr(_get_restraint_set_keys, 'pmi_rs_key'):
_get_restraint_set_keys.pmi_rs_key = IMP.ModelKey("PMI restraints")
_get_restraint_set_keys.rmf_rs_key = IMP.ModelKey("RMF restraints")
return (_get_restraint_set_keys.pmi_rs_key,
_get_restraint_set_keys.rmf_rs_key)
def _add_restraint_sets(model, mk, mk_rmf):
rs = IMP.RestraintSet(model, "All PMI restraints")
rs_rmf = IMP.RestraintSet(model, "All PMI RMF restraints")
model.add_data(mk, rs)
model.add_data(mk_rmf, rs_rmf)
return rs, rs_rmf
def add_restraint_to_model(model, restraint, add_to_rmf=False):
"""Add a PMI restraint to the model.
Since Model.add_restraint() no longer exists (in modern IMP restraints
should be added to a ScoringFunction instead) store them instead in
a RestraintSet, and keep a reference to it in the Model.
If `add_to_rmf` is True, also add the restraint to a separate list
of restraints that will be written out to RMF files (by default, most
PMI restraints are not)."""
mk, mk_rmf = _get_restraint_set_keys()
if model.get_has_data(mk):
rs = IMP.RestraintSet.get_from(model.get_data(mk))
rs_rmf = IMP.RestraintSet.get_from(model.get_data(mk_rmf))
else:
rs, rs_rmf = _add_restraint_sets(model, mk, mk_rmf)
rs.add_restraint(restraint)
if add_to_rmf:
rs_rmf.add_restraint(restraint)
def get_restraint_set(model, rmf=False):
"""Get a RestraintSet containing all PMI restraints added to the model.
If `rmf` is True, return only the subset of these restraints that
should be written out to RMF files."""
mk, mk_rmf = _get_restraint_set_keys()
if not model.get_has_data(mk):
warnings.warn("no restraints added to model yet",
IMP.pmi.ParameterWarning)
_add_restraint_sets(model, mk, mk_rmf)
if rmf:
return IMP.RestraintSet.get_from(model.get_data(mk_rmf))
else:
return IMP.RestraintSet.get_from(model.get_data(mk))
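# A minimal usage sketch of the two functions above (hypothetical model and
# restraint; any IMP restraint object works):
#     m = IMP.Model()
#     r = IMP.core.DistanceRestraint(...)      # some restraint on particles in m
#     add_restraint_to_model(m, r, add_to_rmf=True)
#     all_rs = get_restraint_set(m)            # every PMI restraint
#     rmf_rs = get_restraint_set(m, rmf=True)  # only the RMF-bound subset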
class Stopwatch(object):
"""Collect timing information.
Add an instance of this class to outputobjects to get timing information
in a stat file."""
def __init__(self, isdelta=True):
"""Constructor.
@param isdelta if True (the default) then report the time since the
last use of this class; if False, report cumulative time."""
self.starttime = process_time()
self.label = "None"
self.isdelta = isdelta
def set_label(self, labelstr):
self.label = labelstr
def get_output(self):
output = {}
if self.isdelta:
newtime = process_time()
output["Stopwatch_" + self.label + "_delta_seconds"] \
= str(newtime - self.starttime)
self.starttime = newtime
else:
output["Stopwatch_" + self.label + "_elapsed_seconds"] \
= str(process_time() - self.starttime)
return output
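# A minimal usage sketch of Stopwatch (hypothetical label):
#     sw = Stopwatch()
#     sw.set_label("sampling")
#     ...do some work...
#     print(sw.get_output())  # {'Stopwatch_sampling_delta_seconds': '...'}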
class SetupNuisance(object):
def __init__(self, m, initialvalue, minvalue, maxvalue, isoptimized=True,
name=None):
p = IMP.Particle(m)
if name:
p.set_name(name)
nuisance = IMP.isd.Scale.setup_particle(p, initialvalue)
if minvalue:
nuisance.set_lower(minvalue)
if maxvalue:
nuisance.set_upper(maxvalue)
# m.add_score_state(IMP.core.SingletonConstraint(IMP.isd.NuisanceRangeModifier(),None,nuisance))
nuisance.set_is_optimized(nuisance.get_nuisance_key(), isoptimized)
self.nuisance = nuisance
def get_particle(self):
return self.nuisance
class SetupWeight(object):
def __init__(self, m, isoptimized=True, nweights_or_weights=None):
pw = IMP.Particle(m)
if isinstance(nweights_or_weights, int):
self.weight = IMP.isd.Weight.setup_particle(
pw, nweights_or_weights
)
else:
try:
nweights_or_weights = list(nweights_or_weights)
self.weight = IMP.isd.Weight.setup_particle(
pw, nweights_or_weights
)
except (TypeError, IMP.UsageException):
self.weight = IMP.isd.Weight.setup_particle(pw)
self.weight.set_weights_are_optimized(isoptimized)
def get_particle(self):
return self.weight
class SetupSurface(object):
def __init__(self, m, center, normal, isoptimized=True):
p = IMP.Particle(m)
self.surface = IMP.core.Surface.setup_particle(p, center, normal)
self.surface.set_coordinates_are_optimized(isoptimized)
self.surface.set_normal_is_optimized(isoptimized)
def get_particle(self):
return self.surface
def get_cross_link_data(directory, filename, dist, omega, sigma,
don=None, doff=None, prior=0, type_of_profile="gofr"):
(distmin, distmax, ndist) = dist
(omegamin, omegamax, nomega) = omega
(sigmamin, sigmamax, nsigma) = sigma
filen = IMP.isd.get_data_path("CrossLinkPMFs.dict")
with open(filen) as xlpot:
dictionary = ast.literal_eval(xlpot.readline())
xpot = dictionary[directory][filename]["distance"]
pot = dictionary[directory][filename][type_of_profile]
dist_grid = get_grid(distmin, distmax, ndist, False)
omega_grid = get_log_grid(omegamin, omegamax, nomega)
sigma_grid = get_log_grid(sigmamin, sigmamax, nsigma)
if don is not None and doff is not None:
xlmsdata = IMP.isd.CrossLinkData(
dist_grid,
omega_grid,
sigma_grid,
xpot,
pot,
don,
doff,
prior)
else:
xlmsdata = IMP.isd.CrossLinkData(
dist_grid,
omega_grid,
sigma_grid,
xpot,
pot)
return xlmsdata
def get_grid(gmin, gmax, ngrid, boundaries):
grid = []
dx = (gmax - gmin) / float(ngrid)
for i in range(0, ngrid + 1):
if not boundaries and i == 0:
continue
if not boundaries and i == ngrid:
continue
grid.append(gmin + float(i) * dx)
return grid
def get_log_grid(gmin, gmax, ngrid):
grid = []
for i in range(0, ngrid + 1):
grid.append(gmin * math.exp(float(i) / ngrid * math.log(gmax / gmin)))
return grid
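# For example, get_log_grid(1.0, 100.0, 2) returns [1.0, 10.0, 100.0]
# (up to floating-point rounding): the points are spaced geometrically.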
def cross_link_db_filter_parser(inputstring):
'''
example '"{ID_Score}" > 28 AND "{Sample}" ==
"%10_1%" OR ":Sample}" == "%10_2%" OR ":Sample}"
== "%10_3%" OR ":Sample}" == "%8_1%" OR ":Sample}" == "%8_2%"'
'''
import pyparsing as pp
operator = pp.Regex(">=|<=|!=|>|<|==|in").setName("operator")
value = pp.QuotedString(
'"') | pp.Regex(
r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?")
identifier = pp.Word(pp.alphas, pp.alphanums + "_")
comparison_term = identifier | value
condition = pp.Group(comparison_term + operator + comparison_term)
expr = pp.operatorPrecedence(condition, [
("OR", 2, pp.opAssoc.LEFT, ),
("AND", 2, pp.opAssoc.LEFT, ),
])
parsedstring = str(expr.parseString(inputstring)) \
.replace("[", "(") \
.replace("]", ")") \
.replace(",", " ") \
.replace("'", " ") \
.replace("%", "'") \
.replace("{", "float(entry['") \
.replace("}", "'])") \
.replace(":", "str(entry['") \
.replace("}", "'])") \
.replace("AND", "and") \
.replace("OR", "or")
return parsedstring
def open_file_or_inline_text(filename):
try:
fl = open(filename, "r")
except IOError:
fl = filename.split("\n")
return fl
def get_ids_from_fasta_file(fastafile):
ids = []
with open(fastafile) as ff:
for line in ff:
if line[0] == ">":
                ids.append(line[1:].rstrip('\n'))  # robust if the last line lacks a newline
return ids
def get_closest_residue_position(hier, resindex, terminus="N"):
'''
this function works with plain hierarchies, as read from the pdb,
no multi-scale hierarchies
'''
p = []
niter = 0
while len(p) == 0:
niter += 1
sel = IMP.atom.Selection(hier, residue_index=resindex,
atom_type=IMP.atom.AT_CA)
if terminus == "N":
resindex += 1
if terminus == "C":
resindex -= 1
if niter >= 10000:
print("get_closest_residue_position: exiting while loop "
"without result")
break
p = sel.get_selected_particles()
if len(p) == 1:
return IMP.core.XYZ(p[0]).get_coordinates()
elif len(p) == 0:
print("get_closest_residue_position: got NO residues for hierarchy "
"%s and residue %i" % (hier, resindex))
raise Exception(
"get_closest_residue_position: got NO residues for hierarchy "
"%s and residue %i" % (hier, resindex))
else:
raise ValueError(
"got multiple residues for hierarchy %s and residue %i; the list "
"of particles is %s"
% (hier, resindex, str([pp.get_name() for pp in p])))
def get_residue_gaps_in_hierarchy(hierarchy, start, end):
'''
Return the residue index gaps and contiguous segments in the hierarchy.
@param hierarchy hierarchy to examine
@param start first residue index
@param end last residue index
@return A list of lists of the form
[[1,100,"cont"],[101,120,"gap"],[121,200,"cont"]]
'''
gaps = []
for n, rindex in enumerate(range(start, end + 1)):
sel = IMP.atom.Selection(hierarchy, residue_index=rindex,
atom_type=IMP.atom.AT_CA)
if len(sel.get_selected_particles()) == 0:
if n == 0:
# set the initial condition
rindexgap = start
rindexcont = start - 1
if rindexgap == rindex - 1:
# residue is contiguous with the previously discovered gap
gaps[-1][1] += 1
else:
# residue is not contiguous with the previously discovered gap
# hence create a new gap tuple
gaps.append([rindex, rindex, "gap"])
# update the index of the last residue gap
rindexgap = rindex
else:
if n == 0:
# set the initial condition
rindexgap = start - 1
rindexcont = start
if rindexcont == rindex - 1:
# residue is contiguous with the previously discovered
# continuous part
gaps[-1][1] += 1
else:
# residue is not contiguous with the previously discovered
# continuous part, hence create a new cont tuple
gaps.append([rindex, rindex, "cont"])
# update the index of the last residue gap
rindexcont = rindex
return gaps
# NOTE: this class shadows the built-in map() for the rest of this module
class map(object):
def __init__(self):
self.map = {}
def set_map_element(self, xvalue, yvalue):
self.map[xvalue] = yvalue
def get_map_element(self, invalue):
if isinstance(invalue, float):
n = 0
mindist = 1
for x in self.map:
dist = (invalue - x) * (invalue - x)
if n == 0:
mindist = dist
minx = x
if dist < mindist:
mindist = dist
minx = x
n += 1
return self.map[minx]
elif isinstance(invalue, str):
return self.map[invalue]
else:
raise TypeError("wrong type for map")
def select_by_tuple_2(hier, tuple_selection, resolution):
"""New tuple format: molname OR (start,stop,molname,copynum,statenum)
Copy and state are optional. Can also use 'None' for them which will
get all. You can also pass -1 for stop which will go to the end.
Returns the particles
"""
kwds = {} # going to accumulate keywords
kwds['resolution'] = resolution
if isinstance(tuple_selection, str):
kwds['molecule'] = tuple_selection
elif isinstance(tuple_selection, tuple):
rbegin = tuple_selection[0]
rend = tuple_selection[1]
kwds['molecule'] = tuple_selection[2]
try:
copynum = tuple_selection[3]
if copynum is not None:
kwds['copy_index'] = copynum
except: # noqa: E722
pass
try:
statenum = tuple_selection[4]
if statenum is not None:
kwds['state_index'] = statenum
except: # noqa: E722
pass
if rend == -1:
if rbegin > 1:
s = IMP.atom.Selection(hier, **kwds)
s -= IMP.atom.Selection(hier,
residue_indexes=range(1, rbegin),
**kwds)
return s.get_selected_particles()
else:
kwds['residue_indexes'] = range(rbegin, rend+1)
s = IMP.atom.Selection(hier, **kwds)
return s.get_selected_particles()
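# A minimal usage sketch (hypothetical hierarchy and molecule name "Rpb1"):
#     ps = select_by_tuple_2(hier, "Rpb1", resolution=1)                   # whole molecule
#     ps = select_by_tuple_2(hier, (1, 100, "Rpb1", 0, None), resolution=1)
#     ps = select_by_tuple_2(hier, (50, -1, "Rpb1"), resolution=1)         # residue 50 to the end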
def get_db_from_csv(csvfilename, encoding=None):
if sys.version_info[0] == 2:
def open_with_encoding(fname, encoding):
return open(fname)
else:
open_with_encoding = open
import csv
outputlist = []
with open_with_encoding(csvfilename, encoding=encoding) as fh:
csvr = csv.DictReader(fh)
for ls in csvr:
outputlist.append(ls)
return outputlist
def get_prot_name_from_particle(p, list_of_names):
'''Return the component name provided a particle and a list of names'''
root = p
protname = root.get_name()
is_a_bead = False
while protname not in list_of_names:
root0 = root.get_parent()
if root0 == IMP.atom.Hierarchy():
return (None, None)
protname = root0.get_name()
# check if that is a bead
# this piece of code might be dangerous if
# the hierarchy was called Bead :)
if "Beads" in protname:
is_a_bead = True
root = root0
return (protname, is_a_bead)
def get_residue_indexes(hier):
'''
    Retrieve the residue indexes for the given particle.
    The particle must be an instance of Fragment, Residue, Atom or Molecule,
    otherwise an empty list is returned
'''
resind = []
if IMP.atom.Fragment.get_is_setup(hier):
resind = IMP.atom.Fragment(hier).get_residue_indexes()
elif IMP.atom.Residue.get_is_setup(hier):
resind = [IMP.atom.Residue(hier).get_index()]
elif IMP.atom.Atom.get_is_setup(hier):
a = IMP.atom.Atom(hier)
resind = [IMP.atom.Residue(a.get_parent()).get_index()]
elif IMP.atom.Molecule.get_is_setup(hier):
resind_tmp = IMP.pmi.tools.OrderedSet()
for lv in IMP.atom.get_leaves(hier):
if IMP.atom.Fragment.get_is_setup(lv) or \
IMP.atom.Residue.get_is_setup(lv) or \
IMP.atom.Atom.get_is_setup(lv):
for ind in get_residue_indexes(lv):
resind_tmp.add(ind)
resind = list(resind_tmp)
else:
resind = []
return resind
def sort_by_residues(particles):
particles_residues = [(p, list(IMP.pmi.tools.get_residue_indexes(p)))
for p in particles]
sorted_particles_residues = sorted(
particles_residues,
key=lambda tup: tup[1])
particles = [p[0] for p in sorted_particles_residues]
return particles
#
# Parallel Computation
#
def scatter_and_gather(data):
"""Synchronize data over a parallel run"""
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
number_of_processes = comm.size
comm.Barrier()
if rank != 0:
comm.send(data, dest=0, tag=11)
elif rank == 0:
for i in range(1, number_of_processes):
data_tmp = comm.recv(source=i, tag=11)
if isinstance(data, list):
data += data_tmp
elif isinstance(data, dict):
data.update(data_tmp)
else:
raise TypeError("data not supported, use list or dictionaries")
for i in range(1, number_of_processes):
comm.send(data, dest=i, tag=11)
if rank != 0:
data = comm.recv(source=0, tag=11)
return data
#
# Lists and iterators
#
def sublist_iterator(ls, lmin=1, lmax=None):
'''
Yield all sublists of length >= lmin and <= lmax
'''
if lmax is None:
lmax = len(ls)
n = len(ls)
for i in range(n):
for j in range(i + lmin, min(n + 1, i + 1 + lmax)):
yield ls[i:j]
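# For example, sublist_iterator([1, 2, 3], lmin=2) yields
# [1, 2], [1, 2, 3] and [2, 3].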
def flatten_list(ls):
return [item for sublist in ls for item in sublist]
def list_chunks_iterator(list, length):
""" Yield successive length-sized chunks from a list.
"""
for i in range(0, len(list), length):
yield list[i:i + length]
def chunk_list_into_segments(seq, num):
seq = list(seq)
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
class Segments(object):
''' This class stores integers
in ordered compact lists eg:
[[1,2,3],[6,7,8]]
the methods help splitting and merging the internal lists
Example:
s=Segments([1,2,3]) is [[1,2,3]]
s.add(4) is [[1,2,3,4]] (add right)
s.add(3) is [[1,2,3,4]] (item already existing)
s.add(7) is [[1,2,3,4],[7]] (new list)
s.add([8,9]) is [[1,2,3,4],[7,8,9]] (add item right)
s.add([5,6]) is [[1,2,3,4,5,6,7,8,9]] (merge)
s.remove(3) is [[1,2],[4,5,6,7,8,9]] (split)
etc.
'''
def __init__(self, index):
        '''index can be an integer or a list of integers'''
if isinstance(index, int):
self.segs = [[index]]
elif isinstance(index, list):
self.segs = [[index[0]]]
for i in index[1:]:
self.add(i)
else:
raise TypeError("index must be an int or list of ints")
def add(self, index):
        '''index can be an integer or a list of integers'''
if isinstance(index, (int, numpy.int32, numpy.int64)):
mergeleft = None
mergeright = None
for n, s in enumerate(self.segs):
if index in s:
return 0
else:
if s[0]-index == 1:
mergeleft = n
if index-s[-1] == 1:
mergeright = n
if mergeright is None and mergeleft is None:
self.segs.append([index])
if mergeright is not None and mergeleft is None:
self.segs[mergeright].append(index)
if mergeleft is not None and mergeright is None:
self.segs[mergeleft] = [index]+self.segs[mergeleft]
if mergeleft is not None and mergeright is not None:
self.segs[mergeright] = \
self.segs[mergeright]+[index]+self.segs[mergeleft]
del self.segs[mergeleft]
for n in range(len(self.segs)):
self.segs[n].sort()
self.segs.sort(key=lambda tup: tup[0])
elif isinstance(index, list):
for i in index:
self.add(i)
else:
raise TypeError("index must be an int or list of ints")
def remove(self, index):
        '''index must be an integer'''
for n, s in enumerate(self.segs):
if index in s:
if s[0] == index:
self.segs[n] = s[1:]
elif s[-1] == index:
self.segs[n] = s[:-1]
else:
i = self.segs[n].index(index)
self.segs[n] = s[:i]
self.segs.append(s[i+1:])
for n in range(len(self.segs)):
self.segs[n].sort()
if len(self.segs[n]) == 0:
del self.segs[n]
self.segs.sort(key=lambda tup: tup[0])
def get_flatten(self):
''' Returns a flatten list '''
return [item for sublist in self.segs for item in sublist]
def __repr__(self):
ret_tmp = "["
for seg in self.segs:
ret_tmp += str(seg[0])+"-"+str(seg[-1])+","
ret = ret_tmp[:-1]+"]"
return ret
#
# Tools to simulate data
#
def normal_density_function(expected_value, sigma, x):
return (
1 / math.sqrt(2 * math.pi) / sigma *
math.exp(-(x - expected_value) ** 2 / 2 / sigma / sigma)
)
def log_normal_density_function(expected_value, sigma, x):
return (
1 / math.sqrt(2 * math.pi) / sigma / x *
math.exp(-(math.log(x / expected_value) ** 2 / 2 / sigma / sigma))
)
def print_multicolumn(list_of_strings, ncolumns=2, truncate=40):
ls = list_of_strings
cols = ncolumns
    # pad with blanks so the list divides evenly into the requested columns
    while len(ls) % cols:
        ls.append(" ")
split = [ls[i:i + len(ls) // cols]
for i in range(0, len(ls), len(ls) // cols)]
for row in zip(*split):
print("".join(str.ljust(i, truncate) for i in row))
class ColorChange(object):
    '''Convert color codes between hexadecimal and RGB'''
def __init__(self):
self._NUMERALS = '0123456789abcdefABCDEF'
self._HEXDEC = dict((v, int(v, 16)) for v in
(x+y for x in self._NUMERALS
for y in self._NUMERALS))
self.LOWERCASE, self.UPPERCASE = 'x', 'X'
def rgb(self, triplet):
return (float(self._HEXDEC[triplet[0:2]]),
float(self._HEXDEC[triplet[2:4]]),
float(self._HEXDEC[triplet[4:6]]))
def triplet(self, rgb, lettercase=None):
if lettercase is None:
lettercase = self.LOWERCASE
return format(rgb[0] << 16 | rgb[1] << 8 | rgb[2], '06'+lettercase)
# -------------- Collections --------------- #
class OrderedSet(MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
if last:
key = self.end[1][0]
else:
key = self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
class OrderedDefaultDict(OrderedDict):
"""Store objects in order they were added, but with default type.
Source: http://stackoverflow.com/a/4127426/2608793
"""
def __init__(self, *args, **kwargs):
if not args:
self.default_factory = None
else:
if not (args[0] is None or callable(args[0])):
raise TypeError('first argument must be callable or None')
self.default_factory = args[0]
args = args[1:]
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = default = self.default_factory()
return default
def __reduce__(self): # optional, for pickle support
args = (self.default_factory,) if self.default_factory else ()
if sys.version_info[0] >= 3:
return self.__class__, args, None, None, self.items()
else:
return self.__class__, args, None, None, self.iteritems()
# -------------- PMI2 Tools --------------- #
def set_coordinates_from_rmf(hier, rmf_fn, frame_num=0):
"""Extract frame from RMF file and fill coordinates. Must be identical
topology.
@param hier The (System) hierarchy to fill (e.g. after you've built it)
@param rmf_fn The file to extract from
@param frame_num The frame number to extract
"""
rh = RMF.open_rmf_file_read_only(rmf_fn)
IMP.rmf.link_hierarchies(rh, [hier])
IMP.rmf.load_frame(rh, RMF.FrameID(frame_num))
del rh
def input_adaptor(stuff, pmi_resolution=0, flatten=False, selection_tuple=None,
warn_about_slices=True):
"""Adapt things for PMI (degrees of freedom, restraints, ...)
Returns list of list of hierarchies, separated into Molecules if possible.
The input can be a list, or a list of lists (iterable of ^1 or
iterable of ^2)
(iterable of ^2) Hierarchy -> returns input as list of list of hierarchies,
only one entry, not grouped by molecules.
(iterable of ^2) PMI::System/State/Molecule/TempResidue ->
returns residue hierarchies, grouped in molecules, at requested
resolution
@param stuff Can be one of the following inputs:
IMP Hierarchy, PMI System/State/Molecule/TempResidue, or a
list/set (of list/set) of them.
Must be uniform input, however. No mixing object types.
@param pmi_resolution For selecting, only does it if you pass PMI
objects. Set it to "all" if you want all resolutions!
@param flatten Set to True if you just want all hierarchies in one list.
@param warn_about_slices Print a warning if you are requesting only part
of a bead. Sometimes you just don't care!
@note since this relies on IMP::atom::Selection, this will not return
any objects if they weren't built! But there should be no problem
if you request unbuilt residues - they should be ignored.
"""
if stuff is None:
return stuff
if hasattr(stuff, '__iter__'):
if len(stuff) == 0:
return stuff
thelist = list(stuff)
# iter of iter of should be ok
if all(hasattr(el, '__iter__') for el in thelist):
thelist = [i for sublist in thelist for i in sublist]
elif any(hasattr(el, '__iter__') for el in thelist):
raise Exception('input_adaptor: input_object must be a list '
'or a list of lists')
stuff = thelist
else:
stuff = [stuff]
# check that it is a hierarchy homogeneously:
try:
is_hierarchy = all(IMP.atom.Hierarchy.get_is_setup(s) for s in stuff)
except (NotImplementedError, TypeError):
is_hierarchy = False
# get the other types homogeneously
is_system = all(isinstance(s, IMP.pmi.topology.System) for s in stuff)
is_state = all(isinstance(s, IMP.pmi.topology.State) for s in stuff)
is_molecule = all(isinstance(s, IMP.pmi.topology.Molecule) for s in stuff)
is_temp_residue = all(isinstance(s, IMP.pmi.topology.TempResidue)
for s in stuff)
# now that things are ok, do selection if requested
hier_list = []
pmi_input = False
if is_system or is_state or is_molecule or is_temp_residue:
# if PMI, perform selection using gathered indexes
pmi_input = True
# key is Molecule object, value are residues
indexes_per_mol = OrderedDefaultDict(list)
if is_system:
for system in stuff:
for state in system.get_states():
mdict = state.get_molecules()
for molname in mdict:
for copy in mdict[molname]:
indexes_per_mol[copy] += \
[r.get_index() for r in copy.get_residues()]
elif is_state:
for state in stuff:
mdict = state.get_molecules()
for molname in mdict:
for copy in mdict[molname]:
indexes_per_mol[copy] += [r.get_index()
for r in copy.get_residues()]
elif is_molecule:
for molecule in stuff:
indexes_per_mol[molecule] += [r.get_index()
for r in molecule.get_residues()]
else: # is_temp_residue
for tempres in stuff:
indexes_per_mol[tempres.get_molecule()].append(
tempres.get_index())
for mol in indexes_per_mol:
if pmi_resolution == 'all':
# because you select from the molecule,
# this will start the search from the base resolution
ps = select_at_all_resolutions(
mol.get_hierarchy(), residue_indexes=indexes_per_mol[mol])
else:
sel = IMP.atom.Selection(mol.get_hierarchy(),
resolution=pmi_resolution,
residue_indexes=indexes_per_mol[mol])
ps = sel.get_selected_particles()
# check that you don't have any incomplete fragments!
if warn_about_slices:
rset = set(indexes_per_mol[mol])
for p in ps:
if IMP.atom.Fragment.get_is_setup(p):
fset = set(IMP.atom.Fragment(p).get_residue_indexes())
if not fset <= rset:
minset = min(fset)
maxset = max(fset)
found = fset & rset
minf = min(found)
maxf = max(found)
resbreak = maxf if minf == minset else minset-1
warnings.warn(
'You are trying to select only part of the '
'bead %s:%i-%i. The residues you requested '
'are %i-%i. You can fix this by: '
'1) requesting the whole bead/none of it; or'
'2) break the bead up by passing '
'bead_extra_breaks=[\'%i\'] in '
'molecule.add_representation()'
% (mol.get_name(), minset, maxset, minf, maxf,
resbreak), IMP.pmi.ParameterWarning)
hier_list.append([IMP.atom.Hierarchy(p) for p in ps])
elif is_hierarchy:
# check
ps = []
if pmi_resolution == 'all':
for h in stuff:
ps += select_at_all_resolutions(h)
else:
for h in stuff:
ps += IMP.atom.Selection(
h, resolution=pmi_resolution).get_selected_particles()
hier_list = [IMP.atom.Hierarchy(p) for p in ps]
if not flatten:
hier_list = [hier_list]
else:
raise Exception('input_adaptor: you passed something of wrong type '
'or a list with mixed types')
if flatten and pmi_input:
return [h for sublist in hier_list for h in sublist]
else:
return hier_list
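# A minimal usage sketch (hypothetical PMI Molecule object "mol"):
#     groups = input_adaptor(mol, pmi_resolution=1)  # [[hier, ...]] grouped by molecule
#     flat = input_adaptor(mol, pmi_resolution='all', flatten=True)  # one flat list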
def get_sorted_segments(mol):
"""Returns sequence-sorted segments array, each containing the first
particle the last particle and the first residue index."""
from operator import itemgetter
hiers = IMP.pmi.tools.input_adaptor(mol)
if len(hiers) > 1:
raise ValueError("only pass stuff from one Molecule, please")
hiers = hiers[0]
segs = []
for h in hiers:
try:
start = IMP.atom.Hierarchy(h).get_children()[0]
except IndexError:
start = IMP.atom.Hierarchy(h)
try:
end = IMP.atom.Hierarchy(h).get_children()[-1]
except IndexError:
end = IMP.atom.Hierarchy(h)
startres = IMP.pmi.tools.get_residue_indexes(start)[0]
segs.append((start, end, startres))
return sorted(segs, key=itemgetter(2))
def display_bonds(mol):
"""Decorate the sequence-consecutive particles from a PMI2 molecule
with a bond, so that they appear connected in the rmf file"""
SortedSegments = get_sorted_segments(mol)
for x in range(len(SortedSegments) - 1):
last = SortedSegments[x][1]
first = SortedSegments[x + 1][0]
p1 = last.get_particle()
p2 = first.get_particle()
if not IMP.atom.Bonded.get_is_setup(p1):
IMP.atom.Bonded.setup_particle(p1)
if not IMP.atom.Bonded.get_is_setup(p2):
IMP.atom.Bonded.setup_particle(p2)
if not IMP.atom.get_bond(IMP.atom.Bonded(p1), IMP.atom.Bonded(p2)):
IMP.atom.create_bond(
IMP.atom.Bonded(p1),
IMP.atom.Bonded(p2), 1)
def get_all_leaves(list_of_hs):
""" Just get the leaves from a list of hierarchies """
lvs = list(itertools.chain.from_iterable(
IMP.atom.get_leaves(item) for item in list_of_hs))
return lvs
def select_at_all_resolutions(hier=None, hiers=None, **kwargs):
"""Perform selection using the usual keywords but return ALL
resolutions (BEADS and GAUSSIANS).
Returns in flat list!
"""
if hiers is None:
hiers = []
if hier is not None:
hiers.append(hier)
if len(hiers) == 0:
warnings.warn("You passed nothing to select_at_all_resolutions()",
IMP.pmi.ParameterWarning)
return []
ret = OrderedSet()
for hsel in hiers:
try:
htest = IMP.atom.Hierarchy.get_is_setup(hsel)
except: # noqa: E722
raise Exception('select_at_all_resolutions: you have to pass '
'an IMP Hierarchy')
if not htest:
raise Exception('select_at_all_resolutions: you have to pass '
'an IMP Hierarchy')
if 'resolution' in kwargs or 'representation_type' in kwargs:
raise Exception("don't pass resolution or representation_type "
"to this function")
selB = IMP.atom.Selection(hsel, resolution=IMP.atom.ALL_RESOLUTIONS,
representation_type=IMP.atom.BALLS,
**kwargs)
selD = IMP.atom.Selection(hsel, resolution=IMP.atom.ALL_RESOLUTIONS,
representation_type=IMP.atom.DENSITIES,
**kwargs)
ret |= OrderedSet(selB.get_selected_particles())
ret |= OrderedSet(selD.get_selected_particles())
return list(ret)
def get_particles_within_zone(hier,
target_ps,
sel_zone,
entire_residues,
exclude_backbone):
"""Utility to retrieve particles from a hierarchy within a
zone around a set of ps.
@param hier The hierarchy in which to look for neighbors
@param target_ps The particles for zoning
@param sel_zone The maximum distance
@param entire_residues If True, will grab entire residues
@param exclude_backbone If True, will only return sidechain particles
"""
test_sel = IMP.atom.Selection(hier)
backbone_types = ['C', 'N', 'CB', 'O']
if exclude_backbone:
test_sel -= IMP.atom.Selection(
hier, atom_types=[IMP.atom.AtomType(n) for n in backbone_types])
test_ps = test_sel.get_selected_particles()
nn = IMP.algebra.NearestNeighbor3D([IMP.core.XYZ(p).get_coordinates()
for p in test_ps])
zone = set()
for target in target_ps:
zone |= set(nn.get_in_ball(IMP.core.XYZ(target).get_coordinates(),
sel_zone))
zone_ps = [test_ps[z] for z in zone]
if entire_residues:
final_ps = set()
for z in zone_ps:
final_ps |= set(IMP.atom.Hierarchy(z).get_parent().get_children())
zone_ps = [h.get_particle() for h in final_ps]
return zone_ps
def get_rbs_and_beads(hiers):
"""Returns unique objects in original order"""
rbs = set()
beads = []
rbs_ordered = []
if not hasattr(hiers, '__iter__'):
hiers = [hiers]
for p in get_all_leaves(hiers):
if IMP.core.RigidMember.get_is_setup(p):
rb = IMP.core.RigidMember(p).get_rigid_body()
if rb not in rbs:
rbs.add(rb)
rbs_ordered.append(rb)
elif IMP.core.NonRigidMember.get_is_setup(p):
rb = IMP.core.NonRigidMember(p).get_rigid_body()
if rb not in rbs:
rbs.add(rb)
rbs_ordered.append(rb)
beads.append(p)
else:
beads.append(p)
return rbs_ordered, beads
def get_molecules(input_objects):
"This function returns the parent molecule hierarchies of given objects"
stuff = input_adaptor(input_objects, pmi_resolution='all', flatten=True)
molecules = set()
for h in stuff:
is_root = False
is_molecule = False
while not (is_molecule or is_root):
root = IMP.atom.get_root(h)
if root == h:
is_root = True
is_molecule = IMP.atom.Molecule.get_is_setup(h)
if is_molecule:
molecules.add(IMP.atom.Molecule(h))
h = h.get_parent()
return list(molecules)
def get_molecules_dictionary(input_objects):
moldict = defaultdict(list)
for mol in IMP.pmi.tools.get_molecules(input_objects):
name = mol.get_name()
moldict[name].append(mol)
for mol in moldict:
moldict[mol].sort(key=lambda x: IMP.atom.Copy(x).get_copy_index())
return moldict
def get_molecules_dictionary_by_copy(input_objects):
moldict = defaultdict(dict)
for mol in IMP.pmi.tools.get_molecules(input_objects):
name = mol.get_name()
c = IMP.atom.Copy(mol).get_copy_index()
moldict[name][c] = mol
return moldict
def get_selections_dictionary(input_objects):
moldict = IMP.pmi.tools.get_molecules_dictionary(input_objects)
seldict = defaultdict(list)
for name, mols in moldict.items():
for m in mols:
seldict[name].append(IMP.atom.Selection(m))
return seldict
def get_densities(input_objects):
"""Given a list of PMI objects, returns all density hierarchies within
    these objects. The output of this function can be passed to
    things such as EM restraints. This function is intended to gather
density particles appended to molecules (and not other hierarchies
which might have been appended to the root node directly).
"""
# Note that Densities can only be selected at the Root or Molecule
# level and not at the Leaves level.
# we'll first get all molecule hierarchies corresponding to the leaves.
molecules = get_molecules(input_objects)
densities = []
for i in molecules:
densities += IMP.atom.Selection(
i, representation_type=IMP.atom.DENSITIES).get_selected_particles()
return densities
def shuffle_configuration(objects,
max_translation=300., max_rotation=2.0 * math.pi,
avoidcollision_rb=True, avoidcollision_fb=False,
cutoff=10.0, niterations=100,
bounding_box=None,
excluded_rigid_bodies=[],
hierarchies_excluded_from_collision=[],
hierarchies_included_in_collision=[],
verbose=False,
return_debug=False):
"""Shuffle particles. Used to restart the optimization.
The configuration of the system is initialized by placing each
rigid body and each bead randomly in a box. If `bounding_box` is
specified, the particles are placed inside this box; otherwise, each
particle is displaced by up to max_translation angstroms, and randomly
rotated. Effort is made to place particles far enough from each other to
prevent any steric clashes.
@param objects Can be one of the following inputs:
IMP Hierarchy, PMI System/State/Molecule/TempResidue, or
a list/set of them
@param max_translation Max translation (rbs and flexible beads)
@param max_rotation Max rotation (rbs only)
@param avoidcollision_rb check if the particle/rigid body was
placed close to another particle; uses the optional
arguments cutoff and niterations
@param avoidcollision_fb Advanced. Generally you want this False because
it's hard to shuffle beads.
@param cutoff Distance less than this is a collision
@param niterations How many times to try avoiding collision
@param bounding_box Only shuffle particles within this box.
Defined by ((x1,y1,z1),(x2,y2,z2)).
@param excluded_rigid_bodies Don't shuffle these rigid body objects
@param hierarchies_excluded_from_collision Don't count collision
with these bodies
@param hierarchies_included_in_collision Hierarchies that are not
shuffled, but should be included in collision calculation
(for fixed regions)
@param verbose Give more output
@note Best to only call this function after you've set up degrees
of freedom
For debugging purposes, returns: <shuffled indexes>,
<collision avoided indexes>
"""
# checking input
hierarchies = IMP.pmi.tools.input_adaptor(objects,
pmi_resolution='all',
flatten=True)
rigid_bodies, flexible_beads = get_rbs_and_beads(hierarchies)
if len(rigid_bodies) > 0:
mdl = rigid_bodies[0].get_model()
elif len(flexible_beads) > 0:
mdl = flexible_beads[0].get_model()
else:
raise Exception("Could not find any particles in the hierarchy")
if len(rigid_bodies) == 0:
print("shuffle_configuration: rigid bodies were not initialized")
# gather all particles
gcpf = IMP.core.GridClosePairsFinder()
gcpf.set_distance(cutoff)
# Add particles from excluded hierarchies to excluded list
collision_excluded_hierarchies = IMP.pmi.tools.input_adaptor(
hierarchies_excluded_from_collision, pmi_resolution='all',
flatten=True)
collision_included_hierarchies = IMP.pmi.tools.input_adaptor(
hierarchies_included_in_collision, pmi_resolution='all', flatten=True)
collision_excluded_idxs = set(
leaf.get_particle().get_index()
for h in collision_excluded_hierarchies
for leaf in IMP.core.get_leaves(h))
collision_included_idxs = set(
leaf.get_particle().get_index()
for h in collision_included_hierarchies
for leaf in IMP.core.get_leaves(h))
    # Exclude Gaussians from the collision check
all_idxs = [] # expand to representations?
for p in IMP.pmi.tools.get_all_leaves(hierarchies):
if IMP.core.XYZ.get_is_setup(p):
all_idxs.append(p.get_particle_index())
if IMP.core.Gaussian.get_is_setup(p):
collision_excluded_idxs.add(p.get_particle_index())
    if bounding_box is not None:
        ((x1, y1, z1), (x2, y2, z2)) = bounding_box
        lb = IMP.algebra.Vector3D(x1, y1, z1)
        ub = IMP.algebra.Vector3D(x2, y2, z2)
        bb = IMP.algebra.BoundingBox3D(lb, ub)
all_idxs = set(all_idxs) | collision_included_idxs
all_idxs = all_idxs - collision_excluded_idxs
debug = []
print('shuffling', len(rigid_bodies), 'rigid bodies')
for rb in rigid_bodies:
if rb not in excluded_rigid_bodies:
# gather particles to avoid with this transform
if avoidcollision_rb:
rb_idxs = set(rb.get_member_particle_indexes()) - \
collision_excluded_idxs
other_idxs = all_idxs - rb_idxs
debug.append([rb, other_idxs if avoidcollision_rb else set()])
# iterate, trying to avoid collisions
niter = 0
while niter < niterations:
rbxyz = (rb.get_x(), rb.get_y(), rb.get_z())
# local transform
if bounding_box:
translation = IMP.algebra.get_random_vector_in(bb)
# First move to origin
transformation_orig = IMP.algebra.Transformation3D(
IMP.algebra.get_identity_rotation_3d(),
-IMP.core.XYZ(rb).get_coordinates())
IMP.core.transform(rb, transformation_orig)
rotation = IMP.algebra.get_random_rotation_3d()
transformation = IMP.algebra.Transformation3D(rotation,
translation)
else:
transformation = \
IMP.algebra.get_random_local_transformation(
rbxyz, max_translation, max_rotation)
IMP.core.transform(rb, transformation)
# check collisions
if avoidcollision_rb and other_idxs:
mdl.update()
npairs = len(gcpf.get_close_pairs(mdl,
list(other_idxs),
list(rb_idxs)))
if npairs == 0:
break
else:
niter += 1
if verbose:
print("shuffle_configuration: rigid body placed "
"close to other %d particles, trying "
"again..." % npairs)
print("shuffle_configuration: rigid body name: "
+ rb.get_name())
if niter == niterations:
raise ValueError(
"tried the maximum number of iterations to "
"avoid collisions, increase the distance "
"cutoff")
else:
break
print('shuffling', len(flexible_beads), 'flexible beads')
for fb in flexible_beads:
# gather particles to avoid
if avoidcollision_fb:
fb_idxs = set(IMP.get_indexes([fb]))
other_idxs = all_idxs - fb_idxs
if not other_idxs:
continue
# iterate, trying to avoid collisions
niter = 0
while niter < niterations:
if bounding_box:
translation = IMP.algebra.get_random_vector_in(bb)
transformation = IMP.algebra.Transformation3D(translation)
else:
fbxyz = IMP.core.XYZ(fb).get_coordinates()
transformation = IMP.algebra.get_random_local_transformation(
fbxyz, max_translation, max_rotation)
# For gaussians, treat this fb as an rb
if IMP.core.NonRigidMember.get_is_setup(fb):
memb = IMP.core.NonRigidMember(fb)
xyz = memb.get_internal_coordinates()
if bounding_box:
# 'translation' is the new desired position in global
# coordinates; we need to convert that to internal
# coordinates first using the rigid body's ref frame
rf = memb.get_rigid_body().get_reference_frame()
glob_to_int = rf.get_transformation_from()
memb.set_internal_coordinates(
glob_to_int.get_transformed(translation))
else:
xyz_transformed = transformation.get_transformed(xyz)
memb.set_internal_coordinates(xyz_transformed)
if niter == 0:
debug.append(
[xyz, other_idxs if avoidcollision_fb else set()])
else:
d = IMP.core.XYZ(fb)
if bounding_box:
# Translate to origin first
if IMP.core.RigidBody.get_is_setup(fb.get_particle()):
IMP.core.transform(
IMP.core.RigidBody(fb.get_particle()),
-d.get_coordinates())
else:
IMP.core.transform(d, -d.get_coordinates())
d = IMP.core.XYZ(fb)
if niter == 0:
debug.append(
[d, other_idxs if avoidcollision_fb else set()])
if IMP.core.RigidBody.get_is_setup(fb.get_particle()):
IMP.core.transform(
IMP.core.RigidBody(fb.get_particle()), transformation)
else:
IMP.core.transform(d, transformation)
if avoidcollision_fb:
mdl.update()
npairs = len(gcpf.get_close_pairs(mdl,
list(other_idxs),
list(fb_idxs)))
if npairs == 0:
break
else:
niter += 1
print("shuffle_configuration: floppy body placed close "
"to other %d particles, trying again..." % npairs)
if niter == niterations:
raise ValueError(
"tried the maximum number of iterations to avoid "
"collisions, increase the distance cutoff")
else:
break
if return_debug:
return debug
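# A minimal usage sketch (hypothetical root hierarchy; call only after the
# degrees of freedom have been set up):
#     shuffle_configuration(root_hier, max_translation=50.,
#                           bounding_box=((-100, -100, -100), (100, 100, 100)))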
class ColorHierarchy(object):
def __init__(self, hier):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
self.mpl = mpl
self.plt = plt
hier.ColorHierarchy = self
self.hier = hier
mols = IMP.pmi.tools.get_molecules(IMP.atom.get_leaves(self.hier))
self.mols = [IMP.pmi.topology.PMIMoleculeHierarchy(mol)
for mol in mols]
self.method = self.nochange
self.scheme = None
self.first = None
self.last = None
def nochange(self):
pass
def get_color(self, fl):
return IMP.display.Color(*self.scheme(fl)[0:3])
def get_log_scale(self, fl):
import math
eps = 1.0
return math.log(fl+eps)
def color_by_resid(self):
self.method = self.color_by_resid
self.scheme = self.mpl.cm.rainbow
for mol in self.mols:
self.first = 1
self.last = len(IMP.pmi.topology.PMIMoleculeHierarchy(
mol).get_residue_indexes())
for p in IMP.atom.get_leaves(mol):
if IMP.atom.Residue.get_is_setup(p):
ri = IMP.atom.Residue(p).get_index()
c = self.get_color(float(ri)/self.last)
IMP.display.Colored(p).set_color(c)
if IMP.atom.Fragment.get_is_setup(p):
ris = IMP.atom.Fragment(p).get_residue_indexes()
avr = sum(ris)/len(ris)
c = self.get_color(float(avr)/self.last)
IMP.display.Colored(p).set_color(c)
def color_by_uncertainty(self):
self.method = self.color_by_uncertainty
self.scheme = self.mpl.cm.jet
ps = IMP.atom.get_leaves(self.hier)
unc_dict = {}
for p in ps:
if IMP.pmi.Uncertainty.get_is_setup(p):
u = IMP.pmi.Uncertainty(p).get_uncertainty()
unc_dict[p] = u
self.first = self.get_log_scale(1.0)
self.last = self.get_log_scale(100.0)
for p in unc_dict:
value = self.get_log_scale(unc_dict[p])
if value >= self.last:
value = self.last
if value <= self.first:
value = self.first
c = self.get_color((value-self.first) / (self.last-self.first))
IMP.display.Colored(p).set_color(c)
def get_color_bar(self, filename):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.clf()
fig = plt.figure(figsize=(8, 3))
ax1 = fig.add_axes([0.05, 0.80, 0.9, 0.15])
cmap = self.scheme
norm = mpl.colors.Normalize(vmin=0.0, vmax=1.0)
if self.method == self.color_by_uncertainty:
angticks = [1.0, 2.5, 5.0, 10.0, 25.0, 50.0, 100.0]
vvalues = []
marks = []
for at in angticks:
vvalue = (self.get_log_scale(at)-self.first) \
/ (self.last-self.first)
if vvalue <= 1.0 and vvalue >= 0.0:
vvalues.append(vvalue)
marks.append(str(at))
cb1 = mpl.colorbar.ColorbarBase(
ax1, cmap=cmap, norm=norm, ticks=vvalues,
orientation='horizontal')
print(self.first, self.last, marks, vvalues)
cb1.ax.set_xticklabels(marks)
        cb1.set_label('Angstrom')
plt.savefig(filename, dpi=150, transparent=True)
plt.show()
def color2rgb(colorname):
"""Given a Chimera color name or hex color value, return RGB"""
d = {'aquamarine': (0.4980392156862745, 1.0, 0.8313725490196079),
'black': (0.0, 0.0, 0.0),
'blue': (0.0, 0.0, 1.0),
'brown': (0.6470588235, 0.16470588235294117, 0.16470588235294117),
'chartreuse': (0.4980392156862745, 1.0, 0.0),
'coral': (1.0, 0.4980392156862745, 0.3137254901960784),
'cornflower blue': (0.39215686, 0.58431372549, 0.9294117647058824),
'cyan': (0.0, 1.0, 1.0),
'dark cyan': (0.0, 0.5450980392156862, 0.5450980392156862),
'dark gray': (0.6627450980, 0.6627450980392157, 0.6627450980392157),
'dark green': (0.0, 0.39215686274509803, 0.0),
'dark khaki': (0.74117647, 0.7176470588235294, 0.4196078431372549),
'dark magenta': (0.5450980392156862, 0.0, 0.5450980392156862),
'dark olive green': (0.333333333, 0.419607843, 0.1843137254901961),
'dark red': (0.5450980392156862, 0.0, 0.0),
'dark slate blue': (0.28235294, 0.239215686, 0.5450980392156862),
'dark slate gray': (0.1843137, 0.30980392, 0.30980392156862746),
'deep pink': (1.0, 0.0784313725490196, 0.5764705882352941),
'deep sky blue': (0.0, 0.7490196078431373, 1.0),
'dim gray': (0.41176470, 0.4117647058823529, 0.4117647058823529),
'dodger blue': (0.11764705882352941, 0.5647058823529412, 1.0),
'firebrick': (0.6980392, 0.13333333333333333, 0.13333333333333333),
'forest green': (0.13333333, 0.5450980392156862, 0.13333333333333333),
'gold': (1.0, 0.8431372549019608, 0.0),
'goldenrod': (0.85490196, 0.6470588235294118, 0.12549019607843137),
'gray': (0.7450980392156863, 0.7450980392156863, 0.7450980392156863),
'green': (0.0, 1.0, 0.0),
'hot pink': (1.0, 0.4117647058823529, 0.7058823529411765),
'khaki': (0.9411764705882353, 0.9019607843137255, 0.5490196078431373),
'light blue': (0.67843137, 0.8470588235294118, 0.9019607843137255),
'light gray': (0.82745098, 0.8274509803921568, 0.8274509803921568),
'light green': (0.56470588, 0.9333333333333333, 0.5647058823529412),
'light sea green': (0.125490, 0.6980392156862745, 0.6666666666666666),
'lime green': (0.1960784, 0.803921568627451, 0.19607843137254902),
'magenta': (1.0, 0.0, 1.0),
'medium blue': (0.1960784, 0.19607843137254902, 0.803921568627451),
'medium purple': (0.57647, 0.4392156862745098, 0.8588235294117647),
'navy blue': (0.0, 0.0, 0.5019607843137255),
'olive drab': (0.4196078, 0.5568627450980392, 0.13725490196078433),
'orange red': (1.0, 0.27058823529411763, 0.0),
'orange': (1.0, 0.4980392156862745, 0.0),
'orchid': (0.85490196, 0.4392156862745098, 0.8392156862745098),
'pink': (1.0, 0.7529411764705882, 0.796078431372549),
'plum': (0.8666666666666667, 0.6274509803921569, 0.8666666666666667),
'purple': (0.62745098, 0.12549019607843137, 0.9411764705882353),
'red': (1.0, 0.0, 0.0),
'rosy brown': (0.7372549, 0.5607843137254902, 0.5607843137254902),
'salmon': (0.980392, 0.5019607843137255, 0.4470588235294118),
'sandy brown': (0.956862745, 0.6431372549019608, 0.3764705882352941),
'sea green': (0.18039, 0.5450980392156862, 0.3411764705882353),
'sienna': (0.6274509, 0.3215686274509804, 0.17647058823529413),
'sky blue': (0.52941176, 0.807843137254902, 0.9215686274509803),
'slate gray': (0.439215686, 0.50196078, 0.5647058823529412),
'spring green': (0.0, 1.0, 0.4980392156862745),
'steel blue': (0.2745098, 0.50980392, 0.70588235),
'tan': (0.8235294117647058, 0.7058823529411765, 0.5490196078431373),
'turquoise': (0.25098039, 0.87843137, 0.81568627),
'violet red': (0.81568627, 0.125490196, 0.56470588235),
'white': (1.0, 1.0, 1.0),
'yellow': (1.0, 1.0, 0.0)}
if colorname.startswith('#'):
return tuple(int(colorname[i:i+2], 16) / 255. for i in (1, 3, 5))
else:
return d[colorname]
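
# Illustrative examples (not part of the original module):
#   color2rgb('red')      -> (1.0, 0.0, 0.0)
#   color2rgb('#ff8000')  -> (1.0, 0.5019607843137255, 0.0)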
|
salilab/pmi
|
pyext/src/tools.py
|
tools.py
|
py
| 60,875 |
python
|
en
|
code
| 12 |
github-code
|
6
|
43970042116
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
Pedro Cerqueira
github: @pedrorvc
DESCRIPTION
    This script creates the XML files containing the information necessary
    for the execution of BRIG (Blast Ring Image Generator), reducing the time
    spent on the tedious task of setting up all the information in the GUI
    and providing a quick way to produce an image.
The arguments for this script provide some (but not all)
of the available options in BRIG, which were the ones I used to change the most.
USAGE:
brigaid.py -q reference_sequence.fna -rfd path/to/reference/dir -od path/to/output/dir -of path/to/output/dir/output_file
-oi path/to/output/BRIG/output_image -t Image_title -a annotation_file.gbk --genes genes_of_interest.txt
        --contig_order contig_order.tsv
"""
import argparse
import csv
import os
import xml.etree.ElementTree as ET
from collections import OrderedDict
from xml.dom import minidom
from Bio import SeqIO
from matplotlib import cm
def listdir_fullpath(path):
""" Gets the full path of the files from a directory
Args:
path (str): full path to a directory
Returns:
list containing the full path of every file contained in the input directory
"""
return [os.path.join(path, f) for f in os.listdir(path)]
def ring_attributes(colour, name, position):
""" Creates ring attributes.
Args:
colour (str): color of the ring.
name (str): name of the ring.
position (str): position of the ring.
Returns:
ring_attrs (dict): attributes of any regular ring of the BRIG xml.
"""
ring_attrs = {"colour" : colour,
"name": name,
"position" : position,
"upperInt" : "90",
"lowerInt" : "70",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return ring_attrs
def annotation_ring_attributes(position):
""" Creates annotation ring attributes.
Args:
position (str): position of the ring.
Returns:
annotation_ring_attrs (dict): attributes of the annotation ring of the BRIG xml.
"""
annotation_ring_attrs = {"colour" : '172,14,225',
"name": 'null',
"position" : position,
"upperInt" : "70",
"lowerInt" : "50",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return annotation_ring_attrs
def create_feature_attrs(label, colour, decoration, start, stop):
""" Create attributes for the Feature SubElements of the annotation ring.
Args:
label (str): name of the gene/CDS to annotate
colour (str): colour of the decoration for the annotation
decoration (str): shape of the gene/CDS to annotate, for example, 'clockwise-arrow'
start (str): start of the gene/CDS to annotate
stop (str): stop of the gene/CDS to annotate
    Returns:
feature_element_attrs (dict): attributes of the feature element.
feature_range_element_attrs (dict): attributes of the feature range element
"""
feature_element_attrs = {'label' : label,
'colour' : colour,
'decoration' : decoration}
feature_range_element_attrs = {'start' : start,
'stop' : stop}
return feature_element_attrs, feature_range_element_attrs
def create_annotation_ring_tsv(annotation_ring, annotation_file):
""" Uses a tsv file to annotate the reference genome.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
"""
with open(annotation_file) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Obtain the annotations from the file contents
for row in reader:
start = row['#START']
stop = row['STOP']
label = row['Label']
colour = row['Colour']
decoration = row['Decoration']
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, colour, decoration, start, stop)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
def annotation_ring_feature_elements_gbk_concat(annotation_ring, record, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
Boolean (False) when a concatenated Genbank is provided.
"""
#if type(genome_size) == int:
# Obtain the features of the Genbank file records
for fea in record.features:
# Get the start and end position of the genome
# Also get the strand
if fea.type == 'CDS':
start = str(fea.location.start.position)
end = str(fea.location.end.position)
strand = fea.location.strand
# Get the label of the gene or product
if 'gene' in fea.qualifiers:
label = str(fea.qualifiers['gene'][0])
elif 'product' in fea.qualifiers:
product = fea.qualifiers['product'][0]
label = str(product)
else:
continue
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if fea.type == 'source':
size = fea.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, record, genes, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file
and specific gene annotations.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
        genes (list): names of the genes/products to restrict the annotation to.
        genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided.
            Boolean (False) when a concatenated Genbank is provided.
"""
for f in record.features:
if f.type == 'CDS':
# Find the 'gene' tag and determine if the gene belongs to the specified genes to be annotated
if 'gene' in f.qualifiers and f.qualifiers['gene'][0] in genes:
label = f.qualifiers['gene'][0]
elif 'product' in f.qualifiers and f.qualifiers['product'][0] in genes:
product = f.qualifiers['product'][0]
label = product
else:
continue
# Determine the start, stop and strand of the gene
start = str(f.location.start.position + genome_size)
end = str(f.location.end.position + genome_size)
strand = f.location.strand
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if f.type == "source":
size = f.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records):
""" Create annotation ring using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
"""
if genes_of_interest != []:
        # Get the genes to search for in the Genbank file
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
# Create feature elements of the annotation ring
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order):
""" Create annotation ring using a Genbank annotation file divided by contigs.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
contig_order (str): Full path to the file containing the order of the contigs.
"""
if contig_order != []:
with open(contig_order) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Create an OrderedDict with the contents of the file
            # The keys are numbers representing the order of the contigs
# The values are the names of the contigs
content_dict = OrderedDict()
for r in reader:
content_dict[r["order"]] = r["contig"]
# Create an OrderedDict with the content of each contig
# The keys are the names of the contigs
            # The values are SeqRecord objects from BioPython
seq_records_dict = OrderedDict()
for record in records:
seq_records_dict[record.id] = record
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, ord_record, genes, genome_size)
genome_size = gsize
else:
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_gbk_concat(annotation_ring, ord_record, genome_size)
genome_size = gsize
else:
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def write_xml(root_elem, output_file):
""" Writes a xml file.
Args:
root_elem is a ElementTree Element object containing all the information
required for the output file.
output_file (str): full path to the output file
"""
xml_file = ET.tostring(root_elem, encoding='utf8').decode('utf8')
pretty_xml_file = minidom.parseString(xml_file).toprettyxml(indent=' ')
output_file = output_file + ".xml"
with open(output_file, "w") as f:
f.write(pretty_xml_file)
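
# Minimal usage sketch (illustrative; any ElementTree root works):
#   root = ET.Element('BRIG')
#   write_xml(root, '/tmp/demo')  # writes pretty-printed XML to /tmp/demo.xml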
####### Create xml elements
# Create root element
def create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format):
"""
Creates the root element of the xml file and its attributes.
Args:
blast_options (str): additional options for blast, for example, -evalue or num_threads
legend_position (str): position of the legend on the image
query_file (str): full path to the query file
output_folder (str): full path to the output folder
image_output_file (str): full path to the image output file
title (str): title of the output image
image_format (str): format of the image output file
Returns:
root: ElementTree Element object containing the BRIG tag and its attributes
"""
root_attrs = {"blastOptions" : blast_options,
"legendPosition" : legend_position,
"queryFile" : query_file,
"outputFolder" : output_folder,
"blastPlus" : "yes",
"outputFile" : os.path.join(output_folder, image_output_file),
"title" : title,
"imageFormat" : image_format,
"queryFastaFile" : query_file,
"cgXML" : os.path.join(output_folder + "/scratch", os.path.basename(query_file) + ".xml")}
root = ET.Element('BRIG', attrib=root_attrs)
return root
#### Create root children
# Create cgview_settings element
def create_cgview_settings_element(root, height, width):
""" Creates the cgview_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
height (str): height of the output image in pixels
width (str): width of the output image in pixels
Returns:
cgview_settings: ElementTree SubElement object containing the cgview settings tag and its attributes
"""
cgview_settings_attrs = {"arrowheadLength" : "medium",
"backboneColor" : "black",
"backboneRadius" : "600",
"backboneThickness" : "medium",
"backgroundColor" : "white",
"borderColor" : "black",
"featureSlotSpacing" : "medium",
"featureThickness" : "30",
"giveFeaturePositions" : "false",
"globalLabel" : "true",
"height" : height,
"isLinear" : "false",
"labelFont" : "SansSerif,plain,25",
"labelLineLength" : "medium",
"labelLineThickness" : "medium",
"labelPlacementQuality" : "best",
"labelsToKeep" : "1000",
"longTickColor" : "black",
"minimumFeatureLength" : "medium",
"moveInnerLabelsToOuter" :"true",
"origin" : "12",
"rulerFont" : "SansSerif,plain,35",
"rulerFontColor" : "black",
"rulerPadding" : "40",
"rulerUnits" : "bases",
"shortTickColor" : "black",
"shortTickThickness" : "medium",
"showBorder" : "false",
"showShading" : "true",
"showWarning" : "false",
"tickDensity" : "0.2333",
"tickThickness" : "medium",
"titleFont" : "SansSerif,plain,45",
"titleFontColor" : "black",
"useColoredLabelBackgrounds" : "false",
"useInnerLabels" : "true",
"warningFont" : "Default,plain,35",
"warningFontColor" : "black",
"width" : width,
"zeroTickColor" : "black",
"tickLength" : "medium"}
cgview_settings = ET.SubElement(root, 'cgview_settings', attrib=cgview_settings_attrs)
return cgview_settings
# Create brig_settings element
def create_brig_settings_element(root, java_memory):
""" Creates the brig_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
java_memory (str): amount of memory (in bytes) java is allowed to use for BRIG
Returns:
brig_settings: ElementTree SubElement object containing the brig settings tag and its attributes
"""
brig_settings_attrs = {"Ring1" : "172,14,225",
"Ring2" : "222,149,220",
"Ring3" : "161,221,231",
"Ring4" : "49,34,221",
"Ring5" : "116,152,226",
"Ring6" : "224,206,38",
"Ring7" : "40,191,140",
"Ring8" : "158,223,139",
"Ring9" : "226,38,122",
"Ring10" :"211,41,77",
"defaultUpper" : "70",
"defaultLower" : "50",
"defaultMinimum" : "50",
"genbankFiles" : "gbk,gb,genbank",
"fastaFiles" : "fna,faa,fas,fasta,fa",
"emblFiles" : "embl",
"blastLocation" : "",
"divider" : "3",
"multiplier" : "3",
"memory" : java_memory,
"defaultSpacer" : "0"}
brig_settings = ET.SubElement(root,
"brig_settings",
attrib=brig_settings_attrs)
return brig_settings
## Create special element
def create_special_element(root):
"""Creates the 'special' element of the xml file and its attributes
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
Returns:
gc_content_special: ElementTree SubElement object containing the 'special' tag and its attributes
gc_skew_special: ElementTree SubElement object containing the 'special' tag and its attributes
"""
gc_content_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Content'})
gc_skew_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Skew'})
return gc_content_special, gc_skew_special
# Create reference dir element
def create_reference_directory_element(root, reference_directory):
""" Creates the 'reference directory' element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
Returns:
ref_file: ElementTree SubElement object containing the 'refFile' tag and its attributes
"""
ref_dir = ET.SubElement(root,
"refDir",
attrib={"location" : reference_directory})
# Obtain the full path for all the files in the directory
ref_dir_list = listdir_fullpath(reference_directory)
for f in ref_dir_list:
ref_file = ET.SubElement(ref_dir,
"refFile",
attrib={"location" : f})
return ref_file
# Create the ring where the annotations are defined
def create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order):
""" Creates the ring that will contain the annotations for the reference genome.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing a list of specific genes.
contig_order (str): Full path to the tab-delimited file containing the order of the contigs.
"""
# Determine the position of the annotation ring, which will be the position after the last reference genome
ring_position = len(os.listdir(reference_directory)) + 2
# Create the annotation ring element
annotation_ring = ET.SubElement(root, 'ring', attrib=annotation_ring_attributes(str(ring_position)))
# Check for tab-delimited annotation file input
if list(SeqIO.parse(annotation_file, "genbank")) == []:
create_annotation_ring_tsv(annotation_ring, annotation_file)
else:
# Get the records of the Genbank file
records = [r for r in SeqIO.parse(annotation_file, "genbank")]
### Check if a contig order file has been provided
if len(records) > 1: # If more than 1 record exists, then the Genbank file is divided by contigs
create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order)
else:
create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records)
## Create remaining rings
def create_ring_element(root, reference_directory, colormap):
""" Creates the ring elements of the xml file, containing the position and color of the rings.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
colormap (str): name of the colormap (available in matplotlib) to use for the color of the rings
Returns:
ring_number_element: ElementTree SubElement object containing the 'ring' tag and its attributes
ring_sequence_element: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
ref_dir_list = listdir_fullpath(reference_directory)
# Gets the colormap from matplotlib with as many colors as the number of files
cmap = cm.get_cmap(colormap, len(ref_dir_list))
list_colormap = cmap.colors.tolist()
# Remove the fourth element (transparency) because it is not necessary
colors_to_use = []
for l in list_colormap:
convert = [round(x * 255) for x in l]
convert.pop()
colors_to_use.append(convert)
#reversed_colors_to_use = colors_to_use[::-1]
# Check if the user provided an order for the rings
has_digit = [os.path.basename(x).split("_")[0].isdigit() for x in ref_dir_list]
if True in has_digit:
# Obtain the ring positions
ring_positions = [os.path.basename(x).split("_")[0] for x in ref_dir_list]
# Reverse sort the positions of the rings, because they will be created
# in a descending order of their positions
ring_positions.sort(reverse=True)
ref_dir_list.sort(reverse=True)
for ring in range(len(ref_dir_list)):
# The ring positions start at 2 due to the special rings (GC Content and GC Skew)
ring_position = int(ring_positions[ring]) + 1
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[1]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_position)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
else:
# Sort files by lowercase
ref_dir_list.sort(key=lambda y: y.lower())
# The number of rings starts at 2 due to the GC Content and GC Skew
ring_number = len(ref_dir_list) + 1
for ring in range(len(ref_dir_list)):
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[0]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_number)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
ring_number -= 1
return ring_number_element, ring_sequence_element
## Create special rings
def create_special_ring_element(root):
""" Create the 'special' ring element and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
Returns:
gc_content_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
gc_skew_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
# Create ring attributes
gc_content_ring_attrs = ring_attributes('225,0,0', "GC Content", "0")
gc_skew_ring_attrs = ring_attributes('225,0,0', "GC Skew", "1")
# Add ring element to root
gc_skew_ring = ET.SubElement(root, 'ring', attrib=gc_skew_ring_attrs)
gc_content_ring = ET.SubElement(root, 'ring', attrib=gc_content_ring_attrs)
# Add sequence element to ring
gc_content_location = ET.SubElement(gc_content_ring, 'sequence', attrib={'location' : 'GC Content'})
gc_skew_location = ET.SubElement(gc_skew_ring, 'sequence', attrib={'location' : 'GC Skew'})
return gc_content_location, gc_skew_location
def main(query_file, reference_directory, output_folder, output_xml, image_output_file, title, annotation_file,
genes_of_interest, contig_order, blast_options, legend_position, image_format, height, width, java_memory, colormap):
root = create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format)
cgview_settings = create_cgview_settings_element(root, height, width)
brig_settings = create_brig_settings_element(root, java_memory)
special = create_special_element(root)
refdir = create_reference_directory_element(root, reference_directory)
if annotation_file:
create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order)
rings = create_ring_element(root, reference_directory, colormap)
special_ring = create_special_ring_element(root)
write_xml(root, output_xml)
print("\n File written to {}".format(output_xml))
def parse_arguments():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-q', '--query', type=str, required=True, dest='query_file',
help='Path to the query/reference FASTA file.')
parser.add_argument('-rfd', '--ref_dir', type=str, required=True, dest='reference_directory',
help='Path to the directory where the FASTA files to compare against the reference are located.')
parser.add_argument('-od', '--out_dir', type=str, required=True, dest='output_folder',
help='Path to the output directory for the results of BRIG.')
parser.add_argument('-of', '--out_xml', type=str, required=True, dest='output_file',
help='Path to the output of this script.')
parser.add_argument('-oi', '--out_img', type=str, required=True, dest='image_output_file',
help='Path to the output file of the resulting image of BRIG.')
parser.add_argument('-t', '--title', type=str, required=True, dest='title',
help='Title of the resulting image from BRIG.')
parser.add_argument('-a', '--annotation', type=str, required=False, dest='annotation_file', default=False,
help='File containing annotations for the reference genome. '
                             'The annotation file can be a tab-delimited file (.tsv) or a Genbank format file (.gbk, .gb)')
parser.add_argument('--genes', type=str, required=False, dest='genes_of_interest', default=[],
help='File containing a list of specific genes (one gene per line) to search when a Genbank annotation file is provided. ')
parser.add_argument('--contig_order', type=str, required=False, dest='contig_order', default=[],
help='Tab-delimited file containing the order of the contigs when a Genbank (divided by contigs) annotation file is provided. '
'Example: order contig '
'1 Contig8')
parser.add_argument('-b', '--blast_options', type=str, required=False, dest="blast_options", default="-evalue 0.001 -num_threads 6",
help='Options for running BLAST.')
parser.add_argument('-l', '--legend_pos', type=str, required=False, dest="legend_position", default="middle-right",
                        help='Position of the legend on the resulting image. '
                             'The options available are upper, center or lower, '
                             'paired with left, center or right.')
parser.add_argument('-if', '--image_format', type=str, required=False, dest="image_format", default="jpg",
                        help='Format of the resulting image file. '
                             'The available options are: jpg, png, svg or svgz.')
parser.add_argument('-ht', '--height', type=str, required=False, dest="height", default="3000",
help='Height (in pixels) of the resulting image.')
parser.add_argument('-wd', '--width', type=str, required=False, dest="width", default="3000",
help='Width (in pixels) of the resulting image.')
parser.add_argument('-jm', '--java_memory', type=str, required=False, dest="java_memory", default="1500",
help='Amount of memory (in bytes) that Java is allowed to use for BRIG.')
parser.add_argument('-cm', '--colormap', type=str, required=False, dest="colormap", default="viridis",
help='Colormap from matplotlib to use for the color of the rings. '
                             'The available options are: viridis, plasma, inferno, magma and cividis. '
'More options for colormaps at: '
'https://matplotlib.org/users/colormaps.html')
args = parser.parse_args()
return [args.query_file, args.reference_directory, args.output_folder, args.output_file,
args.image_output_file, args.title, args.annotation_file, args.genes_of_interest, args.contig_order,
args.blast_options, args.legend_position, args.image_format, args.height, args.width, args.java_memory, args.colormap]
if __name__ == '__main__':
args = parse_arguments()
main(args[0], args[1], args[2], args[3], args[4], args[5], args[6],
args[7], args[8], args[9], args[10], args[11], args[12], args[13],
args[14], args[15])
|
TAMU-CPT/galaxy-tools
|
tools/genome_viz/brigaid.py
|
brigaid.py
|
py
| 36,126 |
python
|
en
|
code
| 5 |
github-code
|
6
|
28895134213
|
apuntador = None
class Nodo(object):
def __init__(self, data):
self.data = data
self.next = None
def push(self):
global apuntador
apuntador.next = self
apuntador = self
print("Se ha ingresado: "+self.data)
opcion = 0
raiz = Nodo("raiz")
apuntador = raiz
miNodo = None
while (opcion != 5):
    opcion = int(input("\n\nOption to perform: \n\t1) Insert element\n\t2) Delete element\n\t3) Show\n\t5) Exit \t"))
if opcion == 1:
        miNodo = Nodo(input("Enter element: "))
miNodo.push()
    elif opcion == 2:
        print("Not implemented yet.")
    elif opcion == 3:
        print("Not implemented yet.")
    elif opcion != 5:
        print("Enter a valid option")
print("Adios.")
|
RamirezNOD/EstructurasNOD
|
Practica17-ListasPush.py
|
Practica17-ListasPush.py
|
py
| 896 |
python
|
es
|
code
| 0 |
github-code
|
6
|
5592159173
|
from .http import *
from .abc import User, Channel
from .channels import DMChannel
import asyncio as aio
class Client:
"""
Base class for interacting with discord
"""
def __init__(self, token: str):
self.http = HTTPClient(token)
self.event_loop = aio.new_event_loop()
aio.set_event_loop(self.event_loop)
self.events = {
"ready":None,
"tick":None
}
def event(self, _coro):
"""
Override event
=====
        event_name: 'ready' runs after login; 'tick' runs on every loop iteration
"""
self.events[_coro.__name__] = _coro
return _coro
async def login(self) -> User:
"""
Get the bot's userdata
"""
response = await self.http.connect(Route("GET", "users/@me"))
if self.events["ready"]:
await self.events["ready"]()
user = User()
await user.from_dictionary(response)
return user
async def run(self):
await self.login()
while True:
await aio.sleep(0.05)
await self.events["tick"]()
async def send_typing(self, channel:Channel):
response = await self.http.connect(Route("POST", f"channels/{channel.id}/typing"))
return response
async def get_user(self, id:int) -> User:
"""
Get userdata from ID
"""
response = await self.http.connect(Route("GET", f"users/{id}"))
user = User()
await user.from_dictionary(response)
return user
async def get_channel(self, id: int, dm:bool=False):
"""
Get a channel
"""
url = f"channels/@me/{id}" if dm else f"channels/{id}"
response = await self.http.connect(Route("GET", url))
channel = Channel() if not dm else DMChannel()
await channel.from_dictionary(response)
channel.bot_caller = self
return channel
async def close_connection(self):
await self.http.close_session()
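
# Hypothetical usage sketch (token and channel ID are placeholders, not part
# of the original module):
#
#     client = Client("YOUR_BOT_TOKEN")
#
#     @client.event
#     async def ready():
#         channel = await client.get_channel(1234567890)
#         await client.send_typing(channel)
#
#     client.event_loop.run_until_complete(client.login())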
|
ledanne/descapede
|
diswrap/client.py
|
client.py
|
py
| 2,060 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27627859074
|
#!/usr/bin/python3
def add_tuple(tuple_a=(), tuple_b=()):
# use slice to get the first two element
a = tuple_a[:2]
b = tuple_b[:2]
# fill the missing element with 0
while len(a) < 2:
a += (0,)
while len(b) < 2:
b += (0,)
# get the sum of the tuple
sum_tuple = (a[0] + b[0], a[1] + b[1])
return sum_tuple
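
# Illustrative behavior (examples, not part of the original task file):
#   add_tuple((1, 2, 3), (4,)) -> (5, 2)   extra elements are ignored, missing ones padded with 0
#   add_tuple()                -> (0, 0)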
|
Hovixen/alx-higher_level_programming
|
0x03-python-data_structures/7-add_tuple.py
|
7-add_tuple.py
|
py
| 357 |
python
|
en
|
code
| 0 |
github-code
|
6
|
39400883437
|
import boto3
import pickle
from typing import Any, Tuple
import logging
from re import sub
import pandas as pd
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.model_selection import StratifiedShuffleSplit
import xgboost as xgb
# ----- Class for uploading and downloading Python objects to and from S3 ---- #
class S3Pickle:
"""
A class for uploading and downloading Python objects to and from S3.
"""
def __init__(self, s3_client=None):
"""
Constructor for S3Pickle class.
Parameters
----------
s3_client : _type_, optional
A boto3 S3 client. The default is None.
"""
if s3_client is None:
self.s3_client = boto3.client('s3')
else:
self.s3_client = s3_client
def upload_pickle(self, obj: Any, bucket_name: str, key_name: str) -> None:
"""
Upload a Python object to S3 as a pickle byte string.
Parameters
----------
obj : Any
A Python object.
bucket_name : str
S3 bucket name.
key_name : str
S3 key name.
"""
# Serialize the object to a pickle byte string
pickle_byte_string = pickle.dumps(obj)
# Upload the pickle byte string to S3
self.s3_client.put_object(Body=pickle_byte_string, Bucket=bucket_name, Key=key_name)
return None
def download_pickle(self, bucket_name: str, key_name: str) -> Any:
"""
Download a Python object from S3 as a pickle byte string.
Parameters
----------
bucket_name : str
S3 bucket name.
key_name : str
S3 key name.
"""
# Download the pickle byte string from S3
response = self.s3_client.get_object(Bucket=bucket_name, Key=key_name)
pickle_byte_string = response['Body'].read()
# Deserialize the pickle byte string to a Python object
obj = pickle.loads(pickle_byte_string)
return obj
# ----------------------------------- Data ----------------------------------- #
def load_data(data_s3_url: str, logger: logging.Logger = None) -> Tuple[pd.DataFrame, np.ndarray]:
"""
Load data from S3 bucket and return X and y.
Parameters
----------
data_s3_url : str
S3 url of data.
logger : logging.Logger
Logger object.
Returns
-------
Tuple[pd.DataFrame, np.ndarray]
Feature matrix and target array.
"""
data = pd.read_csv(
data_s3_url,
index_col=0
)
# Drop ID column and 'churn category' column (not useful for prediction)
data.drop(['Customer ID', 'Churn Category'], axis=1, inplace=True)
# Change column names to lower case and relace white spaces with underscore
    data.columns = [sub(r'\s', '_', col.lower()) for col in data.columns]
X, y = data.drop(['churn_value'], axis=1), data.churn_value.values
if logger is not None:
logger.info('Data Loaded')
logger.info(f'The shape of training set: {(X.shape, y.shape)}')
return X, y
# ----------------------- Custom metric for evaluation ----------------------- #
def weighted_ap_score(predt: np.ndarray, data: np.ndarray) -> Tuple[str, float]:
    """
    Compute the weighted average precision score, returned as a
    (metric_name, value) pair for use as a custom evaluation metric.
    """
    y_true = data
    y_score = predt
    weighted_ap_score = average_precision_score(y_true=y_true, y_score=y_score, average='weighted', pos_label=1)
    return 'avgAP', weighted_ap_score
# ------------------------ Stratified train/test split ----------------------- #
def stratified_split(X_train: pd.DataFrame, y_train: np.ndarray) -> Tuple[pd.DataFrame, np.ndarray, pd.DataFrame, np.ndarray]:
"""
Split the training set into train and validation sets, stratifying on the target variable.
Parameters
----------
X_train : pd.DataFrame
Training features.
y_train : np.ndarray
Training target.
Returns
-------
Tuple[pd.DataFrame, np.ndarray, pd.DataFrame, np.ndarray]
X_train, y_train, X_val, y_val.
"""
ssf = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
for train_index, val_index in ssf.split(X_train, y_train):
X_train, X_val = X_train.iloc[train_index], X_train.iloc[val_index]
y_train, y_val = y_train[train_index], y_train[val_index]
return X_train, y_train, X_val, y_val
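
# Minimal usage sketch with synthetic data (illustrative only; the real
# pipeline loads data from S3 via load_data):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X_demo = pd.DataFrame(rng.random((100, 3)), columns=['a', 'b', 'c'])
    y_demo = rng.integers(0, 2, size=100)
    X_tr, y_tr, X_val, y_val = stratified_split(X_demo, y_demo)
    # Score random predictions with the custom metric
    print(weighted_ap_score(rng.random(len(y_val)), y_val))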
|
YangWu1227/python-for-machine-learning
|
tree_based/projects/telco_churn_sagemaker/src/custom_utils.py
|
custom_utils.py
|
py
| 4,461 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33627877063
|
# Exercise 2 Mascaro Matteo
import math
limit = 100
limit = str(limit)
print('Welcome! Palindromic numbers: enter any multiplication of two numbers:')
print("Enter the multiplication, e.g. (91 * 99)")
A = input("Enter value A = ")
B = input("Enter value B = ")
"""
print(A)
I can't manage to get an error-checking loop working (to revisit later)
print(B)
"""
var = int(A) * int(B)
print(var)
var = str(var)
if var == var[::-1]:
    print("The input is a palindrome")
else:
    print("The input is not a palindrome")
|
m4skro/programmation-securisee
|
TP1/Palindromiques.py
|
Palindromiques.py
|
py
| 542 |
python
|
fr
|
code
| null |
github-code
|
6
|
70126940349
|
def build_profile(first_name: str, last_name: str, **user_info: str) -> dict:
"""Build a dictionary containing everything we know about a user"""
user_info["first_name"] = first_name
user_info["last_name"] = last_name
return user_info
user_profile = build_profile(
"albert", "einstein", location="princeton", field="physics"
)
print(user_profile)
|
paulr909/python-snippets
|
python-various/functions/build_dict_with_function.py
|
build_dict_with_function.py
|
py
| 369 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28427006920
|
list1 = [1, 3, 5, 7, 100]
# Iterate over the list elements by index in a loop
for a in range(len(list1)):  # range() method
print(a, list1[a], " ", end='')
print()
# Iterate over the list elements with a for loop
for elem in list1:
print(elem, " ", end="")
print()
# Processing the list with enumerate() lets the loop get each element's index and value at the same time
for index, elem in enumerate(list1):  # enumerate() yields the index together with the value
print(index, elem)
list1.append(200)
list1.insert(1, '400a')
list1.extend([1000, 2000])
list1.remove(3)
if 1234 in list1:
list1.remove(1234)
for index, elem in enumerate(list1):  # enumerate() yields the index together with the value
print(index, elem)
for a in range(3, 8):  # range() method
print(a, " ", end='')
print(len(list1))
|
sunhuimoon/Python100Days
|
day07/day0704.py
|
day0704.py
|
py
| 776 |
python
|
zh
|
code
| 0 |
github-code
|
6
|
21929473251
|
import re
from sys import argv
#Steven A
#THE "CORRECT" REGEX:
#([^,]*),"(.*)",([^,]*),\[(.*)\],\[(.*)\],"(.*)",\[(.*)\],\[(.*)\],\[(.*)\],"(.*)"
#id ,"name",release_year,[developers],[publishers],"image",[src],[genres],[consoles],"description"
# this program merges entries based on GID
#please do not use this to overwrite the original csv before checking to make sure your changes are what you wanted
#this program writes all at once at the very end
#priority is given to input1
input1 = argv[1]
input2 = argv[2]
output = argv[3]
FileInput1 = open(input1, 'r', encoding="utf_16")
FileInput2 = open(input2, 'r', encoding="utf_16")
if not FileInput1:
print("error file 1 not read")
if not FileInput2:
print("error file 2 not read")
outputLines = []
List1 = FileInput1.readlines()
List2 = FileInput2.readlines()
outputLines.append(List1[0])
FileData = {}
for line in List2[1:]:#file2 data goes into FileData[GID]
match = re.match(r'([^,]*),"(.*)",([^,]*),\[(.*)\],\[(.*)\],"(.*)",\[(.*)\],\[(.*)\],\[(.*)\],"(.*)"',line)
FileData[match.group(1)]=match
print(match.group(2))#print the name
print("")
for line in List1[1:]:
match = re.match(r'([^,]*),"(.*)",([^,]*),\[(.*)\],\[(.*)\],"(.*)",\[(.*)\],\[(.*)\],\[(.*)\],"(.*)"',line)
if match:
GID=match.group(1)
name=match.group(2)
year = match.group(3)
devs = match.group(4)
pubs = match.group(5)
img = match.group(6)
src = match.group(7)
genres=match.group(8).replace("'","")
console=match.group(9)
desc=match.group(10)
print(name)
print("is updated?",(GID in FileData))
if GID in FileData:
if img=="n/a":
img = FileData[GID].group(6)#take the image from the other csv
if FileData[GID].group(7)!="n/a":
src += ","+FileData[GID].group(7)
            src = src.replace("n/a,", "").replace("n/a", "")  # remove old "empty" marker (str.replace returns a new string)
if FileData[GID].group(8)!="n/a":
if FileData[GID].group(8) not in genres:#the genres may not be unique, so check first
genres+=","+FileData[GID].group(8)
            genres = genres.replace("n/a,", "").replace("n/a", "")  # remove old "empty" marker (str.replace returns a new string)
if desc =="n/a":
desc = FileData[GID].group(10)
outputLines.append(GID+','+'"'+name+'"'+','+year+','+'['+devs+']'+','+'['+pubs+']'+','+'"'+img+'"'+','+'['+src+']'+','+'['+genres+']'+','+'['+console+']'+','+'"'+desc+'"'+'\n')
else:
print("Error in match\n",line)
        break
FileInput1.close()
FileInput2.close()
FileOutput = open(output, 'w', encoding="utf_16")
FileOutput.writelines(outputLines)
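
# Illustrative sanity check of the row regex on a synthetic line (the sample
# values are made up to match the documented column layout):
#   sample = '1,"Tetris",1989,[Nintendo],[Nintendo],"img.png",[wiki],[Puzzle],[NES],"A puzzle game"'
#   re.match(r'([^,]*),"(.*)",([^,]*),\[(.*)\],\[(.*)\],"(.*)",\[(.*)\],\[(.*)\],\[(.*)\],"(.*)"', sample).group(2)
#   -> 'Tetris'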
|
schoolfromage/RetroSounding
|
backend/scrapers/csv_limited_merger.py
|
csv_limited_merger.py
|
py
| 2,464 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32108920209
|
import json
from typing import List
import mlflow
import pandas
from pandas import DataFrame
class SpambugInference(mlflow.pyfunc.PythonModel):
"""
Inference code copied from MLFlow bugs.py
"""
def __init__(self, extraction_pipeline, clf, le):
self.extraction_pipeline = extraction_pipeline
self.clf = clf
self.le = le
def predict(self, context, bugs: List[str]):
"""
Args:
            context: MLflow context where the model artifact is stored.
            bugs: list of JSON-encoded bug dictionaries to classify.
"""
bugs = [json.loads(s) for s in bugs]
probs = self.classify(bugs, True)
indexes = probs.argmax(axis=-1)
suggestions = self.le.inverse_transform(indexes)
return {"probs": probs, "indexes": indexes, "suggestions": suggestions}
def classify(
self,
items,
probabilities=False
):
assert items is not None
assert (
self.extraction_pipeline is not None and self.clf is not None
), "The module needs to be initialized first"
if not isinstance(items, list):
items = [items]
assert isinstance(items[0], (dict, tuple))
X = self.extraction_pipeline.transform(lambda: items)
if probabilities:
classes = self.clf.predict_proba(X)
else:
classes = self.clf.predict(X)
classes = self.overwrite_classes(items, classes, probabilities)
return classes
def overwrite_classes(self, bugs, classes, probabilities):
for i, bug in enumerate(bugs):
if "@mozilla" in bug["creator"]:
if probabilities:
classes[i] = [1.0, 0.0]
else:
classes[i] = 0
return classes
|
mozilla/mlops-platform-spike-library
|
bugbug/mlflow/bugbug/trackers/spambug_inference.py
|
spambug_inference.py
|
py
| 1,839 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35383876896
|
import json
from random import randint
import discord
from discord.ext import tasks, commands
def getData():
with open("data.json", "r") as levelsFile:
return json.loads(levelsFile.read())
def setData(_dict):
with open("data.json", "w") as levelsFile:
levelsFile.write(json.dumps(_dict))
levelxp = [0, 300, 900, 2700, 6500, 14000, 23000, 34000, 48000, 64000, 85000, 100000, 120000, 140000, 165000, 195000, 225000, 265000, 305000, 355000]
class DND(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.ac = randint(1, 20)
self.base = {
"xp": 0,
"level": 0,
"health": 20,
"class": "",
"inventory": [],
"stats": {
"str": {"base": 0, "mod": 0},
"dex": {"base": 0, "mod": 0},
"con": {"base": 0, "mod": 0},
"int": {"base": 0, "mod": 0},
"wis": {"base": 0, "mod": 0},
"cha": {"base": 0, "mod": 0}
}
}
self.hell_ac.start()
def cog_unload(self):
self.hell_ac.cancel()
@commands.Cog.listener()
async def on_message(self, message):
if not message.author.bot:
try:
data = getData()
try:
level = data[message.author.name]["level"]
data[message.author.name]["xp"] += (randint(1, 10))
                except KeyError:
                    data[message.author.name] = json.loads(json.dumps(self.base))  # deep copy so the shared template dict is not mutated
level = data[message.author.name]["level"]
data[message.author.name]["xp"] += randint(1, 10)
bonus = 2 if level < 5 else (3 if level < 9 else (4 if level < 13 else (5 if level < 17 else 6)))
if message.channel.name == "hell":
roll = randint(1, 20) + bonus
if roll <= self.ac:
await message.delete()
await message.channel.send("(%s > %s+%s) **%s** tried to send a message, but failed the roll!" % (self.ac, roll - bonus, bonus, message.author.name))
else:
await message.delete()
await message.channel.send("(%s < %s+%s) **%s**: %s" % (self.ac, roll - bonus, bonus, message.author.name, message.content))
else:
try:
words = data["banned words"]
                    except KeyError:
data["banned words"] = []
words = data["banned words"]
for word in words:
if word in message.content.lower() and not (message.content.startswith("2m.unban") or message.content.startswith("2m.ban")):
data[message.author.name]["health"] -= 1
await message.channel.send("Uh oh! You fucking idiot. You just said '%s'.\n\nDie." % word)
if data[message.author.name]["xp"] >= levelxp[level]:
data[message.author.name]["level"] += 1
await message.channel.send("**%s** levelled up to level %s!" % (message.author.name, data[message.author.name]["level"]))
if data[message.author.name]["health"] <= 0:
                    data[message.author.name] = json.loads(json.dumps(self.base))  # deep copy of the template
await message.channel.send("Oop, **%s** is dead. Now you gotta reroll stats!" % message.author.name)
setData(data)
            except Exception:
                # swallow errors so a malformed message cannot crash the listener
                pass
@tasks.loop(minutes=5)
async def hell_ac(self):
hell = discord.utils.get(self.bot.get_guild(677689511525875715).channels, name="hell")
if randint(1, 100) <= 33:
ac = randint(1, 20)
self.ac = ac
await hell.send("__**Hell's AC is now %s!**__" % ac)
@commands.command(brief="Roll up your stats.")
async def rollstats(self, ctx, *, order):
data = getData()
try:
if not (0 in (data[ctx.author.name]["stats"][key]["base"] for key in data[ctx.author.name]["stats"])):
return await ctx.send("You've already rolled your stats! Theres no going back now.")
        except KeyError:
            data[ctx.author.name] = json.loads(json.dumps(self.base))  # deep copy of the template
order = order.split(" ")
for item in order:
if item not in ["str", "dex", "con", "int", "wis", "cha"]:
return await ctx.send("Please use the correct stat names!\nThey are:\n%s" % ("\n".join(["str", "dex", "con", "int", "wis", "cha"])))
final = []
allrolls = []
for i in range(6):
allrolls.append([randint(1, 6) for x in range(4)])
for arr in range(len(allrolls)):
del allrolls[arr][allrolls[arr].index(min(allrolls[arr]))]
allrolls[arr] = sum(allrolls[arr])
allrolls.sort(reverse=True)
for i in range(6):
num = allrolls[i]
tempnum = allrolls[i]
if tempnum % 2 == 1:
tempnum -= 1
bonuses = {
0: -5,
2: -4,
4: -3,
6: -2,
8: -1,
10: 0,
12: 1,
14: 2,
16: 3,
18: 4,
20: 5,
22: 6,
24: 7,
26: 8,
28: 9,
30: 10
}
final.append("%s -> %s (%s)" % (order[i], num, bonuses[tempnum] if num < 10 else ("+%s" % bonuses[tempnum])))
data[ctx.author.name]["stats"][order[i]] = {"base": num, "mod": bonuses[tempnum]}
await ctx.send("\n".join(final))
setData(data)
@commands.command(brief="Get the AC of hell.")
async def getac(self, ctx):
await ctx.send("Hell's AC is currently **%s**!" % self.ac)
@commands.command(brief="Get information on a level.")
async def levelinfo(self, ctx, level: int):
if not level > 20:
await ctx.send("__**Level %s Information**__\nNeeded XP: %s\nProficiency Bonus: %s" % (level, levelxp[level - 1], "+2" if level < 5 else ("+3" if level < 9 else ("+4" if level < 13 else ("+5" if level < 17 else "+6")))))
else:
await ctx.send("That level is too high! Level 20 is the maximum.")
print("%s GOT LEVEL INFORMATION. (%s)" % (ctx.author.name, level))
@commands.command(brief="Get your current level and XP.")
async def stats(self, ctx):
data = getData()
level = data[ctx.author.name]["level"]
bonus = 2 if level < 5 else (3 if level < 9 else (4 if level < 13 else (5 if level < 17 else 6)))
await ctx.send(
"__**%s's Information**__\nHealth: %s\nLevel: %s\nXP: %s\nProficiency Bonus: +%s\n\n%s" % (ctx.author.name, data[ctx.author.name]["health"], data[ctx.author.name]["level"], data[ctx.author.name]["xp"], bonus, "\n".join(
["%s: %s (%s)" % (key, data[ctx.author.name]["stats"][key]["base"], data[ctx.author.name]["stats"][key]["mod"]) for key in data[ctx.author.name]["stats"]])))
print("%s GOT THEIR LEVEL INFORMATION." % ctx.author.name)
def setup(bot):
bot.add_cog(DND(bot))
|
JONKKKK/Codes
|
2MS2A/dnd.py
|
dnd.py
|
py
| 7,250 |
python
|
en
|
code
| 0 |
github-code
|
6
|
69952730427
|
#!/usr/bin/env python3
import rospy
from sebot_service.srv import GetImage, SetGoal
class Sebot:
def __init__(self):
rospy.init_node('sebot_server')
self.image_msg = None
self.img_srv = rospy.Service("get_image", GetImage, self.get_image)
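        # NOTE: this busy-loop just keeps printing the latest image message;
        # rospy.spin() would be the idiomatic way to keep the node alive.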
while not rospy.is_shutdown():
print(self.image_msg)
def get_image(self, req):
self.image_msg = req.image
print(req.image)
return True
s = Sebot()
|
JiHwonChoi/TEAM_B
|
sebot_service/src/sebot_server.py
|
sebot_server.py
|
py
| 465 |
python
|
en
|
code
| 3 |
github-code
|
6
|
28156207354
|
import torch
from projects.thre3ingan.singans.networks import Thre3dGenerator
from torch.backends import cudnn
cudnn.benchmark = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def test_thre3d_generator() -> None:
batch_size = 1
random_input = torch.randn(batch_size, 128, 64, 64, 64).to(device)
network = Thre3dGenerator().to(device)
print(network)
output = network(random_input)
assert output.shape == (batch_size, 8, 64, 64, 64)
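
# Allow running the test directly as a script (pytest is presumably the usual
# entry point; this guard is just a convenience):
if __name__ == "__main__":
    test_thre3d_generator()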
|
akanimax/3inGAN
|
projects/thre3ingan/singans/tests/test_networks.py
|
test_networks.py
|
py
| 491 |
python
|
en
|
code
| 3 |
github-code
|
6
|
27259802510
|
"""We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""513. Find Bottom Left Tree Value [Two Passes]
"""
from collections import deque
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution:
def height(self, root):
if not root:
return 0
return 1 + max(self.height(root.left), self.height(root.right))
def findBottomLeftValue(self, root):
final_row = self.height(root)
queue = deque()
queue.append((root, 1))
while queue:
node, level = queue.popleft()
if level == final_row:
return node.val
if node.left:
queue.append((node.left, level+1))
if node.right:
queue.append((node.right, level+1))
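
# Minimal illustrative check (not part of the original solution file):
# the tree 1 -> (2 -> 4, 3) has height 3 and its bottom-left value is 4.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left = TreeNode(4)
    print(Solution().findBottomLeftValue(root))  # expected: 4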
|
asperaa/back_to_grind
|
Trees/bottom_left_tree.py
|
bottom_left_tree.py
|
py
| 890 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7983714433
|
import random
random.seed(2023)
# Split the test data into Spanish and English sentences
data_es = []
data_en = []
with open('Bicleaner AI/Full/Paracrawl.AIFull.shortPhrases.threshold05_shuffled.txt', 'r', encoding='utf-8') as file:
for line in file:
columns = line.strip().split('\t')
if len(columns) == 3:
en = columns[1]
es = columns[2]
data_es.append(es)
data_en.append(en)
with open('Data test/Full/prueba.short05.test.en', 'w', encoding='utf-8') as file:
for s in data_en:
file.writelines(s + "\n")
with open('Data test/Full/prueba.short05.test.es', 'w', encoding='utf-8') as file:
for s in data_es:
file.writelines(s + "\n")
# Generate test data from the seed
"""
with open('first_tus/XLEnt.en-es.en', 'r', encoding='utf-8') as file:
data_en = file.readlines()
with open('first_tus/XLEnt.en-es.es', 'r', encoding='utf-8') as file:
data_es = file.readlines()
index = random.sample(range(len(data_en)), 100000)
choosen_lines = [data_en[i] for i in index]
choosen_lines_es = [data_es[i] for i in index]
random.shuffle(data_es)
choosen_lines = data_en[:100000]
choosen_lines_es = data_es[:100000]
with open('first_tus/XLEnt100k.en-es.en', 'w', encoding='utf-8') as file:
file.writelines(choosen_lines)
with open('first_tus/XLEnt100k.en-es.es', 'w', encoding='utf-8') as file:
file.writelines(choosen_lines_es)
"""
|
jairosg/TFM
|
scripts/genTest.py
|
genTest.py
|
py
| 1,515 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1008711212
|
'''Problem 22: Names scores'''
import time
t1 = time.time()
#first get "alphabetical value" of each letter
ALPHA = 'abcdefghijklmnopqrstuvwxyz'.upper()
ALPHA = {c:i+1 for i,c in enumerate(ALPHA)}
def alphaValue(name):
'''adds up alpha values for letters'''
sum1= 0
for letter in name:
sum1 += ALPHA[letter]
return sum1
f = open('names.txt','r') #open the names file
text = f.read() #read it into text form
names = list(eval(text)) #turn into a list
#print(names[:10])
names.sort() #sort the list
#print(names[:10])
def nameValue():
    sum2 = 0
    # enumerate avoids the quadratic list.index lookup and is correct even with duplicate names
    for i, name in enumerate(names):
        sum2 += alphaValue(name) * (i + 1)
    print(sum2)
nameValue()
#print(alphaValue('COLIN'), names.index('COLIN'))
f.close()
t2 = time.time()
print(t2 - t1)
'''Correct answer: 871198282
0.3169882297515869 seconds'''
|
hackingmath/Project-Euler
|
euler22.py
|
euler22.py
|
py
| 913 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6153769301
|
from django.shortcuts import render
from . models import Department, Employee, User, Phone, Book, Store
from django.http import HttpResponse
# Create your views here.
def index(request):
# without relationship:
user = User.objects.get(pk=1)
phone = Phone.objects.get(user_id=user)
# with relationship:
# related to the model Phone
# user = User.objects.get(pk=1).phone
# using related_name = "number"
# returning object
user_phone = User.objects.get(pk=1).number
# returning string
user_phone_str = User.objects.get(pk=1).number.phone_no
print("--------------------------------------")
print(user_phone)
    # reverse: getting the user from the phone
user = Phone.objects.get(id=1).user_id
return HttpResponse(user)
def foreign_key(request):
# using REVERSE !!!IMPORTANT!!!
user = Employee.objects.get(name="Simpson").department_name
user = Employee.objects.get(name="Simpson").department_name.name
# print(user)
# print(type(user))
first_deparment = Department.objects.get(pk=1) #hr
second_deparment = Department.objects.get(pk=2) #accouts
third_deparment = Department.objects.get(pk=5) #purchase
deparments = Employee.objects.filter(department_name=third_deparment)
    # related name !!! IMPORTANT!!! -- the alternative employee_set.all() doesn't work when related_name is set
employees_from_dep = first_deparment.employeees_rel.all()
employees_from_dep = first_deparment.employeees_rel.all().filter()
#print(employees_from_dep)
# related name !!! IMPORTANT!!!
    # after the related name, a double underscore lets you filter on the other model's fields
dep_all_empl = Department.objects.all().filter(employeees_rel__name__startswith="John")
    # if there is NO related name, the next line would work using the model name instead
#dep_all_empl = Department.objects.all().filter(employee__name__startswith="John")
# reverse !!!!!!! VERY IMPORTANT !!!!!!
employees_in_HR = Employee.objects.filter(department_name__name='Accounts')
print(employees_in_HR)
return HttpResponse(employees_in_HR)
# select_related
# https://docs.djangoproject.com/en/3.0/ref/models/querysets/#select-related
def sel_related(request):
# fetching all employees and printing their names #INNER join
employees = Employee.objects.all().select_related('department_name')
#employees = Employee.objects.all()
for i in employees:
print(i.name, i.department_name.name)
return render(request, 'core/stuff.html')
# for debuger
def users(request):
qs = User.objects.all()
return render(request, 'core/users.html',{
"users": qs,
})
def prefetched(request):
books = Book.objects.all()
books = Book.objects.all().prefetch_related('store_set')
    # look through all STORES and check whether THIS BOOK is in each one
for i in books:
print(i.store_set.all())
return render(request, 'core/users.html')
|
oruchkin/biteofpithon
|
django relationships/relation/core/views.py
|
views.py
|
py
| 3,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31127201950
|
# to run, execute this command in the command line:
# python create_plots.py pagecounts-20160802-150000.txt pagecounts-20160803-150000.txt
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
filename1 = sys.argv[1]
filename2 = sys.argv[2]
data1 = pd.read_table(filename1, sep=' ', header=None, index_col=1,
names=['lang', 'page', 'views', 'bytes'])
data1_sort = data1.sort_values(by=['views'], ascending=False)
# print(data1_sort)
data2 = pd.read_table(filename2, sep=' ', header=None, index_col=1,
names=['lang', 'page', 'views', 'bytes'])
data2_sort = data2.sort_values(by=['views'], ascending=False)
data1_sort['views2'] = data2_sort['views']
# print (data1_sort)
# print (data2_sort)
plt.figure(figsize=(10, 5)) # change the size to something sensible
plt.subplot(1, 2, 1) # subplots in 1 row, 2 columns, select the first
plt.plot(data1_sort['views'].values)
plt.title('Popularity Distribution')
plt.xlabel('Rank')
plt.ylabel('Views')
plt.subplot(1, 2, 2) # ... and then select the second
plt.scatter(data1_sort['views'].values, data1_sort['views2'].values)
plt.title('Daily Correlation')
plt.xlabel('Day 1 views')
plt.ylabel('Day 2 views')
plt.xscale('log')
plt.yscale('log')
# plt.show()
plt.savefig('wikipedia_Tom.png')
|
tomliangg/Plotting_Wikipedia_Page_Views
|
create_plots.py
|
create_plots.py
|
py
| 1,324 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28114724866
|
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
# Data
sizes = ["0.5x0.5", "2x2", "5x5", "7x7"]
postgres = [2, 10, 94, 153]
accumulo = [8, 15, 22, 41]
# Plot
plt.plot(sizes, postgres, label="PostgreSQL")
plt.plot(sizes, accumulo, label="Accumulo")
plt.xlabel("Extent(km²)")
plt.ylabel("Read Time(s)")
plt.legend()
# Show the figure
plt.show()
|
KiktMa/gdal_tiff
|
shange/paintsd.py
|
paintsd.py
|
py
| 386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3981685761
|
from django.test import TestCase
from django import forms
from parameterized import parameterized
from MDM.forms import ItemGroupForm
from MDM.bootstrap import INPUT
class ItemGroupFormTest(TestCase):
@parameterized.expand([
('description', 'Descrição do Grupo de Item'),
])
def test_form_labels(self, field, label):
form = ItemGroupForm()
self.assertEqual(form.fields[field].label, label)
@parameterized.expand([
('description', forms.TextInput),
])
def test_form_widgets(self, field, widget):
form = ItemGroupForm()
self.assertIsInstance(form.fields[field].widget, widget)
def test_form_valid_data(self):
form = ItemGroupForm(data={
'description': 'Grupo de Itens',
})
self.assertTrue(form.is_valid())
def test_form_invalid_data(self):
form = ItemGroupForm(data={
'description': '',
})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
@parameterized.expand([
('description', INPUT['class']),
])
def test_form_widgets_attrs(self, field, attrs):
form = ItemGroupForm()
self.assertEqual(form.fields[field].widget.attrs['class'], attrs)
|
tiagomend/flow_erp
|
MDM/tests/test_item_group_form.py
|
test_item_group_form.py
|
py
| 1,264 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27742958751
|
"""
POO
* Elaborar un Programa el cual calcule el costo de produccion, nesecitaras conocer:
- Costo de la materia prima.
- Costo de la mano de obra.
- Cantidad de unidades producidas.
* Mostrar el:
- Costo de produccion total.
- Precio de produccion por unidad.
- El precio del producto al mercado(El doble del precio de produccion).
- Ganancia Generada por unidad.
"""
class produccion():
def __init__(self, _cmp, _cmo, _cup):
self.cmp = _cmp
self.cmo = _cmo
self.cup = _cup
self.cpt = (_cmp + _cmo)
self.ppu = self.cpt / _cup
self.ppm = self.ppu * 2
        self.gpu = self.ppu  # profit per unit: ppm - ppu = 2*ppu - ppu = ppu
def mostrar(self):
print('Costo de la materia prima:', self.cmp)
print('Costo de la mano de obra:', self.cmo)
print('Cantidad de unidades producidas:', self.cup)
print('Costo de produccion total:', self.cpt)
print('Precio de produccion por unidad:', self.ppu)
print('El precio del producto al mercado:', self.ppm)
print('Ganancia Generada por unidad:', self.gpu)
cmp = float(input('Escribe el costo de la materia prima: '))
cmo = float(input('Escribe el costo de la mano de obra: '))
cup = float(input('Escribe la cantidad de unidades producidas: '))
pp = produccion(cmp, cmo, cup)
pp.mostrar()
|
proyecto3erpacial/proyecto3erpacial
|
eje4.py
|
eje4.py
|
py
| 1,340 |
python
|
es
|
code
| 0 |
github-code
|
6
|
70111562109
|
import requests
def run():
api_key = 'f258ca5a16d84339a5f6cdb4c7700756'
query_map = {}
url = 'http://api.weatherbit.io/v2.0/current'
query_map['lat'] = 39.757
query_map['lon'] = -75.742
query_map['key'] = api_key
query_map['lang'] = 'en'
response = requests.get(url,params=query_map).json()
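    # Weatherbit reports temperatures in Celsius; F = C * 9/5 + 32 converts to Fahrenheit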
return [(float(response['data'][0]['temp']) * 9/5) + 32,response['data'][0]['weather']['description']]
run()
|
cthacker-udel/Raspberry-Pi-Scripts
|
py/getcurrweather.py
|
getcurrweather.py
|
py
| 441 |
python
|
en
|
code
| 7 |
github-code
|
6
|
38386704949
|
import connections
import time
import ubinascii
import struct
import math
def byte_to_info(uuid):
    # defaults, returned when the advertisement is not one of ours
    gas_res_d = 0
    press_d = 0
    name = uuid[0:3]
    name_text = ''.join(chr(t) for t in name)
    if name_text == "PyN":
        sensor_id = uuid[7]
        mac = ubinascii.hexlify(uuid[10:16])
        press = ubinascii.hexlify(uuid[8:10])
        press_d = int(press, 16)
        gas_res = ubinascii.hexlify(uuid[3:7])
        gas_res_d = int(gas_res, 16)
    return (name_text, gas_res_d, press_d)
def air_quality_score(hum, gas_res):
gas_reference = 250000
hum_reference = 40
gas_lower_limit = 5000
gas_upper_limit = 50000
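    # note: gas_reference is fixed at 250000 and only clamped below, so the
    # gas_res argument is effectively unused by the current scoring logic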
if (hum >= 38 and hum <= 42):
hum_score = 0.25*100
else:
if (hum < 38):
hum_score = 0.25/hum_reference*hum*100
else:
hum_score = ((-0.25/(100-hum_reference)*hum)+0.416666)*100
if (gas_reference > gas_upper_limit):
gas_reference = gas_upper_limit
if (gas_reference < gas_lower_limit):
gas_reference = gas_lower_limit
gas_score = (0.75/(gas_upper_limit-gas_lower_limit)*gas_reference -(gas_lower_limit*(0.75/(gas_upper_limit-gas_lower_limit))))*100
air_quality_score = hum_score + gas_score
return(air_quality_score)
|
MatiasRaya/IoT-PS
|
Proyecto/EXPOTRONICA/PYTRACK/airq.py
|
airq.py
|
py
| 1,275 |
python
|
en
|
code
| 1 |
github-code
|
6
|
38997636297
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from getpass import getpass
from utility_classes import (
check_version,
get_coin_selection,
get_user_choice,
get_user_number,
UATOM,
UKUJI,
ULUNA,
UOSMO,
UserConfig,
UUSD,
Wallets,
Wallet,
WETH,
)
from utility_constants import (
GAS_ADJUSTMENT_INCREMENT,
GAS_ADJUSTMENT_SWAPS,
FULL_COIN_LOOKUP,
MAX_GAS_ADJUSTMENT,
USER_ACTION_CONTINUE,
USER_ACTION_QUIT
)
def get_user_singlechoice(question:str, user_wallets:dict):
"""
Get a single user selection from a list.
This is a custom function because the options are specific to this list.
"""
label_widths = []
label_widths.append(len('Number'))
label_widths.append(len('Wallet name'))
label_widths.append(len('LUNC'))
label_widths.append(len('USTC'))
for wallet_name in user_wallets:
if len(wallet_name) > label_widths[1]:
label_widths[1] = len(wallet_name)
if ULUNA in user_wallets[wallet_name].balances:
uluna_val = user_wallets[wallet_name].formatUluna(user_wallets[wallet_name].balances[ULUNA], ULUNA)
else:
uluna_val = ''
if UUSD in user_wallets[wallet_name].balances:
ustc_val = user_wallets[wallet_name].formatUluna(user_wallets[wallet_name].balances[UUSD], UUSD)
else:
ustc_val = ''
if len(str(uluna_val)) > label_widths[2]:
label_widths[2] = len(str(uluna_val))
if len(str(ustc_val)) > label_widths[3]:
label_widths[3] = len(str(ustc_val))
padding_str = ' ' * 100
header_string = ' Number |'
if label_widths[1] > len('Wallet name'):
header_string += ' Wallet name' + padding_str[0:label_widths[1] - len('Wallet name')] + ' '
else:
header_string += ' Wallet name '
if label_widths[2] > len('LUNC'):
header_string += '| LUNC' + padding_str[0:label_widths[2] - len('LUNC')] + ' '
else:
header_string += '| LUNC '
if label_widths[3] > len('USTC'):
header_string += '| USTC' + padding_str[0:label_widths[3] - len('USTC')] + ' '
else:
header_string += '| USTC '
horizontal_spacer = '-' * len(header_string)
wallets_to_use = {}
user_wallet = {}
while True:
count = 0
wallet_numbers = {}
print (horizontal_spacer)
print (header_string)
print (horizontal_spacer)
for wallet_name in user_wallets:
wallet:Wallet = user_wallets[wallet_name]
count += 1
wallet_numbers[count] = wallet
if wallet_name in wallets_to_use:
glyph = '✅'
else:
glyph = ' '
count_str = f' {count}' + padding_str[0:6 - (len(str(count)) + 2)]
wallet_name_str = wallet_name + padding_str[0:label_widths[1] - len(wallet_name)]
if ULUNA in wallet.balances:
lunc_str = wallet.formatUluna(wallet.balances[ULUNA], ULUNA, False)
else:
lunc_str = ''
lunc_str = lunc_str + padding_str[0:label_widths[2] - len(lunc_str)]
if UUSD in wallet.balances:
ustc_str = wallet.formatUluna(wallet.balances[UUSD], UUSD, False)
else:
ustc_str = ' '
print (f"{count_str}{glyph} | {wallet_name_str} | {lunc_str} | {ustc_str}")
print (horizontal_spacer + '\n')
answer = input(question).lower()
if answer.isdigit() and int(answer) in wallet_numbers:
wallets_to_use = {}
key = wallet_numbers[int(answer)].name
if key not in wallets_to_use:
wallets_to_use[key] = wallet_numbers[int(answer)]
else:
wallets_to_use.pop(key)
if answer == USER_ACTION_CONTINUE:
if len(wallets_to_use) > 0:
break
else:
print ('\nPlease select a wallet first.\n')
if answer == USER_ACTION_QUIT:
break
    # Get the first (and only) wallet from the list
for item in wallets_to_use:
user_wallet = wallets_to_use[item]
break
return user_wallet, answer
def main():
# Check if there is a new version we should be using
check_version()
# Get the password that decrypts the user wallets
decrypt_password:str = getpass() # the secret password that encrypts the seed phrase
if decrypt_password == '':
print (' 🛑 Exiting...\n')
exit()
# Get the user config file contents
user_config:str = UserConfig().contents()
if user_config == '':
print (' 🛑 The user_config.yml file could not be opened - please run configure_user_wallets.py before running this script.')
exit()
print ('Decrypting and validating wallets - please wait...\n')
# Create the wallet object based on the user config file
wallet_obj = Wallets().create(user_config, decrypt_password)
decrypt_password = None
# Get all the wallets
user_wallets = wallet_obj.getWallets(True)
# Get the balances on each wallet (for display purposes)
for wallet_name in user_wallets:
wallet:Wallet = user_wallets[wallet_name]
wallet.getBalances()
if len(user_wallets) > 0:
print (f'You can make swaps on the following wallets:')
wallet, answer = get_user_singlechoice("Select a wallet number 1 - " + str(len(user_wallets)) + ", 'X' to continue, or 'Q' to quit: ", user_wallets)
if answer == USER_ACTION_QUIT:
print (' 🛑 Exiting...\n')
exit()
else:
print (" 🛑 This password couldn't decrypt any wallets. Make sure it is correct, or rebuild the wallet list by running the configure_user_wallet.py script again.\n")
exit()
# List all the coins in this wallet, with the amounts available:
print ('\nWhat coin do you want to swap FROM?')
coin_from, answer, null_value = get_coin_selection("Select a coin number 1 - " + str(len(wallet.balances)) + ", 'X' to continue, or 'Q' to quit: ", wallet.balances)
if answer == USER_ACTION_QUIT:
print (' 🛑 Exiting...\n')
exit()
available_balance:float = wallet.formatUluna(wallet.balances[coin_from], coin_from)
print (f'This coin has a maximum of {available_balance} {FULL_COIN_LOOKUP[coin_from]} available.')
swap_uluna = get_user_number("How much do you want to swap? (Or type 'Q' to quit) ", {'max_number': float(available_balance), 'min_number': 0, 'percentages_allowed': True, 'convert_percentages': True, 'keep_minimum': False, 'target_denom': coin_from})
if swap_uluna == USER_ACTION_QUIT:
print (' 🛑 Exiting...\n')
exit()
print ('\nWhat coin do you want to swap TO?')
coin_to, answer, estimated_amount = get_coin_selection("Select a coin number 1 - " + str(len(wallet.balances)) + ", 'X' to continue, or 'Q' to quit: ", wallet.balances, False, {'denom':coin_from, 'amount':swap_uluna}, wallet)
if answer == USER_ACTION_QUIT:
print (' 🛑 Exiting...\n')
exit()
estimated_amount = str(("%.6f" % (estimated_amount)).rstrip('0').rstrip('.'))
print (f'You will be swapping {wallet.formatUluna(swap_uluna, coin_from, False)} {FULL_COIN_LOOKUP[coin_from]} for approximately {estimated_amount} {FULL_COIN_LOOKUP[coin_to]}')
complete_transaction = get_user_choice('Do you want to continue? (y/n) ', [])
if complete_transaction == False:
print (' 🛑 Exiting...\n')
exit()
# Create the swap object
swaps_tx = wallet.swap().create(wallet.getPrefix(wallet.address))
# Assign the details:
swaps_tx.swap_amount = int(swap_uluna)
swaps_tx.swap_denom = coin_from
swaps_tx.swap_request_denom = coin_to
swaps_tx.sender_address = wallet.address
swaps_tx.sender_prefix = wallet.getPrefix(wallet.address)
# Bump up the gas adjustment - it needs to be higher for swaps it turns out
swaps_tx.terra.gas_adjustment = float(GAS_ADJUSTMENT_SWAPS)
# Set the contract based on what we've picked
# As long as the swap_denom and swap_request_denom values are set, the correct contract should be picked
use_market_swap = swaps_tx.setContract()
#if swaps_tx.swap_request_denom == UKUJI:
# swaps_tx.max_spread = 0.005
# print ('swap amount:', swaps_tx.swap_amount)
# print ('swap denom:', swaps_tx.swap_denom)
# print ('request denom:', swaps_tx.swap_request_denom)
# print ('sender address:', swaps_tx.sender_address)
# print ('sender prefix:', swaps_tx.sender_prefix)
# print ('use market swap?', use_market_swap)
# print ('max spread:', swaps_tx.max_spread)
#if swaps_tx.swap_request_denom == UOSMO and swaps_tx.sender_prefix != 'terra':
if swaps_tx.swap_request_denom in [UOSMO, UATOM, UKUJI, WETH] or swaps_tx.swap_denom in [UOSMO, UATOM, UKUJI, WETH]:
# This is an off-chain swap. Something like LUNC->OSMO
result = swaps_tx.offChainSimulate()
if result == True:
print (swaps_tx.readableFee())
user_choice = get_user_choice('Do you want to continue? (y/n) ', [])
if user_choice == False:
exit()
result = swaps_tx.offChainSwap()
else:
if use_market_swap == True:
result = swaps_tx.marketSimulate()
if result == True:
print (swaps_tx.readableFee())
user_choice = get_user_choice('Do you want to continue? (y/n) ', [])
if user_choice == False:
exit()
result = swaps_tx.marketSwap()
else:
result = swaps_tx.simulate()
if result == True:
print (swaps_tx.readableFee())
user_choice = get_user_choice('Do you want to continue? (y/n) ', [])
if user_choice == False:
exit()
result = swaps_tx.swap()
#print ('about to broadcast... exiting')
#exit()
if result == True:
swaps_tx.broadcast()
# if swaps_tx.broadcast_result.code == 11:
# while True:
# print (' 🛎️ Increasing the gas adjustment fee and trying again')
# swaps_tx.terra.gas_adjustment += GAS_ADJUSTMENT_INCREMENT
# print (f' 🛎️ Gas adjustment value is now {swaps_tx.terra.gas_adjustment}')
# if use_market_swap == True:
# swaps_tx.marketSimulate()
# print (swaps_tx.readableFee())
# swaps_tx.marketSwap()
# else:
# swaps_tx.simulate()
# print (swaps_tx.readableFee())
# swaps_tx.swap()
# swaps_tx.broadcast()
# if swaps_tx.broadcast_result.code != 11:
# break
# if swaps_tx.terra.gas_adjustment >= MAX_GAS_ADJUSTMENT:
# break
if swaps_tx.broadcast_result is not None and swaps_tx.broadcast_result.code == 32:
while True:
print (' 🛎️ Boosting sequence number and trying again...')
swaps_tx.sequence = swaps_tx.sequence + 1
swaps_tx.simulate()
print (swaps_tx.readableFee())
swaps_tx.swap()
swaps_tx.broadcast()
if swaps_tx is None:
break
# Code 32 = account sequence mismatch
if swaps_tx.broadcast_result.code != 32:
break
if swaps_tx.broadcast_result is None or swaps_tx.broadcast_result.is_tx_error():
if swaps_tx.broadcast_result is None:
print (' 🛎️ The swap transaction failed, no broadcast object was returned.')
else:
print (' 🛎️ The swap transaction failed, an error occurred:')
if swaps_tx.broadcast_result.raw_log is not None:
print (f' 🛎️ {swaps_tx.broadcast_result.raw_log}')
else:
print ('No broadcast log was available.')
else:
print (f' ✅ Swapped amount: {wallet.formatUluna(swaps_tx.swap_amount, swaps_tx.swap_denom)} {FULL_COIN_LOOKUP[swaps_tx.swap_denom]}')
print (f' ✅ Tx Hash: {swaps_tx.broadcast_result.txhash}')
else:
print (' 🛎️ The swap transaction could not be completed')
print (' 💯 Done!\n')
if __name__ == "__main__":
""" This is executed when run from the command line """
main()
|
geoffmunn/utility-scripts
|
swap.py
|
swap.py
|
py
| 12,915 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18964566006
|
#coding: utf-8
#Crispiniano
#Unit 6: How Much Time
def quanto_tempo(horario1,horario2):
h1 = int(horario1[0] + horario1[1])
m1 = int(horario1[3] + horario1[4])
minutos_totais_1 = h1 * 60 + m1
h2 = int(horario2[0] + horario2[1])
m2 = int(horario2[3] + horario2[4])
minutos_totais_2 = h2 * 60 + m2
diferenca = minutos_totais_2 - minutos_totais_1
h3 = diferenca // 60
m3 = diferenca % 60
saida = '%i hora(s) e %i minuto(s)' % (h3, m3)
return saida
assert quanto_tempo("07:15", "09:18") == "2 hora(s) e 3 minuto(s)"
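# additional illustrative checks in the same format (not from the original exercise):
assert quanto_tempo("00:00", "01:30") == "1 hora(s) e 30 minuto(s)"
assert quanto_tempo("10:15", "10:15") == "0 hora(s) e 0 minuto(s)"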
|
almirgon/LabP1
|
Unidade-6/tempo.py
|
tempo.py
|
py
| 532 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
42433734628
|
import pytest
import random
from torchrl.envs import make_gym_env, TransitionMonitor
@pytest.mark.parametrize('spec_id', [
'Acrobot-v1',
'CartPole-v1',
'MountainCar-v0',
'MountainCarContinuous-v0',
'Pendulum-v0',
])
def test_transition_monitor(spec_id: str):
env = TransitionMonitor(make_gym_env(spec_id))
for _ in range(3):
env.reset()
info = env.info
assert not env.is_done
assert len(env.transitions) == 0
assert info.get('len') == 0
assert info.get('return') == 0.0
flushed_transitions = []
while not env.is_done:
env.step(env.action_space.sample())
if random.random() < 0.2: # Flush with probability 0.2
flushed_transitions += env.flush()
flushed_transitions += env.flush()
info = env.info
assert info.get('return') is not None
assert info.get('len') > 0
assert info.get('len') == len(flushed_transitions)
assert len(env.transitions) == 0
env.close()
|
activatedgeek/torchrl
|
torchrl/envs/test_wrappers.py
|
test_wrappers.py
|
py
| 968 |
python
|
en
|
code
| 110 |
github-code
|
6
|
811926906
|
'''Find Leaves of Binary Tree - https://leetcode.com/problems/find-leaves-of-binary-tree/
Given the root of a binary tree, collect a tree's nodes as if you were doing this:
Collect all the leaf nodes.
Remove all the leaf nodes.
Repeat until the tree is empty.
Example 1:
Input: root = [1,2,3,4,5]
Output: [[4,5,3],[2],[1]]
Explanation:
[[3,5,4],[2],[1]] and [[3,4,5],[2],[1]] are also considered correct answers since per each level
it does not matter the order on which elements are returned.'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List, Optional
class Solution:
def findLeaves(self, root: Optional[TreeNode]) -> List[List[int]]:
output = []
def getLeaves(root):
if not root.left and not root.right:
output[-1].append(root.val)
root = None
return root
if root.left:
root.left = getLeaves(root.left)
if root.right:
root.right = getLeaves(root.right)
return root
while root:
output.append([])
root = getLeaves(root)
return output
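# example run (assuming the LeetCode TreeNode definition above is available):
#   root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
#   Solution().findLeaves(root)  # -> [[4, 5, 3], [2], [1]]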
|
Saima-Chaity/Leetcode
|
Tree/Find Leaves of Binary Tree.py
|
Find Leaves of Binary Tree.py
|
py
| 1,265 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70384349309
|
import numpy as np
import scipy.ndimage
import cv2
import matplotlib.pyplot as plt
from matplotlib.colors import LightSource
def rotate_and_crop(arr, ang):
"""Array arr to be rotated by ang degrees and cropped afterwards"""
arr_rot = scipy.ndimage.rotate(arr, ang, reshape=True, order=0)
shift_up = np.ceil(np.arcsin(abs(ang) / 360 * 2 * np.pi) * arr.shape[1])
shift_right = np.ceil(np.arcsin(abs(ang) / 360 * 2 * np.pi) * arr.shape[0])
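    # note: np.arcsin(theta) only approximates np.sin(theta) for small angles and is
    # undefined once the angle exceeds 1 rad (~57 deg), so the crop margins computed
    # above are only reliable for small rotations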
arr_crop = arr_rot[
int(shift_up) : arr_rot.shape[0] - int(shift_up),
int(shift_right) : arr_rot.shape[1] - int(shift_right),
]
return arr_crop
def contourf_to_array(cs, nbpixels_x, nbpixels_y, scale_x, scale_y):
"""Draws filled contours from contourf or tricontourf cs on output array of size (nbpixels_x, nbpixels_y)"""
image = np.zeros((nbpixels_x, nbpixels_y)) - 5
for i, collection in enumerate(cs.collections):
z = cs.levels[i] # get contour levels from cs
for path in collection.get_paths():
verts = (
path.to_polygons()
) # get vertices of current contour level (is a list of arrays)
for v in verts:
# rescale vertices to image size
v[:, 0] = (
(v[:, 0] - np.min(scale_x))
/ (np.max(scale_x) - np.min(scale_x))
* nbpixels_y
)
v[:, 1] = (
(v[:, 1] - np.min(scale_y))
/ (np.max(scale_y) - np.min(scale_y))
* nbpixels_x
)
poly = np.array(
[v], dtype=np.int32
) # dtype integer is necessary for the next instruction
cv2.fillPoly(image, poly, z)
return image
def contourf_to_array_3d(cs, nbpixels_x, nbpixels_y, scale_x, scale_y, levels):
res = np.zeros((nbpixels_x, nbpixels_y, cs.shape[-1])) - 5
for i in range(res.shape[-1]):
cf = plt.contourf(scale_x, scale_y, cs[:, :, i], levels=levels)
res[:, :, i] = np.flip(
contourf_to_array(cf, nbpixels_x, nbpixels_y, scale_x, scale_y), axis=0
)
res[:, :, i][np.where(res[:, :, i] < -4)] = np.nan
plt.close("all")
return res
def create_bounds():
bounds_k = {
'x': {
'min': 137089.2373932857299224,
'max': 168249.9520578108495101,
},
'y': {
'min': 589482.3877100050449371,
'max': 610702.8749795859912410,
},
}
bounds_g = {
'x': {
'min': 129971.5049754020292312,
'max': 170784.9834783510013949,
},
'y': {
'min': 584191.5390384565107524,
'max': 611985.5710535547696054,
},
}
for key, value in bounds_k.items():
bounds_k[key]['delta'] = ((bounds_k[key]['max'] - bounds_k[key]['min']) / (8.94 / 10.22) - (bounds_k[key]['max'] - bounds_k[key]['min'])) / 2
bounds_g[key]['delta'] = ((bounds_g[key]['max'] - bounds_g[key]['min']) / (8.94 / 10.22) - (bounds_g[key]['max'] - bounds_g[key]['min'])) / 2
bounds_k[key]['min'] -= bounds_k[key]['delta']
bounds_k[key]['max'] += bounds_k[key]['delta']
bounds_g[key]['min'] -= bounds_g[key]['delta']
bounds_g[key]['max'] += bounds_g[key]['delta']
return bounds_k, bounds_g
def get_bathymetry_extent(ds, bounds):
print(np.where(ds.x.values >= bounds['x']['min'])[0].min())
x_index_min = np.where(ds.x.values >= bounds['x']['min'])[0].min() - 1
x_index_max = np.where(ds.x.values >= bounds['x']['max'])[0].min()
y_index_min = np.where(ds.y.values >= bounds['y']['min'])[0].max() + 1
y_index_max = np.where(ds.y.values >= bounds['y']['max'])[0].max()
extent = (x_index_min, x_index_max, y_index_min, y_index_max)
return extent
def prep_bathymetry_data(ds, extent):
x_b, y_b = np.array(ds.x[extent[0]:extent[1]]), np.array(ds.y[extent[3]:extent[2]])
bodem = np.array(ds[0, extent[3]:extent[2], extent[0]:extent[1]])
bodem[np.where(bodem == -9999)] = -43.8
return x_b, y_b, bodem
def get_conc_extent(ds, x_b, y_b):
x_min = np.where(x_b >= ds.x.values.min())[0].min() - 1
x_max = np.where(x_b <= ds.x.values.max())[0].max() + 1
y_min = np.where(y_b >= ds.y.values.min())[0].max() + 1
y_max = np.where(y_b <= ds.y.values.max())[0].min() - 1
extent = (x_min, x_max, y_min, y_max)
return extent
def rescale_and_fit_ds(ds, ds_bounds, rescale_size1, rescale_size2, axis=0):
"""
Rescales dataset ds to fit over the satellite image with size rescale_size2
It also fits the dataset values such that the x and y bounds of the dataset are
placed on the right positions over the satellite image
Input:
ds - dataset to rescale and fit
ds_bounds - indices of the bounds of the bathymetry, corresponding to the ds bounds
rescale_size1 - shape of the bathymetry
rescale_size2 - shape of the satellite image
axis - axis of ds over which we want to rescale and fit
Output:
ds_rescaled - dataset with known values on the right positions over the satellite and
nan's everywhere else
"""
xmin, ymin = ds_bounds[0]
xmax, ymax = ds_bounds[1]
ds_sub = np.zeros(rescale_size1)
ds_sub[:] = np.nan
for i in range(ds.shape[axis]):
ds_inter = cv2.resize(ds[i, :, :], dsize=(xmax - xmin, ymin - ymax), interpolation=cv2.INTER_CUBIC)
ds_sub[ymax:ymin, xmin:xmax] = ds_inter
ds_sub2 = np.expand_dims(cv2.resize(ds_sub, dsize=(rescale_size2[1], rescale_size2[0]), interpolation=cv2.INTER_CUBIC), axis=axis)
if i == 0:
ds_rescaled = ds_sub2
else:
ds_rescaled = np.concatenate([ds_rescaled, ds_sub2], axis=axis)
return ds_rescaled
def create_shaded_image(sat, bodem, shape):
ls = LightSource(azdeg=315, altdeg=45)
# Create shade using lightsource
rgb = ls.hillshade(bodem, vert_exag=5, dx=20, dy=20)
# Scale satellite image to bathymetry shapes
sat_scaled = cv2.resize(sat, dsize=(bodem.shape[1], bodem.shape[0]), interpolation=cv2.INTER_CUBIC).astype('float64')
# Add shade to scaled image
img_shade = ls.shade_rgb(sat_scaled, bodem, vert_exag=5, blend_mode='soft')
img_shade = cv2.resize(img_shade, dsize=shape, interpolation=cv2.INTER_CUBIC).astype('float64')
return img_shade
|
openearth/vcl
|
vcl/data.py
|
data.py
|
py
| 6,478 |
python
|
en
|
code
| 2 |
github-code
|
6
|
29476131174
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Fine tune CTC network (CSJ corpus, for dialog)."""
import os
import sys
import time
import tensorflow as tf
from setproctitle import setproctitle
import yaml
import shutil
import tensorflow.contrib.slim as slim
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../../')
from data.read_dataset_ctc import DataSet
from data.read_dataset_ctc_dialog import DataSet as DataSetDialog
from models.ctc.load_model import load
from evaluation.eval_ctc import do_eval_per, do_eval_cer
from evaluation.eval_ctc_dialog import do_eval_fmeasure
from utils.data.sparsetensor import list2sparsetensor, sparsetensor2list
from utils.util import mkdir, join
from utils.parameter import count_total_parameters
from utils.loss import save_loss
from utils.labels.phone import num2phone
from utils.labels.character import num2char
def do_fine_tune(network, optimizer, learning_rate, batch_size, epoch_num,
label_type, num_stack, num_skip, social_signal_type,
trained_model_path, restore_epoch=None):
"""Run training.
Args:
network: network to train
optimizer: adam or adadelta or rmsprop
learning_rate: initial learning rate
batch_size: size of mini batch
epoch_num: epoch num to train
label_type: phone or character
num_stack: int, the number of frames to stack
num_skip: int, the number of frames to skip
social_signal_type: insert or insert2 or insert3 or remove
trained_model_path: path to the pre-trained model
restore_epoch: epoch of the model to restore
"""
# Tell TensorFlow that the model will be built into the default graph
with tf.Graph().as_default():
# Read dataset
train_data = DataSetDialog(data_type='train', label_type=label_type,
social_signal_type=social_signal_type,
num_stack=num_stack, num_skip=num_skip,
is_sorted=True)
dev_data = DataSetDialog(data_type='dev', label_type=label_type,
social_signal_type=social_signal_type,
num_stack=num_stack, num_skip=num_skip,
is_sorted=False)
test_data = DataSetDialog(data_type='test', label_type=label_type,
social_signal_type=social_signal_type,
num_stack=num_stack, num_skip=num_skip,
is_sorted=False)
        # TODO: create these datasets
# eval1_data = DataSet(data_type='eval1', label_type=label_type,
# social_signal_type=social_signal_type,
# num_stack=num_stack, num_skip=num_skip,
# is_sorted=False)
# eval2_data = DataSet(data_type='eval2', label_type=label_type,
# social_signal_type=social_signal_type,
# num_stack=num_stack, num_skip=num_skip,
# is_sorted=False)
# eval3_data = DataSet(data_type='eval3', label_type=label_type,
# social_signal_type=social_signal_type,
# num_stack=num_stack, num_skip=num_skip,
# is_sorted=False)
# Add to the graph each operation
loss_op = network.loss()
train_op = network.train(optimizer=optimizer,
learning_rate_init=learning_rate,
is_scheduled=False)
decode_op = network.decoder(decode_type='beam_search',
beam_width=20)
per_op = network.ler(decode_op)
# Build the summary tensor based on the TensorFlow collection of
# summaries
summary_train = tf.summary.merge(network.summaries_train)
summary_dev = tf.summary.merge(network.summaries_dev)
# Add the variable initializer operation
init_op = tf.global_variables_initializer()
# Create a saver for writing training checkpoints
saver = tf.train.Saver(max_to_keep=None)
# Count total parameters
parameters_dict, total_parameters = count_total_parameters(
tf.trainable_variables())
for parameter_name in sorted(parameters_dict.keys()):
print("%s %d" % (parameter_name, parameters_dict[parameter_name]))
print("Total %d variables, %s M parameters" %
(len(parameters_dict.keys()), "{:,}".format(total_parameters / 1000000)))
csv_steps = []
csv_train_loss = []
csv_dev_loss = []
# Create a session for running operation on the graph
with tf.Session() as sess:
# Instantiate a SummaryWriter to output summaries and the graph
summary_writer = tf.summary.FileWriter(
network.model_dir, sess.graph)
# Initialize parameters
sess.run(init_op)
# Restore pre-trained model's parameters
ckpt = tf.train.get_checkpoint_state(trained_model_path)
if ckpt:
# Use last saved model
model_path = ckpt.model_checkpoint_path
if restore_epoch is not None:
model_path = model_path.split('/')[:-1]
model_path = '/'.join(model_path) + \
'/model.ckpt-' + str(restore_epoch)
else:
raise ValueError('There are not any checkpoints.')
exclude = ['output/Variable', 'output/Variable_1']
variables_to_restore = slim.get_variables_to_restore(
exclude=exclude)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, model_path)
print("Model restored: " + model_path)
# Train model
iter_per_epoch = int(train_data.data_num / batch_size)
if (train_data.data_num / batch_size) != int(train_data.data_num / batch_size):
iter_per_epoch += 1
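        # (ceiling division: a partial final batch still counts as one step)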
max_steps = iter_per_epoch * epoch_num
start_time_train = time.time()
start_time_epoch = time.time()
start_time_step = time.time()
fmean_best = 0
for step in range(max_steps):
# Create feed dictionary for next mini batch (train)
inputs, labels, seq_len, _ = train_data.next_batch(
batch_size=batch_size)
indices, values, dense_shape = list2sparsetensor(labels)
feed_dict_train = {
network.inputs_pl: inputs,
network.label_indices_pl: indices,
network.label_values_pl: values,
network.label_shape_pl: dense_shape,
network.seq_len_pl: seq_len,
network.keep_prob_input_pl: network.dropout_ratio_input,
network.keep_prob_hidden_pl: network.dropout_ratio_hidden,
network.lr_pl: learning_rate
}
# Create feed dictionary for next mini batch (dev)
inputs, labels, seq_len, _ = dev_data.next_batch(
batch_size=batch_size)
indices, values, dense_shape = list2sparsetensor(labels)
feed_dict_dev = {
network.inputs_pl: inputs,
network.label_indices_pl: indices,
network.label_values_pl: values,
network.label_shape_pl: dense_shape,
network.seq_len_pl: seq_len,
network.keep_prob_input_pl: network.dropout_ratio_input,
network.keep_prob_hidden_pl: network.dropout_ratio_hidden
}
# Update parameters & compute loss
_, loss_train = sess.run(
[train_op, loss_op], feed_dict=feed_dict_train)
loss_dev = sess.run(loss_op, feed_dict=feed_dict_dev)
csv_steps.append(step)
csv_train_loss.append(loss_train)
csv_dev_loss.append(loss_dev)
if (step + 1) % 10 == 0:
# Change feed dict for evaluation
feed_dict_train[network.keep_prob_input_pl] = 1.0
feed_dict_train[network.keep_prob_hidden_pl] = 1.0
feed_dict_dev[network.keep_prob_input_pl] = 1.0
feed_dict_dev[network.keep_prob_hidden_pl] = 1.0
# Compute accuracy & \update event file
ler_train, summary_str_train = sess.run([per_op, summary_train],
feed_dict=feed_dict_train)
ler_dev, summary_str_dev, labels_st = sess.run([per_op, summary_dev, decode_op],
feed_dict=feed_dict_dev)
summary_writer.add_summary(summary_str_train, step + 1)
summary_writer.add_summary(summary_str_dev, step + 1)
summary_writer.flush()
# Decode
# try:
# labels_pred = sparsetensor2list(labels_st, batch_size)
# except:
# labels_pred = [[0] * batch_size]
duration_step = time.time() - start_time_step
print('Step %d: loss = %.3f (%.3f) / ler = %.4f (%.4f) (%.3f min)' %
(step + 1, loss_train, loss_dev, ler_train, ler_dev, duration_step / 60))
# if label_type == 'character':
# if social_signal_type == 'remove':
# map_file_path = '../evaluation/mapping_files/ctc/char2num_remove.txt'
# else:
# map_file_path = '../evaluation/mapping_files/ctc/char2num_' + \
# social_signal_type + '.txt'
# print('True: %s' % num2char(labels[-1], map_file_path))
# print('Pred: %s' % num2char(
# labels_pred[-1], map_file_path))
# elif label_type == 'phone':
# if social_signal_type == 'remove':
# map_file_path = '../evaluation/mapping_files/ctc/phone2num_remove.txt'
# else:
# map_file_path = '../evaluation/mapping_files/ctc/phone2num_' + \
# social_signal_type + '.txt'
# print('True: %s' % num2phone(
# labels[-1], map_file_path))
# print('Pred: %s' % num2phone(
# labels_pred[-1], map_file_path))
sys.stdout.flush()
start_time_step = time.time()
# Save checkpoint and evaluate model per epoch
if (step + 1) % iter_per_epoch == 0 or (step + 1) == max_steps:
duration_epoch = time.time() - start_time_epoch
epoch = (step + 1) // iter_per_epoch
print('-----EPOCH:%d (%.3f min)-----' %
(epoch, duration_epoch / 60))
# Save model (check point)
checkpoint_file = os.path.join(
network.model_dir, 'model.ckpt')
save_path = saver.save(
sess, checkpoint_file, global_step=epoch)
print("Model saved in file: %s" % save_path)
start_time_eval = time.time()
if label_type == 'character':
print('■Dev Evaluation:■')
fmean_epoch = do_eval_fmeasure(session=sess, decode_op=decode_op,
network=network, dataset=dev_data,
label_type=label_type,
social_signal_type=social_signal_type)
# error_epoch = do_eval_cer(session=sess,
# decode_op=decode_op,
# network=network,
# dataset=dev_data,
# eval_batch_size=batch_size)
if fmean_epoch > fmean_best:
fmean_best = fmean_epoch
print('■■■ ↑Best Score (F-measure)↑ ■■■')
do_eval_fmeasure(session=sess, decode_op=decode_op,
network=network, dataset=test_data,
label_type=label_type,
social_signal_type=social_signal_type)
# print('■eval1 Evaluation:■')
# do_eval_cer(session=sess, decode_op=decode_op,
# network=network, dataset=eval1_data,
# eval_batch_size=batch_size)
# print('■eval2 Evaluation:■')
# do_eval_cer(session=sess, decode_op=decode_op,
# network=network, dataset=eval2_data,
# eval_batch_size=batch_size)
# print('■eval3 Evaluation:■')
# do_eval_cer(session=sess, decode_op=decode_op,
# network=network, dataset=eval3_data,
# eval_batch_size=batch_size)
else:
print('■Dev Evaluation:■')
fmean_epoch = do_eval_fmeasure(session=sess, decode_op=decode_op,
network=network, dataset=dev_data,
label_type=label_type,
social_signal_type=social_signal_type)
# error_epoch = do_eval_per(session=sess,
# per_op=per_op,
# network=network,
# dataset=dev_data,
# eval_batch_size=batch_size)
if fmean_epoch < fmean_best:
fmean_best = fmean_epoch
print('■■■ ↑Best Score (F-measure)↑ ■■■')
do_eval_fmeasure(session=sess, decode_op=decode_op,
network=network, dataset=test_data,
label_type=label_type,
social_signal_type=social_signal_type)
# print('■eval1 Evaluation:■')
# do_eval_per(session=sess, per_op=per_op,
# network=network, dataset=eval1_data,
# eval_batch_size=batch_size)
# print('■eval2 Evaluation:■')
# do_eval_per(session=sess, per_op=per_op,
# network=network, dataset=eval2_data,
# eval_batch_size=batch_size)
# print('■eval3 Evaluation:■')
# do_eval_per(session=sess, per_op=per_op,
# network=network, dataset=eval3_data,
# eval_batch_size=batch_size)
duration_eval = time.time() - start_time_eval
print('Evaluation time: %.3f min' %
(duration_eval / 60))
start_time_epoch = time.time()
start_time_step = time.time()
duration_train = time.time() - start_time_train
print('Total time: %.3f hour' % (duration_train / 3600))
# Save train & dev loss
save_loss(csv_steps, csv_train_loss, csv_dev_loss,
save_path=network.model_dir)
# Training was finished correctly
with open(os.path.join(network.model_dir, 'complete.txt'), 'w') as f:
f.write('')
def main(config_path, trained_model_path):
restore_epoch = None # if None, restore the final epoch
# Read a config file (.yml)
with open(config_path, "r") as f:
config = yaml.load(f)
corpus = config['corpus']
feature = config['feature']
param = config['param']
if corpus['label_type'] == 'phone':
if corpus['social_signal_type'] in ['insert', 'insert3']:
output_size = 41
elif corpus['social_signal_type'] == 'insert2':
output_size = 44
elif corpus['social_signal_type'] == 'remove':
output_size = 38
elif corpus['label_type'] == 'character':
if corpus['social_signal_type'] in ['insert', 'insert3']:
output_size = 150
elif corpus['social_signal_type'] == 'insert2':
output_size = 153
elif corpus['social_signal_type'] == 'remove':
output_size = 147
# Load model
CTCModel = load(model_type=config['model_name'])
network = CTCModel(batch_size=param['batch_size'],
input_size=feature['input_size'] * feature['num_stack'],
num_cell=param['num_cell'],
num_layer=param['num_layer'],
output_size=output_size,
clip_gradients=param['clip_grad'],
clip_activation=param['clip_activation'],
dropout_ratio_input=param['dropout_input'],
dropout_ratio_hidden=param['dropout_hidden'],
num_proj=param['num_proj'],
weight_decay=param['weight_decay'])
network.model_name = config['model_name'].upper()
network.model_name += '_' + str(param['num_cell'])
network.model_name += '_' + str(param['num_layer'])
network.model_name += '_' + param['optimizer']
network.model_name += '_lr' + str(param['learning_rate'])
if feature['num_stack'] != 1:
network.model_name += '_stack' + str(feature['num_stack'])
network.model_name += '_transfer_' + corpus['transfer_data_size']
# Set save path
network.model_dir = mkdir('/n/sd8/inaguma/result/csj/dialog/')
network.model_dir = join(network.model_dir, 'ctc')
network.model_dir = join(network.model_dir, corpus['label_type'])
network.model_dir = join(network.model_dir, corpus['social_signal_type'])
network.model_dir = join(network.model_dir, network.model_name)
# Reset model directory
if not os.path.isfile(os.path.join(network.model_dir, 'complete.txt')):
tf.gfile.DeleteRecursively(network.model_dir)
tf.gfile.MakeDirs(network.model_dir)
else:
raise ValueError('File exists.')
# Set process name
setproctitle('ctc_csj_dialog_' + corpus['label_type'] + '_' +
param['optimizer'] + '_' + corpus['social_signal_type'] +
'_transfer_' + corpus['transfer_data_size'])
# Save config file
shutil.copyfile(config_path, os.path.join(network.model_dir, 'config.yml'))
sys.stdout = open(os.path.join(network.model_dir, 'train.log'), 'w')
print(network.model_name)
do_fine_tune(network=network,
optimizer=param['optimizer'],
learning_rate=param['learning_rate'],
batch_size=param['batch_size'],
epoch_num=param['num_epoch'],
label_type=corpus['label_type'],
num_stack=feature['num_stack'],
num_skip=feature['num_skip'],
social_signal_type=corpus['social_signal_type'],
trained_model_path=trained_model_path,
restore_epoch=restore_epoch)
sys.stdout = sys.__stdout__
if __name__ == '__main__':
args = sys.argv
if len(args) != 3:
        raise ValueError(
            'Usage: python fine_tune_ctc.py path_to_config path_to_trained_model')
main(config_path=args[1], trained_model_path=args[2])
|
hirofumi0810/tensorflow_end2end_speech_recognition
|
examples/csj/fine_tuning/finetune_ctc_dialog.py
|
finetune_ctc_dialog.py
|
py
| 20,860 |
python
|
en
|
code
| 312 |
github-code
|
6
|
34294677016
|
from django.contrib import admin
from django.urls import path
from final import views
urlpatterns = [
path("", views.index, name="root"),
path("about/", views.about, name="about"),
path("signup/", views.signup, name="signup"),
path("login/", views.loginUser, name="login"),
path("contact/", views.contact, name="contact"),
path("logout/", views.logoutUser, name="logout"),
path("services/", views.services, name="services")
]
|
supratim531/useless-django-app
|
final/urls.py
|
urls.py
|
py
| 455 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18467755288
|
import random
import pandas as pd
#This is where the wagon class will be.
class Wagon():
#One weird variable here may be wagonStructure. This is a boolean which, when set to
#True, means the wagon can move. If it is set to False, the wagon cannot move.
def __init__(self, ration, health, weather, month):
self.ration = ration
self.health = health
self.weather = weather
self.month = month
self.moving = False
self.wagonStructure = True
self.oxen = 0
self.food = 0
self.cloth = 0
self.wheel = 0
self.axle = 0
self.tongue = 0
self.bullets = 0
self.speed = 0
self.distance = 2000
    #This method is what will determine the weather type. In hindsight, I think I should have had some Trail class
    #and, off of that, made the Wagon and Human classes subordinate to it.
def weatherType(self):
weatherType = random.randint(1,3)
if weatherType == 1:
self.weather = "Sunny"
elif weatherType == 2:
self.weather = "Cloudy"
elif weatherType == 3:
self.weather = "Rainy"
#This method will keep track of the speed and subtract that number from the distance each day.
def move(self):
if self.wagonStructure == True:
self.speed = 2.5 * self.oxen
self.distance -= self.speed
self.moving = True
if self.distance == 0:
print("You have reached Oregon!")
    #This will affect the amount of food eaten based on the ration level. I decided to place this method here
    #and not in the human class because one cannot change the individual ration levels (as of now). Thus,
    #since ration and food are properties of the wagon, I figured it would be easier to keep them here.
def eat(self):
if self.ration == "Meager":
self.food -= .25
elif self.ration == "Normal":
self.food -= .5
elif self.ration == "Tons":
self.food -= 1
#This method will determine the overall health for the wagon.
def healthLevel(self, leader, personOne, personTwo, personThree, personFour):
if leader.life > 20 and personOne.life > 20 and personTwo.life > 20 and personThree.life > 20 and personFour.life > 20:
self.health = "Good"
elif leader.life > 10 and personOne.life > 10 and personTwo.life > 10 and personThree.life > 10 and personFour.life > 10:
self.health = "Average"
elif leader.life > 5 and personOne.life > 5 and personTwo.life > 5 and personThree.life > 5 and personFour.life > 5:
self.health = "Poor"
    #This method was probably the most annoying thing to write! It essentially changes the date for the player.
    #It takes in the starting date that the player entered and then increases that date by one day. Not a hard thing to do,
    #but it took a lot of time to find something that was simple to use and which I understood.
def changeDate(self):
startdate = self.month
self.month = pd.to_datetime(startdate) + pd.DateOffset(days=1)
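        # e.g. pd.to_datetime("1848-03-01") + pd.DateOffset(days=1)
        # evaluates to Timestamp('1848-03-02 00:00:00'), one day later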
#This method will determine if a wheel breaks and then asks the player if they want to fix it.
def wagonWheel(self):
wheelBreak = random.randint(1,100)
if wheelBreak > 85:
print("A wagon wheel has broken!")
            self.wagonStructure = False
print("Wheel: " + str(self.wheel))
fix = input("Do you want to fix it? (y/n): ")
if fix == "y" and self.wheel > 0:
print("You fixed the wagon wheel!")
self.wheel -= 1
                self.wagonStructure = True
#This method will determine if an axle breaks and then asks the player if they want to fix it.
def wagonAxle(self):
wheelaxle = random.randint(1,100)
if wheelaxle > 90:
print("A wagon axle has broken!")
print("You will not be able to move until it is fixed!")
            self.wagonStructure = False
print("Axles: " + str(self.axle))
fix = input("Do you want to fix it? (y/n): ")
if fix == "y" and self.axle > 0:
print("You fixed the wagon axle!")
self.axle -= 1
                self.wagonStructure = True
#This method will determine if a tongue breaks and then asks the player if they want to fix it.
def wagonTongue(self):
wheelTongue = random.randint(1,100)
if wheelTongue > 95:
print("A wagon tongue has broken!")
print("You will not be able to move until it is fixed!")
            self.wagonStructure = False
print("Tongues: " + str(self.tongue))
fix = input("Do you want to fix it? (y/n): ")
if fix == "y" and self.tongue > 0:
print("You fixed the wagon axle!")
self.tongue -= 1
                self.wagonStructure = True
    #This method is kind of pointless. Not sure what I would do with clothing, other than the fact that it
    #gets worn out on the journey.
def clothingWorn(self):
self.cloth -= .1
if self.cloth < .5:
print("You are low on clothes!")
#print("Better get new ones soon or you will die!")
|
ravenusmc/trail
|
wagon.py
|
wagon.py
|
py
| 4,906 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8516167540
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
path_Lf3D = "/mn/stornext/d19/RoCS/alma/emissa_sim/linfor3D/outhdf/"
f = {"500nm" : h5py.File(path_Lf3D + "d3t57g44c_v000G_n019_it000_05000_mu1_00_linfor_3D_2.hdf", "r"),
"1mm" : h5py.File(path_Lf3D + "d3t57g44c_v000G_n019_it000_01mm_mu1_00_linfor_3D_2.hdf", "r"),
"3mm" : h5py.File(path_Lf3D + "d3t57g44c_v000G_n019_it000_03mm_mu1_00_linfor_3D_2.hdf", "r")}
F = h5py.File("/mn/stornext/d19/RoCS/svenwe/jonast/data/art/input/tst/d3t57g44_v000G_n019_art_it000_mode1.h5", "r")
fig,ax = plt.subplots(figsize=(8,6))
for i,key in enumerate(f.keys()):
z_CF = np.array(f[key]["contfctz"])*1e-8
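    # contfctz is presumably in cm; the 1e-8 factor converts it to Mm (1 Mm = 1e8 cm)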
k = np.argmax(z_CF)
z_CF = z_CF[:k+1]
CF = np.mean(np.array(f[key]["contfunc"][:k+1,:,:]), axis=(1,2))
ax.plot(z_CF, CF/np.max(CF))
ax.fill_between(z_CF, np.zeros(len(CF)), CF/np.max(CF), alpha=0.5, label=r"$\lambda = $ {:} {:}".format(key[:-2], key[-2:]))
ax.set_axisbelow(True)
ax.grid()
#ax.axvline(x=
ax.legend()
ax.set_xlabel("z [Mm]")
ax.set_ylabel("norm. cont. func.")
figname="mean_CF.pdf"
plt.savefig("figures/"+figname, bbox_inches="tight")
|
jonasrth/MSc-plots
|
mean_CF.py
|
mean_CF.py
|
py
| 1,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17559223931
|
import unittest
from sudoku import Sudoku
class TestUtilities(unittest.TestCase):
def setUp(self):
"""
Init a completed sudoku for testing
"""
self.grid = [[1, 3, 9, 5, 7, 6, 8, 4, 2],
[2, 5, 8, 4, 9, 3, 1, 6, 7],
[7, 4, 6, 2, 8, 1, 9, 5, 3],
[9, 6, 3, 1, 4, 5, 2, 7, 8],
[4, 2, 7, 8, 6, 9, 3, 1, 5],
[8, 1, 5, 7, 3, 2, 6, 9, 4],
[6, 7, 4, 3, 1, 8, 5, 2, 9],
[5, 8, 1, 9, 2, 4, 7, 3, 6],
[3, 9, 2, 6, 5, 7, 4, 8, 1]]
self.sudoku = Sudoku()
def test_check_row_gui(self):
row = 0
self.assertTrue(self.sudoku.utils.check_row_gui(row, self.grid))
def test_check_row_gui_false(self):
row = 0
self.grid[0][3] = 20
self.assertFalse(self.sudoku.utils.check_row_gui(row, self.grid))
def test_check_column_gui(self):
column = 0
self.assertTrue(self.sudoku.utils.check_column_gui(column, self.grid))
def test_check_column_gui_false(self):
column = 0
self.grid[2][0] = 13
self.assertFalse(self.sudoku.utils.check_column_gui(column, self.grid))
def test_check_3x3_grid_gui(self):
locations = [(0, 0), (0, 3), (0, 6), (3, 0), (3, 3),
(3, 6), (6, 0), (6, 3), (6, 6)]
for loc in locations:
self.assertTrue(
self.sudoku.utils.check_3x3_grid_gui(self.grid, loc))
def test_valid_num(self):
self.grid[5][3] = 0
loc = self.sudoku.utils.find_empty_cell(self.grid)
self.assertTrue(self.sudoku.utils.valid_num(self.grid, 7, loc))
def test_find_empty_cell(self):
self.grid[3][8] = 0
empty = self.sudoku.utils.find_empty_cell(self.grid)
self.assertEqual(empty, (3, 8))
def test_find_empty_cell_none(self):
empty = self.sudoku.utils.find_empty_cell(self.grid)
self.assertIsNone(empty)
|
ArttuLe/ot-harjoitustyo
|
src/tests/utilities_test.py
|
utilities_test.py
|
py
| 2,040 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38137944576
|
import dash
from dash import html, dcc
from dash.dependencies import Input, Output
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import pandas as pd
# Load data
df = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/finance-charts-apple.csv")
# Create subplots
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05)
# Add candlestick chart
fig.add_trace(go.Candlestick(x=df['Date'],
open=df['AAPL.Open'],
high=df['AAPL.High'],
low=df['AAPL.Low'],
close=df['AAPL.Close'],
name='Candlestick'),
row=1, col=1)
# Add volume chart
fig.add_trace(go.Bar(x=df['Date'],
y=df['AAPL.Volume'],
name='Volume'),
row=2, col=1)
# Update layout
fig.update_layout(height=600, title_text="Candlestick and Volume Chart")
# Create Dash app
app = dash.Dash(__name__)
# Define dropdown options
dropdown_options = [{'label': 'Hour', 'value': '1H'},
{'label': 'Day', 'value': '1D'},
{'label': 'Week', 'value': '1W'},
{'label': 'Month', 'value': '1M'}]
# Define app layout
app.layout = html.Div(children=[
html.Label('Select timeframe:'),
dcc.Dropdown(id='timeframe-dropdown', options=dropdown_options, value='1H', clearable=False),
dcc.Graph(id='graph', figure=fig),
html.Br(),
])
# Define callback to update chart based on dropdown selection
@app.callback(Output('graph', 'figure'),
[Input('timeframe-dropdown', 'value')])
def update_chart(timeframe):
# Filter data based on selected timeframe
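    # note: the dataset is daily, so these row counts correspond roughly to
    # 12/6/3/1 months of trading days rather than literal hour/day/week spans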
if timeframe == '1H':
df_filtered = df[-252:]
elif timeframe == '1D':
df_filtered = df[-126:]
elif timeframe == '1W':
df_filtered = df[-63:]
elif timeframe == '1M':
df_filtered = df[-21:]
# Create new chart based on filtered data
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05)
fig.add_trace(go.Candlestick(x=df_filtered['Date'],
open=df_filtered['AAPL.Open'],
high=df_filtered['AAPL.High'],
low=df_filtered['AAPL.Low'],
close=df_filtered['AAPL.Close'],
name='Candlestick'),
row=1, col=1)
fig.add_trace(go.Bar(x=df_filtered['Date'],
y=df_filtered['AAPL.Volume'],
name='Volume'),
row=2, col=1)
fig.update(layout_xaxis_rangeslider_visible=False)
return fig
# Run app
if __name__ == '__main__':
app.run_server(debug=True, port=5000)
|
TIIIIIIW/SOFTWARE-DEVELOPMENT-2
|
ML/Data/TestDash.py
|
TestDash.py
|
py
| 2,975 |
python
|
en
|
code
| 1 |
github-code
|
6
|
6951725407
|
import turtle
import time
import random
delay = 0.1
#Score
score =0
highScore=0
#Setting up the screen
window = turtle.Screen()
window.title("Snake Game by Lexiang Pan and Ryan Shen")
window.bgcolor("green")
window.setup(width=600, height = 600)
window.tracer(0)
#Head of the snake:
head = turtle.Turtle()
head.speed(0)
head.shape("square")
head.color("black")
head.penup()
head.goto(0,0)
head.direction = "stop"
#Food of snake
apple = turtle.Turtle()
apple.speed(0)
apple.shape("circle")
apple.color("red")
apple.penup()
apple.goto(0,100)
apple.direction = "stop"
segments = []
#Pen
pen = turtle.Turtle()
pen.speed(0)
pen.shape("square")
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Score: 0 High Score: 0", align="center", font=("Courier", 24, "normal"))
#functions
def going_up():
if head.direction != "down":
head.direction = "up"
def going_down():
if head.direction != "up":
head.direction = "down"
def going_left():
if head.direction != "right":
head.direction = "left"
def going_right():
if head.direction != "left":
head.direction = "right"
#Functions
def snakeMoving():
if head.direction == "up":
y = head.ycor()
head.sety(y + 20)
if head.direction == "down":
y = head.ycor()
head.sety(y - 20)
if head.direction == "left":
x = head.xcor()
head.setx(x - 20)
if head.direction == "right":
x = head.xcor()
head.setx(x + 20)
#Function
def restartGame():
    global score, delay
    time.sleep(1)
    head.goto(0,0)
    head.direction = "stop"
    #hide segments
    for s in segments:
        s.goto(1000,1000)
    #Clear the segments list in place so the main loop sees the change
    segments.clear()
    #reset score
    score = 0
    #reset delay
    delay = 0.1
    pen.clear()
    pen.write("Score: {} High Score: {}".format(score, highScore), align="center", font=("Courier", 24, "normal"))
#Keyboard Binding
window.listen()
window.onkeypress(going_up, "w")
window.onkeypress(going_down, "s")
window.onkeypress(going_left, "a")
window.onkeypress(going_right, "d")
while True:
window.update()
#check for collision with border
if head.xcor()>290 or head.xcor()<-290 or head.ycor()>290 or head.ycor()<-290:
        restartGame()
#check for collision with food
if head.distance(apple) < 20:
#move food to a new coordinate
x = random.randint(-290,290)
y = random.randint(-290,290)
apple.goto(x, y)
#add segment
new_segment = turtle.Turtle()
new_segment.speed(0)
new_segment.shape("square")
new_segment.color("blue")
new_segment.penup()
segments.append(new_segment)
#Shorten delay
delay -=0.001
#increase score
score +=10
if score >highScore:
highScore = score
pen.clear()
pen.write("Score: {} High Score: {}".format(score, highScore), align="center", font=("Courier", 24, "normal"))
#moving the end segments first
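    # each segment takes the position of the one ahead of it, back to front,
    # so the body trails the head by one cell per frame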
for index in range(len(segments)-1, 0, -1):
x = segments[index-1].xcor()
y = segments[index-1].ycor()
segments[index].goto(x, y)
#moving segments to where the head is
if len(segments) > 0:
x = head.xcor()
y = head.ycor()
segments[0].goto(x, y)
snakeMoving()
#check for head collision with body segments
for segment in segments:
if segment.distance(head) < 20:
            restartGame()
time.sleep(delay)
window.mainloop()
|
RyanSh3n/ICS3U1
|
CPT/SnakeGame.py
|
SnakeGame.py
|
py
| 3,567 |
python
|
en
|
code
| 0 |
github-code
|
6
|
43133597312
|
# This script is developed by Team 11 CIS 3760
# Parser - to parse the plain text file into json file
import sys
import re
import json
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import course_util
uOCourseCodeIsolatedPat = re.compile(r'^\w{3}[ ]?\d{4}$')
uOCourseCodePat = re.compile(r'\w{3}[ ]?\d{4}')
uOOneOfCondPat = re.compile(r'One of .+', re.IGNORECASE)
uOAntiReqPat = re.compile(r'The courses? (\w{3}\w?[ ]?\d{4},?[ ]?)+', re.IGNORECASE)
uOCarlAltPat = re.compile(r'\w{3}[ ]?\d{4} \(\w{4}[ ]?\d{4}(?:(?: or | ou | and | et )\w{4}[ ]?\d{4})+\)')
uOOrListPat = re.compile(r'\(\w{3,4}[ ]?\d{4}(?:(?: or | ou )\w{3,4}[ ]?\d{4})\)')
uOOrListNoBracketPat = re.compile(r'\w{3}[ ]?\d{4}(?:(?: or | ou )\w{3}[ ]?\d{4})+')
uOOrCondNoBracketIsoPat = re.compile(r'^\w{3,4}[ ]?\d{4}(?:(?: or | ou )\w{3}[ ]?\d{3,4})[.]?$')
uOEnglishCoursePat = re.compile(r'\w{3}[ ]?\d[1234]\d{2}')
uOAndListIsoPat = re.compile(r'^\w{3,4}[ ]?\d{4}(?:(?: and | et |, )\w{3,4}[ ]?\d{4})+$')
uOAndCondIsoPat = re.compile(r'^\w{3,4}[ ]?\d{4}(?:(?: and | et |, )\w{3,4}[ ]?\d{4})$')
uOAndCondPat = re.compile(r'\w{3,4}[ ]?\d{4}(?:(?: and | et |, )\w{3,4}[ ]?\d{4})')
uOAndOptionPat = re.compile(r'^\(?\w{3}[ ]?\d{4}(?:(?: and | et |, )\w{3}[ ]?\d{4})\)?$')
uOAndListPat = re.compile(r'^\(?\w{3}[ ]?\d{4}(?:(?: and | et |, )\w{3}[ ]?\d{4})+\)?$')
uONotCombineFRPat = re.compile(r'^Les cours \w{3}[ ]?\d{3,4}(?:(?: or | ou | and | et |, )\w{3,4}[ ]?\d{4})+ ne peuvent être combinés pour l\'obtention de crédits[.;]?', re.IGNORECASE)
uONotCombineENGPat = re.compile(r'^\w{3}[ ]?\d{3,4}(?:(?: or | ou | and | et |, )\w{3,4}[ ]?\d{4})+ cannot be combined for units[.;]?', re.IGNORECASE)
uOSingleThirdPartyOptPat = re.compile(r'^\w{3}[ ]?\d{4} \(\w{4}[ ]?\d{4}\)$')
uOThirdPartyCourseListPat = re.compile(r'\(?\w{4}[ ]?\d{4}(?:(?: or | ou | and | et |, )\w{4}[ ]?\d{4})\)?')
uOCreditPrereqENGPat = re.compile(r'^\d+ university units[.;]?$')
uOCreditPrereqFRPat = re.compile(r'^\d+ crédits universitaires[.;]?$')
# splits string by commas not appearing within () or []
stringListSplitterPat = re.compile(r'(?=,\s*(?![^\[\]\[\]]*[\]\]]))(?=,\s*(?![^()\[\]]*[\)\]]))')
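# e.g. re.split on "A, B (C, D), E" breaks at the commas after "A" and after ")"
# but leaves "(C, D)" intact (illustrative input, not from the real calendar data)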
def splitCourseOrCond(raw : str, pattern, coursePat=None) -> list:
courseOrList = []
splitOrCond = re.split(pattern, raw)
for courseCode in splitOrCond:
# remove any parenthesis
courseCode = courseCode.replace('(', '')
courseCode = courseCode.replace(')', '')
if coursePat:
if re.search(pattern, courseCode):
courseOrList.append(courseCode.strip())
else:
courseOrList.append(courseCode.strip())
return courseOrList
def splitOnPeriod(string : str):
splitList = re.split(r'\.', string)
return splitList
def splitOnComma(string : str):
newList = re.split(stringListSplitterPat, string)
for i, item in enumerate(newList):
if item.startswith(', '):
newList[i] = item[2:]
if item == '':
newList.pop(i)
return newList
def normalizeUnicodeString(string : str) -> str:
string = string.replace(u'\xa0', ' ')
# item = unicodedata.normalize('NFKC', item)
string = string.strip()
string = string.replace('Préalable : ', '')
string = string.replace('Préalables : ', '')
string = string.replace('Prerequisite: ', '')
string = string.replace('Prerequisites: ', '')
return string
def parseUOttawaPrereqElm(prereqString : str):
prereqList = []
splitList = splitOnPeriod(prereqString)
# print('splitList:', splitList)
for item in splitList:
item = normalizeUnicodeString(item)
if item == '' or re.search(uOAntiReqPat, item) or re.match(uONotCombineENGPat, item) \
or re.match(uONotCombineFRPat, item):
continue
# Case where or condition list in the form 'One of Calculus and Vectors (MCV4U) or MAT 1339'
if re.match(uOOneOfCondPat, item):
temp = item[7:]
orList = splitCourseOrCond(temp, r' or | ou ')
newList = []
numCourses = len(orList)
for newElem in orList:
newList.append(parseUOttawaPrereqElm(newElem))
prereqList.append((numCourses, newList))
commaSplitList = splitOnComma(item)
for element in commaSplitList:
            # Single isolated course code
if re.match(uOCourseCodeIsolatedPat, element):
prereqList.append(element)
# Message regarding 'cannot be combined' -- ignore case
elif re.match(uONotCombineFRPat, element) or re.match(uONotCombineENGPat, element):
continue
            # permission required case (2 of several) -- rest ignored
elif re.match(r'Permission de l\'Institut', element) \
or re.match(r'permission of the instructor', element):
prereqList.append(element)
elif re.match(uOCreditPrereqENGPat, element) or re.match(uOCreditPrereqFRPat, element):
prereqList.append(element)
elif re.search(r'This course is (?:primarily )?intended', element) \
or re.search(r'principalement aux étudiants', element) \
or re.search(r'cours est destiné', element) or re.search(r'cours ne peut pas', element)\
or re.search(r'an equivalent', element) \
or re.search(r'verify your program', element) \
or re.search(r'cannot count', element):
pass
# case of a list with third party courses
elif re.search(uOThirdPartyCourseListPat, element):
temp = re.split(uOThirdPartyCourseListPat, element)
for item in temp:
if re.search(r'\w{4}[ ]?\d{4}', item):
pass
else:
prereqList.append(parseUOttawaPrereqElm(item))
# case where single course code has third party alternative
elif re.match(uOSingleThirdPartyOptPat, element):
newCourse = re.search(uOCourseCodePat, element).group()
prereqList.append(newCourse)
# single or condition without brackets
elif re.match(uOOrCondNoBracketIsoPat, element):
orList = splitCourseOrCond(element, r' or | ou ')
tempList = []
for item in orList:
if re.match(uOCourseCodeIsolatedPat, item):
tempList.append(item)
# case where something other than a course code is part of an OR group
# which then becomes of length 1
if len(tempList) == 1:
prereqList.append(tempList[0])
elif len(tempList) > 1:
prereqList.append(tempList)
# and list, possibly multiple
elif re.match(uOAndListIsoPat, element):
# single and condition (two courses)
if re.match(uOAndCondIsoPat, element):
andList = splitCourseOrCond(element, r' and | et |, ')
for item in andList:
if re.match(uOCourseCodeIsolatedPat, item):
prereqList.append(item)
# Ontario Highschool course code
elif re.search(r'[ \(][A-Z]{3}[ ]?\d[A-Z]', element):
newItem = re.search(r'[ \(][A-Z]{3}[ ]?\d[A-Z]', element).group()
if newItem.startswith('(') or newItem.startswith(' '):
newItem = newItem[1:]
prereqList.append(newItem)
# check if brackets surrounding text exist
elif re.search(r'\(.+\)', element):
#case where there is an OR list
if re.match(uOOrListPat, element):
prereqList.append(splitCourseOrCond(element, r' or | ou ', uOCourseCodePat))
# check if a uOttawa course code exists
elif re.search(uOCourseCodePat, element):
#split by commas outside of brackets
bracketSplitList = re.split(stringListSplitterPat, element)
tempList = []
for item in bracketSplitList:
                    # pieces joined by 'or'/'ou' are split into alternatives below
                    if re.search(r' or | ou ', item):
splitList = splitCourseOrCond(item, r' or | ou ', uOCourseCodePat)
for ele in splitList:
tempList.append(parseUOttawaPrereqElm(ele))
# filter out coreq cases
elif re.search(r'coreq', item) or re.search(r'concomitant', item):
pass
# if starting with a uOttawa course code, add it
elif re.match(r'^[a-zA-Z]{3}[ ]?\d{4}', item):
prereqList.append(re.match(r'^[a-zA-Z]{3}[ ]?\d{4}', item).group())
prereqList.append(tempList)
# filter everything else
else:
pass
return prereqList
def parseUOttawaPrereqs(courseData : list):
for i, element in enumerate(courseData):
# print('subject:', element['course_name'])
prereqString = element['prereqs']
# print('prereqstring:', prereqString)
if len(prereqString) == 0:
continue
prereqList = []
for ele in prereqString:
# if it is not an english course split and discard english translations if they exist
# potentially breaking if the ` / ` sequence exists for a different purpose
if not re.match(uOEnglishCoursePat, element['course_num']):
ele = ele.split(' / ', 2)[0]
prereq = parseUOttawaPrereqElm(ele)
if len(prereq) == 1 and prereq[0] == '':
pass
else:
if isinstance(prereq, list):
for item in prereq:
prereqList.append(item)
else:
prereqList.append(prereq)
#
#TODO: convert prereq to new object as per #115
#print(newPrereq)
#
courseData[i]['prereqs'] = prereqList
##################################
if __name__ == "__main__":
    if len(sys.argv) != 3:
exit('Invalid Arguments')
fileIn = sys.argv[1] # plain text file
fileOut = sys.argv[2] # output JSON file
tempList = []
courseDataList = course_util.get_courses(fileIn)
parseUOttawaPrereqs(courseDataList)
    json_object = json.dumps(courseDataList, indent=2, ensure_ascii=False)  # serialize to JSON
    # Write to the output JSON file
    with open(fileOut, "w") as outfile:
outfile.truncate(0)
outfile.write(json_object)
|
jessendasilva1/UniveristySearch
|
graphing/util/parser/ottawaCourseParser.py
|
ottawaCourseParser.py
|
py
| 11,463 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22168670744
|
from odoo import models, api
from odoo.addons.l10n_ar.models.account_fiscal_position import AccountFiscalPosition
@api.model
def _get_fiscal_position(self, partner, delivery=None):
company = self.env.company
if company.country_id.code == "AR":
self = self.with_context(
company_code='AR',
l10n_ar_afip_responsibility_type_id=partner.l10n_ar_afip_responsibility_type_id.id)
return super(AccountFiscalPosition, self)._get_fiscal_position(partner, delivery=delivery)
AccountFiscalPosition._get_fiscal_position = _get_fiscal_position
class AccountFiscalPositionMp(models.Model):
_inherit = 'account.fiscal.position'
@api.model
def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
""" Take into account the partner afip responsibility in order to auto-detect the fiscal position """
if self._context.get('company_code') == 'AR':
args += [('l10n_ar_afip_responsibility_type_ids', '=', self._context.get('l10n_ar_afip_responsibility_type_id'))]
return super()._search(args, offset, limit, order, count=count, access_rights_uid=access_rights_uid)
def _onchange_afip_responsibility(self):
return {}
@api.model
def _get_fpos_by_region(self, country_id=False, state_id=False, zipcode=False, vat_required=False):
if country_id and 'website_id' in self._context and 'l10n_ar_afip_responsibility_type_id' not in self._context:
company = self.env['res.company'].browse(self._context.get('force_company', self.env.company.id))
if company.country_id.code == 'AR':
self = self.with_context(company_code='AR')
        # Odoo only matches fiscal positions if the partner has a country; many of our
        # customers have partners with country_id = False, so for AR, if no country is
        # configured we use Argentina for the fiscal position autodetection
if not country_id and 'l10n_ar_afip_responsibility_type_id' in self._context:
country_id = self.env.ref('base.ar').id
return super()._get_fpos_by_region(country_id=country_id, state_id=state_id, zipcode=zipcode, vat_required=vat_required)
|
ingadhoc/odoo-argentina
|
l10n_ar_ux/models/account_fiscal_position.py
|
account_fiscal_position.py
|
py
| 2,196 |
python
|
en
|
code
| 89 |
github-code
|
6
|
36011166018
|
# encoding: utf8
# Import local files:
import rois as ROIS
import gui_functions as GUIF
import structure_set_functions as SSF
from tkinter import messagebox
# Clinical Goal class
class ClinicalGoal(object):
def __init__(self, name, criteria, type, tolerance, value, priority):
self.name = name
self.criteria = criteria
self.type = type
self.tolerance = tolerance
self.value = value
self.priority = priority
# Applies the clinical goal object to a given EvaluationSetup.
# Parameters:
# es - A RayStation EvaluationSetup instance, in which the clinical goal is to be created
  #   normalized_tolerance - an alternative to the original tolerance (AcceptanceLevel), e.g. recalculated as a percentage value of the prescription dose.
  #   normalized_value - an alternative to the original dose value (ParameterValue), e.g. recalculated as a percentage value of the prescription dose.
def apply_to(self, es, normalized_tolerance = None, normalized_value = None):
# Use preset values if normalized arguments are not given:
if normalized_tolerance is None:
normalized_tolerance = self.tolerance
if normalized_value is None:
normalized_value = self.value
try:
if self.type == 'AverageDose':
# When clinical goal is of type AverageDose, we do not use the ParameterValue when invoking the RayStation AddClinicalGoal function:
es.AddClinicalGoal(RoiName = self.name, GoalCriteria = self.criteria, GoalType = self.type, AcceptanceLevel = normalized_tolerance, Priority = self.priority)
else:
# Call AddClinicalGoal function with ParameterValue:
es.AddClinicalGoal(RoiName = self.name, GoalCriteria = self.criteria, GoalType = self.type, AcceptanceLevel = normalized_tolerance, ParameterValue = normalized_value, Priority = self.priority)
except Exception as e:
GUIF.handle_error_on_clinical_goal_creation(self, normalized_tolerance, normalized_value, e)
# Gives a text representation of the clinical goal object.
def text(self):
return f"Name: {self.name}\nCriteria: {self.criteria}\nType: {self.type}\nTolerance: {self.tolerance}\nValue: {self.value}\nPriority: {self.priority}"
# Types:
volume_at_dose = 'VolumeAtDose'
abs_volume_at_dose = 'AbsoluteVolumeAtDose'
dose_at_abs_volume = 'DoseAtAbsoluteVolume'
dose_at_volume = 'DoseAtVolume'
average_dose = 'AverageDose'
homogeneity_index = 'HomogeneityIndex'
conformity_index = 'ConformityIndex'
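# Usage sketch (hypothetical values; 'es' would be a RayStation EvaluationSetup instance):
# cg = ClinicalGoal(name='SpinalCord', criteria='AtMost', type=dose_at_abs_volume,
#                   tolerance=5000, value=0.03, priority=2)
# cg.apply_to(es)  # or e.g. cg.apply_to(es, normalized_tolerance=4500)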
# Sets up clinical goals.
# Creates clinical goals in RayStation from clinical goal objects from the given Site,
# using the given prescription to determine target clinical goals as well as doing EQD2 conversion on OAR clinical goals.
def setup_clinical_goals(ss, es, site, prescription, target):
for cg in site.target_clinical_goals:
# Make sure corresponding ROI exists before trying to create clinical goal:
if SSF.has_roi(ss, cg.name):
if cg.name in [ROIS.external.name, ROIS.igtv.name, ROIS.gtv.name] and cg.criteria == 'AtMost' and cg.tolerance != 5000:
cg.apply_to(es, normalized_tolerance = round(cg.tolerance*prescription.total_dose*100,0))
elif cg.name in [ROIS.ctv_sb.name, ROIS.ptv_sbc.name] and target != ROIS.ctv_sb.name or cg.tolerance == 5000 or cg.type == homogeneity_index:
cg.apply_to(es)
elif cg.type == conformity_index:
cg.apply_to(es, normalized_value = round(cg.value*prescription.total_dose*100,0))
# Attempt fix of VolumeAtDose for targets (had to implement for Breast SIB CTVp-CTVsb):
elif cg.type == volume_at_dose:
cg.apply_to(es)
else:
cg.apply_to(es, normalized_tolerance = round(cg.tolerance*prescription.total_dose*100,0))
else:
# Missing ROI:
GUIF.handle_missing_roi_for_clinical_goal(cg.name)
for cg in site.oar_clinical_goals:
# Make sure corresponding ROI exists before trying to create clinical goal:
if SSF.has_roi(ss, cg.name):
if cg.type in [dose_at_volume, dose_at_abs_volume, average_dose]:
cg.apply_to(es, normalized_tolerance = round(cg.tolerance.equivalent(prescription.nr_fractions)*100,0))
else:
cg.apply_to(es, normalized_value = round(cg.value.equivalent(prescription.nr_fractions)*100,0))
else:
# Missing ROI:
GUIF.handle_missing_roi_for_clinical_goal(cg.name)
|
dicom/raystation-scripts
|
rt_classes/clinical_goal.py
|
clinical_goal.py
|
py
| 4,341 |
python
|
en
|
code
| 40 |
github-code
|
6
|
25911867612
|
from Solution import Solution
class P004(Solution):
def is_palindromic(self, number):
number_as_string = str(number)
reverse = number_as_string[::-1]
if reverse == number_as_string:
return True
else:
return False
def solve(self):
self.problem_number = 4
start = 99
finish = 999
result = 0
        i = finish
        while i > start:
for j in range(finish, start, -1):
number = i * j
if self.is_palindromic(number) and (number > result):
result = number
break
i -= 1
return result
def main():
P004().run()
if __name__ == "__main__":
main()
|
TalaatHarb/project-euler-100
|
python-project-euler-100/p004.py
|
p004.py
|
py
| 783 |
python
|
en
|
code
| 2 |
github-code
|
6
|
2768054561
|
# add is amortized O(1): it only appends to stack_1.
# remove is amortized O(1): each element is moved from stack_1 to stack_2 at most once;
# remove pops from stack_2, refilling it from stack_1 only when stack_2 is empty.
class QueueStack:
def __init__(self):
self.stack_1 = []
self.stack_2 = []
    def add(self, item):  # enqueue: push the item onto stack_1
self.stack_1.append(item)
return (self.stack_1)
    def remove(self):  # dequeue: pop from stack_2, refilling it from stack_1 when empty
if len(self.stack_1) == 0 and len(self.stack_2) == 0:
print("Queue is empty")
else:
if len(self.stack_2) == 0 and len(self.stack_1) > 0:
while len(self.stack_1) > 0:
self.stack_2.append(self.stack_1.pop())
print(self.stack_2.pop())
queue = QueueStack()
queue.remove()
queue.add(1)
queue.add(2)
queue.add(3)
queue.remove()
queue.add(4)
queue.remove()
queue.remove()
queue.add(5)
queue.remove()
queue.remove()
queue.remove()
# it prints "Queue is empty"
# it returns 1
# it returns 2
# it returns 3
# it returns 4
# it returns 5
# it prints "Queue is empty"
|
CarlBorillo/CECS-274
|
CECS 274 PROJ 1/CECS 274 PROJ 1/1_5_queue.py
|
1_5_queue.py
|
py
| 1,148 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34398796086
|
from typing import List
import docx
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
class DocxIngestor(IngestorInterface):
allowed_extensions = ['docx']
@classmethod
def parse(cls, path: str) -> List[QuoteModel]:
if not cls.can_ingest(path):
raise Exception('Cannot Ingest Exception')
quotes = []
doc = docx.Document(path)
for para in doc.paragraphs:
if para.text != "":
parsed = para.text.split('-')
parsed[0] = parsed[0].strip(' ').strip('"')
parsed[1] = parsed[1].strip(' ')
new_quote = QuoteModel(parsed[0], parsed[1])
quotes.append(new_quote)
return quotes
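# Usage sketch (hypothetical path; paragraphs are expected to look like '"body" - author'):
# quotes = DocxIngestor.parse('./_data/DogQuotes/DogQuotesDOCX.docx')
# print(quotes[0])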
|
KosziDrimi/Meme-Generator-project
|
QuoteEngine/DocxIngestor.py
|
DocxIngestor.py
|
py
| 800 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12138793296
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.forms import ModelForm
from .models import *
class CustomUserCreationForm(UserCreationForm):
class Meta:
model=Userreg
fields=('username','email','phoneno','password1','password2')
def clean(self):
val = super(CustomUserCreationForm, self).clean()
email = val.get("email")
if email == "":
self.add_error('email','this field is required')
return val
class EditUserForm(forms.ModelForm):
class Meta:
model=Userreg
fields=('username','email','phoneno')
class Productform(ModelForm):
class Meta:
model = Product
fields = ('product_name','description','category','mrp_price','stocks','product_image1','product_image2','product_image3','product_image4','descriptionone','descriptiontwo','descriptionthree','descriptionfour')
labels = {
'product_name':'Product name',
'description' : 'Description',
'descriptionone' : 'Specification 1',
'descriptiontwo' : 'Specification 2',
'descriptionthree' : 'Specification 3',
'descriptionfour' : 'Specification 4',
'category' : 'Category',
'mrp_price' : 'Price',
'stocks' : 'InStock',
'product_image1' : 'Cover Image 1',
'product_image2' : 'Cover Image 2',
'product_image3' : 'Cover Image 3',
'product_image4' : 'Cover Image 4',
}
class CategoryForm(forms.ModelForm):
class Meta:
model=Category
fields = '__all__'
class EditCategoryForm(forms.ModelForm):
class Meta:
model=Category
fields = '__all__'
class AddressForm(ModelForm):
class Meta:
model = Address
fields = ('type','first_name','last_name','mobile','email','address_lane_1','address_lane_2','city','district','state','country','pincode')
labels = {
'type' : 'Address Type',
'first_name':'First name',
'last_name' : 'Last name',
'mobile' : 'Mobile',
'address_lane_1' : 'Address Lane 1',
            'address_lane_2' : 'Address Lane 2',
'city' : 'City',
'state' : 'State',
'country' : 'Country',
'pincode' : 'Pincode',
}
class DateInput(forms.DateTimeInput):
input_type = 'date'
# class CouponForm(ModelForm):
# class Meta:
# model=Coupon
# fields = ('coupon_title','coupon_code','coupon_limit','coupn_offer')
# labels = {
# 'coupon_title' : 'Coupon Title',
# 'coupon_code':'Coupon Code',
# 'coupon_limit' : 'Coupon Limit',
# 'coupn_offer' : 'Coupon Offer Price',
# }
class CouponApplyForm(forms.ModelForm):
class Meta:
model = CouponCode
fields = ['code','valid_from','valid_to','discount','active']
widgets = {
'valid_from': DateInput(),
'valid_to':DateInput(),
}
def __init__(self,*args,**kwargs):
super(CouponApplyForm, self).__init__(*args, **kwargs)
class EditCouponForm(forms.ModelForm):
class Meta:
model=CouponCode
fields=('code','valid_from','valid_to','discount','active')
class ProductOfferForm(forms.ModelForm):
class Meta:
model = ProductOffer
fields = ['code','product_id', 'valid_from','valid_to','discount','is_active']
widgets = {
'valid_from': DateInput(),
'valid_to':DateInput(),
}
def __init__(self,*args,**kwargs):
super(ProductOfferForm, self).__init__(*args, **kwargs)
class CategoryOfferForm(forms.ModelForm):
class Meta:
model = CategoryOffer
fields = ['code','category_id', 'valid_from','valid_to','discount','is_active']
widgets = {
'valid_from': DateInput(),
'valid_to': DateInput(),
}
def __init__(self,*args,**kwargs):
super(CategoryOfferForm, self).__init__(*args, **kwargs)
class EditCouponCatForm(forms.ModelForm):
class Meta:
model=CategoryOffer
fields=('code','category_id', 'valid_from','valid_to','discount','is_active')
class EditProductOffer(forms.ModelForm):
class Meta:
model=ProductOffer
fields=('code','product_id', 'valid_from','valid_to','discount','is_active')
|
baadhira/shopify-ecommerce
|
adminapp/forms.py
|
forms.py
|
py
| 4,635 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27615702847
|
"""
Get the 10 most popular (highest-rated) movie/series titles for each genre.
Input files: title.basics.tsv.gz, title.ratings.tsv.gz
"""
from pyspark import SparkConf
from pyspark.sql import SparkSession
import pyspark.sql.types as t
import pyspark.sql.functions as f
from pyspark.sql import Window
def task8():
spark_session = (SparkSession.builder
.master("local")
.appName("task app")
.config(conf=SparkConf())
.getOrCreate())
schema_title_basics = t.StructType([
t.StructField("tconst", t.StringType(), nullable=True),
t.StructField("titleType", t.StringType(), nullable=True),
t.StructField("primaryTitle", t.StringType(), nullable=True),
t.StructField("originalTitle", t.StringType(), nullable=True),
t.StructField("isAdult", t.StringType(), nullable=True),
t.StructField("startYear", t.IntegerType(), nullable=True),
t.StructField("endYear", t.IntegerType(), nullable=True),
t.StructField("runtimeMinutes", t.IntegerType(), nullable=True),
t.StructField("genres", t.StringType(), nullable=True),
])
schema_ratings_basics = t.StructType([
t.StructField("tconst", t.StringType(), nullable=True),
t.StructField("averageRating", t.DoubleType(), nullable=True),
t.StructField("numVotes", t.IntegerType(), nullable=True)
])
file_read_basics = r'.\Data\input\title.basics.tsv.gz'
file_read_ratings = r'.\Data\input\title.ratings.tsv.gz'
from_csv_basics_df = spark_session.read.csv(
file_read_basics, header=True, nullValue='null', sep=r'\t', schema=schema_title_basics)
from_csv_ratings_df = spark_session.read.csv(
file_read_ratings, header=True, nullValue='null', sep=r'\t', schema=schema_ratings_basics)
temp_df1 = from_csv_basics_df.withColumn("genres", f.explode(f.split(f.col("genres"), ",")))
temp_df1 = temp_df1.select("tconst", "titleType", "primaryTitle", "genres")
temp_df2 = from_csv_ratings_df.select("tconst", "averageRating")
temp_df3 = temp_df1.join(temp_df2, "tconst")
window = (Window.orderBy(f.desc("genres"), f.desc("averageRating")).partitionBy("genres"))
from_csv_df_task8 = temp_df3.withColumn("Rating_genre", f.row_number().over(window)).where(f.col("Rating_genre") <= 10)
#from_csv_df_task8.show(100)
file_write = r'.\Data\output\task08'
from_csv_df_task8.write.csv(file_write, header=True, mode="overwrite")
return 0
|
Tetyana83/spark
|
task8.py
|
task8.py
|
py
| 2,591 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42242066819
|
from random import Random
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
class Partition:
def __init__(self, data, index):
self.data = data
self.index = index
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
data_idx = self.index[idx]
return self.data[data_idx]
class DataPartitioner:
def __init__(self, data, sizes=[1], seed=1340):
self.data = data
self.partitions = []
rng = Random()
rng.seed(seed)
data_len = len(data)
indexes = list(range(data_len))
rng.shuffle(indexes)
for part in sizes:
part_len = int(part * data_len)
self.partitions.append(indexes[0: part_len])
indexes = indexes[part_len:]
def use(self, rank):
return Partition(self.data, self.partitions[rank])
def get_mnist(data_dir, rank, size):
trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(0.1307, 0.3081),
])
batch_size = 128
num_workers = 8
download = True
dataset_train = datasets.MNIST(root=data_dir, train=False,
transform=trans,
download=download)
batch_size_part = int(batch_size / size)
partition_sizes = [1.0 / size for _ in range(size)]
paritition = DataPartitioner(dataset_train, partition_sizes)
paritition = paritition.use(rank)
train_data = DataLoader(dataset=paritition,
batch_size=batch_size_part,
num_workers=num_workers,
shuffle=True)
print('data shape', next(iter(train_data))[0].shape)
return train_data
if __name__ == '__main__':
data = get_mnist('~/data/', 0, 3)
|
DragonChen-TW/torch_DDP
|
data_partition.py
|
data_partition.py
|
py
| 1,832 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36551632681
|
import matplotlib.pyplot as plt
from DataHandler import DataHandler
from LinearClassifier import LinearClassifier
if __name__ == '__main__':
    # generate normally distributed data (1000 samples per class)
data_handler = DataHandler()
class0Dataset = data_handler.get2DGaussian(1000, [-2, -2])
class1Dataset = data_handler.get2DGaussian(1000, [2, 2])
    # label the data
class0Dataset = data_handler.label(class0Dataset, 0)
class1Dataset = data_handler.label(class1Dataset, 1)
#shuffling the data
dataset = data_handler.shuffle(class0Dataset + class1Dataset)
###############################################################
classifier = LinearClassifier()
print("initial weights : ", classifier.weights)
print("initial bais : ", classifier.bais)
actual = [row[-1] for row in dataset]
pridected = [classifier.predict(row) for row in dataset]
print("Accuracy before training : %.2f%%\n" %
data_handler.accuracy(actual, pridected))
classifier.plot()
plt.show()
learning_rate = 0.01
n_folds = 5
n_epoch = 2
scores = data_handler.evaluate_model(dataset, classifier, n_folds, learning_rate, n_epoch)
print('Scores: %s' % scores)
print('Average Accuracy: %.2f%%' % (sum(scores) / float(len(scores))))
print("final weights : ", classifier.weights)
print("final bais : ", classifier.bais)
# plot results
x, y, label = zip(*class0Dataset)
X, Y, label = zip(*class1Dataset)
plt.plot(x, y, 'x')
plt.plot(X, Y, 'x')
classifier.plot()
plt.show()
|
Mustapha-Belkacim/Linear-classifier
|
main.py
|
main.py
|
py
| 1,623 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40113246551
|
import os
import sys
import ruamel.yaml as yaml
import llnl.util.tty as tty
import llnl.util.lang
import spack.repo
import spack.cmd.common.arguments as arguments
from spack.cmd import display_specs
from spack.filesystem_view import filter_exclude
from spack.build_systems.python import PythonPackage
import spack.util.spack_yaml as syaml
from spack.util.spack_yaml import syaml_dict, syaml_list
description = "create a package.yaml from installed packages"
section = "administration"
level = "long"
class PackagesDumper(syaml.OrderedLineDumper):
"""Customization to match common packages.yaml style
"""
def represent_list(self, seq, flow_style=None):
"""Impose an arbitrary length limit up to flow lists
"""
res = super(PackagesDumper, self).represent_list(seq)
tot_len = sum(len(e.value) for e in res.value)
res.flow_style = tot_len < 60
return res
PackagesDumper.add_representer(syaml_list, PackagesDumper.represent_list)
def setup_parser(sp):
scopes = spack.config.scopes()
sp.add_argument('-f', '--format',
help="specify format for path/module keys",
metavar="FMT", default='$_$@')
sp.add_argument('-d', '--dependencies',
help="add selected dependencies to the specs",
action='store_true')
sp.add_argument('-m', '--module',
choices=spack.modules.module_types.keys(),
default=None,
help="point to modules generated for MOD",
metavar="MOD")
sp.add_argument("--scope", choices=scopes,
default=spack.config.default_modify_scope(),
help="configuration scope to modify.")
sp.add_argument("-v", "--variants", choices=('all', 'changed'),
default='all',
help="which variant flags to store: only changed ones or all (default)")
arguments.add_common_arguments(sp, ['tags', 'constraint'])
sp.add_argument('--exclude', action='append', default=[],
help="exclude packages with names matching the given regex pattern")
sp.add_argument('--explicit',
help='export specs that were installed explicitly',
default=None,
action='store_true')
def _to_key(spec, fmt, variants):
"""Convert the provided `spec` to a simple, identifiable string, using
the spec format given by `fmt`, and using all variants if `variants` is
set to ``"all"``, otherwise only the ones changed from the default
value.
"""
key = spec.format(fmt)
sflags = []
bflags = []
for k, v in spec.variants.items():
default = None
if k in spec.package.variants:
default = spec.package.variants[k].default
if v.value != default or variants == 'all':
if v.value in (True, False):
bflags.append(v)
elif v.name != 'patches':
sflags.append(v)
sflags = ' '.join(str(f) for f in sorted(sflags))
bflags = ''.join(str(f) for f in sorted(bflags))
key = ' '.join([e for e in (key, sflags, bflags) if len(e) > 0])
return str(key)
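# e.g. (hypothetical spec) zlib at version 1.2.11 with +shared and build_type=Release,
# fmt='$_$@' and variants='all' would yield "zlib@1.2.11 build_type=Release +shared"
# (string-valued variants first, then boolean flags).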
def export(parser, args):
q_args = {"explicit": True if args.explicit else any}
specs = args.specs(**q_args)
# Exit early if no package matches the constraint
if not args.specs and args.constraint:
msg = "No package matches the query: {0}"
msg = msg.format(' '.join(args.constraint))
tty.msg(msg)
return
packages = spack.config.get('packages', scope=args.scope)
# If tags have been specified on the command line, filter by tags
if args.tags:
packages_with_tags = spack.repo.path.packages_with_tags(*args.tags)
specs = [x for x in specs if x.name in packages_with_tags]
if args.exclude:
specs = set(filter_exclude(specs, args.exclude))
cls = None
if args.module:
cls = spack.modules.module_types[args.module]
# Add all selected specs to the external packages
new_packages = {}
for spec in specs:
pkg_toplevel = new_packages.setdefault(spec.name, {})
pkg_externals = pkg_toplevel.setdefault("externals", [])
pkg_versions = pkg_toplevel.setdefault("version", syaml_list())
key = _to_key(spec, args.format, args.variants)
externality = dict(spec=key, prefix=str(spec.prefix))
if key in [ext["spec"] for ext in pkg_externals]:
tty.warn("spec already present, skipping: {0}".format(key))
continue
mod = cls(spec) if cls else None
if mod and not mod.conf.blacklisted:
if os.path.exists(mod.layout.filename):
externality["modules"] = [str(mod.layout.use_name)]
else:
msg = "module not present for {0}"
msg = msg.format(spec.format("$_$@"))
tty.warn(msg)
version = str(spec.version)
if version not in pkg_versions:
pkg_versions.append(version)
pkg_externals.append(externality)
spack.config.merge_yaml(packages, new_packages)
# Restore ordering
packages = syaml_dict(sorted((k, v) for (k, v) in packages.items() if len(v) > 0))
if 'all' in packages:
packages['all'] = packages.pop('all')
yaml.dump({'packages': packages},
stream=sys.stdout,
default_flow_style=False,
Dumper=PackagesDumper)
|
tomdele/spack
|
lib/spack/spack/cmd/export.py
|
export.py
|
py
| 5,533 |
python
|
en
|
code
| null |
github-code
|
6
|
9378129688
|
import operator
import pandas as pd
def segmentation(dataset: pd.DataFrame, rfm: list, d: int):
"""
Sort RFM Segmentation function
:param dataset: given dataset
    :param rfm: a list of the three column names (R, F, M)
    :param d: number of groups to divide the data into for each factor
:return: dataset with new segment column
"""
datalists = [dataset.values.tolist()]
for factor in rfm:
new_datalists = []
for datalist in datalists:
datalist.sort(key=operator.itemgetter(dataset.columns.get_loc(factor)))
size = len(datalist)
low_index = 0
rem = size % d
step = size / d
for i in range(d):
up_index = low_index + int(step) + (1 if rem > 0 else 0)
new_datalists.append(datalist[low_index: up_index])
rem -= 1
low_index = up_index
datalists = new_datalists
# Determining customer segments
customer_segment = dict()
id_index = dataset.columns.get_loc('id')
for segment_id in range(d**3):
for customer in datalists[segment_id]:
customer_segment[customer[id_index]] = segment_id + 1
# Adding segmentation results to dataset
dataset['Sort_Approch_Segment'] = dataset.apply(lambda row: customer_segment[row['id']], axis=1)
return dataset
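# Usage sketch with hypothetical data; the 'id' column and the three RFM column
# names below are assumptions, not part of the original API contract.
if __name__ == '__main__':
    df = pd.DataFrame({
        'id': range(1, 9),
        'recency': [5, 40, 12, 80, 3, 60, 25, 7],
        'frequency': [10, 2, 7, 1, 12, 3, 5, 9],
        'monetary': [200.0, 50.0, 120.0, 20.0, 300.0, 40.0, 90.0, 150.0],
    })
    out = segmentation(df, ['recency', 'frequency', 'monetary'], d=2)
    print(out[['id', 'Sort_Approch_Segment']])  # each customer lands in one of d**3 = 8 segments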
|
smh997/Audiobook-Customer-Segmentation-and-Purchase-Prediction
|
Customer Segmentation/RFM/sort_segmentation.py
|
sort_segmentation.py
|
py
| 1,371 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72650143867
|
cod1,num1,valor1 = input().split()
cod1,num1,valor1 = int(cod1),int(num1),float(valor1)
cod2,num2,valor2 = input().split()
cod2,num2,valor2 = int(cod2),int(num2),float(valor2)
peca1 = num1*valor1
peca2 = num2*valor2
total = peca1 + peca2
print(f'VALOR A PAGAR: R$ {total:.2f}')
|
hpalermoemerick/Exercicios-do-Beecrowd
|
1010_Calculo_Simples.py
|
1010_Calculo_Simples.py
|
py
| 279 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
26536074156
|
def regresiva(n):
while n > 0:
yield n
n -= 1
for x in regresiva(10):
print(x, end=" ")
list(regresiva(10))
#%%
def filematch(filename, substr):
with open(filename, 'r') as f:
for line in f:
if substr in line:
yield line
for line in open('Data/camion.csv'):
print(line, end="")
for line in filematch('Data/camion.csv', 'Naranja'):
print(line, end="")
|
francosbenitez/unsam
|
10-generadores-e-iteradores/tests.py
|
tests.py
|
py
| 442 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75137098747
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import Permission
from .models import Organization, OrganizationUser
class OrganizationBackend(ModelBackend):
supports_object_permissions = True
def authenticate(self, organization=None, username=None, password=None):
if organization is None:
return None
try:
organization = Organization.objects.get(code__iexact=organization)
except Organization.DoesNotExist:
return None
try:
user = OrganizationUser.objects.get(organization=organization,
username__iexact=username,
user__is_active=True)
if user.check_password(password):
return user
except OrganizationUser.DoesNotExist:
return None
def _create_permission_set(self, perms=None):
"""
Expects a queryset of permissions, returns a formatted
set.
"""
if perms is None:
return set()
if isinstance(perms, (list, tuple)):
perms = [(perm.content_type.app_label, perm.codename)
for perm in perms]
else:
perms = perms.values_list('content_type__app_label',
'codename').order_by()
return set(['%s.%s' % (ct, name) for ct, name in perms])
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of all permission strings that this user has through
his/her roles for the given object.
We accomplish this by pulling the set of all available permissions, then
checking the object. A superuser immediately gets all of the available
permissions, and a super role gets all of their super role permissions.
The supplied object can be None, an `Organization` object,
or an object with an organization attribute.
If the object is None, then this function returns all permissions that
this user has available, regardless of object. This facilitates
situations where you want to limit functionality based off of whether or
not a permission exists at all.
If the object is an `Organization` object, we only return permissions
granted via SuperRoles and Roles the user is a member of, that are part
of the supplied organization.
If the supplied object has an `organization` attribute (or an
_ORGANIZATION_ATTRIBUTE attribute with the name of an actual attribute
that returns an `Organization` object), then the returned permissions
are all permissions granted via SuperRoles, as well as permissions
granted from Roles that the user is a member of, that are part of the
organization that owns the object.
Finally, if an object is supplied, but it is not an `Organization`
object, nor does it have an attribute that points to an `Organization`
object, then return all available permissions (as if the supplied object
was None)
"""
# superusers get all permissions, like usual
if user_obj.is_superuser:
perms = Permission.objects.all()
return self._create_permission_set(perms)
# if the user is not an OrganizationUser, they get no permissions
if not isinstance(user_obj, OrganizationUser):
return set()
# if the user is not in any roles, they get no permissions
if not any([user_obj.super_roles.count(), user_obj.roles.count()]):
return set()
# at this point, they should have some permissions
# start off with the set of super role permissions
perms = Permission.objects.filter(superrole__organizationuser=user_obj)
# next, get the set of permissions provided by the regular roles
if isinstance(obj, Organization):
# if the supplied object is an `Organization` object
object_org = obj
else:
# check the object's organization
attname = getattr(obj, '_ORGANIZATION_ATTRIBUTE', 'organization')
# if no object was passed in, or the object doesn't have an
# organization attribute, include all permissions from all roles
if obj is None or not hasattr(obj, attname):
roles = user_obj.roles.all()
perms = perms | Permission.objects.filter(role__in=roles)
# done calculating at this point, return early
return self._create_permission_set(perms)
# At this point, we know the object is not None and the object
# has an organization attribute, so fetch the value of the
# organization
object_org = getattr(obj, attname, None)
# If the value of the organization attribute is None, then return
# the currently collected permissions
if object_org is None:
return self._create_permission_set(perms)
# Finally, collect the permissions this user has on this object, based
# off of the set of organizations they are a member of
# If the user is not a member of the organization attached to this
# object, then return the collected permissions
if object_org not in user_obj.get_all_organizations():
return self._create_permission_set(perms)
# The user is in the organization that owns this object, so collect
# all of the permissions this user has for this organization
roles = user_obj.roles.filter(organization=object_org)
perms = perms | Permission.objects.filter(role__in=roles)
return self._create_permission_set(perms)
def get_all_permissions(self, user_obj, obj=None):
if user_obj.is_anonymous():
return set()
# we don't support user permissions
return self.get_group_permissions(user_obj, obj=obj)
def has_perm(self, user_obj, perm, obj=None):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj, obj=obj)
def has_module_perms(self, user_obj, app_label, obj=None):
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj, obj=obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
try:
return OrganizationUser.objects.get(pk=user_id)
except OrganizationUser.DoesNotExist:
return None
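# Usage sketch (hypothetical names):
# backend = OrganizationBackend()
# user = backend.authenticate(organization='acme', username='jdoe', password='s3cret')
# if user and backend.has_perm(user, 'projects.change_project', obj=some_project):
#     ...  # resolved from the user's roles in some_project's organization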
|
avidal/django-organizations
|
organizations/backends.py
|
backends.py
|
py
| 6,718 |
python
|
en
|
code
| 1 |
github-code
|
6
|
12091024325
|
from typing import List, Iterator
import torch
from torch.utils.data.sampler import Sampler
from nltk import Tree
from nltk.tokenize.treebank import TreebankWordTokenizer
class TokenizedLengthSampler(Sampler[List[int]]):
"""
    PyTorch DataLoader-compatible batch sampler that groups sentences of similar
    tokenized length into the same batch, minimizing padding overhead.
"""
def __init__(self, data_source: List[str], batch_size: int, seed: int):
self.data_source = data_source
self.length = len(data_source)
self.batch_size = batch_size
tokenize = TreebankWordTokenizer().tokenize
seq_lengths = [len(tokenize(sent)) for sent in data_source]
indices = list(range(len(data_source)))
indices = sorted(indices, key=lambda i: seq_lengths[i])
batches = []
if self.length % self.batch_size != 0 :
batches.append(indices[:self.length % self.batch_size])
for start in range(self.length % self.batch_size, self.length, batch_size):
end = start + batch_size
batches.append(indices[start:end])
self.length_batches = len(batches)
        # honor the seed argument so batch order is reproducible
        generator = torch.Generator()
        generator.manual_seed(seed)
        self.batches = [batches[i] for i in torch.randperm(n=self.length_batches, generator=generator, dtype=torch.long).tolist()]
self.seq_lengths = seq_lengths
def __len__(self):
return self.length_batches
def __iter__(self) -> Iterator[List[int]]:
for batch in self.batches:
yield batch
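# Usage sketch (hypothetical sentences): iterate the sampler directly, or pass it
# to a DataLoader as batch_sampler together with a dataset over the same sentences.
if __name__ == '__main__':
    sents = ["A short one.", "A considerably longer example sentence for batching."] * 8
    sampler = TokenizedLengthSampler(sents, batch_size=4, seed=42)
    for batch_indices in sampler:
        print(batch_indices)  # each batch groups sentences of similar token length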
|
jinulee-v/bert_diora
|
bert_diora/utils.py
|
utils.py
|
py
| 1,470 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21929427517
|
# Trial project - Number guessing
import time
import random
random_number = random.randint(1,100)
player_point = 0
attempt_counter = 0
print("Welcome to number guessing challenge. You will get 10 chance to guess the correct number!\n")
while attempt_counter <=10:
print(f"Currect time : {time.asctime()}")
user_input = int(input("Enter your guess : "))
if user_input<random_number:
print("Please enter larger number!")
attempt_counter += 1
print(f"You have only {10-attempt_counter} attempts remaing !\n")
continue
elif user_input>random_number:
print("Please enter lower number!")
attempt_counter += 1
print(f"You have only {10-attempt_counter} attempts remaining !\n")
continue
else:
print("\nCongratulations! You have guessed the correct number.\n")
attempt_counter += 1
break
print(f"You have took {attempt_counter} attempt")
|
MahbinAhmed/Learning
|
Python/Python Practice/number_guessing.py
|
number_guessing.py
|
py
| 941 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41211802240
|
#import cv2
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np
import os
import sys
import json
from PIL import Image
# import Image
import requests
from io import BytesIO
import urllib3
import h5py
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# y_test = []
# Compute the cosine-similarity matrix
def cosine_similarity(ratings):
sim = ratings.dot(ratings.T)
if not isinstance(sim, np.ndarray):
sim = sim.toarray()
norms = np.array([np.sqrt(np.diagonal(sim))])
return (sim / norms / norms.T)
def saveVector(vector):  ## not used here; run it in Colab to generate the feature vector
save_file = h5py.File('../test.h5', 'w')
save_file.create_dataset('test', data=vector)
save_file.close()
def readvector():
open_file = h5py.File('/Users/liujiazhen/Documents/2020-2021/PFE/PFE/PFE/information-retrival-search-engine/informationRetrival/vgg16_p/test.h5', 'r')
vector = open_file['test'][:]
open_file.close()
return vector
def getTitleCheck_VGG():
a = np.load('/Users/liujiazhen/Documents/2020-2021/PFE/PFE/PFE/information-retrival-search-engine/informationRetrival/vgg16_p/title.npy', allow_pickle= True)
return a.item()
def compare():
# y_test = []
model = VGG16(weights='imagenet', include_top=False)
    # Load the query image
image_sample = Image.open("/Users/liujiazhen/Documents/2020-2021/PFE/PFE/PFE/information-retrival-search-engine/informationRetrival/frontend/static/frontend/images/temp.jpg")
imageS = image_sample.crop()
thisImage = imageS.resize((224, 224))
my_image = image.img_to_array(thisImage)
my_x = np.expand_dims(my_image, axis=0)
my_x = preprocess_input(my_x)
my_features = model.predict(my_x)
my_features_compress = my_features.reshape(1, 7 * 7 * 512)
# features_compress.append(my_features_compress)
features_compress = readvector()
# print(np.shape(features_compress))
# print(np.shape(my_features_compress))
new_features = np.append(features_compress, my_features_compress, axis=0)
# print(np.shape(new_features))
# exit(0)
sim = cosine_similarity(new_features)
# print("sim:", np.shape(sim))
    # # Pick one sample to test, per the command-line argument
# inputNo = int(sys.argv[1]) # tiger, np.random.randint(0,len(y_test),1)[0]
# sample = y_test[inputNo]
# print(sample)
top = np.argsort(-sim[-1, :], axis=0)[1:3]
    # Get the indices of the 2 most similar posters
y_test = getTitleCheck_VGG()
recommend = [y_test[i] for i in top]
print(recommend)
# print(sim)
def compare_melanger():
# y_test = []
model = VGG16(weights='imagenet', include_top=False)
# 取样本
    image_sample = Image.open("/Users/liujiazhen/Documents/2020-2021/PFE/PFE/PFE/information-retrival-search-engine/informationRetrival/frontend/static/frontend/images/temp.jpg")  # adjust this path here as needed
imageS = image_sample.crop()
thisImage = imageS.resize((224, 224))
my_image = image.img_to_array(thisImage)
my_x = np.expand_dims(my_image, axis=0)
my_x = preprocess_input(my_x)
my_features = model.predict(my_x)
my_features_compress = my_features.reshape(1, 7 * 7 * 512)
# features_compress.append(my_features_compress)
features_compress = readvector()
# print(np.shape(features_compress))
# print(np.shape(my_features_compress))
new_features = np.append(features_compress, my_features_compress, axis=0)
# print(np.shape(new_features))
# exit(0)
sim = cosine_similarity(new_features)
return sim
def main():
    # Find all JPEG files under the images directory
y_test = []
x_test = []
# x_test_final = []
# FILE_PATH = "/Users/panda/Desktop/movie_1202"
FILE_PATH = "/Users/panda/Downloads/archive/movies/movies"
IMAGE_BASE_PATH = "https://image.tmdb.org/t/p/w500"
flag = 0
# read file which is in the id_list
open_file = h5py.File('./id_list.h5', 'r')
id = open_file['id'][:]
open_file.close()
tmp = []
for i in range(len(id)):
tmp.append(int(id[i].decode('UTF-8')))
# print(tmp)
for movie in os.listdir(FILE_PATH):
# if flag < 50:
# flag += 1
# if flag == 245 or flag == 246 or flag == 247 or flag == 248:
# print(movie)
# else:
# continue
if movie.split(".")[1] != "json":
continue
movie_id = int(movie.split('_')[1].split('.')[0])
if movie_id in tmp:
# print(movie_id)
# open file
fr = open(FILE_PATH + "/" + movie)
# print(movie)
# print(movie_id)
movie_model = json.load(fr)
fr.close()
if movie_model['poster_path']:
img_path = IMAGE_BASE_PATH + movie_model['poster_path']
html = requests.get(img_path, verify=False)
poster = Image.open(BytesIO(html.content))
poster_img = poster.crop()
# poster = html.content
# imgByteArr = BytesIO()
# poster.save(imgByteArr, format=poster.format)
# poster = imgByteArr.getvalue()
# poster_img.show()
# img = poster_img.resize((224, 224))
# img.show()
# exit(1)
if poster:
# img = image.load_img(poster_img, target_size=(224, 224))
img = poster_img.resize((224, 224))
# img.show()
y_test.append(movie_id)
x = image.img_to_array(img)
# print(movie_id)
# print(x[:,:,0])
# print(np.shape(x[:,:,0]))
# exit(0)
if np.shape(x)[2] == 1:
x = np.stack((x[:, :, 0],) * 3, axis=-1)
x = np.expand_dims(x, axis=0)
if len(x_test) > 0:
# print(1)
# print(np.shape(x_test))
# print(np.shape(x))
# exit(0)
x_test = np.concatenate((x_test, x))
else:
# print(2)
x_test = x
# flag = flag + 1
# else:
# if len(x_test_final) > 0:
# # print(np.shape(x_test))
# # print(np.shape(x))
# # exit(0)
# #x_test = preprocess_input(x_test)
# x_test_final = np.concatenate((x_test_final, x_test))
# else:
# # x_test = preprocess_input(x_test)
# x_test_final = x_test
# x_test = []
# flag = 0
# x_test_final = np.concatenate((x_test_final, x_test))
    # Convert to the VGG input format
# print(x_test)
# print(type(x_test))
# print(np.shape(x_test))
x_test = preprocess_input(x_test)
# print(np.shape(x_test_final))
np.save("title.npy", y_test)
    # include_top=False loads the VGG16 model without its top layers; it is typically used to extract features of shape (1, 7, 7, 512)
model = VGG16(weights='imagenet', include_top=False)
    # Extract features
features = model.predict(x_test)
# print(np.shape(features))
    # Compute the similarity matrix
features_compress = features.reshape(len(y_test), 7 * 7 * 512)
# print(np.shape(features_compress))
# sim = cosine_similarity(features_compress)
saveVector(features_compress)
compare()
    # # Load the query image
# image_sample = Image.open("/Users/panda/Desktop/test_image/test.jpg")
# imageS = image_sample.crop()
# thisImage = imageS.resize((224, 224))
# my_image = image.img_to_array(thisImage)
# my_x = np.expand_dims(my_image, axis=0)
#
# my_x = preprocess_input(my_x)
#
# my_features = model.predict(my_x)
#
# my_features_compress = my_features.reshape(1, 7 * 7 * 512)
#
# # features_compress.append(my_features_compress)
#
# # print(np.shape(features_compress))
# # print(np.shape(my_features_compress))
# new_features = np.append(features_compress, my_features_compress, axis=0)
# # print(np.shape(new_features))
# # exit(0)
# sim = cosine_similarity(new_features)
# # print("sim:", np.shape(sim))
#
#
    # # # Pick one sample to test, per the command-line argument
# # inputNo = int(sys.argv[1]) # tiger, np.random.randint(0,len(y_test),1)[0]
# # sample = y_test[inputNo]
# # print(sample)
# top = np.argsort(-sim[-1,:], axis=0)[1:3]
#
    # # Get the indices of the 2 most similar posters
# recommend = [y_test[i] for i in top]
# print(recommend)
# #print(sim)
# if __name__ == "__main__":
# main()
print(getTitleCheck_VGG())
|
ming19956/PFE
|
information-retrival-search-engine/informationRetrival/vgg16_p/newvgg.py
|
newvgg.py
|
py
| 9,112 |
python
|
en
|
code
| 2 |
github-code
|
6
|
71839302589
|
import unittest, os
from pre_requirements import BASE_FOLDER
from budget_system import PurchaseList
from budget_system.settings.Config import ConfigBudget
from pandas.core.frame import DataFrame
from numpy import float64
class PurchaseListTest(unittest.TestCase):
def setUp(self) -> None:
os.environ['CONFIG_BUDGET'] = os.path.join(BASE_FOLDER, 'config.ini')
self.year, self.month = 2022, 'May'
self.month_path = ConfigBudget().MONTH_PATH.format(year=self.year, month=self.month)
self.file_name = 'test_table_one_storeone_[15-05-22].csv'
self.location = os.path.join(self.month_path, self.file_name)
def test_get_productsdata(self):
products_data = PurchaseList(self.location).get_all()
least_expensive, most_expensive, spent = products_data
self.assertEqual(type(least_expensive), DataFrame)
self.assertEqual(type(most_expensive), DataFrame)
self.assertEqual(type(spent), float64)
def test_get_n_most_expensive_products(self):
n_products = 5
most_expensive = PurchaseList(self.location).most_expensive(n_products)
self.assertEqual(type(most_expensive), DataFrame)
def test_get_n_least_expensive_products(self):
n_products = 5
least_expensive = PurchaseList(self.location).least_expensive(n_products)
self.assertEqual(type(least_expensive), DataFrame)
def test_get_total_spent(self):
spent_by_table = PurchaseList(self.location).spending_by_sector()
self.assertEqual(type(spent_by_table), float64)
def test_get_full_dataframe(self):
df = PurchaseList(self.location).data_frame
self.assertEqual(type(df), DataFrame)
def test_get_full_dataframe_sorted_by_price(self):
df_by_price = PurchaseList(self.location).df_by_price
self.assertEqual(type(df_by_price), DataFrame)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
carlosmperilla/budget-system
|
tests/test_purchaselist.py
|
test_purchaselist.py
|
py
| 1,997 |
python
|
en
|
code
| 2 |
github-code
|
6
|
22218524626
|
''' Convenience methods on VTK routines only '''
import director.vtkAll as vtk
import director.vtkNumpy as vnp
from director.shallowCopy import shallowCopy
import numpy as np
def thresholdPoints(polyData, arrayName, thresholdRange):
assert(polyData.GetPointData().GetArray(arrayName))
f = vtk.vtkThresholdPoints()
f.SetInputData(polyData)
f.ThresholdBetween(thresholdRange[0], thresholdRange[1])
f.SetInputArrayToProcess(0,0,0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, arrayName)
f.Update()
return shallowCopy(f.GetOutput())
def thresholdCells(polyData, arrayName, thresholdRange, arrayType='cells'):
assert arrayType in ('points', 'cells')
f = vtk.vtkThreshold()
f.SetInputData(polyData)
f.ThresholdBetween(thresholdRange[0], thresholdRange[1])
if arrayType == 'cells':
assert(polyData.GetCellData().GetArray(arrayName))
f.SetInputArrayToProcess(0,0,0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, arrayName)
else:
assert(polyData.GetPointData().GetArray(arrayName))
f.SetInputArrayToProcess(0,0,0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, arrayName)
f.Update()
g = vtk.vtkGeometryFilter()
g.SetInputConnection(f.GetOutputPort())
g.Update()
return shallowCopy(g.GetOutput())
def transformPolyData(polyData, transform):
t = vtk.vtkTransformPolyDataFilter()
t.SetTransform(transform)
t.SetInputData(shallowCopy(polyData))
t.Update()
return shallowCopy(t.GetOutput())
def computeDelaunay3D(polyData):
f = vtk.vtkDelaunay3D()
f.SetInputData(polyData)
f.SetOffset(100.0)
f.Update()
surface = vtk.vtkGeometryFilter()
surface.SetInputData(f.GetOutput())
surface.Update()
clean = vtk.vtkCleanPolyData()
clean.SetInputData(surface.GetOutput())
clean.Update()
return shallowCopy(clean.GetOutput())
def computeDelaunay2D(polyData):
f = vtk.vtkDelaunay2D()
f.SetInputData(polyData)
f.Update()
return shallowCopy(f.GetOutput())
def computeCentroid(polyData):
return np.average(vnp.getNumpyFromVtk(polyData, 'Points'), axis=0)
def appendPolyData(polyDataList):
append = vtk.vtkAppendPolyData()
if polyDataList:
for polyData in polyDataList:
append.AddInputData(polyData)
append.Update()
return shallowCopy(append.GetOutput())
def computeNormals(polyData, featureAngle=45):
normals = vtk.vtkPolyDataNormals()
normals.SetFeatureAngle(featureAngle)
normals.SetInputData(polyData)
normals.Update()
return shallowCopy(normals.GetOutput())
def cleanPolyData(polyData):
clean = vtk.vtkCleanPolyData()
clean.SetInputData(polyData)
clean.Update()
return shallowCopy(clean.GetOutput())
def triangulatePolyData(polyData):
f = vtk.vtkTriangleFilter()
f.SetInputData(polyData)
f.Update()
return shallowCopy(f.GetOutput())
def decimateMesh(polyData, targetReduction=0.1):
'''
Reduce the number of triangles in the input mesh by targetReduction.
    0.1 = 10% reduction (if there were 100 triangles, there will now be 90)
'''
f = vtk.vtkDecimatePro()
f.SetInputData(polyData)
f.SetTargetReduction(targetReduction)
f.Update()
return shallowCopy(f.GetOutput())
def hasNonFinitePoints(polyData, arrayName='Points'):
    pts = vnp.getNumpyFromVtk(polyData, arrayName)
    # a point is non-finite if any coordinate is nan or +/- inf
    return np.logical_not(np.isfinite(pts)).any()
def labelNonFinitePoints(polyData, arrayName='Points'):
'''
adds is_nonfinite label to polyData. non finite includes nan and +/- inf.
'''
pts = vnp.getNumpyFromVtk(polyData, arrayName)
labels = np.logical_not(np.isfinite(pts)).any(axis=1)
vnp.addNumpyToVtk(polyData, np.array(labels, dtype=np.int32), 'is_nonfinite')
def removeNonFinitePoints(polyData, arrayName='Points'):
polyData = shallowCopy(polyData)
labelNonFinitePoints(polyData, arrayName)
return thresholdPoints(polyData, 'is_nonfinite', [0, 0])
def flipImage(image, flipAxis=1):
'''
Flip a vtkImageData using the vtkImageFlip filter.
The flipAxis can be 0 or 1 to flip horizontally or vertically.
'''
assert flipAxis in (0, 1)
f = vtk.vtkImageFlip()
f.SetFilteredAxis(flipAxis)
f.SetInputData(image)
f.Update()
return shallowCopy(f.GetOutput())
def rotateImage180(image):
'''
rotates an image by 180 degrees
'''
r1 = vtk.vtkImageFlip()
r1.SetInputData(image)
r1.SetFilteredAxis(0)
r1.Update()
r2 = vtk.vtkImageFlip()
r2.SetInputData(r1.GetOutput())
r2.SetFilteredAxis(1)
r2.Update()
return shallowCopy(r2.GetOutput())
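# Usage sketch (hypothetical 'polyData' input):
# mesh = decimateMesh(polyData, targetReduction=0.5)   # keep roughly half the triangles
# finite = removeNonFinitePoints(polyData)             # drop points with nan/inf coordinates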
|
RobotLocomotion/director
|
src/python/director/filterUtils.py
|
filterUtils.py
|
py
| 4,630 |
python
|
en
|
code
| 176 |
github-code
|
6
|
22781339759
|
import IMP
import IMP.pmi
import IMP.pmi.macros
import RMF
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import numpy as np
import argparse
#########
# PARSER
#########
p = argparse.ArgumentParser(
description="Align selected RMF files. \n"
"Example of usage: align_rmf.py -d mc_tags -cl 2 -st 0"
)
p.add_argument('-d', action="store", dest="dir_name",
help="directory name to process")
p.add_argument('-cl', action="store", dest="cluster",
help="Specify cluster")
p.add_argument('-st', action="store", dest="state",
help="Specify RMF state")
parsero = p.parse_args()
def get_coordinates_alignment(hier, selection=None):
coord_dict = {}
if selection:
for k, v in selection.items():
sel = IMP.atom.Selection(hier,
molecule=v[0],
residue_indexes=np.arange(v[1], v[2], 1),
resolution=IMP.atom.ALL_RESOLUTIONS,
copy_index=v[3]).get_selected_particles()
coords = [np.array(IMP.core.XYZ(p).get_coordinates())
for p in sel]
coord_dict[k] = coords
else:
mols = IMP.pmi.tools.get_molecules(hier)
# print(mols)
for m in mols:
sel = IMP.atom.Selection(hier,
molecule=m.get_name(),
copy_index=IMP.atom.Copy(m).get_copy_index(),
resolution=IMP.atom.ALL_RESOLUTIONS).get_selected_particles()
coords = [np.array(IMP.core.XYZ(p).get_coordinates())
for p in sel]
coord_dict[m.get_name()] = coords
return coord_dict
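# Selection format sketch (hypothetical entry): each value is a tuple of
# (molecule_name, first_residue, last_residue_exclusive, copy_index), e.g.
# selection = {'Sec3_N': ('Sec3', 1, 200, 0)}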
def transform_coordinates(hier, transformation):
# Transform all coordinates
rbs, beads = IMP.pmi.tools.get_rbs_and_beads(hier)
for rb in rbs:
IMP.core.transform(rb, transformation)
for p in beads:
temp_coord = IMP.core.XYZ(p)
IMP.core.transform(temp_coord, transformation)
def get_reference_coordinates(rmf_in, selection=None):
"""
Get reference coordinates in reference rmf file
:param rmf_in: reference rmf file
:return: coordinates
"""
m = IMP.Model()
f = RMF.open_rmf_file_read_only(rmf_in)
hier = IMP.rmf.create_hierarchies(f, m)[0]
IMP.rmf.load_frame(f, RMF.FrameID(0))
    # Get coordinates from frame 0
ref_coord = get_coordinates_alignment(hier, selection)
del m, f
return ref_coord
def align_rmf(rmf_in, rmf_out, ref_coord, selection=None, frames=None):
"""
Align selected frames in rmf_in to ref_coordinates and
calculate RMSD.
:param rmf_in: input rmf
:param rmf_out: output rmf
:param selection: selection of particles
:param ref_coord: reference coordinates after running Sampcon.py
:param frames: passing selected frames
:return:
"""
fh_out = RMF.create_rmf_file(rmf_out)
m = IMP.Model()
f = RMF.open_rmf_file_read_only(rmf_in)
print('Number of frames', f.get_number_of_frames())
if not frames:
frames = np.arange(0, f.get_number_of_frames(), 100)
hier = IMP.rmf.create_hierarchies(f, m)[0]
states = IMP.atom.get_by_type(hier, IMP.atom.STATE_TYPE)
for i, s in enumerate(states):
if i == sel_state:
p = IMP.Particle(m, 'System')
hier_temp = IMP.atom.Hierarchy.setup_particle(p)
hier_temp.add_child(s)
IMP.rmf.add_hierarchy(fh_out, hier_temp)
RMSD = []
for i in frames:
if i % 100 == 0: print('Frame:', i)
IMP.rmf.load_frame(f, RMF.FrameID(i))
temp_coord = get_coordinates_alignment(hier, selection)
ali = IMP.pmi.analysis.Alignment(ref_coord, temp_coord)
(rmsd, transformation) = ali.align()
RMSD.append(rmsd)
transform_coordinates(hier, transformation)
IMP.rmf.save_frame(fh_out, str(i))
del temp_coord
del f
print('Mean RMSD:', np.mean(np.array(RMSD)))
return RMSD
if __name__ == '__main__':
#####################
# MAIN
#####################
# IO files
    dir_name = parsero.dir_name  # mc_tags_k1
    cl = parsero.cluster  # 2
    st = parsero.state  # 0
sel_state = 0
ref_rmf = f'../output/{dir_name}/analysis/rmsd/cluster.{cl}/cluster_center_model.rmf3'
rmf_in_A = f'../output/{dir_name}/analysis/A_models_clust{cl}_{st}.rmf3'
rmf_out_A = f'../output/{dir_name}/analysis/A_models_clust{cl}_{st}_aligned.rmf3'
rmf_in_B = f'../output/{dir_name}/analysis/B_models_clust{cl}_{st}.rmf3'
rmf_out_B = f'../output/{dir_name}/analysis/B_models_clust{cl}_{st}_aligned.rmf3'
cluster_frames_A = f'../output/{dir_name}/analysis/rmsd/cluster.{cl}.sample_A.txt'
cluster_frames_B = f'../output/{dir_name}/analysis/rmsd/cluster.{cl}.sample_B.txt'
m = IMP.Model()
f = RMF.open_rmf_file_read_only(rmf_in_A)
nframes_A = f.get_number_of_frames()
frames_A = [int(l.strip()) for l in open(cluster_frames_A, 'r')]
frames_B = [int(l.strip()) - nframes_A for l in open(cluster_frames_B, 'r')]
del m, f
#################################
# Get reference and align
#################################
reference_coordinates = get_reference_coordinates(ref_rmf)
rmsd_A = align_rmf(rmf_in_A, rmf_out_A, reference_coordinates, frames=frames_A)
rmsd_B = align_rmf(rmf_in_B, rmf_out_B, reference_coordinates, frames=frames_B)
#################################
# Plot RMSD distribution
#################################
sns.set(font_scale=3)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(30, 40))
ax1.set_title(f'RMSD_A', size=50, y=1.15, fontweight='bold')
sns.histplot(x=rmsd_A, stat='density', fill=True, ax=ax1)
ax2.set_title(f'RMSD_B', size=50, y=1.15, fontweight='bold')
sns.histplot(x=rmsd_B, stat='density', fill=True, ax=ax2)
plt.tight_layout(pad=3.0)
# plt.show()
plt.savefig(f'../output/{dir_name}/analysis/pict_tags_rmsd.png')
print('\nDONE!\n')
sys.exit(0)
|
Altairch95/ExocystDYN
|
scripts/align_rmf.py
|
align_rmf.py
|
py
| 5,528 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12028632350
|
import tkinter
from tkinter import messagebox
from src.game.minesweeper import Minesweeper
from src.game.minesweeper import CellStatus
from src.game.minesweeper import GameStatus
from datetime import datetime
import platform
class MineweeperUI:
def __init__(self, root):
self.ui_window = root
self.ui_window.title("Minesweeper")
self.minesweeper = Minesweeper()
self.minesweeper.set_mines((int(round(datetime.now().timestamp() * 1000))))
self.cells= []
self.grid_init()
def grid_init(self):
right_click_type = self.button_os_config()
for row in range(0, 10):
self.cells.append([])
for column in range(0, 10):
cell = tkinter.Button(self.ui_window, text=" ", width=5, height=3,
command=lambda row=row, column=column, left_button = True: self.button_clicked(row, column, left_button))
cell.bind(right_click_type, lambda event, row=row, column=column, left_button = False: self.button_clicked(row, column, left_button))
cell.grid(row=row + 1, column=column)
self.cells[row].append(cell)
def button_os_config(self):
if platform.system() == "Darwin":
return "<Button-2>"
else:
return "<Button-3>"
def button_clicked(self,row, column, left_button):
if left_button:
self.minesweeper.expose(row, column)
else:
self.minesweeper.toggle_seal(row, column)
self.update_grid()
if self.minesweeper.get_game_status() == GameStatus.LOST:
self.show_mines()
messagebox.showinfo("Game Over", "You have step on a mine! ")
if platform.system() == "Windows":
exit()
elif self.minesweeper.get_game_status() == GameStatus.WON:
messagebox.showinfo("Congratulations!", "You are a minesweeper master! ")
if platform.system() == "Windows":
exit()
def show_mines(self):
for row in range(10):
for column in range(10):
if self.minesweeper.is_mine_at(row,column):
self.cells[row][column]["text"] = "*"
if platform.system() == "Darwin":
self.cells[row][column].config(highlightbackground="red", highlightthickness=1)
else:
self.cells[row][column].config(background='red')
def update_grid(self):
for row in range(10):
for column in range(10):
if platform.system() == "Darwin":
if self.minesweeper.get_status(row,column) == CellStatus.EXPOSED:
adjacent_value = self.minesweeper.adjacent_mine_count(row, column)
if adjacent_value > 0: self.cells[row][column]["text"] = str(adjacent_value)
self.cells[row][column].config(highlightbackground="Yellow", highlightthickness=1)
elif self.minesweeper.get_status(row, column) == CellStatus.SEAL:
self.cells[row][column].config(highlightbackground="green", highlightthickness=1)
else:
self.cells[row][column].config(highlightbackground="#DCDCDC", highlightthickness=1)
else:
if self.minesweeper.get_status(row, column) == CellStatus.EXPOSED:
adjacent_value = self.minesweeper.adjacent_mine_count(row, column)
if adjacent_value > 0: self.cells[row][column]["text"] = str(adjacent_value)
self.cells[row][column].config(background='Yellow')
elif self.minesweeper.get_status(row, column) == CellStatus.SEAL:
self.cells[row][column].config(background='green')
else:
self.cells[row][column].config(background='#DCDCDC')
class Main:
def __init__(self):
root = tkinter.Tk()
        MinesweeperUI(root)
root.mainloop()
|
PriscillaRoy/MinesweeperGame
|
src/gui/minesweeper_ui.py
|
minesweeper_ui.py
|
py
| 3,636 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13530263096
|
n, m = map(int, input().split())
# use a set for O(1) membership tests
S = set()
strings = []
for _ in range(n):
    S.add(input())
for _ in range(m):
strings.append(input())
answer = 0
for i in strings:
if i in S:
answer += 1
print(answer)
|
zooonsp/Baekjoon_zooonsp
|
백준/Silver/14425. 문자열 집합/문자열 집합.py
|
문자열 집합.py
|
py
| 243 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25390435322
|
import logging
import re
import requests
import pandas as pd
import time
import ccxt  # the exchange client objects used below follow the ccxt API
from .sqlite import Orders
from .sqlite import Balances
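# `truncate` is called in issue_order() below but never defined in this module;
# a minimal helper is assumed here, flooring a float to `precision` decimal places.
def truncate(value, precision):
    factor = 10 ** precision
    return int(value * factor) / factor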
class BitMex():
def pull_bitmex_orderbooks(symbol, limit, mode='live'):
# Tracking execution time
start_ts = time.time() * 1000
# Get request
request = requests.get('https://www.bitmex.com/api/v1/orderBook/L2?symbol={}&depth={}'.format(symbol, limit))
bitmex = request.json()
# Check to make sure data is pulled properly
if request.status_code == 200:
# Track latency
req_ts = int(time.time())
sell_arr = []
buy_arr = []
for item in bitmex:
# ['Price','Amount','Value']
row = [item['price'], item['size'], item['price'] * item['size']]
if item['side'] == "Sell":
sell_arr.append(row)
if item['side'] == "Buy":
buy_arr.append(row)
# Extract Bids and Asks to DFs
df_buy = pd.DataFrame(buy_arr)
df_sell = pd.DataFrame(sell_arr)
#Ensure that DFs are not empty
if len(df_buy) == 0:
df_buy = pd.DataFrame([[0,0,0]])
if len(df_sell) == 0:
df_sell = pd.DataFrame([[0,0,0]])
df_buy.columns = df_sell.columns = ['Price','Amount','Value']
# # Write order book data to databae
for row in buy_arr:
Orders.create(
ts=req_ts,
price=row[0],
amount=row[1],
value=row[2],
b_a='b'
)
for row in sell_arr:
Orders.create(
ts=req_ts,
price=row[0],
amount=row[1],
value=row[2],
b_a='a'
)
final_ts = time.time() * 1000
# Log request
req_log = [start_ts, req_ts, final_ts, request.status_code, symbol, 'orders']
logging.info(req_log)
return (df_buy, df_sell)
else:
logging.warning("Orderbook request failure.")
logging.warning(request.json())
return(None, None)
    ## check_pending_orders checks whether there are pending orders.
    ## It decides based on the pending orders and on whether the current
    ## balance allows a new trade, and returns a decision of 'BUY' or 'SELL'
    ## (default: 'SELL').
def check_pending_orders(symbol, client, c_maj, c_min, current_balance, thresh, trade_alloc):
# Get pending orders
logging.info("Checking pending orders...")
# TODO: get this dynamically
symbol = "BTC/USD"
bitmex = client.fetch_open_orders(symbol)
## this is some data munging that we have to do because bitmex doesn't
## return a nice object
sell_arr = []
buy_arr = []
for item in bitmex:
# ['orderID','Price','Amount','Value']
row = [item['info']['orderID'], item['info']['price'], item['info']['orderQty'], item['info']['price'] * item['info']['orderQty']]
if item['info']['side'] == "Sell":
sell_arr.append(row)
if item['info']['side'] == "Buy":
buy_arr.append(row)
pending_orders = {'BUY': buy_arr, 'SELL': sell_arr}
if pending_orders != []:
if(len(pending_orders['BUY']) + len(pending_orders['SELL']) == 0):
for c in (c_maj, c_min):
coin = Balances.select().where(Balances.coin == c).order_by(Balances.id.desc()).get()
current_balance[c] = coin.balance
logging.info("Checking balances....")
# do a balance check to see whether we can trade with current balance
# based on threshold
decision = BitMex.balance_check(current_balance[c_maj], current_balance[c_min], thresh, trade_alloc)
if decision:
return('BUY', pending_orders)
else:
return('SELL', pending_orders)
else:
if(len(pending_orders['BUY']) > 0):
return('BUY', pending_orders)
else:
return('SELL', pending_orders)
# TODO: what should we do if no pending orders?
# return('SELL', pending_orders)
## DONE
def balance_check(balance_maj, balance_min, thresh, trade_alloc):
# major = the one you're quoting.
# minor = the one you're quoting in.
# balance_maj is major coin balance
# balance_min is minor coin balance
# thresh is threshold under which you buy the major pair
# trade_alloc is the allocated amount to trade
return((balance_maj <= thresh) and (balance_min >= trade_alloc))
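    # Example with hypothetical numbers: holding 0.1 BTC (major) against a 0.5
    # threshold, with 1000 USD (minor) and a 500 USD trade allocation:
    # BitMex.balance_check(0.1, 1000, 0.5, 500) -> True (we may buy the major)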
    ## eliminate_excess_orders will cancel all but the best order
    def eliminate_excess_orders(df, decision):
        # checks for all excess orders and returns list of non-optimal oID to cancel
        logging.info("Eliminating excess orders...")
        o_df = pd.DataFrame(df)
        print(o_df)
o_df.columns = ['ts','bs','p','a','deal','oid']
if(decision == 'BUY'):
o_optimal = o_df.p.max()
else:
o_optimal = o_df.p.min()
oid_keep = o_df[o_df.p == o_optimal].oid
orders_to_cancel = [i for i in o_df[o_df.oid != oid_keep[0]].oid]
return orders_to_cancel
    def update_order(self, pending_orders, o_optimal, decision, trade_alloc, client, symbol):
        # `self` added so the cancel_all_orders() call resolves; `log_request`,
        # `log_trade` and `conn` are assumed to come from the surrounding application.
        pair = symbol.replace('-','')
        # cancel all orders
        resp = self.cancel_all_orders(client, pending_orders, decision)
        logging.info("Canceling All Orders for {}: {} Side: {}".format(pair, resp, decision))
        log_request(conn, time.time(), resp, pair, 'cancel_order - {}'.format(decision))
        # issue order
        resp = issue_order(decision, symbol, o_optimal, trade_alloc/o_optimal, conn)
        logging.info("Issuing Orders for {}: {} Side: {}".format(pair, resp, decision))
        return('Order Updated')
def cancel_all_orders(self, client, orders, decision):
# order[0] = orderID
for order in orders[decision]:
logging.info("Cancelling order: {}".format(order[0]))
try:
client.cancelOrder(order[0])
            except ccxt.OrderNotFound as e:
logging.info("Cancelling Excess Orders {} [Fail]:".format(order[0], e))
## TODO: update with better logging
def issue_order(decision, symbol, price, amount, client, precision=0):
try:
# initialize temporary client to avoid UNAUTH
# TODO: don't hard code this
ccxt_sym = "BTC/USD"
print("issue order")
if(decision == 'BUY'):
rresp = client.create_limit_buy_order(ccxt_sym, amount, price)
oid = rresp['id']
log_trade(conn, symbol, price, amount, oid, decision)
return(oid)
if(decision == 'SELL'):
# To catch bad precision loopback re-order
if (precision > 0):
print('Debug precision: ', amount, str(amount))
rresp = client.create_limit_sell_order(ccxt_sym, amount, price)
else:
rresp = client.create_limit_sell_order(ccxt_sym, amount, price)
oid = rresp['id']
log_trade(conn, symbol, price, amount, oid, decision)
return(oid)
except Exception as issue_error:
print(type(issue_error))
print(issue_error.args)
print(str(issue_error.args[0]).replace(',','|'))
# In scenario with improper amount precision
if ('precision of amount' in str(issue_error.args)):
logging.warning(str('Improper Amount Precision - {}'.format(str(issue_error.args[0]))))
m = re.search('(The precision of amount).*[0-9]{1}', str(issue_error.args[0]))
precision = int(m.group(0)[-1])
print(precision)
order_amount = truncate(amount, precision)
if (order_amount > 0.0):
print('Reissuing order', order_amount, precision)
issue_order(decision, symbol, price, order_amount, conn, precision)
return('Reissued Order')
else:
return('Error issueing order: order_amount too low for precision')
return(str(issue_error).replace(',','|'))
def is_best_order(decision, symbol, o_optimal, client, pending_orders, order_df):
pair = symbol.replace('-','')
if (decision == 'BUY'):
if (o_optimal > pending_orders['BUY'][0][2]):
return(False)
else:
return(True)
elif (decision == 'SELL'):
if (o_optimal < pending_orders['SELL'][0][2]):
return(False)
else:
return(True)
|
noqcks/bmex-algo
|
src/bitmex.py
|
bitmex.py
|
py
| 8,025 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30821160760
|
import pandas
data = pandas.read_csv("Squirrel_Data.csv")
#the beginning and the end of the first and last lines
#gray_squirrels = data[data["Primary Fur Color"] == "Gray"]
#print(gray_squirrels)
gray_squirrels_count = len(data[data["Primary Fur Color"] == "Gray"])
red_squirrels_count = len(data[data["Primary Fur Color"] == "Cinnamon"])
black_squirrels_count = len(data[data["Primary Fur Color"] == "Black"])
#print(gray_squirrels_count)
data_dict ={
"Fur Color": ["Gray", "Cinnamon", "Black"],
"Count": [gray_squirrels_count, red_squirrels_count, black_squirrels_count]
}
#make a csv file from data
#new_data_frame = pandas.DataFrame(data_dict)
#new_data_frame.to_csv("squirrel_count.csv")
#print(("Made Squirrel_Count.csv"))
#make panda data frame from dictionary
student_dict = {
"student": ["Angela", "James", "Lily"],
"score": [56, 76, 98]
}
student_data_frame = pandas.DataFrame(student_dict)
print(student_data_frame)
#loop through a data frame
#for(key,value) in student_data_frame.items():
# print(value)
#loop through rows of a data frame
for (index, row) in student_data_frame.iterrows():
print(index)
print(row)
print(row.student)
print(row.score)
if row.student == "Angela":
print(row.score)
#{new_key:new_value for (index, row) in df.iterrows()}
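#the comprehension hinted above, applied to the student data frame:
student_scores = {row.student: row.score for (index, row) in student_data_frame.iterrows()}
print(student_scores) # {'Angela': 56, 'James': 76, 'Lily': 98}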
|
d3cod3d/notes
|
pandas_cheat_sheet.py
|
pandas_cheat_sheet.py
|
py
| 1,312 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22837983090
|
import pandas as pd
import networkx as nx
# def splitDataFrameList(df,target_column,separator):
# ''' df = dataframe to split,
# target_column = the column containing the values to split
# separator = the symbol used to perform the split
# returns: a dataframe with each entry for the target column separated, with each element moved into a new row.
# The values in the other columns are duplicated across the newly divided rows.
# '''
# def splitListToRows(row,row_accumulator,target_column,separator):
# split_row = row[target_column].split(separator)
# for s in split_row:
# new_row = row.to_dict()
# new_row[target_column] = s
# row_accumulator.append(new_row)
# new_rows = []
# df.apply(splitListToRows,axis=1,args = (new_rows,target_column,separator))
# new_df = pd.DataFrame(new_rows)
# return new_df
# df = pd.read_csv('../reading_and_cleaning/cleaned_podcasts.csv', sep='\t', index_col=0)
# df = df.replace(r'', np.nan, regex=True)
# df = df[pd.notnull(df['guests'])]
# split_hosts = splitDataFrameList(df, 'hosts', ', ')
#G1 = nx.from_pandas_dataframe(split_hosts, 'guests', 'hosts', edge_attr=['date', 'duration', 'podcast'], create_using=nx.MultiDiGraph())
# for index, row in split_hosts.iterrows():
# if(row['hosts'] == row['guests']):
# split_hosts.drop(index=index, inplace=True)
# guest_durations = split_hosts.groupby(['hosts', 'guests'])['duration'].sum()
# guest_durations = guest_durations.reset_index()
guest_durations = pd.read_csv('../reading_and_cleaning/guest_durations.csv', sep='\t', index_col=0)
G1 = nx.from_pandas_dataframe(guest_durations, 'guests', 'hosts', edge_attr=['duration'], create_using=nx.DiGraph())
G2 = nx.from_pandas_dataframe(guest_durations, 'guests', 'hosts', edge_attr=['duration'], create_using=nx.Graph())
##################################################################################################################################
remove = [node for node,degree in G2.degree().items() if degree < 3]
G2.remove_nodes_from(remove)
#print(nx.number_of_nodes(G2))
remove = [node for node,degree in G1.degree().items() if degree < 3]
G1.remove_nodes_from(remove)
#print(nx.number_of_nodes(G1))
##################################################################################################################################
pr = nx.pagerank(G1, weight='duration')
hubs, authorities = nx.hits(G1)
nodes_df = pd.DataFrame.from_dict(pr, orient='index')
nodes_df.rename(columns = {0:'pr'}, inplace = True)
nodes_df['hub'] = hubs.values()
nodes_df['auth'] = authorities.values()
#print(len(nodes_df), len(nx.eccentricity(G2).values()))
nodes_df['eccentricity'] = nx.eccentricity(G2).values()
nodes_df['closeness'] = nx.closeness_centrality(G2).values()
nodes_df['betweenness'] = nx.betweenness_centrality(G2).values()
nodes_df['degree_cen'] = nx.degree_centrality(G2).values()
nodes_df['eigen'] = nx.eigenvector_centrality(G2).values()
nodes_df.to_csv('node_values.csv', sep='\t')
|
brooksjaredc/podcast_network_analysis
|
analyzing_functions/set_node_attr.py
|
set_node_attr.py
|
py
| 2,988 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23142628393
|
# Neuon AI - PlantCLEF 2020
import tensorflow as tf
from preprocessing import inception_preprocessing
slim = tf.contrib.slim
import numpy as np
import cv2
from nets.inception_v4 import inception_v4
from nets import inception_utils
from PIL import Image
from six.moves import cPickle
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
# ============================================================= #
# Directories
# ============================================================= #
image_dir_parent_train = "PlantCLEF2020TrainingData"
image_dir_parent_test = "PlantCLEF2020TrainingData"
# Raw strings: "\r", "\04" and "\373" would otherwise be parsed as escape sequences
checkpoint_model = r"checkpoints\run16\040000.ckpt"
species_name_map_csv = r"list\clef2020_herbarium_species.csv"
classmap_txt = r"list\clef2020_herbarium_species_classid_map_to_index.txt"
herbarium_dictionary_file = "mean_emb_dict_997_herb_500_run16_40k_crops.pkl"
test_image = r"PlantCLEF2020TrainingData\photo\373\5859.jpg"
# ============================================================= #
# Parameters
# ============================================================= #
topN = 5 # Number of predictions to output
batch = 10
# Assign batch = 10,
# 10 variations of flipped cropped imgs (center, top left, top right, bottom left, bottom right,
# center flipped, top left flipped, top right flipped, bottom left flipped, bottom right flipped)
numclasses1 = 997 # Class number of Herbarium network
numclasses2 = 10000 # Class number of Field network
input_size = (299,299,3) # Image input size
# ============================================================= #
# Load data
# ============================================================= #
# ----- Read herbarium dictionary pkl file ----- #
with open(herbarium_dictionary_file,'rb') as fid1:
herbarium_dictionary = cPickle.load(fid1)
# ----- Map species index to folder ----- #
with open(classmap_txt,'r') as fid:
classmap = [x.strip().split(' ')[0] for x in fid.readlines()]
# ----- Map species name to index ----- #
species_name_map_df = pd.read_csv(species_name_map_csv, sep=',')
species_list = species_name_map_df['species'].to_list()
# ============================================================= #
# Run network / validate image
# ============================================================= #
# ----- Initiate tensors ----- #
x1 = tf.placeholder(tf.float32,(batch,) + input_size)
x2 = tf.placeholder(tf.float32,(batch,) + input_size)
y1 = tf.placeholder(tf.int32,(batch,))
y2 = tf.placeholder(tf.int32,(batch,))
is_training = tf.placeholder(tf.bool)
is_train = tf.placeholder(tf.bool, name="is_training")
# ----- Image preprocessing methods ----- #
train_preproc = lambda xi: inception_preprocessing.preprocess_image(
xi,input_size[0],input_size[1],is_training=True)
test_preproc = lambda xi: inception_preprocessing.preprocess_image(
xi,input_size[0],input_size[1],is_training=False)
def data_in_train1():
return tf.map_fn(fn = train_preproc,elems = x1,dtype=np.float32)
def data_in_test1():
return tf.map_fn(fn = test_preproc,elems = x1,dtype=np.float32)
def data_in_train2():
return tf.map_fn(fn = train_preproc,elems = x2,dtype=np.float32)
def data_in_test2():
return tf.map_fn(fn = test_preproc,elems = x2,dtype=np.float32)
data_in1 = tf.cond(
is_training,
true_fn = data_in_train1,
false_fn = data_in_test1
)
data_in2 = tf.cond(
is_training,
true_fn = data_in_train2,
false_fn = data_in_test2
)
def read_img(img_path):
img = []
try:
current_img = img_path
im = cv2.imread(current_img)
if im is None:
im = cv2.cvtColor(np.asarray(Image.open(current_img).convert('RGB')),cv2.COLOR_RGB2BGR)
im = cv2.resize(im,(input_size[0:2]))
if np.ndim(im) == 2:
im = cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)
else:
im = cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
# Center and Corner crops
im1 = im[0:260,0:260,:]
im2 = im[0:260,-260:,:]
im3 = im[-260:,0:260,:]
im4 = im[-260:,-260:,:]
im5 = im[19:279,19:279,:]
imtemp = [cv2.resize(ims,(input_size[0:2])) for ims in (im1,im2,im3,im4,im5)]
[img.append(ims) for ims in imtemp]
# Flip image
flip_img = cv2.flip(im, 1)
flip_im1 = flip_img[0:260,0:260,:]
flip_im2 = flip_img[0:260,-260:,:]
flip_im3 = flip_img[-260:,0:260,:]
flip_im4 = flip_img[-260:,-260:,:]
flip_im5 = flip_img[19:279,19:279,:]
flip_imtemp = [cv2.resize(imf,(input_size[0:2])) for imf in (flip_im1,flip_im2,flip_im3,flip_im4,flip_im5)]
[img.append(imf) for imf in flip_imtemp]
except:
print("Exception found: Image not read...")
pass
img = np.asarray(img,dtype=np.float32)/255.0
return img
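# Note: read_img() returns a (10, 299, 299, 3) float array in [0, 1]: five
# 260x260 crops (four corners + center) of the resized image plus their
# horizontal flips, i.e. a standard ten-crop test-time augmentation, which is
# why `batch` is fixed to 10 above.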
# ----- Construct network 1 ----- #
with slim.arg_scope(inception_utils.inception_arg_scope()):
logits,endpoints = inception_v4(data_in1,
num_classes=numclasses1,
is_training=is_training,
scope='herbarium')
herbarium_embs = endpoints['PreLogitsFlatten']
herbarium_bn = tf.layers.batch_normalization(herbarium_embs, training=is_train)
herbarium_feat = tf.contrib.layers.fully_connected(
inputs=herbarium_bn,
num_outputs=500,
activation_fn=None,
normalizer_fn=None,
trainable=True,
scope='herbarium'
)
herbarium_feat = tf.math.l2_normalize(
herbarium_feat,
axis=1
)
# ----- Construct network 2 ----- #
with slim.arg_scope(inception_utils.inception_arg_scope()):
logits2,endpoints2 = inception_v4(data_in2,
num_classes=numclasses2,
is_training=is_training,
scope='field')
field_embs = endpoints2['PreLogitsFlatten']
field_bn = tf.layers.batch_normalization(field_embs, training=is_train)
field_feat = tf.contrib.layers.fully_connected(
inputs=field_bn,
num_outputs=500,
activation_fn=None,
normalizer_fn=None,
trainable=True,
scope='field'
)
field_feat = tf.math.l2_normalize(
field_feat,
axis=1
)
feat_concat = tf.concat([herbarium_feat, field_feat], 0)
variables_to_restore = slim.get_variables_to_restore()
restorer = tf.train.Saver(variables_to_restore)
# ----- Run session ----- #
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
restorer.restore(sess, checkpoint_model)
test_image = read_img(test_image)
sample_embedding = sess.run(
field_feat,
feed_dict = {
x2:test_image,
is_training : False,
is_train : False
}
)
# Average center + corner crop embeddings
averaged_flip = np.mean(sample_embedding, axis=0)
reshaped_emb_sample = averaged_flip.reshape(1,500)
print('Getting herbarium dictionary...')
herbarium_emb_list = []
for herbarium_class, herbarium_emb in herbarium_dictionary.items():
herbarium_emb_list.append(np.squeeze(herbarium_emb))
herbarium_emb_list = np.array(herbarium_emb_list)
print('Comparing sample embedding with herbarium distance...')
similarity = cosine_similarity(reshaped_emb_sample, herbarium_emb_list)
print('Getting probability distribution...')
similarity_distribution = []
for sim in similarity:
new_distribution = []
for d in sim:
new_similarity = 1 - d # 1 - cosine value (d)
new_distribution.append(new_similarity)
similarity_distribution.append(new_distribution)
similarity_distribution = np.array(similarity_distribution)
# Apply inverse weighting with power of 5
probabilty_list = []
for d in similarity_distribution:
inverse_weighting = (1/np.power(d,5))/np.sum(1/np.power(d,5))
probabilty_list.append(inverse_weighting)
probabilty_list = np.array(probabilty_list)
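    # Worked example of the inverse weighting (hypothetical distances):
    # d = [0.2, 0.4] -> 1/d^5 = [3125.0, 97.65625] -> normalized ~ [0.970, 0.030],
    # so a herbarium class twice as close receives 2^5 = 32x the probability mass.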
print('Getting topN predictions...')
for prediction in probabilty_list:
topN_class_list = prediction.argsort()[-topN:][::-1]
topN_probability_list = np.sort(prediction)[-topN:][::-1]
counter = 0
for cl, prob in zip(topN_class_list, topN_probability_list):
counter += 1
class_index = classmap[int(cl)]
pred_name = species_list[int(cl)]
print('\nPREDICTION:', counter)
print('Species:', pred_name)
print('Class index (folder):', class_index)
print('Probability:', prob)
|
NeuonAI/plantclef2020_challenge
|
validate_image.py
|
validate_image.py
|
py
| 9,376 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32509221733
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sc
import csv
def myAnova(Matrice, pvalue_crit):
    # Initialization:
H = 0
F = 0
var_intra = 0
var_inter = 0
obs_moy = 0
eff_tot = 0
    # Class means:
for i in range(len(Matrice)):
obs_moy = sum(Matrice[i]) + obs_moy
eff_tot = len(Matrice[i]) + eff_tot
# print("Moyenne de la classe", i, "=", round(obs_moy/eff_tot,1))
# print("Effectif total =", eff_tot, "\n")
obs_moy = obs_moy / eff_tot
# print("Moyenne de la moyenne des classes =", round(obs_moy,1))
# print("Effectif Total =", eff_tot)
    # Within-group variance (spread around each class mean):
sum_var_intra = 0
for j in range(0, len(Matrice)):
for i in range(0, len(Matrice[j])):
sum_var_intra = sum_var_intra + (Matrice[j][i] - np.mean(Matrice[j])) ** 2
var_intra = (1 / eff_tot) * sum_var_intra
# print("Variance Intra = ", round(var_intra, 3))
# Variance inter (= moyenne des observations) :
sum_var_inter = 0
for j in range(0, len(Matrice)):
sum_var_inter = sum_var_inter + (len(Matrice[j]) * (np.mean(Matrice[j]) - obs_moy) ** 2)
var_inter = (1 / eff_tot) * sum_var_inter
# print("Variance Inter = ", round(var_inter, 3))
# Valeur de la stat F :
# var_tot = var_intra + var_inter
F = (var_inter / (len(Matrice) - 1)) / (var_intra / (eff_tot - len(Matrice)))
# print("Variance Totale = ", round(var_tot, 3))
# print("Satistique F = ", round(F, 3))
# Hypothèse H (=0 ou 1) :
p_value = sc.f.cdf(F, len(Matrice) - 1, eff_tot - len(Matrice))
# print("pValue = ", round(p_value, 4))
    if (p_value > 1 - pvalue_crit):
        H = False
        # print("H =", H, "--> So we reject H0.\n")
    else:
        H = True
        # print("H =", H, "--> So we accept H0.\n")
return (H, F, var_intra, var_inter)
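# Minimal usage sketch with made-up groups (hypothetical data): the group means
# differ strongly, so the test should reject H0 (equal means), i.e. H == False.
# H, F, v_intra, v_inter = myAnova([[1, 2, 1, 2], [8, 9, 8, 9]], 0.05)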
def readDat_csv(NomDuFichierCSV, nbline, nbcol):
# Auteur P. Maurine
# Date : 13/12/2019
# Prend le fichier csv NomDuFichierCSV de n lignes p colonnes et retourne
# une matrice de nxp floats
L = []
Sample = np.array([], dtype=float)
with open(NomDuFichierCSV, newline='') as f:
read = csv.reader(f, delimiter=";")
for row in read:
L.extend(row)
Sample = [float(i) for i in L]
Sample = np.reshape(Sample, [nbline, nbcol])
return (Sample)
def Temp_Dept(Sample, Departement):
Temp = np.zeros(len(Sample[1]) - 1)
indice = 0
for i in range(1, len(Sample[1])):
Temp[indice] = Sample[np.where(Sample[:, 0] == Departement), i]
indice += 1
return (Temp)
def Temp_An(Sample, Annee):
Temp = np.zeros((len(Sample) - 1))
indice = 0
for i in range(1, len(Sample)):
Temp[indice] = Sample[i, np.where(Sample[0, :] == Annee)]
indice += 1
return (Temp)
matCSV = readDat_csv("DonneesMeteoFrance.csv", 95, 47)
Temp_Fr_2000 = Temp_An(matCSV, 2000)
# print("Temperatures recorded in France in 2000:\n", Temp_Fr_2000, "\n")
Temp_Fr_2005 = Temp_An(matCSV, 2005)
# print("Temperatures recorded in France in 2005:\n", Temp_Fr_2005, "\n")
Temp_Fr_2010 = Temp_An(matCSV, 2010)
# print("Temperatures recorded in France in 2010:\n", Temp_Fr_2010, "\n")
Mat_An_1 = np.zeros((3, len(Temp_Fr_2000)))
for i in range(0, len(Temp_Fr_2000)):
Mat_An_1[0][i] = Temp_Fr_2000[i]
Mat_An_1[1][i] = Temp_Fr_2005[i]
Mat_An_1[2][i] = Temp_Fr_2010[i]
# print("Matrice pour 2000, 2005 et 2010 :\n", Mat_An_1, "\n")
H_An1, F_An1, var_intra_An1, var_inter_An1 = myAnova(Mat_An_1, 0.05)
print("Utilisation de la fonction myAnova (Année 2000, 2005 et 2010) :")
print("--> Hypothèse H =", H_An1, "(On rejette H0)")
print("--> Statistique F =", round(F_An1, 2))
print("--> Variance Intra =", round(var_intra_An1, 2))
print("--> Variance Inter =", round(var_inter_An1, 2), "\n")
# Temperatures in France in 1970, 1975 and 1980: ----------------------------
# Extract the data:
Temp_Fr_1970 = Temp_An(matCSV, 1970)
# print("Temperatures recorded in France in 1970:\n", Temp_Fr_1970, "\n")
Temp_Fr_1975 = Temp_An(matCSV, 1975)
# print("Temperatures recorded in France in 1975:\n", Temp_Fr_1975, "\n")
Temp_Fr_1980 = Temp_An(matCSV, 1980)
# print("Temperatures recorded in France in 1980:\n", Temp_Fr_1980, "\n")
Mat_An_2 = np.zeros((3, len(Temp_Fr_2000)))
for i in range(0, len(Temp_Fr_2000)):
Mat_An_2[0][i] = Temp_Fr_1970[i]
Mat_An_2[1][i] = Temp_Fr_1975[i]
Mat_An_2[2][i] = Temp_Fr_1980[i]
# print("Matrice pour 1970, 1975 et 1980 :\n", Mat_An_2, "\n")
H_An2, F_An2, var_intra_An2, var_inter_An2 = myAnova(Mat_An_2, 0.05)
print("Utilisation de la fonction myAnova (Année 1970, 1975 et 1980) :")
print("--> Hypothèse H =", H_An2, "(On rejette H0)")
print("--> Statistique F =", round(F_An2, 2))
print("--> Variance Intra =", round(var_intra_An2, 2))
print("--> Variance Inter =", round(var_inter_An2, 2), "\n")
|
Varelafv/TD6.py
|
TD2-EXO2.py
|
TD2-EXO2.py
|
py
| 5,319 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
7874070036
|
import random
words = ("Rock", "Paper", "Scissor")
cpu = random.choice(words)
me = input("Rock, Paper or Scissor: \n\n")
print(f"{cpu} \n")
if me == cpu:
print("Game Draw!!")
elif me == "Rock":
if cpu == "Scissor":
print("You Won!!")
elif cpu == "Paper":
print("You Lost!!")
elif me == "Paper":
if cpu == "Scissor":
print("You Lost!!")
elif cpu == "Rock":
print("You Won!!")
elif me == "Scissor":
if cpu == "Paper":
print("You Won!!")
elif cpu == "Rock":
print("You Lost!!")
|
Sheham30/Python
|
RockPaperScissor/01.py
|
01.py
|
py
| 586 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35226848142
|
import torch
import torch.nn.functional
from .calculate_ssim import ssim
from .utils import fspecial_gauss
def ms_ssim(image1: torch.Tensor, image2: torch.Tensor, filter_weight: torch.Tensor) -> float:
""" Multi scale structural similarity
Args:
        image1 (torch.Tensor): Original tensor picture.
        image2 (torch.Tensor): Target tensor picture.
filter_weight (torch.Tensor): Gaussian filter weight.
Returns:
MS_SSIM value.
"""
    assert image1.shape == image2.shape
    weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(image1.device, dtype=image2.dtype)
    mcs = []
    ssim_value = None
    # Downsampling moved inside the loop: the original pooled only once, after
    # the loop, so every "scale" was computed on identical full-size images.
    for _ in range(weights.shape[0]):
        ssim_value, cs_value = ssim(image1, image2, filter_weight=filter_weight, cs=True)
        mcs.append(cs_value)
        padding = (image1.shape[2] % 2, image1.shape[3] % 2)
        image1 = torch.nn.functional.avg_pool2d(image1, kernel_size=2, padding=padding)
        image2 = torch.nn.functional.avg_pool2d(image2, kernel_size=2, padding=padding)
    # Combine contrast terms of the coarser scales with the SSIM of the last scale.
    mcs = torch.stack(mcs, dim=0)
    out = torch.prod((mcs[:-1] ** weights[:-1].unsqueeze(1)) * (ssim_value ** weights[-1]), dim=0)
    return out
class MS_SSIM(torch.nn.Module):
def __init__(self) -> None:
super(MS_SSIM, self).__init__()
self.filter_weight = fspecial_gauss(11, 1.5)
def forward(self, image1_tensor: torch.Tensor, image2_tensor: torch.Tensor) -> torch.Tensor:
"""
Args:
image1_tensor (torch.Tensor): Original tensor picture.
image2_tensor (torch.Tensor): Target tensor picture.
Returns:
torch.Tensor.
"""
assert image1_tensor.shape == image2_tensor.shape
out = torch.mean(ms_ssim(image1_tensor, image2_tensor, filter_weight=self.filter_weight))
return out
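# Minimal usage sketch (assumed: 4D NCHW float tensors, spatially large enough
# to survive the four 2x downsamplings of the 5-scale loop):
# criterion = MS_SSIM()
# a = torch.rand(1, 3, 256, 256)
# print(criterion(a, a.clone()))  # ~1.0 for identical images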
|
avacaondata/SpainAI_Hackaton_ComputerVision
|
ESRGAN-PyTorch/esrgan_pytorch/utils/image_quality_assessment/calculate_mssim.py
|
calculate_mssim.py
|
py
| 1,882 |
python
|
en
|
code
| 1 |
github-code
|
6
|
2018426878
|
import unittest
import sys
import os
import tempfile
import shutil
from appliapps.flow.branch import Branch
from appliapps.flow.collate import Collate
from appliapps.flow.merge import Merge
from appliapps.flow.split import Split
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tdir = tempfile.mkdtemp(dir=".")
os.chdir(cls.tdir)
with open("input.ini", "w") as f:
f.write("""COMMENT = comm,ent
SOMEKEY = some, key
LOG_LEVEL = INFO
LOG_STORAGE = memory""")
def test1_branch(self):
sys.argv = ['--INPUT', 'input.ini', '--BRANCH', 'tandem.ini', 'omssa.ini', '--COMMENT', 'kommentar']
Branch.main()
assert os.path.exists('tandem.ini')
assert os.path.exists('omssa.ini')
def test2_collate(self):
sys.argv = ['--COLLATE', 'tandem.ini', 'omssa.ini', '--OUTPUT', 'collate.ini']
Collate.main()
assert os.path.exists('collate.ini')
def test3_split(self):
sys.argv = ['--INPUT', 'input.ini', '--SPLIT', 'split.ini', '--SPLIT_KEY', 'SOMEKEY']
Split.main()
assert os.path.exists('split.ini_0')
assert os.path.exists('split.ini_1')
def test4_merge(self):
sys.argv = ['--MERGE', 'split.ini', '--MERGED', 'merged.ini']
Merge.main()
assert os.path.exists('merged.ini_0')
@classmethod
def tearDownClass(cls):
os.chdir("..")
shutil.rmtree(cls.tdir)
|
lcb/applicake
|
tests/test_flow.py
|
test_flow.py
|
py
| 1,454 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74077927228
|
from PyQt5.QtCore import QFile, QTextStream, QIODevice
class StyleLoader:
def __init__(self, variables_path: str = None):
self._variables = {}
self._stylesheets = {}
self._init_variables(variables_path)
def get_merged_stylesheets(self, names: list):
return self._merge_stylesheets([self.get_stylesheet(name=name) for name in names])
def get_stylesheet(self, name: str) -> str:
stylesheet = self._stylesheets.get(name)
if stylesheet is None:
stylesheet = self._create_stylesheet(name)
self._stylesheets[name] = stylesheet
return stylesheet
def _merge_stylesheets(self, stylesheets: list) -> str:
return "\n".join(stylesheets)
def _create_stylesheet(self, path: str) -> str:
stylesheet = self._load_unmapped_stylesheet(path)
return self._map_stylesheet(stylesheet)
def _load_unmapped_stylesheet(self, path: str) -> str:
file = QFile(path)
if not file.open(QIODevice.ReadOnly | QIODevice.Text):
return ""
content = file.readAll().data().decode("utf-8")
file.close()
return content
def _map_stylesheet(self, stylesheet: str) -> str:
for variable_name, variable_value in self._variables.items():
stylesheet = stylesheet.replace(variable_name, variable_value)
return stylesheet
def _init_variables(self, path: str) -> None:
if path is None:
return
file = QFile(path)
if not file.open(QIODevice.ReadOnly | QIODevice.Text):
return
stream = QTextStream(file)
while not stream.atEnd():
line = stream.readLine().strip().replace(" ", "")
if line.startswith("@"):
variable_name, variable_value = line.split("=", 1)
self._variables[variable_name] = variable_value
file.close()
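# Minimal usage sketch (hypothetical paths; the variables file is assumed to
# hold lines such as "@primary = #3daee9", which _init_variables strips to
# "@primary=#3daee9" before splitting on "="):
# loader = StyleLoader("styles/variables.txt")
# widget.setStyleSheet(loader.get_merged_stylesheets(["styles/base.qss", "styles/buttons.qss"]))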
|
lennertsoffers/KeyCursor
|
key_cursor_config/model/StyleLoader.py
|
StyleLoader.py
|
py
| 1,926 |
python
|
en
|
code
| 1 |
github-code
|
6
|
19459348473
|
import os
import csv
def find_duration(indexlist, index, f):
index = index + 1
if 'LOG:' in f[index].split():
indexlist.append(index)
else:
find_duration(indexlist, index, f)
location = '/Users/karinstaring/thesis/script/finally/Karin_Staring_Geomatics_Thesis/results/queries/query2/'
for filename in os.listdir(location):
    if filename == 'query2_postgresql.log':
f = open(os.path.join(location, 'query2_postgresql.log'), "r").readlines()
cityobject_number = 0
all_times = []
i = 0
for line in f:
split_line = line.split()
if 'statement:' in split_line and 'SELECT' in split_line:
print(split_line)
if 'surfaces.id' in split_line:
if cityobject_number == 1:
all_times.append(one_query)
cityobject_number = 0
cityobject_number = cityobject_number + 1
one_query = []
indexlist = []
find_duration(indexlist, i, f)
first_index = indexlist[0]
duration = f[first_index]
one_query.append(float(duration.split('duration:')[1].split('ms')[0].replace(' ', '')))
else:
indexlist = []
find_duration(indexlist, i, f)
first_index = indexlist[0]
duration = f[first_index]
one_query.append(float(duration.split('duration:')[1].split('ms')[0].replace(' ', '')))
i = i + 1
all_times.append(one_query)
file_name = 'query2_postgresql.csv'
with open('/Users/karinstaring/thesis/script/finally/Karin_Staring_Geomatics_Thesis/results/queries/query2/' + str(file_name), 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for row in all_times:
writer.writerow(row)
|
kjstaring/scripts
|
results/queries/log_file_analysis.py
|
log_file_analysis.py
|
py
| 1,810 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2248378771
|
import random
Kaarten = ("2","3","4","5","6","7","8","9","10","boer","vrouw","heer","aas")
Kleur = ("harten ","klaveren ","schoppen ","ruiten ")
Deck = []
teller = 0
for x in Kleur:
    for i in Kaarten:
        Deck.append(x + i)
Deck.append("Joker1")
Deck.append("Joker2")
for y in range(7):
    # random.choice returns a single card string; random.choices would return a list
    RandomKaart = random.choice(Deck)
    teller += 1
    print(f"Card {teller}: {RandomKaart}")
print(Deck)
|
MaxQutimu/leren-programmeren
|
Leren Programmeren/M-04-Lijstjes en Samenstellingen/Deck.py
|
Deck.py
|
py
| 420 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32463868402
|
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
class DataLoader:
def __init__(self, batch_size = 4):
'''
num_workers should be 0 in window env (otherwise pipe_error occurs)
'''
print("---- downloading dataset from online... ----")
trainset, testset = self.download_data()
self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
shuffle=True, num_workers=0)
self.testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=0)
def download_data(self):
'''
download CIFAR-10 data which can be replaced by MNIST or etc later on.
'''
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
return trainset, testset
def show_image(self):
'''
Just for the testing.
'''
# get some random training images
dataiter = iter(self.trainloader)
        images, labels = next(dataiter)  # Python 3 iterator protocol
# show images
imshow(torchvision.utils.make_grid(images))
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
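if __name__ == "__main__":
    # Minimal usage sketch: downloads CIFAR-10 on first run, then previews one batch.
    loader = DataLoader(batch_size=4)
    loader.show_image()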
|
jindeok/XAI_torch_captum
|
XAI_torch/utils.py
|
utils.py
|
py
| 2,051 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32562155548
|
from django.conf.urls.defaults import *
from django.conf import settings
from gallery.feeds import Photos, Videos, Tags, TagContents, Comments
try:
import django_openidconsumer
except ImportError:
django_openidconsumer = None
feeds = {
'comments': Comments,
'photos': Photos,
'videos': Videos,
'tags': Tags,
'tag': TagContents,
}
urls = [
(r'^$', 'gallery.views.index'),
(r'^feeds/(?P<url>.*)/$',
'django.contrib.syndication.views.feed', {'feed_dict': feeds}),
(r'^tag/(?P<tag_name>[\w\+\-]*)/$', 'gallery.views.medias_in_tag'),
(r'^tag/(?P<tag_name>[\w\+\-]*)/(?P<page>\d+)/$',
'gallery.views.medias_in_tag'),
(r'^event/(?P<media_type>\w+)/(?P<media_id>\d+)/(?P<event_id>\d+)/$',
'gallery.views.medias_in_event'),
(r'^photo/(?P<photo_id>\d+)/(?P<tag_name>[\w\-]*)/$',
'gallery.views.medias_in_tag'),
(r'^photo/(?P<photo_id>\d+)/$', 'gallery.views.photo'),
(r'^video/(?P<video_id>\d+)/(?P<tag_name>[\w\-]*)/$',
'gallery.views.medias_in_tag'),
(r'^date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$',
'gallery.views.date'),
(r'^date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<page>\d+)/$',
'gallery.views.date'),
(r'^recent/$', 'gallery.views.recent'),
(r'^recent/(?P<tag_name>[\w\+\-]*)/$', 'gallery.views.recent'),
(r'^recent/(?P<tag_name>[\w\+\-]*)/(?P<page>\d+)/$', 'gallery.views.recent'),
(r'^events/$', 'gallery.views.events'),
(r'^event/(?P<event_id>\d+)/$', 'gallery.views.event'),
(r'^slideshow/(?P<tag_name>[\w\+\-]*)/(?P<photo_id>\d+)/$', 'gallery.views.slideshow'),
]
if django_openidconsumer:
urls.extend([
#(r'^comment/(?P<comment_id>\d+)/$', 'gallery.views.comment'),
(r'^openid/$', 'django_openidconsumer.views.begin', {'sreg': 'fullname'}),
(r'^openid/complete/$', 'django_openidconsumer.views.complete'),
(r'^openid/signout/$', 'django_openidconsumer.views.signout'),
(r'^status/cache/$', 'gallery.memcached_status.cache_status'),
])
media_path = settings.GALLERY_SETTINGS.get('media_path')
static_path = settings.GALLERY_SETTINGS.get('static_path')
if media_path:
urls.append((r'^media/(.*)$',
'django.views.static.serve', {'document_root': media_path}))
if static_path:
urls.append((r'^static/(.*)$',
'django.views.static.serve', {'document_root': static_path}))
urlpatterns = patterns('', *urls)
|
ginking/Gallery-1
|
urls.py
|
urls.py
|
py
| 2,422 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8915382368
|
from datetime import datetime
from astral import Astral
from pyHS100 import Discover, SmartPlug, SmartBulb
import time, socket, requests, pytz, simplejson
#Determines daylight status and occupancy status, and toggles the lights
#when needed based on that information
class SmartSwitchControl():
a = Astral()
city = a['Toronto']
count = 0
    def run(self):
        global lightson
        now = datetime.now(pytz.utc)
        # `city` is a class attribute, so it must be reached through self
        sun = self.city.sun(date=now, local=True)
        time.sleep(0.5)
        if now >= sun['dusk'] or now <= sun['dawn']:
            requests.post("https://maker.ifttt.com/trigger/motion/with/key/ctOeqYQKH00WbPhjj-fCRyio_MW6GdmEQ2as2h5bQvI")
            lightson = True  # was `Lightson`, which never set the declared global
            #print("Lights on")
        elif now >= sun['dawn']:
            #print("It's not dark yet")
            pass
#Creates JSON syntaxed representation of current smart device info and status
def updateStatus(self):
devices = []
deviceCount = 0
try:
for dev in Discover.discover().values():
ipBreak = str(dev).split(' (')[0]
ip = ipBreak.split('at ')[1]
idBreak = str(dev).split('(')[1]
ID = idBreak.split(')')[0]
statusBreak = str(dev).split('is_on: ')[1]
status = statusBreak.split(' - ')[0]
if status == "True":
status = "on"
if status == "False":
status = "off"
entry = {'id': "switch"+str(deviceCount),
'name': ID,
'is_on': status,
'ip': ip
}
devices.append(entry)
deviceCount += 1
return devices
except Exception as e:
print("Error in device detection...resetting", e)
pass
|
bradyjibanez/Voyager
|
occupantInference/smartSwitchControl.py
|
smartSwitchControl.py
|
py
| 2,417 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24604094520
|
# Packages
import time
import selenium
from selenium import webdriver
import NameExtractor
app_names = []
def decompiler(path, file_path):
global app_names, driver
driver = webdriver.Chrome(path)
driver.maximize_window()
app_names = NameExtractor.name_extractor(file_path)
for i in app_names:
try:
driver.refresh()
driver.get("https://www.apkdecompilers.com/")
time.sleep(5)
# find element
# extra code
print(app_names)
driver.get("https://www.apkdecompilers.com/")
driver.find_element_by_id('apk')
time.sleep(10)
# send element
element_send = driver.find_element_by_id("apk")
element_send.send_keys(i)
print(i)
driver.find_element_by_id("submit-btn").click()
time.sleep(270)
# download element
driver.find_element_by_xpath("/html/body/section[1]/div/div/div/div/div/div/div/div[2]/div/div/div/div/div/div/div[2]/a/b/u/h3").click()
time.sleep(50)
        except Exception:
# find element
# extra code
driver.refresh()
driver.get("http://www.javadecompilers.com/apk")
# send element
element_send = driver.find_element_by_id("upload_datafile")
element_send.send_keys(i)
print(i)
send_element = driver.find_element_by_xpath("/html/body/div[2]/div/div[2]/div/div[2]/div/form/div/div/div/div[2]/div[1]/div/button")
webdriver.ActionChains(driver).move_to_element(send_element).click(send_element).perform()
time.sleep(270)
# download element
down_element = driver.find_element_by_xpath("/html/body/div[2]/div/div[2]/div/div[2]/div/div[2]/div/div[1]/div/div[2]/div/div[1]/div[2]/a")
webdriver.ActionChains(driver).move_to_element(down_element).click(down_element).perform()
time.sleep(50)
driver.close()
driver_path = r"C:\Program Files (x86)\Python38-32\Chrome Driver\chromedriver.exe"
file_path = r"D:\Project\Privacy Detection\Apps\App Data.xlsx"
decompiler(driver_path,file_path)
|
Neilnarnaware/Privacy-Detection-of-Android-Application
|
Decompiler.py
|
Decompiler.py
|
py
| 2,296 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16106180145
|
#!/usr/bin/env python3
if __name__ == "__main__":
import argparse
import os
import benj
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", dest="h5ad", required=True)
ap.add_argument("-o", "--output", required=True)
ap.add_argument("--labels", required=True, nargs="+")
ap.add_argument("-b", "--batch", required=True)
ap.add_argument("--hvg", type=int, default=5000)
ap.add_argument("--compression", type=int, default=6)
ap.add_argument("--with-mean", dest="with_mean", action="store_true")
ap.add_argument("--without-mean", dest="with_mean", action="store_false")
ap.set_defaults(with_mean=False)
args = benj.parse_args(ap, ["log", "scanpy", "anndata"])
if "subset" not in args or args["subset"] is None:
args["subset"] = []
sw = benj.stopwatch()
with sw("Reading H5AD"):
adata = benj.parse_anndata(**args)
adata = benj.integrate_rna(adata,
batch=args["batch"], hvg=args["hvg"], use_scaling=True, use_harmony=True, use_bbknn=False, use_rgg=False, plot=args["labels"], target_sum=1e4,
output=args["output"], compression=args["compression"])
with sw("Training celltypist for " + ",".join(label)):
import celltypist
import scanpy as sc
ct = celltypist.train(adata.raw.to_adata(), labels=adata.obs.loc[:, labels], genes=adata.var_names, n_jobs=-1, with_mean=args["with_mean"])
ct.write(os.path.join(sc.settings.figdir, f"celltypist_{label}.pkl"))
|
KellisLab/benj
|
scripts/integrate_and_train.py
|
integrate_and_train.py
|
py
| 1,676 |
python
|
en
|
code
| 2 |
github-code
|
6
|
11735575748
|
"""Add File table
Revision ID: 3822d04489a0
Revises:
Create Date: 2021-06-26 16:18:52.167545
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3822d04489a0'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('filepath', sa.String(), nullable=False),
sa.Column('storage', sa.String(), nullable=False),
sa.Column('primary', sa.Boolean(), nullable=False),
sa.Column('has_thumbnail', sa.Boolean(), nullable=False),
sa.Column('category', sa.Enum('unknown', 'other', 'image', 'photo', 'scan', 'dump', 'dump_metadata', 'text', 'prose', 'transcription', 'collection', name='filecategory'), nullable=True),
sa.Column('title', sa.String(), nullable=True),
sa.Column('comment', sa.Text(), nullable=True),
sa.Column('analyzed', sa.DateTime(), nullable=True),
sa.Column('upload_date', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('asset_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['asset_id'], ['assets.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('files')
# ### end Alembic commands ###
|
retroherna/rhinventory
|
alembic/versions/3822d04489a0_add_file_table.py
|
3822d04489a0_add_file_table.py
|
py
| 1,531 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33696612007
|
from __future__ import print_function
from seq import *
from parentSeq import *
from ore_algebra import *
import time
def test1():
R,n = ZZ['n'].objgen()
A,Sn = OreAlgebra(R, 'Sn').objgen()
a1 = Sn**2 - Sn - 1
init1 = [0,1]
range_ = 1000
    sum_ = 0
    for _ in range(range_):
        begin = time.time()
        m1 = a1.to_list(init1, 100)[-1]
        end = time.time()
        sum_ += end - begin
    print("Single call to to_list() :", sum_)
    sum_ = 0
    for _ in range(range_):
        begin = time.time()
        init1 = [0,1]
        for i in range(2,100):
            a1.to_list(init1, 3, start=i-2, append=True)
            init1 = init1[1:]
        m2 = init1[-1]
        end = time.time()
        sum_ += end - begin
    print("Multiple calls to to_list() :", sum_)
if m1!=m2:
print("ERROR")
print(m1, "\n", m2)
else :
print ("SUCCESS")
def test2():
pass
if __name__ == "__main__":
#n = ZZ['n'].gen()
#P = ParentSeqRec (ZZ['n'], 'Sn', RR)
#a = P ([0,1], Sn^2 - Sn - 1)
test2()
|
Kiskuit/SuitesPRecursives
|
src/test.py
|
test.py
|
py
| 1,087 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26822225482
|
import hashlib
import base64
import sys
from Crypto.Cipher import AES
from PyQt4 import QtGui, QtCore
from eclib import EC
from eclib import DiffieHellman
from eclib import EC
from eclib import DiffieHellman
class MainWindow(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.setWindowTitle("Elliptic Curve Cryptography")
self.setWindowIcon(QtGui.QIcon("icon.png"))
self.resize(500, 650)
self.setMinimumSize(500, 650)
self.center()
self.tab_widget = QtGui.QTabWidget()
tab = QtGui.QWidget()
tab2 = QtGui.QWidget()
p3_vertical = QtGui.QVBoxLayout(tab)
self.tab_widget.addTab(tab, "EC Diffie Hellman")
# ECDH GUI DECLARATIONS
labele1 = QtGui.QLabel(" Elliptical Curve EQUATION ")
labele2 = QtGui.QLabel("y^3 = x^2 + ax + b( mod q )")
labele1.setStyleSheet('font-size: 13pt')
labele2.setStyleSheet('font-size: 12pt')
labele1.setAlignment(QtCore.Qt.AlignCenter)
labele2.setAlignment(QtCore.Qt.AlignCenter)
labela = QtGui.QLabel("Enter value of a:")
labelb = QtGui.QLabel("Enter value of b:")
labelc = QtGui.QLabel("Enter value of q (prime):")
label_PrivA = QtGui.QLabel("Enter Private Key of A:")
label_PrivB = QtGui.QLabel("Enter Private Key of B:")
label_result = QtGui.QLabel("ENCODED / DECODED TEXT")
label_result.setStyleSheet('font-size: 12pt')
textEdit = QtGui.QTextEdit()
button_file = QtGui.QPushButton("Import File")
button_encrypt = QtGui.QPushButton("Encrypt")
button_decrypt = QtGui.QPushButton("Decrypt")
button_file.clicked.connect(self.importfile)
button_encrypt.clicked.connect(self.ecdhencrypt)
button_decrypt.clicked.connect(self.ecdhdecrypt)
self.vala = QtGui.QTextEdit()
self.valb = QtGui.QTextEdit()
self.valc = QtGui.QTextEdit()
self.apriv = QtGui.QTextEdit()
self.bpriv = QtGui.QTextEdit()
self.textEdit = QtGui.QTextEdit()
self.vala.setMaximumHeight(labela.sizeHint().height()*1.5)
self.valb.setMaximumHeight(labelb.sizeHint().height()*1.5)
self.valc.setMaximumHeight(labelc.sizeHint().height()*1.5)
self.apriv.setMaximumHeight(label_PrivA.sizeHint().height()*1.5)
self.bpriv.setMaximumHeight(label_PrivB.sizeHint().height()*1.5)
hbox = QtGui.QHBoxLayout()
hbox1 = QtGui.QHBoxLayout()
vbox1 = QtGui.QHBoxLayout()
vbox2 = QtGui.QHBoxLayout()
# GUI LAYOUT
p3_vertical.addWidget(labele1)
p3_vertical.addWidget(labele2)
vbox1.addWidget(labela)
vbox1.addWidget(self.vala)
vbox2.addWidget(labelb)
vbox2.addWidget(self.valb)
hbox1.addLayout(vbox1)
hbox1.addLayout(vbox2)
p3_vertical.addLayout(hbox1)
p3_vertical.addWidget(labelc)
p3_vertical.addWidget(self.valc)
p3_vertical.addWidget(label_PrivA)
p3_vertical.addWidget(self.apriv)
p3_vertical.addWidget(label_PrivB)
p3_vertical.addWidget(self.bpriv)
p3_vertical.addWidget(button_file)
p3_vertical.addWidget(label_result)
p3_vertical.addWidget(self.textEdit)
hbox.addWidget(button_encrypt)
hbox.addWidget(button_decrypt)
p3_vertical.addStretch(1)
p3_vertical.addLayout(hbox)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.tab_widget)
self.setLayout(vbox)
# GUI Functionality
def ecdhencrypt(self):
global A, B, C, PrivA, PrivB
A = int(self.vala.toPlainText())
B = int(self.valb.toPlainText())
C = int(self.valc.toPlainText())
PrivA = int(self.apriv.toPlainText())
PrivB = int(self.bpriv.toPlainText())
txt = data
ec = EC(A, B, C)
g, _ = ec.at(7)
assert ec.order(g) <= ec.q
dh = DiffieHellman(ec, g)
apub = dh.gen(PrivA)
bpub = dh.gen(PrivB)
assert dh.secret(PrivA, bpub) == dh.secret(PrivB, apub)
BLOCK_SIZE = 64
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
        x, y = dh.secret(PrivA, bpub)  # shared secret: A's private key, B's public key
        secret = x + y
        # hash the shared secret itself (the original hashed the literal string 'secret')
        secret = hashlib.md5(str(secret).encode()).hexdigest()
        cipher = AES.new(secret)
encoded = EncodeAES(cipher, txt)
self.textEdit.setText(encoded)
fileName = open('Encrypted.txt', 'w')
fileName.write(encoded)
fileName.close()
def ecdhdecrypt(self):
global A, B, C, PrivA, PrivB
A = int(self.vala.toPlainText())
B = int(self.valb.toPlainText())
C = int(self.valc.toPlainText())
PrivA = int(self.apriv.toPlainText())
PrivB = int(self.bpriv.toPlainText())
txt = data
ec = EC(A, B, C)
g, _ = ec.at(7)
assert ec.order(g) <= ec.q
dh = DiffieHellman(ec, g)
apub = dh.gen(PrivA)
bpub = dh.gen(PrivB)
assert dh.secret(PrivA, bpub) == dh.secret(PrivB, apub)
BLOCK_SIZE = 64
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
        x, y = dh.secret(PrivA, bpub)  # must match the key derivation used in ecdhencrypt
        secret = x + y
        # hash the shared secret itself (the original hashed the literal string 'secret')
        secret = hashlib.md5(str(secret).encode()).hexdigest()
        cipher = AES.new(secret)
decoded = DecodeAES(cipher, txt)
self.textEdit.setText(decoded)
fileName = open('Decrypted.txt', 'w')
fileName.write(decoded)
fileName.close()
def importfile(self):
global data
fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file', '/home')
f = open(fname, 'r')
with f:
data = f.read()
def center(self):
screen = QtGui.QDesktopWidget().screenGeometry()
size = self.geometry()
self.move(
(screen.width()-size.width())/2, (screen.height()-size.height())/2)
def loadValues(self):
global ec
global eg
idx = self.tab_widget.currentIndex()
if idx == 1:
global g
global pub
ec = EC(a, b, q)
g, _ = ec.at(7)
eg = ElGamal(ec, g)
pub = eg.gen(priv)
print_pub = str(pub[0]) + "," + str(pub[1])
self.elg_key.insertPlainText(print_pub)
app = QtGui.QApplication(sys.argv)
frame = MainWindow()
frame.show()
sys.exit(app.exec_())
|
iCHAIT/Elliptical-Curve-Cryptography
|
gui.py
|
gui.py
|
py
| 6,704 |
python
|
en
|
code
| 24 |
github-code
|
6
|
21158960410
|
#!/usr/bin/env python
# Remove bootstrap solutions from a list of mss
from __future__ import print_function
import sys
import pyrap.tables as pt
def remove_columns(mslist_name,colnames=['SCALED_DATA']):
if mslist_name.endswith('.ms'):
mslist=[mslist_name]
else:
mslist=[s.strip() for s in open(mslist_name).readlines()]
for ms in mslist:
t = pt.table(ms)
cpresent = t.colnames()
t.close()
if isinstance(colnames,str):
colnames=[colnames]
        for colname in colnames:
            if colname in cpresent:
                print('Removing',colname,'from',ms)
                t=pt.table(ms,readonly=False)
t.removecols(colname)
t.close()
else:
print('Table',ms,'has no',colname,'column')
if __name__=='__main__':
remove_columns(sys.argv[1],sys.argv[2])
|
mhardcastle/ddf-pipeline
|
utils/remove_bootstrap.py
|
remove_bootstrap.py
|
py
| 955 |
python
|
en
|
code
| 22 |
github-code
|
6
|
5145788640
|
import enum
from ocrdgen.font.font import FontManager
from ocrdgen.image.background import BgManager
from pathlib import Path
import numpy as np
from PIL import ImageDraw, Image
from ocrdgen.ops import boxes_ops
import cv2 as cv
from collections import OrderedDict
from .base import BaseDrawer
from ocrdgen import models
class WordDrawer(BaseDrawer):
def __init__(self, image: Image, font, text, xy, align="left", anchor=None,
image_mode="RGBA", fill=(0,0,0)):
super().__init__(image=image, font=font, text=text, xy=xy,
anchor=anchor,
align=align, image_mode=image_mode, fill=fill)
text_test = self.text.strip().split(" ")
        # print(len(text_test), text_test)
assert len(text_test) == 1, f"Error, expected one word only, but more word is given!"
def draw_text(self, image=None):
if type(image) == type(None):
image = self.image.copy()
idraw = ImageDraw.Draw(image)
idraw.text(self.xy, self.text, font=self.font, fill=self.fill)
# idraw.textbbox()
return image
def draw_bbox(self, image, color=(255,0,0,255), thick=1):
xmin, ymin, xmax, ymax = self.xymm_with_offset(self.text, self.x, self.y)
np_img = cv.rectangle(np.array(image), (xmin, ymin), (xmax, ymax), color, thick)
return Image.fromarray(np_img)
def draw(self, image=None, draw_bbox=False, bbox_color=(255, 0, 0, 255), bbox_thick=1):
image = self.draw_text(image)
bbox = self.wordbbox()
if draw_bbox:
image = self.draw_bbox(image, color=bbox_color, thick=bbox_thick)
return image, bbox
def wordbbox(self):
wordbox = models.WordBox(text=self.text, bbox=self.textbbox(), chars=self.charbbox())
return wordbox
def charbbox(self):
data = []
xmin, ymin = self.x, self.y
for i in range(len(self.text)):
if len(self.text[i])>0:
xymm = self.xymm_with_offset(self.text[i], xmin, ymin)
xywh = boxes_ops.xymm_to_xywh(xymm)
dt = models.CharBox(char=self.text[i], bbox=xywh, seq_id=i)
# dt = (self.text[i], xywh)
_, _, xmax, _ = xymm
xmin = xmax
data.append(dt)
return data
def draw_char_text(self, image=None):
image = self.draw_text(image)
return image
def draw_char_bbox(self, image, color=(0,255,0,255), thick=1):
image: np.ndarray = np.array(image)
charboxes = self.charbbox()
for idx, charbox in enumerate(charboxes):
char, xywh = charbox.char, charbox.bbox
xmin,ymin,xmax,ymax = boxes_ops.xywh_to_xymm(xywh)
if char!=" ":
# xmin, ymin, xmax, ymax = self.xymm_with_offset(char, x, y)
image = cv.rectangle(image, (xmin, ymin), (xmax, ymax), color, thick)
image: Image = Image.fromarray(image)
return image
def draw_char(self, image=None, draw_bbox=False, bbox_color=(0,255,0,255), bbox_thick=1):
image = self.draw_char_text(image)
bbox = self.charbbox()
if draw_bbox:
image = self.draw_char_bbox(image, color=bbox_color, thick=bbox_thick)
return image, bbox
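# Minimal usage sketch (hypothetical font path; BaseDrawer is assumed to accept
# a PIL image plus a PIL ImageFont, as the constructor signature suggests):
# from PIL import Image, ImageFont
# img = Image.new("RGBA", (400, 100), (255, 255, 255, 255))
# font = ImageFont.truetype("DejaVuSans.ttf", 32)
# drawer = WordDrawer(img, font, "hello", xy=(10, 10))
# out, wordbox = drawer.draw(draw_bbox=True)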
|
nunenuh/ocrdgen
|
ocrdgen/drawer/word.py
|
word.py
|
py
| 3,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13842222350
|
import cv2
import numpy as np
import math
from numpy import random as nr
import sys
def lines(code=None, step=12):
l = np.zeros((h, w, 3), np.uint8)
l[:] = 255
if code == 0: # - horizontal
for i in range(0, h, step):
l = cv2.line(l, (0, i), (w, i), black)
elif code == 1: # | horizontal
for i in range(0, w, step):
l = cv2.line(l, (i, 0), (i, h), black)
elif code == 2: # \ 45
l = lines(code=3, step=step)
l = cv2.flip(l, 0)
elif code == 3: # / 45
for i in range(0, 2*w, step):
l = cv2.line(l, (i, 0), (0, i), black)
elif code == 4: # / 22.5
cotheta = 2.4142
tantheta = 0.4142
for i in range(0, int(w+h*cotheta), step):
l = cv2.line(l, (i, 0), (0, int(i*tantheta)), black)
elif code == 5: # / 67.5
cotheta = 0.4142
tantheta = 2.4142
for i in range(0, int(w+h*cotheta), step):
l = cv2.line(l, (i, 0), (0, int(i*tantheta)), black)
else:
pass # empty
return l
def tsh(img, stage=None, Numberoftsh=None, equalizeHist=False):
type = cv2.THRESH_BINARY
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if equalizeHist == False:
pass
else:
img_gray = cv2.equalizeHist(img_gray, img_gray)
_, th = cv2.threshold(img_gray, 255-int(((stage)/Numberoftsh)*255), 255, type)
th = cv2.cvtColor(th, cv2.COLOR_GRAY2BGR)
return th
def createmasks(img, Numberoftsh=None):
global masks
for i in range(Numberoftsh):
if seqline[i] == 4:
step = 16
elif seqline[i] == 5:
step = 10
else:
step = 8
if masks is not None:
masks = np.append(masks, np.expand_dims(lines(code=seqline[i], step=step), axis=0), axis=0)
else:
masks = lines(code=seqline[i], step=step)
masks = np.expand_dims(masks, axis=0)
#print(masks.shape)
return masks
def crosshatching(img, Numberoftsh=None, equalizeHist=False, color=False):
global frame, flag, w, h
h, w, _ = img.shape
frame = np.zeros((h, w, 3), np.uint8)
frame[:] = 255
if flag is False:
createmasks(img, Numberoftsh=Numberoftsh)
flag = True
for i in range(Numberoftsh):
th = tsh(img, stage=i, Numberoftsh=Numberoftsh, equalizeHist=equalizeHist)
dst = cv2.addWeighted(masks[i], 1, th, 1, 0)
dst = cv2.bitwise_and(dst, frame)
frame = dst
if color is False:
return frame
else:
frame = cv2.bitwise_or(frame, img)
return frame
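# Added note: each threshold stage above whitens progressively darker regions;
# cv2.addWeighted(mask, 1, th, 1, 0) saturates to white where either input is
# white (an OR), and the running bitwise_and keeps hatch lines only where the
# current stage is still black, so darker areas accumulate more hatch layers.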
def showimage(img, Numberoftsh = 7, equalizeHist=False):
global w, h
h, w, _ = img.shape
dst = crosshatching(img, Numberoftsh=Numberoftsh, equalizeHist=equalizeHist, color=True)
#dst = cv2.resize(dst, (int(w/2), int(h/2)))
cv2.imshow('dst', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
def playvideo(video=None, Numberoftsh=None, color=False):
global w, h
if video is None:
cap = cv2.VideoCapture(0)
else:
cap = video
while True:
_, frame = cap.read()
if video is None:
frame = cv2.flip(frame, 1)
frame = crosshatching(frame, Numberoftsh=Numberoftsh, equalizeHist=False, color=color)
cv2.imshow('main', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
black = (0, 0, 0)
white = (255, 255, 255)
#red = (0, 0, 255)
#green = (0, 255, 0)
#blue = (255, 0, 0)
seqline = (-1, 0, 4, 3, 5, 2, 1)
masks = None
flag = False # existence of line masks
if __name__ == "__main__":
if len(sys.argv) > 1:
pass
else:
#img = cv2.imread('eagle.jpg')
#h, w, _ = img.shape
#img = cv2.resize(img, (int(w/8), int(h/8)))
#showimage(img)
#video = cv2.VideoCapture(0)
#video = cv2.VideoCapture('video/Wildlife.wmv')
playvideo(video=None, Numberoftsh=7, color=True)
|
ZZ76/filters
|
crosshatching.py
|
crosshatching.py
|
py
| 3,968 |
python
|
en
|
code
| 4 |
github-code
|
6
|
25145519000
|
from dataclasses import dataclass
from typing import Any
from msg.serializers import BaseExpenseCreationSerializer, BaseExpensePropertySerializer
@dataclass
class ExpenseCreationHelper:
data: dict
def __call__(self, *args: Any, **kwds: Any) -> Any:
if not self._parse_data():
return
return True
def _parse_data(self):
user_id = self.data.pop('user_id', None)
expense_category_name = self.data.pop('expense_category_name', None)
expense_ser = BaseExpenseCreationSerializer(data={
'user_id': user_id,
'expense_category_name': expense_category_name
})
expense_ser.is_valid(raise_exception=True)
expense_inst = expense_ser.save()
self.data['expense_id'] = expense_inst.id
expense_property_ser = BaseExpensePropertySerializer(data=self.data)
expense_property_ser.is_valid(raise_exception=True)
expense_property_ser.save()
return True
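# Usage sketch (added; the field names follow _parse_data above, remaining
# keys are assumed to be BaseExpensePropertySerializer fields):
# helper = ExpenseCreationHelper(data={
#     'user_id': 1,
#     'expense_category_name': 'food',
#     'amount': 100,  # hypothetical property field
# })
# helper()  # creates the expense, then saves its properties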
|
enamsaraev/tg_api
|
msg/helpers.py
|
helpers.py
|
py
| 1,000 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38081298604
|
import asyncio
import threading
from sqlalchemy.orm import Query
from ConsumerService.consumer.persistence import db
from ConsumerService.consumer.business import manage_event_data
from aio_pika import connect, ExchangeType
from flask import Flask, request, jsonify, Response
app = Flask(__name__)
@app.route('/getPatientMedsPeriods')
def get_patient_med_period():
p_id = request.args['p_id']
med = request.args['med']
patient_meds_periods = []
result = manage_event_data.get_patient_med_period(p_id, med)
if isinstance(result,Query):
_len = result.count()
else:
_len = len(result)
if _len > 0:
for r in result:
a = {"p_id": r.p_id, "medication_name": r.medication_name,
"medication_period_start": r.medication_period_start,
"medication_period_end": r.medication_period_end}
patient_meds_periods.append(a)
return jsonify(patient_meds_periods)
else: # No elements in the result --> return NOT_FOUND_404
return Response('No medication periods have been found for patient {} with medication: {}'.format(p_id, med),
404)
@app.route('/getPatientAllMedsPeriods')
def get_patient_all_meds_period():
p_id = request.args['p_id']
patient_meds_periods = []
result = manage_event_data.get_patient_all_meds_periods(p_id)
if isinstance(result,Query):
_len = result.count()
else:
_len = len(result)
if _len > 0:
for r in result:
a = {"p_id": r.p_id, "medication_name": r.medication_name,
"medication_period_start": r.medication_period_start,
"medication_period_end": r.medication_period_end}
patient_meds_periods.append(a)
return jsonify(patient_meds_periods)
else: # No elements in the result --> return NOT_FOUND_404
return Response('No medication periods have been found for patient: {}'.format(p_id), 404)
async def main(loop):
print("Connecting to the PostgreSQL database...")
if not db.is_table_exist():
conn = db.create_tables()
else:
conn = db.get_connection()
connection = await connect(host="localhost",
login="admin",
password="password",
loop=loop
)
# connection = await connect(host=os.environ.get('RABBIT_HOST'),
# login=os.environ.get('RABBIT_USER'),
# password=os.environ.get('RABBIT_PASS'),
# loop=loop
# )
async with connection:
# Creating a channel
channel = await connection.channel()
# Declaring the queue
queue = await channel.declare_queue(name='events', auto_delete=True)
exchange = await channel.declare_exchange("meds", ExchangeType.FANOUT)
routing_key = "new.events"
await queue.bind(exchange, routing_key)
async with queue.iterator() as queue_iter:
async for message in queue_iter:
async with message.process():
event_str = message.body.decode('UTF-8')
manage_event_data.save_event(event_str)
if __name__ == '__main__':
# start flask on separate thread
threading.Thread(target=lambda : app.run(debug=True, use_reloader=False)).start()
# Get the current event loop.
# If there is no current event loop set in the current OS thread,
# the OS thread is main, and set_event_loop() has not yet been called,
# asyncio will create a new event loop and set it as the current one.
loop = asyncio.get_event_loop()
if loop is not None:
loop.run_until_complete(main(loop))
else:
print("Error establishing event loop!")
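# Publisher sketch (added for illustration; host/credentials mirror the
# consumer above and the message body is assumed to be a UTF-8 string):
# from aio_pika import Message
# async def publish(loop, event_str):
#     connection = await connect(host="localhost", login="admin",
#                                password="password", loop=loop)
#     async with connection:
#         channel = await connection.channel()
#         exchange = await channel.declare_exchange("meds", ExchangeType.FANOUT)
#         await exchange.publish(Message(event_str.encode("UTF-8")),
#                                routing_key="new.events")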
|
oran1980/clewMedical
|
application-assignment/ConsumerService/consumer/main.py
|
main.py
|
py
| 3,890 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26113020545
|
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "03/04/2017"
# TODO
# keep aspect ratio managed here?
# smarter dirty flag handling?
import datetime as dt
import math
import weakref
import logging
import numbers
from typing import Optional, Union
from collections import namedtuple
import numpy
from .... import qt
from ...._glutils import gl, Program
from ..._utils import checkAxisLimits, FLOAT32_MINPOS
from .GLSupport import mat4Ortho
from .GLText import Text2D, CENTER, BOTTOM, TOP, LEFT, RIGHT, ROTATE_270
from ..._utils.ticklayout import niceNumbersAdaptative, niceNumbersForLog10
from ..._utils.dtime_ticklayout import calcTicksAdaptive, bestFormatString
from ..._utils.dtime_ticklayout import timestamp
_logger = logging.getLogger(__name__)
# PlotAxis ####################################################################
class PlotAxis(object):
"""Represents a 1D axis of the plot.
This class is intended to be used with :class:`GLPlotFrame`.
"""
def __init__(self, plotFrame,
tickLength=(0., 0.),
foregroundColor=(0., 0., 0., 1.0),
labelAlign=CENTER, labelVAlign=CENTER,
titleAlign=CENTER, titleVAlign=CENTER,
titleRotate=0, titleOffset=(0., 0.)):
self._ticks = None
self._plotFrameRef = weakref.ref(plotFrame)
self._isDateTime = False
self._timeZone = None
self._isLog = False
self._dataRange = 1., 100.
self._displayCoords = (0., 0.), (1., 0.)
self._title = ''
self._tickLength = tickLength
self._foregroundColor = foregroundColor
self._labelAlign = labelAlign
self._labelVAlign = labelVAlign
self._titleAlign = titleAlign
self._titleVAlign = titleVAlign
self._titleRotate = titleRotate
self._titleOffset = titleOffset
@property
def dataRange(self):
"""The range of the data represented on the axis as a tuple
of 2 floats: (min, max)."""
return self._dataRange
@dataRange.setter
def dataRange(self, dataRange):
assert len(dataRange) == 2
assert dataRange[0] <= dataRange[1]
dataRange = float(dataRange[0]), float(dataRange[1])
if dataRange != self._dataRange:
self._dataRange = dataRange
self._dirtyTicks()
@property
def isLog(self):
"""Whether the axis is using a log10 scale or not as a bool."""
return self._isLog
@isLog.setter
def isLog(self, isLog):
isLog = bool(isLog)
if isLog != self._isLog:
self._isLog = isLog
self._dirtyTicks()
@property
def timeZone(self):
"""Returnss datetime.tzinfo that is used if this axis plots date times."""
return self._timeZone
@timeZone.setter
def timeZone(self, tz):
"""Sets dateetime.tzinfo that is used if this axis plots date times."""
self._timeZone = tz
self._dirtyTicks()
@property
def isTimeSeries(self):
"""Whether the axis is showing floats as datetime objects"""
return self._isDateTime
@isTimeSeries.setter
def isTimeSeries(self, isTimeSeries):
isTimeSeries = bool(isTimeSeries)
if isTimeSeries != self._isDateTime:
self._isDateTime = isTimeSeries
self._dirtyTicks()
@property
def displayCoords(self):
"""The coordinates of the start and end points of the axis
in display space (i.e., in pixels) as a tuple of 2 tuples of
2 floats: ((x0, y0), (x1, y1)).
"""
return self._displayCoords
@displayCoords.setter
def displayCoords(self, displayCoords):
assert len(displayCoords) == 2
assert len(displayCoords[0]) == 2
assert len(displayCoords[1]) == 2
displayCoords = tuple(displayCoords[0]), tuple(displayCoords[1])
if displayCoords != self._displayCoords:
self._displayCoords = displayCoords
self._dirtyTicks()
@property
def devicePixelRatio(self):
"""Returns the ratio between qt pixels and device pixels."""
plotFrame = self._plotFrameRef()
return plotFrame.devicePixelRatio if plotFrame is not None else 1.
@property
def title(self):
"""The text label associated with this axis as a str in latin-1."""
return self._title
@title.setter
def title(self, title):
if title != self._title:
self._title = title
self._dirtyPlotFrame()
@property
def titleOffset(self):
"""Title offset in pixels (x: int, y: int)"""
return self._titleOffset
@titleOffset.setter
def titleOffset(self, offset):
if offset != self._titleOffset:
self._titleOffset = offset
self._dirtyTicks()
@property
def foregroundColor(self):
"""Color used for frame and labels"""
return self._foregroundColor
@foregroundColor.setter
def foregroundColor(self, color):
"""Color used for frame and labels"""
assert len(color) == 4, \
"foregroundColor must have length 4, got {}".format(len(self._foregroundColor))
if self._foregroundColor != color:
self._foregroundColor = color
self._dirtyTicks()
@property
def ticks(self):
"""Ticks as tuples: ((x, y) in display, dataPos, textLabel)."""
if self._ticks is None:
self._ticks = tuple(self._ticksGenerator())
return self._ticks
def getVerticesAndLabels(self):
"""Create the list of vertices for axis and associated text labels.
:returns: A tuple: List of 2D line vertices, List of Text2D labels.
"""
vertices = list(self.displayCoords) # Add start and end points
labels = []
tickLabelsSize = [0., 0.]
font = qt.QApplication.instance().font()
xTickLength, yTickLength = self._tickLength
xTickLength *= self.devicePixelRatio
yTickLength *= self.devicePixelRatio
for (xPixel, yPixel), dataPos, text in self.ticks:
if text is None:
tickScale = 0.5
else:
tickScale = 1.
label = Text2D(text=text,
font=font,
color=self._foregroundColor,
x=xPixel - xTickLength,
y=yPixel - yTickLength,
align=self._labelAlign,
valign=self._labelVAlign,
devicePixelRatio=self.devicePixelRatio)
width, height = label.size
if width > tickLabelsSize[0]:
tickLabelsSize[0] = width
if height > tickLabelsSize[1]:
tickLabelsSize[1] = height
labels.append(label)
vertices.append((xPixel, yPixel))
vertices.append((xPixel + tickScale * xTickLength,
yPixel + tickScale * yTickLength))
(x0, y0), (x1, y1) = self.displayCoords
xAxisCenter = 0.5 * (x0 + x1)
yAxisCenter = 0.5 * (y0 + y1)
xOffset, yOffset = self.titleOffset
# Adaptative title positioning:
# tickNorm = math.sqrt(xTickLength ** 2 + yTickLength ** 2)
# xOffset = -tickLabelsSize[0] * xTickLength / tickNorm
# xOffset -= 3 * xTickLength
# yOffset = -tickLabelsSize[1] * yTickLength / tickNorm
# yOffset -= 3 * yTickLength
axisTitle = Text2D(text=self.title,
font=font,
color=self._foregroundColor,
x=xAxisCenter + xOffset,
y=yAxisCenter + yOffset,
align=self._titleAlign,
valign=self._titleVAlign,
rotate=self._titleRotate,
devicePixelRatio=self.devicePixelRatio)
labels.append(axisTitle)
return vertices, labels
def _dirtyPlotFrame(self):
"""Dirty parent GLPlotFrame"""
plotFrame = self._plotFrameRef()
if plotFrame is not None:
plotFrame._dirty()
def _dirtyTicks(self):
"""Mark ticks as dirty and notify listener (i.e., background)."""
self._ticks = None
self._dirtyPlotFrame()
@staticmethod
def _frange(start, stop, step):
"""range for float (including stop)."""
while start <= stop:
yield start
start += step
def _ticksGenerator(self):
"""Generator of ticks as tuples:
((x, y) in display, dataPos, textLabel).
"""
dataMin, dataMax = self.dataRange
if self.isLog and dataMin <= 0.:
_logger.warning(
'Getting ticks while isLog=True and dataRange[0]<=0.')
dataMin = 1.
if dataMax < dataMin:
dataMax = 1.
if dataMin != dataMax: # data range is not null
(x0, y0), (x1, y1) = self.displayCoords
if self.isLog:
if self.isTimeSeries:
_logger.warning("Time series not implemented for log-scale")
logMin, logMax = math.log10(dataMin), math.log10(dataMax)
tickMin, tickMax, step, _ = niceNumbersForLog10(logMin, logMax)
xScale = (x1 - x0) / (logMax - logMin)
yScale = (y1 - y0) / (logMax - logMin)
for logPos in self._frange(tickMin, tickMax, step):
if logMin <= logPos <= logMax:
dataPos = 10 ** logPos
xPixel = x0 + (logPos - logMin) * xScale
yPixel = y0 + (logPos - logMin) * yScale
text = '1e%+03d' % logPos
yield ((xPixel, yPixel), dataPos, text)
if step == 1:
ticks = list(self._frange(tickMin, tickMax, step))[:-1]
for logPos in ticks:
dataOrigPos = 10 ** logPos
for index in range(2, 10):
dataPos = dataOrigPos * index
if dataMin <= dataPos <= dataMax:
logSubPos = math.log10(dataPos)
xPixel = x0 + (logSubPos - logMin) * xScale
yPixel = y0 + (logSubPos - logMin) * yScale
yield ((xPixel, yPixel), dataPos, None)
else:
xScale = (x1 - x0) / (dataMax - dataMin)
yScale = (y1 - y0) / (dataMax - dataMin)
nbPixels = math.sqrt(pow(x1 - x0, 2) + pow(y1 - y0, 2)) / self.devicePixelRatio
# Density of 1.3 label per 92 pixels
# i.e., 1.3 label per inch on a 92 dpi screen
tickDensity = 1.3 / 92
if not self.isTimeSeries:
tickMin, tickMax, step, nbFrac = niceNumbersAdaptative(
dataMin, dataMax, nbPixels, tickDensity)
for dataPos in self._frange(tickMin, tickMax, step):
if dataMin <= dataPos <= dataMax:
xPixel = x0 + (dataPos - dataMin) * xScale
yPixel = y0 + (dataPos - dataMin) * yScale
if nbFrac == 0:
text = '%g' % dataPos
else:
text = ('%.' + str(nbFrac) + 'f') % dataPos
yield ((xPixel, yPixel), dataPos, text)
else:
# Time series
try:
dtMin = dt.datetime.fromtimestamp(dataMin, tz=self.timeZone)
dtMax = dt.datetime.fromtimestamp(dataMax, tz=self.timeZone)
except ValueError:
_logger.warning("Data range cannot be displayed with time axis")
return # Range is out of bound of the datetime
tickDateTimes, spacing, unit = calcTicksAdaptive(
dtMin, dtMax, nbPixels, tickDensity)
for tickDateTime in tickDateTimes:
if dtMin <= tickDateTime <= dtMax:
dataPos = timestamp(tickDateTime)
xPixel = x0 + (dataPos - dataMin) * xScale
yPixel = y0 + (dataPos - dataMin) * yScale
fmtStr = bestFormatString(spacing, unit)
text = tickDateTime.strftime(fmtStr)
yield ((xPixel, yPixel), dataPos, text)
# GLPlotFrame #################################################################
class GLPlotFrame(object):
"""Base class for rendering a 2D frame surrounded by axes."""
_TICK_LENGTH_IN_PIXELS = 5
_LINE_WIDTH = 1
_SHADERS = {
'vertex': """
attribute vec2 position;
uniform mat4 matrix;
void main(void) {
gl_Position = matrix * vec4(position, 0.0, 1.0);
}
""",
'fragment': """
uniform vec4 color;
uniform float tickFactor; /* = 1./tickLength or 0. for solid line */
void main(void) {
if (mod(tickFactor * (gl_FragCoord.x + gl_FragCoord.y), 2.) < 1.) {
gl_FragColor = color;
} else {
discard;
}
}
"""
}
_Margins = namedtuple('Margins', ('left', 'right', 'top', 'bottom'))
# Margins used when plot frame is not displayed
_NoDisplayMargins = _Margins(0, 0, 0, 0)
def __init__(self, marginRatios, foregroundColor, gridColor):
"""
:param List[float] marginRatios:
The ratios of margins around plot area for axis and labels.
(left, top, right, bottom) as float in [0., 1.]
:param foregroundColor: color used for the frame and labels.
:type foregroundColor: tuple with RGBA values ranging from 0.0 to 1.0
:param gridColor: color used for grid lines.
:type gridColor: tuple RGBA with RGBA values ranging from 0.0 to 1.0
"""
self._renderResources = None
self.__marginRatios = marginRatios
self.__marginsCache = None
self._foregroundColor = foregroundColor
self._gridColor = gridColor
self.axes = [] # List of PlotAxis to be updated by subclasses
self._grid = False
self._size = 0., 0.
self._title = ''
self._devicePixelRatio = 1.
@property
def isDirty(self):
"""True if it need to refresh graphic rendering, False otherwise."""
return self._renderResources is None
GRID_NONE = 0
GRID_MAIN_TICKS = 1
GRID_SUB_TICKS = 2
GRID_ALL_TICKS = (GRID_MAIN_TICKS + GRID_SUB_TICKS)
@property
def foregroundColor(self):
"""Color used for frame and labels"""
return self._foregroundColor
@foregroundColor.setter
def foregroundColor(self, color):
"""Color used for frame and labels"""
assert len(color) == 4, \
"foregroundColor must have length 4, got {}".format(len(self._foregroundColor))
if self._foregroundColor != color:
self._foregroundColor = color
for axis in self.axes:
axis.foregroundColor = color
self._dirty()
@property
def gridColor(self):
"""Color used for frame and labels"""
return self._gridColor
@gridColor.setter
def gridColor(self, color):
"""Color used for frame and labels"""
assert len(color) == 4, \
"gridColor must have length 4, got {}".format(len(self._gridColor))
if self._gridColor != color:
self._gridColor = color
self._dirty()
@property
def marginRatios(self):
"""Plot margin ratios: (left, top, right, bottom) as 4 float in [0, 1].
"""
return self.__marginRatios
@marginRatios.setter
def marginRatios(self, ratios):
ratios = tuple(float(v) for v in ratios)
assert len(ratios) == 4
for value in ratios:
assert 0. <= value <= 1.
assert ratios[0] + ratios[2] < 1.
assert ratios[1] + ratios[3] < 1.
if self.__marginRatios != ratios:
self.__marginRatios = ratios
self.__marginsCache = None # Clear cached margins
self._dirty()
@property
def margins(self):
"""Margins in pixels around the plot."""
if self.__marginsCache is None:
width, height = self.size
left, top, right, bottom = self.marginRatios
self.__marginsCache = self._Margins(
left=int(left*width),
right=int(right*width),
top=int(top*height),
bottom=int(bottom*height))
return self.__marginsCache
@property
def devicePixelRatio(self):
return self._devicePixelRatio
@devicePixelRatio.setter
def devicePixelRatio(self, ratio):
if ratio != self._devicePixelRatio:
self._devicePixelRatio = ratio
self._dirty()
@property
def grid(self):
"""Grid display mode:
- 0: No grid.
- 1: Grid on main ticks.
- 2: Grid on sub-ticks for log scale axes.
- 3: Grid on main and sub ticks."""
return self._grid
@grid.setter
def grid(self, grid):
assert grid in (self.GRID_NONE, self.GRID_MAIN_TICKS,
self.GRID_SUB_TICKS, self.GRID_ALL_TICKS)
if grid != self._grid:
self._grid = grid
self._dirty()
@property
def size(self):
"""Size in device pixels of the plot area including margins."""
return self._size
@size.setter
def size(self, size):
assert len(size) == 2
size = tuple(size)
if size != self._size:
self._size = size
self.__marginsCache = None # Clear cached margins
self._dirty()
@property
def plotOrigin(self):
"""Plot area origin (left, top) in widget coordinates in pixels."""
return self.margins.left, self.margins.top
@property
def plotSize(self):
"""Plot area size (width, height) in pixels."""
w, h = self.size
w -= self.margins.left + self.margins.right
h -= self.margins.top + self.margins.bottom
return w, h
@property
def title(self):
"""Main title as a str in latin-1."""
return self._title
@title.setter
def title(self, title):
if title != self._title:
self._title = title
self._dirty()
# In-place update
# if self._renderResources is not None:
# self._renderResources[-1][-1].text = title
def _dirty(self):
# When Text2D require discard we need to handle it
self._renderResources = None
def _buildGridVertices(self):
if self._grid == self.GRID_NONE:
return []
elif self._grid == self.GRID_MAIN_TICKS:
def test(text):
return text is not None
elif self._grid == self.GRID_SUB_TICKS:
def test(text):
return text is None
elif self._grid == self.GRID_ALL_TICKS:
def test(_):
return True
else:
_logger.warning('Wrong grid mode: %d', self._grid)
return []
return self._buildGridVerticesWithTest(test)
def _buildGridVerticesWithTest(self, test):
"""Override in subclass to generate grid vertices"""
return []
def _buildVerticesAndLabels(self):
# To fill with copy of axes lists
vertices = []
labels = []
for axis in self.axes:
axisVertices, axisLabels = axis.getVerticesAndLabels()
vertices += axisVertices
labels += axisLabels
vertices = numpy.array(vertices, dtype=numpy.float32)
# Add main title
xTitle = (self.size[0] + self.margins.left -
self.margins.right) // 2
yTitle = self.margins.top - self._TICK_LENGTH_IN_PIXELS
labels.append(Text2D(text=self.title,
font=qt.QApplication.instance().font(),
color=self._foregroundColor,
x=xTitle,
y=yTitle,
align=CENTER,
valign=BOTTOM,
devicePixelRatio=self.devicePixelRatio))
# grid
gridVertices = numpy.array(self._buildGridVertices(),
dtype=numpy.float32)
self._renderResources = (vertices, gridVertices, labels)
_program = Program(
_SHADERS['vertex'], _SHADERS['fragment'], attrib0='position')
def render(self):
if self.margins == self._NoDisplayMargins:
return
if self._renderResources is None:
self._buildVerticesAndLabels()
vertices, gridVertices, labels = self._renderResources
width, height = self.size
matProj = mat4Ortho(0, width, height, 0, 1, -1)
gl.glViewport(0, 0, width, height)
prog = self._program
prog.use()
gl.glLineWidth(self._LINE_WIDTH)
gl.glUniformMatrix4fv(prog.uniforms['matrix'], 1, gl.GL_TRUE,
matProj.astype(numpy.float32))
gl.glUniform4f(prog.uniforms['color'], *self._foregroundColor)
gl.glUniform1f(prog.uniforms['tickFactor'], 0.)
gl.glEnableVertexAttribArray(prog.attributes['position'])
gl.glVertexAttribPointer(prog.attributes['position'],
2,
gl.GL_FLOAT,
gl.GL_FALSE,
0, vertices)
gl.glDrawArrays(gl.GL_LINES, 0, len(vertices))
for label in labels:
label.render(matProj)
def renderGrid(self):
if self._grid == self.GRID_NONE:
return
if self._renderResources is None:
self._buildVerticesAndLabels()
vertices, gridVertices, labels = self._renderResources
width, height = self.size
matProj = mat4Ortho(0, width, height, 0, 1, -1)
gl.glViewport(0, 0, width, height)
prog = self._program
prog.use()
gl.glLineWidth(self._LINE_WIDTH)
gl.glUniformMatrix4fv(prog.uniforms['matrix'], 1, gl.GL_TRUE,
matProj.astype(numpy.float32))
gl.glUniform4f(prog.uniforms['color'], *self._gridColor)
gl.glUniform1f(prog.uniforms['tickFactor'], 0.) # 1/2.) # 1/tickLen
gl.glEnableVertexAttribArray(prog.attributes['position'])
gl.glVertexAttribPointer(prog.attributes['position'],
2,
gl.GL_FLOAT,
gl.GL_FALSE,
0, gridVertices)
gl.glDrawArrays(gl.GL_LINES, 0, len(gridVertices))
# GLPlotFrame2D ###############################################################
class GLPlotFrame2D(GLPlotFrame):
def __init__(self, marginRatios, foregroundColor, gridColor):
"""
:param List[float] marginRatios:
The ratios of margins around plot area for axis and labels.
(left, top, right, bottom) as float in [0., 1.]
:param foregroundColor: color used for the frame and labels.
:type foregroundColor: tuple with RGBA values ranging from 0.0 to 1.0
:param gridColor: color used for grid lines.
:type gridColor: tuple RGBA with RGBA values ranging from 0.0 to 1.0
"""
super(GLPlotFrame2D, self).__init__(marginRatios, foregroundColor, gridColor)
self.axes.append(PlotAxis(self,
tickLength=(0., -5.),
foregroundColor=self._foregroundColor,
labelAlign=CENTER, labelVAlign=TOP,
titleAlign=CENTER, titleVAlign=TOP,
titleRotate=0))
self._x2AxisCoords = ()
self.axes.append(PlotAxis(self,
tickLength=(5., 0.),
foregroundColor=self._foregroundColor,
labelAlign=RIGHT, labelVAlign=CENTER,
titleAlign=CENTER, titleVAlign=BOTTOM,
titleRotate=ROTATE_270))
self._y2Axis = PlotAxis(self,
tickLength=(-5., 0.),
foregroundColor=self._foregroundColor,
labelAlign=LEFT, labelVAlign=CENTER,
titleAlign=CENTER, titleVAlign=TOP,
titleRotate=ROTATE_270)
self._isYAxisInverted = False
self._dataRanges = {
'x': (1., 100.), 'y': (1., 100.), 'y2': (1., 100.)}
self._baseVectors = (1., 0.), (0., 1.)
self._transformedDataRanges = None
self._transformedDataProjMat = None
self._transformedDataY2ProjMat = None
def _dirty(self):
super(GLPlotFrame2D, self)._dirty()
self._transformedDataRanges = None
self._transformedDataProjMat = None
self._transformedDataY2ProjMat = None
@property
def isDirty(self):
"""True if it need to refresh graphic rendering, False otherwise."""
return (super(GLPlotFrame2D, self).isDirty or
self._transformedDataRanges is None or
self._transformedDataProjMat is None or
self._transformedDataY2ProjMat is None)
@property
def xAxis(self):
return self.axes[0]
@property
def yAxis(self):
return self.axes[1]
@property
def y2Axis(self):
return self._y2Axis
@property
def isY2Axis(self):
"""Whether to display the left Y axis or not."""
return len(self.axes) == 3
@isY2Axis.setter
def isY2Axis(self, isY2Axis):
if isY2Axis != self.isY2Axis:
if isY2Axis:
self.axes.append(self._y2Axis)
else:
self.axes = self.axes[:2]
self._dirty()
@property
def isYAxisInverted(self):
"""Whether Y axes are inverted or not as a bool."""
return self._isYAxisInverted
@isYAxisInverted.setter
def isYAxisInverted(self, value):
value = bool(value)
if value != self._isYAxisInverted:
self._isYAxisInverted = value
self._dirty()
DEFAULT_BASE_VECTORS = (1., 0.), (0., 1.)
"""Values of baseVectors for orthogonal axes."""
@property
def baseVectors(self):
"""Coordinates of the X and Y axes in the orthogonal plot coords.
Raises ValueError if corresponding matrix is singular.
2 tuples of 2 floats: (xx, xy), (yx, yy)
"""
return self._baseVectors
@baseVectors.setter
def baseVectors(self, baseVectors):
self._dirty()
(xx, xy), (yx, yy) = baseVectors
vectors = (float(xx), float(xy)), (float(yx), float(yy))
det = (vectors[0][0] * vectors[1][1] - vectors[1][0] * vectors[0][1])
if det == 0.:
raise ValueError("Singular matrix for base vectors: " +
str(vectors))
if vectors != self._baseVectors:
self._baseVectors = vectors
self._dirty()
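# Added example: a Y axis skewed by 45 degrees could be expressed as
# baseVectors = ((1., 0.), (math.cos(math.pi / 4), math.sin(math.pi / 4)));
# a singular pair such as ((1., 0.), (2., 0.)) raises ValueError above.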
def _updateTitleOffset(self):
"""Update axes title offset according to margins"""
margins = self.margins
self.xAxis.titleOffset = 0, margins.bottom // 2
self.yAxis.titleOffset = -3 * margins.left // 4, 0
self.y2Axis.titleOffset = 3 * margins.right // 4, 0
# Override size and marginRatios setters to update titleOffsets
@GLPlotFrame.size.setter
def size(self, size):
GLPlotFrame.size.fset(self, size)
self._updateTitleOffset()
@GLPlotFrame.marginRatios.setter
def marginRatios(self, ratios):
GLPlotFrame.marginRatios.fset(self, ratios)
self._updateTitleOffset()
@property
def dataRanges(self):
"""Ranges of data visible in the plot on x, y and y2 axes.
This is different to the axes range when axes are not orthogonal.
Type: ((xMin, xMax), (yMin, yMax), (y2Min, y2Max))
"""
return self._DataRanges(self._dataRanges['x'],
self._dataRanges['y'],
self._dataRanges['y2'])
def setDataRanges(self, x=None, y=None, y2=None):
"""Set data range over each axes.
The provided ranges are clipped to possible values
(i.e., 32 float range + positive range for log scale).
:param x: (min, max) data range over X axis
:param y: (min, max) data range over Y axis
:param y2: (min, max) data range over Y2 axis
"""
if x is not None:
self._dataRanges['x'] = checkAxisLimits(
x[0], x[1], self.xAxis.isLog, name='x')
if y is not None:
self._dataRanges['y'] = checkAxisLimits(
y[0], y[1], self.yAxis.isLog, name='y')
if y2 is not None:
self._dataRanges['y2'] = checkAxisLimits(
y2[0], y2[1], self.y2Axis.isLog, name='y2')
self.xAxis.dataRange = self._dataRanges['x']
self.yAxis.dataRange = self._dataRanges['y']
self.y2Axis.dataRange = self._dataRanges['y2']
_DataRanges = namedtuple('dataRanges', ('x', 'y', 'y2'))
@property
def transformedDataRanges(self):
"""Bounds of the displayed area in transformed data coordinates
(i.e., log scale applied if any as well as skew)
3-tuple of 2-tuple (min, max) for each axis: x, y, y2.
"""
if self._transformedDataRanges is None:
(xMin, xMax), (yMin, yMax), (y2Min, y2Max) = self.dataRanges
if self.xAxis.isLog:
try:
xMin = math.log10(xMin)
except ValueError:
_logger.info('xMin: warning log10(%f)', xMin)
xMin = 0.
try:
xMax = math.log10(xMax)
except ValueError:
_logger.info('xMax: warning log10(%f)', xMax)
xMax = 0.
if self.yAxis.isLog:
try:
yMin = math.log10(yMin)
except ValueError:
_logger.info('yMin: warning log10(%f)', yMin)
yMin = 0.
try:
yMax = math.log10(yMax)
except ValueError:
_logger.info('yMax: warning log10(%f)', yMax)
yMax = 0.
try:
y2Min = math.log10(y2Min)
except ValueError:
_logger.info('yMin: warning log10(%f)', y2Min)
y2Min = 0.
try:
y2Max = math.log10(y2Max)
except ValueError:
_logger.info('yMax: warning log10(%f)', y2Max)
y2Max = 0.
self._transformedDataRanges = self._DataRanges(
(xMin, xMax), (yMin, yMax), (y2Min, y2Max))
return self._transformedDataRanges
@property
def transformedDataProjMat(self):
"""Orthographic projection matrix for rendering transformed data
:type: numpy.matrix
"""
if self._transformedDataProjMat is None:
xMin, xMax = self.transformedDataRanges.x
yMin, yMax = self.transformedDataRanges.y
if self.isYAxisInverted:
mat = mat4Ortho(xMin, xMax, yMax, yMin, 1, -1)
else:
mat = mat4Ortho(xMin, xMax, yMin, yMax, 1, -1)
self._transformedDataProjMat = mat
return self._transformedDataProjMat
@property
def transformedDataY2ProjMat(self):
"""Orthographic projection matrix for rendering transformed data
for the 2nd Y axis
:type: numpy.matrix
"""
if self._transformedDataY2ProjMat is None:
xMin, xMax = self.transformedDataRanges.x
y2Min, y2Max = self.transformedDataRanges.y2
if self.isYAxisInverted:
mat = mat4Ortho(xMin, xMax, y2Max, y2Min, 1, -1)
else:
mat = mat4Ortho(xMin, xMax, y2Min, y2Max, 1, -1)
self._transformedDataY2ProjMat = mat
return self._transformedDataY2ProjMat
@staticmethod
def __applyLog(
data: Union[float, numpy.ndarray],
isLog: bool
) -> Optional[Union[float, numpy.ndarray]]:
"""Apply log to data filtering out """
if not isLog:
return data
if isinstance(data, numbers.Real):
return None if data < FLOAT32_MINPOS else math.log10(data)
isBelowMin = data < FLOAT32_MINPOS
if numpy.any(isBelowMin):
data = numpy.array(data, copy=True, dtype=numpy.float64)
data[isBelowMin] = numpy.nan
with numpy.errstate(divide='ignore'):
return numpy.log10(data)
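# Added illustration: with isLog=True,
# __applyLog(numpy.array([1e-45, 1., 100.]), True) -> array([nan, 0., 2.]):
# values below FLOAT32_MINPOS become NaN before the log is taken.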
def dataToPixel(self, x, y, axis='left'):
"""Convert data coordinate to widget pixel coordinate.
"""
assert axis in ('left', 'right')
trBounds = self.transformedDataRanges
xDataTr = self.__applyLog(x, self.xAxis.isLog)
if xDataTr is None:
return None
yDataTr = self.__applyLog(y, self.yAxis.isLog)
if yDataTr is None:
return None
# Non-orthogonal axes
if self.baseVectors != self.DEFAULT_BASE_VECTORS:
(xx, xy), (yx, yy) = self.baseVectors
skew_mat = numpy.array(((xx, yx), (xy, yy)))
coords = numpy.dot(skew_mat, numpy.array((xDataTr, yDataTr)))
xDataTr, yDataTr = coords
plotWidth, plotHeight = self.plotSize
xPixel = (self.margins.left +
plotWidth * (xDataTr - trBounds.x[0]) /
(trBounds.x[1] - trBounds.x[0]))
usedAxis = trBounds.y if axis == "left" else trBounds.y2
yOffset = (plotHeight * (yDataTr - usedAxis[0]) /
(usedAxis[1] - usedAxis[0]))
if self.isYAxisInverted:
yPixel = self.margins.top + yOffset
else:
yPixel = self.size[1] - self.margins.bottom - yOffset
return (
int(xPixel) if isinstance(xPixel, numbers.Real) else xPixel.astype(numpy.int64),
int(yPixel) if isinstance(yPixel, numbers.Real) else yPixel.astype(numpy.int64),
)
def pixelToData(self, x, y, axis="left"):
"""Convert pixel position to data coordinates.
:param float x: X coord
:param float y: Y coord
:param str axis: Y axis to use in ('left', 'right')
:return: (x, y) position in data coords
"""
assert axis in ("left", "right")
plotWidth, plotHeight = self.plotSize
trBounds = self.transformedDataRanges
xData = (x - self.margins.left + 0.5) / float(plotWidth)
xData = trBounds.x[0] + xData * (trBounds.x[1] - trBounds.x[0])
usedAxis = trBounds.y if axis == "left" else trBounds.y2
if self.isYAxisInverted:
yData = (y - self.margins.top + 0.5) / float(plotHeight)
yData = usedAxis[0] + yData * (usedAxis[1] - usedAxis[0])
else:
yData = self.size[1] - self.margins.bottom - y - 0.5
yData /= float(plotHeight)
yData = usedAxis[0] + yData * (usedAxis[1] - usedAxis[0])
# non-orthogonal axis
if self.baseVectors != self.DEFAULT_BASE_VECTORS:
(xx, xy), (yx, yy) = self.baseVectors
skew_mat = numpy.array(((xx, yx), (xy, yy)))
skew_mat = numpy.linalg.inv(skew_mat)
coords = numpy.dot(skew_mat, numpy.array((xData, yData)))
xData, yData = coords
if self.xAxis.isLog:
xData = pow(10, xData)
if self.yAxis.isLog:
yData = pow(10, yData)
return xData, yData
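# Added note: pixelToData is the inverse of dataToPixel above up to pixel
# rounding; e.g. frame.pixelToData(*frame.dataToPixel(x, y, axis='left'))
# returns approximately (x, y) for points inside the plot area.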
def _buildGridVerticesWithTest(self, test):
vertices = []
if self.baseVectors == self.DEFAULT_BASE_VECTORS:
for axis in self.axes:
for (xPixel, yPixel), data, text in axis.ticks:
if test(text):
vertices.append((xPixel, yPixel))
if axis == self.xAxis:
vertices.append((xPixel, self.margins.top))
elif axis == self.yAxis:
vertices.append((self.size[0] - self.margins.right,
yPixel))
else: # axis == self.y2Axis
vertices.append((self.margins.left, yPixel))
else:
# Get plot corners in data coords
plotLeft, plotTop = self.plotOrigin
plotWidth, plotHeight = self.plotSize
corners = [(plotLeft, plotTop),
(plotLeft, plotTop + plotHeight),
(plotLeft + plotWidth, plotTop + plotHeight),
(plotLeft + plotWidth, plotTop)]
for axis in self.axes:
if axis == self.xAxis:
cornersInData = numpy.array([
self.pixelToData(x, y) for (x, y) in corners])
borders = ((cornersInData[0], cornersInData[3]), # top
(cornersInData[1], cornersInData[0]), # left
(cornersInData[3], cornersInData[2])) # right
for (xPixel, yPixel), data, text in axis.ticks:
if test(text):
for (x0, y0), (x1, y1) in borders:
if min(x0, x1) <= data < max(x0, x1):
yIntersect = (data - x0) * \
(y1 - y0) / (x1 - x0) + y0
pixelPos = self.dataToPixel(
data, yIntersect)
if pixelPos is not None:
vertices.append((xPixel, yPixel))
vertices.append(pixelPos)
break # Stop at first intersection
else: # y or y2 axes
if axis == self.yAxis:
axis_name = 'left'
cornersInData = numpy.array([
self.pixelToData(x, y) for (x, y) in corners])
borders = (
(cornersInData[3], cornersInData[2]), # right
(cornersInData[0], cornersInData[3]), # top
(cornersInData[2], cornersInData[1])) # bottom
else: # axis == self.y2Axis
axis_name = 'right'
cornersInData = numpy.array([self.pixelToData(
    x, y, axis='right') for (x, y) in corners])
borders = (
(cornersInData[1], cornersInData[0]), # left
(cornersInData[0], cornersInData[3]), # top
(cornersInData[2], cornersInData[1])) # bottom
for (xPixel, yPixel), data, text in axis.ticks:
if test(text):
for (x0, y0), (x1, y1) in borders:
if min(y0, y1) <= data < max(y0, y1):
xIntersect = (data - y0) * \
(x1 - x0) / (y1 - y0) + x0
pixelPos = self.dataToPixel(
xIntersect, data, axis=axis_name)
if pixelPos is not None:
vertices.append((xPixel, yPixel))
vertices.append(pixelPos)
break # Stop at first intersection
return vertices
def _buildVerticesAndLabels(self):
width, height = self.size
xCoords = (self.margins.left - 0.5,
width - self.margins.right + 0.5)
yCoords = (height - self.margins.bottom + 0.5,
self.margins.top - 0.5)
self.axes[0].displayCoords = ((xCoords[0], yCoords[0]),
(xCoords[1], yCoords[0]))
self._x2AxisCoords = ((xCoords[0], yCoords[1]),
(xCoords[1], yCoords[1]))
if self.isYAxisInverted:
# Y axes are inverted, axes coordinates are inverted
yCoords = yCoords[1], yCoords[0]
self.axes[1].displayCoords = ((xCoords[0], yCoords[0]),
(xCoords[0], yCoords[1]))
self._y2Axis.displayCoords = ((xCoords[1], yCoords[0]),
(xCoords[1], yCoords[1]))
super(GLPlotFrame2D, self)._buildVerticesAndLabels()
vertices, gridVertices, labels = self._renderResources
# Adds vertices for borders without axis
extraVertices = []
extraVertices += self._x2AxisCoords
if not self.isY2Axis:
extraVertices += self._y2Axis.displayCoords
extraVertices = numpy.array(
extraVertices, copy=False, dtype=numpy.float32)
vertices = numpy.append(vertices, extraVertices, axis=0)
self._renderResources = (vertices, gridVertices, labels)
@property
def foregroundColor(self):
"""Color used for frame and labels"""
return self._foregroundColor
@foregroundColor.setter
def foregroundColor(self, color):
"""Color used for frame and labels"""
assert len(color) == 4, \
"foregroundColor must have length 4, got {}".format(len(self._foregroundColor))
if self._foregroundColor != color:
self._y2Axis.foregroundColor = color
GLPlotFrame.foregroundColor.fset(self, color) # call parent property
|
silx-kit/silx
|
src/silx/gui/plot/backends/glutils/GLPlotFrame.py
|
GLPlotFrame.py
|
py
| 42,835 |
python
|
en
|
code
| 106 |
github-code
|
6
|
5589205557
|
import re
import plac
import ujson as json
from utils import load_jsonl_file, dumps_jsonl
regex_ws = re.compile(r'\s+')
def load_corpus(path):
    with open(path, 'r') as fh:
        return json.load(fh)
def hydrate(parses, relation):
doc = parses.get(relation['DocID'])
text = doc.get('text', '') if doc else ''
return {
'Arg1': {
'TokenList': relation['Arg1'],
'RawText': ' '.join([text[t[0]:t[1]] for t in relation['Arg1']]),
},
'Arg2': {
'TokenList': relation['Arg2'],
'RawText': ' '.join([text[t[0]:t[1]] for t in relation['Arg2']]),
},
'Connective': {
'TokenList': relation['Connective'],
'RawText': ' '.join([text[t[0]:t[1]] for t in relation['Connective']]),
},
'DocId': relation['DocID']
}
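# Added sketch of the expected record shapes (inferred from hydrate above):
# parses = {'doc1': {'text': 'Hello world'}}
# relation = {'DocID': 'doc1', 'Arg1': [[0, 5]], 'Arg2': [[6, 11]],
#             'Connective': []}
# hydrate(parses, relation)['Arg1']['RawText']  # -> 'Hello'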
def main(parses_path, relation_path):
corpus = load_corpus(parses_path)
relations = load_jsonl_file(relation_path)
dumps_jsonl(map(lambda r: hydrate(corpus, r), relations))
if __name__ == '__main__':
plac.call(main)
|
rknaebel/bbc-discourse
|
hydrate.py
|
hydrate.py
|
py
| 1,089 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1059675909
|
"""
This module defines the interface for the Server.
.. autoclass:: Server
:members:
:undoc-members:
:show-inheritance:
"""
import atexit
import base64
import logging
import os
import threading
from functools import partial, wraps
import pluginbase
import tornado.httpserver
import tornado.web
from flask import (Flask, flash, jsonify, redirect, render_template, request,
send_from_directory, session, url_for)
from flask_babel import Babel
from flask_login import current_user, logout_user
from sockjs.tornado import SockJSRouter
from tornado import web
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
from opsoro.apps import Apps
from opsoro.console_msg import *
from opsoro.expression import Expression
from opsoro.preferences import Preferences
from opsoro.robot import Robot
from opsoro.server.request_handlers import RHandler
from opsoro.users import SocketConnection, Users
# Helper function
get_path = partial(os.path.join, os.path.abspath(os.path.dirname(__file__)))
class Server(object):
def __init__(self):
self.request_handler = RHandler(self)
# Create flask instance for webserver
self.flaskapp = Flask(__name__)
# self.flaskapp.config['DEBUG'] = True
self.flaskapp.config['TEMPLATES_AUTO_RELOAD'] = True
# Translation support
self.flaskapp.config.from_pyfile('settings.cfg')
self.babel = Babel(self.flaskapp)
# Setup key for sessions
self.flaskapp.secret_key = "5\x075y\xfe$\x1aV\x1c<A\xf4\xc1\xcfst0\xa49\x9e@\x0b\xb2\x17"
# Setup login manager
Users.setup(self.flaskapp)
# Setup app system
Apps.register_apps(self)
# self.activeapp = None
# Initialize all URLs
self.request_handler.set_urls()
# Run stop function at exit
atexit.register(self.at_exit)
def at_exit(self):
print_info('Goodbye!')
# Sleep robot
Robot.sleep()
Apps.stop_all()
if threading.activeCount() > 0:
threads = threading.enumerate()
for thread in threads:
try:
thread.stop()
thread.join()
except AttributeError:
pass
def render_template(self, template, **kwargs):
return self.request_handler.render_template(template, **kwargs)
def run(self):
# Setup SockJS
flaskwsgi = WSGIContainer(self.flaskapp)
self.socketrouter = SockJSRouter(SocketConnection, '/sockjs')
tornado_app = tornado.web.Application(self.socketrouter.urls + [(r".*", tornado.web.FallbackHandler, {"fallback": flaskwsgi})])
tornado_app.listen(80)
# Wake up robot
Robot.wake()
# Start default app
startup_app = Preferences.get('general', 'startup_app', None)
if startup_app in Apps.apps:
self.request_handler.page_openapp(startup_app)
# SSL security
# http_server = tornado.httpserver.HTTPServer(tornado_app, ssl_options={
# "certfile": "/etc/ssl/certs/server.crt",
# "keyfile": "/etc/ssl/private/server.key",
# })
# http_server.listen(443)
try:
# ioloop.PeriodicCallback(UserSocketConnection.dump_stats, 1000).start()
IOLoop.instance().start()
except KeyboardInterrupt:
print_info('Keyboard interrupt')
self.at_exit()
def shutdown(self):
logging.info("Stopping server")
io_loop = IOLoop.instance()
io_loop.stop()
def protected_view(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
if current_user.is_authenticated:
if current_user.is_admin:
# the actual page
return f(*args, **kwargs)
else:
flash("You do not have permission to access the requested page. Please log in below.")
return redirect(url_for("login"))
else:
flash("You do not have permission to access the requested page. Please log in below.")
return redirect(url_for("login"))
return wrapper
def app_view(self, f):
appname = f.__module__.split(".")[-1]
@wraps(f)
def wrapper(*args, **kwargs):
# Protected page
if current_user.is_authenticated:
if not current_user.is_admin:
flash("You do not have permission to access the requested page. Please log in below.")
return redirect(url_for("login"))
else:
flash("You do not have permission to access the requested page. Please log in below.")
return redirect(url_for("login"))
# Check if app is active
if appname in Apps.active_apps:
# This app is active
return f(*args, **kwargs)
else:
# Return app not active page
assert appname in Apps.apps, "Could not find %s in list of loaded apps." % appname
data = {
"app": {},
# "appname": appname,
"page_icon": Apps.apps[appname].config["icon"],
"page_caption": Apps.apps[appname].config["full_name"]
}
data["title"] = self.request_handler.title
# if self.activeapp in Apps.apps:
# # Another app is active
# data["app"]["active"] = True
# data["app"]["name"] = Apps.apps[self.activeapp].config["full_name"]
# data["app"]["icon"] = Apps.apps[self.activeapp].config["icon"]
# data["title"] += " - %s" % Apps.apps[self.activeapp].config["full_name"]
# else:
# # No app is active
# data["app"]["active"] = False
return render_template("app_not_active.html", **data)
return wrapper
def app_api(self, f):
appname = f.__module__.split(".")[-1]
@wraps(f)
def wrapper(*args, **kwargs):
# Protected page
if current_user.is_authenticated:
if not current_user.is_admin:
return jsonify(status="error", message="You do not have permission to access the requested page.")
else:
return jsonify(status="error", message="You do not have permission to access the requested page.")
# Check if app is active
if appname in Apps.active_apps:
# This app is active
data = f(*args, **kwargs)
if data is None:
data = {}
if "status" not in data:
data["status"] = "success"
return jsonify(data)
else:
# Return app not active page
assert appname in Apps.apps, "Could not find %s in list of loaded apps." % appname
return jsonify(status="error", message="This app is not active.")
return wrapper
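# Usage sketch (added; app_view/app_api resolve the app name from the wrapped
# function's module, so these decorators are meant for app modules):
# @server.app_api
# def status():
#     return {"battery": 100}  # hypothetical payload, wrapped into JSON above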
|
OPSORO/OS
|
src/opsoro/server/__init__.py
|
__init__.py
|
py
| 7,251 |
python
|
en
|
code
| 9 |
github-code
|
6
|
73831952826
|
from odoo import api, models, fields, _
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class LgpsPartner(models.Model):
_inherit = 'res.partner'
client_type = fields.Selection(
[
('new', _('New')),
('aftersales', _('After Sales')),
],
default='new',
string=_("Client Type")
)
first_installation_day = fields.Date(
string=_("First Installation Day")
)
custom_status = fields.Selection(
[
('active', _('Active')),
('cancelled', _('Cancelled')),
('demo', _('Demo')),
('inactive', _('Inactive')),
('suspended', _('Suspended')),
('on_negotiation', _('On Negotiation')),
('in_credit_bureau', _('In Credit Bureau')),
],
default='active'
)
client_rank = fields.Selection(
[
('a', 'A'),
('b', 'B'),
('c', 'C'),
('d', 'D'),
('e', 'E'),
],
default='e',
string=_("Client Rank")
)
coordination_executive = fields.Many2one(
comodel_name="res.users",
string=_("Coordination"),
ondelete="set null",
)
credit_collection_executive = fields.Many2one(
comodel_name="res.users",
string=_("Credit and Collection"),
ondelete="set null",
)
after_sales_executive = fields.Many2one(
comodel_name="res.users",
string=_("After Sales"),
ondelete="set null",
)
special_negotiations = fields.Boolean(
string=_("Special Negotiations"),
default=False
)
special_negotiation_notes = fields.Html(
string=_("Special Negotiations Notes")
)
gpsdevice_ids = fields.One2many(
comodel_name="lgps.device",
inverse_name="client_id",
string="Gps Devices",
readonly=True,
)
@api.model
def create(self, values):
if self._check_if_can_create():
new_record = super(LgpsPartner, self).create(values)
return new_record
# def write(self, values):
# if self._check_if_can_create():
# return super(LgpsPartner, self).write(values)
def _check_if_can_create(self):
user = self.env.user
if not user.has_group('lgps.lgps_group_create_contacts'):
raise UserError('Only Administration and Finance staff may create new '
                'Customers and Suppliers.')
return True
|
intralix/odoo-addons
|
lgps/models/custom_partner.py
|
custom_partner.py
|
py
| 2,580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2721867306
|
"""
api.py
~~~~~~
This file define simple REST APi for a Machine Learning Model
"""
from os import environ as env
from joblib import load
from flask import Flask, jsonify, make_response, request
from pandas import DataFrame
service_name = env['SERVICE_NAME']
version = env['API_VERSION']
model = load('data/model.joblib')
app = Flask(__name__)
@app.route(f'/{service_name}/v{version}/predict', methods=['POST'])
def predict():
"""Predict Incoming Request"""
try:
req = request.json
print(req)
features = DataFrame(req)
prediction = model.predict(features).tolist()
return make_response(jsonify({'prediction': prediction}))
except ValueError:
raise RuntimeError('Features are not in the correct format.')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5001)
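# Client sketch (added; assumes SERVICE_NAME=ml and API_VERSION=1 in the
# environment, and a model trained on matching feature columns):
# import requests
# resp = requests.post('http://localhost:5001/ml/v1/predict',
#                      json=[{'x1': 0.5, 'x2': 1.2}])
# resp.json()  # -> {'prediction': [...]}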
|
repodevs/flask-machine-learning-service
|
api.py
|
api.py
|
py
| 846 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10806067701
|
from random import choice
chance = ['h', 'h', 'h', 'h', 'h', 'h', 'h', 'h', 'h', 't']
works = 0
for i in range(100000):
outcomes = []
for x in range(10):
outcomes.append(choice(chance))
if outcomes.count('h') >= 3:
works += 1
print(works)
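# Analytic check (added): with P(h)=0.9, P(at least 3 heads in 10 flips)
# = 1 - sum(comb(10, k) * 0.9**k * 0.1**(10 - k) for k in range(3))
# ~= 0.9999996, so nearly all 100000 trials should count as successes.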
|
Theeran-SK/Miscellaneous
|
bmc.py
|
bmc.py
|
py
| 297 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32500164614
|
"""
Write a program that receives 10 numbers from the user and prints the largest.
"""
def age_in_month(user_age):
    age = user_age * 12
    print(f"your AGE in month is {age}")

while True:
    try:
        age_in_month(int(input("Hello user, please enter your AGE\n")))
        break
    except ValueError:
        print("The AGE must be a number")
|
eehud738/python-
|
Section 2/HW2.py
|
HW2.py
|
py
| 500 |
python
|
he
|
code
| 0 |
github-code
|
6
|
1419172936
|
"""scene.py module"""
# Michael Gresham
# CPSC 386-01
# 2021-11-29
# [email protected]
# @Michael-Gresham
#
# Lab 03-00
#
# My scene class
# Holds all the scenes that are present in snek game.
import pygame
from pygame.constants import SCRAP_SELECTION
from random import randint
import os
import pickle
from datetime import datetime
class Scene:
"""General Scene class that's inherited by all other Scene types."""
def __init__(self, screen, clock, background_color=(0, 0, 0)):
self._is_valid = True
self._frame_rate = 60
self._screen = screen
self._background = pygame.Surface(self._screen.get_size())
self._background_color = background_color
self._background.fill(self._background_color)
self._clock = clock
def is_valid(self):
"""If game state is valid return true."""
return self._is_valid
def frame_rate(self):
"""return the frame rate of the game."""
return self._frame_rate
def start_scene(self):
"""method driver for the class"""
pass
def end_scene(self):
"""Does nothing here but meant to return next scene."""
pass
def update(self):
"""update the display of the scene."""
pygame.display.update()
def draw(self):
"""Display the screen background onto the screen."""
self._screen.blit(self._background, (0, 0))
def process_event(self, event):
"""Handles all the events at the particular scene."""
if event.type == pygame.QUIT:
self._is_valid = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
print("Good bye!")
self._is_valid = False
class TitleScene(Scene):
"""Class which handles the title screen of snake."""
def __init__(self, screen, clock, title_size, background_color=(0, 0, 0)):
# class initializer. Initializes basic displays.
super().__init__(screen, clock, background_color)
(w, h) = self._screen.get_size()
self._speed = [0.5, 1, 2]
self._title_name = "Snek Game"
self._title_size = title_size
self._title_color = [50, 50, 50]
title_font = pygame.font.Font(
pygame.font.get_default_font(), self._title_size
)
self._title = title_font.render(
self._title_name, True, self._title_color
)
self._title_pos = self._title.get_rect(center=(w // 2, h // 4))
instruction_name = "Press any key to continue"
self._instruction_size = title_size // 4
instruction_font = pygame.font.Font(
pygame.font.get_default_font(), self._instruction_size
)
self._instruction = instruction_font.render(
instruction_name, True, (255, 255, 0)
)
self._instruction_pos = self._instruction.get_rect(
center=(w // 2, h // 2 + h // 4)
)
self._reverse = False
def start_scene(self):
"""Method driver of the class. Calls other method in order to run the scene."""
while True:
self.draw()
self.update()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
return self.end_scene()
self.process_event(event)
pass
def draw(self):
"""redraws the background, title name, and instructions and updates screen."""
super().draw()
self._screen.blit(self._title, self._title_pos)
self._screen.blit(self._instruction, self._instruction_pos)
self.display_rules()
pygame.display.update()
def display_rules(self):
"""Displays the instructions for Snake Game."""
instructions = [
"Press arrow keys to change the direction of the snake.",
"Goal of game is to survive and eat as many apples as possible.",
"Every apple you eat extends the length of the snake.",
"Try not to hit yourself or hit into a wall or its game over.",
"Overall score is based off time survived and apples eaten",
"Good Luck!",
]
(w, h) = self._screen.get_size()
height = (h // 4) + 100
width = w // 4
count = 0
for instruction in instructions:
instruction_font = pygame.font.Font(
pygame.font.get_default_font(), 15
)
instruction_render = instruction_font.render(
instruction, True, (255, 255, 255)
)
if count == 5:
self._screen.blit(instruction_render, (w // 2 - 50, height))
else:
self._screen.blit(instruction_render, (width, height))
height += 50
count += 1
def update(self):
"""Updates the color of title text and updates background."""
super().update()
for x in range(3):
self._title_color[x] += 1 * self._speed[x]
if self._title_color[x] <= 0 or self._title_color[x] >= 255:
self._reverse = not self._reverse
self._speed[x] *= -1
if self._title_color[x] > 255:
self._title_color[x] = 255
if self._title_color[x] < 0:
self._title_color[x] = 0
title_font = pygame.font.Font(
pygame.font.get_default_font(), self._title_size
)
self._title = title_font.render(
self._title_name, True, self._title_color
)
def end_scene(self):
"""returns the next scene."""
return "Level1"
pass
def process_event(self, event):
"""handles the exit button."""
if event.type == pygame.QUIT:
print("Goodbye!")
pygame.quit()
class GameScene(Scene):
"""Start of the GameScene Class"""
def __init__(self, screen, clock, background_color=(0, 0, 0)):
"""
This function initializes the GameScene class setting
the snake game board and the display as well.
"""
super().__init__(screen, clock, background_color)
# sets the board and initializes location of snake and apple.
self.direction = None
self.start_ticks = pygame.time.get_ticks()
self.score = 0
self.time = 0
self.snake_size = 1
self.snake = []
# self.player = player
self.offset = 100
(w, h) = self._screen.get_size()
self.board = []
for x in range(0, ((h - 100) // 20)):
row = []
for y in range(0, (w // 20)):
if (
x == 0
or y == 0
or y == (w // 20) - 1
or x == ((h - 100) // 20) - 1
):
row.append("border")
elif x == ((h - 100) // 20) // 2 and y == (w // 20) // 2:
row.append("snek")
self.snake.append((x, y))
elif (
x == ((((h - 100) // 20) // 2) + (((h - 100) // 20) // 4))
and y == (w // 20) // 2
):
row.append("apple")
else:
row.append("empty")
self.board.append(row)
self.timer = "Timer: " + str(self.time)
title_font = pygame.font.Font(pygame.font.get_default_font(), 25)
self._title_time = title_font.render(self.timer, True, (255, 255, 255))
self._timer_pos = self._title_time.get_rect(
center=(w // 4, self.offset // 2)
)
self.title_score = "Score: " + str(self.score)
self._title_score = title_font.render(
self.title_score, True, (255, 255, 255)
)
self._score_pos = self._title_score.get_rect(
center=(w // 2 + w // 4, self.offset // 2)
)
def start_scene(self):
"""method driver that drives the game logic."""
self.__init__(self._screen, self._clock)
while True:
# gets the time in game in miliseconds.
miliseconds = pygame.time.get_ticks() - self.start_ticks
next_scene = self.move()
if next_scene is not None:
    return next_scene
self.draw()
self.update(miliseconds)
for event in pygame.event.get():
self.process_event(event)
pass
def update(self, miliseconds):
"""handles updating the timer, background, and score."""
if (miliseconds // 1000) > self.time:
self.time = miliseconds // 1000
self.timer = "Timer: " + str(self.time)
title_font = pygame.font.Font(pygame.font.get_default_font(), 25)
self._title_time = title_font.render(
self.timer, True, (255, 255, 255)
)
if (self.time % 3) == 0 and self.time != 0:
self.score += 1
self.title_score = "Score: " + str(self.score)
title_font = pygame.font.Font(pygame.font.get_default_font(), 25)
self._title_score = title_font.render(
self.title_score, True, (255, 255, 255)
)
pygame.display.update()
pass
def create_apple(self):
"""Handles the logic that places a new apple when one is eaten."""
valid = False
while not valid:
row = randint(1, len(self.board) - 1)
column = randint(1, len(self.board[0]) - 1)
if self.board[row][column] == "empty":
self.board[row][column] = "apple"
valid = True
pass
def move(self):
"""Handles the movement logic of the snake. and loss conditions."""
if self.direction is None:
pass
else:
row = self.snake[0][0]
column = self.snake[0][1]
added = False
if self.direction == "up":
row -= 1
elif self.direction == "down":
row += 1
elif self.direction == "left":
column -= 1
elif self.direction == "right":
column += 1
if (
self.board[row][column] == "border"
or self.board[row][column] == "snek"
):
return self.end_scene()
if (
self.board[row][column] != "border"
and self.board[row][column] != "apple"
):
self.board[row][column] = "snek"
self.snake.insert(0, (row, column))
if self.board[row][column] == "apple":
print("hello World")
added = True
self.score += 10
self.create_apple()
self.board[row][column] = "snek"
self.snake.insert(0, (row, column))
miliseconds = pygame.time.get_ticks() - self.start_ticks
self.draw()
self.update(miliseconds)
if not added:
(x, y) = self.snake.pop()
self.board[x][y] = "empty"
self._clock.tick(10)
pass
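# Added note: move() follows the classic snake-growth pattern -- the new head
# is always inserted, and the tail is popped only when no apple was eaten,
# e.g. [(5, 5), (5, 4)] moving right becomes [(5, 6), (5, 5)] or, on an
# apple, [(5, 6), (5, 5), (5, 4)].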
def end_scene(self):
"""returns next scene which is end scene."""
print(str(self.score))
print(str(self.time))
return "End Scene"
def draw(self):
"""displays the score, time, and the game screen on pygame display."""
super().draw()
self._screen.blit(self._title_score, self._score_pos)
self._screen.blit(self._title_time, self._timer_pos)
for x in range(0, len(self.board)):
for y in range(0, len(self.board[0])):
if self.board[x][y] == "border":
pygame.draw.rect(
self._screen,
(164, 116, 73),
pygame.Rect((y * 20), (x * 20) + self.offset, 20, 20),
)
elif self.board[x][y] == "empty":
pygame.draw.rect(
self._screen,
(0, 154, 23),
pygame.Rect((y * 20), (x * 20) + self.offset, 20, 20),
)
elif self.board[x][y] == "apple":
pygame.draw.rect(
self._screen,
(0, 154, 23),
pygame.Rect((y * 20), (x * 20) + self.offset, 20, 20),
)
pygame.draw.circle(
self._screen,
(255, 0, 0),
((y * 20) + 10, (x * 20) + 10 + self.offset),
10,
)
elif self.board[x][y] == "snek":
pygame.draw.rect(
self._screen,
(0, 0, 255),
pygame.Rect((y * 20), (x * 20) + self.offset, 20, 20),
)
def process_event(self, event):
"""handle various events in game: movement and exit button."""
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP and self.direction != "down":
self.direction = "up"
if event.key == pygame.K_DOWN and self.direction != "up":
self.direction = "down"
if event.key == pygame.K_LEFT and self.direction != "right":
self.direction = "left"
if event.key == pygame.K_RIGHT and self.direction != "left":
self.direction = "right"
class End_Scene(Scene):
"""The end screen of snake, handles leader board and reset logic."""
main_dir = os.path.split(os.path.abspath(__file__))[0]
data_dir = os.path.join(main_dir, "data")
def __init__(self, screen, clock, background_color=(0, 0, 0)):
"""
This function initializes the end scene by setting
up visual text and visual instructions.
"""
super().__init__(screen, clock, background_color)
self.player_score = 0
self.player_time = 0
(w, h) = self._screen.get_size()
self.leaderboard = []
# code for Game over screen.
self._title_name = "Leader Board"
self._title_size = 60
self._title_color = [255, 255, 255]
title_font = pygame.font.Font(
pygame.font.get_default_font(), self._title_size
)
self._title = title_font.render(
self._title_name, True, self._title_color
)
self._title_pos = self._title.get_rect(center=(w // 2, h // 8))
self._score_name = " Date Score Time"
self._score_size = 30
self._score_color = [255, 255, 255]
title_font = pygame.font.Font(
pygame.font.get_default_font(), self._score_size
)
self._score = title_font.render(
self._score_name, True, self._score_color
)
self._score_pos = self._title.get_rect(center=(w // 4, h // 4))
def draw(self):
"""draws the leaderboard and options onto the screen."""
super().draw()
self._screen.blit(self._score, self._score_pos)
self._screen.blit(self._title, self._title_pos)
        count = min(10, len(self.leaderboard))
for x in range(0, count):
(w, h) = self._screen.get_size()
date = self.leaderboard[x][2].strftime("%d/%m/%Y %H:%M:%S")
record_name = "{0:<2} {1:<10} {2:<10} {3:<30}".format(
str(x + 1), date, self.leaderboard[x][0], self.leaderboard[x][1]
)
record_size = 25
record_color = (255, 255, 255)
record_font = pygame.font.SysFont("couriernew", record_size, 1, 0)
record = record_font.render(record_name, True, record_color)
record_pos = self._title.get_rect(
center=(w // 4 + 10, h // 4 + (30 * (x + 1)))
)
self._screen.blit(record, record_pos)
restart_title = "Press Space to play again!"
restart_size = 20
restart_color = (255, 255, 255)
restart_font = pygame.font.Font(
pygame.font.get_default_font(), restart_size
)
restart = restart_font.render(restart_title, True, restart_color)
        restart_pos = self._title.get_rect(
center=(w // 2, h // 2 + h // 4)
)
self._screen.blit(restart, restart_pos)
restart_title = "Press Escape to exit the game!"
restart_size = 20
restart_color = (255, 255, 255)
restart_font = pygame.font.Font(
pygame.font.get_default_font(), restart_size
)
restart = restart_font.render(restart_title, True, restart_color)
        restart_pos = self._title.get_rect(
center=(w // 2, h // 2 + h // 4 + 50)
)
self._screen.blit(restart, restart_pos)
def pickle_in_player(self):
"""takes player game records and puts it in pickle file."""
game_record = []
game_date = datetime.now()
game_record.append(self.player_score)
game_record.append(self.player_time)
game_record.append(game_date)
with open(self.data_dir + "/leaderboard.pickle", "ab") as fh:
pickle.dump(game_record, fh, pickle.HIGHEST_PROTOCOL)
def load_in(self):
"""loads in all game records."""
with open(self.data_dir + "/leaderboard.pickle", "rb") as fh:
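            # The leaderboard file holds many back-to-back pickle dumps
            # (one per game, appended in 'ab' mode by pickle_in_player),
            # so keep unpickling until the stream is exhausted.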
while True:
try:
yield pickle.load(fh)
except EOFError:
break
def start_scene(self, score, time):
"""method driver that handles End Scene logic."""
self.player_score = score
self.player_time = time
self.pickle_in_player()
self.leaderboard = list(self.load_in())
self.leaderboard.sort(key=lambda l: l[0], reverse=True)
self.draw()
self.update()
while True:
for event in pygame.event.get():
next_scene = self.process_event(event)
                if next_scene is not None:
                    return next_scene
def process_event(self, event):
"""handles the event in end screen: new game and exit game."""
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
return "exit"
if event.key == pygame.K_SPACE:
return "Title"
|
Michael-Gresham/Portfolio
|
cpsc-386-04-snake-Michael-Gresham-main/scene.py
|
scene.py
|
py
| 19,048 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35023516423
|
import cv2
import time
from base_camera import BaseCamera
from Process import Process
class Camera(BaseCamera):
video_source = 0
process = Process()
@staticmethod
def set_video_source(source):
Camera.video_source = source
    def frames(self):
camera = cv2.VideoCapture(Camera.video_source)
video_FourCC = int(camera.get(cv2.CAP_PROP_FOURCC))
video_fps = camera.get(cv2.CAP_PROP_FPS)
video_size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
if not camera.isOpened():
raise RuntimeError('Could not start camera.')
print("!!! TYPE:", type(video_FourCC), type(video_fps), type(video_size))
        font = cv2.FONT_HERSHEY_SIMPLEX
        in_num = 0   # people who crossed the counting line upward
        out_num = 0  # people who crossed the counting line downward
        num = 0      # frame counter, reset once it reaches video_fps
data = {}
while True:
# read current frame
            print("in:", in_num, "out:", out_num)
            _, img = camera.read()
            if img is None:
                break
            # The per-frame counter was never incremented in the original,
            # so the staleness checks below could not fire; advance it here.
            num += 1
            result, centers = self.process.process(img)
for center in centers:
x, y, w, h = center
# cv2.circle(result,(x,y), 30, (0,0,255), -1)
if len(data) == 0:
                    data[f'{x},{y},{w},{h},{num}'] = [x, y, w, h, x, y, w, h]  # first detection box, last detection box
continue
for key in list(data.keys()):
tx, ty, tw, th, tn = key.split(',')
tx, ty, tw, th, tn = int(tx), int(ty), int(tw), int(th), int(tn)
if num - tn > 4:
del data[key]
continue
                    else:
                        # Compute the overlap ratio once instead of twice.
                        overlap = self.process.overlap([x, y, w, h], [tx, ty, tw, th])
                        if overlap > 0.5:
value = data[key]
value[4], value[5], value[6], value[7] = x, y, w, h
del data[key]
data[f'{x},{y},{w},{h},{num}'] = value
else:
data[f'{x},{y},{w},{h},{num}'] = [x, y, w, h, x, y, w, h]
for key in list(data.keys()):
value = data[key]
y1 = value[1] + value[3] // 2
y2 = value[5] + value[7] // 2
if y1 < 700 and y2 >= 700:
del data[key]
out_num += 1
continue
elif y1 > 700 and y2 < 700:
del data[key]
in_num += 1
continue
                elif num == video_fps:
                    num = 0
                    # Keys hold five comma-separated fields (x, y, w, h,
                    # frame); the original unpacked only three here, which
                    # would raise ValueError at runtime.
                    tx, ty, tw, th, tn = key.split(',')
                    if video_fps - int(tn) > 4:
                        del data[key]
                        continue
                    else:
                        del data[key]
                        data[f'{tx},{ty},{tw},{th},{num}'] = value
cv2.line(result, (0, 700), (800, 700), (0, 0, 255), 5)
cv2.putText(result, f'in: {in_num} out: {out_num}', (50, 780), font, 1.5, (0, 0, 255), 2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
            print("tracked boxes:", list(data.keys()))
            # encode as a jpeg image and return it (note: this streams the raw
            # frame; swap `img` for `result` to stream the annotated view)
            yield cv2.imencode('.jpg', img)[1].tobytes()
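# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): frames() is a
# generator of JPEG-encoded bytes, so it can back a multipart MJPEG
# response.  The Flask app/route and the zero-argument Camera()
# constructor are assumptions; only the Camera class above is given.
from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    cam = Camera()
    def gen():
        for jpg in cam.frames():
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg + b'\r\n')
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')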
|
Micbetter/ISense-flow
|
camera_opencv.py
|
camera_opencv.py
|
py
| 3,981 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41728763711
|
import datetime
# requires import of opencv through pip
# pip install opencv-python
import cv2
# requires import of PIL pillow through pip
# python -m pip install pillow
from PIL import Image, ImageTk
import tkinter
def my_VidFunction(vid_name):
cap = cv2.VideoCapture(vid_name)
    # check if the video capture is open
    if not cap.isOpened():
print("Error Opening Video Stream Or File")
    while cap.isOpened():
ret, frame =cap.read()
if ret == True:
            cv2.namedWindow('frame', cv2.WINDOW_KEEPRATIO)
            cv2.setWindowProperty('frame', cv2.WND_PROP_ASPECT_RATIO, cv2.WINDOW_KEEPRATIO)
            cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow('frame', frame)
if cv2.waitKey(25) == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
def showPIL(pilImage):
root = tkinter.Tk()
w, h = root.winfo_screenwidth(), root.winfo_screenheight()
root.overrideredirect(1)
root.geometry("%dx%d+0+0" % (w, h))
root.focus_set()
root.bind("<Escape>", lambda e: (e.widget.withdraw(), e.widget.quit()))
canvas = tkinter.Canvas(root,width=w,height=h)
canvas.pack()
canvas.configure(background='black')
imgWidth, imgHeight = pilImage.size
if imgWidth > w or imgHeight > h:
ratio = min(w/imgWidth, h/imgHeight)
imgWidth = int(imgWidth*ratio)
imgHeight = int(imgHeight*ratio)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter.
        pilImage = pilImage.resize((imgWidth, imgHeight), Image.LANCZOS)
image = ImageTk.PhotoImage(pilImage)
imagesprite = canvas.create_image(w/2,h/2,image=image)
root.after(2000, root.destroy)
root.mainloop()
#grab time now
now = datetime.datetime.now()
print('Press ctl and c in terminal or command window to exit slide show.')
print('Make sure default image viewer opens full screen. Then close it when full screen.')
im = Image.open(r"black_all.png")
showPIL(im)
while True:
    if 8 <= now.hour <= 22:
        # Play the shift-change video in the two-minute windows at 12:00,
        # 12:30, and 17:00; otherwise loop the default video.
        if (now.hour == 12 and now.minute % 30 in (0, 1)) or (now.hour == 17 and now.minute % 60 in (0, 1)):
my_VidFunction('ShiftChangeYouTubeHIGH.mp4')
# grab time again, so not using time at stop of this loop instance
now = datetime.datetime.now()
else:
my_VidFunction('cci_icommons_album.mp4')
# grab time again, so not using time at stop of this loop instance
now = datetime.datetime.now()
else:
showPIL(im)
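# ----------------------------------------------------------------------
# Hedged sketch (not part of the original file): the schedule test in the
# loop above, factored into a pure helper so the hour/minute windows can
# be unit-tested without opening any video windows.
def shift_change_due(now):
    """True in the two-minute windows at 12:00, 12:30, and 17:00."""
    if now.hour == 12 and now.minute % 30 in (0, 1):
        return True
    if now.hour == 17 and now.minute in (0, 1):  # minute % 60 is just minute
        return True
    return False

assert shift_change_due(datetime.datetime(2024, 1, 1, 12, 30))
assert not shift_change_due(datetime.datetime(2024, 1, 1, 13, 0))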
|
icommonscrc/Looney-Toon
|
OpenVideoAtTimeV8.py
|
OpenVideoAtTimeV8.py
|
py
| 2,716 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23225332326
|
import copy
import six
from lxml import etree
from ems.exceptions import SchemaException
from ems.exceptions import ValidationException
from ems.exceptions import XMLException
from ems.schema import fields
def parse_meta(name, bases, dct):
"""
Parse the _META_ attribute from a schema definition.
"""
# Set default metadata
schema_meta = {
# The XML tag for the root element.
'tag': name,
# Validate the schema values when rendering.
'validate': True,
# Always omit empty values for fields which are not required.
'omit_empty': False,
# Omit strings which are equal to ''
'omit_blank': False,
# Validation fails for Choice types with more than one value populated.
'strict_choice': False,
}
    # Override the defaults with any values from the _META_ attribute.
if '_META_' in dct:
for k, v in six.iteritems(dct['_META_']):
schema_meta[k] = v
# Remove original definition
del dct['_META_']
dct['_schema_meta'] = schema_meta
def parse_fields(name, bases, dct):
"""
Parse the _SCHEMA_ attribute and set up the appropriate methods for each
defined field.
"""
if '_SCHEMA_' not in dct:
raise SchemaException('No _SCHEMA_ attribute found for %s' % name)
# Holds the fields from the schema definition
schema_fields = {}
# Holds a reverse lookup to the fields from their tag names. Used when
# parsing an XML into an object.
field_lookup = {}
for k, v in six.iteritems(dct['_SCHEMA_']):
if not isinstance(v, dict):
raise SchemaException('Schema definitions must be dict objects')
# Default to string type.
field_type = v.get('type', 'string')
# Tag name defaults to field name.
if 'tag' not in v:
v['tag'] = k
# Get field class from factory helper method.
schema_fields[k] = fields.factory(field_type, **v)
# Lookup for the XML tag -> attribute name
field_lookup[v['tag']] = k
# Create new property functions for the field values.
# Functions are wrapped to force 'k' to be evaluated now. Otherwise k
# will always be the value of the last element in the loop.
def wrap_get_f(k=k):
def get_f(self):
return self._schema_fields[k].value
return get_f
def wrap_set_f(k=k):
def set_f(self, value):
self._schema_fields[k].value = value
return set_f
dct[k] = property(wrap_get_f(), wrap_set_f())
# Remove the original schema definition and add the new one
del dct['_SCHEMA_']
dct['_schema_fields'] = schema_fields
dct['_field_lookup'] = field_lookup
def webservice_meta(**kwds):
"""
Class decorator for creating new web service objects.
Takes _META_ args as keyword arguments.
"""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if orig_vars.get('_META_', None) is None:
orig_vars['_META_'] = {}
if kwds is not None:
for k, v in six.iteritems(kwds):
orig_vars['_META_'][k] = v
return WebServiceMeta(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class WebServiceMeta(type):
"""
Metaclass used to create new WebService objects from a schema, defined
as a dictionary.
"""
def __new__(meta, name, bases, dct):
# Sets up the _schema_meta attribute.
parse_meta(name, bases, dct)
# Sets up the _schema_fields attribute.
parse_fields(name, bases, dct)
return super(WebServiceMeta, meta).__new__(meta, name, bases, dct)
class WebServiceObject(object):
"""
Base class for objects to be serialized/deserialized for the API. Also
used by nested objects.
Subclasses should also use the WebServiceMeta metaclass.
"""
def __init__(self, **kwds):
# Ensure that the field values are NOT shared across instances of the
# same class.
self._schema_fields = copy.deepcopy(self._schema_fields)
# Allow any field to be set via keyword arguments
for k, v in six.iteritems(kwds):
if k in self._schema_fields:
self._schema_fields[k].value = v
else:
raise TypeError('%s got unexpected keyword argument %s' %
(self.__class__.__name__, k))
def validate(self):
"""
Checks whether the values are valid for the class schema.
Returns None if valid. Otherwise, raises ValidationException.
"""
for field in six.itervalues(self._schema_fields):
field.validate()
def is_valid(self):
"""
Convenience wrapper for validate() to return True or False.
"""
try:
self.validate()
except ValidationException:
return False
return True
def to_element(self, root_element=None):
"""
Returns the object as an lxml.Element instance.
"""
if root_element is None:
root_element = etree.Element(self._schema_meta['tag'])
for field in six.itervalues(self._schema_fields):
if self._schema_meta['validate']:
field.validate()
children = field.to_element(
omit_empty=self._schema_meta['omit_empty'],
omit_blank=self._schema_meta['omit_blank'])
# Append each child element if the rendered elements form a list.
# This means that each child gets a root tag. E.g.
# <attribute>
# <a>1</a>
# </attribute>
# <attribute>
# <a>2</a>
# </attribute>
if isinstance(children, list):
                for elem in children:
                    root_element.append(elem)
elif children is not None:
root_element.append(children)
return root_element
def render(self, pretty=False):
"""
Renders the object into an XML string representation.
"""
element = self.to_element()
return etree.tostring(element, pretty_print=pretty)
@classmethod
def parse(cls, root_element, strict=True):
"""
Returns a new instance of the class from an XML.
"""
# New instance of myself to return
obj = cls()
for elem in root_element:
attr_name = cls._field_lookup.get(elem.tag, None)
if attr_name is None and strict:
raise XMLException('Unexpected element: %s' % elem.tag)
elif attr_name is None:
continue
# Field objects should provide a parse method
obj._schema_fields[attr_name].parse(elem)
return obj
@classmethod
def from_file(cls, filename, strict=True):
"""
Parse an XML from a file.
"""
tree = etree.parse(filename)
root = tree.getroot()
return cls.parse(root, strict=strict)
@classmethod
def from_text(cls, text, strict=True):
"""
Parse an XML from a string.
"""
bytes_ = six.BytesIO(text.encode('utf-8'))
        return cls.from_file(bytes_, strict=strict)
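# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a schema is an
# ordinary class carrying a _SCHEMA_ dict, upgraded via the
# webservice_meta decorator (which applies WebServiceMeta).  The 'tag'
# and 'type' options and the 'string' default follow parse_fields()
# above; the concrete field behavior lives in ems.schema.fields, so
# treat the example values as illustrative.
@webservice_meta(tag='customer', omit_empty=True)
class Customer(WebServiceObject):
    _SCHEMA_ = {
        'name': {'tag': 'Name'},   # type defaults to 'string'
        'email': {},               # tag defaults to the field name
    }

c = Customer(name='Ada', email='ada@example.com')
print(c.render(pretty=True))            # XML bytes rendered by lxml
obj = Customer.parse(c.to_element())    # round-trip back into an object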
|
ceramyq/python-ems
|
ems/schema/base.py
|
base.py
|
py
| 7,670 |
python
|
en
|
code
| 0 |
github-code
|
6
|