from setuptools import setup
from distutils.util import convert_path
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
main_ns = {}
ver_path = convert_path('betacal/version.py')
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
setup(
name='betacal',
version=main_ns['__version__'],
description='Beta calibration',
author='Telmo de Menezes e Silva Filho and Miquel Perello Nieto',
author_email='[email protected]',
url = 'https://betacal.github.io/',
download_url = 'https://github.com/betacal/python/archive/refs/tags/{}.tar.gz'.format(main_ns['__version__']),
keywords = ['classifier calibration', 'calibration', 'classification'],
license='MIT',
packages=['betacal'],
install_requires=[
'numpy',
'scikit-learn',
],
long_description=long_description,
long_description_content_type='text/markdown'
)
|
python
|
# Accept three subject marks (each out of 100) and display the percentage.
m1 = float(input("Enter marks 1: "))
m2 = float(input("Enter marks 2: "))
m3 = float(input("Enter marks 3: "))
percent = (m1+m2+m3)/3
print("% marks", percent)
|
python
|
class ReturnValue(object):
"""\brief Container for a return value for a daemon task or command. If the
the code is set to 0 the operation succeeded and the return value is
available. Any other code indicates an error.
[NOTE: we need to define other values, ala HTTP]
"""
CODE_SUCCESS = 0
CODE_FAILURE = -1
SIMPLE_STR_VARS = ["code", "msg", "value"]
def __init__(self, code=None, msg=None, value=None):
"""\brief Initializes class
\param code (\c int) The numeric code
\param msg (\c string) The message, useful in case of error
\param value (\c void) The actual return value
"""
self.__code = code
self.__msg = msg
self.__value = value
def get_code(self):
"""\brief Retrieves the numeric code
\return (\c int) The numeric code
"""
return self.__code
def get_msg(self):
"""\brief Retrieves the message
\return (\c string) The message
"""
return self.__msg
def get_value(self):
"""\brief Retrieves the actual return value
\return (\c void) The actual return value
"""
return self.__value
def __str__(self):
string = str(self.__class__.__name__) + ": "
for str_var in self.SIMPLE_STR_VARS:
            string += str_var + "=" + str(getattr(self, 'get_' + str_var)()) + ","
string = string[:len(string) - 1]
return string
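# Illustrative usage (added for clarity, not part of the original module): build a
# successful ReturnValue and exercise the accessors and string rendering above.
if __name__ == '__main__':
    rv = ReturnValue(code=ReturnValue.CODE_SUCCESS, msg="ok", value=42)
    print(rv)                                        # ReturnValue: code=0,msg=ok,value=42
    print(rv.get_code(), rv.get_msg(), rv.get_value())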
|
python
|
# This example shows how to train a built-in PPO model to play CartPole-v0.
# Example usage:
# python -m example.cartpole.train_dqn
# --- built in ---
import time
import json
# --- 3rd party ---
import gym
import tensorflow as tf
# run on cpu
tf.config.set_visible_devices([], 'GPU')
# --- my module ---
import unstable_baselines as ub
from unstable_baselines.algo.ppo import PPO
def main():
ub.logger.Config.use(level='INFO', colored=True)
ub.utils.set_seed(1)
# create envs
env = ub.envs.VecEnv([gym.make('CartPole-v0') for _ in range(10)])
env.seed(1)
eval_env = gym.make('CartPole-v0')
eval_env.seed(0)
start_time = time.time()
# create and train model
model = PPO(
env,
learning_rate=1e-3,
gamma=0.8,
batch_size=128,
n_steps=500,
).learn(
20000,
verbose=1
)
# evaluate model
results = model.eval(eval_env, 20, 200)
metrics = model.get_eval_metrics(results)
print(json.dumps(metrics))
print('time spent:', time.time()-start_time)
env.close()
eval_env.close()
if __name__ == '__main__':
main()
|
python
|
#
# General Electricity sector Decarbonization Model (GEDM)
# Copyright (C) 2020 Cheng-Ta Chu.
# Licensed under the MIT License (see LICENSE file).
#
# Module note:
# Functions to initialize instance settings
#
#----------------------------------------------------
# sets
#----------------------------------------------------
def getCountryIndList(objMarket):
""" get country index list in the market """
lsCountryList = list()
for objZone in objMarket.lsZone:
if objZone.iCountryIndex not in lsCountryList:
lsCountryList.append(objZone.iCountryIndex )
return lsCountryList
def getCountryCodeList(objMarket):
""" get country code list in the market """
lsCountryList = list()
for objZone in objMarket.lsZone:
if objZone.sCountry not in lsCountryList:
lsCountryList.append(objZone.sCountry )
return lsCountryList
#----------------------------------------------------
# Fixed Parameters
#----------------------------------------------------
def getZonesInCountry(objMarket, model):
    ''' get the zone ID list of each country in the market '''
dData = {}
for sCountry in model.setCountryCode_CN:
sZoneList = ""
for objZone in objMarket.lsZone:
if objZone.sCountry == sCountry:
sZoneList = sZoneList + objZone.sZoneID + ";"
dData[sCountry] = sZoneList
return dData
##### time slice #####
def getTSRepHourYear(instance, model):
''' get TS representing hours in a year '''
dData = {}
for objTS in instance.lsTimeSlice:
dData[objTS.sTSIndex] = objTS.iRepHoursInYear
return dData
def getTSRepHourDay(instance, model):
''' get TS representing hours in a day '''
dData = {}
for objTS in instance.lsTimeSlice:
dData[objTS.sTSIndex] = objTS.iRepHoursInDay
return dData
def getTSRepHourYear_CE(instance, model):
''' get TS representing hours in a year, for CE model '''
dData = {}
for objTS in instance.lsTimeSlice_CEP:
dData[objTS.sTSIndex] = objTS.iRepHoursInYear
return dData
def getTSRepHourDay_CE(instance, model):
''' get TS representing hours in a day, for CE model '''
dData = {}
for objTS in instance.lsTimeSlice_CEP:
dData[objTS.sTSIndex] = objTS.iRepHoursInDay
return dData
def getTSIndInDay(instance, model):
''' get the set of index of the TS in a day '''
dData = {}
for sDay_DY in model.setDay_DY:
TSIndlist = ""
for objTS in instance.lsTimeSlice:
if (objTS.sMonth + objTS.sDay) == sDay_DY:
TSIndlist = TSIndlist + objTS.sTSIndex + ";"
TSIndlist = TSIndlist[0:-1] # remove the last ";"
dData[sDay_DY] = TSIndlist
return dData
def getTSIndInDay_CE(instance, model):
''' get the set of index of the TS in a day, for CE model '''
dData = {}
for sDay_DY in model.setDay_DY:
TSIndlist = ""
for objTS in instance.lsTimeSlice_CEP:
if (objTS.sMonth + objTS.sDay) == sDay_DY:
TSIndlist = TSIndlist + objTS.sTSIndex + ";"
TSIndlist = TSIndlist[0:-1] # remove the last ";"
dData[sDay_DY] = TSIndlist
return dData
def getTSRepHourYear_Day(model, objDayTS):
''' get the TS representing hours in a year '''
dData = {}
for objTS in objDayTS.lsDiurnalTS:
dData[objTS.sTSIndex] = objTS.iRepHoursInYear
return dData
def getTSRepHourDay_Day(model, objDayTS):
''' get the TS representing hours in a day '''
dData = {}
for objTS in objDayTS.lsDiurnalTS:
dData[objTS.sTSIndex] = objTS.iRepHoursInDay
return dData
#----------------------------------------------------
# Transmission Parameters
#----------------------------------------------------
def getTransCapacity(model, objMarket, iYear):
''' get transmission capacity of terrestrial links '''
dData = {}
for sTrans in model.setTransLDZ_TRL:
for objTrans in objMarket.lsTrans:
if objTrans.sTransID == sTrans:
if iYear in objTrans.dicTransAccCap_YS:
dData[sTrans] = objTrans.dicTransAccCap_YS[iYear]
else:
dData[sTrans] = 0
break
return dData
def getTransCapacityOffs(model, objMarket, iYear):
    ''' get transmission capacity of offshore links '''
dData = {}
for sTrans in model.setTransOFZ_TRF:
for objTrans in objMarket.lsTrans_off:
if objTrans.sTransID == sTrans:
if iYear in objTrans.dicTransAccCap_YS:
dData[sTrans] = objTrans.dicTransAccCap_YS[iYear]
else:
dData[sTrans] = 0
break
return dData
def getTransLoss(model, objMarket, ind_year):
''' get transmission loss of terrestrial links '''
dData = {}
for sTrans in model.setTransLDZ_TRL:
for objTrans in objMarket.lsTrans:
if objTrans.sTransID == sTrans:
if objTrans.fDistance > 600:
# HVDC 600km as break point
dData[sTrans] = (objTrans.fDistance / 1000 * objMarket.lsDCLineLoss[ind_year] / 100) \
+ (objMarket.lsDCConvLoss[ind_year] / 100)
else:
# line loss of HVAC lines
dData[sTrans] = objTrans.fDistance / 1000 * objMarket.lsACLineLoss[ind_year] / 100
break
return dData
def getTransLossOffs(model, objMarket, ind_year):
''' get transmission loss of offshore links '''
dData = {}
for sTrans in model.setTransOFZ_TRF:
for objTrans in objMarket.lsTrans_off:
if objTrans.sTransID == sTrans:
                # assume all offshore links are HVDC (DC line loss plus converter loss)
dData[sTrans] = (objTrans.fDistance / 1000 * objMarket.lsDCLineLoss[ind_year] / 100) \
+ (objMarket.lsDCConvLoss[ind_year] / 100)
break
return dData
def getTransCost(model, objMarket, ind_year):
''' get transmission cost of terrestrial links '''
##### cost assumptions #####
HVAC_CAPEX = objMarket.lsACCapex[ind_year] # USD per kW km
HVAC_OPEX = objMarket.lsACOpex[ind_year] # USD per kW km
HVDC_CAPEX = objMarket.lsDCCapex[ind_year] # USD per kW km
HVDC_OPEX = objMarket.lsDCOpex[ind_year] # USD per kW km
HVDC_CAPEX_converter = objMarket.lsDCCapexConv[ind_year] # USD per kW
HVDC_OPEX_converter = objMarket.lsDCOpexConv[ind_year] # USD per kW
CRF = objMarket.lsCRF[ind_year] / 100 # lifetime 50 years, discount rate 5%
dData = {}
for sTrans in model.setTransLDZ_TRL:
for objTrans in objMarket.lsTrans:
if objTrans.sTransID == sTrans:
distance = objTrans.fDistance
if distance > 0:
CostPerMW = 0
if distance > 600: # HVDC 600km as break point
# annual cost per MW
CostPerMW = distance * (HVDC_CAPEX*CRF + HVDC_OPEX) * 1000
# converter cost per MW
CostPerMW = CostPerMW + ( (HVDC_CAPEX_converter*CRF + HVDC_OPEX_converter) * 1000 )
# change unit from USD per MW to M.USD per MW
CostPerMW = CostPerMW / 1000000
else: # HVAC
# annual cost per MW
CostPerMW = distance * (HVAC_CAPEX*CRF + HVAC_OPEX) * 1000
# change unit from USD per MW to M.USD per MW
CostPerMW = CostPerMW / 1000000
dData[sTrans] = CostPerMW
else:
dData[sTrans] = 9999
break
return dData
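# Worked example (illustrative numbers only, not model inputs): for a 400 km HVAC
# link with HVAC_CAPEX = 0.4 USD/kW-km, HVAC_OPEX = 0.01 USD/kW-km and CRF = 0.055,
#   CostPerMW = 400 * (0.4*0.055 + 0.01) * 1000 = 12,800 USD per MW per year,
# which the final division by 1,000,000 converts to 0.0128 M.USD per MW per year.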
def getTransCostOffs(model, objMarket, ind_year):
''' get transmission cost of offshore links '''
##### cost assumptions #####
HVDC_CAPEX = objMarket.lsDCCapex[ind_year] # USD per kW km
HVDC_OPEX = objMarket.lsDCOpex[ind_year] # USD per kW km
HVDC_CAPEX_converter = objMarket.lsDCCapexConv[ind_year] # USD per kW
HVDC_OPEX_converter = objMarket.lsDCOpexConv[ind_year] # USD per kW
CRF = objMarket.lsCRF[ind_year] / 100 # lifetime 50 years, discount rate 5%
dData = {}
for sTrans in model.setTransOFZ_TRF:
for objTrans in objMarket.lsTrans_off:
if objTrans.sTransID == sTrans:
distance = objTrans.fDistance
if distance > 0:
CostPerMW = 0
# annual cost per MW
CostPerMW = distance * (HVDC_CAPEX*CRF + HVDC_OPEX) * 1000
# converter cost per MW
CostPerMW = CostPerMW + ( (HVDC_CAPEX_converter*CRF + HVDC_OPEX_converter) * 1000 )
# change unit from USD per MW to M.USD per MW
CostPerMW = CostPerMW / 1000000
dData[sTrans] = CostPerMW
else:
dData[sTrans] = 9999
break
return dData
|
python
|
from scipy.signal import find_peaks as scipy_find
from core.preprocessing import read_data
from utils.visualize import see_spectrum
if __name__ == "__main__":
data = read_data("sample_data/sample04.csv", headers=0, delimineter=",")
print(data)
peaks = scipy_find(x=data.y, prominence=10)
properties = peaks[1]
peaks = peaks[0]
print(peaks)
print(properties)
see_spectrum(data)
|
python
|
# <auto-generated>
# This code was generated by the UnitCodeGenerator tool
#
# Changes to this file will be lost if the code is regenerated
# </auto-generated>
import unittest
import units.pressure.pascals
class TestPascalsMethods(unittest.TestCase):
def test_convert_known_pascals_to_atmospheres(self):
self.assertAlmostEqual(0.88823094, units.pressure.pascals.to_atmospheres(90000.0), places=1)
self.assertAlmostEqual(12.18422897, units.pressure.pascals.to_atmospheres(1234567.0), places=1)
self.assertAlmostEqual(2.01391562, units.pressure.pascals.to_atmospheres(204060.0), places=1)
def test_convert_known_pascals_to_bars(self):
self.assertAlmostEqual(0.1, units.pressure.pascals.to_bars(10000.0), places=1)
self.assertAlmostEqual(0.12345, units.pressure.pascals.to_bars(12345.0), places=1)
self.assertAlmostEqual(0.8, units.pressure.pascals.to_bars(80000.0), places=1)
def test_convert_known_pascals_to_torrs(self):
self.assertAlmostEqual(600.04935, units.pressure.pascals.to_torrs(80000.0), places=1)
self.assertAlmostEqual(9.255761, units.pressure.pascals.to_torrs(1234.0), places=1)
self.assertAlmostEqual(0.600049, units.pressure.pascals.to_torrs(80.0), places=1)
def test_convert_known_pascals_to_psi(self):
self.assertAlmostEqual(0.11603, units.pressure.pascals.to_psi(800.0), places=1)
self.assertAlmostEqual(1.257477, units.pressure.pascals.to_psi(8670.0), places=1)
self.assertAlmostEqual(0.145038, units.pressure.pascals.to_psi(1000.0), places=1)
if __name__ == '__main__':
unittest.main()
|
python
|
from binaryninja import *
from pathlib import Path
import hashlib
import os
import subprocess
class Decompiler(BackgroundTaskThread):
def __init__(self,file_name,current_path):
self.progress_banner = f"[Ghinja] Running the decompiler job ... Ghinja decompiler output not available until finished."
BackgroundTaskThread.__init__(self, "", True)
self.file_name = str(Path(file_name).name)
self.file_path = file_name
self.current_path = current_path
self.decompile_result_path = Path(self.current_path / "decomp_")
def run(self):
self.progress = f"[Ghinja] Running the decompiler job ... Ghinja decompiler output not available until finished."
with open(os.path.dirname(os.path.realpath(__file__)) + "/Decompile_TEMPLATE.java",'r') as decomp_file:
data = decomp_file.read()
with open(os.path.dirname(os.path.realpath(__file__)) + "/Gnidja/Gnidja.java",'w') as tmp_decomp_file:
tmp_decomp_file.write(data.replace("PLACEHOLDER_OUTPUT",str(self.decompile_result_path).replace("\\","\\\\")))
os.system(f"{Settings().get_string('ghinja.ghidra_install_path')} \"{str(self.current_path)}\" \"{self.file_name}\" -import \"{self.file_path}\" -scriptPath \"{os.path.dirname(os.path.realpath(__file__)) + '/Gnidja'}\" -postScript \"Gnidja.java\"")
#os.system(f"{Settings().get_string('ghinja.ghidra_install_path')} \"{str(self.current_path)}\" \"{self.file_name}\" -import \"{self.file_path}\" -postscript Decompile.java -scriptPath \"{os.path.dirname(os.path.realpath(__file__))}\"") #new
#os.system(f"{Settings().get_string('ghinja.ghidra_install_path')} \"{str(self.current_path)}\" \"{self.file_name}\" -import \"{self.file_path}\" -postscript \"{os.path.dirname(os.path.realpath(__file__)) + '/Decompile.java'}\"") #combi
#log_info(f"{Settings().get_string('ghinja.ghidra_install_path')} \"{str(self.current_path)}\" \"{self.file_name}\" -import \"{self.file_path}\" -postscript \"/home/c4t/.binaryninja/plugins/ghinja/Decompile.java\"")
os.remove(os.path.dirname(os.path.realpath(__file__)) + "/Gnidja/Gnidja.java")
|
python
|
# coding: utf8
from __future__ import unicode_literals
from ..norm_exceptions import BASE_NORMS
from ...attrs import NORM
from ...attrs import LIKE_NUM
from ...util import add_lookups
_stem_suffixes = [
["ो","े","ू","ु","ी","ि","ा"],
["कर","ाओ","िए","ाई","ाए","ने","नी","ना","ते","ीं","ती","ता","ाँ","ां","ों","ें"],
["ाकर","ाइए","ाईं","ाया","ेगी","ेगा","ोगी","ोगे","ाने","ाना","ाते","ाती","ाता","तीं","ाओं","ाएं","ुओं","ुएं","ुआं"],
["ाएगी","ाएगा","ाओगी","ाओगे","एंगी","ेंगी","एंगे","ेंगे","ूंगी","ूंगा","ातीं","नाओं","नाएं","ताओं","ताएं","ियाँ","ियों","ियां"],
["ाएंगी","ाएंगे","ाऊंगी","ाऊंगा","ाइयाँ","ाइयों","ाइयां"]
]
#reference 1:https://en.wikipedia.org/wiki/Indian_numbering_system
#reference 2: https://blogs.transparent.com/hindi/hindi-numbers-1-100/
_num_words = ['शून्य', 'एक', 'दो', 'तीन', 'चार', 'पांच', 'छह', 'सात', 'आठ', 'नौ', 'दस',
'ग्यारह', 'बारह', 'तेरह', 'चौदह', 'पंद्रह', 'सोलह', 'सत्रह', 'अठारह', 'उन्नीस',
'बीस', 'तीस', 'चालीस', 'पचास', 'साठ', 'सत्तर', 'अस्सी', 'नब्बे', 'सौ', 'हज़ार',
'लाख', 'करोड़', 'अरब', 'खरब']
def norm(string):
# normalise base exceptions, e.g. punctuation or currency symbols
if string in BASE_NORMS:
return BASE_NORMS[string]
# set stem word as norm, if available, adapted from:
# http://computing.open.ac.uk/Sites/EACLSouthAsia/Papers/p6-Ramanathan.pdf
# http://research.variancia.com/hindi_stemmer/
# https://github.com/taranjeet/hindi-tokenizer/blob/master/HindiTokenizer.py#L142
for suffix_group in reversed(_stem_suffixes):
length = len(suffix_group[0])
if len(string) <= length:
break
for suffix in suffix_group:
if string.endswith(suffix):
return string[:-length]
return string
def like_num(text):
text = text.replace(',', '').replace('.', '')
if text.isdigit():
return True
if text.count('/') == 1:
num, denom = text.split('/')
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
return False
LEX_ATTRS = {
NORM: norm,
LIKE_NUM: like_num
}
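# Illustrative behaviour (added for clarity, not part of the original module),
# based only on the data and functions above:
#   like_num("दो")     -> True   ("दो", two, is listed in _num_words)
#   like_num("10,000") -> True   (commas are stripped before isdigit())
#   norm("घर")         -> "घर"   (no suffix in _stem_suffixes matches, so the input is returned)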
|
python
|
from rpasdt.common.enums import StringChoiceEnum
class NodeAttributeEnum(StringChoiceEnum):
"""Available node attributes."""
COLOR = "COLOR"
SIZE = "SIZE"
EXTRA_LABEL = "EXTRA_LABEL"
LABEL = "LABEL"
SOURCE = "SOURCE"
class DistanceMeasureOptionEnum(StringChoiceEnum):
pass
NETWORK_OPTIONS = {
"bridge": ("Bridges", "Generate all bridges in a graph."),
"cycle": (
"Simple cycles",
"Find simple cycles (elementary circuits) of a directed graph.",
),
"degree_assortativity": (
"Degree assortativity",
"Compute degree assortativity of graph.",
),
"average_neighbor_degree": (
"Average neighbor degree",
"Returns the average degree of the neighborhood of each node.",
),
"k_nearest_neighbors": (
"K-nearest neighbors",
"Compute the average degree connectivity of graph.",
),
"average_clustering": (
"Average clustering",
"Compute the average clustering coefficient.",
),
}
|
python
|
"""ProtocolEngine tests module."""
|
python
|
# -*- coding: utf-8 -*-
import logging
import requests
from bs4 import BeautifulSoup
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('wb')
class Client:
def __init__(self):
self.session = requests.Session()
self.session.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36',
'Accept-Language': 'ru',
}
def load_page(self):
url = 'https://www.wildberries.ru/catalog/aksessuary/aksessuary-dlya-volos'
res = self.session.get(url=url)
res.raise_for_status()
return res.text
def parse_page(self, text: str):
soup = BeautifulSoup(text, 'lxml')
container = soup.select('div.dtList.i-dtList.j-card-item')
for block in container:
self.parse_block(block=block)
def parse_block(self, block):
logger.info(block)
logger.info('=' * 100)
def run(self):
text = self.load_page()
self.parse_page(text=text)
if __name__ == "__main__":
parser = Client()
parser.run()
|
python
|
import json
import os
from pathlib import Path
from time import sleep
from tqdm import tqdm
from ..graph.models import Concept, Work
from ..utils import get_logger
from . import get_concepts_es_client
from .format import (
format_concept_for_elasticsearch,
format_story_for_elasticsearch,
format_work_for_elasticsearch,
)
data_path = Path("/data")
mappings_path = data_path / "mappings"
settings_path = data_path / "settings"
log = get_logger(__name__)
def create_index(client, name, mappings, settings):
log.info(f"Creating index: {name}")
client.indices.delete(index=name, ignore=404)
client.indices.create(
index=name,
mappings=mappings,
settings=settings,
)
def update_mapping(client, index, mapping):
log.info(f"Updating mapping for index: {index}")
client.indices.put_mapping(
index=index,
body=mapping,
ignore=400,
)
response = client.update_by_query(
index=index,
body={"query": {"match_all": {}}},
wait_for_completion=False,
)
task_id = response["task"]
log.info(f"Update Task ID: {task_id}")
while task_in_progress(client, task_id):
log.info(f"Waiting for update to complete")
sleep(5)
log.info(f"Update complete")
def task_in_progress(client, task_id):
task_status = client.tasks.get(task_id)
if task_status["completed"]:
return False
else:
return True
def index_stories(start_index=0):
concepts_es_client = get_concepts_es_client()
log.info(
f"Creating the stories index: {os.environ['ELASTIC_STORIES_INDEX']}"
)
with open(mappings_path / "stories.json", "r") as f:
stories_mappings = json.load(f)
with open(settings_path / "stories.json", "r") as f:
stories_settings = json.load(f)
if not start_index:
concepts_es_client.indices.delete(
index=os.environ["ELASTIC_STORIES_INDEX"], ignore=404
)
concepts_es_client.indices.create(
index=os.environ["ELASTIC_STORIES_INDEX"],
mappings=stories_mappings,
settings=stories_settings,
)
log.info("Populating the stories index")
progress_bar = tqdm(
Work.nodes.filter(type="story"),
total=len(Work.nodes.filter(type="story")),
unit="stories",
)
for story in progress_bar:
if progress_bar.n < start_index:
progress_bar.set_description(f"Skipping story {story.uid}")
else:
progress_bar.set_description(f"Indexing story {story.uid}")
concepts_es_client.index(
index=os.environ["ELASTIC_STORIES_INDEX"],
id=story.uid,
document=format_story_for_elasticsearch(story),
)
def index_works(start_index=0):
concepts_es_client = get_concepts_es_client()
log.info(f"Creating the works index: {os.environ['ELASTIC_WORKS_INDEX']}")
with open(mappings_path / "works.json", "r") as f:
works_mappings = json.load(f)
with open(settings_path / "works.json", "r") as f:
works_settings = json.load(f)
if not start_index:
concepts_es_client.indices.delete(
index=os.environ["ELASTIC_WORKS_INDEX"], ignore=404
)
concepts_es_client.indices.create(
index=os.environ["ELASTIC_WORKS_INDEX"],
mappings=works_mappings,
settings=works_settings,
)
log.info("Populating the works index")
progress_bar = tqdm(
Work.nodes.filter(type="work"),
total=len(Work.nodes.filter(type="work")),
unit="works",
)
for work in progress_bar:
if progress_bar.n < start_index:
progress_bar.set_description(f"Skipping work {work.uid}")
else:
progress_bar.set_description(f"Indexing work {work.uid}")
concepts_es_client.index(
index=os.environ["ELASTIC_WORKS_INDEX"],
id=work.wellcome_id,
document=format_work_for_elasticsearch(work),
)
def index_concepts(start_index=0, create=False):
concepts_es_client = get_concepts_es_client()
concepts_index_name = os.environ["ELASTIC_CONCEPTS_INDEX"]
if create:
with open(mappings_path / "concepts.json", "r") as f:
concepts_mappings = json.load(f)
with open(settings_path / "concepts.json", "r") as f:
concepts_settings = json.load(f)
create_index(
client=concepts_es_client,
name=concepts_index_name,
mappings=concepts_mappings,
settings=concepts_settings,
)
log.info("Populating the concepts index")
progress_bar = tqdm(
Concept.nodes.filter(type="concept"),
total=len(Concept.nodes.filter(type="concept")),
unit="concepts",
)
for concept in progress_bar:
if progress_bar.n < start_index:
progress_bar.set_description(f"Skipping concept {concept.uid}")
else:
progress_bar.set_description(f"Indexing concept {concept.uid}")
concepts_es_client.index(
index=concepts_index_name,
id=concept.uid,
document=format_concept_for_elasticsearch(concept),
)
def index_people(start_index=0):
concepts_es_client = get_concepts_es_client()
concepts_index_name = os.environ["ELASTIC_CONCEPTS_INDEX"]
log.info("Populating the concepts index")
progress_bar = tqdm(
Concept.nodes.filter(type="person"),
total=len(Concept.nodes.filter(type="person")),
unit="people",
)
for person in progress_bar:
if progress_bar.n < start_index:
progress_bar.set_description(f"Skipping person {person.uid}")
else:
progress_bar.set_description(f"Indexing person {person.uid}")
concepts_es_client.index(
index=concepts_index_name,
id=person.uid,
document=format_concept_for_elasticsearch(person),
)
|
python
|
# ex_sin.py
# author: C.F.Kwok
# date: 2018-1-5
from simp_py import tft
from math import sin, radians
import time
class SIN:
global tft
def __init__(self):
self.init_plot()
tft.tft.clear()
def run(self):
global sin, radians
ang=0
for x in range(320):
v = int(round((sin(radians(ang * 4 )) + 1.05) * 120))
self.plot(x,v)
ang +=1
if ang>=90:
ang=0
time.sleep(0.02)
def init_plot(self):
self.tscale = 0.02
self.vmax = 255
self.ymax = 200
self.xmax = 319
self.yscale = self.ymax / self.vmax
def plot(self,x,v):
y = self.ymax - round(v * self.yscale)
tft.tft.pixel(x, y,0xffff00)
self.show_value(v,self.vmax)
def show_value(self,v,vmax):
tft.tft.text(10,210,'%s' % v)
tft.tft.text(100,210,'%s' % vmax)
if __name__=='__main__':
t = SIN()
t.run()
|
python
|
from django import forms
class ModelMultiValueWidget(forms.MultiWidget):
def __init__(self, *args, **kwargs):
self.model = kwargs.pop('model')
self.labels = kwargs.pop('labels', [None])
self.field_names = kwargs.pop('field_names', [None])
super(ModelMultiValueWidget, self).__init__(*args, **kwargs)
def decompress(self, value):
if value:
obj = self.model.objects.get(pk=value)
return [getattr(obj, label, None) for label in self.field_names]
return [None]*len(self.labels)
def format_output(self, rendered_widgets):
output = ''.join(['<p><label for="id_{model_name}_{i}">{label}:</label>{widget}</p>'.format(i=i,
model_name=self.model._meta.verbose_name,
label=label if label else '',
widget=widget)
for i, (widget, label) in enumerate(zip(rendered_widgets, self.labels))])
return output
class SelectModelMultiValueWidget(ModelMultiValueWidget):
def __init__(self, select=None, *args, **kwargs):
self.select = select
widgets_in = kwargs.pop('widgets', list())
widgets = [self.select.widget] + widgets_in
labels_in = kwargs.pop('labels', list())
labels = [''] + labels_in
field_names_in = kwargs.pop('field_names', list())
field_names = [''] + field_names_in
super(SelectModelMultiValueWidget, self).__init__(field_names=field_names, labels=labels, widgets=widgets, *args, **kwargs)
def decompress(self, value):
decompressed = super(SelectModelMultiValueWidget, self).decompress(None)
return [value] + decompressed
def format_output(self, rendered_widgets):
return super(SelectModelMultiValueWidget, self).format_output(rendered_widgets)
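# Hedged usage sketch (added for clarity; "Author" and its fields are hypothetical,
# and only keyword arguments handled by the constructors above are used):
#
#   widget = SelectModelMultiValueWidget(
#       select=forms.ModelChoiceField(queryset=Author.objects.all()),
#       widgets=[forms.TextInput(), forms.TextInput()],
#       labels=['Name', 'Email'],
#       field_names=['name', 'email'],
#       model=Author,
#   )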
|
python
|
import atexit
import subprocess
from multimedia.constants import AUDIO_OUTPUT_ANALOG, KEY_DOWN_ARROW, KEY_LEFT_ARROW, KEY_RIGHT_ARROW, KEY_UP_ARROW
from multimedia.functions import exit_process_send_keystroke, popen_and_wait_for_exit, send_keytroke_to_process
from multimedia.playerhandler import PlayerHandler
class OmxPlayerHandler(PlayerHandler):
"""
Interface for omxplayer which is a command line application installed on Raspberry Pi by default that uses Linux
Frame Buffer to display video files. It is controlled by sending spoofed keystrokes to the standard input.
"""
####################################################################################################################
# Constructor.
####################################################################################################################
def __init__(self, binary_path=None):
### Private attributes.
# The path of the executable.
if binary_path is None:
self._binary = '/usr/bin/omxplayer'
else:
self._binary = binary_path
# A handle to the running player process.
self._current_process = None
####################################################################################################################
# "PlayerHandler" implementation.
####################################################################################################################
def faster(self):
return send_keytroke_to_process(self._current_process, '2')
def fast_forward(self):
return send_keytroke_to_process(self._current_process, KEY_UP_ARROW)
def fast_rewind(self):
return send_keytroke_to_process(self._current_process, KEY_DOWN_ARROW)
def forward(self):
return send_keytroke_to_process(self._current_process, KEY_RIGHT_ARROW)
def pause(self):
"""
Simulates keypress used for pausing omxplayer.
"""
return send_keytroke_to_process(self._current_process, ' ')
def play(self, audio_output, file_to_play, subtitle_to_use=None, on_exit=None):
"""
Start playing the specified file using the given audio output and the optionally specified subtitle.
"""
# Stop previous playing.
self.stop()
# Prepare command.
program = self._prepare_process_arguments(audio_output, file_to_play, subtitle_to_use)
# Execute command.
if on_exit is None:
self._current_process = subprocess.Popen(
program,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
popen_and_wait_for_exit(True, on_exit, self._set_process, program)
atexit.register(self.stop)
return True
def rewind(self):
return send_keytroke_to_process(self._current_process, KEY_LEFT_ARROW)
def slower(self):
return send_keytroke_to_process(self._current_process, '1')
def stop(self):
"""
        Simulates keypress used for quitting omxplayer, then kills the process if it still exists.
"""
result = exit_process_send_keystroke(self._current_process, 'q')
self._current_process = None
return result
def volume_down(self):
return send_keytroke_to_process(self._current_process, '-')
def volume_up(self):
return send_keytroke_to_process(self._current_process, '+')
####################################################################################################################
# Auxiliary methods.
####################################################################################################################
def _prepare_process_arguments(self, audio_output, file_to_play, subtitle_to_use=None):
# Construct command to execute.
program = [self._binary, file_to_play]
# Use different output if needed.
if audio_output == AUDIO_OUTPUT_ANALOG:
program.append('-o')
program.append('local')
# The else branch is somewhat redundant, because this is the default behavior.
else:
program.append('-o')
program.append('hdmi')
# Append the subtitle-related part to the command if necessary.
if subtitle_to_use is not None:
program.append('--subtitles')
program.append(subtitle_to_use)
return program
def _set_process(self, process):
self._current_process = process
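# Hedged usage sketch (added for clarity; the media path is hypothetical and only
# methods defined on OmxPlayerHandler above are called):
#
#   player = OmxPlayerHandler()
#   player.play(AUDIO_OUTPUT_ANALOG, '/home/pi/video.mp4')
#   player.pause()       # sends a space keystroke to toggle pause
#   player.volume_up()   # sends '+'
#   player.stop()        # sends 'q' and clears the process handle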
|
python
|
# -*- coding: utf-8 -
#
# This file is part of tproxy released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 5, 4)
__version__ = ".".join(map(str, version_info))
|
python
|
''' Calculates the Frechet Inception Distance (FID) to evaluate GANs or
other image generating functions.
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
'''
import numpy as np
import os
import tensorflow as tf
from scipy import linalg
import pathlib
import urllib
import warnings
from tqdm import tqdm
from edflow.iterators.batches import make_batches
from edflow.data.util import adjust_support
from edflow.util import retrieve
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def create_inception_graph(pth):
"""Creates a graph from saved GraphDef file."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile( pth, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString( f.read())
_ = tf.import_graph_def( graph_def, name='FID_Inception_Net')
# code for handling inception net derived from
# https://github.com/openai/improved-gan/blob/master/inception_score/model.py
def _get_inception_layer(sess):
"""Prepares inception net for batched usage and returns pool_3 layer. """
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims != []:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
return pool3
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations of the pool_3 layer, precalculated
               on a representative data set.
-- sigma1: The covariance matrix over activations of the pool_3 layer for
generated samples.
    -- sigma2: The covariance matrix over activations of the pool_3 layer,
               precalculated on a representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
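# Worked check (added for clarity, not part of the original module): for identical
# Gaussians, mu1 == mu2 and sigma1 == sigma2 == C, so ||mu_1 - mu_2||^2 = 0 and
# Tr(C + C - 2*sqrt(C*C)) = 0; e.g.
# calculate_frechet_distance(np.zeros(3), np.eye(3), np.zeros(3), np.eye(3))
# evaluates to (numerically) 0.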
def calculate_activation_statistics(images, sess, batch_size=50, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
"""
act = get_activations(images, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
#------------------
# The following methods are implemented to obtain a batched version of the activations.
# This has the advantage to reduce memory requirements, at the cost of slightly reduced efficiency.
# - Pyrestone
#------------------
def get_activations_from_dset(dset, imsupport, sess, batch_size=50, imkey='image', verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- dset : DatasetMixin which contains the images.
-- imsupport : Support of images. One of '-1->1', '0->1' or '0->255'
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
                    batch_size. A reasonable batch size depends on the available hardware.
-- imkey : Key at which the images can be found in each example.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
d0 = len(dset)
if batch_size > d0:
print("warning: batch size is bigger than the data size. setting batch size to data size")
batch_size = d0
batches = make_batches(dset, batch_size, shuffle=False)
n_batches = len(batches)
n_used_imgs = n_batches*batch_size
pred_arr = np.empty((n_used_imgs,2048))
print('d0', d0)
print('n_batches', n_batches)
print('n_ui', n_used_imgs)
for i, batch in enumerate(tqdm(batches, desc='FID')):
if i >= n_batches:
break
if verbose:
print("\rPropagating batch %d/%d" % (i+1, n_batches), end="", flush=True)
start = i*batch_size
end = start + batch_size
images = retrieve(batch, imkey)
images = adjust_support(np.array(images),
future_support='0->255',
current_support=imsupport,
clip=True)
if len(images.shape) == 3:
images = images[:,:,:,None]
images = np.tile(images, [1,1,1,3])
elif images.shape[-1] == 1:
images = np.tile(images, [1, 1, 1, 3])
images = images.astype(np.float32)[..., :3]
if len(pred_arr[start:end]) == 0:
continue
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': images})
pred_arr[start:end] = pred.reshape(batch_size,-1)
del batch # clean up memory
batches.finalize()
if verbose:
print(" done")
return pred_arr
def calculate_activation_statistics_from_dset(dset, imsupport, sess, batch_size=50, imkey='image', verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- dset : DatasetMixin which contains the images.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
                    batch_size. A reasonable batch size depends on the available hardware.
-- imkey : Key at which the images can be found in each example.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
"""
act = get_activations_from_dset(dset, imsupport, sess, batch_size, imkey, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# The following functions aren't needed for calculating the FID
# they're just here to make this module work as a stand-alone script
# for calculating FID scores
#-------------------------------------------------------------------------------
def check_or_download_inception(inception_path):
''' Checks if the path to the inception file is valid, or downloads
the file if it is not present. '''
INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
if inception_path is None:
inception_path = '/tmp'
inception_path = pathlib.Path(inception_path)
model_file = inception_path / 'classify_image_graph_def.pb'
if not model_file.exists():
print("Downloading Inception model")
from urllib import request
import tarfile
fn, _ = request.urlretrieve(INCEPTION_URL)
with tarfile.open(fn, mode='r') as f:
f.extract('classify_image_graph_def.pb', str(model_file.parent))
return str(model_file)
def calculate_fid_given_dsets(dsets, imsupports, imkeys, inception_path,
batch_size=50, save_data_in_path=None):
''' Calculates the FID of two paths. '''
inception_path = check_or_download_inception(inception_path)
create_inception_graph(str(inception_path))
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
sess.run(tf.global_variables_initializer())
m1, s1 = calculate_activation_statistics_from_dset(
dsets[0], imsupports[0], sess, batch_size, imkeys[0]
)
if save_data_in_path is not None:
print('\nSaved input data statistics to {}'.format(save_data_in_path))
np.savez(save_data_in_path, mu=m1, sigma=s1)
m2, s2 = calculate_activation_statistics_from_dset(
dsets[1], imsupports[1], sess, batch_size, imkeys[1])
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
def calculate_fid_given_npz_and_dset(npz_path, dsets, imsupports, imkeys, inception_path,
batch_size=50):
''' Calculates the FID where data statistics is given in npz and evaluation in dataset. '''
inception_path = check_or_download_inception(inception_path)
create_inception_graph(str(inception_path))
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
sess.run(tf.global_variables_initializer())
assert npz_path.endswith('.npz')
f = np.load(npz_path)
m1, s1 = f['mu'][:], f['sigma'][:]
f.close()
m2, s2 = calculate_activation_statistics_from_dset(dsets[1],
imsupports[1], sess, batch_size, imkeys[1])
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
def calculate_fid_from_npz_if_available(npz_path, dsets, imsupports, imkeys, inception_path,
batch_size=50):
try:
# calculate from npz
print('\nFound a .npz file, loading from it...')
fid_value = calculate_fid_given_npz_and_dset(npz_path, dsets,
imsupports, imkeys, inception_path, batch_size=batch_size)
    except FileNotFoundError:
        # if not possible to calculate from npz, calc from input data and save to npz
        print('\nNo npz file found, calculating statistics from data...')
        os.makedirs(os.path.split(npz_path)[0], exist_ok=True)
        fid_value = calculate_fid_given_dsets(dsets, imsupports, imkeys, inception_path, batch_size=batch_size,
                                              save_data_in_path=npz_path[:-4])
return fid_value
def fid(root, data_in, data_out, config,
im_in_key='image', im_out_key='image',
im_in_support=None, im_out_support=None,
name='fid'):
incept_p = os.environ.get(
'INCEPTION_PATH',
'/export/scratch/jhaux/Models/inception_fid'
)
inception_path = retrieve(config, 'fid/inception_path', default=incept_p)
batch_size = retrieve(config, 'fid/batch_size', default=50)
pre_calc_stat_path = retrieve(config, 'fid_stats/pre_calc_stat_path', default='none')
fid_iterations = retrieve(config, 'fid/fid_iterations', default=1)
save_dir = os.path.join(root, name)
os.makedirs(save_dir, exist_ok=True)
fids = []
for ii in range(fid_iterations):
        if pre_calc_stat_path != 'none':
print('\nLoading pre-calculated statistics from {} if available.'.format(pre_calc_stat_path))
fid_value = calculate_fid_from_npz_if_available(pre_calc_stat_path, [data_in, data_out],
[im_in_support, im_out_support],
[im_in_key, im_out_key],
inception_path,
batch_size)
else:
print('\nNo path of pre-calculated statistics specified. Falling back to default behavior.')
fid_value = calculate_fid_given_dsets(
[data_in, data_out],
[im_in_support, im_out_support],
[im_in_key, im_out_key],
inception_path,
batch_size,
save_data_in_path=os.path.join(save_dir, 'pre_calc_stats'))
fids.append(fid_value)
if 'model_output.csv' in root:
root = root[:-len('model_output.csv')]
savename_score = os.path.join(save_dir, 'score.txt')
savename_std = os.path.join(save_dir, 'std.txt')
fid_score = np.array(fids).mean()
fid_std = np.array(fids).std()
with open(savename_score, 'w+') as f:
f.write(str(fid_score))
with open(savename_std, 'w+') as f:
f.write(str(fid_std))
print('\nFID SCORE: {:.2f} +/- {:.2f}'.format(fid_score, fid_std))
return {"scalars": {"fid": fid_score}}
if __name__ == "__main__":
from edflow.debug import DebugDataset
from edflow.data.dataset import ProcessedDataset
D1 = DebugDataset(size=100)
D2 = DebugDataset(size=100)
P = lambda *args, **kwargs: {'image': np.ones([256, 256, 3])}
D1 = ProcessedDataset(D1, P)
D2 = ProcessedDataset(D2, P)
print(D1[0])
fid('.', D1, D2, {})
|
python
|
# Copyright (c) 2014 Scality
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# If we are running in the context of Swift, all our exceptions must
# inherit from DiskFileError because that's what the calling code expects
try:
import swift.common.exceptions
BASE_EXCEPTION = swift.common.exceptions.DiskFileError
except ImportError:
BASE_EXCEPTION = Exception
class SproxydException(BASE_EXCEPTION):
'''Base Exception for this library.'''
class SproxydHTTPException(SproxydException):
def __init__(self, msg, url='', http_status=0, http_reason=''):
super(SproxydHTTPException, self).__init__(msg)
self.msg = msg
self.url = url
self.http_status = http_status
self.http_reason = http_reason
def __str__(self):
suffix = filter(bool, [
self.url if self.url else None,
' %d' % self.http_status if self.http_status else None,
' %s' % self.http_reason if self.http_reason else None])
if not suffix:
return self.msg
else:
return '%s %s' % (self.msg, ''.join(suffix))
def __repr__(self):
args = ', '.join('%s=%r' % arg for arg in [
('msg', self.msg),
('url', self.url),
('http_status', self.http_status),
('http_reason', self.http_reason)])
return 'SproxydException(%s)' % args
class SproxydConfException(SproxydException):
'''Exception raised when an invalid Sproxyd conf is detected.'''
class InvariantViolation(RuntimeError):
'''Exception raised when some invariant is violated
If this ever occurs at runtime, something is very wrong.
'''
|
python
|
from .convunet import unet
from .dilatedunet import dilated_unet
from .dilateddensenet import dilated_densenet, dilated_densenet2, dilated_densenet3
|
python
|
import mlflow.pyfunc
class Model(mlflow.pyfunc.PythonModel):
"""
Abstract class representing an MLFlow model.
Methods:
fit
predict
validate
register
download
    Attrs:
        base_model
    """
    def __init__(self):
        # NOTE: init_BaseModel is assumed to be provided elsewhere in the project.
        self.base_model = init_BaseModel()
    def fit(self):
        raise NotImplementedError
    def predict(self):
        raise NotImplementedError
    def validate(self):
        raise NotImplementedError
    def register(self):
        raise NotImplementedError
    def download(self):
        raise NotImplementedError
|
python
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class TeleBruxellesIE(InfoExtractor):
_VALID_URL = (
r"https?://(?:www\.)?(?:telebruxelles|bx1)\.be/(?:[^/]+/)*(?P<id>[^/#?]+)"
)
_TESTS = [
{
"url": "http://bx1.be/news/que-risque-lauteur-dune-fausse-alerte-a-la-bombe/",
"md5": "a2a67a5b1c3e8c9d33109b902f474fd9",
"info_dict": {
"id": "158856",
"display_id": "que-risque-lauteur-dune-fausse-alerte-a-la-bombe",
"ext": "mp4",
"title": "Que risque l’auteur d’une fausse alerte à la bombe ?",
"description": "md5:3cf8df235d44ebc5426373050840e466",
},
},
{
"url": "http://bx1.be/sport/futsal-schaerbeek-sincline-5-3-a-thulin/",
"md5": "dfe07ecc9c153ceba8582ac912687675",
"info_dict": {
"id": "158433",
"display_id": "futsal-schaerbeek-sincline-5-3-a-thulin",
"ext": "mp4",
"title": "Futsal : Schaerbeek s’incline 5-3 à Thulin",
"description": "md5:fd013f1488d5e2dceb9cebe39e2d569b",
},
},
{
"url": "http://bx1.be/emission/bxenf1-gastronomie/",
"only_matching": True,
},
{
"url": "https://bx1.be/berchem-sainte-agathe/personnel-carrefour-de-berchem-sainte-agathe-inquiet/",
"only_matching": True,
},
{
"url": "https://bx1.be/dernier-jt/",
"only_matching": True,
},
{
# live stream
"url": "https://bx1.be/lives/direct-tv/",
"only_matching": True,
},
]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
article_id = self._html_search_regex(
r'<article[^>]+\bid=["\']post-(\d+)', webpage, "article ID", default=None
)
title = self._html_search_regex(
r"<h1[^>]*>(.+?)</h1>", webpage, "title", default=None
) or self._og_search_title(webpage)
description = self._og_search_description(webpage, default=None)
rtmp_url = self._html_search_regex(
r'file["\']?\s*:\s*"(r(?:tm|mt)ps?://[^/]+/(?:vod/mp4:"\s*\+\s*"[^"]+"\s*\+\s*"\.mp4|stream/live))"',
webpage,
"RTMP url",
)
# Yes, they have a typo in scheme name for live stream URLs (e.g.
# https://bx1.be/lives/direct-tv/)
rtmp_url = re.sub(r"^rmtp", "rtmp", rtmp_url)
rtmp_url = re.sub(r'"\s*\+\s*"', "", rtmp_url)
formats = self._extract_wowza_formats(rtmp_url, article_id or display_id)
self._sort_formats(formats)
is_live = "stream/live" in rtmp_url
return {
"id": article_id or display_id,
"display_id": display_id,
"title": self._live_title(title) if is_live else title,
"description": description,
"formats": formats,
"is_live": is_live,
}
|
python
|
from setuptools import setup, find_packages
import sys
import os.path
import numpy as np
# Must be one line or PyPI will cut it off
DESC = ("A colormap tool")
LONG_DESC = open("README.rst").read()
setup(
name="viscm",
version="0.9",
description=DESC,
long_description=LONG_DESC,
author="Nathaniel J. Smith, Stefan van der Walt",
author_email="[email protected], [email protected]",
url="https://github.com/bids/viscm",
license="MIT",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
    ],
packages=find_packages(),
install_requires=["numpy", "matplotlib", "colorspacious"],
package_data={'viscm': ['examples/*']},
)
|
python
|
# -*- coding: utf-8 -*-
import llbc
class pyllbcProperty(object):
"""
pyllbc property class encapsulation.
property can read & write .cfg format file.
file format:
path.to.name = value # The comments
path.to.anotherName = \#\#\# anotherValue \#\#\# # The comments too
Property class can parse or serialize this format's file.
"""
def __init__(self, prop_file=''):
self._c_obj = llbc.inl.Property_New(prop_file)
def __del__(self):
llbc.inl.Property_Delete(self._c_obj)
def from_content(self, content):
"""
Load properties from string content.
:param content: the string content.
"""
llbc.inl.Property_FromContent(self._c_obj, content)
def from_file(self, prop_file):
"""
Load properties from specifics property file.
:param prop_file: the property file path.
"""
llbc.inl.Property_FromFile(self._c_obj, prop_file)
def to_content(self):
"""
Serialize the properties as string.
:return: the serialized properties' string representation.
"""
return llbc.inl.Property_ToContent(self._c_obj)
def to_file(self, prop_file):
"""
Serialize the properties to file.
:param prop_file: the property file path.
"""
llbc.inl.Property_ToFile(self._c_obj, prop_file)
def getvalue(self, name='', default=None, as_type=str):
"""
Get property value by property name.
examples:
db_host = prop.getvalue("Server.DB.Host", "127.0.0.1")
db_port = prop.getvalue("Server.DB.Port", default=3306, as_type=int)
client_host = prop.get_property("Server.Client.Host").getvalue();
client_port = prop.get_property("Server.Client.Port").getvalue(as_type=int);
        Note:
            If the 'name' parameter is omitted or passed as '', this property's own
            value is returned: a leaf property returns its real value, otherwise the
            default value is returned.
            If the name is malformed, an llbc.error exception is raised.
        :param name: the property name; a malformed name raises an llbc.error exception.
        :param default: the value returned when the named property cannot be found.
                        If default is None, as_type() is returned instead.
        :param as_type: the type to convert the value to. Property stores all values
                        as str, so without an explicit type a str value is returned.
        :return: the property value.
"""
val = llbc.inl.Property_GetValue(self._c_obj, name)
if val is None:
return as_type() if default is None else default
else:
return as_type(val)
def setvalue(self, name, value, comments=""):
"""
Set property value.
:param name: the property name.
:param value: the property value, will convert str type to store.
:param comments: the comments, default is empty.
"""
llbc.inl.Property_SetValue(self._c_obj, name, value, comments)
def get_comments(self, name=''):
"""
Get specific property's comments, if name is '', return self property's comments.
:param name: property name.
:return: the property comments.
"""
return llbc.inl.Property_GetComments(self._c_obj, name)
def set_comments(self, name, comments):
"""
        Set the specific property's comments; if name is '', set this property's own comments.
:param name: property name.
:param comments: property comments.
:return: None.
"""
llbc.inl.Property_SetComments(self._c_obj, name, comments)
def get_property(self, name):
"""
Get the property.
"""
return llbc.inl.Property_GetProperty(self._c_obj, name)
def get_property_count(self):
"""
Get property count.
:return: the property count.
"""
return llbc.inl.Property_GetPropertyCount(self._c_obj)
def get_property_names(self, nest=False):
"""
Get property names.
        :param nest: whether to return all leaf-property names, default is False.
:return: the property names.
"""
return llbc.inl.Property_GetPropertyNames(self._c_obj, nest)
def has_property(self, name):
"""
        Check whether this property contains a property with the specific name.
:param name: the property name.
:return: return True if has specific name's property, otherwise return False.
"""
return llbc.inl.Property_HasProperty(self._c_obj, name)
def remove_property(self, name, remove_all=True):
"""
        Remove the specific name's property.
:param name: the property name.
:param remove_all: remove all flag.
"""
llbc.inl.Property_RemoveProperty(self._c_obj, name, remove_all)
def remove_all_properties(self):
"""
Remove all properties.
"""
llbc.inl.Property_RemoveAllProperties(self._c_obj)
def __getattr__(self, key):
return llbc.inl.Property_GetProperty(self._c_obj, key)
def __int__(self):
return self.getvalue('', as_type=int)
def __long__(self):
return self.getvalue('', as_type=long)
def __float__(self):
return self.getvalue('', as_type=float)
def __str__(self):
return self.getvalue('', as_type=str)
def __nonzero__(self):
val = self.getvalue('', as_type=str)
val_len = len(val)
if val_len == 0:
return False
elif val_len == 4 and val.lower() == 'true':
return True
try:
float_val = float(val)
except Exception, e:
return False
else:
return float_val != 0.0
def __kvdict(self):
return dict([(key, self.get_property(key)) for key in self.get_property_names(nest=True)])
def iterkeys(self):
return self.__kvdict().iterkeys()
def itervalues(self):
return self.__kvdict().itervalues()
def iteritems(self):
return self.__kvdict().iteritems()
def __iter__(self):
return self.__kvdict().__iter__()
llbc.Property = pyllbcProperty
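# Hedged usage sketch (added for clarity; llbc.inl is a C extension, so this is
# illustrative only and uses just the wrapper methods defined above):
#
#   prop = llbc.Property()
#   prop.from_content('Server.DB.Host = 127.0.0.1  # primary DB\n'
#                     'Server.DB.Port = 3306\n')
#   host = prop.getvalue('Server.DB.Host', default='localhost')
#   port = prop.getvalue('Server.DB.Port', default=3306, as_type=int)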
|
python
|
# -*- coding: utf-8 -*-
"""
@author: Fredrik Wahlberg <[email protected]>
"""
import numpy as np
import random
from sklearn.base import BaseEstimator, ClassifierMixin
from scipy.optimize import fmin_l_bfgs_b
from sklearn.gaussian_process.gpc import _BinaryGaussianProcessClassifierLaplace as BinaryGPC
from sklearn.gaussian_process.kernels import Matern, RBF, ConstantKernel as C
__all__ = ['SharedKernelClassifier']
class SharedKernelClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, n_iter=100, kernel='rbf', ard=True, ardinit=True,
n_restarts=0, model_batch_size=None, verbose=False):
# Check and store parameters
assert n_iter > 0
assert n_restarts >= 0
assert kernel in ['rbf', 'matern52', 'matern32']
assert type(ard) is bool
self.n_iter = n_iter
self.n_restarts = n_restarts
self.kernel = kernel
self.ard = ard
self.ardinit = ardinit
self.verbose = verbose
self.model_batch_size = model_batch_size
# Container for the sub models
self.models_ = dict()
# Stores likelihoods of optimizations
self.convergence_ = list()
@property
def classes_(self):
return list(self.models_.keys())
@property
def log_likelihood_(self):
likelihood = list()
for m in self.models_.values():
likelihood.append(m.log_marginal_likelihood())
return np.mean(likelihood)
def _kernel_factory(self, X, theta):
"""Factory for creating a kernel"""
n_samples, n_features = X.shape
if self.ard:
lengthscale = np.ones(n_features)
else:
lengthscale = 1.0
if self.kernel == 'rbf':
k = C(1.0) * RBF(length_scale=lengthscale)
elif self.kernel == 'matern32':
k = C(1.0) * Matern(nu=1.5, length_scale=lengthscale)
elif self.kernel == 'matern52':
k = C(1.0) * Matern(nu=2.5, length_scale=lengthscale)
else:
raise RuntimeError("Unknown kernel")
if theta is not None:
theta = np.asarray(theta).copy()
assert theta.shape == k.theta.shape
k.theta = theta
return k
def _estimator_factory(self, X, y, theta):
"""Factory for creating a binary estimator"""
k = self._kernel_factory(X, theta)
estimator = BinaryGPC(kernel=k, optimizer=None, copy_X_train=False)
# copy_X_train=False saves memory by not copying the feature data
# optimizer=None only initializes the model without training
estimator.fit(X, y)
return estimator
def _oneVsAllSplit(self, y):
"""Perform one-vs-all binary vector encoding"""
one_vs_all_y = dict()
for c in np.unique(y):
            one_vs_all_y[c] = np.vstack(np.asarray(y == c, dtype=int)).ravel()
return one_vs_all_y
def _init_sub_models(self, X, y, theta=None):
# Encode y as one-vs-all binary vectors
one_vs_all_y = self._oneVsAllSplit(y)
# Add or replace models for classes in y
for c in one_vs_all_y.keys():
self.models_[c] = self._estimator_factory(X, one_vs_all_y[c], theta)
def fit(self, X, y):
# Save a reference to the training data
self._X = X
# self._y = y
# Initialize the models
self._init_sub_models(X, y)
assert self.model_batch_size is None or self.model_batch_size <= len(self.models_.keys())
assert len(self.classes_) > 0
# Run optimization with restarts
for restart in range(1 + self.n_restarts):
if restart>0 and self.verbose:
print("restarting optimization")
# Randomize initial hyperparameters
k = self._kernel_factory(X, theta=None)
x0 = np.random.uniform(
low=k.bounds[:, 0],
high=k.bounds[:, 1],
size=k.theta.shape)
# x0 = np.log(np.random.uniform(
# low=np.exp(k.bounds[:, 0]),
# high=np.exp(k.bounds[:, 1]),
# size=k.theta.shape))
if self.ard and self.ardinit:
if self.verbose:
print("initializing ard")
from copy import deepcopy
modelcopy = deepcopy(self)
modelcopy.ard = False
modelcopy.ardinit = False
modelcopy.n_iter = 5
modelcopy.n_restarts = 0
modelcopy.fit(X, y)
if self.verbose:
print("ard init hyper ", modelcopy.hyperparameters_)
x0[:] = modelcopy.hyperparameters_[1]
x0[0] = modelcopy.hyperparameters_[0]
# Define optimization bounds
theta_bounds = [tuple(k.bounds[i, :]) for i in range(k.bounds.shape[0])]
            # Create list for storing convergence information
log_likelihood_convergence = list()
# Define objective function to _minimize_
self._optimizer_iteration = 0
def inc_optimizer_iteration(theta):
self._optimizer_iteration += 1
def f(theta):
likelihood = list()
gradient = list()
if self.model_batch_size is not None:
keys = random.sample(self.models_.keys(),
self.model_batch_size)
else:
keys = self.models_.keys()
for k in keys:
lml, grad = self.models_[k].log_marginal_likelihood(theta,
eval_gradient=True)
likelihood.append(np.exp(lml))
gradient.append(grad)
likelihood = np.log(np.mean(likelihood))
log_likelihood_convergence.append(likelihood)
gradient = np.mean(np.stack(gradient), axis=0)
if self.verbose:
print("%i| log likelihood: %.6f" % (self._optimizer_iteration, likelihood))
return -likelihood, -gradient
theta, likelihood, flags = fmin_l_bfgs_b(f, x0,
bounds=theta_bounds,
maxiter=self.n_iter,
disp=None, callback=inc_optimizer_iteration)
self.convergence_.append(log_likelihood_convergence)
# Select the best run
if restart == 0 or self.log_likelihood_ < -likelihood:
self.hyperparameters_ = theta.copy()
# Re-init all sub models with the best hyper parameters
self._init_sub_models(X, y, theta=self.hyperparameters_)
return self
def get_kernel(self):
"""Return a kernel with the estimated hyperparameters"""
return self._kernel_factory(self._X, theta=self.hyperparameters_)
def _models_probability_matrix(self, X):
"""Returns non-normalized probabilities of X per model
Returns
-------
C : array-like, shape = (n_samples, n_classes)
"""
n_samples, n_features = X.shape
prob = np.zeros((n_samples, len(self.classes_)))
for idx, cla in enumerate(self.classes_):
prob[:, idx] = self.models_[cla].predict_proba(X)[:, 1].ravel()
return prob
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
prob = self.predict_proba(X)
classes = list(self.classes_)
return [classes[idx] for idx in np.argmax(prob, axis=1)]
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
probs = self._models_probability_matrix(X)
# Normalize per column
for i in range(probs.shape[0]):
probs[i, :] = probs[i, :] / np.sum(probs[i, :])
return probs
def score_covar_ntop(self, X, y):
"""
"""
return self._affinity_ntop(affinity_matrix=self.get_kernel()(X), labels=y)
def _affinity_ntop(self, affinity_matrix, labels):
sorted_K = np.argsort(-affinity_matrix, axis=0)
labeld_K = labels[sorted_K]
m = 5
ntop = [0]*(2*m-1)
for n in range(1, m+1):
soft_ntop = 0
hard_ntop = 0
for i in range(len(labels)):
matches = labeld_K[1:n+1, i] == labeld_K[0, i]
if np.sum(matches) > 0:
soft_ntop += 1/len(labels)
if np.all(matches):
hard_ntop += 1/len(labels)
ntop[m+n-2] = soft_ntop
ntop[m-n] = hard_ntop
return ntop
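# Minimal usage sketch (an illustration, not part of the original module): fit the
# shared-kernel classifier on a small synthetic three-class problem. The data and
# parameter values below are assumptions; it requires a scikit-learn version where
# the private _BinaryGaussianProcessClassifierLaplace import above still resolves.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(10, 3) + offset for offset in (0.0, 3.0, 6.0)])
    y_demo = np.repeat(['a', 'b', 'c'], 10)
    clf = SharedKernelClassifier(n_iter=10, kernel='rbf', ard=False)
    clf.fit(X_demo, y_demo)
    print(clf.predict(X_demo[:5]))        # predicted class labels
    print(clf.predict_proba(X_demo[:5]))  # per-class probabilities, normalized per row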
|
python
|
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Optional, List
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QObject
from UM.Logger import Logger
from UM.Preferences import Preferences
from UM.Resources import Resources
from UM.i18n import i18nCatalog
from cura.Settings.SettingVisibilityPreset import SettingVisibilityPreset
catalog = i18nCatalog("cura")
class SettingVisibilityPresetsModel(QObject):
onItemsChanged = pyqtSignal()
activePresetChanged = pyqtSignal()
def __init__(self, preferences: Preferences, parent = None) -> None:
super().__init__(parent)
self._items = [] # type: List[SettingVisibilityPreset]
self._custom_preset = SettingVisibilityPreset(preset_id = "custom", name = "Custom selection", weight = -100)
self._populate()
        basic_item = self.getVisibilityPresetById("basic")
        if basic_item is not None:
            basic_visible_settings = ";".join(basic_item.settings)
        else:
            Logger.log("w", "Unable to find the basic visibility preset.")
            basic_visible_settings = ""
        self._preferences = preferences
        # Preference to store which preset is currently selected
        self._preferences.addPreference("cura/active_setting_visibility_preset", "basic")
        # Preference that stores the "custom" set so it can always be restored (even after a restart)
        self._preferences.addPreference("cura/custom_visible_settings", basic_visible_settings)
self._preferences.preferenceChanged.connect(self._onPreferencesChanged)
self._active_preset_item = self.getVisibilityPresetById(self._preferences.getValue("cura/active_setting_visibility_preset"))
# Initialize visible settings if it is not done yet
visible_settings = self._preferences.getValue("general/visible_settings")
if not visible_settings:
new_visible_settings = self._active_preset_item.settings if self._active_preset_item is not None else []
self._preferences.setValue("general/visible_settings", ";".join(new_visible_settings))
else:
self._onPreferencesChanged("general/visible_settings")
self.activePresetChanged.emit()
def getVisibilityPresetById(self, item_id: str) -> Optional[SettingVisibilityPreset]:
result = None
for item in self._items:
if item.presetId == item_id:
result = item
break
return result
def _populate(self) -> None:
from cura.CuraApplication import CuraApplication
items = [] # type: List[SettingVisibilityPreset]
items.append(self._custom_preset)
for file_path in Resources.getAllResourcesOfType(CuraApplication.ResourceTypes.SettingVisibilityPreset):
setting_visibility_preset = SettingVisibilityPreset()
try:
setting_visibility_preset.loadFromFile(file_path)
except Exception:
Logger.logException("e", "Failed to load setting preset %s", file_path)
items.append(setting_visibility_preset)
# Sort them on weight (and if that fails, use ID)
items.sort(key = lambda k: (int(k.weight), k.presetId))
self.setItems(items)
@pyqtProperty("QVariantList", notify = onItemsChanged)
def items(self) -> List[SettingVisibilityPreset]:
return self._items
def setItems(self, items: List[SettingVisibilityPreset]) -> None:
if self._items != items:
self._items = items
self.onItemsChanged.emit()
@pyqtSlot(str)
def setActivePreset(self, preset_id: str) -> None:
if self._active_preset_item is not None and preset_id == self._active_preset_item.presetId:
Logger.log("d", "Same setting visibility preset [%s] selected, do nothing.", preset_id)
return
preset_item = self.getVisibilityPresetById(preset_id)
if preset_item is None:
Logger.log("w", "Tried to set active preset to unknown id [%s]", preset_id)
return
need_to_save_to_custom = self._active_preset_item is None or (self._active_preset_item.presetId == "custom" and preset_id != "custom")
if need_to_save_to_custom:
# Save the current visibility settings to custom
current_visibility_string = self._preferences.getValue("general/visible_settings")
if current_visibility_string:
self._preferences.setValue("cura/custom_visible_settings", current_visibility_string)
new_visibility_string = ";".join(preset_item.settings)
if preset_id == "custom":
# Get settings from the stored custom data
new_visibility_string = self._preferences.getValue("cura/custom_visible_settings")
if new_visibility_string is None:
new_visibility_string = self._preferences.getValue("general/visible_settings")
self._preferences.setValue("general/visible_settings", new_visibility_string)
self._preferences.setValue("cura/active_setting_visibility_preset", preset_id)
self._active_preset_item = preset_item
self.activePresetChanged.emit()
@pyqtProperty(str, notify = activePresetChanged)
def activePreset(self) -> str:
if self._active_preset_item is not None:
return self._active_preset_item.presetId
return ""
def _onPreferencesChanged(self, name: str) -> None:
if name != "general/visible_settings":
return
# Find the preset that matches with the current visible settings setup
visibility_string = self._preferences.getValue("general/visible_settings")
if not visibility_string:
return
visibility_set = set(visibility_string.split(";"))
matching_preset_item = None
for item in self._items:
if item.presetId == "custom":
continue
if set(item.settings) == visibility_set:
matching_preset_item = item
break
item_to_set = self._active_preset_item
if matching_preset_item is None:
            # The new visibility setup does not match any preset, so it should become "custom"
if self._active_preset_item is None or self._active_preset_item.presetId == "custom":
# We are already in custom, just save the settings
self._preferences.setValue("cura/custom_visible_settings", visibility_string)
else:
# We need to move to custom preset.
item_to_set = self.getVisibilityPresetById("custom")
else:
item_to_set = matching_preset_item
# If we didn't find a matching preset, fallback to custom.
if item_to_set is None:
item_to_set = self._custom_preset
if self._active_preset_item is None or self._active_preset_item.presetId != item_to_set.presetId:
self._active_preset_item = item_to_set
if self._active_preset_item is not None:
self._preferences.setValue("cura/active_setting_visibility_preset", self._active_preset_item.presetId)
self.activePresetChanged.emit()
|
python
|
import numpy as np
from utils import resize, opt
from skimage import morphology
def find_focal_points(image, scope='local', maxima_areas='large', local_maxima_threshold=None, num_points=None):
"""
Finds the 'focal_points' of a model, given a low resolution CAM. Has two modes: a 'local' scope and a 'global' one.
If a 'local' scope is selected, the function looks for local maxima in the CAM. Due to the high sensitivity of the
algorithm finding the local maxima, usually a large number of maxima is identified (which is, in most cases,
undesirable. An interest_threshold can be selected that filters out possibly unwanted maxima (i.e. maxima whose
intensity is lower than the threshold). Due to the resizing of the CAM, these local maxima produce large areas in
the new image. If this is not desired, the option maxima_areas='small' should be selected, which "skeletonizes" the
large areas to shrink them.
The 'global' scope looks for global maxima in the CAM. This is accompanied by the parameter num_points, which
designates the number of points returned by the function.
:param image: An input image. Ideally this should be a low resolution CAM.
:param scope: Can either be 'local' or 'global'. A 'local' scope looks for local maxima in the image, while a
'global' scope looks for global ones.
:param maxima_areas: Can either be 'large' or 'small', depending on whether or not we want larger or smaller areas.
Only relevant for 'local' scopes.
:param local_maxima_threshold: A float that filters out any local maxima that are below the threshold. Its default
value is the average of the lowest-intensity local maxima with the highest-intensity
one. Only relevant for 'local' scopes.
:param num_points: An integer that specifies the number of points with the maximum intensity.
Only relevant for 'global' scopes.
:return: A list of tuples, each containing the x and y coordinates of the 'focal points' in the input CAM.
"""
# Global scope: looks for 'num_points' global maxima in the input image.
if scope == 'global':
# If 'num_points' is not defined, picks the square root of one of its dimensions:
# e.g. for a 224x224 image: num_points = sqrt(224) = 15
if num_points:
if not isinstance(num_points, int):
raise TypeError('num_points can only take integer values')
else:
num_points = int(round(np.sqrt(opt.im_size)))
# Resizes the image to the desired size and returns the coordinates of the top 'num_points' pixels that have
# the largest values. They are cast as python's default 32-bit integers to be compatible with SimpleITK's
# ConnectedThreshold function. The two axes are also reversed.
top_points = np.argpartition(resize(image).ravel(), -num_points)[-num_points:]
return [(int(x % opt.im_size), int(x // opt.im_size)) for x in top_points]
# Local scope: looks for local maxima in the input image.
elif scope == 'local':
# Identifies the image's local maxima.
candidate_points = morphology.local_maxima(image).astype(bool)
# Because of the high sensitivity of scikit-image's morphology.local_maxima function, it is often desired to
# filter some of the local maxima out via a threshold. If this is not passed explicitly the average of the
# local maxima with the minimum and maximum intensities is used.
if not isinstance(local_maxima_threshold, float):
local_maxima_threshold = (image[candidate_points].max() + image[candidate_points].min()) / 2
        # Any local maxima whose intensity fails to exceed the threshold are ignored.
focal_points = candidate_points * image > local_maxima_threshold
# Resizes the map of the local maxima to the desired dimensions. This results in the enlargement of the areas
        # of each maximum. If this is undesired, as indicated by the option maxima_areas='small', scikit-image's
# morphology.skeletonize is applied, which shrinks the maxima areas.
focal_points = resize(focal_points.astype(float), resample_method='nearest')
if maxima_areas not in ('small', 'large'):
raise ValueError("maxima_areas can either be 'small' or 'large'")
elif maxima_areas == 'small':
focal_points = morphology.skeletonize(focal_points)
# Finally, the coordinates of the maxima are returned. They are cast as python's default 32-bit integers to be
# compatible with SimpleITK's ConnectedThreshold function. The two axes are also reversed.
focal_point_coods = np.where(focal_points)
return [(int(focal_point_coods[1][i]), int(focal_point_coods[0][i])) for i in range(len(focal_point_coods[0]))]
def remove_small_holes(image, max_hole_size=256):
"""
Wrapper to scikit-image's morphology.remove_small_holes that returns an image array with numbers instead of a
boolean array.
:param image: A segmentation mask (numpy.ndarray).
:param max_hole_size: The maximum size (in pixels) of a hole to fill (int).
:return: The filled segmentation mask (numpy.ndarray).
"""
return morphology.remove_small_holes(image > 0, area_threshold=max_hole_size).astype(float)
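# Usage sketch (hypothetical, since `resize` and `opt` come from this project's own
# utils module): given a low-resolution CAM `cam` as a 2-D numpy array,
#
#   seeds_local = find_focal_points(cam, scope='local', maxima_areas='small')
#   seeds_global = find_focal_points(cam, scope='global', num_points=10)
#
# both return (x, y) tuples sized to opt.im_size, suitable as seed points for
# SimpleITK's ConnectedThreshold as described in the docstring above.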
|
python
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸 (Blueking) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from mock import patch
from apps.utils.test_utils import tools
from apps.utils.test_utils.tests import MyTestCase
from apps.gsekit.cmdb.views.tests import CmdbMockClient
from apps.gsekit.process import models as process_models
from apps.gsekit.process.views.process import ProcessViews
from apps.gsekit.process.handlers.process import ProcessHandler
from apps.gsekit.process import mock_data as process_mock_data
from apps.gsekit.configfile.models import ConfigTemplateBindingRelationship
from apps.gsekit.configfile.views.tests import BscpMockClient
class TestProcessView(MyTestCase):
"""
    Tests for the process-related APIs
"""
swagger_test_view = ProcessViews
fields_exempt = MyTestCase.fields_exempt + ["config_template_id"]
actions_exempt = MyTestCase.actions_exempt + ["operate_process", "sync_process_status"]
cmdb_mock_client = CmdbMockClient.get_cmdb_mock_client_inst()
CC_API_MOCK_PATH = "apps.gsekit.process.handlers.process.CCApi"
BSCP_API_MOCK_PATH = "apps.gsekit.adapters.bscp.adapter.BscpApi"
BSCP_MOCK_CLIENT = BscpMockClient(create_app_return={"app_id": "test_app"}, update_config_return=None)
CMDB_MOCK_CLIENT = CmdbMockClient.get_cmdb_mock_client_inst()
@classmethod
def setUpTestData(cls):
"""TestCase实例生成时调用一次, 可DB回滚
该hook比setUpClass先执行,需要考虑mock相关顺序
"""
super().setUpTestData()
patch(cls.CC_API_MOCK_PATH, cls.cmdb_mock_client).start()
patch(cls.BSCP_API_MOCK_PATH, cls.BSCP_MOCK_CLIENT).start()
ProcessHandler(bk_biz_id=cls.bk_biz_id).sync_biz_process()
        # Create the binding relationships
config_templates = tools.init_config_template(3)
relation_to_be_created = []
for config_template in config_templates:
relation_to_be_created.append(
ConfigTemplateBindingRelationship(
bk_biz_id=cls.bk_biz_id,
config_template_id=config_template["config_template_id"],
process_object_type=process_models.Process.ProcessObjectType.TEMPLATE,
process_object_id=process_mock_data.PROCESS_TEMPLATE_RESPONSE[0]["id"],
)
)
ConfigTemplateBindingRelationship.objects.bulk_create(relation_to_be_created)
def setUp(self) -> None:
super(TestProcessView, self).setUp()
|
python
|
from core import views
from django.urls import path
urlpatterns = [
path('user/', views.UserListView.as_view()),
path('user/<username>/', views.UserDetailView.as_view()),
path('wallet/', views.WalletListView.as_view()),
path('subacc/', views.SubAccountListView.as_view()),
path('subacc/<sub_address>/', views.SubAccountDetailView.as_view()),
path('deposit/', views.BankDepositListView.as_view()),
path('deposit/<sub_address>/', views.BankDepositDetailView.as_view()),
path('trans/', views.TransactionListView.as_view()),
path('trans/in/', views.TransactionIncomingListView.as_view()),
path('trans/out/', views.TransactionOutgoingListView.as_view()),
path('trans/<transaction_hash>/', views.TransactionDetailView.as_view()),
path('login_history/', views.LoginRecordListView.as_view()),
]
|
python
|
from math import gcd
from functools import reduce
def fun(a):
return reduce(gcd,a)
for i in range(int(input())):
n = int(input())
a = list(set([int(j) for j in input().split()]))
if(len(a)==1):
print(a[0]+a[0])
else:
b = max(a)
a.remove(b)
m = b+fun(a)
c = max(a)
a.remove(c)
a.append(b)
print(max(m,c+fun(a)))
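# Worked example (illustration only): for the input multiset 4 6 8 the code compares
# 8 + gcd(4, 6) = 8 + 2 = 10 with 6 + gcd(4, 8) = 6 + 4 = 10 and prints 10.
# When every value is equal (len(a) == 1 after deduplication) it prints a[0] + a[0].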
|
python
|
OTHER_METAL_TYPE = {
'chromium':True,
'kanthal':False,
'solder':False,
'aluminium_steel':True,
'weak_aluminium_steel':False,
'bismuth_steel':True,
'weak_bismuth_steel':False,
'damascus_steel':True,
'weak_damascus_steel':False,
'stainless_steel':False,
'weak_stainless_steel':False,
'rose_alloy':False,
'ferrochrome':False,
'cadmium':False,
'nichrome':True,
'alnico':False,
'vanadium':False,
'rhodium':False,
'palladium':False,
'antimony': False,
'constantan': False,
'electrum': False,
'red_alloy': False,
'mithril': True,
'nickel_silver': True,
'invar': True,
'aluminium': True,
'aluminium_brass': False,
'ardite': False,
'cobalt': True,
'manyullyn': True,
'osmium': True,
'titanium': True,
'tungsten': True,
'tungsten_steel': True,
'boron': True,
'thorium': False,
'manganese': False,
'magnesium': False,
'lithium': False,
'zirconium': False,
'zircaloy': True,
'beryllium': False,
'beryllium_copper': True,
'hsla_steel': False,
'ferroboron': False,
'tough': False,
'magnesium_diboride': False,
'uranium': False,
'soulforged_steel':False,
'signalum':True,
'lumium':True,
'enderium':True,
'refined_obsidian':False,
'refined_glowstone':False,
'thaumium':True,
'void_metal':False,
'bismuth': False,
'bismuth_bronze': True,
'black_bronze': True,
'brass': False,
'bronze': True,
'copper': True,
'gold': False,
'lead': False,
'nickel': False,
'rose_gold': False,
'silver': False,
'tin': False,
'zinc': False,
'sterling_silver': False,
'wrought_iron': True,
'pig_iron': False,
'steel': True,
'platinum': False,
'black_steel': True,
'blue_steel': True,
'red_steel': True
}
def Upper(s) :
splitString = s.replace('_', ' ').title()
return splitString
for metal, tool_metal in OTHER_METAL_TYPE.items():
#print('item.firmalife.%s_mallet.name=%s Mallet' % (metal, Upper(metal)))
#print('item.firmalife.%s_mallet_head.name=%s Mallet Head' % (metal, Upper(metal)))
print("item.ironbackpacks.backpack.tfcompat.%s.name=%s Backpack" % (metal, Upper(metal)))
|
python
|
# coding:utf-8
from .util import *
from .abstract_predictor import AbstractPredictor
from .average_predictor import AveragePredictor
from .average_predictor_without_outliers import AveragePredictorWithoutOutliers
from .average_predictor_without_outliers2 import AveragePredictorWithoutOutliers2
from .average_predictor_each_cost import AveragePredictorEachCost
from .average_predictor_with_other_users import AveragePredictorWithOtherUsers
from .regression_prediction import RegressionPrediction
from .average_predictor_each_cost_interval import AveragePredictorEachCostInterval
|
python
|
import os
import uuid
class CommitTreeToScriptConverter:
def __init__(self, skip_single_child_nodes, post_conversion_commands, script_file):
self.skip_single_child_nodes = skip_single_child_nodes
self.post_conversion_commands = post_conversion_commands
self.print_debug = False
self.script_file = script_file
def convert_commit_tree_to_script(self, commit_tree_to_copy):
temp_dir = os.path.dirname(self.script_file.name)
self.print_to_file("cd " + temp_dir)
self.print_to_file("Invoke-Expression \"git init\"")
self.recursivly_generate_git_commands(commit_tree_to_copy.root)
        self.print_all_to_file(self.get_formatted_post_conversion_commands())
    def get_formatted_post_conversion_commands(self):
delete_master = ["git checkout master-real", "git branch -D master"]
replace_master_real_with_master = ["git checkout -b master", "git branch -D master-real"]
all_commands_to_add = delete_master + replace_master_real_with_master + self.post_conversion_commands
return ["Invoke-Expression \"%s\"" % command for command in all_commands_to_add]
def recursivly_generate_git_commands(self, current_commit):
if self.should_skip_commit(current_commit):
self.recursivly_generate_git_commands(current_commit.children[0])
return
commit_id = self.print_commands_to_make_commit_on_current_branch(current_commit.pretty_names[0])
if current_commit.has_name:
self.print_commands_to_make_branch_on_current_commit(current_commit)
for child in current_commit.children:
self.print_commands_required_for_child(child, current_commit.children[-1], commit_id)
def should_skip_commit(self, commit):
return self.skip_single_child_nodes and len(commit.children) == 1 and not commit.has_name
def get_adjusted_branch_name(self, commit):
if "master" in commit.pretty_names:
return "master-real"
return commit.pretty_names[0].replace(',', '')
def print_commands_to_make_commit_on_current_branch(self, commit_message):
commit_id = "$temp%s" % str(uuid.uuid4()).replace("-", "")
self.print_to_file("New-Item %s.txt" % str(uuid.uuid4()).replace("-", ""))
self.print_to_file("Invoke-Expression \"git add .\"")
self.print_to_file("Invoke-Expression \"git commit -q -a -m '" + commit_message + "'\"")
self.print_to_file(commit_id + " = Invoke-Expression \"git log --format='%H' -n 1\"")
return commit_id
def print_commands_to_make_branch_on_current_commit(self, commit):
branch_name = self.get_adjusted_branch_name(commit)
self.print_to_file("Invoke-Expression \"git branch %s\"" % branch_name)
def print_commands_required_for_child(self, child, last_child, return_commit_id):
self.recursivly_generate_git_commands(child)
if not child == last_child:
self.print_to_file("Invoke-Expression \"git checkout " + return_commit_id + "\"")
def print_all_to_file(self, strings):
for string in strings:
self.print_to_file(string)
def print_to_file(self, string):
print(string, file=self.script_file)
if self.print_debug:
print(string)
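# Usage sketch (hypothetical: the commit-tree type is defined elsewhere in this
# project and only needs a .root whose nodes expose .children, .pretty_names and
# .has_name, as used above):
#
#   with open(os.path.join(temp_dir, 'rebuild.ps1'), 'w') as script_file:
#       converter = CommitTreeToScriptConverter(
#           skip_single_child_nodes=True,
#           post_conversion_commands=['git gc'],
#           script_file=script_file)
#       converter.convert_commit_tree_to_script(commit_tree)
#
# The generated PowerShell script runs `git init`, creates one commit per node and a
# branch per named node, then replaces master with the branch named master-real.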
|
python
|
class Queue(object):
def __init__(self):
self.q = []
def push(self, value):
self.q.insert(0, value)
def pop(self):
return self.q.pop()
def is_empty(self):
return self.q == []
def size(self):
return len(self.q)
# Example
q = Queue()
q.push(1)
q.push(2)
q.push(3)
print(q.q) # [3, 2, 1]
q.pop()
print(q.q) # [3, 2]
|
python
|
import os
from datetime import datetime, timedelta
import pytz
import requests
from google.transit import gtfs_realtime_pb2
GTFS_API_KEY = os.environ.get('TRANSPORT_NSW_API_KEY')
GTFS_REALTIME_URL = 'https://api.transport.nsw.gov.au/v1/gtfs/realtime/buses/'
GTFS_VEHICLE_URL = 'https://api.transport.nsw.gov.au/v1/gtfs/vehiclepos/buses/'
FEED_TIMEZONE = pytz.timezone('Australia/Sydney')
def process_trip_update(trip_update, threshold):
global count
trip = trip_update.trip
print(trip)
for stop_update in trip_update.stop_time_update:
if stop_update.arrival.time < threshold:
print(trip.route_id)
print(stop_update.stop_sequence)
print(stop_update.arrival.delay)
print(stop_update.arrival.time)
print(threshold)
print()
def fetch():
feed = gtfs_realtime_pb2.FeedMessage()
headers = {'Authorization': 'apikey ' + GTFS_API_KEY}
response = requests.get(GTFS_REALTIME_URL, headers=headers)
if response.status_code == 200:
feed.ParseFromString(response.content)
now = datetime.now(tz=FEED_TIMEZONE)
threshold = int((now + timedelta(minutes=3)).timestamp())
for entity in feed.entity:
if entity.HasField('trip_update'):
process_trip_update(entity.trip_update, threshold)
else:
print(response.status_code)
print(response.content)
if __name__ == '__main__':
fetch()
|
python
|
# Generated by Django 3.0.6 on 2020-05-25 01:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sme_management', '0008_auto_20200525_0113'),
]
operations = [
migrations.AlterField(
model_name='smeproject',
name='documents',
field=models.FileField(blank=True, upload_to='projects/'),
),
]
|
python
|
"""Build an online book using Jupyter Notebooks and Jekyll."""
from pathlib import Path
import os
# Load the version from the template Jupyter Book repository config
path_file = Path(__file__)
path_yml = path_file.parent.joinpath('book_template', '_config.yml')
# Read in the version *without* pyyaml because we can't assume it's installed
lines = path_yml.read_text().split('\n')
version_line = [line for line in lines if 'jupyter_book_version' in line][0]
__version__ = version_line.split(' ')[-1].strip().strip('"')
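# For illustration (assumed config format): a line in _config.yml such as
#   jupyter_book_version: "0.7.0"
# yields __version__ == "0.7.0" after the split and strip above.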
|
python
|
# importing forms
from django import forms
from messenger.models import Message
from newsletter.models import *
class MessageForm(forms.ModelForm):
OPTIONS = (('Y', 'Yes'),
('N', 'No'),)
is_encrypted = forms.ChoiceField(required=True, choices=OPTIONS, help_text="Encrypt this message?")
message_title = forms.CharField(required=True, help_text="Subject")
message_content = forms.CharField(required=True, help_text="Message Content", widget=forms.Textarea(attrs={'rows':6, 'cols': 20}))
message_to = forms.CharField(required=True, help_text="Message Recipient")
class Meta:
model = Message
fields = ("message_to", "message_title", "is_encrypted", "message_content")
class EmailForm(forms.ModelForm):
message_subject = forms.CharField(required=True, label="Subject of Email")
message_content = forms.CharField(required=True, label="Email Content", widget=forms.Textarea(attrs={'rows': 6, 'cols': 20}))
class Meta:
model = Message
fields = ("message_subject", "message_content")
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-08-16 09:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('program_manage', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='program',
name='os_type',
field=models.IntegerField(blank=True, default=0, null=True, verbose_name='\u7cfb\u7edf\u7c7b\u578b'),
),
migrations.AlterField(
model_name='program',
name='status',
field=models.IntegerField(blank=True, default=0, null=True, verbose_name='\u72b6\u6001'),
),
migrations.AlterField(
model_name='program',
name='type',
field=models.IntegerField(blank=True, default=0, null=True, verbose_name='\u7a0b\u5e8f\u7c7b\u578b'),
),
]
|
python
|
class Shark:
animal_type = "fish"
location = "ocean"
followers = 5
def __init__(self, name, age):
self.name = name
self.age = age
s1 = Shark("Frederick", 12)
s2 = Shark("Nicholas", "10")
print("\n>>> Shark 1 <<<")
print("Name:", s1.name)
print("Age:", s1.age)
print("Type:", s1.animal_type)
print("Location:", s1.location)
print("Followers:", s1.followers)
print("\n>>> Shark 2 <<<")
print("Name:", s2.name)
print("Age:", s2.age)
print("Type:", s2.animal_type)
print("Location:", s2.location)
print("Followers:", s2.followers)
|
python
|
import requests
import os
from core.client import TweepyClient, OpenaiClient
BEARER_TOKEN = ""
API_SECRET_KEY = ""
CLIENT_SECRET = ""
API_KEY = ""
CLIENT_KEY = ""
OPENAI_KEY = ""
def bearer_oauth(r):
# To set your environment variables in your terminal run the following line:
# export 'BEARER_TOKEN'='<your_bearer_token>'
# bearer_token = os.environ.get("BEARER_TOKEN")
r.headers['Authorization'] = "Bearer {}".format(BEARER_TOKEN)
return r
def test():
URL = 'https://api.twitter.com/2/compliance/jobs'
headers = {}
response = requests.get(f"{URL}/{id}", auth=bearer_oauth, headers=headers)
print(response)
def make_one_tweet():
print(API_KEY)
print(API_SECRET_KEY)
tweeter_client = TweepyClient(
API_KEY,
API_SECRET_KEY,
CLIENT_KEY,
CLIENT_SECRET
)
openai_client = OpenaiClient(
OPENAI_KEY
)
msg = openai_client.QnA()
tweeter_client.make_a_tweet(msg)
def init_client():
tweeter_client = TweepyClient(
API_KEY,
API_SECRET_KEY,
CLIENT_KEY,
CLIENT_SECRET
)
openai_client = OpenaiClient(
OPENAI_KEY
)
# msg = openai_client.QnA()
# tweeter_client.make_a_tweet(msg)
# new_client.make_a_tweet("First tweet via bot")
def get_env_variable():
global BEARER_TOKEN, API_SECRET_KEY,CLIENT_SECRET,API_KEY,CLIENT_KEY,OPENAI_KEY
BEARER_TOKEN = (os.environ['BEARER_TOKEN'])
API_SECRET_KEY = (os.environ['API_SECRET_KEY'])
CLIENT_SECRET = (os.environ['CLIENT_SECRET'])
API_KEY = (os.environ['API_KEY'])
CLIENT_KEY = (os.environ['CLIENT_KEY'])
OPENAI_KEY = (os.environ['OPENAI_KEY'])
def get_variable_from_config():
from config import BEARER_TOKEN, API_SECRET_KEY,CLIENT_SECRET,API_KEY,CLIENT_KEY,OPENAI_KEY
return
def main():
get_env_variable()
make_one_tweet()
# init_client()
# test()
if __name__ == "__main__":
main()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Category, Choice, BallotPaper
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class CategoryAdmin(admin.ModelAdmin):
readonly_fields = ('ballot_paper', 'category_name', 'created_by')
inlines = [ChoiceInline]
list_display = ['category_name', 'ballot_paper', 'created_by']
class CategoryInline(admin.TabularInline):
model = Category
class BallotAdmin(admin.ModelAdmin):
prepopulated_fields = {'ballot_url': ('ballot_name',)}
readonly_fields = ('created_by',)
list_display = ['ballot_name', 'created_by', 'is_custom']
admin.site.register(Category, CategoryAdmin)
admin.site.register(BallotPaper, BallotAdmin)
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CTLSTMCell(nn.Module):
def __init__(self, hidden_dim, beta=1.0, device=None):
super(CTLSTMCell, self).__init__()
device = device or 'cpu'
self.device = torch.device(device)
self.hidden_dim = hidden_dim
self.linear = nn.Linear(hidden_dim * 2, hidden_dim * 7, bias=True)
self.beta = beta
def forward(
self, rnn_input,
hidden_t_i_minus, cell_t_i_minus, cell_bar_im1):
dim_of_hidden = rnn_input.dim() - 1
input_i = torch.cat((rnn_input, hidden_t_i_minus), dim=dim_of_hidden)
output_i = self.linear(input_i)
gate_input, \
gate_forget, gate_output, gate_pre_c, \
gate_input_bar, gate_forget_bar, gate_decay = output_i.chunk(
7, dim_of_hidden)
gate_input = torch.sigmoid(gate_input)
gate_forget = torch.sigmoid(gate_forget)
gate_output = torch.sigmoid(gate_output)
gate_pre_c = torch.tanh(gate_pre_c)
gate_input_bar = torch.sigmoid(gate_input_bar)
gate_forget_bar = torch.sigmoid(gate_forget_bar)
gate_decay = F.softplus(gate_decay, beta=self.beta)
cell_i = gate_forget * cell_t_i_minus + gate_input * gate_pre_c
cell_bar_i = gate_forget_bar * cell_bar_im1 + gate_input_bar * gate_pre_c
return cell_i, cell_bar_i, gate_decay, gate_output
def decay(self, cell_i, cell_bar_i, gate_decay, gate_output, dtime):
# no need to consider extra_dim_particle here
# cuz this function is applicable to any # of dims
if dtime.dim() < cell_i.dim():
dtime = dtime.unsqueeze(cell_i.dim()-1).expand_as(cell_i)
cell_t_ip1_minus = cell_bar_i + (cell_i - cell_bar_i) * torch.exp(
-gate_decay * dtime)
hidden_t_ip1_minus = gate_output * torch.tanh(cell_t_ip1_minus)
return cell_t_ip1_minus, hidden_t_ip1_minus
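# Minimal usage sketch (the shapes and values are assumptions, not part of the
# original module): one continuous-time LSTM update followed by decaying the cell
# state over an elapsed inter-event time.
if __name__ == '__main__':
    hidden_dim = 8
    cell = CTLSTMCell(hidden_dim)
    rnn_input = torch.randn(4, hidden_dim)   # batch of 4 event embeddings
    hidden = torch.zeros(4, hidden_dim)      # h(t_i-)
    cell_state = torch.zeros(4, hidden_dim)  # c(t_i-)
    cell_bar = torch.zeros(4, hidden_dim)    # c_bar_{i-1}
    c_i, c_bar_i, gate_decay, gate_output = cell(rnn_input, hidden, cell_state, cell_bar)
    dtime = torch.rand(4)                    # elapsed time until the next event
    c_next, h_next = cell.decay(c_i, c_bar_i, gate_decay, gate_output, dtime)
    print(h_next.shape)  # torch.Size([4, 8])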
|
python
|
"""tools."""
from collections import OrderedDict
import yaml
def ordered_yaml_load(filepath, loader=yaml.Loader,
object_pairs_hook=OrderedDict):
"""ordered_yaml_load."""
class OrderedLoader(loader):
"""OrderedLoader."""
def construct_mapping(loader, node):
"""construct_mapping."""
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
with open(filepath) as stream:
return yaml.load(stream, OrderedLoader)
def ordered_yaml_dump(data, stream=None, dumper=yaml.SafeDumper, **kwargs):
"""ordered_yaml_dump."""
class OrderedDumper(dumper):
""".OrderedDumper"""
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwargs)
def main(_data):
import pprint
with open('test.yaml', 'w') as _f:
ordered_yaml_dump(
_data, _f, default_flow_style=False, allow_unicode=True)
with open('test.yaml') as _f:
        data = yaml.load(_f, Loader=yaml.FullLoader)
pprint.pprint(data)
if __name__ == '__main__':
data = {
"name": "文章接口测试",
"classname": "ArticleAPITest",
"cases": [
{
"name": "get_article",
"assertions": [{"status_code": 200}, {"errmsg": "成功"}]
},
{
"name": "get_article_2",
"assertions": [{"status_code": 400}, {"errmsg": "失败"}]
},
]
}
main(data)
|
python
|
from .hankify_pw import hanky_pass # noqa
|
python
|
from django import template
from ..forms.widgets import LikertSelect, RatingSelect
register = template.Library()
@register.filter
def is_likert(field):
return isinstance(field.field.widget, LikertSelect)
@register.filter
def is_rating(field):
return isinstance(field.field.widget, RatingSelect)
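# Template usage sketch (assumes this module is loaded as a template tag library in
# the template, e.g. with {% load <library name> %}):
#   {% if field|is_likert %} ... render the Likert widget layout ... {% endif %}
#   {% if field|is_rating %} ... render the rating widget layout ... {% endif %}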
|
python
|
picture = [
[0,0,0,1,0,0,0],
[0,0,1,1,1,0,0],
[0,1,1,1,1,1,0],
[1,1,1,1,1,1,1],
[0,0,0,1,0,0,0],
[0,0,0,1,0,0,0]
]
for row in range(len(picture)):
for col in range(len(picture[row])):
if picture[row][col] == 1:
print('*', end = '')
else:
print(' ', end = '')
print()
# Another way
print()
for line in picture:
for pixel in line:
if pixel == 1:
print('*', end = '')
else:
print(' ', end = '')
print()
|
python
|
import socket
from threading import Thread, Lock
import sys
def receiver(sock):
while flag:
data = sock.recv(1024)
if data == 'quit':
sys.exit(0)
print "\t\t"+data
host = '192.168.122.1'
port = 50020
size = 1024
flag = True
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect((host,port))
hop = Thread(target=receiver, args=(sock,))
hop.daemon = True
hop.start()
data = "random"
while data != 'quit':
data = raw_input()
sock.send(data)
'''Send data to server'''
sock.close()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pass1objects.py
#
# Part of MARK II project. For informations about license, please
# see file /LICENSE .
#
# author: Vladislav Mlejnecký
# email: [email protected]
import sys
from common import *
class item():
def __init__(self, parrent, address):
self.address = address
self.parrent = parrent
class blob(item):
def __init__(self, parrent, address, data):
item.__init__(self, parrent, address)
self.data = data
self.relocation = False
self.special = False
def translate(self, symbol_table, special_symbol_table):
result = trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.data)
return checkSizeOfImmediate(self, 32, result[0])
class instruction(item):
def __init__(self, parrent, address, opcode):
item.__init__(self, parrent, address)
self.opcode = opcode
self.relocation = False
self.special = False
self.register_a = "R0"
self.register_b = "R0"
self.register_c = "R0"
self.register_f = "R0"
self.reg_a = 0
self.reg_b = 0
self.reg_c = 0
self.reg_f = 0
self.regs = 0
def decodeRegs(self):
self.reg_a = self.decodeRegName(self.register_a)
self.reg_b = self.decodeRegName(self.register_b)
self.reg_c = self.decodeRegName(self.register_c)
self.reg_f = self.decodeRegName(self.register_f)
self.regs = (self.reg_f << 20) | (self.reg_a << 8) | (self.reg_b << 4) | self.reg_c
def decodeRegName(self, reg_name):
reg = -1
if reg_name == "R0": reg = 0
elif reg_name == "R1": reg = 1
elif reg_name == "R2": reg = 2
elif reg_name == "R3": reg = 3
elif reg_name == "R4": reg = 4
elif reg_name == "R5": reg = 5
elif reg_name == "R6": reg = 6
elif reg_name == "R7": reg = 7
elif reg_name == "R8": reg = 8
elif reg_name == "R9": reg = 9
elif reg_name == "R10": reg = 10
elif reg_name == "R11": reg = 11
elif reg_name == "R12": reg = 12
elif reg_name == "R13": reg = 13
elif reg_name == "R14": reg = 14
elif reg_name == "R15": reg = 15
elif reg_name == "PC": reg = 14
elif reg_name == "SP": reg = 15
else: reg = -1
if reg == -1:
print "Error! Instruction '" + self.opcode + "' at " + self.parrent.fileName + "@" + str(self.parrent.lineNumber) + ". Invalid name of register."
sys.exit(1)
else:
return reg
class RET(instruction):
def __init__(self, parrent, address):
instruction.__init__(self, parrent, address, 'RET')
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x01000000
class RETI(instruction):
def __init__(self, parrent, address):
instruction.__init__(self, parrent, address, 'RETI')
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x02000000
class CALLI(instruction):
def __init__(self, parrent, address, register_1):
instruction.__init__(self, parrent, address, 'CALLI')
self.register_a = register_1
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x03000000
class PUSH(instruction):
def __init__(self, parrent, address, register_1):
instruction.__init__(self, parrent, address, 'PUSH')
self.register_b = register_1
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x04000000
class POP(instruction):
def __init__(self, parrent, address, register_1):
instruction.__init__(self, parrent, address, 'POP')
self.register_c = register_1
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x05000000
class LDI(instruction):
def __init__(self, parrent, address, register_1, register_2):
instruction.__init__(self, parrent, address, 'LDI')
self.register_a = register_1
self.register_c = register_2
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x06000000
class STI(instruction):
def __init__(self, parrent, address, register_1, register_2):
instruction.__init__(self, parrent, address, 'STI')
self.register_b = register_1
self.register_a = register_2
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x07000000
class BNZI(instruction):
def __init__(self, parrent, address, register_1, register_2):
instruction.__init__(self, parrent, address, 'BNZI')
self.register_f = register_1
self.register_a = register_2
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x08000000
class BZI(instruction):
def __init__(self, parrent, address, register_1, register_2):
instruction.__init__(self, parrent, address, 'BZI')
self.register_f = register_1
self.register_a = register_2
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x09000000
class CMPI(instruction):
def __init__(self, parrent, address, comparison, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'CMPI')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
self.comparison = comparison
def __decodeComparison(self):
code = -1
if self.comparison == "EQ" : code = 6;
elif self.comparison == "NEQ" : code = 7;
elif self.comparison == "L" : code = 10;
elif self.comparison == "LU" : code = 14;
elif self.comparison == "LE" : code = 11;
elif self.comparison == "LEU" : code = 15;
elif self.comparison == "G" : code = 8;
elif self.comparison == "GU" : code = 12;
elif self.comparison == "GE" : code = 9;
elif self.comparison == "GEU" : code = 13;
else: code = -1
if code == -1:
print "Error! Instruction '" + self.opcode + "' at " + self.parrent.fileName + "@" + str(self.parrent.lineNumber) + ". Invalid comparison name."
sys.exit(1)
else:
return code
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
comp = self.__decodeComparison()
return self.regs | 0x0A000000 + (comp << 20)
class CMPF(instruction):
def __init__(self, parrent, address, comparison, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'CMPF')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
self.comparison = comparison
def __decodeComparison(self):
code = -1
if self.comparison == "EQ" : code = 0;
elif self.comparison == "NEQ" : code = 5;
elif self.comparison == "L" : code = 3;
elif self.comparison == "LE" : code = 4;
elif self.comparison == "G" : code = 1;
elif self.comparison == "GE" : code = 2;
else: code = -1
if code == -1:
print "Error! Instruction '" + self.opcode + "' at " + self.parrent.fileName + "@" + str(self.parrent.lineNumber) + ". Invalid comparison name."
sys.exit(1)
else:
return code
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
comp = self.__decodeComparison()
return self.regs | 0x0B000000 + (comp << 20)
class MULU(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'MULU')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0C000000
class MUL(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'MUL')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0C100000
class ADD(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'ADD')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0C600000
class SUB(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'SUB')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0C700000
class INC(instruction):
def __init__(self, parrent, address, register_1, register_2):
instruction.__init__(self, parrent, address, 'INC')
self.register_a = register_1
self.register_c = register_2
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0C800000
class DEC(instruction):
def __init__(self, parrent, address, register_1, register_2):
instruction.__init__(self, parrent, address, 'DEC')
self.register_a = register_1
self.register_c = register_2
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0C900000
class AND(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'AND')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0CA00000
class OR(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'OR')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0CB00000
class XOR(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'XOR')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0CC00000
class NOT(instruction):
def __init__(self, parrent, address, register_1, register_2):
instruction.__init__(self, parrent, address, 'NOT')
self.register_a = register_1
self.register_c = register_2
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0CD00000
class DIVU(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'DIVU')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0D200000
class DIV(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'DIV')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0D300000
class REMU(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'REMU')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0D400000
class REM(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'REM')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0D500000
class LSL(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'LSL')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0E000000
class LSR(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'LSR')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0E100000
class ROL(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'ROL')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0E200000
class ROR(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'ROR')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0E300000
class ASL(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'ASL')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0E400000
class ASR(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'ASR')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0E500000
class FSUB(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'FSUB')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0F000000
class FADD(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'FADD')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x0F300000
class FMUL(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'FMUL')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x10100000
class FDIV(instruction):
def __init__(self, parrent, address, register_1, register_2, register_3):
instruction.__init__(self, parrent, address, 'FDIV')
self.register_a = register_1
self.register_b = register_2
self.register_c = register_3
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x11200000
class MVIL(instruction):
def __init__(self, parrent, address, register, value):
instruction.__init__(self, parrent, address, 'MVIL')
self.register_c = register
self.register_b = register
self.value = value
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
value = checkSizeOfImmediate(self, 16, trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.value)[0])
return self.regs | 0x12000000 | (value << 8)
class MVIH(instruction):
def __init__(self, parrent, address, register, value):
instruction.__init__(self, parrent, address, 'MVIH')
self.register_c = register
self.register_b = register
self.value = value
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
value = checkSizeOfImmediate(self, 16, trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.value)[0])
return self.regs | 0x13000000 | (value << 8)
class CALL(instruction):
def __init__(self, parrent, address, call_address):
instruction.__init__(self, parrent, address, 'CALL')
self.call_address = call_address
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
result = trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.call_address)
self.relocation = result[1]
call_address = checkSizeOfImmediate(self, 24, result[0])
self.special = result[2]
return self.regs | 0x80000000 | (call_address << 4)
class LD(instruction):
def __init__(self, parrent, address, ld_address, register):
instruction.__init__(self, parrent, address, 'LD')
self.register_c = register
self.ld_address = ld_address
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
result = trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.ld_address)
self.relocation = result[1]
ld_address = checkSizeOfImmediate(self, 24, result[0])
self.special = result[2]
return self.regs | 0x90000000 | (ld_address << 4)
class ST(instruction):
def __init__(self, parrent, address, register, st_address):
instruction.__init__(self, parrent, address, 'ST')
self.register_b = register
self.st_address = st_address
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
result = trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.st_address)
self.relocation = result[1]
st_address = checkSizeOfImmediate(self, 24, result[0])
self.special = result[2]
return self.regs | 0xA0000000 | (st_address & 0xF) | (((st_address & 0xFFFFF0) >> 4) << 8)
class BZ(instruction):
def __init__(self, parrent, address, register, br_address):
instruction.__init__(self, parrent, address, 'BZ')
self.register_f = register
self.br_address = br_address
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
result = trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.br_address)
self.relocation = result[1]
br_address = checkSizeOfImmediate(self, 24, result[0])
self.special = result[2]
return self.regs | 0xB0000000 | ((br_address & 0xF00000) << 4) | (br_address & 0x0FFFFF)
class BNZ(instruction):
def __init__(self, parrent, address, register, br_address):
instruction.__init__(self, parrent, address, 'BNZ')
self.register_f = register
self.br_address = br_address
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
result = trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.br_address)
self.relocation = result[1]
br_address = checkSizeOfImmediate(self, 24, result[0])
self.special = result[2]
return self.regs | 0xC0000000 | ((br_address & 0xF00000) << 4) | (br_address & 0x0FFFFF)
class MVIA(instruction):
def __init__(self, parrent, address, register, operand):
instruction.__init__(self, parrent, address, 'MVIA')
self.register_c = register
self.operand = operand
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
result = trySolveImmediateOperand(self, symbol_table, special_symbol_table, self.operand)
self.relocation = result[1]
operand = checkSizeOfImmediate(self, 24, result[0])
self.special = result[2]
return self.regs | 0xD0000000 | (operand << 4)
class SWI(instruction):
def __init__(self, parrent, address):
instruction.__init__(self, parrent, address, 'SWI')
def translate(self, symbol_table, special_symbol_table):
self.decodeRegs()
return self.regs | 0x14000000
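# Worked example (illustration only): for an ADD built with register_1='R1',
# register_2='R2', register_3='R3', decodeRegs() packs reg_a=1, reg_b=2 and reg_c=3
# into bits 8-11, 4-7 and 0-3 of the REGS field, so translate({}, {}) returns
# 0x0C600000 | 0x123 == 0x0C600123. Instructions with immediate operands additionally
# resolve the operand through trySolveImmediateOperand from the common module.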
|
python
|
from __future__ import unicode_literals
SEQUENCE = [
'bugzilla_url_charfield',
'repository_raw_file_url',
'repository_visible',
'repository_path_length_255',
'localsite',
'repository_access_control',
'group_site',
'repository_hosting_accounts',
'repository_extra_data_null',
'unique_together_baseline',
'repository_archive',
'repository_hooks_uuid',
'repository_raw_password',
'repository_name_length_255',
]
|
python
|
_version='2022.0.1.dev8'
|
python
|
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torchsummary import summary
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device, torch.__version__)
# Hyper parameters
num_epochs = 5
num_classes = 10
batch_size = 100
learning_rate = 0.01
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='G:/Other_Datasets/mnist/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = torchvision.datasets.MNIST(root='G:/Other_Datasets/mnist/',
train=False,
transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Convolutional neural network
def Dropout(X, drop_prob):
X = X.float()
assert 0 <= drop_prob <= 1
keep_prob = 1 - drop_prob
    # in this case every element is dropped
if keep_prob == 0:
return torch.zeros_like(X)
    # uniform mask: each unit is kept independently with probability keep_prob
    mask = (torch.rand(X.shape) < keep_prob).float().to(X.device)
return mask * X / keep_prob
def batch_norm(is_training, X, gamma, beta, moving_mean, moving_var, eps, momentum):
if not is_training:
        # in inference mode, use the running (moving-average) mean and variance passed in
X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)
else:
assert len(X.shape) in (2, 4)
if len(X.shape) == 2:
            # Fully connected layer: compute mean and variance over the feature dimension
mean = X.mean(dim=0)
var = ((X - mean) ** 2).mean(dim=0)
else:
            # 2D convolutional layer: compute the per-channel mean and variance (over the
            # batch, height and width dims), keeping X's shape for later broadcasting
mean = X.mean(dim=0, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
var = ((X - mean) ** 2).mean(dim=0, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
        # In training mode, standardize with the current batch mean and variance
X_hat = (X - mean) / torch.sqrt(var + eps)
        # Update the moving averages of the mean and variance
moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta  # scale and shift
return Y, moving_mean, moving_var
class BatchNorm(nn.Module):
def __init__(self, in_channels, num_dims):
super(BatchNorm, self).__init__()
if num_dims == 2:
shape = (1, in_channels)
else:
shape = (1, in_channels, 1, 1)
        # Learnable scale (gamma) and shift (beta) parameters, initialized to 1 and 0
self.gamma = nn.Parameter(torch.ones(shape))
self.beta = nn.Parameter(torch.zeros(shape))
        # Buffers not involved in gradients or updates, initialized to 0 in main memory
self.moving_mean = torch.zeros(shape)
self.moving_var = torch.zeros(shape)
def forward(self, X):
        # If X lives on another device, copy moving_mean and moving_var to X's device
if self.moving_mean.device != X.device:
self.moving_mean = self.moving_mean.to(X.device)
self.moving_var = self.moving_var.to(X.device)
        # Save the updated moving_mean and moving_var; a Module's training attribute defaults to True and is set to False after calling .eval()
Y, self.moving_mean, self.moving_var = batch_norm(self.training,
X, self.gamma, self.beta, self.moving_mean,
self.moving_var, eps=1e-5, momentum=0.9)
return Y
class ConvNet(nn.Module):
def __init__(self, in_channels, num_classes):
super(ConvNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels, 16, kernel_size=5, stride=1, padding=2),
BatchNorm(16, num_dims=4),
# nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
) # 28*28*1 -> 14*14*16
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
BatchNorm(32, num_dims=4),
# nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2)
) # 14*14*16 -> 7*7*32
self.fc1 = nn.Linear(7 * 7 * 32, 128) # 7*7*32 -> 128
self.fc2 = nn.Linear(128, num_classes) # 128 -> 10
def forward(self, input):
out = self.layer1(input)
out = self.layer2(out)
        out = out.reshape(out.size(0), -1)  # PyTorch follows the NCHW convention
        out = F.relu(self.fc1(out))
        if self.training:  # apply dropout only when training the model
            out = Dropout(out, drop_prob=0.5)
out = self.fc2(out)
return out
model = ConvNet(1, num_classes).to(device)
# print(model)
# summary(model, (1, 28, 28))
# Construct Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
model.train()
total_step = len(train_loader)
for epoch in range(num_epochs):
for batch_idx, (images, labels) in enumerate(train_loader):
images, labels = images.to(device), labels.to(device)
# Forward pass
outputs = model(images)
loss = criterion(outputs, labels)
# Backward and Optimize
optimizer.zero_grad() # zero the gradient buffers
loss.backward()
optimizer.step() # Does the update
if (batch_idx + 1) % 100 == 0:
print('Epoch [{}/{}], step[{}/{}], loss:{:.4f}'
.format(epoch + 1, num_epochs, batch_idx + 1, total_step, loss.item()))
# Test the model
model.eval() # eval model (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
correct = 0
total = 0
for batch_idx, (images, labels) in enumerate(test_loader):
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
|
python
|
from queue import PriorityQueue
v = 14
graph = [[] for i in range(v)]
def best_first_search(source, target, n):
    visited = [False] * n
    visited[source] = True
pq = PriorityQueue()
pq.put((0, source))
while pq.empty() == False:
u = pq.get()[1]
# Displaying the path having lowest cost
print(u, end=" ")
if u == target:
break
for v, c in graph[u]:
if visited[v] == False:
visited[v] = True
pq.put((c, v))
print('')
# Function for adding edges to graph
def addedge(x, y, cost):
graph[x].append((y, cost))
graph[y].append((x, cost))
# The nodes shown in above example(by alphabets) are
# implemented using integers addedge(x,y,cost);
addedge(0, 1, 3)
addedge(0, 2, 6)
addedge(0, 3, 5)
addedge(1, 4, 9)
addedge(1, 5, 8)
addedge(2, 6, 12)
addedge(2, 7, 14)
addedge(3, 8, 7)
addedge(8, 9, 5)
addedge(8, 10, 6)
addedge(9, 11, 1)
addedge(9, 12, 10)
addedge(9, 13, 2)
source = 0
target = 9
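# With the edges above and source=0, target=9, the greedy visit order printed is:
# 0 1 3 2 8 9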
best_first_search(source, target, v)
|
python
|
#!/usr/bin/python3
from demo_utils.learning import get_model
import time
import json
from demo_utils.general import gamest
from demo_utils.general import get_label
from sklearn.model_selection import GridSearchCV
# Everything needed to run an experiment
def cross_validate(model, tunning_params, data_train, target_train):
clf = GridSearchCV(model, tunning_params, cv=10, iid=False)
clf.fit(data_train, target_train)
best_params = clf.best_params_
return best_params
def store_exp(*dics, exp_code, dts_name):
    '''
    Receives dictionaries and stores them on disk as JSON.
    Parameters
    ==========
    dics : tuple of dict
        Dictionaries with the results of the experiments
    exp_code : str
        Which experiment is being run. Of the form 2_4
    dts_name : str
        Name of the dataset, used to build the output file name
    '''
filename = f'experimental_results/{exp_code}/{dts_name}.json'
with open(filename, 'w') as f:
json.dump(dics, f, indent=4, sort_keys=True)
def get_prefixes(box_name):
    # Returns the appropriate prefixes for accessing sampler and model
    # parameters, depending on the box type
if box_name == 'none':
sampler_prefix = 'sampler__'
model_prefix = 'model__'
elif box_name in ['grey_bag', 'grey_ens']:
sampler_prefix = 'base_estimator__sampler__'
model_prefix = 'base_estimator__model__'
elif box_name in ['black_bag', 'black_ens']:
sampler_prefix = 'sampler__'
model_prefix = 'model__base_estimator__'
return sampler_prefix, model_prefix
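# For example, get_prefixes('grey_bag') returns
# ('base_estimator__sampler__', 'base_estimator__model__').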
def exp(model_info, tunning_params, data_train, data_test, target_train,
target_test, description='No description'):
    '''
    Runs the specified experiment and returns a dictionary with the
    scores, the times and the label.
    This is NOT a complete experiment, only one column of the experiments.
    It is generic, valid for any experiment.
    '''
model_name = model_info['model_name']
if model_name in ['logit', 'linear_svc', 'rbf_svc']:
param_name = 'C'
elif model_name == 'dt':
param_name = 'min_impurity_decrease'
model = get_model(**model_info)
box_name = model_info['box_type']
sampler_name = model_info['sampler_name']
sampler_prefix, model_prefix = get_prefixes(box_name)
new_tunning_params = {}
for k in tunning_params:
new_k = model_prefix + \
k if k in ['C', 'min_impurity_decrease'] else sampler_prefix + k
new_tunning_params[new_k] = tunning_params[k]
if sampler_name != 'identity':
model.set_params(**{f'{sampler_prefix}n_components': 500})
##############################
    # Execution timing starts here
##############################
# time0 = time.clock()
time0 = time.perf_counter()
chosen_gamma = gamest(data_train)
if model_name == 'rbf_svc':
model.set_params(**{f'{model_prefix}gamma': chosen_gamma})
elif sampler_name != 'identity':
model.set_params(**{f'{sampler_prefix}gamma': chosen_gamma})
best_params = cross_validate(model=model,
tunning_params=new_tunning_params,
data_train=data_train,
target_train=target_train)
model.set_params(**best_params)
model.fit(data_train, target_train)
# time1 = time.clock()
time1 = time.perf_counter()
##############################
    # Execution timing ends here
##############################
c_time = time1 - time0
train_score = model.score(data_train, target_train)
test_score = model.score(data_test, target_test)
params_finales = model.get_params()
model_param = {param_name: params_finales.get(
model_prefix + param_name, 'Patata')}
if model_name != 'rbf_svc':
ret_gamma = params_finales.get(f'{sampler_prefix}gamma', None)
else:
ret_gamma = params_finales.get(f'{model_prefix}gamma', None)
label = get_label(model_name=model_name,
sampler_name=sampler_name,
box_name=box_name,
n_estim=model_info['n_estim'])
ret_dic = {
'train_score': train_score,
'test_score': test_score,
'time': c_time,
'model_param': model_param,
# 'gamma': params_finales.get(f'{sampler_prefix}gamma', None),
'gamma': ret_gamma,
'label': label,
'model_name': model_info['model_name'],
'box_name': model_info['box_type'],
'description': description,
}
print(ret_dic)
return ret_dic
def store_exp_general(*dics, filename):
    '''
    Receives dictionaries and stores them on disk as JSON.
    Parameters
    ==========
    dics : tuple of dict
        Dictionaries with the results of the experiments
    filename : str
        Path of the JSON file where the results are written
    '''
# filename = f'experimental_results/{exp_code}/{dts_name}.json'
with open(filename, 'w') as f:
json.dump(dics, f, indent=4, sort_keys=True)
|
python
|
from application.models.models import BusinessModel, ExerciseModel, SurveyModel, InstrumentModel
def query_exercise_by_id(exercise_id, session):
return session.query(ExerciseModel).filter(ExerciseModel.exercise_id == exercise_id).first()
def query_business_by_ru(ru_ref, session):
return session.query(BusinessModel).filter(BusinessModel.ru_ref == ru_ref).first()
def query_survey_by_id(survey_id, session):
return session.query(SurveyModel).filter(SurveyModel.survey_id == survey_id).first()
def query_instrument_by_id(instrument_id, session):
return session.query(InstrumentModel).filter(InstrumentModel.instrument_id == instrument_id).first()
def query_instrument(session):
return session.query(InstrumentModel)
|
python
|
from views import *
from lookups import *
import requests
import re
from utils import *
import itertools
from config import config
if config.IMPORT_PYSAM_PRIMER3:
import pysam
import csv
#hpo lookup
import random
from flask import Response, request
import os
from werkzeug.datastructures import Headers
import re
@app.route('/bam_viewer/')
def bam_viewer():
return render_template('igv_viewer.html')
@app.route('/read_viz/bam/<sample>')
def read_viz(sample):
BAM_FILES=app.config['BAM_FILES']
print(request.method)
headers=Headers()
#headers.add('Content-Type','application/octet-stream')
headers.add('Content-Transfer-Encoding','binary')
#Date:Wed, 06 Jul 2016 17:19:52 GMT
#ETag:"flask-1446310274.0-12661331-649139018"
#Expires:Thu, 07 Jul 2016 05:19:52 GMT
#Keep-Alive:timeout=5, max=93
#Last-Modified:Sat, 31 Oct 2015 16:51:14 GMT
headers.add('Accept-Ranges', 'bytes')
#Server:Apache/2.4.12 (Red Hat) mod_wsgi/3.4 Python/2.7.8
headers.add('X-Frame-Options','SAMEORIGIN')
if sample=='gencode.v19.sorted.bed':
bamfile=BAM_FILES+'/gencode.v19.sorted.bed'
elif sample=='gencode.v19.sorted.bed.idx':
bamfile=BAM_FILES+'/gencode.v19.sorted.bed.idx'
elif sample.endswith('.bai'):
bamfile=BAM_FILES+'/%s.bam.bai' % sample
else:
bamfile=BAM_FILES+'/%s.bam' % sample
size = os.path.getsize(bamfile)
print(size)
status = 200
begin = 0
end = size-1
    if "Range" in request.headers and request.method=='GET':
print(request.headers['Range'])
headers.add('Accept-Ranges','bytes')
ranges = re.findall(r"\d+", request.headers["Range"])
begin = int( ranges[0] )
if len(ranges)>1: end = int( ranges[1] )
headers.add('Content-Range','bytes %s-%s/%s' % (str(begin),str(end),size) )
headers.add('Content-Length',str((end-begin)+1))
        with open(bamfile, 'rb') as f:
            f.seek(begin)
            data = f.read(end - begin + 1)
print(len(data))
response = Response( data, status=206, mimetype="application/octet-stream", headers=headers, direct_passthrough=True)
else:
if request.method=='HEAD':
headers.add('Content-Length',size)
response = Response( '', status=200, mimetype="application/octet-stream", headers=headers, direct_passthrough=True)
elif request.method=='GET':
            response = Response( open(bamfile, 'rb'), status=200, mimetype="application/octet-stream", headers=headers, direct_passthrough=True)
#Add mimetype
response.cache_control.public = True
response.make_conditional(request)
return response
def read_viz2(sample, region):
    print(sample)
    print(region)
    import subprocess
    BAM_FILES = app.config['BAM_FILES']
    tmpfile = subprocess.Popen('mktemp', shell=True, stdout=subprocess.PIPE).stdout.read().decode().strip() + '.bam'
    print(tmpfile)
    print(subprocess.Popen("samtools view -b %s/%s_sorted_unique.bam %s > %s" % (BAM_FILES, sample, region, tmpfile), shell=True, stdout=subprocess.PIPE).stdout.read())
    subprocess.Popen('samtools index %s' % tmpfile, shell=True, stdout=subprocess.PIPE).stdout.read()
|
python
|
import os
import io
import json
import random
import uuid
from collections import defaultdict, Counter
from annoy import AnnoyIndex
from tqdm import tqdm
from itertools import product
import wcag_contrast_ratio as contrast
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from scipy.stats import mode
from pprint import pprint
import base64
from io import BytesIO
ASSETS_DIR = "assets"
BACKGROUNDS = []
FONTS = defaultdict(list) # Script names are keys
WORDS = defaultdict(list) # Script names are keys
COLOR_INDEX = AnnoyIndex(3, metric="euclidean")
COLOR_COMBINATIONS = {}
GENERATED_IMAGES_DIR = "generated_images"
SAVE_IMAGES_TO_DISK = False
# Just create the directory if it doesn't exist
if not os.path.exists(GENERATED_IMAGES_DIR):
os.mkdir(GENERATED_IMAGES_DIR)
def load_assets():
print("Loading assets")
print("-"*80)
global ASSETS_DIR
# Load all backgrounds
print("Loading backgrounds")
global BACKGROUNDS
BACKGROUNDS_DIR = os.path.join(ASSETS_DIR, "backgrounds")
for bg_filename in tqdm(os.listdir(BACKGROUNDS_DIR)):
filepath = os.path.join(BACKGROUNDS_DIR, bg_filename)
BACKGROUNDS.append(filepath)
print("{} backgrounds loaded\n\n".format(len(BACKGROUNDS)))
# Load all fonts
print("Loading fonts")
global FONTS
FONTS_DIR = os.path.join(ASSETS_DIR, "fonts")
fonts_json_file_path = os.path.join(FONTS_DIR, "google-fonts.json")
with io.open(fonts_json_file_path, encoding="utf-8") as f:
fonts_json = json.load(f)
total_fonts = 0
for font_info in tqdm(fonts_json["info"]):
for script in font_info["subsets"]:
for font in font_info["fonts"]:
font["font_path"] = os.path.join(FONTS_DIR, font_info["files_path"], font["filename"])
font["category"] = font_info["category"]
FONTS[script].append(font)
total_fonts += 1
print("Loaded {} fonts across {} scripts\n\n".format(total_fonts, len(FONTS)))
# Load all words
print("Loading words")
global WORDS
SCRIPTS_DIR = os.path.join(ASSETS_DIR, "scripts")
total_words = 0
for script_filename in tqdm(os.listdir(SCRIPTS_DIR)):
script_filepath = os.path.join(SCRIPTS_DIR, script_filename)
WORDS[script_filename] = {}
for lang_filename in os.listdir(script_filepath):
WORDS[script_filename][lang_filename] = []
lang_filepath = os.path.join(script_filepath, lang_filename)
with io.open(lang_filepath) as f:
for word in f:
word = word.strip()
if len(word) == 0 or len(word) > 30:
continue
WORDS[script_filename][lang_filename].append(word)
total_words += 1
print("Loaded {} words across {} scripts\n\n".format(total_words, len(WORDS)))
# Load all colors and color combinations
print("Loading color and combinations")
global COLOR_COMBINATIONS
PALETTES_DIR = os.path.join(ASSETS_DIR, "palettes")
colors = set()
for palette_filename in os.listdir(PALETTES_DIR):
palette_filepath = os.path.join(PALETTES_DIR, palette_filename)
with io.open(palette_filepath) as f:
for palette in tqdm(f):
# Read pallette as Hex
palette = palette.strip().split(",")
# Get good combinations as RGB pairs
color_combinations = get_good_contrast_combinations(palette)
for combination in color_combinations:
color_1, color_2 = combination
colors.add(color_1)
colors.add(color_2)
color_1, color_2 = rgb_to_hex(color_1), rgb_to_hex(color_2)
COLOR_COMBINATIONS[color_1] = color_2
COLOR_COMBINATIONS[color_2] = color_1
global COLOR_INDEX
for i, color in enumerate(colors):
COLOR_INDEX.add_item(i, color)
COLOR_INDEX.build(10)
print("Loaded {} colors\n\n".format(len(COLOR_COMBINATIONS)))
print("-"*80)
def hex_to_rgb(color):
color = color.lstrip("#")
    return tuple(int(color[i:i+2], 16)/255 for i in (0, 2, 4))
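# e.g. hex_to_rgb("#ff8000") -> (1.0, 0.5019..., 0.0), channels scaled to [0, 1]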
def get_good_contrast_combinations(palette):
# palette.append("#ffffff")
# palette.append("#000000")
palette = set(palette)
colors = [hex_to_rgb(color) for color in palette]
valid_color_combinations = []
for combination in product(colors, colors):
color_1 = combination[0]
color_2 = combination[1]
if contrast.passes_AA(contrast.rgb(color_1, color_2)):
valid_color_combinations.append((color_1, color_2))
return valid_color_combinations
def shuffle_assets():
global BACKGROUNDS
random.Random().shuffle(BACKGROUNDS)
global FONTS
for script in FONTS:
random.Random().shuffle(FONTS[script])
global WORDS
for script in WORDS:
for language in WORDS[script]:
random.Random().shuffle(WORDS[script][language])
def generate_random_payload(filters={}):
payload = {}
# Pick a random background
payload["background_path"] = random.choice(BACKGROUNDS)
# Pick a random script and a random language in it
# Filter scripts if any
available_scripts = filters.get("scripts", ["latin", "devanagari", "arabic", "cyrillic", "korean"])
payload["script"] = random.choice(available_scripts)
available_languages = [x for x in WORDS[payload["script"]].keys()]
# Filter languages if any
# available_languages = filters.get("languages", available_languages)
payload["language"] = random.choice(available_languages)
# Pick a random font with given filters
available_fonts = []
for font in FONTS[payload["script"]]:
# Filter weights if specified
if "weights" in filters and font["weight"] not in filters["weights"]:
continue
# Filter category if specified
if "categories" in filters and font["category"] not in filters["categories"]:
continue
# Filter italicization if required
if "styles" in filters and font["style"] not in filters["styles"]:
continue
available_fonts.append(font)
payload["font"] = random.choice(available_fonts)
# Pick a random word
payload["word"] = random.choice(WORDS[payload["script"]][payload["language"]])
# Add more words followed by new line or space with 50% probability
while(random.choice([0,1]) == 0):
payload["word"] += random.choice([" ", "\n"]) + random.choice(WORDS[payload["script"]][payload["language"]])
return payload
def get_suitable_text_color(image, mask):
# Find the most common color in the image which this text is covering
image_np = np.array(image)
mask_np = np.array(mask)
text_overlay_np = image_np & mask_np
useful_pixel_locations = np.nonzero(text_overlay_np)
pixel_values = image_np[useful_pixel_locations[:-1]]//4*4 # Smoothing
color = mode(pixel_values).mode[0]
color = tuple([x/255 for x in color])
# width, height = image.size
# # Make this smaller to reduce calculations
# smaller_image = image.resize((300, int(height/(width/300))))
# # Reduce total colors to simplify common color calculations
# color_quantized_image = smaller_image.convert("P", palette=Image.ADAPTIVE, colors=256).convert("RGB")
# # Get top 3 most common colors
# color_counter = Counter(color_quantized_image.getdata())
# colors = [x[0] for x in color_counter.most_common(3)]
# # Pick a random color
# color = random.choice(colors)
# color = tuple([x/255 for x in color])
# See which 10 colors are closest to this color from the palettes we have.
global COLOR_INDEX
closest_colors_from_palettes = COLOR_INDEX.get_nns_by_vector(color, 10)
closest_colors = [COLOR_INDEX.get_item_vector(x) for x in closest_colors_from_palettes]
# Get text colors for each of them and choose the one with the highest contrast
text_colors = [COLOR_COMBINATIONS[rgb_to_hex(x)] for x in closest_colors]
text_color = text_colors[np.argmax([contrast.rgb(color,hex_to_rgb(x)) for x in text_colors])]
return text_color
def rgb_to_hex(color):
color = [int(x*255) for x in color]
return "#{0:02x}".format(color[0]) + "{0:02x}".format(color[1]) + "{0:02x}".format(color[2])
def generate_image_from_payload(payload):
background = Image.open(payload["background_path"]).convert('RGB')
width, height = background.size
padding = min(50, width/10)
# Choose a font size. Reduce till text box is likely to fit in the background
divisor = random.choice([5,7,10])
padding = 30
while(True):
font_size = height//divisor
font_object = ImageFont.truetype(payload["font"]["font_path"], font_size)
text_width, text_height = font_object.getsize_multiline(payload["word"])
roi_width = text_width + padding*2
roi_height = text_height + padding*2
if roi_height < height and roi_width < width:
break
divisor += 1
# Get a random (left, top)
random_top = random.randint(0, height - roi_height -1)
random_left = random.randint(0, width - roi_width - 1)
# Crop with random box values
roi = background.crop((random_left, random_top, random_left + roi_width, random_top + roi_height))
# Draw onto black image as a mask
mask = Image.new('RGB', (roi.width, roi.height), color="#000000")
draw_pad = ImageDraw.Draw(mask)
draw_pad.text((padding, padding/4), payload["word"], font=font_object, fill="#FFFFFF")
# Get suitable text color
text_color = get_suitable_text_color(roi, mask)
# Draw onto image
draw_pad = ImageDraw.Draw(roi)
draw_pad.text((padding, padding/4), payload["word"], font=font_object, fill=text_color)
global SAVE_IMAGES_TO_DISK
if SAVE_IMAGES_TO_DISK:
filename = "%s.png" % (uuid.uuid4())
filepath = os.path.join(GENERATED_IMAGES_DIR, filename)
roi.save(filepath)
mask_filepath = "{}-mask.png".format(filepath[:-4])
mask.save(mask_filepath)
else:
filepath = None
mask_filepath = None
roi_buffered = BytesIO()
roi.save(roi_buffered, format="PNG")
mask_buffered = BytesIO()
mask.save(mask_buffered, format="PNG")
return {
"text_color": text_color,
"image": base64.b64encode(roi_buffered.getvalue()),
"mask": base64.b64encode(mask_buffered.getvalue()),
"image_filepath": filepath,
"mask_filepath": mask_filepath
}
def generate_data(filters):
try:
payload = generate_random_payload(filters)
except Exception:
output = {
"message": "Please check your filters"
}
return output
try:
image = generate_image_from_payload(payload)
output = {
"image": image["image"],
"mask": image["mask"],
"text": payload["word"],
"text_color": image["text_color"],
"font_face": payload["font"]["full_name"],
"category": payload["font"]["category"],
"italicization": payload["font"]["style"] == "italic",
"weight": int(payload["font"]["weight"]),
"script": payload["script"],
"language": payload["language"],
"image_filepath": image["image_filepath"],
"mask_filepath": image["mask_filepath"]
}
except Exception:
return generate_data(filters)
return output
if __name__ == "__main__":
load_assets()
shuffle_assets()
SAVE_IMAGES_TO_DISK = True
filters = {}
filters = {
# "scripts": ["korean"],
# "weights": ["800"]
# "style": ["italic"]
}
while(True):
output = generate_data(filters)
if "message" in output:
print(output)
else:
print("Generated: ", output["image_filepath"])
|
python
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
from datetime import datetime
from functools import partial
from typing import Any
from aiohttp import web
from sqlalchemy import select
from tglib.clients import APIServiceClient, MySQLClient
from .models import TopologyHistory
routes = web.RouteTableDef()
def custom_serializer(obj: Any) -> str:
if isinstance(obj, datetime):
return datetime.isoformat(obj)
else:
return str(obj)
@routes.get("/topology")
async def handle_get_topology(request: web.Request) -> web.Response:
"""
---
description: Fetch all of a network's topologies between a given UTC datetime range.
tags:
- Topology History
produces:
- application/json
parameters:
- in: query
name: network_name
description: The name of the network
required: true
type: string
- in: query
name: start_dt
description: The start UTC offset-naive datetime of the query in ISO 8601 format
required: true
type: string
- in: query
name: end_dt
description: The end UTC offset-naive datetime of the query in ISO 8601 format. Defaults to current datetime if not provided.
type: string
responses:
"200":
description: Return a list of topologies belonging to the given network in the given datetime range.
"400":
description: Invalid or missing parameters.
"""
network_name = request.rel_url.query.get("network_name")
if network_name is None:
raise web.HTTPBadRequest(text="Missing required 'network_name' param")
if network_name not in APIServiceClient.network_names():
raise web.HTTPBadRequest(text=f"Invalid network name: {network_name}")
start_dt = request.rel_url.query.get("start_dt")
end_dt = request.rel_url.query.get("end_dt")
# Parse start_dt, raise '400' if missing/invalid
if start_dt is None:
raise web.HTTPBadRequest(text="'start_dt' is missing from query string")
try:
start_dt_obj = datetime.fromisoformat(start_dt)
if start_dt_obj.tzinfo:
raise web.HTTPBadRequest(
text="'start_dt' param must be UTC offset-naive datetime"
)
except ValueError:
raise web.HTTPBadRequest(text=f"'start_dt' is invalid ISO 8601: '{start_dt}'")
# Parse end_dt, use current datetime if not provided. Raise '400' if invalid
if end_dt is None:
end_dt_obj = datetime.utcnow()
else:
try:
end_dt_obj = datetime.fromisoformat(end_dt)
if end_dt_obj.tzinfo:
raise web.HTTPBadRequest(
text="'end_dt' param must be UTC offset-naive datetime"
)
except ValueError:
raise web.HTTPBadRequest(text=f"'end_dt' is invalid ISO 8601: '{end_dt}'")
query = select([TopologyHistory.topology, TopologyHistory.last_updated]).where(
(TopologyHistory.network_name == network_name)
& (TopologyHistory.last_updated >= start_dt_obj)
& (TopologyHistory.last_updated <= end_dt_obj)
)
async with MySQLClient().lease() as sa_conn:
cursor = await sa_conn.execute(query)
return web.json_response(
{"topologies": [dict(row) for row in await cursor.fetchall()]},
dumps=partial(json.dumps, default=custom_serializer),
)
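# Example request shape (network name and datetimes are hypothetical):
#   GET /topology?network_name=my_network&start_dt=2021-01-01T00:00:00
# responds with {"topologies": [{"topology": {...}, "last_updated": "..."}, ...]}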
|
python
|
#!usr/bin/python
# -*- coding:utf8 -*-
class UserModel(object):
users = {
1: {'name': 'zhang', 'age': 10},
2: {'name': 'wang', 'age': 12},
3: {'name': 'li', 'age': 20},
4: {'name': 'zhao', 'age': 30},
}
@classmethod
def get(cls, user_id):
return cls.users[user_id]
# return cls.users.get(user_id)
@classmethod
def get_all(cls):
return list(cls.users.values())
@classmethod
def create(cls, name, age):
user_dict = {'name': name, 'age': age}
max_id = max(cls.users.keys()) + 1
cls.users[max_id] = user_dict
@classmethod
def update(cls, user_id, age):
cls.users[user_id]['age'] = age
@classmethod
def delete(cls, user_id):
if user_id in cls.users:
return cls.users.pop(user_id)
|
python
|
# Copyright (c) 2019-2021, Jonas Eschle, Jim Pivarski, Eduardo Rodrigues, and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import numpy
import pytest
import vector._backends.numpy_
import vector._backends.object_
import vector._methods
def test_spatial_object():
vec = vector._backends.object_.VectorObject3D(
vector._backends.object_.AzimuthalObjectXY(0.1, 0.2),
vector._backends.object_.LongitudinalObjectZ(0.3),
)
out = vec.rotateY(0.25)
assert isinstance(out.azimuthal, vector._methods.AzimuthalXY)
assert isinstance(out.longitudinal, vector._methods.LongitudinalZ)
assert out.x == pytest.approx(0.17111242994742137)
assert out.y == pytest.approx(0.2)
assert out.z == pytest.approx(0.2659333305877411)
for t in "xyz", "xytheta", "xyeta", "rhophiz", "rhophitheta", "rhophieta":
out = getattr(vec, "to_" + t)().rotateY(0.25)
assert isinstance(out.azimuthal, vector._methods.AzimuthalXY)
assert isinstance(out.longitudinal, vector._methods.LongitudinalZ)
assert out.x == pytest.approx(0.17111242994742137)
assert out.y == pytest.approx(0.2)
assert out.z == pytest.approx(0.2659333305877411)
def test_spatial_numpy():
vec = vector._backends.numpy_.VectorNumpy3D(
[(0.1, 0.2, 0.3)],
dtype=[("x", numpy.float64), ("y", numpy.float64), ("z", numpy.float64)],
)
out = vec.rotateY(0.25)
assert isinstance(out.azimuthal, vector._methods.AzimuthalXY)
assert isinstance(out.longitudinal, vector._methods.LongitudinalZ)
assert out[0].x == pytest.approx(0.17111242994742137)
assert out[0].y == pytest.approx(0.2)
assert out[0].z == pytest.approx(0.2659333305877411)
for t in "xyz", "xytheta", "xyeta", "rhophiz", "rhophitheta", "rhophieta":
out = getattr(vec, "to_" + t)().rotateY(0.25)
assert isinstance(out.azimuthal, vector._methods.AzimuthalXY)
assert isinstance(out.longitudinal, vector._methods.LongitudinalZ)
assert out[0].x == pytest.approx(0.17111242994742137)
assert out[0].y == pytest.approx(0.2)
assert out[0].z == pytest.approx(0.2659333305877411)
def test_lorentz_object():
vec = vector._backends.object_.VectorObject4D(
vector._backends.object_.AzimuthalObjectXY(0.1, 0.2),
vector._backends.object_.LongitudinalObjectZ(0.3),
vector._backends.object_.TemporalObjectT(99),
)
out = vec.rotateY(0.25)
assert isinstance(out.azimuthal, vector._methods.AzimuthalXY)
assert isinstance(out.longitudinal, vector._methods.LongitudinalZ)
assert hasattr(out, "temporal")
assert out.x == pytest.approx(0.17111242994742137)
assert out.y == pytest.approx(0.2)
assert out.z == pytest.approx(0.2659333305877411)
for t in (
"xyzt",
"xythetat",
"xyetat",
"rhophizt",
"rhophithetat",
"rhophietat",
"xyztau",
"xythetatau",
"xyetatau",
"rhophiztau",
"rhophithetatau",
"rhophietatau",
):
out = getattr(vec, "to_" + t)().rotateY(0.25)
assert isinstance(out.azimuthal, vector._methods.AzimuthalXY)
assert isinstance(out.longitudinal, vector._methods.LongitudinalZ)
assert hasattr(out, "temporal")
assert out.x == pytest.approx(0.17111242994742137)
assert out.y == pytest.approx(0.2)
assert out.z == pytest.approx(0.2659333305877411)
def test_lorentz_numpy():
vec = vector._backends.numpy_.VectorNumpy4D(
[(0.1, 0.2, 0.3, 99)],
dtype=[
("x", numpy.float64),
("y", numpy.float64),
("z", numpy.float64),
("t", numpy.float64),
],
)
out = vec.rotateY(0.25)
assert isinstance(out.azimuthal, vector._methods.AzimuthalXY)
assert isinstance(out.longitudinal, vector._methods.LongitudinalZ)
assert out[0].x == pytest.approx(0.17111242994742137)
assert out[0].y == pytest.approx(0.2)
assert out[0].z == pytest.approx(0.2659333305877411)
for t in (
"xyzt",
"xythetat",
"xyetat",
"rhophizt",
"rhophithetat",
"rhophietat",
"xyztau",
"xythetatau",
"xyetatau",
"rhophiztau",
"rhophithetatau",
"rhophietatau",
):
out = getattr(vec, "to_" + t)().rotateY(0.25)
assert isinstance(out.azimuthal, vector._methods.AzimuthalXY)
assert isinstance(out.longitudinal, vector._methods.LongitudinalZ)
assert out[0].x == pytest.approx(0.17111242994742137)
assert out[0].y == pytest.approx(0.2)
assert out[0].z == pytest.approx(0.2659333305877411)
|
python
|
import argparse
from data.dataset import *
from model.network import *
from model.representation import *
from training.train import *
tf.random.set_random_seed(1950)
random.seed(1950)
np.random.seed(1950)
def parse_model(name):
ind_str = name.split("_")[1]
ind = [int(i) for i in ind_str]
return ind
def train(args, blocks, kernels, flops):
if args.gpu:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
else:
session = tf.Session()
layer = get_placeholder(args.model, args.layer)
dataset_train, dataset_val, dataset_test = get_train_test_datasets(args.machine, args.model,
args.train_set, args.val_set, args.test_set,
layer["name"], args.batch_size, session)
inpt, output, training = master_module(layer["shape"], blocks, kernels, flops)
trainer = PoseTrainer(inpt, output, training, session, name=args.name)
session.run(tf.global_variables_initializer())
learning_rate = [args.learning_rate]
for _ in range(args.epochs): learning_rate.append(learning_rate[-1] * args.decay)
# learning_rate = args.learning_rate
val_loss = trainer.train(dataset_train, dataset_val, dataset_test,
epochs=args.epochs,
learning_rate=learning_rate)
open(f"{MODEL_SAVER_PATH}/{args.name}/{args.name}.txt", "w+").write(
json.dumps({
"NAME": args.name,
"VAL_LOSS": trainer.validation_loss,
"FLOPS": trainer.flops,
"FIT": trainer.fitness
}, indent=3)
)
session.close()
tf.reset_default_graph()
return val_loss
def test(args, blocks, kernels, flops):
if args.gpu:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
else:
session = tf.Session()
layer = get_placeholder(args.model, args.layer)
inpt, output, training = master_module(layer["shape"], blocks, kernels, flops, inference=True)
model_dir = f"{MODEL_SAVER_PATH}/{args.name}/{args.name}.ckpt"
trainer = PoseTrainer(inpt, output, training, session, name=args.name, export_dir=model_dir)
trainer.freeze_model(training_node=False)
for test_set in args.test_sets:
dataset_test = get_test_dataset(args.machine, args.model,
test_set, layer["name"],
args.batch_size, session)
res = trainer.test_forward(dataset_test)
print(np.mean(res))
print(test_set)
print(res)
session.close()
tf.reset_default_graph()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Pose extractor trainer.')
parser.add_argument('-ma', "--machine", type=str, default="local")
parser.add_argument('-d', "--train_set", type=str, default="300w_train")
parser.add_argument('-v', "--val_set", type=str, default="300w_val")
parser.add_argument('-te', "--test_set", type=str, default="biwi")
parser.add_argument('-t', "--test_sets", nargs='+', default=["aflw", "biwi"])
parser.add_argument('-m', "--model", type=str, default="inception")
parser.add_argument('-la', "--layer", type=int, default=13)
# parser.add_argument('-lr', "--learning_rate", nargs='+', default=[0.0005, 0.0002, 0.00009, 0.00004, 0.00001, 0.00001])
parser.add_argument('-lr', "--learning_rate", type=float, default=0.0005)
parser.add_argument('-de', "--decay", type=float, default=0.8)
parser.add_argument('-b', "--batch_size", type=int, default=32)
parser.add_argument('-e', "--epochs", type=int, default=6)
parser.add_argument('-g', "--gpu", type=bool, default=True)
parser.add_argument("--test", type=bool, default=False)
parser.add_argument('-n', "--name", type=str, default="baseline")
parser.add_argument('-ind', "--individual", type=str, default=None)
args = parser.parse_args()
if args.individual:
state = NeuralSearchState()
args.name = f"test_{args.individual}"
model = state.decode_int(parse_model(args.name))
else:
model = [ConvBlock,
ConvBlockUpscale,
ConvBlock,
ConvBlock,
ConvBlockUpscale,
ConvBlock
], [1, 3, 3, 3, 3, 1], 2
model_args = args, *model
if not args.test:
res = train(*model_args)
test(*model_args)
else:
res = test(*model_args)
print("Result {}".format(res))
|
python
|
from worms import *
from worms.data import poselib
from worms.vis import showme
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from concurrent.futures.process import BrokenProcessPool
from time import perf_counter
import sys
import pyrosetta
def main():
pyrosetta.init('-corrections:beta_nov16 -mute all')
helix0 = Spliceable(
poselib.curved_helix, [
('2:2', 'N'), ('11:11', "C")])
helix = Spliceable(poselib.curved_helix, [(':4', 'N'), ('-4:', "C")])
dimer = Spliceable(poselib.c2, sites=[('1,:1', 'N'), ('1,-1:', 'C'),
('2,:1', 'N'), ('2,-1:', 'C')])
c3het = Spliceable(poselib.c3het, sites=[
('1,2:2', 'N'), ('2,2:2', 'N'), ('3,2:2', 'N')])
segments = [Segment([helix0], '_C'),
Segment([helix0], 'NC'),
Segment([helix0], 'NC'),
Segment([c3het], 'NN'),
Segment([helix], 'CN'),
Segment([dimer], 'CC'),
Segment([helix], 'NC'),
Segment([helix], 'NC'),
Segment([c3het], 'N_'), ]
w = grow(segments, Cyclic(3, from_seg=3), thresh=1)
print(w.scores)
p, sc = w.sympose(0, score=True, fullatom=True)
print('score is', sc)
assert sc < 10
p.dump_pdb('cool_worms_thing.pdb')
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
"""
requests-toolbelt
=================
See http://toolbelt.rtfd.org/ for documentation
:copyright: (c) 2014 by Ian Cordasco and Cory Benfield
:license: Apache v2.0, see LICENSE for more details
"""
from .adapters import SSLAdapter, SourceAddressAdapter
from .auth.guess import GuessAuth
from .multipart import (
MultipartEncoder, MultipartEncoderMonitor, MultipartDecoder,
ImproperBodyPartContentException, NonMultipartContentTypeException
)
from .streaming_iterator import StreamingIterator
from .utils.user_agent import user_agent
__title__ = 'requests-toolbelt'
__authors__ = 'Ian Cordasco, Cory Benfield'
__license__ = 'Apache v2.0'
__copyright__ = 'Copyright 2014 Ian Cordasco, Cory Benfield'
__version__ = '0.7.0'
__version_info__ = tuple(int(i) for i in __version__.split('.'))
__all__ = [
'GuessAuth', 'MultipartEncoder', 'MultipartEncoderMonitor',
'MultipartDecoder', 'SSLAdapter', 'SourceAddressAdapter',
'StreamingIterator', 'user_agent', 'ImproperBodyPartContentException',
'NonMultipartContentTypeException', '__title__', '__authors__',
'__license__', '__copyright__', '__version__', '__version_info__',
]
|
python
|
# -*- coding: utf-8 -*-
__author__ = 'Marcin Usielski, Michal Ernst'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = '[email protected], [email protected]'
import abc
import six
from moler.event import Event
from moler.cmd import RegexHelper
@six.add_metaclass(abc.ABCMeta)
class TextualEvent(Event):
_default_newline_chars = ("\n", "\r") # New line chars on device, not system with script!
def __init__(self, connection=None, till_occurs_times=-1, runner=None):
super(TextualEvent, self).__init__(connection=connection, runner=runner, till_occurs_times=till_occurs_times)
self._last_not_full_line = None
self._newline_chars = TextualEvent._default_newline_chars
self._regex_helper = RegexHelper() # Object to regular expression matching
def event_occurred(self, event_data):
self._consume_already_parsed_fragment()
super(TextualEvent, self).event_occurred(event_data)
@abc.abstractmethod
def on_new_line(self, line, is_full_line):
"""
Method to parse output from device.
Write your own implementation to do something useful
:param line: Line to parse, new lines are trimmed
:param is_full_line: True if new line character was removed from line, False otherwise
:return: None
"""
def data_received(self, data):
"""
Called by framework when any data are sent by device
:param data: List of strings sent by device
:return: None
"""
lines = data.splitlines(True)
for current_chunk in lines:
if not self.done():
line, is_full_line = self._update_from_cached_incomplete_line(current_chunk=current_chunk)
self._process_line_from_output(line=line, current_chunk=current_chunk, is_full_line=is_full_line)
def _process_line_from_output(self, current_chunk, line, is_full_line):
"""
Processes line from connection (device) output.
:param current_chunk: Chunk of line sent by connection.
:param line: Line of output (current_chunk plus previous chunks of this line - if any) without newline char(s).
:param is_full_line: True if line had newline char(s). False otherwise.
:return: None.
"""
decoded_line = self._decode_line(line=line)
self.on_new_line(line=decoded_line, is_full_line=is_full_line)
def _update_from_cached_incomplete_line(self, current_chunk):
"""
Concatenates (if necessary) previous chunk(s) of line and current.
:param current_chunk: line from connection (full line or incomplete one).
:return: Concatenated (if necessary) line from connection without newline char(s). Flag: True if line had
newline char(s), False otherwise.
"""
line = current_chunk
if self._last_not_full_line is not None:
line = "{}{}".format(self._last_not_full_line, line)
self._last_not_full_line = None
is_full_line = self.is_new_line(line)
if is_full_line:
line = self._strip_new_lines_chars(line)
else:
self._last_not_full_line = line
return line, is_full_line
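    # Example of the caching above: a chunk "user@ho" (no newline) returns
    # ("user@ho", False) and is cached; a following chunk "st $\n" then
    # returns ("user@host $", True).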
def is_new_line(self, line):
"""
Method to check if line has chars of new line at the right side
:param line: String to check
:return: True if any new line char was found, False otherwise
"""
if line.endswith(self._newline_chars):
return True
return False
def _strip_new_lines_chars(self, line):
"""
:param line: line from device
:return: line without new lines chars
"""
for char in self._newline_chars:
line = line.rstrip(char)
return line
def _consume_already_parsed_fragment(self):
"""
Clear already parsed fragment of line to not parse it twice when another fragment appears on device.
:return: Nothing
"""
self._last_not_full_line = None
def _decode_line(self, line):
"""
Decodes line if necessary. Put here code to remove colors from terminal etc.
:param line: line from device to decode.
:return: decoded line.
"""
return line
|
python
|
from __future__ import print_function
import argparse
from dataset import CarvanaDataset
from net import CarvanaFvbNet
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.optim as optim
from torch.autograd import Variable
parser = argparse.ArgumentParser(description='Carvana Front vs Back - PyTorch')
parser.add_argument('--dataroot', type=str,
help='location of dataset')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--which-epoch', type=int, default=1, metavar='N',
help='initialize the model with parameters from epoch N')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
test_ds = CarvanaDataset()
test_ds.initialize(args, phase='test')
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
test_loader = DataLoader(test_ds, batch_size=args.batch_size, drop_last=True, **kwargs)
model = CarvanaFvbNet()
model.load_state_dict(torch.load('./checkpoints/latest_{}.pth'.format(args.which_epoch)))
if args.cuda:
model.cuda()
def test():
model.eval()
print(model)
test_loss = 0
correct = 0
for data, target, dsidx in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
target = target.squeeze(1)
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
# sum up batch loss
test_loss += F.nll_loss(output, target, size_average=False).data[0]
# get the index of the max log-probability
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test()
|
python
|
# -*- coding: utf-8 -*-
"""This module implements regularized least square restoration methods
adapted to 3D data.
The two methods it gathers are
* **Cosine Least Square (CLS) algorithm**,
* **Post-LS Cosine Least Square (Post_LS_CLS) algorithm**.
"""
import time
import numpy as np
import numpy.linalg as lin
from ..tools import PCA
from ..tools import FISTA
from ..tools import dct
from ..tools import sec2str
def _proxg_cls(X, Lambda):
"""Implementation of the proximal operator of g(X)=Lambda*||X*Psi||_{2,1}
where Psi is the band-by-band DCT transform.
Arguments
---------
X: (m, n, l) numpy array
The data matrix.
Lambda: float
The Lambda parameter.
Returns
-------
(m, n, l) numpy array
Proximal operator of g(X).
float
Percentage of non-zero pgX DCT coefficients.
numpy array
List of (flattened) non-zero pgX DCT coefficients indexes.
"""
# Get shape and DCT transform
m, n, B = X.shape
A = dct.dct2d_bb(X)
# Vector containing the l2 norms of the DCT(X) spectra.
Normed = lin.norm(A, ord=2, axis=2)
# Repeat Normed along spectrum axis.
NormedR = np.repeat(Normed[:, :, np.newaxis], B, axis=2)
# Indices of the spectra that should not be 0 after thresholding.
Gamma = np.flatnonzero(Normed > Lambda)
nnz = np.nonzero(NormedR > Lambda)
# Create output matrix
At = np.zeros(A.shape)
# Thresholding
At[nnz] = (1 - Lambda / NormedR[nnz]) * A[nnz]
# Inverse DCT transformation
pgX = dct.idct2d_bb(At)
return (pgX, Gamma.size / (n * m), Gamma)
def CLS(Y, Lambda, mask=None, PCA_transform=True, PCA_th='auto', init=None,
Nit=None, verbose=True):
r"""Cosine Least Square algorithm
    The CLS algorithm denoises or reconstructs a multi-band image, possibly
    spatially sub-sampled, under the assumption that the content is spatially
    sparse in the DCT basis. It is well adapted to periodic data.
    This algorithm solves the following optimization problem:
.. math::
\gdef \X {\mathbf{X}}
\gdef \Y {\mathbf{Y}}
\gdef \H {\mathbf{H}}
\gdef \I {\mathcal{I}}
\hat{\X} = \underset{\X\in\mathbb{R}^{m \times n \times B}}{\arg\min}
\frac{1}{2}||\Y_\I - \X_\I||_\mathrm{F}^2 +
\lambda ||\X \Psi||_{2, 1}
    where :math:`\mathbf{Y}` are the corrupted data, :math:`\Psi`
    is the band-by-band DCT transform and :math:`\mathcal{I}` is
    the set of all sampled pixels.
This algorithm can perform a PCA pre-processing operation to estimate:
* the data subspace basis :math:`\mathbf{H}`,
* the subspace dimension :math:`R`.
    This is particularly useful to reduce the data dimension and the
execution time and to impose a data low-rank property.
Caution
-------
    It is strongly recommended to perform PCA before running the
algorithm core. This operation is integrated in this function.
In case this pre-processing step has already been done, set the
**PCA_transform** parameter to False to disable the PCA step
included in the SSS function. If PCA_transform is set to False, the
PCA_info parameter is required.
Arguments
---------
    Y: (m, n, l) numpy array
A 3D multi-band image.
Lambda: float
Regularization parameter.
mask: optional, None, (m, n) numpy array
A sampling mask which is True if the pixel is sampled.
Default is None for full sampling.
PCA_transform: optional, bool
Enables the PCA transformation if True, otherwise, no PCA
transformation is processed.
Default is True.
PCA_th: optional, int, str
The desired data dimension after dimension reduction.
Possible values are 'auto' for automatic choice, 'max' for maximum
value and an int value for user value.
Default is 'auto'.
init: optional, None, (m, n, l) numpy array
The algorithm initialization.
Default is None for random initialization.
Nit: optional, None, int
Number of iteration in case of inpainting. If None, the iterations
will stop as soon as the functional no longer evolve.
Default is None.
verbose: optional, bool
Indicates if information text is desired.
Default is True.
Returns
-------
(m, n, l) numpy array
The reconstructed/denoised multi-band image.
dict
A dictionary containing some extra info
Note
----
Infos in output dictionary:
* :code:`E` : In the case of partial reconstruction, the cost function
evolution over iterations.
* :code:`Gamma` : The array of kept coefficients (order is Fortran-style)
* :code:`nnz_ratio` : the ratio Gamma.size/(m*n)
* :code:`H`: the basis of the chosen signal subspace
"""
# Test and initializations
if (Lambda < 0):
raise ValueError('Lambda parameter is not positive.')
if mask is None:
mask = np.ones(Y.shape[:2])
if init is None:
init = np.random.randn(*Y.shape)
# Welcome message
if verbose:
print("-- CLS Reconstruction algorithm --")
# Dimension reduction
PCA_operator = PCA.PcaHandler(
Y, mask, PCA_transform=PCA_transform, PCA_th=PCA_th, verbose=verbose)
Y_PCA, PCA_th = PCA_operator.Y_PCA, PCA_operator.PCA_th
init = PCA_operator.direct(init)
#
# Center and normalize data
#
Y_m, Y_std = Y_PCA.mean(), Y_PCA.std()
init_m, init_std = init.mean(), init.std()
Y_PCA = (Y_PCA - Y_m)/Y_std
init = (init - init_m)/init_std
#
# Separates denoising vs. inpainting
#
m, n = Y_PCA.shape[:2]
N = mask.sum()
P = m*n
start = time.time()
if (N == P):
#
# Denoising
#
# In this case, the procedure consists in simply applying the g prox
# operator to Y_PCA.
#
X_PCA, nnz_ratio, Gamma = _proxg_cls(Y_PCA, Lambda)
# As the mean of Xwm had been removed before thresholding, the coeff
# 0 may have been removed from I.
# Let's put it back, if removed.
# This code has been removed as the data are centered before
# processing.
# if not np.isin(0,Gamma):
# Gamma = np.insert(Gamma,0,0)
# KeptRatio = Gamma.size/P
localInfo = {}
else:
#
# Inpainting
#
mask3 = np.tile(mask[:, :, np.newaxis], [1, 1, PCA_th])
L = 1
# import ipdb; ipdb.set_trace()
#
# FISTA solver
#
solver = FISTA.FISTA(
#
f=lambda X: 1 / 2 * lin.norm(((Y_PCA - X)*mask3).flatten())**2,
#
df=lambda X: (X - Y_PCA)*mask3,
#
L=L,
#
g=lambda X: Lambda * np.sum(
lin.norm(dct.dct2d_bb(X), 2, axis=2)),
#
pg=lambda X: _proxg_cls(X, Lambda / L)[0],
#
shape=Y_PCA.shape,
init=init,
Nit=Nit,
verbose=verbose)
X_PCA, InfoOut_FISTA = solver.execute()
# Get extra info
_, nnz_ratio, Gamma = _proxg_cls(X_PCA, 1e-10)
# Lambda can be whatever, it does not affect X_PCA.
# The Lambda parameter here is the level above which a coeff
# is no more considered to be zero.
# This level should not be 0 exactly as machine non-zero can
# appear when performing direct, then inverse DCT.
localInfo = {'E': InfoOut_FISTA['E']}
#
# Output managing.
#
X_PCA = (X_PCA * Y_std) + Y_m
Xhat = PCA_operator.inverse(X_PCA)
dt = time.time() - start
commonInfo = {'Gamma': Gamma,
'nnz_ratio': nnz_ratio,
'time': dt}
if PCA_transform:
PCA_info = {
'H': PCA_operator.H,
'PCA_th': PCA_operator.PCA_th,
'Ym': np.squeeze(PCA_operator.Ym[0, 0, :])
}
commonInfo['PCA_info'] = PCA_info
InfoOut = {**localInfo, **commonInfo}
if (verbose):
print("""Final ratio of nonzero coefficients is {}.
{} nonzero coefficients over {}.
Done in {}.
--
""".format(nnz_ratio, Gamma.size, P, sec2str.sec2str(dt)))
return Xhat, InfoOut
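# Hypothetical usage sketch (shapes, Lambda and Nit are purely illustrative):
#   Y = np.random.randn(64, 64, 16)       # (m, n, B) multi-band image
#   mask = np.random.rand(64, 64) > 0.8   # True where a pixel is sampled
#   Xhat, info = CLS(Y, Lambda=0.1, mask=mask, Nit=50)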
def _proxg_refitting(A, Gamma):
"""Sets all pixels not in Gamma to 0.
Arguments
---------
A: (m, n, l) numpy array
3D input image.
Gamma: numpy array
Thresholded pixels indexes.
Returns
-------
(m, n, l) numpy array
Thresholded array.
"""
# Output thresholded data.
At = np.zeros(A.shape)
# Only spectra whose index is in Gamma are copied.
i_arr, j_arr = np.unravel_index(Gamma, A.shape[:2])
At[i_arr, j_arr, :] = A[i_arr, j_arr, :]
return At
def Post_LS_CLS(Y, Lambda, mask=None, PCA_transform=True, PCA_th='auto',
init=None, Nit=None, verbose=True):
"""Post-Lasso CLS algorithm.
This algorithms consists in applying CLS to restore the data and
determine the data support in DCT basis. A post-least square
optimization is performed to reduce the coefficients bias.
Arguments
---------
    Y: (m, n, l) numpy array
A 3D multi-band image.
Lambda: float
Regularization parameter.
mask: optional, None, (m, n) numpy array
A sampling mask which is True if the pixel is sampled.
Default is None for full sampling.
PCA_transform: optional, bool
Enables the PCA transformation if True, otherwise, no PCA
transformation is processed.
Default is True.
PCA_th: optional, int, str
The desired data dimension after dimension reduction.
Possible values are 'auto' for automatic choice, 'max' for maximum
value and an int value for user value.
Default is 'auto'.
init: optional, None, (m, n, l) numpy array
The algorithm initialization.
Default is None for random initialization.
Nit: optional, None, int
Number of iteration in case of inpainting. If None, the iterations
will stop as soon as the functional no longer evolve.
Default is None.
verbose: optional, bool
Indicates if information text is desired.
Default is True.
Returns
-------
(m, n, l) numpy array
The reconstructed/denoised multi-band image.
tuple
        A 2-tuple whose elements are the CLS and refitting information
dictionaries.
Note
----
Infos in output dictionary:
* :code:`E_CLS` : In the case of partial reconstruction, the cost
function evolution over iterations.
* :code:`E_post_ls` : In the case of partial reconstruction, the
cost function evolution over iterations.
* :code:`Gamma` : The array of kept coefficients
(order is Fortran-style)
* :code:`nnz_ratio` : the ratio Gamma.size/(m*n)
* :code:`H`: the basis of the chosen signal subspace
"""
    # Welcome message
    if verbose:
        print("Post-Lasso CLS Reconstruction algorithm...")
    # Default mask (full sampling) and random initialization, as in CLS
    if mask is None:
        mask = np.ones(Y.shape[:2])
    if init is None:
        init = np.random.randn(*Y.shape)
    #
    # Dimension reduction
    #
    PCA_operator = PCA.PcaHandler(
        Y, mask, PCA_transform=PCA_transform, PCA_th=PCA_th, verbose=verbose)
Y_PCA, PCA_th = PCA_operator.Y_PCA, PCA_operator.PCA_th
init = PCA_operator.direct(init)
#
# Center and normalize data
#
Y_m, Y_std = Y_PCA.mean(), Y_PCA.std()
init_m, init_std = init.mean(), init.std()
Y_PCA = (Y_PCA - Y_m)/Y_std
init = (init - init_m)/init_std
#
# CLS reconstruction
#
Xhat_PCA, InfoOut_CLS = CLS(
Y_PCA, Lambda, mask=mask, PCA_transform=False, PCA_th=PCA_th,
init=init, Nit=Nit, verbose=verbose)
#
# Refitting
#
Gamma = InfoOut_CLS['Gamma']
mask3 = np.tile(mask[:, :, np.newaxis], [1, 1, PCA_th])
# FISTA solver
#
solver = FISTA.FISTA(
#
f=lambda A: 1 / 2 * lin.norm((Y_PCA - dct.idct2d_bb(A))*mask3)**2,
#
df=lambda A: dct.dct2d_bb((dct.idct2d_bb(A) - Y_PCA)*mask3),
#
L=1,
#
g=lambda A: 0,
#
pg=lambda A: _proxg_refitting(A, Gamma),
#
shape=Y_PCA.shape,
init=init,
Nit=Nit,
verbose=verbose)
A_PCA, InfoOut_FISTA = solver.execute()
#
# Output managing.
#
InfoOut_CLS['E_CLS'] = InfoOut_CLS.pop('E')
InfoOut_CLS['E_post_ls'] = InfoOut_FISTA['E']
if PCA_transform:
PCA_info = {
'H': PCA_operator.H,
'PCA_th': PCA_operator.PCA_th,
'Ym': np.squeeze(PCA_operator.Ym[0, 0, :])
}
InfoOut_CLS['PCA_info'] = PCA_info
X_PCA = dct.idct2d_bb(A_PCA)
X_PCA = (X_PCA * Y_std) + Y_m
Xhat = PCA_operator.inverse(X_PCA)
return Xhat, InfoOut_CLS
|
python
|
# !/usr/bin/env python
# coding:utf-8
# Author:XuPengTao
# Date: 2020/4/25
from ssd.config import cfg
from ssd.modeling.detector import build_detection_model
import os
obtain_num_parameters = lambda model: sum([param.nelement() for param in model.parameters()])
def model_size(model):  # for now cannot handle quantization of the last layer
backbone=model.backbone
# print(backbone)
    count_qw = 0  # number of quantized parameters
    qw_size = 0  # size of the quantized parameters in bytes
for i in range(len(backbone.module_list)):
# print(backbone.module_defs[i])
if backbone.module_defs[i]["type"]=='convolutional':
if ("quantization" in backbone.module_defs[i].keys()) and (backbone.module_defs[i]["quantization"]=='1'):
conv = backbone.module_list[i][0]
W_bits=conv.w_bits
W_B=W_bits/8 #Byte=bit/8
qw_size+=W_B*conv.weight.data.flatten().shape[0]
count_qw+=conv.weight.data.flatten().shape[0]
model_size=(obtain_num_parameters(model)-count_qw)*4+qw_size
model_size=model_size/1024.0/1024.0
return model_size
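# Rough arithmetic behind model_size: 1e6 float32 weights take
# 1e6 * 4 / 1024 / 1024, about 3.81 MB, while the same weights quantized to
# 8 bits count as 1e6 * 1 / 1024 / 1024, about 0.95 MB.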
if __name__=='__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cfg_path="configs/vgg_bn_ssd300_hand_fpga_cutPredict.yaml"
cfg.merge_from_file(cfg_path)
cfg.MODEL.BACKBONE.PRETRAINED = False
model=build_detection_model(cfg)
print(model)
print(f'model_paras:{obtain_num_parameters(model)/1024.0/1024.0}M')
size=model_size(model=model)
print(f'model_size:{size}MB')
|
python
|
dic = {
1 : 4,
2 : 4.5,
3 : 5,
4 : 2,
5 : 1.5,
}
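# The item code (keys 1-5) maps to the unit price in R$; the input line is
# "<code> <quantity>" and the total is unit price * quantity.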
custos = 0
a = [int(x) for x in input().split()]
custos += dic[a[0]]
custos *= a[1]
print("Total: R$ {:0.2f}".format(custos))
|
python
|
import discord
from discord.ext import commands
import os
import aiohttp
import json
from random import choice
class Pictures(commands.Cog):
""" Category for getting random pictures from the internet. """
def __init__(self, client: commands.Bot) -> None:
""" Class init method. """
self.client = client
self.session = aiohttp.ClientSession()
@commands.Cog.listener()
async def on_ready(self) -> None:
""" Tells when the cog is ready to go. """
print("Pictures cog is online!")
@commands.command(aliases=['tak'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def cow(self, ctx) -> None:
""" Gets a random Cow image. """
author: discord.Member = ctx.author
cow_token: str = os.getenv('COW_API_TOKEN')
req: str = f'https://api.unsplash.com/search/photos?client_id={cow_token}&?&query=cow&?format=json'
async with self.session.get(req) as response:
if response.status != 200:
return await ctx.send(f"**Something went wrong with your request, {author.mention}!**")
data = json.loads(await response.read())
pics = data['results']
embed: discord.Embed = discord.Embed(
title="__Cow__",
description=f"Showing 1 random Cow picture out of {len(pics)} results.",
color=author.color,
timestamp=ctx.message.created_at
)
embed.set_image(url=choice(pics)['urls']['full'])
embed.set_footer(text=f"Requested by {author}", icon_url=author.display_avatar)
await ctx.send(embed=embed)
@commands.command(aliases=['httpcat', 'hc', 'http'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def http_cat(self, ctx, code: int = None) -> None:
""" Gets an HTTP cat image.
:param code: The HTTP code to search the image. """
if not code:
return await ctx.send("**Please, inform an HTTP code!**")
code_list = [
100, 101, 102, 200, 201, 202, 204, 206, 207, 300, 301, 302, 303, 304, 305, 307,
400, 401, 402, 403, 404, 405, 406, 408, 409, 410, 411, 412, 413, 414, 415, 416,
417, 418, 420, 421, 422, 423, 424, 425, 426, 429, 431, 444, 450, 451, 499, 500,
501, 502, 503, 504, 506, 507, 508, 509, 510, 511, 599]
if not code in code_list:
return await ctx.send(
content="**Invalid code, please type one of these!**",
embed=discord.Embed(description=f"```py\n{', '.join(map(lambda e: str(e), code_list))}```"))
req = f'https://http.cat/{code}'
try:
embed = discord.Embed(
title="__HTTP Cat__",
url=req)
embed.set_image(url=req)
await ctx.send(embed=embed)
except Exception as e:
print(e)
return await ctx.send("**Something went wrong with it!**")
def setup(client: commands.Bot) -> None:
""" Cog's setup function. """
client.add_cog(Pictures(client))
|
python
|
import emoji
import tempfile
from time import sleep
from functools import wraps
import random
import string
from typing import Callable
import telegram
import shutil
from django.conf import settings
from telegram.error import RetryAfter, NetworkError
from telegram import Bot
from app.conference.models import Slide
from telegram.message import Message
def get_bot() -> telegram.Bot:
return telegram.Bot(token=settings.TELEGRAM_TOKEN)
def generate_name(length=15):
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(length))
def copy_file(name, src, dst):
parts = src.split(".")
folder_dst = f"{dst}/{name}.{parts[1]}"
shutil.copy(src, folder_dst)
return folder_dst
class FileBigException(BaseException):
def __init__(self, message) -> None:
self.message = message
class TelegramTooManyRetriesError(Exception):
message = "Too many retries calling Telegram API."
MAX_TRIES = 3
WAIT_TIME = 15
def retry_after(func: Callable) -> Callable:
@wraps(func)
def wrapper(*args: tuple, **kwargs: dict):
tries = 0
wait_time = WAIT_TIME
while tries < MAX_TRIES:
try:
return func(*args, **kwargs)
except (RetryAfter, NetworkError):
tries += 1
wait_time += WAIT_TIME
sleep(wait_time)
continue
raise
raise TelegramTooManyRetriesError
return wrapper
def check_file_size(max_size, file_size):
if max_size and file_size:
in_m = max_size / 1048576
if file_size > max_size:
raise FileBigException(message={"max_size": in_m})
def process_image(slide: Slide, data, type):
bot = get_bot()
if type == "IMAGE":
return slide.save_image_data(data)
file = download_file(bot, file_id=data["file_id"], path_file=tempfile.gettempdir(), ext="jpg")
message: Message = send_photo(bot=bot, chat_id=settings.GROUP_UPLOAD_FILES, file_path=file)
return slide.save_image_data(message.photo.pop().to_dict())
def process_audio(slide: Slide, data, type):
bot = get_bot()
if type == "VOICE":
return slide.save_voice_data(data)
file = download_file(bot, file_id=data["file_id"], path_file=tempfile.gettempdir(), ext="mp3")
message: Message = send_voice(bot=bot, chat_id=settings.GROUP_UPLOAD_FILES, file_path=file)
return slide.save_voice_data(message.voice.to_dict())
def download_file(bot, file_id, path_file, file_name=None, file_size=None, max_size=None, ext=None, **kwargs):
check_file_size(max_size=max_size, file_size=file_size)
if file_name:
ext = file_name.split(".")[1]
name = generate_name()
file_name = f"{path_file}/{name}.{ext}"
file = bot.getFile(file_id)
file.download(file_name)
return file_name
@retry_after
def send_photo(bot, chat_id, file_path):
file = open(file_path, "rb")
return bot.send_photo(chat_id=chat_id, photo=file)
@retry_after
def send_photo_by_id(bot: Bot, chat_id, file_id):
return bot.send_photo(chat_id=chat_id, photo=file_id)
@retry_after
def send_document(bot, chat_id, file_path):
file = open(file_path, "rb")
return bot.send_document(chat_id=chat_id, document=file)
@retry_after
def send_voice(bot, chat_id, file_path):
file = open(file_path, "rb")
return bot.send_voice(chat_id=chat_id, voice=file)
@retry_after
def send_voice_by_id(bot: Bot, chat_id, file_id):
return bot.send_voice(chat_id=chat_id, voice=file_id)
def send_message(bot, chat_id, text, **kwargs):
text = emoji.emojize(text, use_aliases=True)
bot.send_message(chat_id=chat_id, text=text, **kwargs)
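# Usage sketch (hypothetical chat id): like the senders above, any Telegram API call
# can be wrapped with @retry_after so transient RetryAfter/NetworkError failures are
# retried with an increasing wait before TelegramTooManyRetriesError is raised, e.g.
#
#   send_message(get_bot(), chat_id=123456789, text="Upload finished :tada:")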
|
python
|
def bubble_sort(arr):
for i in range(len(arr)):
swap = False
for j in range(len(arr)-1-i):
if arr[j]>arr[j+1]:
arr[j],arr[j+1]=arr[j+1],arr[j]
swap=True
if swap==False:
break
return arr
array=[8,5,2,4,3,2]
print(bubble_sort(array))
|
python
|
from rest_framework import serializers
from .UbicacionSerializer import UbicacionSerializer
from .HorarioSerializer import HorarioSerializer
from sucursal_crud_api.models import Sucursal, Ubicacion, Horario
class SucursalSerializer(serializers.ModelSerializer):
ubicacion = UbicacionSerializer()
disponibilidad = HorarioSerializer()
class Meta:
model = Sucursal
fields = ['id','nombre','direccion','ubicacion','disponibilidad']
def create(self, validated_data):
ubicacion_data = validated_data.pop('ubicacion')
ubicacion = Ubicacion.objects.create(**ubicacion_data)
disponibilidad_data = validated_data.pop('disponibilidad')
disponibilidad = Horario.objects.create(**disponibilidad_data)
sucursal = Sucursal.objects.create(**validated_data, ubicacion = ubicacion, disponibilidad = disponibilidad)
return sucursal
def update(self, instance, validated_data):
ubicacion_data = validated_data.pop('ubicacion')
ubicacion = instance.ubicacion
ubicacion.latitud = ubicacion_data.get('latitud', ubicacion.latitud)
        ubicacion.longitud = ubicacion_data.get('longitud', ubicacion.longitud)
        ubicacion.save()
disponibilidad_data = validated_data.pop('disponibilidad')
disponibilidad = instance.disponibilidad
disponibilidad.dia = disponibilidad_data.get('dia',disponibilidad.dia)
disponibilidad.apertura = disponibilidad_data.get('apertura',disponibilidad.apertura)
        disponibilidad.cierre = disponibilidad_data.get('cierre',disponibilidad.cierre)
        disponibilidad.save()
instance.direccion = validated_data.get('direccion', instance.direccion)
instance.nombre = validated_data.get('nombre', instance.nombre)
instance.save()
return instance
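# Usage sketch (illustrative payload): the nested serializers above accept a single
# JSON document for both create and update, e.g.
#
#   SucursalSerializer(data={
#       "nombre": "Sucursal Centro",
#       "direccion": "Av. Principal 123",
#       "ubicacion": {"latitud": 4.60971, "longitud": -74.08175},
#       "disponibilidad": {"dia": "Lunes", "apertura": "08:00", "cierre": "18:00"},
#   })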
|
python
|
# -*- coding: utf-8 -*-
from app.HuobiAPI import HuobiAPI
from app.authorization import api_key,api_secret
from data.runBetData import RunBetData
from app.dingding import Message
from data.calcIndex import CalcIndex
import time
binan = HuobiAPI(api_key,api_secret)
runbet = RunBetData()
msg = Message()
index = CalcIndex()
class Run_Main():
def __init__(self):
self.coinList = runbet.get_coinList()
pass
def pre_data(self,cointype):
        '''Fetch the base info for a trading pair from data.json
        cointype: trading pair
        '''
        grid_buy_price = runbet.get_buy_price(cointype)  # current grid buy price
        grid_sell_price = runbet.get_sell_price(cointype)  # current grid sell price
        quantity = runbet.get_quantity(cointype)  # buy quantity
        step = runbet.get_step(cointype)  # current step
        cur_market_price = binan.get_ticker_price(cointype.lower())  # current market price of the pair
right_size = len(str(cur_market_price).split(".")[1])
return [grid_buy_price,grid_sell_price,quantity,step,cur_market_price,right_size]
def loop_run(self):
print("当前拥有以下账号:\r\n")
msg.show_accounts()
time.sleep(3)
print("模型运行开始")
while True:
for coinType in self.coinList:
[grid_buy_price,grid_sell_price,quantity,step,cur_market_price,right_size] = self.pre_data(coinType)
# aa = index.calcAngle(coinType, "5min" ,False ,right_size)
# print([coinType ,grid_buy_price,grid_sell_price,quantity,step,cur_market_price,right_size,aa])
                if grid_buy_price >= cur_market_price : #and index.calcAngle(coinType,"5min",False,right_size): # buy-price condition met?
buy_usd = round( cur_market_price * quantity,2)
if(buy_usd<5.0):buy_usd=5.0
res = msg.buy_market_msg(coinType, buy_usd)
                    if type(res)==int and res>10000*10000*1000: # order placed successfully
success_price = cur_market_price
                        # adjust the take-profit/stop-loss ratio based on the current ATR
# runbet.set_ratio(coinType)
runbet.set_record_price(coinType,success_price)
                        runbet.modify_price(coinType,success_price, step+1,cur_market_price) # update the price and current step in data.json
                        time.sleep(60*2) # pause for 2 minutes after placing the order
                    else:
                        time.sleep(60*2) # pause for 2 minutes after placing the order
break
                elif grid_sell_price < cur_market_price :#and index.calcAngle(coinType,"5min",True,right_size): # sell-price condition met?
                    if step==0: # step=0: avoid missing a rally by following the price up
runbet.modify_price(coinType,grid_sell_price,step,cur_market_price)
else:
last_price = runbet.get_record_price(coinType)
sell_amount = runbet.get_quantity(coinType,False)
porfit_usdt = (cur_market_price - last_price) * sell_amount
res = msg.sell_market_msg(coinType, runbet.get_quantity(coinType,False),porfit_usdt)
                        if type(res)==int and res>10000*10000*1000: # order placed successfully
                            # runbet.set_ratio(coinType) # enable dynamic ratio adjustment
                            runbet.modify_price(coinType,runbet.get_record_price(coinType), step - 1,cur_market_price)
                            runbet.remove_record_price(coinType)
                            time.sleep(60*1) # pause for 1 minute after placing the order
                        else:
                            time.sleep(60*1) # pause for 1 minute after placing the order
break
else:
print("币种:{coin}当前市价:{market_price}。{buy_price} {sell_price} 未能满足交易,继续运行".format(market_price = cur_market_price,coin=coinType,buy_price = grid_buy_price, sell_price= grid_sell_price))
time.sleep(1)
if __name__ == "__main__":
instance = Run_Main()
try:
instance.loop_run()
except Exception as e:
print(str(e))
error_info = "报警:做多网格,服务停止"
msg.dingding_warn(error_info)
# For debugging, run the block below to see the error; use the block above for normal runs
# if __name__ == "__main__":
# instance = Run_Main()
|
python
|
# Copyright 2019-present NAVER Corp.
# CC BY-NC-SA 3.0
# Available only for non-commercial use
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
from nets.sampler import FullSampler
class CosimLoss (nn.Module):
""" Try to make the repeatability repeatable from one image to the other.
"""
def __init__(self, N=16):
nn.Module.__init__(self)
self.name = f'cosim{N}'
self.patches = nn.Unfold(N, padding=0, stride=N//2)
def extract_patches(self, sal):
patches = self.patches(sal).transpose(1,2) # flatten
patches = F.normalize(patches, p=2, dim=2) # norm
return patches
def forward(self, repeatability, aflow, **kw):
B,two,H,W = aflow.shape
assert two == 2
# normalize
sali1, sali2 = repeatability
grid = FullSampler._aflow_to_grid(aflow)
sali2 = F.grid_sample(sali2, grid, mode='bilinear', padding_mode='border')
patches1 = self.extract_patches(sali1)
patches2 = self.extract_patches(sali2)
cosim = (patches1 * patches2).sum(dim=2)
return 1 - cosim.mean()
class PeakyLoss (nn.Module):
""" Try to make the repeatability locally peaky.
Mechanism: we maximize, for each pixel, the difference between the local mean
and the local max.
"""
def __init__(self, N=16):
nn.Module.__init__(self)
self.name = f'peaky{N}'
        assert N % 2 == 0, 'N must be even'
self.preproc = nn.AvgPool2d(3, stride=1, padding=1)
self.maxpool = nn.MaxPool2d(N+1, stride=1, padding=N//2)
self.avgpool = nn.AvgPool2d(N+1, stride=1, padding=N//2)
def forward_one(self, sali):
sali = self.preproc(sali) # remove super high frequency
return 1 - (self.maxpool(sali) - self.avgpool(sali)).mean()
def forward(self, repeatability, **kw):
sali1, sali2 = repeatability
return (self.forward_one(sali1) + self.forward_one(sali2)) /2
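# Minimal usage sketch (not part of the original file): exercising PeakyLoss on random
# repeatability maps of shape (B, 1, H, W); the shapes and N below are illustrative.
if __name__ == '__main__':
    sali1 = torch.rand(2, 1, 64, 64)
    sali2 = torch.rand(2, 1, 64, 64)
    loss = PeakyLoss(N=16)(repeatability=(sali1, sali2))
    print(f'peaky loss: {loss.item():.4f}')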
|
python
|
import subprocess
from os.path import join
prefix = "../split/results/"
targets = [
"mergepad_0701_2018/",
"mergepad_0701_2019/",
"mergepad_0701_2020/",
"mergepad_0701_2021/",
"mergepad_0701_2022/",
"mergepad_0701_2023/",
"mergepad_0701_2024/",
"mergepad_0701_2025/",
"mergepad_0701_2026/",
"mergepad_0701_2027/",
"mergepad_0701_2028/",
"mergepad_0701_2029/",
"mergepad_0701_2030/",
"mergepad_0701_2031/",
"mergepad_0701_2032/",
]
# print("Making datasets...")
# for target in targets:
# extract_cmd = "python3 makedata.py -mode test " + target
# subprocess.call(extract_cmd, shell =True)
for target in targets:
target = join(prefix, target)
print("Evaluating {}".format(target))
fname = target.split('/')[-2]
ex_cmd = "python3 makedata.py "+ target + " -mode test"
# head_cmd = "python3 evaluate.py -m ./models/ranpad2_0610_2057_norm.h5 -p ./results/"+ fname + "_head.npy"
head_cmd = "python3 evaluate.py -m ./models/attacktrain.h5 -p ./results/"+ fname + "_head.npy"
other_cmd = "python3 evaluate.py -m ./models/attacktrain.h5 -p ./results/"+ fname + "_other.npy"
subprocess.call(ex_cmd, shell =True)
subprocess.call(head_cmd, shell =True)
subprocess.call(other_cmd, shell =True)
print("\n")
|
python
|
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.8'
_lr_method = 'LALR'
_lr_signature = '1C5696F6C19A1A5B79951B30D8139054'
_lr_action_items = {'OP_ADD':([11,],[18,]),'OP_SUB':([11,],[19,]),'LPAREN':([0,1,4,6,7,9,10,12,13,14,15,16,18,19,20,21,22,23,24,26,27,28,29,30,31,32,33,35,36,37,38,39,40,41,42,43,44,45,46,47,48,51,52,53,54,57,58,],[5,-5,-4,5,11,-2,-3,11,-9,-26,-28,-27,11,11,34,34,11,11,11,-7,-8,-6,11,-30,11,34,-13,-15,-17,34,-11,11,11,-21,11,11,-22,-29,-23,-12,-14,-10,-25,-20,-24,-19,-16,]),'$end':([1,3,4,6,9,10,26,28,],[-5,0,-4,-1,-2,-3,-7,-6,]),'RBRACE':([12,13,14,15,16,27,33,38,41,44,46,47,51,52,53,54,57,],[26,-9,-26,-28,-27,-8,-13,-11,-21,-22,-23,-12,-10,-25,-20,-24,-19,]),'IMPORT':([5,],[8,]),'INPUT':([11,],[21,]),'OP_DIV':([11,],[22,]),'LBRACE':([0,1,4,6,9,10,26,28,],[2,-5,-4,2,-2,-3,-7,-6,]),'RPAREN':([14,15,16,17,20,21,23,29,30,31,32,33,35,36,37,38,39,40,41,42,44,45,46,47,48,51,52,53,54,55,56,57,58,],[-26,-28,-27,28,33,38,41,44,-30,46,47,-13,-15,-17,51,-11,52,53,-21,54,-22,-29,-23,-12,-14,-10,-25,-20,-24,57,58,-19,-16,]),'STRING':([7,12,13,14,15,16,18,19,22,23,24,27,29,30,31,33,38,39,40,41,42,43,44,45,46,47,51,52,53,54,57,],[15,15,-9,-26,-28,-27,15,15,15,15,15,-8,15,-30,15,-13,-11,15,15,-21,15,15,-22,-29,-23,-12,-10,-25,-20,-24,-19,]),'ID':([2,7,8,11,12,13,14,15,16,18,19,20,21,22,23,24,25,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,57,58,],[7,14,17,23,14,-9,-26,-28,-27,14,14,36,36,14,14,14,43,-8,14,-30,14,36,-13,49,-15,-17,36,-11,14,14,-21,14,14,-22,-29,-23,-12,-14,-18,56,-10,-25,-20,-24,-19,-16,]),'OP_MUL':([11,],[24,]),'ASSIGN':([11,],[25,]),'OUTPUT':([11,],[20,]),'NUMBER':([7,12,13,14,15,16,18,19,22,23,24,27,29,30,31,33,38,39,40,41,42,43,44,45,46,47,51,52,53,54,57,],[16,16,-9,-26,-28,-27,16,16,16,16,16,-8,16,-30,16,-13,-11,16,16,-21,16,16,-22,-29,-23,-12,-10,-25,-20,-24,-19,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'component':([0,6,],[1,9,]),'parameter_list':([18,19,22,23,24,],[29,31,39,40,42,]),'program':([0,],[3,]),'import_statement':([0,6,],[4,10,]),'expression':([7,12,18,19,22,23,24,29,31,39,40,42,43,],[13,27,30,30,30,30,30,45,45,45,45,45,55,]),'declaration':([20,21,32,37,],[35,35,48,48,]),'type':([34,],[50,]),'statement_list':([0,],[6,]),'declaration_list':([20,21,],[32,37,]),'expression_list':([7,],[12,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> program","S'",1,None,None,None),
('program -> statement_list','program',1,'p_program','parser.py',8),
('statement_list -> statement_list component','statement_list',2,'p_statement_list','parser.py',13),
('statement_list -> statement_list import_statement','statement_list',2,'p_statement_list','parser.py',14),
('statement_list -> import_statement','statement_list',1,'p_statement_list','parser.py',15),
('statement_list -> component','statement_list',1,'p_statement_list','parser.py',16),
('import_statement -> LPAREN IMPORT ID RPAREN','import_statement',4,'p_import_statement','parser.py',24),
('component -> LBRACE ID expression_list RBRACE','component',4,'p_component','parser.py',28),
('expression_list -> expression_list expression','expression_list',2,'p_expression_list','parser.py',32),
('expression_list -> expression','expression_list',1,'p_expression_list','parser.py',33),
('expression -> LPAREN INPUT declaration_list RPAREN','expression',4,'p_input','parser.py',41),
('expression -> LPAREN INPUT RPAREN','expression',3,'p_input','parser.py',42),
('expression -> LPAREN OUTPUT declaration_list RPAREN','expression',4,'p_output','parser.py',49),
('expression -> LPAREN OUTPUT RPAREN','expression',3,'p_output','parser.py',50),
('declaration_list -> declaration_list declaration','declaration_list',2,'p_declaration_list','parser.py',57),
('declaration_list -> declaration','declaration_list',1,'p_declaration_list','parser.py',58),
('declaration -> LPAREN type ID RPAREN','declaration',4,'p_declaration','parser.py',66),
('declaration -> ID','declaration',1,'p_declaration','parser.py',67),
('type -> ID','type',1,'p_type','parser.py',74),
('expression -> LPAREN ASSIGN ID expression RPAREN','expression',5,'p_assign','parser.py',78),
('expression -> LPAREN ID parameter_list RPAREN','expression',4,'p_funcexpr','parser.py',82),
('expression -> LPAREN ID RPAREN','expression',3,'p_funcexpr','parser.py',83),
('expression -> LPAREN OP_ADD parameter_list RPAREN','expression',4,'p_op_add_expression','parser.py',90),
('expression -> LPAREN OP_SUB parameter_list RPAREN','expression',4,'p_op_sub_expression','parser.py',93),
('expression -> LPAREN OP_MUL parameter_list RPAREN','expression',4,'p_op_mul_expression','parser.py',96),
('expression -> LPAREN OP_DIV parameter_list RPAREN','expression',4,'p_op_div_expression','parser.py',99),
('expression -> ID','expression',1,'p_exprid','parser.py',103),
('expression -> NUMBER','expression',1,'p_literal','parser.py',107),
('expression -> STRING','expression',1,'p_literal','parser.py',108),
('parameter_list -> parameter_list expression','parameter_list',2,'p_parameter_list','parser.py',112),
('parameter_list -> expression','parameter_list',1,'p_parameter_list','parser.py',113),
]
|
python
|
# SCH1001.sh --> JB_PARTNER_DETAILS.py
#**************************************************************************************************************
#
# Created by : Vinay Kumbakonam
# Modified by : bibin
# Version : 1.1
#
# Description :
#
# 1. Reads the 'Partner Summary' worksheet from partner details xlsx.
# 2. Writes into Oracle table 'PARTNER_DETAILS'.
#
# Initial Creation:
#
# Date (YYYY-MM-DD) Change Description
# ----------------- ------------------
# 2018-09-28 Initial creation
# 2018-10-30 Getting DB schema, db_prop_key_load from Config file, log DIR
#
#**************************************************************************************************************
# Importing required Lib
from dependencies.spark import start_spark
from dependencies.EbiReadWrite import EbiReadWrite
import logging
import sys
from time import gmtime, strftime
import cx_Oracle
import py4j
# Spark logging
logger = logging.getLogger(__name__)
# Date Formats
start_date = "'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
log_date = strftime("%Y%m%d", gmtime())
# Job Naming Details
script_name = "SCH1001.sh"
app_name = 'JB_PARTNER_DETAILS'
log_filename = app_name + '_' + log_date + '.log'
# Worksheet Read Details
read_file_name = '/home/spark/v_kumbakonam/Partner_Details.xlsx'
sheet_name = 'Partner Summary'
use_header = 'true'
infer_schema = 'true'
src_count = '0'
# Target Table Details
write_table = 'PARTNER_DETAILS'
# Oracle write Details
save_mode = 'append'
dest_count = '0'
# Main method
def main():
try:
# start Spark application and get Spark session, logger and config
spark, config = start_spark(
app_name=app_name)
db_prop_key_load = config['DB_PROP_KEY_LOAD']
db_schema = config['DB_SCHEMA']
log_file = config['LOG_DIR_NAME'] + "/" + log_filename
target_table_name = db_schema+"."+write_table
# Create class Object
Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger)
# Calling Job Class method --> extract_data_worksheet()
dataFrame = Ebi_read_write_obj.extract_data_worksheet(read_file_name,sheet_name,use_header,infer_schema)
# Calling Lookup table
lkp_table = Ebi_read_write_obj.extract_data_oracle(target_table_name,db_prop_key_load)
#Creating Temp Table
dataFrame.createOrReplaceTempView("partner_details_temp")
lkp_table.createOrReplaceTempView("partner_details_lkp")
# Joining Source file data to Lookup table
lkp_query = spark.sql("select b.PARTNER_ID as PARTNER_ID_LOOKUP\
, upper(a.`Partner Name`)PARTNER_NAME,a.`Partner Id` as PARTNER_ID\
, a.`Cmat Company Id` as CMAT_ID, a.ADDRESS_LINE1, a.ADDRESS_LINE2\
, a.ADDRESS_LINE3, a.CITY, a.STATE, a.ZIP, a.COUNTRY, a.Level as PARTNER_LEVEL\
, a.`Home GEO` as HOME_GEO, a.`CMAT NAGP name` as CMAT_NAGP_NAME\
, a.`Partner Type` as PARTNER_TYPE, a.`Primary Business Right` as PRIMARY_BUSINESS_MODEL\
, a.`Business Right Status` as BUSINESS_MODEL_STATUS \
,a.`Primary CDM` as PRIMARY_CDM, a.`Primary Purchasing Channel` as PRIMARY_PURCHASING_CHANNEL \
from partner_details_temp a \
left outer join partner_details_lkp b on (a.`Partner Id`=b.PARTNER_ID)")
lkp_query.createOrReplaceTempView("lkp_query_table")
partner_details = spark.sql("select PARTNER_NAME, PARTNER_ID, CMAT_ID\
, ADDRESS_LINE1, ADDRESS_LINE2\
, ADDRESS_LINE3, CITY, STATE\
, ZIP, COUNTRY, PARTNER_LEVEL\
, HOME_GEO, CMAT_NAGP_NAME\
, PARTNER_TYPE, PRIMARY_BUSINESS_MODEL\
, BUSINESS_MODEL_STATUS,PRIMARY_CDM\
, PRIMARY_PURCHASING_CHANNEL,cast("+start_date+" as date)ENTRY_DATE\
from lkp_query_table \
where PARTNER_ID_LOOKUP IS NULL")
partner_details.show(5)
# Checking file_record count
src_count=str(partner_details.count())
print(" \n Source Count : "+src_count+"\n")
# Calling Job Class method --> load_data_oracle()
Ebi_read_write_obj.load_data_oracle(partner_details,target_table_name,save_mode,db_prop_key_load)
# getTargetDataCount
dest_count = str(Ebi_read_write_obj.get_target_data_count(target_table_name,db_prop_key_load))
print("\n Target Table Count(After write) : " +dest_count+"\n")
end_date="'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"
# Log Format
data_format = "JOB START DT : "+start_date+" | SCRIPT NAME : "+script_name+" | JOB : "+app_name+" | SRC COUNT : "+src_count+" | TGT COUNT : "+dest_count+" | JOB END DT : "+end_date+" | STATUS : %(message)s"
Ebi_read_write_obj.create_log(data_format,log_file,logger)
logger.info("Success")
print(" \n Job "+app_name+" Succeed \n")
except Exception as err:
        # Write exception details to the spark log or console
        end_date = "'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'"  # ensure end_date exists if the failure happened before it was set
data_format = "JOB START DT : "+start_date+" | SCRIPT NAME : "+script_name+" | JOB : "+app_name+" | SRC COUNT : "+src_count+" | TGT COUNT : "+dest_count+" | JOB END DT : "+end_date+" | STATUS : %(message)s"
Ebi_read_write_obj.create_log(data_format,log_file,logger)
logger.info("[Error] Failed")
print(" \n Job "+app_name+" Failed\n")
logger.error("\n __main__ SCH_job --> Exception-Traceback :: " + str(err))
raise
# Entry point for script
if __name__ == "__main__":
# Calling main() method
main()
|
python
|
#!/usr/bin/env python2.7
from numpy import *
from pylab import *
from matplotlib import rc, rcParams
trie = genfromtxt('../data/trie_search_found.output')
tst = genfromtxt('../data/tst_search_found.output')
radix = genfromtxt('../data/radix_search_found.output')
_map = genfromtxt('../data/map_search_found.output')
umap = genfromtxt('../data/umap_search_found.output')
######## TIME ########
plot(trie[:,0], trie[:,1], '-o', label='Trie')
hold(True)
plot(tst[:,0], tst[:,1], '-o', label='Ternary Search Tree')
plot(radix[:,0], radix[:,1], '-o', label='Radix Tree')
plot(_map[:,0], _map[:,1], '-o', label='STL ordered Map')
plot(umap[:,0], umap[:,1], '-o', label='STL unordered Map')
xlabel('Max length of the string')
ylabel('Time(ms)')
title('Search test (found)')
legend(loc='best')
grid(True)
savefig('../images/search_found/random/search_found_time_ALL.eps')
hold(False)
plot(tst[:,0], tst[:,1], '-o', label='Ternary Search Tree')
hold(True)
plot(radix[:,0], radix[:,1], '-o', label='Radix Tree')
plot(_map[:,0], _map[:,1], '-o', label='STL ordered Map')
plot(umap[:,0], umap[:,1], '-o', label='STL unordered Map')
xlabel('Max length of the string')
ylabel('Time(ms)')
title('Search test (found)')
legend(loc='best')
grid(True)
savefig('../images/search_found/random/search_found_time_TRMU.eps')
hold(False)
plot(radix[:,0], radix[:,1], '-o', label='Radix Tree')
hold(True)
plot(_map[:,0], _map[:,1], '-o', label='STL ordered Map')
plot(umap[:,0], umap[:,1], '-o', label='STL unordered Map')
xlabel('Max length of the string')
ylabel('Time(ms)')
title('Search test (found)')
legend(loc='best')
grid(True)
savefig('../images/search_found/random/search_found_time_RMU.eps')
hold(False)
plot(trie[:,0], trie[:,1], '-o', label='Trie')
hold(True)
plot(tst[:,0], tst[:,1], '-o', label='Ternary Search Tree')
plot(radix[:,0], radix[:,1], '-o', label='Radix Tree')
xlabel('Max length of the string')
ylabel('Time(ms)')
title('Search test (found)')
legend(loc='best')
grid(True)
savefig('../images/search_found/random/search_found_time_TTR.eps')
|
python
|
""" ThirdParty's """
from .LSUV import LSUVinit
# minor changes to avoid warnings
del LSUV
|
python
|
__all__ = [
"configuration",
"persistence",
]
|
python
|
import argparse
import os
import pickle as pk
import torch
with open('../data/corr_networks/yearly_dict.pk', 'rb') as handle:
yearly_dict = pk.load(handle)
def parameter_parser():
"""
A method to parse up command line parameters.
"""
parser = argparse.ArgumentParser(description="Run SSSNET.")
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--debug', '-D',action='store_true', default=False,
help='Debugging mode, minimal setting.')
parser.add_argument('--train_ratio', type=float, default=0.8,
help='training ratio during data split.')
parser.add_argument('--test_ratio', type=float, default=0.1,
help='test ratio during data split.')
parser.add_argument('--seed', type=int, default=31, help='Random seed.')
parser.add_argument('--epochs', type=int, default=300,
help='Number of maximum epochs to train.')
parser.add_argument('--lr', type=float, default=0.01, #default = 0.01
help='Initial learning rate.')
parser.add_argument('--samples', type=int, default=10000,
help='samples per triplet loss.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=32,
help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate (1 - keep probability).')
parser.add_argument("--all_methods",
nargs="+",
type=str,
help="Methods to use.")
parser.set_defaults(all_methods=['spectral','SSSNET'])
parser.add_argument("--feature_options",
nargs="+",
type=str,
help="Features to use for SSSNET. Can choose from ['A_reg','L','given','None'].")
parser.set_defaults(feature_options=['A_reg'])
parser.add_argument('--loss_ratio', type=float, default=-1,
help='the ratio of loss_pbnc to loss_pbrc. -1 means only loss_pbnc.')
# synthetic model hyperparameters below
parser.add_argument("--seeds",
nargs="+",
type=int,
help="seeds to generate random graphs.")
parser.set_defaults(seeds=[10, 20, 30, 40, 50])
parser.add_argument('--p', type=float, default=0.02,
help='probability of the existence of a link within communities, with probability (1-p), we have 0.')
parser.add_argument('--N', type=int, default=1000,
help='number of nodes in the signed stochastic block model.')
parser.add_argument('--total_n', type=int, default=1050,
help='total number of nodes in the polarized network.')
parser.add_argument('--num_com', type=int, default=2,
help='number of polarized communities (SSBMs).')
parser.add_argument('--K', type=int, default=2,
help=' number of blocks in each SSBM.')
parser.add_argument('--hop', type=int, default=2,
help='Number of hops to consider for the random walk.')
parser.add_argument('--tau', type=float, default=0.5,
help='the regularization parameter when adding self-loops to the positive part of adjacency matrix, i.e. A -> A + tau * I, where I is the identity matrix.')
parser.add_argument('--triplet_loss_ratio', type=float, default=0.1,
help='Ratio of triplet loss to cross entropy loss in supervised loss part. Default 0.1.')
parser.add_argument('--link_sign_loss_ratio', type=float, default=0.1,
help='Ratio of link sign loss to cut loss in self-supervised loss part.')
parser.add_argument('--supervised_loss_ratio', type=float, default=50,
help='Ratio of factor of supervised loss part to self-supervised loss part.')
parser.add_argument('--seed_ratio', type=float, default=0.1,
help='The ratio in the training set of each cluster to serve as seed nodes.')
parser.add_argument('--size_ratio', type=float, default=1.5,
help='The size ratio of the largest to the smallest block. 1 means uniform sizes. should be at least 1.')
parser.add_argument('--num_trials', type=int, default=2,
help='Number of trials to generate results.')
parser.add_argument('--eta', type=float, default=0.1,
help='direction noise level in the meta-graph adjacency matrix, less than 0.5.')
parser.add_argument('--early_stopping', type=int, default=100, help='Number of iterations to consider for early stopping.')
parser.add_argument('--directed', action='store_true', help='Directed input graph.')
    parser.add_argument('--no_validation', action='store_true', help='Whether to disable validation and early stopping during training.')
parser.add_argument('--regenerate_data', action='store_true', help='Whether to force creation of data splits.')
parser.add_argument('--load_only', action='store_true', help='Whether not to store generated data.')
parser.add_argument('--dense', action='store_true', help='Whether not to use torch sparse.')
parser.add_argument('-AllTrain', '-All', action='store_true', help='Whether to use all data to do gradient descent.')
parser.add_argument('--link_sign_loss', action='store_true', help='Whether to use add link sign loss.')
parser.add_argument('-SavePred', '-SP', action='store_true', help='Whether to save predicted labels.')
parser.add_argument('--no_self_supervised', action='store_true', help='Whether to remove self-supervised loss.')
parser.add_argument('--balance_theory', action='store_true', help='Whether to use social balance theory.')
parser.add_argument('--alpha', type=float, default=0,
help='Threshold in triplet loss for seeds.')
# data loading and logs
parser.add_argument('--log_root', type=str, default=os.path.join(os.path.dirname(os.path.realpath(__file__)),'../logs/'),
help='the path saving model.t7 and the training process')
parser.add_argument('--data_path', type=str, default=os.path.join(os.path.dirname(os.path.realpath(__file__)),'../data/'),
help='data set folder, for default format see dataset/cora/cora.edges and cora.node_labels')
parser.add_argument('--dataset', type=str, default='SSBM/', help='data set selection')
parser.add_argument('--year_index', type=int, default=2,
help='Index of the year when using yearly data.')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.device = torch.device("cuda" if (not args.no_cuda and torch.cuda.is_available()) else "cpu")
if args.dataset[:9].lower() == 'mr_yearly':
args.dataset = yearly_dict[args.year_index]
if args.dataset[-1] != '/':
args.dataset += '/'
if args.loss_ratio == -1:
args.w_pbnc = 1
args.w_pbrc = 0
else:
args.w_pbrc = 1
args.w_pbnc = args.loss_ratio
if args.no_validation:
args.train_ratio = 1 - args.test_ratio
if args.debug:
args.epochs = 2
args.num_trials = 2
args.seeds = [10, 20]
args.log_root = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../debug_logs/')
return args
|
python
|
# Given a non-empty array of non-negative integers nums,
# the degree of this array is defined as the maximum frequency of any one of its elements.
# Your task is to find the smallest possible length of a (contiguous) subarray of nums,
# that has the same degree as nums.
import pytest
class Solution:
def findShortestSubArray(self, nums: list[int]) -> int:
degree = len(nums)
repeated_nums = []
temp_dict = {}
count = 0
for i in nums:
try:
temp_dict[i] += 1
except KeyError:
temp_dict[i] = 1
if count < temp_dict[i]:
count = temp_dict[i]
for i in temp_dict:
if temp_dict[i] == count:
repeated_nums.append(i)
start = 0
for n in repeated_nums:
i = 0
while i < len(nums):
if nums[i] == n:
start = i
break
i += 1
i = len(nums) - 1
while i >= 0:
if nums[i] == n:
end = i
break
i -= 1
if degree > (end - start + 1):
degree = end - start + 1
return degree
@pytest.mark.parametrize(
("nums", "expected"),
[([1, 2, 2, 3, 1], 2), ([1, 2, 2, 3, 1, 4, 2], 6), ([2, 1], 1)],
)
def test_basic(nums: list[int], expected: int):
assert expected == Solution().findShortestSubArray(nums)
|
python
|
# -*- coding: utf-8 -*-
# Standard Library
import re
# Cog Dependencies
from redbot.core import commands
class GuildConverterAPI(commands.Converter):
async def convert(self, ctx: commands.Context, argument: str):
guild_raw = argument
target_guild = None
if guild_raw.isnumeric():
guild_raw = int(guild_raw)
try:
target_guild = ctx.bot.get_guild(guild_raw)
except Exception:
target_guild = None
guild_raw = str(guild_raw)
if target_guild is None:
try:
target_guild = await commands.GuildConverter.convert(ctx, guild_raw)
except Exception:
target_guild = None
if target_guild is None:
try:
target_guild = await ctx.bot.fetch_guild(guild_raw)
except Exception:
target_guild = None
if target_guild is None:
raise commands.BadArgument(f"Invalid Guild: {argument}")
return target_guild
class ConvertUserAPI(commands.UserConverter):
"""Converts to a :class:`User`.
All lookups are via the local guild. If in a DM context, then the lookup
    is done by the global cache, then as a final resort the API
The lookup strategy is as follows (in order):
1. Lookup by ID.
2. Lookup by mention.
3. Lookup by name#discrim
4. Lookup by name
5. Looks up by ID through the API
"""
async def convert(self, ctx, argument):
try:
user = await super().convert(ctx, argument)
except commands.BadArgument:
user = None
match = self._get_id_match(argument) or re.match(r"<@!?([0-9]+)>$", argument)
if match is not None:
user_id = int(match.group(1))
user = await ctx.bot.fetch_user(user_id)
if user is None:
raise commands.BadArgument('User "{}" not found'.format(argument))
return user
|
python
|
# Generated by Django 3.1.3 on 2020-12-05 20:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('type', '0003_remove_product_product_type'),
]
operations = [
migrations.RemoveField(
model_name='categorytype',
name='branch_type',
),
migrations.RemoveField(
model_name='product',
name='product_category',
),
migrations.AddField(
model_name='categorytype',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='type.category'),
),
migrations.AddField(
model_name='product',
name='category_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='type.categorytype'),
),
]
|
python
|
from datetime import datetime
import pytz
from commcare_cloud.alias import commcare_cloud
from commcare_cloud.cli_utils import ask
from commcare_cloud.colors import color_notice
from commcare_cloud.commands.deploy.sentry import update_sentry_post_deploy
from commcare_cloud.commands.deploy.utils import (
record_deploy_start,
announce_deploy_success,
create_release_tag,
within_maintenance_window,
DeployContext,
record_deploy_failed,
)
from commcare_cloud.commands.utils import run_fab_task
from commcare_cloud.events import publish_deploy_event
from commcare_cloud.fab.deploy_diff import DeployDiff
from commcare_cloud.github import github_repo
def deploy_commcare(environment, args, unknown_args):
deploy_revs, diffs = get_deploy_revs_and_diffs(environment, args)
if not confirm_deploy(environment, deploy_revs, diffs, args):
print(color_notice("Aborted by user"))
return 1
fab_func_args = get_deploy_commcare_fab_func_args(args)
fab_settings = [args.fab_settings] if args.fab_settings else []
for name, rev in deploy_revs.items():
var = 'code_branch' if name == 'commcare' else '{}_code_branch'.format(name)
fab_settings.append('{}={}'.format(var, rev))
context = DeployContext(
service_name="CommCare HQ",
revision=args.commcare_rev,
diff=_get_diff(environment, deploy_revs),
start_time=datetime.utcnow()
)
record_deploy_start(environment, context)
rc = commcare_cloud(
environment.name, 'fab', 'deploy_commcare{}'.format(fab_func_args),
'--set', ','.join(fab_settings), branch=args.branch, *unknown_args
)
if rc != 0:
record_deploy_failed(environment, context)
return rc
if not args.skip_record:
record_successful_deploy(environment, context)
return 0
def confirm_deploy(environment, deploy_revs, diffs, args):
if diffs:
message = (
"Whoa there bud! You're deploying non-default. "
"\n{}\n"
"ARE YOU DOING SOMETHING EXCEPTIONAL THAT WARRANTS THIS?"
        ).format('\n'.join(diffs))
if not ask(message, quiet=args.quiet):
return False
if not (
_confirm_translated(environment, quiet=args.quiet) and
_confirm_environment_time(environment, quiet=args.quiet)
):
return False
diff = _get_diff(environment, deploy_revs)
diff.print_deployer_diff()
if diff.deployed_commit_matches_latest_commit and not args.quiet:
_print_same_code_warning(deploy_revs['commcare'])
return ask(
'Are you sure you want to preindex and deploy to '
'{env}?'.format(env=environment.name), quiet=args.quiet)
DEPLOY_DIFF = None
def _get_diff(environment, deploy_revs):
global DEPLOY_DIFF
if DEPLOY_DIFF is not None:
return DEPLOY_DIFF
tag_commits = environment.fab_settings_config.tag_deploy_commits
repo = github_repo('dimagi/commcare-hq', require_write_permissions=tag_commits)
deployed_version = _get_deployed_version(environment)
latest_version = repo.get_commit(deploy_revs['commcare']).sha if repo else None
new_version_details = {
'Branch deployed': ', '.join([f'{repo}: {ref}' for repo, ref in deploy_revs.items()])
}
if environment.fab_settings_config.custom_deploy_details:
new_version_details.update(environment.fab_settings_config.custom_deploy_details)
DEPLOY_DIFF = DeployDiff(
repo, deployed_version, latest_version,
new_version_details=new_version_details,
generate_diff=environment.fab_settings_config.generate_deploy_diffs
)
return DEPLOY_DIFF
def _confirm_translated(environment, quiet=False):
if datetime.now().isoweekday() != 3 or environment.meta_config.deploy_env != 'production':
return True
return ask(
"It's the weekly Wednesday deploy, did you update the translations "
"from transifex? Try running this handy script from the root of your "
"commcare-hq directory:\n./scripts/update-translations.sh\n",
quiet=quiet
)
def _confirm_environment_time(environment, quiet=False):
if within_maintenance_window(environment):
return True
window = environment.fab_settings_config.acceptable_maintenance_window
d = datetime.now(pytz.timezone(window['timezone']))
message = (
"Whoa there bud! You're deploying '%s' outside the configured maintenance window. "
"The current local time is %s.\n"
"ARE YOU DOING SOMETHING EXCEPTIONAL THAT WARRANTS THIS?"
) % (environment.name, d.strftime("%-I:%M%p on %h. %d %Z"))
return ask(message, quiet=quiet)
def _print_same_code_warning(code_branch):
if code_branch == 'master':
branch_specific_msg = "Perhaps you meant to merge a PR or specify a --set code_branch=<branch> ?"
elif code_branch == 'enterprise':
branch_specific_msg = (
"Have you tried rebuilding the enterprise branch (in HQ directory)? "
"./scripts/rebuildstaging --enterprise"
)
elif code_branch == 'autostaging':
branch_specific_msg = (
"Have you tried rebuilding the autostaging branch (in HQ directory)? "
"./scripts/rebuildstaging"
)
else:
branch_specific_msg = (
"Did you specify the correct branch using --set code_branch=<branch> ?"
)
print(
f"Whoa there bud! You're deploying {code_branch} which happens to be "
f"the same code as was previously deployed to this environment.\n"
f"{branch_specific_msg}\n"
f"Is this intentional?"
)
def record_successful_deploy(environment, context):
end_time = datetime.utcnow()
diff = context.diff
create_release_tag(environment, diff.repo, diff)
update_sentry_post_deploy(environment, "commcarehq", diff.repo, diff, context.start_time, end_time)
announce_deploy_success(environment, context)
call_record_deploy_success(environment, context, end_time)
publish_deploy_event("deploy_success", "commcare", environment)
def call_record_deploy_success(environment, context, end_time):
delta = end_time - context.start_time
args = [
'--user', context.user,
'--environment', environment.meta_config.deploy_env,
'--url', context.diff.url,
'--minutes', str(int(delta.total_seconds() // 60)),
'--commit', context.diff.deploy_commit,
]
commcare_cloud(environment.name, 'django-manage', 'record_deploy_success', *args)
def _get_deployed_version(environment):
from fabric.api import cd, run
def _task():
with cd(environment.remote_conf.code_current):
return run('git rev-parse HEAD')
host = environment.sshable_hostnames_by_group["django_manage"][0]
res = run_fab_task(_task, host, 'ansible')
return res[host]
def get_deploy_commcare_fab_func_args(args):
fab_func_args = []
if args.resume:
fab_func_args.append('resume=yes')
if args.skip_record:
fab_func_args.append('skip_record=yes')
if fab_func_args:
return ':{}'.format(','.join(fab_func_args))
else:
return ''
def get_deploy_revs_and_diffs(environment, args):
"""Check the revisions to deploy from the arguments against the
defaults configured for the environment and return the final
revisions to deploy and whether they are different from the defaults.
"""
default_branch = environment.fab_settings_config.default_branch
branches = [
('commcare', 'commcare_rev', default_branch),
]
for repo in environment.meta_config.git_repositories:
branches.append((repo.name, '{}_rev'.format(repo.name), repo.version))
diffs = []
actuals = {}
for repo_name, arg_name, default in branches:
actual = getattr(args, arg_name, None)
actuals[repo_name] = actual or default
if actual and actual != default:
diffs.append("'{}' repo: {} != {}".format(repo_name, default, actual))
return actuals, diffs
|
python
|
import re
import logging
logger = logging.getLogger(__name__)
def interpolate_text(template, values):
if isinstance(template, str):
# transforming template tags from
# "{tag_name}" to "{0[tag_name]}"
# as described here:
# https://stackoverflow.com/questions/7934620/python-dots-in-the-name-of-variable-in-a-format-string#comment9695339_7934969
# black list character and make sure to not to allow
# (a) newline in slot name
# (b) { or } in slot name
try:
text = re.sub(r"{([^\n{}]+?)}", r"{0[\1]}", template)
text = text.format(values)
if "0[" in text:
# regex replaced tag but format did not replace
# likely cause would be that tag name was enclosed
# in double curly and format func simply escaped it.
# we don't want to return {0[SLOTNAME]} thus
# restoring original value with { being escaped.
return template.format({})
return text
except KeyError as e:
logger.exception(
"Failed to fill utterance template '{}'. "
"Tried to replace '{}' but could not find "
"a value for it. There is no slot with this "
"name nor did you pass the value explicitly "
"when calling the template. Return template "
"without filling the template. "
"".format(template, e.args[0])
)
return template
return template
def interpolate(template, values):
if isinstance(template, str):
return interpolate_text(template, values)
elif isinstance(template, dict):
for k, v in template.items():
if isinstance(v, dict):
interpolate(v, values)
else:
template[k] = interpolate_text(v, values)
return template
return template
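# Usage sketch (illustrative values): slot tags like "{name}" are filled from the
# provided dict, both for plain strings and for nested dict templates.
if __name__ == "__main__":
    print(interpolate_text("Hello {name}!", {"name": "world"}))         # -> Hello world!
    print(interpolate({"text": "It is {temp} degrees"}, {"temp": 21}))  # -> {'text': 'It is 21 degrees'}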
|
python
|
import os
import codecs
import logging
import json
from collections import namedtuple
from django.utils.datastructures import MultiValueDict as MultiDict
from django.conf import settings
from django.utils.http import urlencode
from django.core.urlresolvers import reverse
import datetime
from dateutil import parser
from pytz import timezone
from time import mktime, strptime
import elasticsearch
from unipath import Path
#from sheer.utility import find_in_search_path
from .filters import filter_dsl_from_multidict
from .middleware import get_request
from .templates import _convert_date
ALLOWED_SEARCH_PARAMS = (
'doc_type',
'analyze_wildcard',
'analyzer',
'default_operator',
'df',
'explain',
'fields',
'indices_boost',
'lenient',
'allow_no_indices',
'expand_wildcards',
'ignore_unavailable',
'lowercase_expanded_terms',
'from_',
'preference',
'q',
'routing',
'scroll',
'search_type',
'size',
'sort',
'source',
'stats',
'suggest_field',
'suggest_mode',
'suggest_size',
'suggest_text',
'timeout',
'version')
FakeQuery = namedtuple('FakeQuery', ['es', 'es_index'])
def mapping_for_type(typename, es, es_index):
return es.indices.get_mapping(index=es_index, doc_type=typename)
def field_or_source_value(fieldname, hit_dict):
if 'fields' in hit_dict and fieldname in hit_dict['fields']:
return hit_dict['fields'][fieldname]
if '_source' in hit_dict and fieldname in hit_dict['_source']:
return hit_dict['_source'][fieldname]
def datatype_for_fieldname_in_mapping(
fieldname,
hit_type,
mapping_dict,
es,
es_index):
try:
return mapping_dict[es_index]["mappings"][
hit_type]["properties"][fieldname]["type"]
except KeyError:
return None
def coerced_value(value, datatype):
if datatype is None or value is None:
return value
TYPE_MAP = {'string': unicode,
'date': parser.parse,
'dict': dict,
'float': float,
'long': float,
'boolean': bool}
coercer = TYPE_MAP[datatype]
if isinstance(value, list):
if value and isinstance(value[0], list):
return [[coercer(y) for y in v] for v in value]
else:
return [coercer(v) for v in value] or ""
else:
return coercer(value)
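# Worked example (illustrative): coerced_value(["3", "4"], "long") -> [3.0, 4.0],
# while coerced_value(None, "date") and coerced_value("x", None) are returned unchanged.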
class QueryHit(object):
def __init__(self, hit_dict, es, es_index):
self.hit_dict = hit_dict
self.type = hit_dict['_type']
self.es = es
self.es_index = es_index
self.mapping = mapping_for_type(self.type, es=es, es_index=es_index)
def __str__(self):
return str(self.hit_dict.get('_source'))
def __repr__(self):
return self.__str__()
@property
def permalink(self):
import sheerlike
if self.type in sheerlike.PERMALINK_REGISTRY:
pattern_name = sheerlike.PERMALINK_REGISTRY[self.type]
return reverse(pattern_name, kwargs=dict(doc_id=self._id))
else:
raise NotImplementedError(
"Please use django's reverse url system,"
"or register a permalink for %s" %
self.type)
def __getattr__(self, attrname):
value = field_or_source_value(attrname, self.hit_dict)
datatype = datatype_for_fieldname_in_mapping(
attrname, self.type, self.mapping, self.es, self.es_index)
return coerced_value(value, datatype)
def json_compatible(self):
hit_dict = self.hit_dict
fields = hit_dict.get('fields') or hit_dict.get('_source', {}).keys()
return dict((field, getattr(self, field)) for field in fields)
class QueryResults(object):
def __init__(self, query, result_dict, pagenum=1):
self.result_dict = result_dict
self.total = int(result_dict['hits']['total'])
self.query = query
# confusing: using the word 'query' to mean different things
# above, it's the Query object
# below, it's Elasticsearch query DSL
if 'query' in result_dict:
self.size = int(result_dict['query'].get('size', '10'))
self.from_ = int(result_dict['query'].get('from', 1))
self.pages = self.total / self.size + \
int(self.total % self.size > 0)
else:
self.size, self.from_, self.pages = 10, 1, 1
self.current_page = pagenum
def __iter__(self):
if 'hits' in self.result_dict and 'hits' in self.result_dict['hits']:
for hit in self.result_dict['hits']['hits']:
query_hit = QueryHit(hit, self.query.es, self.query.es_index)
yield query_hit
def __getitem__(self, index):
if 'hits' in self.result_dict and 'hits' in self.result_dict['hits']:
for i, hit in enumerate(self.result_dict['hits']['hits']):
if i == index:
return QueryHit(hit, self.query.es, self.query.es_index)
def __len__(self):
if 'hits' in self.result_dict and 'hits' in self.result_dict['hits']:
return len(self.result_dict['hits']['hits'])
def aggregations(self, fieldname):
if "aggregations" in self.result_dict and \
fieldname in self.result_dict['aggregations']:
return self.result_dict['aggregations'][fieldname]['buckets']
def json_compatible(self):
response_data = {}
response_data['total'] = self.result_dict['hits']['total']
if self.size:
response_data['size'] = self.size
if self.from_:
response_data['from'] = self.from_
if self.pages:
response_data['pages'] = self.pages
response_data['results'] = [
hit.json_compatible() for hit in self.__iter__()]
return response_data
def url_for_page(self, pagenum):
request = get_request()
current_args = request.GET
args_dict = MultiDict(current_args)
if pagenum != 1:
args_dict['page'] = pagenum
elif 'page' in args_dict:
del args_dict['page']
encoded = urlencode(args_dict, doseq=True)
if encoded:
url = "".join(
[request.path, "?", urlencode(args_dict, doseq=True)])
return url
else:
return request.path
class Query(object):
def __init__(self, filename, es, es_index, json_safe=False):
# TODO: make the no filename case work
self.es_index = es_index
self.es = es
self.filename = filename
self.__results = None
self.json_safe = json_safe
def get_tag_related_documents(self, tags, size=0, additional_args={}):
query_file = json.loads(file(self.filename).read())
doc_type = query_file['query']['doc_type']
query_dict = {'sort': [{'date': {'order': 'desc'}}]}
if not additional_args:
query_dict['query'] = {'bool': {'should': []}}
for tag in tags:
query_dict['query']['bool']['should'].append({'match': {'tags.lower': tag.lower()}})
else:
filtered_query = json.loads(file(getattr(QueryFinder(), 'filtered_17').filename).read())
query_dict['query'] = filtered_query
for tag in tags:
query_dict['query']['filtered']['query']['bool']['should'].append({'term': {'tags.lower': tag.lower()}})
for arg in additional_args:
query_dict['query']['filtered']['filter']['or'].append(arg)
if size:
query_dict['size'] = size
response = self.es.search(index=self.es_index, doc_type=doc_type,
body=query_dict, analyzer='tag_analyzer')
return QueryResults(self, response)
def search(
self,
aggregations=None,
use_url_arguments=True,
size=10,
pdf_print=False,
**kwargs):
query_file = json.loads(file(self.filename).read())
query_dict = query_file['query']
if pdf_print:
query_dict['size'] = settings.ELASTICSEARCH_BIGINT
'''
These dict constructors split the kwargs from the template into filter
arguments and arguments that can be placed directly into the query body.
The dict constructor syntax supports python 2.6, 2.7, and 3.x
If python 2.7, use dict comprehension and iteritems()
With python 3, use dict comprehension and items() (items() replaces
iteritems and is just as fast)
'''
filter_args = dict((key, value) for (key, value) in kwargs.items()
if key.startswith('filter_'))
non_filter_args = dict((key, value) for (key, value) in kwargs.items()
if not key.startswith('filter_'))
query_dict.update(non_filter_args)
pagenum = 1
# Add in filters from the template.
new_multidict = MultiDict()
# First add the url arguments if requested
if use_url_arguments:
request = get_request()
new_multidict = MultiDict(request.GET.copy())
args_flat = request.GET.copy()
# Next add the arguments from the search() function used in the
# template
for key, value in filter_args.items():
new_multidict.update({key: value})
filters = filter_dsl_from_multidict(new_multidict)
query_body = {}
if aggregations:
aggs_dsl = {}
if isinstance(aggregations, str):
aggregations = [aggregations] # so we can treat it as a list
for fieldname in aggregations:
aggs_dsl[fieldname] = {'terms':
{'field': fieldname, 'size': 10000}}
query_body['aggs'] = aggs_dsl
else:
if use_url_arguments:
if 'page' in args_flat:
try:
pagenum = int(args_flat['page'])
except ValueError:
pagenum = 1
args_flat['from_'] = int(query_dict.get(
'size', '10')) * (pagenum - 1)
args_flat_filtered = dict(
[(k, v) for k, v in args_flat.items() if v])
query_dict.update(args_flat_filtered)
query_body['query'] = {'filtered': {'filter': {}}}
if filters:
query_body['query']['filtered']['filter'][
'and'] = [f for f in filters]
if 'filters' in query_file:
if 'and' not in query_body['query']['filtered']['filter']:
query_body['query']['filtered']['filter']['and'] = []
for json_filter in query_file['filters']:
query_body['query']['filtered'][
'filter']['and'].append(json_filter)
final_query_dict = dict(
(k, v) for (
k, v) in query_dict.items() if k in ALLOWED_SEARCH_PARAMS)
final_query_dict['index'] = self.es_index
final_query_dict['body'] = query_body
response = self.es.search(**final_query_dict)
response['query'] = query_dict
return QueryResults(self, response, pagenum)
def possible_values_for(self, field, **kwargs):
results = self.search(aggregations=[field], **kwargs)
return results.aggregations(field)
class QueryFinder(object):
def __init__(self):
self.es = elasticsearch.Elasticsearch(
settings.SHEER_ELASTICSEARCH_SERVER)
self.es_index = settings.SHEER_ELASTICSEARCH_INDEX
self.searchpath = [Path(p).child('_queries')
for s, p in settings.SHEER_SITES.items()]
def __getattr__(self, name):
for dir in self.searchpath:
query_filename = name + ".json"
query_file_path = os.path.join(dir, query_filename)
if os.path.exists(query_file_path):
query = Query(query_file_path, self.es, self.es_index)
return query
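# Usage sketch (hypothetical query name): queries are resolved lazily by attribute
# access, e.g. QueryFinder().events looks for an "events.json" file in each configured
# _queries directory and returns a Query bound to the configured Elasticsearch index.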
class QueryJsonEncoder(json.JSONEncoder):
query_classes = [QueryResults, QueryHit]
def default(self, obj):
if type(obj) in (datetime.datetime, datetime.date):
return obj.isoformat()
if type(obj) in self.query_classes:
return obj.json_compatible()
return json.JSONEncoder.default(self, obj)
def more_like_this(hit, **kwargs):
es = elasticsearch.Elasticsearch(settings.SHEER_ELASTICSEARCH_SERVER)
es_index = settings.SHEER_ELASTICSEARCH_INDEX
doctype, docid = hit.type, hit._id
raw_results = es.mlt(
index=es_index, doc_type=doctype, id=docid, **kwargs)
# this is bad and I should feel bad
# (I do)
fake_query = FakeQuery(es, es_index)
return QueryResults(fake_query, raw_results)
def get_document(doctype, docid):
es = elasticsearch.Elasticsearch(settings.SHEER_ELASTICSEARCH_SERVER)
es_index = settings.SHEER_ELASTICSEARCH_INDEX
raw_results = es.get(index=es_index, doc_type=doctype, id=docid)
return QueryHit(raw_results, es, es_index)
def when(starttime, endtime, streamtime=None):
start = _convert_date(starttime, 'America/New_York')
if streamtime:
start = _convert_date(streamtime, 'America/New_York')
end = _convert_date(endtime,'America/New_York')
if start > datetime.datetime.now(timezone('America/New_York')):
return 'future'
elif end < datetime.datetime.now(timezone('America/New_York')):
return 'past'
else:
return 'present'
|
python
|
import math
import matplotlib.pyplot as plt
import matplotlib.colors as mplib_colors
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
import io
from . import helpers as h
#
# HELPERS
#
INPUT_BANDS=[0]
DESCRIPTION_HEAD="""
* batch_index: {}
* image_index: {}
"""
DESCRIPTION_HIST="""{}
* {}:
{}
"""
MULTIHEAD_INDEX=0
class SegmentationImageWriter(object):
def __init__(self,
data_dir,
loader,
vmax,
model=None,
input_bands=INPUT_BANDS,
target_colors=None,
vmin=0,
ax_h=4,
ax_w=None,
ax_delta=0.2,
preserve_epoch=None):
if not target_colors:
target_colors=h.COLORS[:vmax]
self.input_bands=input_bands
self.cmap=mplib_colors.ListedColormap(target_colors)
self.vmin=vmin
self.vmax=vmax
self.ax_h=ax_h
if not ax_w:
ax_w=ax_h*(1+ax_delta)
self.ax_w=ax_w
self.preserve_epoch=preserve_epoch
self.file_writer=tf.summary.create_file_writer(data_dir)
self.loader=loader
self.model=model
def write_batch(self,batch_index,epoch=None,model=True):
if model is True:
model=self.model
data=self.loader[batch_index]
inpts,targs=data[0],data[1]
if model:
preds=model(inpts)
if isinstance(preds,list):
preds=preds[MULTIHEAD_INDEX]
targs=targs[MULTIHEAD_INDEX]
preds=tf.argmax(preds,axis=-1).numpy()
self._save_inputs_targets_predictions(
batch_index,
inpts,
targs,
preds,
epoch)
else:
self._save_inputs_targets(batch_index,inpts,targs,epoch)
    def _save_inputs_targets(self,batch_index,inpts,targs,epoch=None):
for i,(inpt,targ) in enumerate(zip(inpts,targs)):
inpt,targ=self._process_input_target(inpt,targ)
figim=self._get_figure_image(inpt,targ)
targ_hist=self._get_hist(targ)
self._save_figue_image(batch_index,i,figim,epoch,target_hist=targ_hist)
def _save_inputs_targets_predictions(self,batch_index,inpts,targs,preds,epoch):
for i,(inpt,targ,pred) in enumerate(zip(inpts,targs,preds)):
inpt,targ=self._process_input_target(inpt,targ)
pred=self._process_prediction(pred)
figim=self._get_figure_image(inpt,targ,pred)
targ_hist=self._get_hist(targ)
pred_hist=self._get_hist(pred)
self._save_figue_image(
batch_index,
i,
figim,
epoch,
target_hist=targ_hist,
prediction_hist=pred_hist)
def _process_input_target(self,inpt,targ):
targ=np.argmax(targ,axis=-1).astype(np.uint8)
inpt=inpt[:,:,self.input_bands]
if inpt.shape[-1]==1:
inpt=inpt[:,:,0]
else:
inpt=inpt[:,:,:3]
return inpt, targ
def _process_prediction(self,pred):
return pred.astype(np.uint8)
def _get_hist(self,cat_im):
values,counts=np.unique(cat_im,return_counts=True)
hist={ v: c for v,c in zip(values,counts) }
return { v: hist.get(v,0) for v in range(self.vmin,self.vmax+1) }
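    # Worked example (illustrative): with vmin=0 and vmax=3, a categorical image
    # containing the values [0, 0, 1, 2] yields {0: 2, 1: 1, 2: 1, 3: 0}.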
def _get_figure_image(self,inpt,targ,pred=None):
if pred is None:
nb_cols=2
else:
nb_cols=3
figsize=(int(math.ceil(self.ax_w*nb_cols)),self.ax_h)
fig,axs=plt.subplots(1,nb_cols,figsize=figsize)
_=axs[0].imshow(inpt)
_=axs[1].imshow(targ,vmin=self.vmin,vmax=self.vmax,cmap=self.cmap)
if nb_cols==3:
_=axs[2].imshow(pred,vmin=self.vmin,vmax=self.vmax,cmap=self.cmap)
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(fig)
buf.seek(0)
image = tf.image.decode_png(buf.getvalue(), channels=3)
image = tf.expand_dims(image, 0)
return image
def _save_figue_image(self,
batch_index,
image_index,
image,
epoch=None,
target_hist=None,
prediction_hist=None):
if self.preserve_epoch and epoch and (not (epoch%self.preserve_epoch)):
name=f'epoch_{epoch}: batch_{batch_index}-image_{image_index}'
else:
name=f'batch_{batch_index}-image_{image_index}'
description=DESCRIPTION_HEAD.format(batch_index,image_index)
if target_hist:
description=DESCRIPTION_HIST.format(description,'target',target_hist)
if prediction_hist:
description=DESCRIPTION_HIST.format(description,'prediction',prediction_hist)
with self.file_writer.as_default():
tf.summary.image(name,image,step=0,description=description)
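# --- Hedged usage sketch (added) ---
# Illustrative only: the list-based "loader" below stands in for whatever
# indexable object normally supplies (inputs, targets) batches, and 'logs/demo'
# is an arbitrary output directory. Because of the relative `helpers` import
# above, run this in package context (e.g. `python -m <package>.<this_module>`).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_inputs = rng.random((2, 64, 64, 1)).astype(np.float32)
    demo_labels = rng.integers(0, 3, size=(2, 64, 64))
    demo_targets = np.eye(3)[demo_labels].astype(np.float32)  # one-hot targets
    demo_loader = [(demo_inputs, demo_targets)]
    writer = SegmentationImageWriter(
        data_dir="logs/demo",
        loader=demo_loader,
        vmax=2,
        target_colors=["black", "red", "green"])
    # with model=None only the input/target pair is rendered and logged
    writer.write_batch(0, model=None)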
|
python
|
from __future__ import annotations
import json
import sys
from datetime import datetime
from typing import Any, List, Optional
from meilisearch.client import Client
from meilisearch.errors import MeiliSearchApiError
from rich.console import Group
from rich.panel import Panel
from rich.traceback import install
from typer import Argument, Exit, Option, Typer, echo
from meilisearch_cli import documents, dump, index
from meilisearch_cli._config import (
MASTER_KEY_OPTION,
PANEL_BORDER_COLOR,
RAW_OPTION,
URL_OPTION,
console,
)
from meilisearch_cli._docs import build_docs_tree
from meilisearch_cli._helpers import (
create_client,
create_panel,
handle_meilisearch_api_error,
print_panel_or_raw,
set_search_param,
)
install()
__version__ = "0.10.0"
app = Typer()
app.add_typer(documents.app, name="documents", help="Manage documents in an index.")
app.add_typer(dump.app, name="dump", help="Create and get status of dumps.")
app.add_typer(index.app, name="index", help="Manage indexes")
@app.command()
def docs() -> None:
"""A tree of all documentation links. If supported by your terminal the links are clickable."""
with console.status("Getting documentation links..."):
console.print(build_docs_tree())
@app.command()
def api_docs_link() -> None:
"""Gives a clickable link to the MeiliSearch API documenation. This can be used in terminals that don't support links."""
console.print("https://docs.meilisearch.com/reference/api/")
@app.command()
def docs_link() -> None:
"""Gives a clickable link to the MeiliSearch documenation. This can be used in terminals that don't support links."""
console.print("https://docs.meilisearch.com/")
@app.command()
def generate_tenant_token(
search_rules: str = Argument(..., help="The search rules to use for the tenant token"),
api_key: str = Argument(..., help="The API key to use to generate the tenant token"),
expires_at: datetime = Option(
None, help="The time at which the the tenant token should expire. UTC time should be used."
),
url: Optional[str] = URL_OPTION,
master_key: Optional[str] = MASTER_KEY_OPTION,
) -> None:
"""Generate a tenant token to use for search routes."""
with console.status("Generating Tenant Token..."):
client = create_client(url, master_key)
try:
formatted_search_rules = json.loads(search_rules)
except json.JSONDecodeError:
formatted_search_rules = search_rules
response = client.generate_tenant_token(
formatted_search_rules, api_key=api_key, expires_at=expires_at
)
console.print(create_panel(response, title="Tenant Token"))
@app.command()
def create_key(
description: Optional[str] = Option(None, help="Description of the key"),
actions: Optional[List[str]] = Option(None, help="Actions the key can perform"),
indexes: Optional[List[str]] = Option(None, help="Indexes for which the key has access"),
expires_at: Optional[datetime] = Option(
None, help="The date the key should expire. If included the date should be in UTC time"
),
url: Optional[str] = URL_OPTION,
master_key: Optional[str] = MASTER_KEY_OPTION,
raw: bool = RAW_OPTION,
) -> None:
"""Create a new API key."""
if not description and not actions and not indexes and not expires_at:
console.print("No values included for creating the key", style="error")
sys.exit(1)
options = {
"description": description,
"actions": actions,
"indexes": indexes,
"expiresAt": expires_at.isoformat() if expires_at else None,
}
client = create_client(url, master_key)
with console.status("Creating key..."):
response = client.create_key(options)
print_panel_or_raw(raw, response, "Key")
@app.command()
def delete_key(
key: str = Argument(..., help="The name of the key to delete"),
url: Optional[str] = URL_OPTION,
master_key: Optional[str] = MASTER_KEY_OPTION,
raw: bool = RAW_OPTION,
) -> None:
"""Delete an API key."""
client = create_client(url, master_key)
with console.status("Deleting key..."):
response = client.delete_key(key)
data = {"response": response.status_code} # type: ignore
print_panel_or_raw(raw, data, "Key")
@app.command()
def get_key(
key: str = Argument(..., help="The name of the key to get"),
url: Optional[str] = URL_OPTION,
master_key: Optional[str] = MASTER_KEY_OPTION,
raw: bool = RAW_OPTION,
) -> None:
"""Get an API key."""
client = create_client(url, master_key)
with console.status("Getting key..."):
response = client.get_key(key)
print_panel_or_raw(raw, response, "Key")
@app.command()
def get_keys(
url: Optional[str] = URL_OPTION,
master_key: Optional[str] = MASTER_KEY_OPTION,
raw: bool = RAW_OPTION,
) -> None:
"""Gets the public and private keys"""
client = create_client(url, master_key)
with console.status("Getting keys..."):
keys = client.get_keys()
print_panel_or_raw(raw, keys, "Keys")
@app.command()
def update_key(
key: str = Argument(..., help="The name of the key to update"),
description: Optional[str] = Option(None, help="Description of the key"),
actions: Optional[List[str]] = Option(None, help="Actions the key can perform"),
indexes: Optional[List[str]] = Option(None, help="Indexes for which the key has access"),
expires_at: Optional[datetime] = Option(
None, help="The date the key should expire. If included the date should be in UTC time"
),
url: Optional[str] = URL_OPTION,
master_key: Optional[str] = MASTER_KEY_OPTION,
raw: bool = RAW_OPTION,
) -> None:
"""Update an API key."""
if not description and not actions and not indexes and not expires_at:
console.print("No values included for creating the key", style="error")
sys.exit(1)
options = {
"key": key,
"description": description,
"actions": actions,
"indexes": indexes,
"expiresAt": expires_at.isoformat() if expires_at else None,
}
client = create_client(url, master_key)
with console.status("Updating index..."):
response = client.update_key(key, options)
print_panel_or_raw(raw, response, "Key")
@app.command()
def get_version(
url: Optional[str] = URL_OPTION,
master_key: Optional[str] = MASTER_KEY_OPTION,
raw: bool = RAW_OPTION,
) -> None:
"""Gets the MeiliSearch version information."""
client = create_client(url, master_key)
with console.status("Getting version..."):
version = client.get_version()
print_panel_or_raw(raw, version, "Version Information")
@app.command()
def health(
url: Optional[str] = URL_OPTION,
raw: bool = RAW_OPTION,
) -> None:
"""Checks the status of the server."""
if not url:
        console.print(
            "A value for [error_highlight]--url[/] has to either be provided or available in the [error_highlight]MEILI_HTTP_ADDR[/] environment variable",
            style="error",
        )
        sys.exit(1)
client = Client(url)
with console.status("Getting server status..."):
health = client.health()
print_panel_or_raw(raw, health, "Server Health")
@app.callback(invoke_without_command=True)
def main(
version: Optional[bool] = Option(
None,
"--version",
"-v",
is_eager=True,
help="Show the installed version",
),
) -> None:
if version:
echo(__version__)
raise Exit()
@app.command()
def search(
index: str = Argument(..., help="The name of the index from which to retrieve the settings"),
query: str = Argument(..., help="The query string"),
offset: Optional[int] = Option(None, help="The number of documents to skip"),
limit: Optional[int] = Option(None, help="The maximum number of documents to return"),
filter: Optional[List[str]] = Option(None, help="Filter queries by an attribute value"),
facets_distribution: Optional[List[str]] = Option(
None, help="Facets for which to retrieve the matching count"
),
attributes_to_retrieve: Optional[List[str]] = Option(
None, help="Attributes to display in the returned documents"
),
attributes_to_crop: Optional[List[str]] = Option(
None, help="Attributes whose values have to be cropped"
),
crop_length: Optional[int] = Option(None, help="Length used to crop field values"),
    attributes_to_highlight: Optional[List[str]] = Option(
        None, help="Attributes whose values will contain highlighted matching terms"
    ),
matches: bool = Option(
False,
help="Defines whether an object that contains information about the matches should be returned or not",
),
sort: Optional[List[str]] = Option(
None, help="Sort search results according to the attributes"
),
url: Optional[str] = URL_OPTION,
master_key: Optional[str] = MASTER_KEY_OPTION,
raw: bool = RAW_OPTION,
) -> None:
"""Perform a search."""
client = create_client(url, master_key)
search_params: dict[str, Any] = {}
set_search_param(search_params, offset, "offset")
set_search_param(search_params, limit, "limit")
set_search_param(search_params, filter, "filter")
set_search_param(search_params, facets_distribution, "facetsDistribution")
set_search_param(search_params, attributes_to_retrieve, "attributesToRetrieve")
set_search_param(search_params, attributes_to_crop, "attributesToCrop")
set_search_param(search_params, crop_length, "cropLength")
    set_search_param(search_params, attributes_to_highlight, "attributesToHighlight")
set_search_param(search_params, matches, "matches")
set_search_param(search_params, sort, "sort")
try:
with console.status("Searching..."):
if search_params:
search_results = client.index(index).search(query, search_params)
else:
search_results = client.index(index).search(query)
if raw:
console.print_json(json.dumps(search_results))
else:
hits_panel = create_panel(search_results["hits"], title="Hits", fit=False)
del search_results["hits"]
info_panel = create_panel(search_results, title="Information", fit=False)
panel_group = Group(info_panel, hits_panel)
panel = Panel(panel_group, title="Search Results", border_style=PANEL_BORDER_COLOR)
console.print(panel)
except MeiliSearchApiError as e:
handle_meilisearch_api_error(e, index)
if __name__ == "__main__":
app()
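# --- Hedged usage examples (added) ---
# Illustrative shell invocations only. The executable name `meilisearch-cli` and
# the exact spelling of the --url / --master-key flags are assumptions (they are
# defined outside this module); substitute whatever entry point wraps `app`.
#   meilisearch-cli health --url http://localhost:7700
#   meilisearch-cli get-keys --url http://localhost:7700 --master-key masterKey
#   meilisearch-cli search movies "space opera" --limit 5 --url http://localhost:7700 --master-key masterKey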
|
python
|
import heapq
import operator
import os
from bitstring import BitArray
import json
import pickle
import codecs
DIR_DATA = "media/"
DIR_HUFFMAN = DIR_DATA
# dictionary used to map codes back to symbols when decoding
temp_reverse = {}
# each distinct symbol becomes a Node of the Huffman tree, weighted by its frequency
class Node:
    # build a node with symbol, frequency, left child and right child
def __init__(self,sympol=None,frequency=None,left_node=None,right_node=None):
self.sympol = sympol
self.frequency = frequency
self.left_node = left_node
self.right_node = right_node
# defining comparators less_than
def __lt__(self, other):
return self.frequency < other.frequency
# build the frequency table of all distinct symbols in the text
def count_frequency(content):
table_frequency = {}
for sym in content:
        if sym not in table_frequency:
            table_frequency[sym] = 0
        table_frequency[sym] += 1
return table_frequency
#sort table frequency min - max
def sort_table_frequency(table_frequency):
sorted_table_frequency = sorted(table_frequency.items(), key=operator.itemgetter(1))
sorted_table_frequency = dict(sorted_table_frequency)
return sorted_table_frequency
#make a Huffman tree with min heap
def tree_maker(tree,table_frequency):
    # wrap each symbol in a Node and push it onto the heap
for each_sym in table_frequency:
node = Node(each_sym,table_frequency[each_sym])
heapq.heappush(tree,node)
    # keep merging until only the root node remains
    while (len(tree)>1):
        # pop the two nodes with the lowest frequencies
        nodel = heapq.heappop(tree)
        noder = heapq.heappop(tree)
        # create an internal node whose frequency is the sum of the two popped nodes
internal_weight = nodel.frequency + noder.frequency
internal_node = Node(None,internal_weight,nodel,noder)
heapq.heappush(tree,internal_node)
return tree
# create the code path for every symbol by walking the tree from the root
def encode_reverse(encoded_sympol,temp_reverse,parent, temp_way):
if (parent == None ):
return
if (parent.sympol != None):
encoded_sympol[parent.sympol] = temp_way
temp_reverse[temp_way] = parent.sympol
return
#recursion to find paths
encode_reverse(encoded_sympol,temp_reverse,parent.left_node,temp_way + "0")
encode_reverse(encoded_sympol,temp_reverse,parent.right_node,temp_way + "1")
# build the dictionary mapping each symbol to its code
def encoded(encoded_sympol,temp_reverse,tree):
parent = heapq.heappop(tree)
temp_path = ""
encode_reverse(encoded_sympol,temp_reverse,parent,temp_path)
#encode text to code
def convert_text_to_code(encoded_sympol,content):
encoded_content = ""
for sym in content:
encoded_content = encoded_content + encoded_sympol[sym]
#print (encoded_content)
return encoded_content
# to convert bits to bytes, len(code) must be a multiple of 8: prepend k zeros and store k as an 8-bit header in front of the code
def prepare_to_convert_bin_to_byte(content):
added_code = 8 - len(content) % 8
for i in range(added_code):
content = "0" + content
encoded_added_code = "{0:08b}".format(added_code)
content = encoded_added_code + content
return content
#convert bin to byte
def convert_bin_to_byte(content):
b = bytearray()
for i in range(0, len(content), 8):
byte = content[i:i + 8]
b.append(int(byte, 2))
return b
#compress
def compress(path):
temp_reverse = {}
tree = []
encoded_sympol = {}
tempname, _ = os.path.splitext(path)
filename = tempname.split("/")
with open(path, 'r+', encoding='utf-8') as f:
content = f.read()
frequency = count_frequency(content)
table_frequency = sort_table_frequency(frequency)
tree = tree_maker(tree,table_frequency)
encoded_tree = encoded(encoded_sympol,temp_reverse,tree)
file_dict = os.path.join(DIR_HUFFMAN,filename[-1] + ".dict")
encoded_content = convert_text_to_code(encoded_sympol,content)
new_content = prepare_to_convert_bin_to_byte(encoded_content)
byte_content = convert_bin_to_byte(new_content)
file_com = os.path.join(DIR_HUFFMAN,"result", filename[-1] + ".bin")
pickle.dump((temp_reverse, byte_content), open(file_com,'wb'))
print ("Completely compressed")
return file_com
#decode code
def decode(new_content):
temp = new_content.bin
added_num_info = temp[:8]
added_num = int(added_num_info, 2)
temp = temp[8:]
return temp[added_num:]
#decompress
def decompress(path):
pack = pickle.load(open(path,'rb'))
temp_reverse = pack[0]
content = pack[1]
new_content = BitArray(bytes = content)
new_content = decode(new_content)
current_code = ""
decoded_text = ""
for bit in new_content:
current_code += bit
if (current_code in temp_reverse):
character = temp_reverse[current_code]
decoded_text += character
current_code = ""
temp_reverse.clear()
tempname, _ = os.path.splitext(path)
filename = tempname.split("/")
file_decom = "media/result/" + filename[-1] + "-decoded" + ".txt"
with codecs.open(file_decom, "w+", encoding='utf-8') as o:
o.write(decoded_text)
print("Completely decompressed")
return filename[-1] + "-decoded" + ".txt"
|
python
|
from flask import jsonify, g
from app import db
from app.api import bp
from app.api.auth import auth_tp
@bp.route('/tokens', methods=['DELETE'])
@auth_tp.login_required
def revoke_token():
g.current_user.revoke_token()
db.session.commit()
return '', 204
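# --- Hedged usage example (added) ---
# Assuming the api blueprint is mounted under /api (the prefix is set elsewhere),
# revoking the current token looks like:
#   curl -X DELETE -H "Authorization: Bearer <token>" http://localhost:5000/api/tokens
# A successful call returns an empty 204 response.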
|
python
|
from . import discord
from . import log # noqa
def main():
"""Run discurses."""
client = discord.DiscordClient()
client.run()
if __name__ == '__main__':
    main()
|
python
|
from . import models
from . import routes
from . import db_session
from . import app
|
python
|
import pyPdf
import time
import urllib2
from django.conf import settings
#save file locally
def archive_file(original_url, gov_id, doc_type, file_type):
type_dir = doc_type.lower().replace(' ', '_')
file_name = "%s%s/%s.%s" % (settings.DATA_ROOT, type_dir, gov_id, file_type)
try:
remote_file = urllib2.urlopen(original_url)
local_file = open(file_name, "w")
local_file.write(remote_file.read())
        local_file.close()
except:
pass
return file_name
def pdf_extract_text(path, original_url):
#adapted from http://code.activestate.com/recipes/511465/
content = ""
try:
try:
pdf = pyPdf.PdfFileReader(file(path, "rb"))
except:
remote_file = urllib2.urlopen(original_url)
local_file = open(path, "w")
local_file.write(remote_file.read())
            local_file.close()
pdf = pyPdf.PdfFileReader(file(path, "rb"))
for i in range(0, pdf.getNumPages()):
content += pdf.getPage(i).extractText() + "\n"
content = " ".join(content.replace("\n", " ").strip().split())
except:
pass
return content
def debug_print(output):
    if settings.DEBUG:
print output
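# --- Hedged usage example (added) ---
# Illustrative values only; settings.DATA_ROOT and the per-type subdirectory
# (here "meeting_minutes/") must already exist.
# archive_file("http://example.com/minutes.pdf", "2011-042", "Meeting Minutes", "pdf")
#   -> saves the file to <DATA_ROOT>meeting_minutes/2011-042.pdf and returns that path
# pdf_extract_text("<that path>", "http://example.com/minutes.pdf")
#   -> returns the whitespace-normalised text of every page, or "" on failure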
|
python
|
# -*- coding: utf-8 -*-
# @author: Optimus
# @since 2018-12-15
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
num_index_map = {}
for index, num in enumerate(nums):
            if num in num_index_map:
return [num_index_map[num], index]
else:
num_index_map[target - num] = index
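# --- Hedged usage example (added) ---
if __name__ == '__main__':
    # indices of the two numbers in [2, 7, 11, 15] that add up to 9
    print(Solution().twoSum([2, 7, 11, 15], 9))  # -> [0, 1]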
|
python
|
#!/usr/bin/env python3
# coding:utf-8
import os, shutil
def fun(addr):
    items = os.listdir(addr)
    for each in items:
        path = os.path.join(addr, each)  # listdir returns bare names, so join with addr
        if os.path.isfile(path):  # True for a file, False for a directory
            item = os.path.splitext(each)[0]
            name = item.split("-")[0]  # split the file name and keep the part before "-"
            if os.path.exists(addr + '/' + name):  # check whether a folder with this name already exists
                shutil.move(path, addr + '/' + name)  # if it exists, move the file into it
            else:  # otherwise create the folder first, then move the file into it
                os.mkdir(addr + '/' + name)
                shutil.move(path, addr + '/' + name)
        else:
            fun(path)  # recurse into subdirectories
def arrange(addr):
    os.chdir(addr)
    for each in os.listdir(addr):
        if os.path.isfile(each):  # True for a file, False for a directory
            name = each.split("-")[0]  # take the part of the file name before "-"
            if not os.path.exists(name):  # create the folder if it does not exist yet
                os.mkdir(name)
            shutil.move(each, name)  # move the file into that folder
arrange("D:/Filetest")
|
python
|
"""TRAINING
Created: May 04,2019 - Yuchong Gu
Revised: May 07,2019 - Yuchong Gu
"""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import time
import logging
import warnings
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from optparse import OptionParser
from roi_align.crop_and_resize import CropAndResizeFunction
from torch.autograd import Variable
from utils import accuracy
from models import *
from dataset import *
import matplotlib.pyplot as plt
import cv2
def generate_attention_image(image, attention_map):
h, w, _ = image.shape
mask = np.mean(attention_map, axis=-1, keepdims=True)
mask = (mask / np.max(mask) * 255.0).astype(np.uint8)
mask = cv2.resize(mask, (w, h))
image = (image / 2.0 + 0.5) * 255.0
image = image.astype(np.uint8)
color_map = cv2.applyColorMap(mask.astype(np.uint8), cv2.COLORMAP_JET)
attention_image = cv2.addWeighted(image, 0.5, color_map.astype(np.uint8), 0.5, 0)
attention_image = cv2.cvtColor(attention_image, cv2.COLOR_BGR2RGB)
return attention_image
def to_varabile(tensor, requires_grad=False, is_cuda=True):
if is_cuda:
tensor = tensor.cuda()
var = Variable(tensor, requires_grad=requires_grad)
return var
def attention_crop(attention_maps):
batch_size, num_parts, height, width = attention_maps.shape
bboxes = []
for i in range(batch_size):
attention_map = attention_maps[i]
part_weights = attention_map.mean(axis=1).mean(axis=1)
part_weights = np.sqrt(part_weights)
part_weights = part_weights / np.sum(part_weights)
selected_index = np.random.choice(np.arange(0, num_parts), size=1, p=part_weights)[0]
mask = attention_map[selected_index, :, :]
threshold = random.uniform(0.4, 0.6)
itemindex = np.where(mask >= mask.max() * threshold)
ymin = itemindex[0].min() / height - 0.1
ymax = itemindex[0].max() / height + 0.1
xmin = itemindex[1].min() / width - 0.1
xmax = itemindex[1].max() / width + 0.1
bbox = np.asarray([ymin, xmin, ymax, xmax], dtype=np.float32)
bboxes.append(bbox)
bboxes = np.asarray(bboxes, np.float32)
return bboxes
def mask2bbox(attention_maps):
height = attention_maps.shape[2]
width = attention_maps.shape[3]
bboxes = []
for i in range(attention_maps.shape[0]):
mask = attention_maps[i]
mask = mask[0]
max_activate = mask.max()
min_activate = 0.1 * max_activate
mask = (mask >= min_activate)
itemindex = np.where(mask == True)
ymin = itemindex[0].min() / height - 0.05
ymax = itemindex[0].max() / height + 0.05
xmin = itemindex[1].min() / width - 0.05
xmax = itemindex[1].max() / width + 0.05
bbox = np.asarray([ymin, xmin, ymax, xmax], dtype=np.float32)
bboxes.append(bbox)
bboxes = np.asarray(bboxes, np.float32)
return bboxes
def attention_drop(attention_maps):
batch_size, num_parts, height, width = attention_maps.shape
masks = []
for i in range(batch_size):
attention_map = attention_maps[i]
part_weights = attention_map.mean(axis=1).mean(axis=1)
part_weights = np.sqrt(part_weights)
part_weights = part_weights / np.sum(part_weights)
selected_index = np.random.choice(np.arange(0, num_parts), 1, p=part_weights)[0]
mask = attention_map[selected_index:selected_index + 1,:, : ]
# soft mask
threshold = random.uniform(0.2, 0.5)
mask = (mask < threshold * mask.max()).astype(np.float32)
masks.append(mask)
masks = np.asarray(masks, dtype=np.float32)
return masks
def imshow(img,text,should_save=False):
    npimg = img.numpy()  # convert the torch.FloatTensor to a numpy array
    plt.axis("off")  # hide the axis ticks and labels
    if text:
        plt.text(75, 8, text, style='italic',fontweight='bold',
            bbox={'facecolor':'white', 'alpha':0.8, 'pad':10})  # facecolor is the background colour of the text box
    # matplotlib expects HWC ordering, so transpose the CHW tensor before display
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
def main():
parser = OptionParser()
parser.add_option('-j', '--workers', dest='workers', default=16, type='int',
help='number of data loading workers (default: 16)')
parser.add_option('-e', '--epochs', dest='epochs', default=80, type='int',
help='number of epochs (default: 80)')
parser.add_option('-b', '--batch-size', dest='batch_size', default=16, type='int',
help='batch size (default: 16)')
parser.add_option('-c', '--ckpt', dest='ckpt', default=False,
help='load checkpoint model (default: False)')
parser.add_option('-v', '--verbose', dest='verbose', default=100, type='int',
help='show information for each <verbose> iterations (default: 100)')
parser.add_option('--lr', '--learning-rate', dest='lr', default=1e-3, type='float',
help='learning rate (default: 1e-3)')
    parser.add_option('--sf', '--save-freq', dest='save_freq', default=10, type='int',
                      help='saving frequency of .ckpt models (default: 10)')
parser.add_option('--sd', '--save-dir', dest='save_dir', default='./models',
help='saving directory of .ckpt models (default: ./models)')
parser.add_option('--init', '--initial-training', dest='initial_training', default=1, type='int',
help='train from 1-beginning or 0-resume training (default: 1)')
(options, args) = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(format='%(asctime)s: %(levelname)s: [%(filename)s:%(lineno)d]: %(message)s', level=logging.INFO)
warnings.filterwarnings("ignore")
##################################
# Initialize model
##################################
image_size = (448, 448)
num_classes = 200
num_attentions = 32
start_epoch = 0
feature_net = inception_v3(pretrained=True)
net = WSDAN(num_classes=num_classes, M=num_attentions, net=feature_net)
# feature_center: size of (#classes, #attention_maps, #channel_features)
feature_center = torch.zeros(num_classes, num_attentions, net.num_features * net.expansion).to(torch.device("cuda"))
if options.ckpt:
ckpt = options.ckpt
if options.initial_training == 0:
# Get Name (epoch)
epoch_name = (ckpt.split('/')[-1]).split('.')[0]
start_epoch = int(epoch_name)
# Load ckpt and get state_dict
checkpoint = torch.load(ckpt)
state_dict = checkpoint['state_dict']
# Load weights
net.load_state_dict(state_dict)
logging.info('Network loaded from {}'.format(options.ckpt))
# load feature center
if 'feature_center' in checkpoint:
feature_center = checkpoint['feature_center'].to(torch.device("cuda"))
logging.info('feature_center loaded from {}'.format(options.ckpt))
##################################
# Initialize saving directory
##################################
save_dir = options.save_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
##################################
# Use cuda
##################################
cudnn.benchmark = True
net.to(torch.device("cuda"))
net = nn.DataParallel(net)
##################################
# Load dataset
##################################
train_dataset, validate_dataset = CustomDataset(phase='train', shape=image_size), \
CustomDataset(phase='val' , shape=image_size)
train_loader, validate_loader = DataLoader(train_dataset, batch_size=options.batch_size, shuffle=True,
num_workers=options.workers, pin_memory=True), \
DataLoader(validate_dataset, batch_size=options.batch_size * 4, shuffle=False,
num_workers=options.workers, pin_memory=True)
optimizer = torch.optim.SGD(net.parameters(), lr=options.lr, momentum=0.9, weight_decay=0.00001)
loss = nn.CrossEntropyLoss()
##################################
# Learning rate scheduling
##################################
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=2)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.9)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,gamma=0.9)
##################################
# TRAINING
##################################
logging.info('')
logging.info('Start training: Total epochs: {}, Batch size: {}, Training size: {}, Validation size: {}'.
format(options.epochs, options.batch_size, len(train_dataset), len(validate_dataset)))
writer = SummaryWriter(log_dir='./log', comment='WS-DAN')
for epoch in range(start_epoch, options.epochs):
train(epoch=epoch,
data_loader=train_loader,
net=net,
feature_center=feature_center,
loss=loss,
optimizer=optimizer,
save_freq=options.save_freq,
save_dir=options.save_dir,
verbose=options.verbose,
writer=writer
)
val_loss = validate(data_loader=validate_loader,
net=net,
loss=loss,
verbose=options.verbose)
scheduler.step()
writer.close()
def train(**kwargs):
# Retrieve training configuration
data_loader = kwargs['data_loader']
net = kwargs['net']
loss = kwargs['loss']
optimizer = kwargs['optimizer']
feature_center = kwargs['feature_center']
epoch = kwargs['epoch']
save_freq = kwargs['save_freq']
save_dir = kwargs['save_dir']
verbose = kwargs['verbose']
writer = kwargs['writer']
# Attention Regularization: LA Loss
l2_loss = nn.MSELoss()
# Default Parameters
beta = 0.05
theta_c = 0.5
theta_d = 0.5
crop_size = (448, 448) # size of cropped images for 'See Better'
# metrics initialization
batches = 0
epoch_loss = np.array([0, 0, 0], dtype='float') # Loss on Raw/Crop/Drop Images
epoch_acc = np.array([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]], dtype='float') # Top-1/3/5 Accuracy for Raw/Crop/Drop Images
# begin training
start_time = time.time()
logging.info('Epoch %03d, Learning Rate %g' % (epoch + 1, optimizer.param_groups[0]['lr']))
net.train()
for i, (X, y) in enumerate(data_loader):
batch_start = time.time()
# obtain data for training
X = X.to(torch.device("cuda"))
y = y.to(torch.device("cuda"))
##################################
# Raw Image
##################################
y_pred, feature_matrix, attention_maps = net(X)
# loss
batch_loss_1 = loss(y_pred, y)
epoch_loss[0] += batch_loss_1.item()
# metrics: top-1, top-3, top-5 error
with torch.no_grad():
epoch_acc[0] += accuracy(y_pred, y, topk=(1, 3, 5))
######################################
# Reshape center and bap
####################################
feature_center=feature_center.reshape((feature_center.shape[0],-1))
feature_matrix=feature_matrix.reshape((feature_matrix.shape[0],-1))
#get this batch's batch_center
batch_center = feature_center[y]
#Normalize centermatrix batch_center
batch_center=nn.functional.normalize(batch_center,2,-1)
# Update Feature Center
feature_center[y] += beta * (feature_matrix.detach() - batch_center)
# loss_center = l2_loss(feature_matrix, batch_center)
distance = torch.pow(feature_matrix-batch_center,2)
distance = torch.sum(distance,-1)
loss_center = torch.mean(distance)
##################################
# Attention Cropping
##################################
with torch.no_grad():
crop_masks = F.upsample_bilinear(attention_maps, size=(X.size(2), X.size(3)))
bboxes = attention_crop(crop_masks.cpu().detach().numpy())
bboxes = torch.from_numpy(bboxes).cuda()
box_index = torch.IntTensor(range(crop_masks.size(0))).cuda()
crop_images=CropAndResizeFunction(crop_size[0], crop_size[1], 0)(to_varabile(X),to_varabile(bboxes),to_varabile(box_index))
#loss
y_pred, _, _ = net(crop_images)
batch_loss_2 = loss(y_pred, y)
epoch_loss[1] += batch_loss_2.item()
with torch.no_grad():
epoch_acc[1] += accuracy(y_pred, y, topk=(1, 3, 5))
##################################
# Attention Dropping
##################################
with torch.no_grad():
crop_masks = F.upsample_bilinear(attention_maps, size=(X.size(2), X.size(3)))
mask = attention_drop(crop_masks.cpu().detach().numpy())
mask = torch.from_numpy(mask).cuda()
drop_images = X * mask
# loss
y_pred, _, _ = net(drop_images)
batch_loss_3 = loss(y_pred, y)
epoch_loss[2] += batch_loss_3.item()
with torch.no_grad():
epoch_acc[2] += accuracy(y_pred, y, topk=(1, 3, 5))
        total_loss = 1/3.0*batch_loss_1 + 1/3.0*batch_loss_2 + 1/3.0*batch_loss_3 + loss_center
        # total_loss = 1/2.0 * batch_loss_1 + 1/2.0 * batch_loss_2 + loss_center
        optimizer.zero_grad()
        total_loss.backward()
optimizer.step()
# end of this batch
batches += 1
batch_end = time.time()
if (i + 1) % verbose == 0:
logging.info('\tBatch %d: (Raw) Loss %.4f, Accuracy: (%.2f, %.2f, %.2f), (Crop) Loss %.4f, Accuracy: (%.2f, %.2f, %.2f), (Drop) Loss %.4f, Accuracy: (%.2f, %.2f, %.2f), Time %3.2f' %
(i + 1,
epoch_loss[0] / batches, epoch_acc[0, 0] / batches, epoch_acc[0, 1] / batches, epoch_acc[0, 2] / batches,
epoch_loss[1] / batches, epoch_acc[1, 0] / batches, epoch_acc[1, 1] / batches, epoch_acc[1, 2] / batches,
epoch_loss[2] / batches, epoch_acc[2, 0] / batches, epoch_acc[2, 1] / batches, epoch_acc[2, 2] / batches,
batch_end - batch_start))
writer.add_image('raw_img', X[0], (epoch+1) * 100+(i + 1) / verbose)
# writer.add_image('crop_mask', crop_mask[0], (epoch+1) * 100+(i + 1) / verbose)
# writer.add_image('crop_img', crop_images[0], (epoch+1) * 100+(i + 1) / verbose)
# writer.add_image('drop_mask', drop_mask[0], (epoch+1) * 100+(i + 1) / verbose)
# writer.add_image('drop_img', drop_images[0], (epoch+1) * 100+(i + 1) / verbose)
# crop_mask = F.upsample_bilinear(attention_maps, size=(X.size(2), X.size(3))) > theta_c
# writer.add_image('attention_img', (X * crop_masks.float())[0], (epoch+1) * 100+(i + 1) / verbose)
# print(type(attention_map[0]))
# writer.add_image('attention_img',generate_attention_image(X[0],attention_map[0].cpu().numpy()) , (epoch + 1) * 100 + (i + 1) / verbose)
# save checkpoint model
if epoch % save_freq == 0:
state_dict = net.module.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].cpu()
torch.save({
'epoch': epoch,
'save_dir': save_dir,
'state_dict': state_dict,
'feature_center': feature_center.cpu()},
os.path.join(save_dir, '%03d.ckpt' % (epoch + 1)))
# end of this epoch
end_time = time.time()
# metrics for average
epoch_loss /= batches
epoch_acc /= batches
# show information for this epoch
logging.info('Train: (Raw) Loss %.4f, Accuracy: (%.2f, %.2f, %.2f), (Crop) Loss %.4f, Accuracy: (%.2f, %.2f, %.2f), (Drop) Loss %.4f, Accuracy: (%.2f, %.2f, %.2f), Time %3.2f'%
(epoch_loss[0], epoch_acc[0, 0], epoch_acc[0, 1], epoch_acc[0, 2],
epoch_loss[1], epoch_acc[1, 0], epoch_acc[1, 1], epoch_acc[1, 2],
epoch_loss[2], epoch_acc[2, 0], epoch_acc[2, 1], epoch_acc[2, 2],
end_time - start_time))
writer.add_scalars('scalar/train',{'acc_raw':epoch_acc[0, 0],'acc_crop':epoch_acc[1, 0],'acc_drop':epoch_acc[2, 0]},epoch)
writer.add_scalars('scalar/train',{'loss_raw':epoch_loss[0],'loss_crop':epoch_loss[1],'loss_drop':epoch_loss[2]},epoch)
def validate(**kwargs):
# Retrieve training configuration
data_loader = kwargs['data_loader']
net = kwargs['net']
loss = kwargs['loss']
verbose = kwargs['verbose']
# metrics initialization
batches = 0
epoch_loss = 0
epoch_acc = np.array([0, 0, 0], dtype='float') # top - 1, 3, 5
# begin validation
start_time = time.time()
net.eval()
with torch.no_grad():
for i, (X, y) in enumerate(data_loader):
batch_start = time.time()
# obtain data
X = X.to(torch.device("cuda"))
y = y.to(torch.device("cuda"))
##################################
# Raw Image
##################################
y_pred_raw, feature_matrix, attention_maps = net(X)
##################################
# Object Localization and Refinement
##################################
attention_maps = torch.mean(attention_maps, dim=1, keepdim=True)
attention_maps = F.upsample_bilinear(attention_maps,size=(X.size(2),X.size(3)))
bboxes = mask2bbox(attention_maps.cpu().detach().numpy())
bboxes = torch.from_numpy(bboxes).cuda()
box_index = torch.IntTensor(range(attention_maps.size(0))).cuda()
crop_images = CropAndResizeFunction(X.size(2),X.size(3),0)(to_varabile(X),to_varabile(bboxes),to_varabile(box_index))
y_pred_crop, _, _ = net(crop_images)
# crop_mask = F.upsample_bilinear(attention_map, size=(X.size(2), X.size(3))) > theta_c
# crop_images = []
# for batch_index in range(crop_mask.size(0)):
# nonzero_indices = torch.nonzero(crop_mask[batch_index, 0, ...])
# height_min = nonzero_indices[:, 0].min()
# height_max = nonzero_indices[:, 0].max()
# width_min = nonzero_indices[:, 1].min()
# width_max = nonzero_indices[:, 1].max()
# crop_images.append(F.upsample_bilinear(X[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max], size=crop_size))
# crop_images = torch.cat(crop_images, dim=0)
#
# y_pred_crop, _, _ = net(crop_images)
# final prediction
# y_pred = (y_pred_raw + y_pred_crop) / 2.0
y_pred = torch.log(F.softmax(y_pred_raw)*0.5+F.softmax(y_pred_crop)*0.5)
# y_pred = y_pred_raw
# loss
batch_loss = loss(y_pred, y)
epoch_loss += batch_loss.item()
# metrics: top-1, top-3, top-5 error
epoch_acc += accuracy(y_pred, y, topk=(1, 3, 5))
# end of this batch
batches += 1
batch_end = time.time()
if (i + 1) % verbose == 0:
logging.info('\tBatch %d: Loss %.5f, Accuracy: Top-1 %.2f, Top-3 %.2f, Top-5 %.2f, Time %3.2f' %
(i + 1, epoch_loss / batches, epoch_acc[0] / batches, epoch_acc[1] / batches, epoch_acc[2] / batches, batch_end - batch_start))
# end of validation
end_time = time.time()
# metrics for average
epoch_loss /= batches
epoch_acc /= batches
# show information for this epoch
logging.info('Valid: Loss %.5f, Accuracy: Top-1 %.2f, Top-3 %.2f, Top-5 %.2f, Time %3.2f'%
(epoch_loss, epoch_acc[0], epoch_acc[1], epoch_acc[2], end_time - start_time))
logging.info('')
return epoch_loss
if __name__ == '__main__':
main()
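# --- Hedged usage example (added) ---
# Illustrative invocations only; the file name train.py is an assumption, and the
# local `models`/`dataset` packages imported above must be available on the path.
#   python train.py --epochs 80 --batch-size 16 --lr 1e-3 --save-dir ./models
#   python train.py --ckpt ./models/010.ckpt --init 0    # resume from epoch 10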
|
python
|
import pathlib
import re
import sys
import setuptools
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
errno = tox.cmdline(self.test_args)
sys.exit(errno)
cowork_init = (pathlib.Path('cowork') / '__init__.py').read_text()
match = re.search(r"^__version__ = '(.+)'$", cowork_init, re.MULTILINE)
version = match.group(1)
with open('README.rst') as reader:
readme = reader.read()
setuptools.setup(
name='cowork',
version=version,
description='Cowork',
long_description=readme,
long_description_content_type='text/x-rst',
author='Grant Jenks',
author_email='[email protected]',
url='http://www.grantjenks.com/docs/jupyter-cowork/',
license='Apache 2.0',
packages=['cowork'],
include_package_data=True,
tests_require=['tox'],
cmdclass={'test': Tox},
install_requires=[
'dj_database_url',
'django-cors-headers',
'django==3.2.*',
'ipython',
'tornado',
],
project_urls={
'Documentation': 'http://www.grantjenks.com/docs/jupyter-cowork/',
'Funding': 'https://gum.co/jupyter-cowork',
'Source': 'https://github.com/grantjenks/jupyter-cowork',
'Tracker': 'https://github.com/grantjenks/jupyter-cowork/issues',
},
classifiers=(
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
),
)
|
python
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .feedback import Feedback
class SearchResultFeedback(Feedback):
"""
Database model representing feedback about search results (e.g. empty results).
"""
search_query = models.CharField(max_length=1000, verbose_name=_("search term"))
@property
def object_name(self):
"""
This property returns the name of the object this feedback comments on.
:return: The name of the object this feedback refers to
:rtype: str
"""
return _("Search results for {}").format(self.search_query)
@property
def object_url(self):
"""
This property returns the url to the object this feedback comments on.
:return: The url to the referred object
:rtype: str
"""
return ""
@property
def related_feedback(self):
"""
This property returns all feedback entries which relate to the same object and have the same is_technical value.
:return: The queryset of related feedback
:rtype: ~django.db.models.query.QuerySet [ ~integreat_cms.cms.models.feedback.search_result_feedback.SearchResultFeedback ]
"""
return SearchResultFeedback.objects.filter(
region=self.region,
language=self.language,
search_query=self.search_query,
is_technical=self.is_technical,
)
class Meta:
#: The verbose name of the model
verbose_name = _("search result feedback")
#: The plural verbose name of the model
verbose_name_plural = _("search result feedback")
#: The default permissions for this model
default_permissions = ()
|
python
|
name = "wiki_archive"
|
python
|