content (stringlengths 0-894k) | type (stringclasses 2 values) |
---|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
__all__ = ("main",)
def main():
from lyricli.console import main
main()
if __name__ == "__main__":
main() | python |
import nextcord
from util.mongo import Document
class afk_utils:
def __init__(self, bot):
self.db = bot.db
self.afk_db = Document(self.db, "afk_user_db")
async def create_afk(self, user, guild_id, reason):
        data = {
            "_id": user.id,
            "guild_id": guild_id,
            "name": user.name,
            "reason": reason
        }
        await self.afk_db.upsert(data)
async def fetch_afk(self, id):
data = await self.afk_db.find_by_id(id)
return data
async def delete_afk(self, id):
await self.afk_db.delete_by_id(id) | python |
from .core.serializers import *
| python |
# firstline
# Foo header content
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# lastline
import os
a = 1
| python |
from app.schemas.game_schema import Positions, Action
from .action_handler import ActionHandler
class MoveActionHandler(ActionHandler):
@property
def activity_text(self):
return f"{self.player} moved"
def execute(self):
move_where = self.payload.move_where
player_position = self.game.players_position.get(self.player)
assert self.game.is_empty(move_where)
if player_position in Positions.jr_positions():
assert move_where == Positions.JR_B
elif player_position in Positions.fd_positions():
assert move_where == Positions.FD_B
elif player_position in Positions.tr_positions():
assert move_where in [Positions.FD_B, Positions.JR_B]
elif player_position == Positions.JR_B:
assert move_where in [Positions.TR, Positions.JR]
elif player_position == Positions.FD_B:
assert move_where in [Positions.FD, Positions.TR]
self.game.set_position(self.player, move_where)
self.game.next_turn()
self.game.last_action = Action(
action_type=Action.ActionType.MOVE,
)
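# Hedged usage sketch (constructor arguments and the game/payload objects are
# assumptions for illustration; the ActionHandler base interface is not shown here):
#
#   handler = MoveActionHandler(game=game, player=player, payload=payload)
#   handler.execute()       # validates the move, updates the position, advances the turn
#   handler.activity_text   # -> "<player> moved"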
| python |
import logging
import os
import re
from scanapi.errors import BadConfigurationError
from scanapi.evaluators.code_evaluator import CodeEvaluator
logger = logging.getLogger(__name__)
class StringEvaluator:
variable_pattern = re.compile(
r"(?P<something_before>\w*)(?P<start>\${)(?P<variable>[\w|-]*)(?P<end>})(?P<something_after>\w*)"
) # ${<variable>}
@classmethod
def evaluate(cls, sequence, spec_vars, is_a_test_case=False):
sequence = cls._evaluate_env_var(sequence)
sequence = cls._evaluate_custom_var(sequence, spec_vars)
return CodeEvaluator.evaluate(sequence, spec_vars, is_a_test_case)
@classmethod
def _evaluate_env_var(cls, sequence):
matches = cls.variable_pattern.finditer(sequence)
for match in matches:
variable_name = match.group("variable")
if any(letter.islower() for letter in variable_name):
continue
try:
variable_value = os.environ[variable_name]
except KeyError as e:
raise BadConfigurationError(e)
sequence = cls.replace_var_with_value(
sequence, match.group(), variable_value
)
return sequence
@classmethod
def _evaluate_custom_var(cls, sequence, spec_vars):
matches = cls.variable_pattern.finditer(sequence)
for match in matches:
variable_name = match.group("variable")
if variable_name.isupper():
continue
if not spec_vars.get(variable_name):
continue
variable_value = spec_vars.get(variable_name)
sequence = cls.replace_var_with_value(
sequence, match.group(), variable_value
)
return sequence
@classmethod
def replace_var_with_value(cls, sequence, variable, variable_value):
if variable == sequence:
return variable_value
variable = re.escape(variable)
return re.sub(variable, str(variable_value), sequence)
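# Hedged usage sketch (illustrative values only; the public evaluate() additionally
# runs the result through CodeEvaluator.evaluate, whose behaviour is not shown here):
#
#   os.environ["FAKE_TOKEN"] = "abc123"
#   StringEvaluator._evaluate_env_var("token is ${FAKE_TOKEN}")
#   # -> "token is abc123"
#   StringEvaluator._evaluate_custom_var("user is ${user_id}", {"user_id": 7})
#   # -> "user is 7"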
| python |
"""
A small tool to resize all Frames in a ByteBlower GUI project.
"""
import sys
import lxml.etree as ET
import random
if len(sys.argv) != 4:
    print('Expected 3 arguments: <src bbp> <target bbp> <new frame size>')
    sys.exit(-1)
filename = sys.argv[1]
target_name = sys.argv[2]
try:
    new_size = int(sys.argv[3])
except ValueError:
    print('The new frame size should be an integer, not "%s"' % sys.argv[3])
    sys.exit(-1)
try:
    with open(filename, 'r') as f:
        tree = ET.parse(f)
except (IOError, ET.XMLSyntaxError):
    print("Can't parse '%s'" % filename)
    sys.exit(-1)
def resize_string(in_str, target_size, filler_char='a'):
"""
Resizes a string to its new size.
"""
new_string = in_str[:target_size]
new_string += filler_char * (target_size - len(new_string))
return new_string
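# Illustrative behaviour of resize_string (values chosen for this example):
#   resize_string('abcdef', 4) -> 'abcd'   (truncated to the target size)
#   resize_string('ab', 4)     -> 'abaa'   (padded with the filler character)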
for fr in tree.iterfind('Frame'):
data = fr.attrib['bytesHexString']
fr.attrib['bytesHexString'] = resize_string(data, 2 * new_size)
tree.write(target_name)
| python |
# This Source Code Form is subject to the terms of the MIT
# License. If a copy of the same was not distributed with this
# file, You can obtain one at
# https://github.com/akhilpandey95/altpred/blob/master/LICENSE.
import sys
import json
import certifi
import urllib3
import requests
import numpy as np
import pandas as pd
from tqdm import tqdm
from ast import literal_eval
from preprocessing import LDA
from bs4 import BeautifulSoup as BS
from collections import defaultdict
# function for computing sigmoid of a value
def sigmoid(value, derivative=False):
"""
Return the sigmoid of a numeric value
Parameters
----------
arg1 | value: int
        The numeric value intended to convert into a continuous range
Returns
-------
Float
float
"""
try:
# compute the sigmoid
        result = 1. / (1. + np.exp(-value))
# check if derivative is required
if derivative:
# return the sigmoid
return result * (1. - result)
# return the sigmoid
return result
except:
# return zero
return np.zeros(1)[0]
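# Quick sanity check for the function above (illustrative):
#   sigmoid(0) -> 0.5, sigmoid(0, derivative=True) -> 0.25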
# function for downloading the content from a URI
def obtain_content(uri):
"""
Return the decoded response after making a get request to the URI
Parameters
----------
arg1 | uri: str
        The URI from which the content is downloaded and decoded
Returns
-------
String
str
"""
try:
# create a urllib object
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
# establish a connection and make a GET request to the URI
res = http.request('GET', uri)
# decode the value
res = res.data.decode('utf-8')
# return the decoded response
return res
except:
return None
# add title of the scholarly paper
def soupify(year):
"""
Prepare a soup object by storing information all the articles in a list
Parameters
----------
arg1 | year: int
The year from which we want to extrapolate information
Returns
-------
Dictionary
collections.defaultdict
"""
try:
# create the url string
url = 'https://www.altmetric.com/top100/'
# obtain the content for a given year
html = obtain_content(url + str(year))
# create a beautiful soup object
soup = BS(html, 'html.parser')
# return the soup
return soup.find_all('article')
except:
return None
# function for extracting article information from the soup object
def extract_article_information_year_2014(soup):
"""
Collect article information from the soup object
Parameters
----------
arg1 | soup: bs4.element.Tag
        The specific article from which we are looking to extrapolate information
Returns
-------
Dictionary
collections.defaultdict
"""
try:
# get the soup object
data = defaultdict(dict)
# add the article rank
data['ranking'] = int(soup.find('div', class_='ranking').text)
# add the altmetric id
data['altmetric_id'] = int(soup.find('div', class_='metrics').find('a')['href'].split('=')[1])
# add the DOI of the article
data['doi'] = soup.find('h2').find('a')['href']
# add the title of the article
data['title'] = soup.find('h2').find('a').getText()
# add the author information of the article
data['authors'] = soup.find('div', class_='subtitle').text.strip()
# add the journal name of the article
data['journal'] = [x.find_next('td').text for x in \
soup.find('div', class_='details').find('table', class_='article-data') \
.find_all('th') if 'Journal' in x.text][0]
        # add the category of the article
data['category'] = [x.find_next('td').text for x in \
soup.find('div', class_='details').find('table', class_='article-data') \
.find_all('th') if 'Category' in x.text][0]
# add the tweet count of the article
data['tweet_count'] = int([x.next_sibling.text.split(' ') \
for x in \
soup.find('div', class_='mentions').find_all('dt') if 'twitter' in x.text][0][0])
# return the data
return data
except:
return None
# function for iterating the information extraction from the soup object
def get_info_top_n(n, year, function, data, save=False):
"""
Iterate and collect article information from the soup object
for n articles belonging to a given year
Parameters
----------
arg1 | n: int
        Number of articles from which we are looking to extrapolate information
arg2 | year: int
        The specific year from which we are looking to extrapolate information
arg3 | function: function
The function needed to extract article information for that specific year
    arg4 | data: function
        The function that returns the soup object for a given year (e.g. soupify)
Returns
-------
Dataframe
pandas.DataFrame
"""
try:
# iterate over the function given as input to obtain article information
result = [function(data(year)[number]) for number in tqdm(range(n))]
# convert the dict into a dataframe
result = pd.DataFrame(result)
# check if the save flag is given as an input
# in order to write the data to a CSV file
if save:
# save the dataframe into a csv
result.to_csv(str(function) + '_' + str(year) + '.csv', encoding='utf-8')
# return the data
return result
except:
return None
if __name__ == '__main__':
    # extract the information for the top 3 articles from the 2014 list
print(get_info_top_n(3, 2014, extract_article_information_year_2014, soupify))
# read a dataframe
data = pd.read_csv('altmetrics_j2014_full_gamma.csv')
# preprocess the dataframe
data = data.assign(pub_subjects = list(map(literal_eval, data['pub_subjects'])))
# remove NA values
data = data.loc[data.pub_subjects.apply(len) != 0].reset_index(drop=True)
# obtain the X samples
X = [', '.join(x) for x in data['pub_subjects']]
# init the LDA class object
model = LDA()
# tokenize and normalize the input
input = [model.normalize(doc).split() for doc in tqdm(X[:10])]
# train the LDA model
output = model.train(input, 10, 5)
# print the topics
print(output.print_topics(num_topics=10, num_words=5))
else:
sys.exit(0)
| python |
#!/usr/bin/env python
import os, sys, json, re, shutil
from utils.queryBuilder import postQuery
def prep_inputs(ml_dir, ctx_file, in_file):
# get context
with open(ctx_file) as f:
j = json.load(f)
# get kwargs
kwargs = j #mstarch - with containerization, "kwargs" are in context at top level #json.loads(j['rule']['kwargs'])
# get classmap file and version
cm_file = os.path.basename(kwargs['classmap_file'].strip())
match = re.search(r'classmap_(datav.*?)\.json', cm_file)
if not match:
raise RuntimeError("Failed to extract classmap version: %s" % cm_file)
cm_version = match.group(1)
# get features file and version
ft_file = os.path.basename(kwargs['feat_file'].strip())
match = re.search(r'(featv.*?)\.json', ft_file)
if not match:
raise RuntimeError("Failed to extract feature version: %s" % ft_file)
ft_version = match.group(1)
# set classifier ID
clf_version = kwargs['clf_version']
clf_type = kwargs['clf_type']
    username = j['username'] #mstarch - username is a parameter
rule_name = j['name'] #mstarch - rule_name is a parameter
clf_name = "predictor_model-phunw_clfv%s_%s_%s-%s-%s" % (clf_version, cm_version,
ft_version, username, rule_name)
# get urls
ret, status = postQuery({ 'query': j['query']}) #mstarch - passthrough is now a parameter
urls = [i['url'] for i in ret]
# create input json
input = {
"clf_name": clf_name,
"clf_type": clf_type,
"classmap_file": cm_file,
"feat_file": ft_file,
"crossvalidate": 0,
"saveclf": 1,
"cacheoutput": 0,
"urls": urls,
}
# create product directory and chdir
os.makedirs(clf_name)
os.chdir(clf_name)
# write input file
with open(in_file, 'w') as f:
json.dump(input, f, indent=2)
# copy classmap and feature files
shutil.copy(os.path.join(ml_dir, 'classmaps', cm_file), cm_file)
shutil.copy(os.path.join(ml_dir, 'features', ft_file), ft_file)
if __name__ == "__main__":
prep_inputs(sys.argv[1], sys.argv[2], sys.argv[3])
| python |
# Write a program that reads the name and weight of several people, storing
# everything in a list. At the end, show:
# A) How many people were registered
# B) A list of the heaviest people
# C) A list of the lightest people
temp = []
pessoas = []
mai = men = 0
while True:
temp.append(str(input('Nome: ')))
temp.append(float(input('Peso: ')))
pessoas.append(temp[:])
if len(pessoas) == 1:
mai = men = temp[1]
else:
if temp[1] > mai:
mai = temp[1]
if temp[1] < men:
men = temp[1]
temp.clear()
esc = str(input('Deseja continuar? [S/N]: '))
if esc in 'Nn':
break
print(f'Foram cadastradas {len(pessoas)} pessoas')
print(f'O maior peso foi de {mai}Kg. Peso de ', end='')
for p in pessoas:
if p[1] == mai:
print(f'{p[0]} ', end='')
print()
print(f'O menor peso foi de {men}Kg. Peso de ', end='')
for p in pessoas:
if p[1] == men:
print(f'{p[0]} ', end='')
| python |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, Pureport, Inc.
# All Rights Reserved
"""
The credentials module handles loading, parsing and returning a valid
object that can be passed into a :class:`pureport.session.Session`
instance to authenticate to the Pureport API. This module will search
for credentials in well-known locations as well as attempt to load
credentials from the current environment.
The method of precedence for credentials is:
1) Environment
2) Profile in ~/.pureport/credentials
3) "default" profile in ~/.pureport/credentials
If no valid API key and/or API secret could be loaded, then a
:class:`pureport.exceptions.PureportError` exception is raised.
"""
from __future__ import absolute_import
import os
import json
import logging
from collections import namedtuple
import yaml
from pureport import defaults
from pureport.exceptions import PureportError
log = logging.getLogger(__name__)
__all__ = ('default',)
def default():
"""Attempts to discover the configured credentials
This function will attempt to find the credentials to
be used for authorizing a Pureport API session. It will
also discover the Pureport base API URL. The function
    follows a strict order for loading credentials.
In order of precedence, the following credentials are used:
1) Loaded from the current environment
2) Loaded from ~/.pureport/credentials.[yml|yaml|json]
    The function will use the following environment variables:
PUREPORT_API_KEY
PUREPORT_API_SECRET
PUREPORT_API_BASE_URL
If the environment variables are not set, then this function
will use the information in ~/.pureport/credentials.[yml|yaml|json].
The credentials file will be used in the following order:
1) ~/.pureport/credentials.yml
2) ~/.pureport/credentials.yaml
3) ~/.pureport/credentials.json
The credentials file has the following structure:
.. code-block:: yaml
---
current_profile: <string, default='default'>
profiles:
<string>:
api_url: <string>
api_key: <string>
api_secret: <string>
If no valid credentials are able to be found, then the function will
raise an exception.
This function will return a tuple of two elements. The first
element will be a valid instance of
:class:`pureport.credentials.Credentials`. The second element will
be a string that represents the Pureport API base url to
use. The tuple values can be used as the required arguments
when creating a new instance of :class:`pureport.session.Session`.
:return: a valid credentials instance, an api base url
:rtype: tuple
:raises: :class:`pureport.exceptions.PureportError`
"""
file_path = defaults.credentials_path
file_name = defaults.credentials_filename
for ext in ('yml', 'yaml', 'json'):
deserializer = json.loads if ext == 'json' else yaml.safe_load
fp = os.path.join(file_path, '{}.{}'.format(file_name, ext))
if os.path.exists(fp):
with open(fp) as f:
log.info("loading credentials file {}".format(fp))
content = deserializer(f.read())
break
else:
content = None
values = {}
if content:
profile = content.get('current_profile', 'default')
profiles = content.get('profiles', {})
values = profiles.get(profile, profiles.get('default'))
kwargs = {
'key': defaults.api_key or values.get('api_key'),
'secret': defaults.api_secret or values.get('api_secret')
}
base_url = defaults.api_base_url or values.get('api_url')
if any((kwargs['key'] is None, kwargs['secret'] is None)):
raise PureportError("missing or invalid credentials")
return namedtuple('Credentials', kwargs)(**kwargs), base_url
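# Hedged usage sketch (per the docstring above, the returned tuple feeds straight
# into a Session; the import path is taken from the docstring, not verified here):
#
#   from pureport.session import Session
#   credentials, base_url = default()
#   session = Session(credentials, base_url)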
| python |
import sublime, sublime_plugin
import winreg, subprocess
import re
from os import path
CONEMU = "C:\\Program Files\\ConEmu\\ConEmu64.exe"
CONEMUC = "C:\\Program Files\\ConEmu\\ConEmu\\ConEmuC64.exe"
try:  # can we find ConEmu from App Paths?
    apps = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths")
    try:
        subkeys, _, _ = winreg.QueryInfoKey(apps)
        for k in range(subkeys):
            app = winreg.EnumKey(apps, k)
            if app.startswith("ConEmu"):
                cemu = winreg.QueryValue(apps, app)
                if path.exists(cemu):
                    CONEMU = cemu
                    dirName, fileName = path.split(cemu)
                    filePath = path.join(dirName, "ConEmu", fileName.replace('ConEmu', 'ConEmuC'))
                    if path.exists(filePath):
                        CONEMUC = filePath
                    break
    finally:
        winreg.CloseKey(apps)
except OSError:
    pass  # App Paths lookup failed; fall back to the default install paths above
# TODO: bundle Expand-Alias with functions to save it to disk and/or send it to sublime
# TODO: cmder style bundle including ConEmu, Sublime, PSReadLine and these macros
si = subprocess.STARTUPINFO()
si.dwFlags = subprocess.STARTF_USESHOWWINDOW
si.wShowWindow = subprocess.SW_HIDE
### For best results, we use PSReadLine and rely on its hotkeys:
### We need KillLine and Yank set so we can copy/paste any existing command
# Set-PSReadlineKeyHandler Ctrl+k KillLine
# Set-PSReadlineKeyHandler Ctrl+i Yank
# { "keys": ["f5"], "command": "conemu_script" }
class ConemuScriptCommand(sublime_plugin.TextCommand):
def run(self, edit):
# duplicate ISE behavior:
if self.view.file_name():
if self.view.is_dirty():
self.view.run_command("save")
script = self.view.file_name()
else:
script = self.view.substr(sublime.Region(0, self.view.size()))
script = re.sub(r'\\', r'\\\\', script)
# Use PSReadline KillLine hotkey
subprocess.call([CONEMUC, "-GUIMACRO:0", "KEYS", "Home", "^k"], startupinfo=si)
subprocess.call([CONEMUC, "-GUIMACRO:0", "PASTE", "2", script + "\\n"], startupinfo=si)
# Use PSReadline Yank hotkey
subprocess.call([CONEMUC, "-GUIMACRO:0", "KEYS", "End", "^i"], startupinfo=si)
subprocess.call([CONEMU, "-SHOWHIDE"], startupinfo=si)
# { "keys": ["f8"], "command": "conemu_selection" }
class ConemuSelectionCommand(sublime_plugin.TextCommand):
def run(self, edit):
script = []
for region in self.view.sel():
if region.empty():
## If we wanted to duplicate ISE's bad behavior, we could:
# view.run_command("expand_selection", args={"to":"line"})
                ## Instead, we'll just get the line contents without selecting them:
script += [self.view.substr(self.view.line(region))]
else:
script += [self.view.substr(region)]
script = "\n".join(script) + "\n"
script = re.sub(r'\\', r'\\\\', script)
# Use PSReadline KillLine hotkey
subprocess.call([CONEMUC, "-GUIMACRO:0", "KEYS", "Home", "^k"], startupinfo=si)
subprocess.call([CONEMUC, "-GUIMACRO:0", "PASTE", "2", script], startupinfo=si)
# Use PSReadline Yank hotkey
subprocess.call([CONEMUC, "-GUIMACRO:0", "KEYS", "End", "^i"], startupinfo=si)
subprocess.call([CONEMU, "-SHOWHIDE"], startupinfo=si)
| python |
"""
Created on Sat Mar 09 16:33:01 2020
@author: Pieter Cawood
"""
from mesa import Model
from mesa.time import RandomActivation
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
from mesa_agents import Parking, Wall, Space, Robot
from ta_world import MAPNODETYPES
class Warehouse(Model):
def __init__(self, world, tsp_seqs, last_sim_step):
self.schedule = RandomActivation(self)
self.world = world
self.tsp_seq = tsp_seqs
self.last_sim_step = last_sim_step
self.time_step = 0
self.task_count = 0
self.grid = MultiGrid(world.width, world.height, torus=False)
self.data_collector = DataCollector(
{"task_count": "task_count"}
)
self.robot_count = 0
# Set up MultiGrid from csv map
for element in world:
if world[element] == MAPNODETYPES.WALL:
# Wall
agent = Wall(element, self)
self.grid.place_agent(agent, element)
self.schedule.add(agent)
# Task endpoint
elif world[element] == MAPNODETYPES.TASK_ENDPOINT:
agent = Space(element, self)
self.grid.place_agent(agent, element)
self.schedule.add(agent)
# Robot spawn endpoint
elif world[element] == MAPNODETYPES.PARKING:
# Parking location
agent = Parking(element, self)
self.grid.place_agent(agent, element)
self.schedule.add(agent)
# Robot location (At park initially)
self.robot_count += 1
agent = Robot(element, self, world.agents[self.robot_count].path)
self.grid.place_agent(agent, element)
self.schedule.add(agent)
self.running = True
def step(self):
new_task_count = 0
# Update tasks counter
for seq_id in self.tsp_seq:
if self.tsp_seq[seq_id].qsize() > 0:
if self.time_step >= self.tsp_seq[seq_id].queue[0].release_time:
if self.time_step in self.world.agents[seq_id].path:
if self.tsp_seq[seq_id].queue[0].delivery_endpoint == \
self.world.agents[seq_id].path[self.time_step]:
self.tsp_seq[seq_id].get()
new_task_count += self.tsp_seq[seq_id].qsize()
self.task_count = new_task_count
# Stop running once finished
if self.time_step >= self.last_sim_step:
self.running = False
# Next step
self.time_step += 1
self.schedule.step()
self.data_collector.collect(self)
| python |
'''
Implement strStr().
Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
Example 1:
Input: haystack = "hello", needle = "ll"
Output: 2
Example 2:
Input: haystack = "aaaaa", needle = "bba"
Output: -1
Clarification:
What should we return when needle is an empty string? This is a great question to ask during an interview.
For the purpose of this problem, we will return 0 when needle is an empty string. This is consistent with C's strstr() and Java's indexOf().
'''
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
        if haystack is None:
return -1
if needle == '':
return 0
for i in range(0, len(haystack) - len(needle) + 1):
if needle == haystack[i:i+len(needle)]:
return i
return -1
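# Note: the scan above is the straightforward O((n - m) * m) substring check;
# KMP or the Z-algorithm would reduce it to O(n + m) at the cost of more code.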
if __name__ == "__main__":
solution = Solution()
print(solution.strStr('a', 'a')) | python |
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
players_image_urls = []
url = 'https://www.pro-football-reference.com/players/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:92.0) Gecko/20100101 Firefox/92.0'}
page = requests.get(url,headers=headers, timeout=2, allow_redirects = True )
soup = bs(page.content, 'html.parser')
ref_alphabet = soup.find('ul',{'class':'page_index'})
ref_li = ref_alphabet.find_all('li')
for j in ref_li:
while True:
try:
ref_li_letter = j.find('a', href=True)
for a_href in j.find_all('a', href=True):
alphabet_letter_ref = a_href['href']
base = 'https://www.pro-football-reference.com'
url = base + str(alphabet_letter_ref)
page = requests.get(url,headers=headers, timeout=2, allow_redirects = True )
soup = bs(page.content, 'html.parser')
players_section = soup.find('div',{'id':'div_players'})
for a_href_players in players_section.find_all('a', href=True):
player_link = a_href_players['href']
base = 'https://www.pro-football-reference.com'
url = base + str(player_link)
page = requests.get(url,headers=headers, timeout=2, allow_redirects = True )
soup = bs(page.content, 'html.parser')
                    try:
                        if soup.find('div', {'class': 'media-item'}):
                            player_img = soup.find('div', {'class': 'media-item'})
                            img = player_img.find('img')
                            img_src = img['src']
                            # Player Name
                            player_name = soup.find('h1', {'itemprop': 'name'})
                            player_name_span = player_name.find('span')
                            player_name_text = player_name_span.text
                            player_image = {
                                "Player": player_name_text,
                                "Player_img": img_src
                            }
                            players_image_urls.append(player_image)
                    except (AttributeError, KeyError, TypeError):
                        # skip players whose page has no usable headshot markup
                        pass
break
except:
break
print('process done')
player_img_df = pd.DataFrame(players_image_urls)
print(player_img_df.head)
player_img_df.to_csv('players_img_edited.csv', index=False)
| python |
import pytest
from ethdata import ethdata
class TestAccountSetters(object):
def test_setter_1_address(self):
my_account = ethdata.Account("0x1cB424cB77B19143825004d0bd0a4BEE2c5e91A8")
assert my_account.address == "0x1cb424cb77b19143825004d0bd0a4bee2c5e91a8"
with pytest.raises(ValueError):
my_account.address = ""
def test_setter_2_transaction_receipts(self):
my_account = ethdata.Account("0x1cB424cB77B19143825004d0bd0a4BEE2c5e91A8")
my_account.transaction_receipts = "tx"
assert my_account.transaction_receipts == "tx"
def test_setter_3_query_range(self):
my_account = ethdata.Account("0x1cB424cB77B19143825004d0bd0a4BEE2c5e91A8")
assert my_account.query_range == {}
my_account.query_range = {"start": "2018-01-01", "end": "2018-01-02"}
assert my_account.query_range == {"start": "2018-01-01", "end": "2018-01-02"}
my_account.query_range = {"start": "2018-01-03"}
assert my_account.query_range == {"start": "2018-01-03"}
my_account.query_range = {"end": "2018-01-04"}
assert my_account.query_range == {"end": "2018-01-04"}
my_account.query_range = {"key": "value"}
assert my_account.query_range == {} | python |
import struct
from binascii import b2a_hex, a2b_hex
from pymodbus.exceptions import ModbusIOException
from pymodbus.utilities import checkLRC, computeLRC
from pymodbus.framer import ModbusFramer, FRAME_HEADER, BYTE_ORDER
ASCII_FRAME_HEADER = BYTE_ORDER + FRAME_HEADER
# --------------------------------------------------------------------------- #
# Logging
# --------------------------------------------------------------------------- #
import logging
_logger = logging.getLogger(__name__)
# --------------------------------------------------------------------------- #
# Modbus ASCII Message
# --------------------------------------------------------------------------- #
class ModbusAsciiFramer(ModbusFramer):
"""
Modbus ASCII Frame Controller::
[ Start ][Address ][ Function ][ Data ][ LRC ][ End ]
1c 2c 2c Nc 2c 2c
* data can be 0 - 2x252 chars
* end is '\\r\\n' (Carriage return line feed), however the line feed
character can be changed via a special command
* start is ':'
This framer is used for serial transmission. Unlike the RTU protocol,
the data in this framer is transferred in plain text ascii.
"""
def __init__(self, decoder, client=None):
""" Initializes a new instance of the framer
:param decoder: The decoder implementation to use
"""
self._buffer = b''
self._header = {'lrc': '0000', 'len': 0, 'uid': 0x00}
self._hsize = 0x02
self._start = b':'
self._end = b"\r\n"
self.decoder = decoder
self.client = client
# ----------------------------------------------------------------------- #
# Private Helper Functions
# ----------------------------------------------------------------------- #
def decode_data(self, data):
if len(data) > 1:
uid = int(data[1:3], 16)
fcode = int(data[3:5], 16)
return dict(unit=uid, fcode=fcode)
return dict()
def checkFrame(self):
""" Check and decode the next frame
        :returns: True if successful, False otherwise
"""
start = self._buffer.find(self._start)
if start == -1:
return False
if start > 0: # go ahead and skip old bad data
self._buffer = self._buffer[start:]
start = 0
end = self._buffer.find(self._end)
if end != -1:
self._header['len'] = end
self._header['uid'] = int(self._buffer[1:3], 16)
self._header['lrc'] = int(self._buffer[end - 2:end], 16)
data = a2b_hex(self._buffer[start + 1:end - 2])
return checkLRC(data, self._header['lrc'])
return False
def advanceFrame(self):
""" Skip over the current framed message
This allows us to skip over the current message after we have processed
it or determined that it contains an error. It also has to reset the
current frame header handle
"""
self._buffer = self._buffer[self._header['len'] + 2:]
self._header = {'lrc': '0000', 'len': 0, 'uid': 0x00}
def isFrameReady(self):
""" Check if we should continue decode logic
This is meant to be used in a while loop in the decoding phase to let
the decoder know that there is still data in the buffer.
:returns: True if ready, False otherwise
"""
return len(self._buffer) > 1
def addToFrame(self, message):
""" Add the next message to the frame buffer
This should be used before the decoding while loop to add the received
data to the buffer handle.
:param message: The most recent packet
"""
self._buffer += message
def getFrame(self):
""" Get the next frame from the buffer
:returns: The frame data or ''
"""
start = self._hsize + 1
end = self._header['len'] - 2
buffer = self._buffer[start:end]
if end > 0:
return a2b_hex(buffer)
return b''
def resetFrame(self):
""" Reset the entire message frame.
        This allows us to skip over errors that may be in the stream.
It is hard to know if we are simply out of sync or if there is
an error in the stream as we have no way to check the start or
end of the message (python just doesn't have the resolution to
check for millisecond delays).
"""
self._buffer = b''
self._header = {'lrc': '0000', 'len': 0, 'uid': 0x00}
def populateResult(self, result):
""" Populates the modbus result header
The serial packets do not have any header information
that is copied.
:param result: The response packet
"""
result.unit_id = self._header['uid']
# ----------------------------------------------------------------------- #
# Public Member Functions
# ----------------------------------------------------------------------- #
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a
list of unit ids (server) or single unit id(client/server))
:param single: True or False (If True, ignore unit address validation)
"""
if not isinstance(unit, (list, tuple)):
unit = [unit]
single = kwargs.get('single', False)
self.addToFrame(data)
while self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
frame = self.getFrame()
result = self.decoder.decode(frame)
if result is None:
raise ModbusIOException("Unable to decode response")
self.populateResult(result)
self.advanceFrame()
callback(result) # defer this
else:
_logger.error("Not a valid unit id - {}, "
"ignoring!!".format(self._header['uid']))
self.resetFrame()
else:
break
def buildPacket(self, message):
""" Creates a ready to send modbus packet
Built off of a modbus request/response
:param message: The request/response to send
:return: The encoded packet
"""
encoded = message.encode()
buffer = struct.pack(ASCII_FRAME_HEADER, message.unit_id,
message.function_code)
checksum = computeLRC(encoded + buffer)
packet = bytearray()
params = (message.unit_id, message.function_code)
packet.extend(self._start)
packet.extend(('%02x%02x' % params).encode())
packet.extend(b2a_hex(encoded))
packet.extend(('%02x' % checksum).encode())
packet.extend(self._end)
return bytes(packet).upper()
# __END__
| python |
#
# Copyright (c) nexB Inc. and others.
# SPDX-License-Identifier: Apache-2.0
#
# Visit https://aboutcode.org and https://github.com/nexB/ for support and download.
# ScanCode is a trademark of nexB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from os.path import dirname
from os.path import exists
from os.path import join
from commoncode.testcase import FileBasedTesting
from commoncode import fileutils
from extractcode import new_name
class TestNewName(FileBasedTesting):
test_data_dir = join(dirname(__file__), 'data')
def test_new_name_without_extensions(self):
test_dir = self.get_test_loc('new_name/noext', copy=True)
renamed = new_name(join(test_dir, 'test'), is_dir=False)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert 'test_4' == result
renamed = new_name(join(test_dir, 'TEST'), is_dir=False)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert 'TEST_4' == result
renamed = new_name(join(test_dir, 'test_1'), is_dir=True)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert 'test_1_1' == result
def test_new_name_with_extensions(self):
test_dir = self.get_test_loc('new_name/ext', copy=True)
renamed = new_name(join(test_dir, 'test.txt'), is_dir=False)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert 'test_3.txt' == result
renamed = new_name(join(test_dir, 'TEST.txt'), is_dir=False)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert 'TEST_3.txt' == result
renamed = new_name(join(test_dir, 'TEST.tXt'), is_dir=False)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert 'TEST_3.tXt' == result
renamed = new_name(join(test_dir, 'test.txt'), is_dir=True)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert 'test.txt_2' == result
renamed = new_name(join(test_dir, 'teST.txt'), is_dir=True)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert 'teST.txt_2' == result
def test_new_name_with_empties(self):
base_dir = self.get_temp_dir()
self.assertRaises(AssertionError, new_name, '', is_dir=False)
test_file = base_dir + '/'
renamed = new_name(test_file, is_dir=False)
assert renamed
assert not exists(renamed)
test_file = join(base_dir, '.')
renamed = new_name(test_file, is_dir=False)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert '_' == result
test_dir = base_dir + '/'
renamed = new_name(test_dir, is_dir=True)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert result
test_dir = join(base_dir, '.')
renamed = new_name(test_dir, is_dir=True)
assert not exists(renamed)
result = fileutils.file_name(renamed)
assert '_' == result
| python |
b='You Yi Xue Sa Xu Li Li Yuan Dui Huo Sha Leng Pou Hu Guo Bu Rui Wei Sou An Yu Xiang Heng Yang Xiao Yao Fan Bi Ci Heng Tao Liu Fei Zhu Tou Xi Zan Yi Dou Yuan Jiu Zai Bo Ti Ying Tou Yi Nian Shao Ben Gou Ban Mo Gai En She Caan Zhi Yang Jian Yuan Shui Ti Wei Xun Zhi Yi Ren Shi Hu Ne Ye Jian Sui Ying Bao Hu Hu Ye Yang Lian Xi En Dui Zan Zhu Ying Ying Jin Chuang Dan Kuai Yi Ye Jian En Ning Ci Qian Xue Bo Mi Shui Mo Liang Qi Qi Shou Fu Bo Beng Bie Yi Wei Huan Fan Qi Mao Fu Ang Ang Fu Qi Qun Tuo Yi Bo Pian Ba Keoi Xuan Baai Yu Chi Lu Yi Li Zaau Niao Xi Wu Gwing Lei Pu Zhuo Zui Zhuo Chang An Er Yu Leng Fu Zha Hun Chun Sou Bi Bi Zha Song He Li Giu Han Zai Gu Cheng Lou Mo Mi Mai Ao Zhe Zhu Huang Fan Deng Tong Du Wo Wei Ji Chi Lin Biao Long Jian Nie Luo Shen Ngon Gua Nie Yi Ku Wan Wa Qia Bo Kao Ling Gan Gua Hai Kuang Heng Kui Ze Ting Lang Bi Huan Po Yao Wan Ti Sui Kua Dui Ao Jian Mo Kui Kuai An Ma Qing Qiao Kao Hao Duo Xian Nai Suo Jie Pi Pa Song Chang Nie Man Song Ci Xian Kuo Gai Di Pou Tiao Zu' | python |
import json
from rest_framework.test import APITestCase
from django.urls import reverse
from rest_framework import status
from django.contrib.auth import get_user_model
from authors.apps.articles.models import Articles
from authors.apps.profiles.models import Profile
class TestGetEndpoint(APITestCase):
def setUp(self):
""" Prepares table for tests """
self.token = self.get_user_token()
self.slug = "life_love_death"
self.title = "Life Love and Death"
self.description = "What is life?"
self.body = "This is the real life body."
self.tagList = "life,love,death"
self.author = 'TestAuthor'
self.article = Articles(
slug=self.slug,
title=self.title,
description=self.description,
body=self.body,
tagList=self.tagList,
author=Profile.objects.get(username=self.author))
self.article.save()
def test_get_all_articles(self):
"""
This tests getting all articles successfully
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_successfully_not_getting_articles_if_token_not_used(self):
"""
Unauthorized error returned if no token is passed in
"""
url = reverse('articles')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_article_id(self):
"""
Tests the pk of the article is true
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url)
self.assertIn(b"1", response.content)
def test_articles_are_paginated(self):
"""
This tests if the returned articles are paginated
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
# this checks the number of articles in the database
self.assertIn(b"1", response.content)
# next is null since there is only one article posted
self.assertIn(b"null", response.content)
# previous is null since only one article has been posted
# the page_size holds ten articles per page
self.assertIn(b"null", response.content) # previous
def test_get_specific_article(self):
"""
This gets a specific article
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articleSpecific', kwargs={'slug': 'life_love_death'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_getting_and_checking_articles_content(self):
"""
This checks if the right content of an article is returned
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse('articles')
response = self.client.get(url).render()
# checks if the body passed during posting is the one returned
self.assertIn(b"This is the real life body.", response.content)
# checks if id returned is 1
self.assertIn(b"1", response.content)
def test_wrong_request(self):
"""
Checks request for a non existing article
"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
url = reverse(
'articleSpecific', kwargs={
'slug': 'life_love_death_live'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response.render()
self.assertIn(b"Article does not exist", response.content)
def get_user_token(self):
user = {
"user": {
"username": "TestAuthor",
"email": "[email protected]",
"password": "test123user#Password"
}
}
response = self.client.post(
reverse('register'), data=user, format='json')
user = get_user_model()
user = user.objects.get(username="TestAuthor")
user.is_active = True
user.save()
response.render()
data = response.content
token = json.loads(data.decode('utf-8'))['user']['token']
return token
| python |
from sympy import Wild, Indexed
from contextlib import contextmanager
class DestructuringError(ValueError):
'''
Represent an error due to the impossibility to destructure a given term.
    At present, we provide neither meaningful error messages nor objects
    related to the context in which this exception was raised; moreover, we
    do not distinguish the operator in the tackled combination term (Add, Mul, ...).
'''
pass
# kept only to preserve the existing API; delete it once the refactoring is finished.
# A better name could be: "destructuring_monomial_with_coeff_subscripts"
@contextmanager
def bind_Mul_indexed(term, indexed, forbidden_terms=[]):
'''
Destructure `term` against pattern `coeff * f[i j ...]`, binding `coeff`, `i` and `j ...`.
    I attempt to destructure the given term with respect to the `Mul` operator, aiming to isolate
    the term `indexed`, which should be an instance of the `Indexed` class, from a coefficient `coeff`,
    which collects everything but `indexed` and, optionally, objects appearing in `forbidden_terms`.
If such destructuring fails, then I raise `DestructuringError`.
Examples
========
>>> from sympy import *
Main track, everything is good:
>>> f, n, k, j = IndexedBase('f'), *symbols('n k j')
>>> term = 3 * f[n,k,j]
>>> with bind_Mul_indexed(term, f) as (coeff, subscripts):
... print('{} * {}'.format(coeff, subscripts))
3 * [n, k, j]
Failure, not a vanilla product:
>>> term = 3 * f[n] + 1
>>> try:
... with bind_Mul_indexed(term, f) as (coeff, subscripts):
... print('{} * {}'.format(coeff, subscripts))
... except DestructuringError:
... print('something else')
something else
Failure, `f` not indexed at all:
>>> term = 3 * f
>>> try:
... with bind_Mul_indexed(term, f) as (coeff, subscripts):
... print('{} * {}'.format(coeff, subscripts))
... except DestructuringError:
... print('something else')
something else
'''
coeff_w, ind_w = Wild('coeff', exclude=[indexed] + forbidden_terms), Wild('ind')
matched = term.match(coeff_w * ind_w)
# if no indexing applied then `isinstance(matched[ind_w], IndexedBase)` holds
if (matched
and ind_w in matched
and coeff_w in matched
and isinstance(matched[ind_w], Indexed)):
_, *subscripts = matched[ind_w].args
yield matched[coeff_w], subscripts # do not splice subscripts, give them packed
else:
raise DestructuringError()
| python |
"""
**download.py**
A commandline utility to retrieve test data from
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/ for use in evaluating
LSAnomaly.
**usage**: download.py [-h] --params YML_PARAMS --data-dir DATA_DIR
[--sc-url SC_URL] [--mc-url MC_URL]
Retrieve datasets for LsAnomaly evaluation. By default, data is retrieved from
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/
**Arguments**
-h, --help
show this help message and exit
--params YML_PARAMS, -p YML_PARAMS
YAML file with evaluation parameters
--data-dir DATA_DIR, -d DATA_DIR
directory to store retrieved data sets
--sc-url SC_URL
optional: single class test data URL; default:
        https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/
--mc-url MC_URL
optional: Multi-class test data URL; default:
        https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/
"""
# The MIT License
#
# Copyright 2019 Chris Skiscim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import bz2
import logging
import os
import requests
import yaml
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
fmt = "[%(asctime)s %(levelname)-8s] [%(filename)s:%(lineno)4s - %(funcName)s()] %(message)s" # noqa
logging.basicConfig(level=logging.INFO, format=fmt)
def unzip_write(file_path):
"""
Reads and inflates a .bz2 file and writes it back.
    The compressed file is retained. Used internally.
Args:
file_path (str): file to inflate
Raises:
FileNotFoundError
"""
try:
with open(file_path[:-4], "wb") as new_file, bz2.BZ2File(
file_path, "rb"
) as file:
for data in iter(lambda: file.read(100 * 1024), b""):
new_file.write(data)
except (FileNotFoundError, Exception) as e:
logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
raise
def write_contents(file_path, get_request):
"""
Writes the contents of the get request to the specified file path.
Args:
file_path (str): file path
get_request (requests.Response): response object
Raises:
IOError
"""
try:
open(file_path, "wb").write(get_request.content)
if file_path.endswith("bz2"):
unzip_write(file_path)
except (IOError, Exception) as e:
logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
raise
def get_request(dataset, file_path, sc_url, mc_url):
"""
Retrieve *dataset* trying first at `sc_url` and failing that, at
`mc_url`. If a data set cannot be retrieved, it is skipped.
    The contents are written to `file_path` with the data set name as the file name.
Args:
dataset (str): Dataset name as referenced in
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/
file_path (str): Directory where `dataset` will be written.
sc_url (str): single class data set URL
mc_url (str): multiclass data set URL
"""
url_get = sc_url + dataset
try:
get_req = requests.get(url_get, allow_redirects=True)
except (requests.exceptions.InvalidURL, Exception) as e:
logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
raise
if get_req.status_code == 200:
write_contents(file_path, get_req)
else:
url_get = mc_url + dataset
get_req = requests.get(url_get, allow_redirects=True)
if get_req.status_code == 200:
write_contents(file_path, get_req)
else:
logger.error("\tunable to retrieve {}".format(dataset))
logger.info("\tsuccess".format(dataset))
def main(param_file, sc_url, mc_url, data_fp):
"""
The main show. Tries to retrieve and store all the configured data-sets.
Args:
param_file (str): `.yml` File containing the evaluation parameters
sc_url (str): single class data set URL
mc_url (str): multiclass data set URL
data_fp (str): Directory where the datasets will be written
Raises:
ValueError: If `data_fp` is not a valid directory.
"""
try:
with open(param_file) as yml_file:
params = yaml.safe_load(yml_file)
except (FileNotFoundError, ValueError):
raise
datasets = params["evaluation"]["datasets"]
if not os.path.isdir(data_fp):
raise ValueError("no directory named {}".format(data_fp))
try:
for dataset in sorted(datasets):
logger.info("retrieving {}".format(dataset))
write_path = os.path.join(data_fp, dataset)
get_request(dataset, write_path, sc_url, mc_url)
except Exception as e:
logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
raise
if __name__ == "__main__":
import argparse
import sys
_sc_url = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/"
_mc_url = (
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/"
)
parser = argparse.ArgumentParser(
description="Retrieve datasets for LsAnomaly evaluation. "
"By default, data is retrieved from "
"https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/"
)
parser.add_argument(
"--params",
"-p",
dest="yml_params",
required=True,
help="YAML file with evaluation parameters",
)
parser.add_argument(
"--data-dir",
"-d",
dest="data_dir",
required=True,
help="directory to store retrieved data sets",
)
parser.add_argument(
"--sc-url",
dest="sc_url",
required=False,
default=_sc_url,
help="optional: single class test data URL; default: {}".format(
_sc_url
),
)
parser.add_argument(
"--mc-url",
dest="mc_url",
required=False,
default=_mc_url,
help="optional: Multi-class test data URL; default: {}".format(
_mc_url
),
)
args = parser.parse_args()
try:
sys.exit(
main(args.yml_params, args.sc_url, args.mc_url, args.data_dir)
)
except SystemExit:
pass
| python |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from queries import SparqlQuery
class event_precis(SparqlQuery):
"""
"""
def __init__(self, *args, **kwargs):
super(event_precis, self).__init__(*args, **kwargs)
self.query_title = 'Get event precis'
self.description = 'Get precis for event which is a distillation of the event graph rather than verbatim report.'
self.url = 'event_precis'
self.world_cup_example = 'event_precis?uris.0=http://www.newsreader-project.eu/data/cars/2003/06/02/48RT-R260-009F-R155.xml%23ev18'
self.cars_example = 'event_precis?uris.0=http://www.newsreader-project.eu/data/cars/2003/06/02/48RT-R260-009F-R155.xml%23ev18'
self.ft_example = 'event_precis?uris.0=http://www.newsreader-project.eu/data/2013/10/312013/10/312013/10/31/11779884.xml%23ev7'
self.wikinews_example ='event_precis?uris.0=http://en.wikinews.org/wiki/Vettel_becomes_youngest_Formula_One_champion%23ev27_1'
self.query_template = ("""
SELECT DISTINCT ?subject ?predicate ?object ?graph
WHERE {{
{{
{uri_0} eso:hasPreSituation|eso:hasPostSituation|eso:hasDuringSituation ?graph .
GRAPH ?graph {{ ?subject ?predicate ?object }}
}} UNION {{
BIND ({uri_0} as ?subject)
{{
GRAPH ?graph {{ {uri_0} ?predicate ?object }}
FILTER (?predicate = sem:hasActor ||
?predicate = sem:hasPlace ||
?predicate = rdf:type && EXISTS {{ ?object rdfs:isDefinedBy eso: }} ||
EXISTS {{ ?predicate rdfs:isDefinedBy eso: }} )
}} UNION {{
GRAPH ?graph {{ {uri_0} sem:hasTime ?t }}
?t owltime:inDateTime ?object .
BIND (nwr:cleanedTime as ?predicate)
}} UNION {{
SELECT ("number of documents" AS ?predicate) ("graph" AS ?graph)
(COUNT(DISTINCT STRBEFORE(STR(?m), "#")) AS ?object)
WHERE {{ {uri_0} gaf:denotedBy ?m }}
}}
}}
}}
""")
self.count_template = ("""
SELECT (COUNT(*) as ?count)
WHERE{{
SELECT DISTINCT ?subject ?predicate ?object ?graph
WHERE {{
{{
{uri_0} eso:hasPreSituation|eso:hasPostSituation|eso:hasDuringSituation ?graph .
GRAPH ?graph {{ ?subject ?predicate ?object }}
}} UNION {{
BIND ({uri_0} as ?subject)
{{
GRAPH ?graph {{ {uri_0} ?predicate ?object }}
FILTER (?predicate = sem:hasActor ||
?predicate = sem:hasPlace ||
?predicate = rdf:type && EXISTS {{ ?object rdfs:isDefinedBy eso: }} ||
EXISTS {{ ?predicate rdfs:isDefinedBy eso: }} )
}} UNION {{
GRAPH ?graph {{ {uri_0} sem:hasTime ?t }}
?t owltime:inDateTime ?object .
BIND (nwr:cleanedTime as ?predicate)
}} UNION {{
SELECT ("number of documents" AS ?predicate) ("graph" AS ?graph)
(COUNT(DISTINCT STRBEFORE(STR(?m), "#")) AS ?object)
WHERE {{ {uri_0} gaf:denotedBy ?m }}
}}
}}
}}
}}
""")
self.jinja_template = 'table.html'
self.headers = ['subject', 'predicate', 'object', 'graph']
self.required_parameters = ["uris"]
self.optional_parameters = ["output"]
self.number_of_uris_required = 1
self._make_uri_filter_block()
self.query = self._build_query()
| python |
# -*- coding: utf-8 -*-
# Copyright 2021 the HERA Project
# Licensed under the MIT License
import pytest
import glob
from pyuvdata import UVData
from pyuvdata import UVCal
from ..data import DATA_PATH
from .. import chunker
from hera_qm.utils import apply_yaml_flags
import numpy as np
import sys
def test_chunk_data_files(tmpdir):
# list of data files:
tmp_path = tmpdir.strpath
data_files = sorted(glob.glob(DATA_PATH + '/zen.2458044.*.uvh5'))
nfiles = len(data_files)
# form chunks with three samples.
for chunk in range(0, nfiles, 2):
output = tmp_path + f'/chunk.{chunk}.uvh5'
chunker.chunk_files(data_files, data_files[chunk], output, 2,
polarizations=['ee'], spw_range=[0, 32],
throw_away_flagged_ants=True, ant_flag_yaml=DATA_PATH + '/test_input/a_priori_flags_sample_noflags.yaml')
# test that chunked files contain identical data (when combined)
# to original combined list of files.
# load in chunks
chunks = sorted(glob.glob(tmp_path + '/chunk.*.uvh5'))
uvd = UVData()
uvd.read(chunks)
# load in original file
uvdo = UVData()
uvdo.read(data_files, freq_chans=range(32))
apply_yaml_flags(uvdo, DATA_PATH + '/test_input/a_priori_flags_sample_noflags.yaml', throw_away_flagged_ants=True,
flag_freqs=False, flag_times=False, ant_indices_only=True)
assert np.all(np.isclose(uvdo.data_array, uvd.data_array))
assert np.all(np.isclose(uvdo.flag_array, uvd.flag_array))
assert np.all(np.isclose(uvdo.nsample_array, uvd.nsample_array))
    # Repeat test with no spw_range or pols provided.
for chunk in range(0, nfiles, 2):
output = tmp_path + f'/chunk.{chunk}.uvh5'
chunker.chunk_files(data_files, data_files[chunk], output, 2,
polarizations=None, spw_range=None, clobber=True,
throw_away_flagged_ants=True, ant_flag_yaml=DATA_PATH + '/test_input/a_priori_flags_sample_noflags.yaml')
# test that chunked files contain identical data (when combined)
# to original combined list of files.
# load in chunks
chunks = sorted(glob.glob(tmp_path + '/chunk.*.uvh5'))
uvd = UVData()
uvd.read(chunks)
# load in original file
uvdo = UVData()
uvdo.read(data_files)
apply_yaml_flags(uvdo, DATA_PATH + '/test_input/a_priori_flags_sample_noflags.yaml', throw_away_flagged_ants=True,
flag_freqs=False, flag_times=False, ant_indices_only=True)
assert np.all(np.isclose(uvdo.data_array, uvd.data_array))
assert np.all(np.isclose(uvdo.flag_array, uvd.flag_array))
assert np.all(np.isclose(uvdo.nsample_array, uvd.nsample_array))
def test_chunk_cal_files(tmpdir):
# list of data files:
tmp_path = tmpdir.strpath
cal_files = sorted(glob.glob(DATA_PATH + '/test_input/*.abs.calfits_54x_only.part*'))
nfiles = len(cal_files)
# test ValueError
pytest.raises(ValueError, chunker.chunk_files, cal_files, cal_files[0], 'output', 2, spw_range=[0, 32], type='arglebargle')
# form chunks with three samples.
for chunk in range(0, nfiles, 2):
output = tmp_path + f'/chunk.{chunk}.calfits'
chunker.chunk_files(cal_files, cal_files[chunk], output, 2, spw_range=[0, 32], type='gains')
# test that chunked files contain identical data (when combined)
# to original combined list of files.
# load in chunks
chunks = sorted(glob.glob(tmp_path + '/chunk.*.calfits'))
uvc = UVCal()
uvc.read_calfits(chunks)
# load in original file
uvco = UVCal()
uvco.read_calfits(cal_files)
uvco.select(freq_chans=range(32))
assert np.all(np.isclose(uvco.gain_array, uvc.gain_array))
assert np.all(np.isclose(uvco.flag_array, uvc.flag_array))
    # repeat test with None provided for spw_range and pols
for chunk in range(0, nfiles, 2):
output = tmp_path + f'/chunk.{chunk}.calfits'
chunker.chunk_files(cal_files, cal_files[chunk], output, 2, type='gains', clobber=True)
# test that chunked files contain identical data (when combined)
# to original combined list of files.
# load in chunks
chunks = sorted(glob.glob(tmp_path + '/chunk.*.calfits'))
uvc = UVCal()
uvc.read_calfits(chunks)
# load in original file
uvco = UVCal()
uvco.read_calfits(cal_files)
assert np.all(np.isclose(uvco.gain_array, uvc.gain_array))
assert np.all(np.isclose(uvco.flag_array, uvc.flag_array))
def test_chunk_parser():
sys.argv = [sys.argv[0], 'a', 'b', 'c', 'input', 'output', '3', '--type', 'gains']
ap = chunker.chunk_parser()
args = ap.parse_args()
assert args.filenames == ['a', 'b', 'c']
assert args.inputfile == 'input'
assert args.outputfile == 'output'
assert args.chunk_size == 3
assert args.type == 'gains'
| python |
import argparse
import codecs
import json
import math
import os.path
import numpy as np
import tensorflow as tf
__all__ = ["create_default_hyperparams", "load_hyperparams",
"generate_search_lookup", "search_hyperparams", "create_hyperparams_file"]
def create_default_hyperparams(config_type):
"""create default hyperparameters"""
if config_type == "dam":
hyperparams = tf.contrib.training.HParams(
data_train_contextual_file="",
data_train_contextual_file_type="",
data_eval_contextual_file="",
data_eval_contextual_file_type="",
data_embedding_file="",
data_full_embedding_file="",
data_context_utterance_size=10,
data_context_word_size=50,
data_context_char_size=16,
data_response_candidate_size=3,
data_response_word_size=50,
data_response_char_size=16,
data_word_vocab_file="",
data_word_vocab_size=50000,
data_word_vocab_threshold=0,
data_word_unk="<unk>",
data_word_pad="<pad>",
data_char_vocab_file="",
data_char_vocab_size=1000,
data_char_vocab_threshold=0,
data_char_unk="*",
data_char_pad="#",
data_pipeline_mode="default",
data_num_parallel=4,
data_log_output_dir="",
data_result_output_dir="",
train_random_seed=100,
train_enable_shuffle=True,
train_shuffle_buffer_size=30000,
train_batch_size=32,
train_eval_batch_size=100,
train_eval_metric=["cp_auc@1", "precision@1"],
train_num_epoch=3,
train_ckpt_output_dir="",
train_summary_output_dir="",
train_step_per_stat=10,
train_step_per_ckpt=1000,
train_step_per_eval=1000,
train_clip_norm=5.0,
train_enable_debugging=False,
train_ema_enable=True,
train_ema_decay_rate=0.9999,
train_ema_enable_debias=False,
train_ema_enable_dynamic_decay=False,
train_regularization_enable=True,
train_regularization_type="l2",
train_regularization_scale=3e-7,
train_optimizer_type="adam",
train_optimizer_learning_rate=0.001,
train_optimizer_warmup_enable=False,
train_optimizer_warmup_mode="exponential_warmup",
train_optimizer_warmup_rate=0.01,
train_optimizer_warmup_end_step=1000,
train_optimizer_decay_enable=False,
train_optimizer_decay_mode="exponential_decay",
train_optimizer_decay_rate=0.95,
train_optimizer_decay_step=1000,
train_optimizer_decay_start_step=10000,
train_optimizer_momentum_beta=0.9,
train_optimizer_rmsprop_beta=0.999,
train_optimizer_rmsprop_epsilon=1e-8,
train_optimizer_adadelta_rho=0.95,
train_optimizer_adadelta_epsilon=1e-8,
train_optimizer_adagrad_init_accumulator=0.1,
train_optimizer_adam_beta_1=0.8,
train_optimizer_adam_beta_2=0.999,
train_optimizer_adam_epsilon=1e-07,
model_type="dam",
model_scope="contextual_modeling",
model_representation_word_embed_dim=300,
model_representation_word_dropout=0.1,
model_representation_word_embed_pretrained=True,
model_representation_word_feat_trainable=False,
model_representation_word_feat_enable=True,
model_representation_char_embed_dim=8,
model_representation_char_unit_dim=100,
model_representation_char_window_size=[5],
model_representation_char_hidden_activation="relu",
model_representation_char_dropout=0.1,
model_representation_char_pooling_type="max",
model_representation_char_feat_trainable=True,
model_representation_char_feat_enable=True,
model_representation_fusion_type="highway",
model_representation_fusion_num_layer=2,
model_representation_fusion_unit_dim=400,
model_representation_fusion_hidden_activation="relu",
model_representation_fusion_dropout=0.1,
model_representation_fusion_trainable=True,
model_understanding_context_num_layer=5,
model_understanding_context_num_head=8,
model_understanding_context_unit_dim=128,
model_understanding_context_hidden_activation="relu",
model_understanding_context_dropout=0.1,
model_understanding_context_attention_dropout=0.0,
model_understanding_context_layer_dropout=0.1,
model_understanding_context_trainable=True,
model_understanding_response_num_layer=5,
model_understanding_response_num_head=8,
model_understanding_response_unit_dim=128,
model_understanding_response_hidden_activation="relu",
model_understanding_response_dropout=0.1,
model_understanding_response_attention_dropout=0.0,
model_understanding_response_layer_dropout=0.1,
model_understanding_response_trainable=True,
model_understanding_enable_sharing=False,
model_interaction_context2response_num_layer=5,
model_interaction_context2response_num_head=8,
model_interaction_context2response_unit_dim=128,
model_interaction_context2response_hidden_activation="relu",
model_interaction_context2response_dropout=0.1,
model_interaction_context2response_attention_dropout=0.0,
model_interaction_context2response_layer_dropout=0.1,
model_interaction_context2response_trainable=True,
model_interaction_response2context_num_layer=5,
model_interaction_response2context_num_head=8,
model_interaction_response2context_unit_dim=128,
model_interaction_response2context_hidden_activation="relu",
model_interaction_response2context_dropout=0.1,
model_interaction_response2context_attention_dropout=0.0,
model_interaction_response2context_layer_dropout=0.1,
model_interaction_response2context_trainable=True,
model_matching_aggregation_num_layer=2,
model_matching_aggregation_unit_dim=[32, 16],
model_matching_aggregation_hidden_activation=["relu", "relu"],
model_matching_aggregation_conv_window=[3,3],
model_matching_aggregation_conv_stride=[1,1],
model_matching_aggregation_pool_window=[3,3],
model_matching_aggregation_pool_stride=[3,3],
model_matching_aggregation_pooling_type=["max", "max"],
model_matching_aggregation_dropout=[0.1, 0.1],
model_matching_aggregation_trainable=[True, True],
model_matching_projection_dropout=0.1,
model_matching_projection_trainable=True,
device_num_gpus=1,
device_default_gpu_id=0,
device_log_device_placement=False,
device_allow_soft_placement=False,
device_allow_growth=False,
device_per_process_gpu_memory_fraction=0.8
)
else:
raise ValueError("unsupported config type {0}".format(config_type))
return hyperparams
def load_hyperparams(config_file):
"""load hyperparameters from config file"""
if tf.gfile.Exists(config_file):
with codecs.getreader("utf-8")(tf.gfile.GFile(config_file, "rb")) as file:
hyperparams_dict = json.load(file)
hyperparams = create_default_hyperparams(hyperparams_dict["model_type"])
hyperparams.override_from_dict(hyperparams_dict)
return hyperparams
else:
raise FileNotFoundError("config file not found")
def generate_search_lookup(search,
search_lookup=None):
search_lookup = search_lookup if search_lookup else {}
search_type = search["stype"]
data_type = search["dtype"]
if search_type == "uniform":
range_start = search["range"][0]
range_end = search["range"][1]
if data_type == "int":
search_sample = np.random.randint(range_start, range_end)
elif data_type == "float":
search_sample = (range_end - range_start) * np.random.random_sample() + range_start
else:
raise ValueError("unsupported data type {0}".format(data_type))
elif search_type == "log":
range_start = math.log(search["range"][0], 10)
range_end = math.log(search["range"][1], 10)
if data_type == "float":
search_sample = math.pow(10, (range_end - range_start) * np.random.random_sample() + range_start)
else:
raise ValueError("unsupported data type {0}".format(data_type))
elif search_type == "discrete":
search_set = search["set"]
search_index = np.random.choice(len(search_set))
search_sample = search_set[search_index]
elif search_type == "lookup":
search_key = search["key"]
if search_key in search_lookup:
search_sample = search_lookup[search_key]
else:
raise ValueError("search key {0} doesn't exist in look-up table".format(search_key))
else:
raise ValueError("unsupported search type {0}".format(search_type))
data_scale = search["scale"] if "scale" in search else 1.0
data_shift = search["shift"] if "shift" in search else 0.0
if data_type == "int":
search_sample = int(data_scale * search_sample + data_shift)
elif data_type == "float":
search_sample = float(data_scale * search_sample + data_shift)
elif data_type == "string":
search_sample = str(search_sample)
elif data_type == "boolean":
search_sample = bool(search_sample)
elif data_type == "list":
search_sample = list(search_sample)
else:
raise ValueError("unsupported data type {0}".format(data_type))
return search_sample
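# Hedged example (illustration only, not part of the original config schema): three search
# specs in the layout generate_search_lookup() expects. The hyperparameter names used as
# keys are assumptions chosen to match fields defined in create_default_hyperparams above.
_EXAMPLE_SEARCH_SPECS = {
    "train_optimizer_learning_rate": {"stype": "log", "dtype": "float", "range": [1e-4, 1e-2]},
    "train_batch_size": {"stype": "discrete", "dtype": "int", "set": [16, 32, 64]},
    "model_representation_word_dropout": {"stype": "uniform", "dtype": "float", "range": [0.0, 0.3]},
}
# e.g. generate_search_lookup(_EXAMPLE_SEARCH_SPECS["train_batch_size"]) returns one of 16/32/64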
def search_hyperparams(hyperparams,
config_file,
num_group,
random_seed):
"""search hyperparameters based on search config"""
if tf.gfile.Exists(config_file):
with codecs.getreader("utf-8")(tf.gfile.GFile(config_file, "rb")) as file:
hyperparams_group = []
np.random.seed(random_seed)
search_setting = json.load(file)
hyperparams_search_setting = search_setting["hyperparams"]
variables_search_setting = search_setting["variables"]
for i in range(num_group):
variables_search_lookup = {}
for key in variables_search_setting.keys():
variables_search = variables_search_setting[key]
variables_search_lookup[key] = generate_search_lookup(variables_search)
hyperparams_search_lookup = {}
for key in hyperparams_search_setting.keys():
hyperparams_search = hyperparams_search_setting[key]
hyperparams_search_lookup[key] = generate_search_lookup(hyperparams_search, variables_search_lookup)
hyperparams_sample = tf.contrib.training.HParams(hyperparams.to_proto())
hyperparams_sample.override_from_dict(hyperparams_search_lookup)
hyperparams_group.append(hyperparams_sample)
return hyperparams_group
else:
raise FileNotFoundError("config file not found")
def create_hyperparams_file(hyperparams_group, config_dir):
"""create config files from groups of hyperparameters"""
if not tf.gfile.Exists(config_dir):
tf.gfile.MakeDirs(config_dir)
for i in range(len(hyperparams_group)):
config_file = os.path.join(config_dir, "config_hyperparams_{0}.json".format(i))
with codecs.getwriter("utf-8")(tf.gfile.GFile(config_file, "w")) as file:
hyperparam_dict = hyperparams_group[i].values()
hyperparams_json = json.dumps(hyperparam_dict, indent=4)
file.write(hyperparams_json)
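# Hedged usage sketch: the file and directory names below are placeholders, and the search
# config is assumed to follow the {"hyperparams": {...}, "variables": {...}} layout read by
# search_hyperparams above.
if __name__ == "__main__":
    base_hyperparams = load_hyperparams("config/dam_config.json")
    sampled_group = search_hyperparams(base_hyperparams, "config/dam_search.json",
                                       num_group=4, random_seed=100)
    create_hyperparams_file(sampled_group, "config/search_output")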
| python |
from fractions import Fraction
def isPointinPolygon(pointlist, rangelist):  # Ray casting: check that every point of the small polygon lies inside the large polygon
    # First check the bounding box of the large polygon; any point outside it fails immediately
    xlist = []  # x-coordinates of the large polygon's vertices
    ylist = []  # y-coordinates of the large polygon's vertices
    for i in range(len(rangelist)-1):
        xlist.append(rangelist[i][0])
        ylist.append(rangelist[i][1])
    maxx = max(xlist)
    minx = min(xlist)
    maxy = max(ylist)
    miny = min(ylist)
    # Reject any point that lies outside the bounding box
    for point in pointlist:
        if (point[0] > maxx or point[0] < minx or
                point[1] > maxy or point[1] < miny):
            print('The small polygon is not inside the large polygon')
            return False
    for point in pointlist:
        # Count how many polygon edges the horizontal ray (-inf, y)-(x, y) crosses
        count = 0
        point1 = rangelist[0]
        for i in range(1, len(rangelist)):
            point2 = rangelist[i]
            # The point coincides with a vertex of the large polygon
            if ((point[0] == point1[0] and point[1] == point1[1]) or
                    (point[0] == point2[0] and point[1] == point2[1])):
                print('The small polygon is not inside the large polygon')
                return False
            # The edge can only cross the ray if its endpoints lie on opposite sides of it
            if (point1[1] < point[1] and point2[1] >= point[1]) or (point1[1] >= point[1] and point2[1] < point[1]):
                # x-coordinate of the intersection between the edge and the ray, compared with the point's x
                point12lng = point2[0] - (point2[1] - point[1]) * (point2[0] - point1[0]) / (point2[1] - point1[1])
                # The point lies exactly on a polygon edge
                if point12lng == point[0]:
                    print('The small polygon is not inside the large polygon')
                    return False
                if point12lng < point[0]:
                    count += 1
            point1 = point2
        # An even number of crossings means this point is outside the large polygon
        if count % 2 == 0:
            print('The small polygon is not inside the large polygon')
            return False
    print('All points are inside the large polygon')
    return True
def line(line):  # Build the line equation for each polygon edge, together with its x and y ranges
    result = []
    for i in range(len(line)):
        if i == len(line)-1:
            break
        if line[i][1] == line[i+1][1]:  # horizontal edge: y = b
            a = 0
            b = line[i][1]
            result.append([a, b, line[i][0], line[i+1][0], line[i][1], line[i+1][1]])
        elif line[i][0] == line[i+1][0]:  # vertical edge: x is constant, slope undefined
            a = 'no slope'
            b = 0
            result.append([a, b, line[i][0], line[i+1][0], line[i][1], line[i+1][1]])
        else:  # general edge: y = a*x + b
            a = (line[i+1][1]-line[i][1])/(line[i+1][0]-line[i][0])
            b = line[i][1]-a*line[i][0]
            result.append([a, b, line[i][0], line[i+1][0], line[i][1], line[i+1][1]])
    return result
def islineinPolygon(pointlist, rangelist):  # Check whether any edge of the small polygon crosses an edge of the large polygon
    pointline = line(pointlist)
    rangeline = line(rangelist)
    x = 0
    y = 0
    for i in pointline:
        for j in rangeline:
            if i[0] == 'no slope' and j[0] == 'no slope':  # both edges vertical: parallel or collinear, no proper crossing
                continue
            if i[0] == 'no slope':  # the small polygon's edge is vertical
                y = j[0]*i[2] + j[1]
                if y > min(j[4:]) and y < max(j[4:]) and y > min(i[4:]) and y < max(i[4:]):
                    print('The small polygon is not inside the large polygon')
                    return False
            if j[0] == 'no slope':  # the large polygon's edge is vertical
                y = i[0]*j[2] + i[1]
                if y > min(j[4:]) and y < max(j[4:]) and y > min(i[4:]) and y < max(i[4:]):
                    print('The small polygon is not inside the large polygon')
                    return False
            if i[0] != j[0] and i[0] != 'no slope' and j[0] != 'no slope':
                x = (j[1]-i[1])/(i[0]-j[0])
                if x > min(j[2:4]) and x < max(j[2:4]) and x > min(i[2:4]) and x < max(i[2:4]):
                    print('The small polygon is not inside the large polygon')
                    return False
    print('The small polygon is inside the large polygon')
    return True
if __name__ == '__main__':
    # Vertices of the large polygon; the first and the last vertex must be identical
l=[[0,4],[3,2],[1,0],[3,-2],[0,-4],[-3,-2],[-1,0],[-3,2],[0,4]]
    # Vertices of the small polygon; the first and the last vertex must be identical
pointlist=[[-2,2],[2,-2],[-2,-2],[-2,2]]
if isPointinPolygon(pointlist, l):
islineinPolygon(pointlist,l)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Ural URL Extraction Unit Tests
# =============================================================================
from ural import urls_from_text
TEXT = """Facial-recognition technology is advancing faster than the people who worry about it have been able to think of ways to manage it." @NewYorker on the manifold challenges of harnessing a promising, but frightening, technology. http://mitsha.re/Qg1g30mVD78
Today @jovialjoy's @AJLUnited and @GeorgetownCPT are launching the Safe Face Pledge, which calls for facial analysis technology companies to commit to transparency in government contracts and mitigate potential abuse of their technology. http://www.safefacepledge.org #safefacepledge
Now accepting submissions for the 2018 Excellence in Local News Awards http://twib.in/l/xLzxjnpMXx7X via @medium http://foo.com/blah_(wikipedia)#cite-1
Directed to help #Alzheimers patients + others w/ impaired memory by providing intuitive ways to benefit from large amounts of personal data Check out this post by @physicspod in @singularityhub http://on.su.org/2rsPeXh"""
REF_SET = set(["http://mitsha.re/Qg1g30mVD78",
"http://www.safefacepledge.org",
"http://twib.in/l/xLzxjnpMXx7X",
"http://on.su.org/2rsPeXh",
"http://foo.com/blah_(wikipedia)#cite-1"])
TEXT_WITH_INVALID_URLS = """
This is a baaaad url: https://www.bfmtvregain-de-popularite-pour-emmanuel-macron-et-edouard-phi...
"""
TESTS = [
(
"please visit my website, https://oilab.eu/stijn, it's great",
['https://oilab.eu/stijn']
),
(
'I recently read this in a new york times article (https://nytimes.com/some-url-with-(parentheses))',
['https://nytimes.com/some-url-with-(parentheses)']
),
(
'"Bezoek alsjeblieft de websites van het [Juridisch Loket](https://www.juridischloket.nl/), [Sociaal Verhaal](http://www.sociaalverhaal.com/) en/of de [Rechtswinkel](http://www.rechtswinkel.nl/). Reddit is niet een geschikte plek voor juridisch advies."',
[
'https://www.juridischloket.nl/',
'http://www.sociaalverhaal.com/',
'http://www.rechtswinkel.nl/'
]
),
(
'What do you think of https://lemonde.fr? http://www.lemonde.fr. It is good http://www.lemonde.fr#?.',
[
'https://lemonde.fr',
'http://www.lemonde.fr',
'http://www.lemonde.fr'
]
),
(
'This is: "http://www.liberation.fr" and \'https://lefigaro.fr\'.',
[
'http://www.liberation.fr',
'https://lefigaro.fr'
]
),
(
'This is a [markdown]( https://lefigaro.fr) link.',
['https://lefigaro.fr']
),
(
'[http://www.lemonde.fr]',
['http://www.lemonde.fr']
)
]
class TestUrlsFromText(object):
def test_basics(self):
assert set(urls_from_text(TEXT)) == REF_SET
for string, urls in TESTS:
assert list(urls_from_text(string)) == urls
def test_invalid_urls(self):
urls = set(urls_from_text(TEXT_WITH_INVALID_URLS))
assert urls == {
'https://www.bfmtvregain'
}
| python |
"""Train the ASR model.
Tested with Python 3.5, 3.6 and 3.7.
No Python 2 compatibility is being provided.
"""
import time
import tensorflow as tf
from asr.input_functions import input_fn_generator
from asr.model import CTCModel
from asr.params import FLAGS, get_parameters
from asr.util import storage
RANDOM_SEED = FLAGS.random_seed if FLAGS.random_seed != 0 else int(time.time())
def main(_):
"""TensorFlow starting routine."""
# Delete old model data if requested.
storage.maybe_delete_checkpoints(FLAGS.train_dir, FLAGS.delete)
# Logging information about the run.
print('TensorFlow-Version: {}; Tag-Version: {}; Branch: {}; Commit: {}\nParameters: {}'
.format(tf.VERSION, storage.git_latest_tag(), storage.git_branch(),
storage.git_revision_hash(), get_parameters()))
# Setup TensorFlow run configuration and hooks.
config = tf.estimator.RunConfig(
model_dir=FLAGS.train_dir,
tf_random_seed=RANDOM_SEED,
save_summary_steps=FLAGS.log_frequency,
session_config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement,
gpu_options=tf.GPUOptions(allow_growth=FLAGS.allow_vram_growth)
),
keep_checkpoint_max=5,
log_step_count_steps=FLAGS.log_frequency,
train_distribute=None
)
model = CTCModel()
# Construct the estimator that embodies the model.
estimator = tf.estimator.Estimator(
model_fn=model.model_fn,
model_dir=FLAGS.train_dir,
config=config
)
# Train the model.
curriculum_train_input_fn = input_fn_generator('train_batch')
estimator.train(input_fn=curriculum_train_input_fn, hooks=None)
# Evaluate the trained model.
dev_input_fn = input_fn_generator('dev')
evaluation_result = estimator.evaluate(input_fn=dev_input_fn, hooks=None)
tf.logging.info('Evaluation results of epoch {}: {}'.format(1, evaluation_result))
# Train the model and evaluate after each epoch.
for epoch in range(2, FLAGS.max_epochs + 1):
# Train the model.
train_input_fn = input_fn_generator('train_bucket')
estimator.train(input_fn=train_input_fn, hooks=None)
# L8ER: Possible replacement for evaluate every epoch:
# https://www.tensorflow.org/api_docs/python/tf/contrib/estimator/InMemoryEvaluatorHook
# Evaluate the trained model.
dev_input_fn = input_fn_generator('dev')
evaluation_result = estimator.evaluate(input_fn=dev_input_fn, hooks=None)
tf.logging.info('Evaluation results of epoch {}: {}'.format(epoch, evaluation_result))
if __name__ == '__main__':
# General TensorFlow setup.
tf.logging.set_verbosity(tf.logging.INFO)
tf.set_random_seed(RANDOM_SEED)
# Run training.
tf.app.run()
| python |
from typing import List, Union
from datetime import datetime
from mongoengine import *
class Prediction(Document):
"""
The GFI prediction result for an open issue.
This collection will be updated periodically and used by backend and bot for GFI recommendation.
Attributes:
owner, name, number: uniquely identifies a GitHub issue.
threshold: the number of in-repository commits that disqualify one as a newcomer,
can be one to five. For more details please check the ICSE'22 paper.
probability: the modeled probability that the issue is a GFI.
last_updated: the last time this prediction result was updated,
necessary for incremental update.
"""
owner: str = StringField(required=True)
name: str = StringField(required=True)
number: int = IntField(required=True)
threshold: int = IntField(required=True, min_value=1, max_value=5)
probability: float = FloatField(required=True)
last_updated: datetime = DateTimeField(required=True)
meta = {
"indexes": [
{"fields": ["owner", "name", "number", "threshold"], "unique": True},
{"fields": ["probability"]},
]
}
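# Hedged example helper (illustration only, not used elsewhere in the project): one way the
# backend might query this collection for recommendations, assuming a mongoengine connection
# has already been established.
def _example_top_predictions(owner: str, name: str, threshold: int = 1, limit: int = 10):
    """Return the `limit` issues of owner/name with the highest GFI probability."""
    return (
        Prediction.objects(owner=owner, name=name, threshold=threshold)
        .order_by("-probability")
        .limit(limit)
    )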
class TrainingSummary(Document):
"""
Describes model training result for a specific repository and threshold.
This collection will be used to communicate the effectiveness of our model to users.
Attributes:
owner, name, threshold: uniquely identifies a GitHub repository and a training setting.
If owner="", name="", then this is a global summary result.
        issues_train, issues_test: lists of issues used for training and testing.
        model_90_file, model_full_file: relative paths to the model files, with repository as root.
n_resolved_issues: total number of resolved issues in this repository.
n_newcomer_resolved: the number of issues resolved by newcomers in this repository.
accuracy: the accuracy of the model on the training data.
auc: the area under the ROC curve.
last_updated: the last time this training summary was updated.
"""
owner: str = StringField(required=True)
name: str = StringField(required=True)
issues_train: List[list] = ListField(ListField(), default=[])
issues_test: List[list] = ListField(ListField(), default=[])
threshold: int = IntField(required=True, min_value=1, max_value=5)
model_90_file: str = StringField(required=True)
model_full_file: str = StringField(required=True)
n_resolved_issues: int = IntField(required=True)
n_newcomer_resolved: int = IntField(required=True)
accuracy: float = FloatField(required=True)
auc: float = FloatField(required=True)
last_updated: datetime = DateTimeField(required=True)
meta = {
"indexes": [
{"fields": ["owner", "name", "threshold"], "unique": True},
]
}
class Dataset(Document):
"""
The final dataset involved for RecGFI training
All attributes are restored at a given time
Attributes:
owner, name, number: uniquely identifies a GitHub issue
created_at: The time when the issue is created
closed_at: The time when the issue is closed
before: The time when all features in this document is computed
resolver_commit_num: Issue resolver's commits to this repo, before the issue is resolved
if -1, means that the issue is still open
---------- Content ----------
title: Issue title
body: Issue description
len_title: Length of issue title
len_body: Length of issue description
n_code_snips: The number of code snippets in issue body
n_urls: The number of URLs in issue body
n_imgs: The number of imgs in issue body
coleman_liau_index: Readability index
flesch_reading_ease: Readability index
flesch_kincaid_grade: Readability index
automated_readability_index: Readability index
labels: The number of different labels
---------- Background ----------
reporter_feat: Features for issue reporter
owner_feat: Features for repository owner
prev_resolver_commits: A list of the commits made by resolver for all previously resolved issues
n_stars: Number of stars
n_pulls: Number of pull requests
n_commits: Number of commits
n_contributors: Number of contributors
n_closed_issues: Number of closed issues
n_open_issues: Number of open issues
r_open_issues: Ratio of open issues over all issues
issue_close_time: Median issue close time (in seconds)
---------- Dynamics ----------
comments: All issue comments
events: All issue events, excluding comments
comment_users: Features for all involved commenters
event_users: Features for all involved users
"""
class LabelCategory(EmbeddedDocument):
"""
Each attribute represents the number of labels under this type.
"""
bug: int = IntField(default=0)
feature: int = IntField(default=0)
test: int = IntField(default=0)
build: int = IntField(default=0)
doc: int = IntField(default=0)
coding: int = IntField(default=0)
enhance: int = IntField(default=0)
gfi: int = IntField(default=0)
medium: int = IntField(default=0)
major: int = IntField(default=0)
triaged: int = IntField(default=0)
untriaged: int = IntField(default=0)
class UserFeature(EmbeddedDocument):
"""User features in a dataset
Attributes:
name: GitHub username
n_commits: Number of commits the user made to this repository
n_issues: Number of issues the user opened in this repository
n_pulls: Number of pull requests the user opened in this repository
resolver_commits: For all resolved issue opened by this user,
number of the resolver's commits prior to issue resolution
"""
name: str = StringField(required=True)
n_commits: int = IntField(required=True, min_value=0)
n_issues: int = IntField(required=True, min_value=0)
n_pulls: int = IntField(required=True, min_value=0)
resolver_commits: List[int] = ListField(IntField(min_value=0), default=[])
owner: str = StringField(required=True)
name: str = StringField(required=True)
number: int = IntField(required=True)
created_at: datetime = DateTimeField(required=True)
closed_at: datetime = DateTimeField(null=True)
before: datetime = DateTimeField(required=True)
resolver_commit_num: int = IntField(required=True)
# ---------- Content ----------
title: str = StringField(required=True)
body: str = StringField(required=True)
len_title: int = IntField(required=True)
len_body: int = IntField(required=True)
n_code_snips: int = IntField(required=True)
n_urls: int = IntField(required=True)
n_imgs: int = IntField(required=True)
coleman_liau_index: float = FloatField(required=True)
flesch_reading_ease: float = FloatField(required=True)
flesch_kincaid_grade: float = FloatField(required=True)
automated_readability_index: float = FloatField(required=True)
labels: List[str] = ListField(StringField(), default=[])
label_category: LabelCategory = EmbeddedDocumentField(LabelCategory, required=True)
# ---------- Background ----------
reporter_feat: UserFeature = EmbeddedDocumentField(UserFeature, required=True)
owner_feat: UserFeature = EmbeddedDocumentField(UserFeature, required=True)
prev_resolver_commits: List[int] = ListField(IntField(), default=[])
n_stars: int = IntField(required=True)
n_pulls: int = IntField(required=True)
n_commits: int = IntField(required=True)
n_contributors: int = IntField(required=True)
n_closed_issues: int = IntField(required=True)
n_open_issues: int = IntField(required=True)
r_open_issues: float = FloatField(required=True)
issue_close_time: float = FloatField(required=True)
# ---------- Dynamics ----------
comments: List[str] = ListField(StringField(), default=[])
events: List[str] = ListField(StringField(), default=[])
comment_users: UserFeature = EmbeddedDocumentListField(UserFeature, default=[])
event_users: UserFeature = EmbeddedDocumentListField(UserFeature, default=[])
meta = {
"indexes": [
{"fields": ["owner", "name", "number", "before"], "unique": True},
]
}
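# Hedged example helper (illustration only): per the docstring above, resolver_commit_num == -1
# marks a still-open issue, so this counts the resolved training datapoints for one repository.
def _example_count_resolved_datapoints(owner: str, name: str) -> int:
    return Dataset.objects(owner=owner, name=name, resolver_commit_num__ne=-1).count()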
class IssueEvent(DynamicEmbeddedDocument):
"""
Object representing issue events.
For assigned, unassigned, labeled, unlabeled, referenced,
cross-referenced, and commented events, additional fields are available.
This document may contain **additional** fields depending on the specific event.
Attributes:
type: Type of the event
time: The time when this event happened, can be null for some events
actor: The GitHub user (login name) associated with the event, can be null for some events
Attributes (for commented):
comment: The comment text
commenter: The commenter GitHub username
Attributes (for labeled, unlabeled):
label: The label name
Attributes (for assigned, unassigned):
assignee: The assignee name
Attributes (for referenced, cross-referenced):
source: The source of reference (an issue number), may be null
commit: The commit SHA of the reference, may be null
"""
type: str = StringField(required=True)
time: datetime = DateTimeField(null=True)
actor: str = StringField(null=True)
comment: str = StringField(null=True)
commenter: str = StringField(null=True)
label: str = StringField(null=True)
assignee: str = StringField(null=True)
source: int = IntField(null=True)
commit: str = StringField(null=True)
class ResolvedIssue(Document):
"""
Additional issue information for issue that are resolved by a developer.
These issues will be used as the training dataset for RecGFI training.
"""
owner: str = StringField(required=True)
name: str = StringField(required=True)
number: int = IntField(required=True)
created_at: datetime = DateTimeField(required=True)
resolved_at: datetime = DateTimeField(required=True)
resolver: str = StringField(required=True) # Issue resolver's GitHub user name
# If int, the PR number that resolved this issue.
# If string, the commit hash that resolved this issue
resolved_in: Union[int, str] = DynamicField(required=True)
# Issue resolver's commits to this repo, before the issue is resolved
resolver_commit_num: int = IntField(required=True)
events: List[IssueEvent] = ListField(EmbeddedDocumentField(IssueEvent))
meta = {"indexes": [{"fields": ["owner", "name", "number"], "unique": True}]}
class OpenIssue(Document):
"""
Additional issue information for currently open issues.
These issues will be used as the testing dataset for RecGFI training.
"""
owner: str = StringField(required=True)
name: str = StringField(required=True)
number: int = IntField(required=True)
created_at: datetime = DateTimeField(required=True)
updated_at: datetime = DateTimeField(required=True)
events: List[IssueEvent] = ListField(EmbeddedDocumentField(IssueEvent))
meta = {"indexes": [{"fields": ["owner", "name", "number"], "unique": True}]}
class Repo(Document):
"""
Repository statistics for both RecGFI training and web app.
Attributes:
created_at: The time when the repository was created in database
updated_at: The time when the repository was last updated in database
repo_created_at: The time when this repository is created in GitHub
owner, name: Uniquely identifies a GitHub repository
topics: A list of topics associated with the repository
language: Main programming language (as returned by GitHub), can be None
languages: All programming languages and their lines of code
description: Repository description
        readme: Repository README content
median_issue_close_time: The median time it takes to close an issue (in seconds)
monthly_stars, monthly_commits, monthly_issues, monthly_pulls:
Four time series describing number of new stars, commits, issues, and pulls
in each month since repository creation
"""
class LanguageCount(EmbeddedDocument):
language: str = StringField(required=True)
count: int = IntField(required=True)
class MonthCount(EmbeddedDocument):
month: datetime = DateTimeField(required=True)
count: int = IntField(required=True, min_value=0)
created_at: datetime = DateTimeField(required=True)
updated_at: datetime = DateTimeField(required=True)
repo_created_at: datetime = DateTimeField(required=True)
owner: str = StringField(required=True)
name: str = StringField(required=True)
topics: List[str] = ListField(StringField(), default=[])
language: str = StringField(null=True)
languages: List[LanguageCount] = EmbeddedDocumentListField(
LanguageCount, default=[]
)
description: str = StringField(null=True)
readme: str = StringField(null=True)
median_issue_close_time: float = FloatField(null=True)
monthly_stars: List[MonthCount] = EmbeddedDocumentListField(MonthCount, default=[])
monthly_commits: List[MonthCount] = EmbeddedDocumentListField(
MonthCount, default=[]
)
monthly_issues: List[MonthCount] = EmbeddedDocumentListField(MonthCount, default=[])
monthly_pulls: List[MonthCount] = EmbeddedDocumentListField(MonthCount, default=[])
meta = {"indexes": [{"fields": ["owner", "name"], "unique": True}]}
class RepoCommit(Document):
"""Repository commit statistics for RecGFI training"""
owner: str = StringField(required=True)
name: str = StringField(required=True)
sha: str = StringField(required=True)
# GitHub username of the commit author, can be None
author: str = StringField(null=True)
authored_at: datetime = DateTimeField(required=True)
# GitHub username of the committer, can be None
committer: str = StringField(null=True)
committed_at: datetime = DateTimeField(required=True)
message: str = StringField(required=True)
meta = {"indexes": [{"fields": ["owner", "name", "sha"], "unique": True}]}
class RepoIssue(Document):
"""
Repository issue statistics for RecGFI training.
Note that pull requests are also included in this collection
"""
owner: str = StringField(required=True)
name: str = StringField(required=True)
number: int = IntField(required=True, min_value=0)
# GitHub username of the issue reporter / PR submitter
user: str = StringField(required=True)
state: str = StringField(required=True, choices=("open", "closed"))
created_at: datetime = DateTimeField(
required=True
) # The time when this issue/PR is created
closed_at: datetime = DateTimeField(
null=True
) # The time when this issue/PR is closed
is_pull: bool = BooleanField(required=True) # Whether the issue is a pull request
merged_at: datetime = DateTimeField(
null=True
) # If a PR, the time when this PR is merged
title: str = StringField(required=True)
body: str = StringField(null=True)
labels: List[str] = ListField(StringField(required=True))
meta = {"indexes": [{"fields": ["owner", "name", "number"], "unique": True}]}
class RepoStar(Document):
"""Repository star statistics for RecGFI training"""
owner: str = StringField(required=True)
name: str = StringField(required=True)
# GitHub username who starred this repository
user: str = StringField(required=True)
starred_at: datetime = DateTimeField(required=True) # Time of the starred event
meta = {"indexes": [{"fields": ["owner", "name", "user"], "unique": True}]}
class User(Document):
"""User statistics for RecGFI training (TODO: This documentation is not finalized yet)"""
class Issue(EmbeddedDocument):
# repo info
owner: str = StringField(required=True)
name: str = StringField(required=True)
repo_stars: int = IntField(required=True, min_value=0)
# issue number (state can not be updated incrementally)
state: str = StringField(required=True)
number: int = IntField(required=True, min_value=0)
created_at: datetime = DateTimeField(required=True)
class Pull(EmbeddedDocument):
# repo info
owner: str = StringField(required=True)
name: str = StringField(required=True)
repo_stars: int = IntField(required=True, min_value=0)
# pull request number (state can not be updated incrementally)
state: str = StringField(required=True)
number: int = IntField(required=True)
created_at: datetime = DateTimeField(required=True)
class Review(EmbeddedDocument):
# repo info
owner: str = StringField(required=True)
name: str = StringField(required=True)
repo_stars: int = IntField(required=True, min_value=0)
# review number & state
number: int = IntField(required=True)
state: str = StringField(required=True)
created_at: datetime = DateTimeField(required=True)
class CommitContribution(EmbeddedDocument):
# repo info
owner: str = StringField(required=True)
name: str = StringField(required=True)
repo_stars: int = IntField(required=True, min_value=0)
# commit count
commit_count: int = IntField(required=True, min_value=0)
created_at: datetime = DateTimeField(required=True)
_created_at: datetime = DateTimeField(required=True) # created in the database
_updated_at: datetime = DateTimeField(required=True) # updated in the database
name: str = StringField(null=True)
login: str = StringField(required=True)
# issues, issueComments, pulls (use end cursor to paginate)
issues: Issue = EmbeddedDocumentListField(Issue)
pulls: Pull = EmbeddedDocumentListField(Pull)
# reviews, commits (use date to paginate)
pull_reviews: Review = EmbeddedDocumentListField(Review)
commit_contributions: CommitContribution = EmbeddedDocumentListField(
CommitContribution
)
meta = {
"indexes": [
{"fields": ["login"], "unique": True},
{"fields": ["issues.owner", "issues.name"]},
{"fields": ["issues.created_at"]},
{"fields": ["pulls.owner", "pulls.name"]},
{"fields": ["pulls.created_at"]},
{"fields": ["pull_reviews.owner", "pull_reviews.name"]},
{"fields": ["pull_reviews.created_at"]},
{"fields": ["commit_contributions.owner", "commit_contributions.name"]},
{"fields": ["commit_contributions.created_at"]},
]
}
class GithubTokens(Document):
"""GitHub tokens for GitHub App"""
app_name: str = StringField(required=True)
client_id: str = StringField(required=True)
client_secret: str = StringField(required=True)
meta = {
"indexes": [
{"fields": ["client_id"], "unique": True},
{"fields": ["app_name"], "unique": True},
]
}
class GfiUsers(Document):
"""User statictics for GFI-Bot Web App Users"""
github_id: int = IntField(required=True)
github_access_token: str = StringField(required=True)
github_login: str = StringField(required=True)
github_name: str = StringField(required=True)
is_github_app_user: bool = BooleanField(required=True)
github_avatar_url: str = StringField(required=False)
github_url: str = StringField(required=False)
github_email: str = StringField(required=False)
twitter_user_name = StringField(required=False)
meta = {
"indexes": [
{"fields": ["github_id", "is_github_app_user"], "unique": True},
{"fields": ["github_login", "is_github_app_user"], "unique": True},
{"fields": ["github_email"]},
{"fields": ["twitter_user_name"]},
]
}
class GfiQueries(Document):
"""GFI-Bot Web App queries"""
name: str = StringField(required=True)
owner: str = StringField(required=True)
user_github_login: str = StringField(required=True)
is_pending: bool = BooleanField(required=True)
is_finished: bool = BooleanField(required=True)
_created_at: datetime = DateTimeField(required=True)
_finished_at: datetime = DateTimeField(required=False)
    meta = {
"indexes": [
{"fields": ["name", "owner"], "unique": True},
{"fields": ["user_github_login"]},
]
}
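# Hedged sanity-check sketch: the connection string below is a placeholder; point it at the
# real GFI-Bot MongoDB instance (or drop the host argument to use mongoengine defaults).
if __name__ == "__main__":
    connect(host="mongodb://localhost:27017/gfibot")
    print("predictions:", Prediction.objects.count())
    print("resolved issues:", ResolvedIssue.objects.count())
    print("repositories:", Repo.objects.count())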
| python |
def func(a):
return a + 1
ls = [func(a) for a in range(10)]
| python |
from lxml import etree
from re import search
class Response:
@classmethod
def resultDict(cls, strResult):
        responseGroup = search(r"<RetornoXML>(.*)</Retorno", strResult).group(1)
res = {}
root = etree.fromstring(responseGroup)
for i in root.iter():
text = i.text
text = text.encode("utf-8", "replace") if text else None
if text:
res.setdefault("{tag}".format(tag=i.tag), "{text}".format(text=text))
return res
@classmethod
def getTail(cls, strResult):
        responseGroup = search(r"<RetornoXML>(.*)</Retorno", strResult).group(1)
        responseGroup = search(r"</Cabecalho>(.*)</Retorno", responseGroup).group(1)
try:
root = "<root>" + responseGroup + "</root>"
tree = etree.fromstring(root)
nfeData = []
res = {}
for i in tree:
res.update({
"SerieRPS": i.find('.//SerieRPS', namespaces={}).text,
"NumeroRPS": i.find('.//NumeroRPS', namespaces={}).text,
"DataEmissaoNFe": i.find('.//DataEmissaoNFe', namespaces={}).text,
"CPFCNPJTomador": i.find('.//CPFCNPJTomador/CNPJ', namespaces={}).text,
"CodigoVerificacao": i.find('.//CodigoVerificacao', namespaces={}).text,
"NumeroNFe": i.find('.//NumeroNFe', namespaces={}).text
})
nfeData.append(res.copy())
return nfeData
except Exception as error:
return error
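if __name__ == "__main__":
    # Hedged example: a tiny synthetic payload wrapped in <RetornoXML>, only to show how
    # resultDict flattens tags into a dict; real NFS-e responses are larger and namespaced.
    sample = ("<RetornoXML>"
              "<Retorno><Cabecalho><Sucesso>true</Sucesso></Cabecalho></Retorno>"
              "</RetornoXML>")
    print(Response.resultDict(sample))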
| python |
import json
import requests
from configparser import ConfigParser
import pandas as pd
from ipywidgets import widgets, interact
from IPython.display import display
from .appconfig import AppConfig
from abc import ABC, abstractmethod
class widget_container:
def __init__(self, **wlist):
interact(self.on_change, **wlist)
def on_change(self, w, w2):
print(w, w2)
class db_widget:
def __init__(self, widget):
interact(self.on_change, widget=widget)
def on_change(self, widget):
print(widget)
class tools:
def __init__(self):
self.config=AppConfig()
self.url = self.config["client"]["json_url"]
self.info = pd.DataFrame(requests.get(self.config["client"]["info_url"]).json())
def widgets(self):
subsystems = list(self.info.subsystem[~self.info.subsystem.duplicated()].values)
options = [(v,i) for i,v in enumerate(subsystems)]
subsystems.insert(0, '')
        log = widgets.Dropdown(options=subsystems, description="Log")
        param = widgets.Dropdown(description="Parameter")
submit = widgets.Button(description='Submit', tooltip='Get Data')
def on_select(log, params):
#print(log, param)
#self.info[self.info.subsystem == log]
param.options = list(self.info['ds_name'][self.info.subsystem == log])
def on_submit(value):
print(value)
interact(on_select, log=log, params=param)
display(submit)
submit.observe(on_submit)
def junk(self):
data = {"ds_names": ["laser_cutter_room_temperature3_C", 'hexapod_mini_off_guider_tz_applied'],
"delta_time": 360000}
        rq = self.test_it(data=data)
df = pd.read_json(json.dumps(rq['data']))
print(rq["errors"])
print(rq["info"])
def test_it(self, data=None):
if data is None:
data = {"ds_names": ["laser_cutter_room_dewpoint3_C", 'hexapod_mini_off_guider_tz_applied'],
"delta_time": 360000}
url = self.config["client"]["json_url"]
rq = requests.get(url, json=data)
try:
resp = rq.json()
except Exception as err:
print(err)
resp = rq
return resp
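if __name__ == "__main__":
    # Hedged usage sketch: assumes AppConfig can locate a config file with a [client] section
    # providing "json_url" and "info_url", and that those endpoints are reachable.
    t = tools()
    print(t.test_it())  # fetch the default datasets and print the decoded JSON response
    # t.widgets() renders the dropdowns and submit button when run inside a Jupyter notebook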
| python |
# -*- coding:utf-8 -*-
from DLtorch.trainer.base import BaseTrainer
from DLtorch.trainer.CNNTrainer import CNNTrainer | python |
import pytest
from rest_framework import status
from rest_framework.reverse import reverse
from .factories import JsonFactory
pytestmark = pytest.mark.django_db
@pytest.fixture
def sample_json(box):
return JsonFactory(box=box, data={"key": "value", "lol": {"name": "hue", "age": 1}})
@pytest.mark.parametrize("method", ["get", "post", "put", "patch", "delete"])
def test_unauthorized(client_api_anon, method):
url = reverse("jsons:jsons-list")
response = getattr(client_api_anon, method)(url)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_get_jsons_list(client_api):
url = reverse("jsons:jsons-list")
response = client_api.get(url)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 0
assert data["next"] is None
assert data["previous"] is None
assert data["results"] == []
def test_get_jsons_list_simple(client_api, sample_json):
url = reverse("jsons:jsons-list")
response = client_api.get(url)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert data["results"][0] == {"id": str(sample_json.id), "data": sample_json.data}
def test_get_jsons_with_jsonmask(client_api, sample_json):
url = reverse("jsons:jsons-list") + "?fields=data(lol(name))"
response = client_api.get(url)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert data["results"][0] == {"data": {"lol": {"name": "hue"}}}
@pytest.mark.parametrize("search", ["key:value", "data__key:value"])
def test_get_jsons_filter_simple(client_api, sample_json, search):
url = reverse("jsons:jsons-list") + "?search={}".format(search)
response = client_api.get(url)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == 1
assert data["results"][0]["data"]["key"] == "value"
@pytest.mark.parametrize(
"search,expected",
[
("key:value", 2),
("lol:yolo", 1),
("lol:", 1),
("key:value,lol:yolo", 1),
("key:value,lol:", 1),
("key:,lol:yolo", 0),
("key:,lol:", 0),
],
)
def test_get_jsons_filter_by_multiple_keys(client_api, box, search, expected):
JsonFactory(box=box, data={"key": "value", "lol": "yolo"})
JsonFactory(box=box, data={"key": "value", "lol": ""})
url = reverse("jsons:jsons-list") + "?search={}".format(search)
response = client_api.get(url)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["count"] == expected
@pytest.mark.parametrize("search", ["value", "some:value,other"])
def test_get_jsons_filter_with_invalid_search(client_api, search):
url = reverse("jsons:jsons-list") + "?search={}".format(search)
response = client_api.get(url)
assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_get_json_detail(client_api, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id])
response = client_api.get(url)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data["id"] == str(sample_json.id)
assert data["data"] == sample_json.data
def test_get_json_detail_from_other_box(client_api_secondary, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id])
response = client_api_secondary.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
data = response.json()
assert "not found" in data["detail"].lower()
def test_get_json_detail_with_jsonmask(client_api, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id]) + "?fields=data(lol(age))"
response = client_api.get(url)
assert response.status_code == status.HTTP_200_OK
data = response.json()
assert data == {"data": {"lol": {"age": 1}}}
def test_delete_json(client_api, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id])
response = client_api.delete(url)
assert response.status_code == status.HTTP_204_NO_CONTENT
def test_delete_json_from_other_box(client_api_secondary, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id])
response = client_api_secondary.delete(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_patch_json(client_api, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id])
payload = {"data": {"other": "whatever"}}
response = client_api.patch(url, data=payload)
assert response.status_code == status.HTTP_200_OK
assert response.json()["data"] == payload["data"]
@pytest.mark.parametrize("data", [{}, "", 123, None])
def test_patch_json_invalid(client_api, sample_json, data):
url = reverse("jsons:jsons-detail", [sample_json.id])
payload = {"data": data}
response = client_api.patch(url, data=payload)
assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_patch_json_from_other_box(client_api_secondary, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id])
payload = {"data": {"other": "whatever"}}
response = client_api_secondary.patch(url, data=payload)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_put_json(client_api, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id])
payload = {"data": {"other": "whatever"}}
response = client_api.put(url, data=payload)
assert response.status_code == status.HTTP_200_OK
assert response.json()["data"] == payload["data"]
@pytest.mark.parametrize("data", [{}, "", 123, None])
def test_put_json_invalid(client_api, sample_json, data):
url = reverse("jsons:jsons-detail", [sample_json.id])
payload = {"data": data}
response = client_api.put(url, data=payload)
assert response.status_code == status.HTTP_400_BAD_REQUEST
def test_put_json_from_other_box(client_api_secondary, sample_json):
url = reverse("jsons:jsons-detail", [sample_json.id])
payload = {"data": {"other": "whatever"}}
response = client_api_secondary.put(url, data=payload)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_post_json_empty(client_api):
url = reverse("jsons:jsons-list")
response = client_api.post(url, data={})
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "is required" in response.json()["data"][0]
def test_post_json_invalid(client_api):
url = reverse("jsons:jsons-list")
response = client_api.post(url, data="abc")
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert "invalid" in response.json()["non_field_errors"][0].lower()
def test_post_json_simple(client_api):
url = reverse("jsons:jsons-list")
payload = {"data": {"key": "value"}}
response = client_api.post(url, data=payload)
assert response.status_code == status.HTTP_201_CREATED
data = response.json()
assert "id" in data
assert data["data"] == payload["data"]
def test_post_json_complex(client_api):
url = reverse("jsons:jsons-list")
payload = {
"data": {
"key": "value",
"foobar": {"nested": 1, "lalala": ["la", "la", "la"]},
"alist": [3.14],
}
}
response = client_api.post(url, data=payload)
assert response.status_code == status.HTTP_201_CREATED
| python |
import logging
import logging.config
from decimal import Decimal
from pprint import pformat
import time
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
from trader.exchange.abstract_book import AbstractBook
from trader.exchange.order import Order
import config
from trader.database.manager import BaseWrapper
logging.config.dictConfig(config.log_config)
logger = logging.getLogger(__name__)
@AbstractBook.register
class Book(BaseWrapper):
pair = Column("pair", String(15))
orders = relationship("Order", lazy="dynamic", collection_class=list,
cascade="all, delete-orphan")
def __init__(self, pair):
self.pair = pair
self.persist = True
# if not isinstance(trader, ExchangeApi):
# raise ValueError("trader {} is not an instance of ExchangeApi", str(trader))
# self.trading_api = trader
def add_order_to_book(self, order):
if not isinstance(order, Order):
            raise TypeError("Expected order to be of type Order, but received type {}".format(type(order)))
self.orders.append(order)
def get_all_orders(self):
return self.orders.all()
def get_ready_orders(self):
return self.orders.filter(
Order._status == "ready").all()
def get_open_orders(self):
return self.orders.filter(
Order._status == "open").all()
def get_filled_orders(self):
return self.orders.filter(
Order._status == "filled").all()
def get_canceled_orders(self):
return self.orders.filter(
Order._status == "canceled").all()
def get_rejected_orders(self):
return self.orders.filter(
Order._status == "rejected").all()
def order_filled(self, filled_order):
logger.debug("Updating filled order: {}".format(filled_order))
filled_order.status = "filled"
filled_order.filled = filled_order.size
| python |
# coding=utf-8
# *** WARNING: this file was generated by Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
import pulumi_kubernetes
__all__ = ['CertManagerArgs', 'CertManager']
@pulumi.input_type
class CertManagerArgs:
def __init__(__self__, *,
affinity: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']] = None,
cainjector: Optional[pulumi.Input['CertManagerCaInjectorArgs']] = None,
cluster_resource_namespace: Optional[pulumi.Input[str]] = None,
container_security_context: Optional[pulumi.Input['pulumi_kubernetes.core.v1.SecurityContextArgs']] = None,
deployment_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
extra_args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
extra_env: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]] = None,
extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]] = None,
extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]] = None,
feature_gates: Optional[pulumi.Input[str]] = None,
global_: Optional[pulumi.Input['CertManagerGlobalArgs']] = None,
helm_options: Optional[pulumi.Input['ReleaseArgs']] = None,
http_proxy: Optional[pulumi.Input[str]] = None,
https_proxy: Optional[pulumi.Input[str]] = None,
image: Optional[pulumi.Input['CertManagerImageArgs']] = None,
ingress_shim: Optional[pulumi.Input['CertManagerIngressShimArgs']] = None,
install_crds: Optional[pulumi.Input[bool]] = None,
no_proxy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_selector: Optional[pulumi.Input['pulumi_kubernetes.core.v1.NodeSelectorArgs']] = None,
pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
pod_dns_config: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodDNSConfigArgs']] = None,
pod_dns_policy: Optional[pulumi.Input[str]] = None,
pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
prometheus: Optional[pulumi.Input['CertManagerPrometheusArgs']] = None,
replica_count: Optional[pulumi.Input[int]] = None,
resources: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']] = None,
security_context: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']] = None,
service_account: Optional[pulumi.Input['CertManagerServiceAccountArgs']] = None,
service_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
startupapicheck: Optional[pulumi.Input['CertManagerStartupAPICheckArgs']] = None,
strategy: Optional[pulumi.Input['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']] = None,
tolerations: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]] = None,
webhook: Optional[pulumi.Input['CertManagerWebhookArgs']] = None):
"""
The set of arguments for constructing a CertManager resource.
:param pulumi.Input[str] cluster_resource_namespace: Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources. By default, the same namespace as cert-manager is deployed within is used. This namespace will not be automatically created by the Helm chart.
:param pulumi.Input['pulumi_kubernetes.core.v1.SecurityContextArgs'] container_security_context: Container Security Context to be set on the controller component container. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] deployment_annotations: Optional additional annotations to add to the controller Deployment
:param pulumi.Input[Sequence[pulumi.Input[str]]] extra_args: Optional additional arguments.
:param pulumi.Input[str] feature_gates: Comma separated list of feature gates that should be enabled on the controller pod.
:param pulumi.Input['ReleaseArgs'] helm_options: HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_annotations: Optional additional annotations to add to the controller Pods
:param pulumi.Input[str] pod_dns_policy: Optional DNS settings, useful if you have a public and private DNS zone for the same domain on Route 53. What follows is an example of ensuring cert-manager can access an ingress or DNS TXT records at all times. NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for the cluster to work.
:param pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs'] security_context: Pod Security Context. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] service_annotations: Optional additional annotations to add to the controller service
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] service_labels: Optional additional labels to add to the controller Service
"""
if affinity is not None:
pulumi.set(__self__, "affinity", affinity)
if cainjector is not None:
pulumi.set(__self__, "cainjector", cainjector)
if cluster_resource_namespace is not None:
pulumi.set(__self__, "cluster_resource_namespace", cluster_resource_namespace)
if container_security_context is not None:
pulumi.set(__self__, "container_security_context", container_security_context)
if deployment_annotations is not None:
pulumi.set(__self__, "deployment_annotations", deployment_annotations)
if extra_args is not None:
pulumi.set(__self__, "extra_args", extra_args)
if extra_env is not None:
pulumi.set(__self__, "extra_env", extra_env)
if extra_volume_mounts is not None:
pulumi.set(__self__, "extra_volume_mounts", extra_volume_mounts)
if extra_volumes is not None:
pulumi.set(__self__, "extra_volumes", extra_volumes)
if feature_gates is not None:
pulumi.set(__self__, "feature_gates", feature_gates)
if global_ is not None:
pulumi.set(__self__, "global_", global_)
if helm_options is not None:
pulumi.set(__self__, "helm_options", helm_options)
if http_proxy is not None:
pulumi.set(__self__, "http_proxy", http_proxy)
if https_proxy is not None:
pulumi.set(__self__, "https_proxy", https_proxy)
if image is not None:
pulumi.set(__self__, "image", image)
if ingress_shim is not None:
pulumi.set(__self__, "ingress_shim", ingress_shim)
if install_crds is not None:
pulumi.set(__self__, "install_crds", install_crds)
if no_proxy is not None:
pulumi.set(__self__, "no_proxy", no_proxy)
if node_selector is not None:
pulumi.set(__self__, "node_selector", node_selector)
if pod_annotations is not None:
pulumi.set(__self__, "pod_annotations", pod_annotations)
if pod_dns_config is not None:
pulumi.set(__self__, "pod_dns_config", pod_dns_config)
if pod_dns_policy is not None:
pulumi.set(__self__, "pod_dns_policy", pod_dns_policy)
if pod_labels is not None:
pulumi.set(__self__, "pod_labels", pod_labels)
if prometheus is not None:
pulumi.set(__self__, "prometheus", prometheus)
if replica_count is not None:
pulumi.set(__self__, "replica_count", replica_count)
if resources is not None:
pulumi.set(__self__, "resources", resources)
if security_context is not None:
pulumi.set(__self__, "security_context", security_context)
if service_account is not None:
pulumi.set(__self__, "service_account", service_account)
if service_annotations is not None:
pulumi.set(__self__, "service_annotations", service_annotations)
if service_labels is not None:
pulumi.set(__self__, "service_labels", service_labels)
if startupapicheck is not None:
pulumi.set(__self__, "startupapicheck", startupapicheck)
if strategy is not None:
pulumi.set(__self__, "strategy", strategy)
if tolerations is not None:
pulumi.set(__self__, "tolerations", tolerations)
if webhook is not None:
pulumi.set(__self__, "webhook", webhook)
@property
@pulumi.getter
def affinity(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]:
return pulumi.get(self, "affinity")
@affinity.setter
def affinity(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.AffinityArgs']]):
pulumi.set(self, "affinity", value)
@property
@pulumi.getter
def cainjector(self) -> Optional[pulumi.Input['CertManagerCaInjectorArgs']]:
return pulumi.get(self, "cainjector")
@cainjector.setter
def cainjector(self, value: Optional[pulumi.Input['CertManagerCaInjectorArgs']]):
pulumi.set(self, "cainjector", value)
@property
@pulumi.getter(name="clusterResourceNamespace")
def cluster_resource_namespace(self) -> Optional[pulumi.Input[str]]:
"""
Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources. By default, the same namespace as cert-manager is deployed within is used. This namespace will not be automatically created by the Helm chart.
"""
return pulumi.get(self, "cluster_resource_namespace")
@cluster_resource_namespace.setter
def cluster_resource_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_resource_namespace", value)
@property
@pulumi.getter(name="containerSecurityContext")
def container_security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.SecurityContextArgs']]:
"""
Container Security Context to be set on the controller component container. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
"""
return pulumi.get(self, "container_security_context")
@container_security_context.setter
def container_security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.SecurityContextArgs']]):
pulumi.set(self, "container_security_context", value)
@property
@pulumi.getter(name="deploymentAnnotations")
def deployment_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Optional additional annotations to add to the controller Deployment
"""
return pulumi.get(self, "deployment_annotations")
@deployment_annotations.setter
def deployment_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "deployment_annotations", value)
@property
@pulumi.getter(name="extraArgs")
def extra_args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Optional additional arguments.
"""
return pulumi.get(self, "extra_args")
@extra_args.setter
def extra_args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "extra_args", value)
@property
@pulumi.getter(name="extraEnv")
def extra_env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]:
return pulumi.get(self, "extra_env")
@extra_env.setter
def extra_env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.EnvVarArgs']]]]):
pulumi.set(self, "extra_env", value)
@property
@pulumi.getter(name="extraVolumeMounts")
def extra_volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]:
return pulumi.get(self, "extra_volume_mounts")
@extra_volume_mounts.setter
def extra_volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]):
pulumi.set(self, "extra_volume_mounts", value)
@property
@pulumi.getter(name="extraVolumes")
def extra_volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]:
return pulumi.get(self, "extra_volumes")
@extra_volumes.setter
def extra_volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.VolumeArgs']]]]):
pulumi.set(self, "extra_volumes", value)
@property
@pulumi.getter(name="featureGates")
def feature_gates(self) -> Optional[pulumi.Input[str]]:
"""
Comma separated list of feature gates that should be enabled on the controller pod.
"""
return pulumi.get(self, "feature_gates")
@feature_gates.setter
def feature_gates(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "feature_gates", value)
@property
@pulumi.getter(name="global")
def global_(self) -> Optional[pulumi.Input['CertManagerGlobalArgs']]:
return pulumi.get(self, "global_")
@global_.setter
def global_(self, value: Optional[pulumi.Input['CertManagerGlobalArgs']]):
pulumi.set(self, "global_", value)
@property
@pulumi.getter(name="helmOptions")
def helm_options(self) -> Optional[pulumi.Input['ReleaseArgs']]:
"""
HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
"""
return pulumi.get(self, "helm_options")
@helm_options.setter
def helm_options(self, value: Optional[pulumi.Input['ReleaseArgs']]):
pulumi.set(self, "helm_options", value)
@property
@pulumi.getter
def http_proxy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "http_proxy")
@http_proxy.setter
def http_proxy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "http_proxy", value)
@property
@pulumi.getter
def https_proxy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "https_proxy")
@https_proxy.setter
def https_proxy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "https_proxy", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input['CertManagerImageArgs']]:
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input['CertManagerImageArgs']]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="ingressShim")
def ingress_shim(self) -> Optional[pulumi.Input['CertManagerIngressShimArgs']]:
return pulumi.get(self, "ingress_shim")
@ingress_shim.setter
def ingress_shim(self, value: Optional[pulumi.Input['CertManagerIngressShimArgs']]):
pulumi.set(self, "ingress_shim", value)
@property
@pulumi.getter(name="installCRDs")
def install_crds(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "install_crds")
@install_crds.setter
def install_crds(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "install_crds", value)
@property
@pulumi.getter
def no_proxy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "no_proxy")
@no_proxy.setter
def no_proxy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "no_proxy", value)
@property
@pulumi.getter(name="nodeSelector")
def node_selector(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.NodeSelectorArgs']]:
return pulumi.get(self, "node_selector")
@node_selector.setter
def node_selector(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.NodeSelectorArgs']]):
pulumi.set(self, "node_selector", value)
@property
@pulumi.getter(name="podAnnotations")
def pod_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Optional additional annotations to add to the controller Pods
"""
return pulumi.get(self, "pod_annotations")
@pod_annotations.setter
def pod_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "pod_annotations", value)
@property
@pulumi.getter(name="podDnsConfig")
def pod_dns_config(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodDNSConfigArgs']]:
return pulumi.get(self, "pod_dns_config")
@pod_dns_config.setter
def pod_dns_config(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodDNSConfigArgs']]):
pulumi.set(self, "pod_dns_config", value)
@property
@pulumi.getter(name="podDnsPolicy")
def pod_dns_policy(self) -> Optional[pulumi.Input[str]]:
"""
Optional DNS settings, useful if you have a public and private DNS zone for the same domain on Route 53. What follows is an example of ensuring cert-manager can access an ingress or DNS TXT records at all times. NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for the cluster to work.
"""
return pulumi.get(self, "pod_dns_policy")
@pod_dns_policy.setter
def pod_dns_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pod_dns_policy", value)
@property
@pulumi.getter(name="podLabels")
def pod_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "pod_labels")
@pod_labels.setter
def pod_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "pod_labels", value)
@property
@pulumi.getter
def prometheus(self) -> Optional[pulumi.Input['CertManagerPrometheusArgs']]:
return pulumi.get(self, "prometheus")
@prometheus.setter
def prometheus(self, value: Optional[pulumi.Input['CertManagerPrometheusArgs']]):
pulumi.set(self, "prometheus", value)
@property
@pulumi.getter(name="replicaCount")
def replica_count(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "replica_count")
@replica_count.setter
def replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "replica_count", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]:
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]):
pulumi.set(self, "resources", value)
@property
@pulumi.getter(name="securityContext")
def security_context(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]:
"""
Pod Security Context. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
"""
return pulumi.get(self, "security_context")
@security_context.setter
def security_context(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]):
pulumi.set(self, "security_context", value)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> Optional[pulumi.Input['CertManagerServiceAccountArgs']]:
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: Optional[pulumi.Input['CertManagerServiceAccountArgs']]):
pulumi.set(self, "service_account", value)
@property
@pulumi.getter(name="serviceAnnotations")
def service_annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Optional additional annotations to add to the controller service
"""
return pulumi.get(self, "service_annotations")
@service_annotations.setter
def service_annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "service_annotations", value)
@property
@pulumi.getter(name="serviceLabels")
def service_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Optional additional labels to add to the controller Service
"""
return pulumi.get(self, "service_labels")
@service_labels.setter
def service_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "service_labels", value)
@property
@pulumi.getter
def startupapicheck(self) -> Optional[pulumi.Input['CertManagerStartupAPICheckArgs']]:
return pulumi.get(self, "startupapicheck")
@startupapicheck.setter
def startupapicheck(self, value: Optional[pulumi.Input['CertManagerStartupAPICheckArgs']]):
pulumi.set(self, "startupapicheck", value)
@property
@pulumi.getter
def strategy(self) -> Optional[pulumi.Input['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']]:
return pulumi.get(self, "strategy")
@strategy.setter
def strategy(self, value: Optional[pulumi.Input['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']]):
pulumi.set(self, "strategy", value)
@property
@pulumi.getter
def tolerations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]:
return pulumi.get(self, "tolerations")
@tolerations.setter
def tolerations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_kubernetes.core.v1.TolerationArgs']]]]):
pulumi.set(self, "tolerations", value)
@property
@pulumi.getter
def webhook(self) -> Optional[pulumi.Input['CertManagerWebhookArgs']]:
return pulumi.get(self, "webhook")
@webhook.setter
def webhook(self, value: Optional[pulumi.Input['CertManagerWebhookArgs']]):
pulumi.set(self, "webhook", value)
class CertManager(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
affinity: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.AffinityArgs']]] = None,
cainjector: Optional[pulumi.Input[pulumi.InputType['CertManagerCaInjectorArgs']]] = None,
cluster_resource_namespace: Optional[pulumi.Input[str]] = None,
container_security_context: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.SecurityContextArgs']]] = None,
deployment_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
extra_args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
extra_env: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.EnvVarArgs']]]]] = None,
extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]] = None,
extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.VolumeArgs']]]]] = None,
feature_gates: Optional[pulumi.Input[str]] = None,
global_: Optional[pulumi.Input[pulumi.InputType['CertManagerGlobalArgs']]] = None,
helm_options: Optional[pulumi.Input[pulumi.InputType['ReleaseArgs']]] = None,
http_proxy: Optional[pulumi.Input[str]] = None,
https_proxy: Optional[pulumi.Input[str]] = None,
image: Optional[pulumi.Input[pulumi.InputType['CertManagerImageArgs']]] = None,
ingress_shim: Optional[pulumi.Input[pulumi.InputType['CertManagerIngressShimArgs']]] = None,
install_crds: Optional[pulumi.Input[bool]] = None,
no_proxy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_selector: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.NodeSelectorArgs']]] = None,
pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
pod_dns_config: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodDNSConfigArgs']]] = None,
pod_dns_policy: Optional[pulumi.Input[str]] = None,
pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
prometheus: Optional[pulumi.Input[pulumi.InputType['CertManagerPrometheusArgs']]] = None,
replica_count: Optional[pulumi.Input[int]] = None,
resources: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]] = None,
security_context: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]] = None,
service_account: Optional[pulumi.Input[pulumi.InputType['CertManagerServiceAccountArgs']]] = None,
service_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
startupapicheck: Optional[pulumi.Input[pulumi.InputType['CertManagerStartupAPICheckArgs']]] = None,
strategy: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']]] = None,
tolerations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.TolerationArgs']]]]] = None,
webhook: Optional[pulumi.Input[pulumi.InputType['CertManagerWebhookArgs']]] = None,
__props__=None):
"""
Automates the management and issuance of TLS certificates from various issuing sources within Kubernetes
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_resource_namespace: Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources. By default, the same namespace as cert-manager is deployed within is used. This namespace will not be automatically created by the Helm chart.
:param pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.SecurityContextArgs']] container_security_context: Container Security Context to be set on the controller component container. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] deployment_annotations: Optional additional annotations to add to the controller Deployment
:param pulumi.Input[Sequence[pulumi.Input[str]]] extra_args: Optional additional arguments.
:param pulumi.Input[str] feature_gates: Comma separated list of feature gates that should be enabled on the controller pod.
:param pulumi.Input[pulumi.InputType['ReleaseArgs']] helm_options: HelmOptions is an escape hatch that lets the end user control any aspect of the Helm deployment. This exposes the entirety of the underlying Helm Release component args.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_annotations: Optional additional annotations to add to the controller Pods
:param pulumi.Input[str] pod_dns_policy: Optional DNS settings, useful if you have a public and private DNS zone for the same domain on Route 53. What follows is an example of ensuring cert-manager can access an ingress or DNS TXT records at all times. NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for the cluster to work.
:param pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodSecurityContextArgs']] security_context: Pod Security Context. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] service_annotations: Optional additional annotations to add to the controller service
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] service_labels: Optional additional labels to add to the controller Service
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[CertManagerArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Automates the management and issuance of TLS certificates from various issuing sources within Kubernetes
:param str resource_name: The name of the resource.
:param CertManagerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CertManagerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
affinity: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.AffinityArgs']]] = None,
cainjector: Optional[pulumi.Input[pulumi.InputType['CertManagerCaInjectorArgs']]] = None,
cluster_resource_namespace: Optional[pulumi.Input[str]] = None,
container_security_context: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.SecurityContextArgs']]] = None,
deployment_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
extra_args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
extra_env: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.EnvVarArgs']]]]] = None,
extra_volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.VolumeMountArgs']]]]] = None,
extra_volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.VolumeArgs']]]]] = None,
feature_gates: Optional[pulumi.Input[str]] = None,
global_: Optional[pulumi.Input[pulumi.InputType['CertManagerGlobalArgs']]] = None,
helm_options: Optional[pulumi.Input[pulumi.InputType['ReleaseArgs']]] = None,
http_proxy: Optional[pulumi.Input[str]] = None,
https_proxy: Optional[pulumi.Input[str]] = None,
image: Optional[pulumi.Input[pulumi.InputType['CertManagerImageArgs']]] = None,
ingress_shim: Optional[pulumi.Input[pulumi.InputType['CertManagerIngressShimArgs']]] = None,
install_crds: Optional[pulumi.Input[bool]] = None,
no_proxy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_selector: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.NodeSelectorArgs']]] = None,
pod_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
pod_dns_config: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodDNSConfigArgs']]] = None,
pod_dns_policy: Optional[pulumi.Input[str]] = None,
pod_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
prometheus: Optional[pulumi.Input[pulumi.InputType['CertManagerPrometheusArgs']]] = None,
replica_count: Optional[pulumi.Input[int]] = None,
resources: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.ResourceRequirementsArgs']]] = None,
security_context: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.PodSecurityContextArgs']]] = None,
service_account: Optional[pulumi.Input[pulumi.InputType['CertManagerServiceAccountArgs']]] = None,
service_annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
startupapicheck: Optional[pulumi.Input[pulumi.InputType['CertManagerStartupAPICheckArgs']]] = None,
strategy: Optional[pulumi.Input[pulumi.InputType['pulumi_kubernetes.apps.v1.DeploymentStrategyArgs']]] = None,
tolerations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['pulumi_kubernetes.core.v1.TolerationArgs']]]]] = None,
webhook: Optional[pulumi.Input[pulumi.InputType['CertManagerWebhookArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CertManagerArgs.__new__(CertManagerArgs)
__props__.__dict__["affinity"] = affinity
__props__.__dict__["cainjector"] = cainjector
__props__.__dict__["cluster_resource_namespace"] = cluster_resource_namespace
__props__.__dict__["container_security_context"] = container_security_context
__props__.__dict__["deployment_annotations"] = deployment_annotations
__props__.__dict__["extra_args"] = extra_args
__props__.__dict__["extra_env"] = extra_env
__props__.__dict__["extra_volume_mounts"] = extra_volume_mounts
__props__.__dict__["extra_volumes"] = extra_volumes
__props__.__dict__["feature_gates"] = feature_gates
__props__.__dict__["global_"] = global_
__props__.__dict__["helm_options"] = helm_options
__props__.__dict__["http_proxy"] = http_proxy
__props__.__dict__["https_proxy"] = https_proxy
__props__.__dict__["image"] = image
__props__.__dict__["ingress_shim"] = ingress_shim
__props__.__dict__["install_crds"] = install_crds
__props__.__dict__["no_proxy"] = no_proxy
__props__.__dict__["node_selector"] = node_selector
__props__.__dict__["pod_annotations"] = pod_annotations
__props__.__dict__["pod_dns_config"] = pod_dns_config
__props__.__dict__["pod_dns_policy"] = pod_dns_policy
__props__.__dict__["pod_labels"] = pod_labels
__props__.__dict__["prometheus"] = prometheus
__props__.__dict__["replica_count"] = replica_count
__props__.__dict__["resources"] = resources
__props__.__dict__["security_context"] = security_context
__props__.__dict__["service_account"] = service_account
__props__.__dict__["service_annotations"] = service_annotations
__props__.__dict__["service_labels"] = service_labels
__props__.__dict__["startupapicheck"] = startupapicheck
__props__.__dict__["strategy"] = strategy
__props__.__dict__["tolerations"] = tolerations
__props__.__dict__["webhook"] = webhook
__props__.__dict__["status"] = None
super(CertManager, __self__).__init__(
'kubernetes-cert-manager:index:CertManager',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter
def status(self) -> pulumi.Output['outputs.ReleaseStatus']:
"""
Detailed information about the status of the underlying Helm deployment.
"""
return pulumi.get(self, "status")
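# Illustrative usage sketch (not part of the generated SDK): instantiating the
# CertManager component from a Pulumi program. The argument values below are
# assumptions chosen for the example, not defaults of this package.
#
#   import pulumi
#
#   cert_manager = CertManager(
#       "cert-manager",
#       install_crds=True,
#       replica_count=1,
#       pod_annotations={"example.com/owner": "platform-team"},
#   )
#   pulumi.export("cert_manager_status", cert_manager.status)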
import typing
from uuid import uuid4
from pydantic import BaseModel
IdentifierType = typing.NewType("IdentifierType", str)
def create_identifier() -> IdentifierType:
"""Create an identifier"""
return IdentifierType(str(uuid4()))
class EmptyModel(BaseModel):
pass
OffsetVector = typing.Tuple[float, float, float]
class JogPosition(BaseModel):
vector: OffsetVector
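# Illustrative usage sketch (assumption: these models are consumed by calling
# code elsewhere; shown here only to document the expected shapes):
#
#   request_id = create_identifier()            # e.g. '0b8f8d2e-...'
#   jog = JogPosition(vector=(0.0, 1.5, -0.5))  # OffsetVector is an (x, y, z) tuple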
import os

import pandas as pd

base_dir = 'C:/Users/Richard/Desktop/Database/嘉南AD_20200317'
files = os.listdir(base_dir)
#print(files)
files_xls = [f for f in files if f.endswith('.xlsx')]
#print(files_xls)
frames = []
for f in files_xls:
    data = pd.read_excel(os.path.join(base_dir, f), sheet_name='資料')
    # Print the file name once if any 'B3 Name' entry contains 'SC'.
    for i in data.index:
        if 'SC' in str(data['B3 Name'][i]):
            print(f)
            break
    frames.append(data)
# DataFrame.append was removed in pandas 2.x; concatenate the collected frames once.
df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
def main():
#Lets Create the test dataset to build our tree
dataset = {'Name':['Person 1','Person 2','Person 3','Person 4','Person 5','Person 6','Person 7','Person 8','Person 9','Person 10'],
'Salary':['Low','Med','Med','Med','Med','High','Low','High','Med','Low'],
'Sex':['Male','Male','Male','Female','Male','Female','Female','Male','Female','Male'],
'Marital':['Unmarried','Unmarried','Married','Married','Married','Unmarried','Unmarried','Unmarried','Unmarried','Married'],
'Class':['No','No','Yes','No','Yes','Yes','No','Yes','Yes','Yes']}
from Chapter_02 import DecisionTree_ID3 as ID3
#Preprocess data set
df = ID3.preProcess(dataset)
#Lets build the tree
tree = ID3.buildTree(df)
import pprint
#print(tree)
pprint.pprint(tree)
#Select test instance
    inst = df.iloc[2].copy()  # pandas removed the .ix indexer; use positional .iloc instead
#Remove its class attribute
inst.pop('Class')
#Get prediction
prediction = ID3.predict(inst, tree)
print("Prediction: %s"%prediction[0])
if __name__ == "__main__":
    main()
from django.db import models
from django.conf import settings
class Post(models.Model):
ip = models.CharField(max_length=50)
idUser = models.CharField(max_length=250)
idClick = models.CharField(max_length=250, primary_key=True)
classe = models.CharField(max_length=50)
texto = models.TextField(max_length=250)
current = models.CharField(max_length=250)
href = models.CharField(max_length=250)
timestamp = models.FloatField()
dateTimestamp = models.IntegerField()
    dateR = models.DateTimeField(auto_now=False, auto_now_add=True)
class Adapters(models.Model):
rid = models.CharField(max_length=250,primary_key=True)
    ativo = models.IntegerField(default=0, choices=[(0, 0), (1, 1)])  # assumed intent: a 0/1 flag; choices must be (value, label) pairs
class RecomendacaoAcessada(models.Model):
    rid = models.ForeignKey(Adapters, on_delete=models.CASCADE)
    idClick = models.ForeignKey(Post, on_delete=models.CASCADE)
idRows = models.AutoField(primary_key=True)
date = models.DateTimeField(auto_now=False, auto_now_add=True)
class RecomendacaoGerada(models.Model):
rid = models.ForeignKey(Adapters,on_delete=models.CASCADE)
idClick = models.CharField(max_length=250)
url = models.CharField(max_length=250)
date = models.DateTimeField(auto_now=False, auto_now_add=True)
idFileira = models.AutoField(primary_key=True)
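# Illustrative ORM usage sketch (assumption: executed inside a configured Django
# project, e.g. from a view or `manage.py shell`):
#
#   post = Post.objects.create(
#       ip="127.0.0.1", idUser="u1", idClick="c1", classe="btn",
#       texto="clicked", current="/home", href="/next",
#       timestamp=0.0, dateTimestamp=0,
#   )
#   adapter, _ = Adapters.objects.get_or_create(rid="adapter-1", defaults={"ativo": 1})
#   RecomendacaoAcessada.objects.create(rid=adapter, idClick=post)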
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def associate_node(self, ServerName: str, NodeName: str, EngineAttributes: List) -> Dict:
"""
Associates a new node with the server. For more information about how to disassociate a node, see DisassociateNode .
On a Chef server: This command is an alternative to ``knife bootstrap`` .
Example (Chef): ``aws opsworks-cm associate-node --server-name *MyServer* --node-name *MyManagedNode* --engine-attributes "Name=*CHEF_ORGANIZATION* ,Value=default" "Name=*CHEF_NODE_PUBLIC_KEY* ,Value=*public-key-pem* "``
On a Puppet server, this command is an alternative to the ``puppet cert sign`` command that signs a Puppet node CSR.
Example (Chef): ``aws opsworks-cm associate-node --server-name *MyServer* --node-name *MyManagedNode* --engine-attributes "Name=*PUPPET_NODE_CSR* ,Value=*csr-pem* "``
        A node can only be associated with servers that are in a ``HEALTHY`` state. Otherwise, an ``InvalidStateException`` is thrown. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid. The AssociateNode API call can be integrated into Auto Scaling configurations, AWS CloudFormation templates, or the user data of a server's instance.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/AssociateNode>`_
**Request Syntax**
::
response = client.associate_node(
ServerName='string',
NodeName='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'NodeAssociationStatusToken': 'string'
}
**Response Structure**
- *(dict) --*
- **NodeAssociationStatusToken** *(string) --*
Contains a token which can be passed to the ``DescribeNodeAssociationStatus`` API call to get the status of the association request.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server with which to associate the node.
:type NodeName: string
:param NodeName: **[REQUIRED]**
The name of the node.
:type EngineAttributes: list
:param EngineAttributes: **[REQUIRED]**
Engine attributes used for associating the node.
**Attributes accepted in a AssociateNode request for Chef**
* ``CHEF_ORGANIZATION`` : The Chef organization with which the node is associated. By default only one organization named ``default`` can exist.
* ``CHEF_NODE_PUBLIC_KEY`` : A PEM-formatted public key. This key is required for the ``chef-client`` agent to access the Chef API.
**Attributes accepted in a AssociateNode request for Puppet**
* ``PUPPET_NODE_CSR`` : A PEM-formatted certificate-signing request (CSR) that is created by the node.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
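    # Illustrative sketch of the paginator workflow referenced above (assumption:
    # a paginator is registered for the chosen operation in this service):
    #
    #   if client.can_paginate('describe_backups'):
    #       paginator = client.get_paginator('describe_backups')
    #       for page in paginator.paginate(ServerName='MyServer'):
    #           for backup in page['Backups']:
    #               print(backup['BackupId'])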
def create_backup(self, ServerName: str, Description: str = None) -> Dict:
"""
Creates an application-level backup of a server. While the server is in the ``BACKING_UP`` state, the server cannot be changed, and no additional backup can be created.
Backups can be created for servers in ``RUNNING`` , ``HEALTHY`` , and ``UNHEALTHY`` states. By default, you can create a maximum of 50 manual backups.
This operation is asynchronous.
A ``LimitExceededException`` is thrown when the maximum number of manual backups is reached. An ``InvalidStateException`` is thrown when the server is not in any of the following states: RUNNING, HEALTHY, or UNHEALTHY. A ``ResourceNotFoundException`` is thrown when the server is not found. A ``ValidationException`` is thrown when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/CreateBackup>`_
**Request Syntax**
::
response = client.create_backup(
ServerName='string',
Description='string'
)
**Response Syntax**
::
{
'Backup': {
'BackupArn': 'string',
'BackupId': 'string',
'BackupType': 'AUTOMATED'|'MANUAL',
'CreatedAt': datetime(2015, 1, 1),
'Description': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'S3DataSize': 123,
'S3DataUrl': 'string',
'S3LogUrl': 'string',
'SecurityGroupIds': [
'string',
],
'ServerName': 'string',
'ServiceRoleArn': 'string',
'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
'StatusDescription': 'string',
'SubnetIds': [
'string',
],
'ToolsVersion': 'string',
'UserArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Backup** *(dict) --*
Backup created by request.
- **BackupArn** *(string) --*
The ARN of the backup.
- **BackupId** *(string) --*
The generated ID of the backup. Example: ``myServerName-yyyyMMddHHmmssSSS``
- **BackupType** *(string) --*
The backup type. Valid values are ``automated`` or ``manual`` .
- **CreatedAt** *(datetime) --*
The time stamp when the backup was created in the database. Example: ``2016-07-29T13:38:47.520Z``
- **Description** *(string) --*
A user-provided description for a manual backup. This field is empty for automated backups.
- **Engine** *(string) --*
The engine type that is obtained from the server when the backup is created.
- **EngineModel** *(string) --*
The engine model that is obtained from the server when the backup is created.
- **EngineVersion** *(string) --*
The engine version that is obtained from the server when the backup is created.
- **InstanceProfileArn** *(string) --*
The EC2 instance profile ARN that is obtained from the server when the backup is created. Because this value is stored, you are not required to provide the InstanceProfileArn again if you restore a backup.
- **InstanceType** *(string) --*
The instance type that is obtained from the server when the backup is created.
- **KeyPair** *(string) --*
The key pair that is obtained from the server when the backup is created.
- **PreferredBackupWindow** *(string) --*
The preferred backup period that is obtained from the server when the backup is created.
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period that is obtained from the server when the backup is created.
- **S3DataSize** *(integer) --*
This field is deprecated and is no longer used.
- **S3DataUrl** *(string) --*
This field is deprecated and is no longer used.
- **S3LogUrl** *(string) --*
The Amazon S3 URL of the backup's log file.
- **SecurityGroupIds** *(list) --*
The security group IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ServerName** *(string) --*
The name of the server from which the backup was made.
- **ServiceRoleArn** *(string) --*
The service role ARN that is obtained from the server when the backup is created.
- **Status** *(string) --*
The status of a backup while in progress.
- **StatusDescription** *(string) --*
An informational message about backup status.
- **SubnetIds** *(list) --*
The subnet IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ToolsVersion** *(string) --*
The version of AWS OpsWorks CM-specific tools that is obtained from the server when the backup is created.
- **UserArn** *(string) --*
The IAM user ARN of the requester for manual backups. This field is empty for automated backups.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server that you want to back up.
:type Description: string
:param Description:
A user-defined description of the backup.
:rtype: dict
:returns:
"""
pass
def create_server(self, ServerName: str, InstanceProfileArn: str, InstanceType: str, ServiceRoleArn: str, AssociatePublicIpAddress: bool = None, DisableAutomatedBackup: bool = None, Engine: str = None, EngineModel: str = None, EngineVersion: str = None, EngineAttributes: List = None, BackupRetentionCount: int = None, KeyPair: str = None, PreferredMaintenanceWindow: str = None, PreferredBackupWindow: str = None, SecurityGroupIds: List = None, SubnetIds: List = None, BackupId: str = None) -> Dict:
"""
        Creates and immediately starts a new server. The server is ready to use when it is in the ``HEALTHY`` state. By default, you can create a maximum of 10 servers.
This operation is asynchronous.
A ``LimitExceededException`` is thrown when you have created the maximum number of servers (10). A ``ResourceAlreadyExistsException`` is thrown when a server with the same name already exists in the account. A ``ResourceNotFoundException`` is thrown when you specify a backup ID that is not valid or is for a backup that does not exist. A ``ValidationException`` is thrown when parameters of the request are not valid.
If you do not specify a security group by adding the ``SecurityGroupIds`` parameter, AWS OpsWorks creates a new security group.
*Chef Automate:* The default security group opens the Chef server to the world on TCP port 443. If a KeyName is present, AWS OpsWorks enables SSH access. SSH is also open to the world on TCP port 22.
*Puppet Enterprise:* The default security group opens TCP ports 22, 443, 4433, 8140, 8142, 8143, and 8170. If a KeyName is present, AWS OpsWorks enables SSH access. SSH is also open to the world on TCP port 22.
By default, your server is accessible from any IP address. We recommend that you update your security group rules to allow access from known IP addresses and address ranges only. To edit security group rules, open Security Groups in the navigation pane of the EC2 management console.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/CreateServer>`_
**Request Syntax**
::
response = client.create_server(
AssociatePublicIpAddress=True|False,
DisableAutomatedBackup=True|False,
Engine='string',
EngineModel='string',
EngineVersion='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
],
BackupRetentionCount=123,
ServerName='string',
InstanceProfileArn='string',
InstanceType='string',
KeyPair='string',
PreferredMaintenanceWindow='string',
PreferredBackupWindow='string',
SecurityGroupIds=[
'string',
],
ServiceRoleArn='string',
SubnetIds=[
'string',
],
BackupId='string'
)
**Response Syntax**
::
{
'Server': {
'AssociatePublicIpAddress': True|False,
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'CloudFormationStackArn': 'string',
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Server** *(dict) --*
The server that is created by the request.
- **AssociatePublicIpAddress** *(boolean) --*
Associate a public IP address with a server that you are launching.
- **BackupRetentionCount** *(integer) --*
The number of automated backups to keep.
- **ServerName** *(string) --*
The name of the server.
- **CreatedAt** *(datetime) --*
Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
- **CloudFormationStackArn** *(string) --*
The ARN of the CloudFormation stack that was used to create the server.
- **DisableAutomatedBackup** *(boolean) --*
Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
- **Endpoint** *(string) --*
A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
- **Engine** *(string) --*
The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
- **EngineModel** *(string) --*
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
- **EngineAttributes** *(list) --*
The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
**Attributes returned in a createServer response for Chef**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
* ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
**Attributes returned in a createServer response for Puppet**
* ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
* ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **EngineVersion** *(string) --*
The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
- **InstanceProfileArn** *(string) --*
The instance profile ARN of the server.
- **InstanceType** *(string) --*
The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
- **KeyPair** *(string) --*
The key pair associated with the server.
- **MaintenanceStatus** *(string) --*
The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period specified for the server.
- **PreferredBackupWindow** *(string) --*
The preferred backup period specified for the server.
- **SecurityGroupIds** *(list) --*
The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
- *(string) --*
- **ServiceRoleArn** *(string) --*
The service role ARN used to create the server.
- **Status** *(string) --*
The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
- **StatusReason** *(string) --*
Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
- **SubnetIds** *(list) --*
The subnet IDs specified in a CreateServer request.
- *(string) --*
- **ServerArn** *(string) --*
The ARN of the server.
:type AssociatePublicIpAddress: boolean
:param AssociatePublicIpAddress:
Associate a public IP address with a server that you are launching. Valid values are ``true`` or ``false`` . The default value is ``true`` .
:type DisableAutomatedBackup: boolean
:param DisableAutomatedBackup:
Enable or disable scheduled backups. Valid values are ``true`` or ``false`` . The default value is ``true`` .
:type Engine: string
:param Engine:
The configuration management engine to use. Valid values include ``Chef`` and ``Puppet`` .
:type EngineModel: string
:param EngineModel:
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
:type EngineVersion: string
:param EngineVersion:
The major release version of the engine that you want to use. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
:type EngineAttributes: list
:param EngineAttributes:
Optional engine attributes on a specified server.
**Attributes accepted in a Chef createServer request:**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA public key. The corresponding private key is required to access the Chef API. When no CHEF_PIVOTAL_KEY is set, a private key is generated and returned in the response.
* ``CHEF_DELIVERY_ADMIN_PASSWORD`` : The password for the administrative user in the Chef Automate GUI. The password length is a minimum of eight characters, and a maximum of 32. The password can contain letters, numbers, and special characters (!/@#$%^&+=_). The password must contain at least one lower case letter, one upper case letter, one number, and one special character. When no CHEF_DELIVERY_ADMIN_PASSWORD is set, one is generated and returned in the response.
**Attributes accepted in a Puppet createServer request:**
* ``PUPPET_ADMIN_PASSWORD`` : To work with the Puppet Enterprise console, a password must use ASCII characters.
* ``PUPPET_R10K_REMOTE`` : The r10k remote is the URL of your control repository (for example, ssh://[email protected]:user/control-repo.git). Specifying an r10k remote opens TCP port 8170.
* ``PUPPET_R10K_PRIVATE_KEY`` : If you are using a private Git repository, add PUPPET_R10K_PRIVATE_KEY to specify an SSH URL and a PEM-encoded private SSH key.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:type BackupRetentionCount: integer
:param BackupRetentionCount:
The number of automated backups that you want to keep. Whenever a new backup is created, AWS OpsWorks CM deletes the oldest backups if this number is exceeded. The default value is ``1`` .
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server. The server name must be unique within your AWS account, within each region. Server names must start with a letter; then letters, numbers, or hyphens (-) are allowed, up to a maximum of 40 characters.
:type InstanceProfileArn: string
:param InstanceProfileArn: **[REQUIRED]**
The ARN of the instance profile that your Amazon EC2 instances use. Although the AWS OpsWorks console typically creates the instance profile for you, if you are using API commands instead, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the instance profile you need.
:type InstanceType: string
:param InstanceType: **[REQUIRED]**
The Amazon EC2 instance type to use. For example, ``m4.large`` . Recommended instance types include ``t2.medium`` and greater, ``m4.*`` , or ``c4.xlarge`` and greater.
:type KeyPair: string
:param KeyPair:
The Amazon EC2 key pair to set for the instance. This parameter is optional; if desired, you may specify this parameter to connect to your instances by using SSH.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow:
The start time for a one-hour period each week during which AWS OpsWorks CM performs maintenance on the instance. Valid values must be specified in the following format: ``DDD:HH:MM`` . The specified time is in coordinated universal time (UTC). The default value is a random one-hour period on Tuesday, Wednesday, or Friday. See ``TimeWindowDefinition`` for more information.
**Example:** ``Mon:08:00`` , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type PreferredBackupWindow: string
:param PreferredBackupWindow:
The start time for a one-hour period during which AWS OpsWorks CM backs up application-level data on your server if automated backups are enabled. Valid values must be specified in one of the following formats:
* ``HH:MM`` for daily backups
* ``DDD:HH:MM`` for weekly backups
The specified time is in coordinated universal time (UTC). The default value is a random, daily start time.
**Example:** ``08:00`` , which represents a daily start time of 08:00 UTC.
**Example:** ``Mon:08:00`` , which represents a start time of every Monday at 08:00 UTC. (8:00 a.m.)
:type SecurityGroupIds: list
:param SecurityGroupIds:
A list of security group IDs to attach to the Amazon EC2 instance. If you add this parameter, the specified security groups must be within the VPC that is specified by ``SubnetIds`` .
If you do not specify this parameter, AWS OpsWorks CM creates one new security group that uses TCP ports 22 and 443, open to 0.0.0.0/0 (everyone).
- *(string) --*
:type ServiceRoleArn: string
:param ServiceRoleArn: **[REQUIRED]**
The service role that the AWS OpsWorks CM service backend uses to work with your account. Although the AWS OpsWorks management console typically creates the service role for you, if you are using the AWS CLI or API commands, run the service-role-creation.yaml AWS CloudFormation template, located at https://s3.amazonaws.com/opsworks-cm-us-east-1-prod-default-assets/misc/opsworks-cm-roles.yaml. This template creates a CloudFormation stack that includes the service role and instance profile that you need.
:type SubnetIds: list
:param SubnetIds:
The IDs of subnets in which to launch the server EC2 instance.
Amazon EC2-Classic customers: This field is required. All servers must run within a VPC. The VPC must have \"Auto Assign Public IP\" enabled.
EC2-VPC customers: This field is optional. If you do not specify subnet IDs, your EC2 instances are created in a default subnet that is selected by Amazon EC2. If you specify subnet IDs, the VPC must have \"Auto Assign Public IP\" enabled.
For more information about supported Amazon EC2 platforms, see `Supported Platforms <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html>`__ .
- *(string) --*
:type BackupId: string
:param BackupId:
If you specify this field, AWS OpsWorks CM creates the server by using the backup represented by BackupId.
:rtype: dict
:returns:
"""
pass
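    # Illustrative sketch of a minimal Chef Automate CreateServer call (all values
    # below are placeholders/assumptions, not defaults of this API):
    #
    #   import boto3
    #   client = boto3.client('opsworkscm', region_name='us-east-1')
    #   resp = client.create_server(
    #       ServerName='MyChefServer',
    #       Engine='Chef',
    #       EngineModel='Single',
    #       EngineVersion='12',
    #       InstanceProfileArn='arn:aws:iam::123456789012:instance-profile/aws-opsworks-cm-ec2-role',
    #       InstanceType='m4.large',
    #       ServiceRoleArn='arn:aws:iam::123456789012:role/aws-opsworks-cm-service-role',
    #   )
    #   print(resp['Server']['Status'])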
def delete_backup(self, BackupId: str) -> Dict:
"""
Deletes a backup. You can delete both manual and automated backups. This operation is asynchronous.
An ``InvalidStateException`` is thrown when a backup deletion is already in progress. A ``ResourceNotFoundException`` is thrown when the backup does not exist. A ``ValidationException`` is thrown when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DeleteBackup>`_
**Request Syntax**
::
response = client.delete_backup(
BackupId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type BackupId: string
:param BackupId: **[REQUIRED]**
The ID of the backup to delete. Run the DescribeBackups command to get a list of backup IDs. Backup IDs are in the format ``ServerName-yyyyMMddHHmmssSSS`` .
:rtype: dict
:returns:
"""
pass
def delete_server(self, ServerName: str) -> Dict:
"""
Deletes the server and the underlying AWS CloudFormation stacks (including the server's EC2 instance). When you run this command, the server state is updated to ``DELETING`` . After the server is deleted, it is no longer returned by ``DescribeServer`` requests. If the AWS CloudFormation stack cannot be deleted, the server cannot be deleted.
This operation is asynchronous.
An ``InvalidStateException`` is thrown when a server deletion is already in progress. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DeleteServer>`_
**Request Syntax**
::
response = client.delete_server(
ServerName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type ServerName: string
:param ServerName: **[REQUIRED]**
The ID of the server to delete.
:rtype: dict
:returns:
"""
pass
def describe_account_attributes(self) -> Dict:
"""
Describes your account attributes, and creates requests to increase limits before they are reached or exceeded.
This operation is synchronous.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeAccountAttributes>`_
**Request Syntax**
::
response = client.describe_account_attributes()
**Response Syntax**
::
{
'Attributes': [
{
'Name': 'string',
'Maximum': 123,
'Used': 123
},
]
}
**Response Structure**
- *(dict) --*
- **Attributes** *(list) --*
The attributes that are currently set for the account.
- *(dict) --*
Stores account attributes.
- **Name** *(string) --*
The attribute name. The following are supported attribute names.
* *ServerLimit:* The number of current servers/maximum number of servers allowed. By default, you can have a maximum of 10 servers.
* *ManualBackupLimit:* The number of current manual backups/maximum number of backups allowed. By default, you can have a maximum of 50 manual backups saved.
- **Maximum** *(integer) --*
The maximum allowed value.
- **Used** *(integer) --*
The current usage, such as the current number of servers that are associated with the account.
:rtype: dict
:returns:
"""
pass
def describe_backups(self, BackupId: str = None, ServerName: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Describes backups. The results are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.
This operation is synchronous.
A ``ResourceNotFoundException`` is thrown when the backup does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeBackups>`_
**Request Syntax**
::
response = client.describe_backups(
BackupId='string',
ServerName='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'Backups': [
{
'BackupArn': 'string',
'BackupId': 'string',
'BackupType': 'AUTOMATED'|'MANUAL',
'CreatedAt': datetime(2015, 1, 1),
'Description': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'S3DataSize': 123,
'S3DataUrl': 'string',
'S3LogUrl': 'string',
'SecurityGroupIds': [
'string',
],
'ServerName': 'string',
'ServiceRoleArn': 'string',
'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
'StatusDescription': 'string',
'SubnetIds': [
'string',
],
'ToolsVersion': 'string',
'UserArn': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Backups** *(list) --*
Contains the response to a ``DescribeBackups`` request.
- *(dict) --*
Describes a single backup.
- **BackupArn** *(string) --*
The ARN of the backup.
- **BackupId** *(string) --*
The generated ID of the backup. Example: ``myServerName-yyyyMMddHHmmssSSS``
- **BackupType** *(string) --*
The backup type. Valid values are ``automated`` or ``manual`` .
- **CreatedAt** *(datetime) --*
The time stamp when the backup was created in the database. Example: ``2016-07-29T13:38:47.520Z``
- **Description** *(string) --*
A user-provided description for a manual backup. This field is empty for automated backups.
- **Engine** *(string) --*
The engine type that is obtained from the server when the backup is created.
- **EngineModel** *(string) --*
The engine model that is obtained from the server when the backup is created.
- **EngineVersion** *(string) --*
The engine version that is obtained from the server when the backup is created.
- **InstanceProfileArn** *(string) --*
The EC2 instance profile ARN that is obtained from the server when the backup is created. Because this value is stored, you are not required to provide the InstanceProfileArn again if you restore a backup.
- **InstanceType** *(string) --*
The instance type that is obtained from the server when the backup is created.
- **KeyPair** *(string) --*
The key pair that is obtained from the server when the backup is created.
- **PreferredBackupWindow** *(string) --*
The preferred backup period that is obtained from the server when the backup is created.
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period that is obtained from the server when the backup is created.
- **S3DataSize** *(integer) --*
This field is deprecated and is no longer used.
- **S3DataUrl** *(string) --*
This field is deprecated and is no longer used.
- **S3LogUrl** *(string) --*
The Amazon S3 URL of the backup's log file.
- **SecurityGroupIds** *(list) --*
The security group IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ServerName** *(string) --*
The name of the server from which the backup was made.
- **ServiceRoleArn** *(string) --*
The service role ARN that is obtained from the server when the backup is created.
- **Status** *(string) --*
The status of a backup while in progress.
- **StatusDescription** *(string) --*
An informational message about backup status.
- **SubnetIds** *(list) --*
The subnet IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ToolsVersion** *(string) --*
The version of AWS OpsWorks CM-specific tools that is obtained from the server when the backup is created.
- **UserArn** *(string) --*
The IAM user ARN of the requester for manual backups. This field is empty for automated backups.
- **NextToken** *(string) --*
This is not currently implemented for ``DescribeBackups`` requests.
:type BackupId: string
:param BackupId:
Describes a single backup.
:type ServerName: string
:param ServerName:
Returns backups for the server with the specified ServerName.
:type NextToken: string
:param NextToken:
This is not currently implemented for ``DescribeBackups`` requests.
:type MaxResults: integer
:param MaxResults:
This is not currently implemented for ``DescribeBackups`` requests.
:rtype: dict
:returns:
"""
pass
def describe_events(self, ServerName: str, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Describes events for a specified server. Results are ordered by time, with newest events first.
This operation is synchronous.
A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeEvents>`_
**Request Syntax**
::
response = client.describe_events(
ServerName='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'ServerEvents': [
{
'CreatedAt': datetime(2015, 1, 1),
'ServerName': 'string',
'Message': 'string',
'LogUrl': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **ServerEvents** *(list) --*
Contains the response to a ``DescribeEvents`` request.
- *(dict) --*
An event that is related to the server, such as the start of maintenance or backup.
- **CreatedAt** *(datetime) --*
The time when the event occurred.
- **ServerName** *(string) --*
The name of the server on or for which the event occurred.
- **Message** *(string) --*
A human-readable informational or status message.
- **LogUrl** *(string) --*
The Amazon S3 URL of the event's log file.
- **NextToken** *(string) --*
NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call ``DescribeEvents`` again, and assign the token from the previous results as the value of the ``nextToken`` parameter. If there are no more results, the response object's ``nextToken`` parameter value is ``null`` . Setting a ``nextToken`` value that was not returned in your previous results causes an ``InvalidNextTokenException`` to occur.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server for which you want to view events.
:type NextToken: string
:param NextToken:
NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call ``DescribeEvents`` again, and assign the token from the previous results as the value of the ``nextToken`` parameter. If there are no more results, the response object\'s ``nextToken`` parameter value is ``null`` . Setting a ``nextToken`` value that was not returned in your previous results causes an ``InvalidNextTokenException`` to occur.
:type MaxResults: integer
:param MaxResults:
To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a ``NextToken`` value that you can assign to the ``NextToken`` request parameter to get the next set of results.
:rtype: dict
:returns:
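        **Example (pagination sketch)**
        A minimal, hedged sketch of the ``NextToken`` loop described above; ``client`` is assumed to be the OpsWorks CM client used in the request syntax, and the server name is a placeholder.
        ::
          events = []
          next_token = None
          while True:
              kwargs = {'ServerName': 'my-chef-server', 'MaxResults': 50}
              if next_token:
                  kwargs['NextToken'] = next_token
              page = client.describe_events(**kwargs)
              events.extend(page.get('ServerEvents', []))
              next_token = page.get('NextToken')
              if not next_token:
                  break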
"""
pass
def describe_node_association_status(self, NodeAssociationStatusToken: str, ServerName: str) -> Dict:
"""
Returns the current status of an existing association or disassociation request.
A ``ResourceNotFoundException`` is thrown when no recent association or disassociation request with the specified token is found, or when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeNodeAssociationStatus>`_
**Request Syntax**
::
response = client.describe_node_association_status(
NodeAssociationStatusToken='string',
ServerName='string'
)
**Response Syntax**
::
{
'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NodeAssociationStatus** *(string) --*
The status of the association or disassociation request.
**Possible values:**
* ``SUCCESS`` : The association or disassociation succeeded.
* ``FAILED`` : The association or disassociation failed.
* ``IN_PROGRESS`` : The association or disassociation is still in progress.
- **EngineAttributes** *(list) --*
            Attributes specific to the node association. In Puppet, the attribute PUPPET_NODE_CERT contains the signed certificate (the result of the CSR).
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:type NodeAssociationStatusToken: string
:param NodeAssociationStatusToken: **[REQUIRED]**
The token returned in either the AssociateNodeResponse or the DisassociateNodeResponse.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server from which to disassociate the node.
:rtype: dict
:returns:
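        **Example (polling sketch)**
        A minimal, hedged sketch that polls until the request leaves the ``IN_PROGRESS`` state; ``client``, ``token``, and the server name are assumptions/placeholders.
        ::
          import time
          status = 'IN_PROGRESS'
          while status == 'IN_PROGRESS':
              response = client.describe_node_association_status(
                  NodeAssociationStatusToken=token,
                  ServerName='my-chef-server'
              )
              status = response['NodeAssociationStatus']
              if status == 'IN_PROGRESS':
                  time.sleep(15)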
"""
pass
def describe_servers(self, ServerName: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Lists all configuration management servers that are identified with your account. Only the stored results from Amazon DynamoDB are returned. AWS OpsWorks CM does not query other services.
This operation is synchronous.
A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeServers>`_
**Request Syntax**
::
response = client.describe_servers(
ServerName='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'Servers': [
{
'AssociatePublicIpAddress': True|False,
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'CloudFormationStackArn': 'string',
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Servers** *(list) --*
Contains the response to a ``DescribeServers`` request.
*For Puppet Server:* ``DescribeServersResponse$Servers$EngineAttributes`` contains PUPPET_API_CA_CERT. This is the PEM-encoded CA certificate that is used by the Puppet API over TCP port number 8140. The CA certificate is also used to sign node certificates.
- *(dict) --*
Describes a configuration management server.
- **AssociatePublicIpAddress** *(boolean) --*
Associate a public IP address with a server that you are launching.
- **BackupRetentionCount** *(integer) --*
The number of automated backups to keep.
- **ServerName** *(string) --*
The name of the server.
- **CreatedAt** *(datetime) --*
Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
- **CloudFormationStackArn** *(string) --*
The ARN of the CloudFormation stack that was used to create the server.
- **DisableAutomatedBackup** *(boolean) --*
Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
- **Endpoint** *(string) --*
A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
- **Engine** *(string) --*
The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
- **EngineModel** *(string) --*
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
- **EngineAttributes** *(list) --*
The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
**Attributes returned in a createServer response for Chef**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
* ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
**Attributes returned in a createServer response for Puppet**
* ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
* ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **EngineVersion** *(string) --*
The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
- **InstanceProfileArn** *(string) --*
The instance profile ARN of the server.
- **InstanceType** *(string) --*
The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
- **KeyPair** *(string) --*
The key pair associated with the server.
- **MaintenanceStatus** *(string) --*
The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period specified for the server.
- **PreferredBackupWindow** *(string) --*
The preferred backup period specified for the server.
- **SecurityGroupIds** *(list) --*
The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
- *(string) --*
- **ServiceRoleArn** *(string) --*
The service role ARN used to create the server.
- **Status** *(string) --*
The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
- **StatusReason** *(string) --*
Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
- **SubnetIds** *(list) --*
The subnet IDs specified in a CreateServer request.
- *(string) --*
- **ServerArn** *(string) --*
The ARN of the server.
- **NextToken** *(string) --*
This is not currently implemented for ``DescribeServers`` requests.
:type ServerName: string
:param ServerName:
Describes the server with the specified ServerName.
:type NextToken: string
:param NextToken:
This is not currently implemented for ``DescribeServers`` requests.
:type MaxResults: integer
:param MaxResults:
This is not currently implemented for ``DescribeServers`` requests.
:rtype: dict
:returns:
"""
pass
def disassociate_node(self, ServerName: str, NodeName: str, EngineAttributes: List = None) -> Dict:
"""
Disassociates a node from an AWS OpsWorks CM server, and removes the node from the server's managed nodes. After a node is disassociated, the node key pair is no longer valid for accessing the configuration manager's API. For more information about how to associate a node, see AssociateNode .
        A node can only be disassociated from a server that is in a ``HEALTHY`` state. Otherwise, an ``InvalidStateException`` is thrown. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DisassociateNode>`_
**Request Syntax**
::
response = client.disassociate_node(
ServerName='string',
NodeName='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'NodeAssociationStatusToken': 'string'
}
**Response Structure**
- *(dict) --*
- **NodeAssociationStatusToken** *(string) --*
Contains a token which can be passed to the ``DescribeNodeAssociationStatus`` API call to get the status of the disassociation request.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server from which to disassociate the node.
:type NodeName: string
:param NodeName: **[REQUIRED]**
The name of the client node.
:type EngineAttributes: list
:param EngineAttributes:
Engine attributes that are used for disassociating the node. No attributes are required for Puppet.
**Attributes required in a DisassociateNode request for Chef**
* ``CHEF_ORGANIZATION`` : The Chef organization with which the node was associated. By default only one organization named ``default`` can exist.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:rtype: dict
:returns:
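        **Example (Chef sketch)**
        A hedged sketch of a disassociation request that passes the ``CHEF_ORGANIZATION`` attribute described above; ``client`` and the server and node names are assumptions/placeholders.
        ::
          response = client.disassociate_node(
              ServerName='my-chef-server',
              NodeName='my-node',
              EngineAttributes=[
                  {'Name': 'CHEF_ORGANIZATION', 'Value': 'default'},
              ]
          )
          token = response['NodeAssociationStatusToken']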
"""
pass
def export_server_engine_attribute(self, ExportAttributeName: str, ServerName: str, InputAttributes: List = None) -> Dict:
"""
Exports a specified server engine attribute as a base64-encoded string. For example, you can export user data that you can use in EC2 to associate nodes with a server.
This operation is synchronous.
A ``ValidationException`` is raised when parameters of the request are not valid. A ``ResourceNotFoundException`` is thrown when the server does not exist. An ``InvalidStateException`` is thrown when the server is in any of the following states: CREATING, TERMINATED, FAILED or DELETING.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/ExportServerEngineAttribute>`_
**Request Syntax**
::
response = client.export_server_engine_attribute(
ExportAttributeName='string',
ServerName='string',
InputAttributes=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'EngineAttribute': {
'Name': 'string',
'Value': 'string'
},
'ServerName': 'string'
}
**Response Structure**
- *(dict) --*
- **EngineAttribute** *(dict) --*
The requested engine attribute pair with attribute name and value.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **ServerName** *(string) --*
The server name used in the request.
:type ExportAttributeName: string
:param ExportAttributeName: **[REQUIRED]**
The name of the export attribute. Currently, the supported export attribute is ``Userdata`` . This exports a user data script that includes parameters and values provided in the ``InputAttributes`` list.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server from which you are exporting the attribute.
:type InputAttributes: list
:param InputAttributes:
The list of engine attributes. The list type is ``EngineAttribute`` . An ``EngineAttribute`` list item is a pair that includes an attribute name and its value. For the ``Userdata`` ExportAttributeName, the following are supported engine attribute names.
* **RunList** In Chef, a list of roles or recipes that are run in the specified order. In Puppet, this parameter is ignored.
* **OrganizationName** In Chef, an organization name. AWS OpsWorks for Chef Automate always creates the organization ``default`` . In Puppet, this parameter is ignored.
* **NodeEnvironment** In Chef, a node environment (for example, development, staging, or one-box). In Puppet, this parameter is ignored.
* **NodeClientVersion** In Chef, the version of the Chef engine (three numbers separated by dots, such as 13.8.5). If this attribute is empty, OpsWorks for Chef Automate uses the most current version. In Puppet, this parameter is ignored.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:rtype: dict
:returns:
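        **Example (Userdata sketch)**
        A hedged sketch that exports ``Userdata`` using the input attributes described above; ``client``, the server name, and the run list are assumptions/placeholders.
        ::
          response = client.export_server_engine_attribute(
              ExportAttributeName='Userdata',
              ServerName='my-chef-server',
              InputAttributes=[
                  {'Name': 'RunList', 'Value': 'role[web]'},
                  {'Name': 'OrganizationName', 'Value': 'default'},
              ]
          )
          user_data = response['EngineAttribute']['Value']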
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def restore_server(self, BackupId: str, ServerName: str, InstanceType: str = None, KeyPair: str = None) -> Dict:
"""
Restores a backup to a server that is in a ``CONNECTION_LOST`` , ``HEALTHY`` , ``RUNNING`` , ``UNHEALTHY`` , or ``TERMINATED`` state. When you run RestoreServer, the server's EC2 instance is deleted, and a new EC2 instance is configured. RestoreServer maintains the existing server endpoint, so configuration management of the server's client devices (nodes) should continue to work.
This operation is asynchronous.
An ``InvalidStateException`` is thrown when the server is not in a valid state. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/RestoreServer>`_
**Request Syntax**
::
response = client.restore_server(
BackupId='string',
ServerName='string',
InstanceType='string',
KeyPair='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type BackupId: string
:param BackupId: **[REQUIRED]**
The ID of the backup that you want to use to restore a server.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server that you want to restore.
:type InstanceType: string
:param InstanceType:
The type of the instance to create. Valid values must be specified in the following format: ``^([cm][34]|t2).*`` For example, ``m4.large`` . Valid values are ``t2.medium`` , ``m4.large`` , and ``m4.2xlarge`` . If you do not specify this parameter, RestoreServer uses the instance type from the specified backup.
:type KeyPair: string
:param KeyPair:
The name of the key pair to set on the new EC2 instance. This can be helpful if the administrator no longer has the SSH key.
:rtype: dict
:returns:
"""
pass
def start_maintenance(self, ServerName: str, EngineAttributes: List = None) -> Dict:
"""
Manually starts server maintenance. This command can be useful if an earlier maintenance attempt failed, and the underlying cause of maintenance failure has been resolved. The server is in an ``UNDER_MAINTENANCE`` state while maintenance is in progress.
Maintenance can only be started on servers in ``HEALTHY`` and ``UNHEALTHY`` states. Otherwise, an ``InvalidStateException`` is thrown. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/StartMaintenance>`_
**Request Syntax**
::
response = client.start_maintenance(
ServerName='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'Server': {
'AssociatePublicIpAddress': True|False,
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'CloudFormationStackArn': 'string',
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Server** *(dict) --*
Contains the response to a ``StartMaintenance`` request.
- **AssociatePublicIpAddress** *(boolean) --*
Associate a public IP address with a server that you are launching.
- **BackupRetentionCount** *(integer) --*
The number of automated backups to keep.
- **ServerName** *(string) --*
The name of the server.
- **CreatedAt** *(datetime) --*
Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
- **CloudFormationStackArn** *(string) --*
The ARN of the CloudFormation stack that was used to create the server.
- **DisableAutomatedBackup** *(boolean) --*
Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
- **Endpoint** *(string) --*
A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
- **Engine** *(string) --*
The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
- **EngineModel** *(string) --*
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
- **EngineAttributes** *(list) --*
The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
**Attributes returned in a createServer response for Chef**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
* ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
**Attributes returned in a createServer response for Puppet**
* ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
* ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **EngineVersion** *(string) --*
The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
- **InstanceProfileArn** *(string) --*
The instance profile ARN of the server.
- **InstanceType** *(string) --*
The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
- **KeyPair** *(string) --*
The key pair associated with the server.
- **MaintenanceStatus** *(string) --*
The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period specified for the server.
- **PreferredBackupWindow** *(string) --*
The preferred backup period specified for the server.
- **SecurityGroupIds** *(list) --*
The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
- *(string) --*
- **ServiceRoleArn** *(string) --*
The service role ARN used to create the server.
- **Status** *(string) --*
The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
- **StatusReason** *(string) --*
Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
- **SubnetIds** *(list) --*
The subnet IDs specified in a CreateServer request.
- *(string) --*
- **ServerArn** *(string) --*
The ARN of the server.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server on which to run maintenance.
:type EngineAttributes: list
:param EngineAttributes:
Engine attributes that are specific to the server on which you want to run maintenance.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:rtype: dict
:returns:
"""
pass
def update_server(self, ServerName: str, DisableAutomatedBackup: bool = None, BackupRetentionCount: int = None, PreferredMaintenanceWindow: str = None, PreferredBackupWindow: str = None) -> Dict:
"""
Updates settings for a server.
This operation is synchronous.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/UpdateServer>`_
**Request Syntax**
::
response = client.update_server(
DisableAutomatedBackup=True|False,
BackupRetentionCount=123,
ServerName='string',
PreferredMaintenanceWindow='string',
PreferredBackupWindow='string'
)
**Response Syntax**
::
{
'Server': {
'AssociatePublicIpAddress': True|False,
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'CloudFormationStackArn': 'string',
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Server** *(dict) --*
            Contains the response to an ``UpdateServer`` request.
- **AssociatePublicIpAddress** *(boolean) --*
Associate a public IP address with a server that you are launching.
- **BackupRetentionCount** *(integer) --*
The number of automated backups to keep.
- **ServerName** *(string) --*
The name of the server.
- **CreatedAt** *(datetime) --*
Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
- **CloudFormationStackArn** *(string) --*
The ARN of the CloudFormation stack that was used to create the server.
- **DisableAutomatedBackup** *(boolean) --*
Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
- **Endpoint** *(string) --*
A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
- **Engine** *(string) --*
The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
- **EngineModel** *(string) --*
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
- **EngineAttributes** *(list) --*
The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
**Attributes returned in a createServer response for Chef**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
* ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
**Attributes returned in a createServer response for Puppet**
* ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
* ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **EngineVersion** *(string) --*
The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
- **InstanceProfileArn** *(string) --*
The instance profile ARN of the server.
- **InstanceType** *(string) --*
The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
- **KeyPair** *(string) --*
The key pair associated with the server.
- **MaintenanceStatus** *(string) --*
The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period specified for the server.
- **PreferredBackupWindow** *(string) --*
The preferred backup period specified for the server.
- **SecurityGroupIds** *(list) --*
The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
- *(string) --*
- **ServiceRoleArn** *(string) --*
The service role ARN used to create the server.
- **Status** *(string) --*
The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
- **StatusReason** *(string) --*
Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
- **SubnetIds** *(list) --*
The subnet IDs specified in a CreateServer request.
- *(string) --*
- **ServerArn** *(string) --*
The ARN of the server.
:type DisableAutomatedBackup: boolean
:param DisableAutomatedBackup:
Setting DisableAutomatedBackup to ``true`` disables automated or scheduled backups. Automated backups are enabled by default.
:type BackupRetentionCount: integer
:param BackupRetentionCount:
Sets the number of automated backups that you want to keep.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server to update.
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow:
``DDD:HH:MM`` (weekly start time) or ``HH:MM`` (daily start time).
Time windows always use coordinated universal time (UTC). Valid strings for day of week (``DDD`` ) are: ``Mon`` , ``Tue`` , ``Wed`` , ``Thr`` , ``Fri`` , ``Sat`` , or ``Sun`` .
:type PreferredBackupWindow: string
:param PreferredBackupWindow:
``DDD:HH:MM`` (weekly start time) or ``HH:MM`` (daily start time).
Time windows always use coordinated universal time (UTC). Valid strings for day of week (``DDD`` ) are: ``Mon`` , ``Tue`` , ``Wed`` , ``Thr`` , ``Fri`` , ``Sat`` , or ``Sun`` .
:rtype: dict
:returns:
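        **Example (maintenance window sketch)**
        A hedged sketch that sets a weekly maintenance window and a daily backup window in the ``DDD:HH:MM`` and ``HH:MM`` formats described above; ``client`` and the server name are assumptions/placeholders.
        ::
          response = client.update_server(
              ServerName='my-chef-server',
              PreferredMaintenanceWindow='Mon:08:00',
              PreferredBackupWindow='03:30',
              BackupRetentionCount=10
          )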
"""
pass
def update_server_engine_attributes(self, ServerName: str, AttributeName: str, AttributeValue: str = None) -> Dict:
"""
Updates engine-specific attributes on a specified server. The server enters the ``MODIFYING`` state when this operation is in progress. Only one update can occur at a time. You can use this command to reset a Chef server's public key (``CHEF_PIVOTAL_KEY`` ) or a Puppet server's admin password (``PUPPET_ADMIN_PASSWORD`` ).
This operation is asynchronous.
This operation can only be called for servers in ``HEALTHY`` or ``UNHEALTHY`` states. Otherwise, an ``InvalidStateException`` is raised. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/UpdateServerEngineAttributes>`_
**Request Syntax**
::
response = client.update_server_engine_attributes(
ServerName='string',
AttributeName='string',
AttributeValue='string'
)
**Response Syntax**
::
{
'Server': {
'AssociatePublicIpAddress': True|False,
'BackupRetentionCount': 123,
'ServerName': 'string',
'CreatedAt': datetime(2015, 1, 1),
'CloudFormationStackArn': 'string',
'DisableAutomatedBackup': True|False,
'Endpoint': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
],
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'MaintenanceStatus': 'SUCCESS'|'FAILED',
'PreferredMaintenanceWindow': 'string',
'PreferredBackupWindow': 'string',
'SecurityGroupIds': [
'string',
],
'ServiceRoleArn': 'string',
'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
'StatusReason': 'string',
'SubnetIds': [
'string',
],
'ServerArn': 'string'
}
}
**Response Structure**
- *(dict) --*
- **Server** *(dict) --*
Contains the response to an ``UpdateServerEngineAttributes`` request.
- **AssociatePublicIpAddress** *(boolean) --*
Associate a public IP address with a server that you are launching.
- **BackupRetentionCount** *(integer) --*
The number of automated backups to keep.
- **ServerName** *(string) --*
The name of the server.
- **CreatedAt** *(datetime) --*
Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
- **CloudFormationStackArn** *(string) --*
The ARN of the CloudFormation stack that was used to create the server.
- **DisableAutomatedBackup** *(boolean) --*
Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
- **Endpoint** *(string) --*
A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
- **Engine** *(string) --*
The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
- **EngineModel** *(string) --*
The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
- **EngineAttributes** *(list) --*
The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
**Attributes returned in a createServer response for Chef**
* ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
* ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
**Attributes returned in a createServer response for Puppet**
* ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
* ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
- **EngineVersion** *(string) --*
The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
- **InstanceProfileArn** *(string) --*
The instance profile ARN of the server.
- **InstanceType** *(string) --*
The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
- **KeyPair** *(string) --*
The key pair associated with the server.
- **MaintenanceStatus** *(string) --*
The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period specified for the server.
- **PreferredBackupWindow** *(string) --*
The preferred backup period specified for the server.
- **SecurityGroupIds** *(list) --*
The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
- *(string) --*
- **ServiceRoleArn** *(string) --*
The service role ARN used to create the server.
- **Status** *(string) --*
The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
- **StatusReason** *(string) --*
Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
- **SubnetIds** *(list) --*
The subnet IDs specified in a CreateServer request.
- *(string) --*
- **ServerArn** *(string) --*
The ARN of the server.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server to update.
:type AttributeName: string
:param AttributeName: **[REQUIRED]**
The name of the engine attribute to update.
:type AttributeValue: string
:param AttributeValue:
The value to set for the attribute.
:rtype: dict
:returns:
"""
pass
| python |
import media
import fresh_tomatoes
# Create movie instance for John Wick
john_wick = media.Movie("John Wick",
"An ex-hitman comes out of retirement to track down the gangsters that took everything from him.",
"https://upload.wikimedia.org/wikipedia/en/9/98/John_Wick_TeaserPoster.jpg",
"https://www.youtube.com/watch?v=2AUmvWm5ZDQ")
# Create movie instance for Fist Fight
fist_fight = media.Movie("Fist Fight",
" When one school teacher gets the other fired, he is challenged to an after-school fight. ",
"https://upload.wikimedia.org/wikipedia/en/b/b2/Fist_Fight.png",
"https://www.youtube.com/watch?v=6YVBj2o_3mg")
# Create movie instance for Office Christmas Party
office_xmas_party = media.Movie("Office Christmas Party",
"When his uptight CEO sister threatens to shut down his"
" branch, the branch manager throws an epic Christmas"
" party in order to land a big client and save the day,"
" but the party gets way out of hand...",
"https://upload.wikimedia.org/wikipedia/en/8/8a/Office_Christmas_Party.png",
"https://www.youtube.com/watch?v=z4PHjxRiT2I")
# Create movie instance for This is 40
this_is_40 = media.Movie("This is 40", "Pete and Debbie are both about to turn 40, their kids hate each other, both of"
" their businesses are failing, they're on the verge of losing their house, and"
" their relationship is threatening to fall apart.",
"https://upload.wikimedia.org/wikipedia/en/e/eb/This_is_40.jpg",
"https://www.youtube.com/watch?v=6sGkPwrze0o")
# Create movie instance for Skyfall
skyfall = media.Movie("Skyfall", "Bond's loyalty to M is tested when her past comes back to haunt her."
" Whilst MI6 comes under attack, 007 must track down and destroy the"
" threat, no matter how personal the cost.",
"https://upload.wikimedia.org/wikipedia/en/a/a7/Skyfall_poster.jpg",
"https://www.youtube.com/watch?v=24mTIE4D9JM")
# Create movie instance for Deadpool
deadpool = media.Movie("Deadpool", "A fast-talking mercenary with a morbid sense of humor is subjected to a rogue"
" experiment that leaves him with accelerated healing powers and a quest for revenge.",
"https://upload.wikimedia.org/wikipedia/en/4/46/Deadpool_poster.jpg",
"https://www.youtube.com/watch?v=ONHBaC-pfsk")
# Create list of favorite movie instances
movies = [john_wick, fist_fight, office_xmas_party, this_is_40, skyfall, deadpool]
# Pass list of movies to generate website to display movies
fresh_tomatoes.open_movies_page(movies)
| python |
import pickle
# Merge the per-chunk movie grades (pickled under temgrade/<i>_tem_grade) into a
# single list indexed by movie id.
mv_grade = [0] * 17771
for i in range(18):
with open('temgrade/'+str(i)+'_tem_grade', 'rb') as tf:
c = pickle.load(tf)
for (mi, grade) in c.items():
mv_grade[int(mi)] = float(grade)
    print(str(i) + " DONE!")
with open('movie_grade.list', 'wb') as mg:
pickle.dump(mv_grade, mg)
| python |
import json
# Helpers for building Dota 2 console scripts: "command" wraps an alias,
# "bind" ties a command to a key while a modifier key is held, and the
# load* functions below rebuild a saved layout from plain (JSON-friendly) lists.
def loadCommand(data):
return command(data[0], data[1], data[2])
def loadBind(data, key):
return bind(loadCommand(data[0]), key, data[1], data[2])
def loadBKey(data, base=None):
b = bKey(data[0], base)
for i in data[1]:
loadBind(i, b)
return b
def loadBKeys(data, base):
for i in data:
loadBKey(i, base)
def loadCommandHolder(data):
c = commandHolder()
for i in data:
c.add(loadCommand(i))
return c
def loadForm(lst):
b = bindHolder(loadBKey(lst[1]), loadCommandHolder(lst[0]))
loadBKeys(lst[2], b)
return b
class command(object):
command = ""
command2 = ""
name = ""
name2 = ""
string = ""
def __init__(self, name, command, command2=""):
self.command = command
self.command2 = command2
self.name = name
self.name2 = name
if self.command2 != "":
self.string += 'alias "+' + name + '" "' + self.command + '"\n'
self.string += 'alias "-' + name + '" "' + self.command2 + '"'
self.name = "+" + self.name
else:
self.string += 'alias "' + name + '" "' + self.command + '"'
def saveForm(self):
return [self.name2, self.command, self.command2]
class bind(object):
command = None
key = ""
up = True
string = ""
name = ""
def __init__(self, command, bKey, key, up=False):
        if isinstance(bKey, str):
            raise TypeError("'bKey' was meant to be a bKey instance, not a string.")
self.command = command
self.key = key
self.up = up
if type(command) != str:
command = command.name
if up:
bKey = bKey.getBase()
self.name = "mod" + bKey.upper() + "_" + key.upper()
self.string = (
'alias "' + self.name + '" "bind ' + key.lower() + " " + command + '"'
)
if up:
bKey.append(self)
else:
bKey.down.append(self)
def saveForm(self):
return [self.command.saveForm(), self.key, self.up]
class bKey(object):
    key = None
    down = None
    base = None
    up = None
def __init__(self, key, b=None):
self.key = key
if b == None:
self.base = None
self.up = []
else:
self.base = b.base
b.add(self)
self.down = []
def upper(self):
return self.key.upper()
def getBase(self):
if self.base != None:
return self.base
return self
def getBinds(self):
string = ""
if self.base != None:
string += self.get(self.down)
string += (
'alias "+mod'
+ self.upper()
+ '" "'
+ ";".join(i.name for i in self.down)
+ '"\n'
)
string += 'alias "-mod' + self.upper() + '" "none"\n'
string += 'bind "' + self.upper() + '" "+mod' + self.upper() + '"'
else:
string += self.get(self.up)
string += 'alias "none" "' + ";".join(i.name for i in self.up) + '"\n'
return string
def get(self, lst):
string = ""
for i in lst:
string += i.command.string + "\n"
for i in lst:
string += i.string + "\n"
return string
def append(self, data):
if self.base != None:
self.base.append(data)
else:
self.up.append(data)
    def saveForm(self):
        # Sub-keys store their binds in self.down; the base key stores them in
        # self.up (self.down is always a list, so test the base instead).
        if self.base is not None:
            return [self.key] + [[i.saveForm() for i in self.down]]
        else:
            return [self.key] + [[i.saveForm() for i in self.up]]
class commandHolder(object):
lst = None
def __init__(self):
self.lst = []
def add(self, data):
self.lst.append(data)
def getData(self):
return "\n".join(i.string for i in self.lst) + "\n"
def saveForm(self):
return [i.saveForm() for i in self.lst]
class bindHolder(object):
lst = None
base = None
comm = None
def __init__(self, b=None, c=None):
self.lst = []
if b == None:
self.base = bKey("")
else:
self.base = b
if c == None:
self.comm = commandHolder()
else:
self.comm = c
def getData(self):
string = ""
string += self.comm.getData()
string += self.base.getBinds()
string += "\n".join(i.getBinds() for i in self.lst)
return string
def add(self, data):
self.lst.append(data)
def saveForm(self):
return (
[self.comm.saveForm()]
+ [self.base.saveForm()]
+ [[i.saveForm() for i in self.lst]]
)
b = bindHolder()
m4 = bKey("mouse4", b)
b.comm.add(command("sFollow", ""))
bind(
command("top", "dota_camera_setpos -2296.339355 1085.593506 0.000000", "sFollow"),
m4,
"1",
)
bind(
command("bot", "dota_camera_setpos 2874.552734 -3017.180664 0.000000", "sFollow"),
m4,
"1",
True,
)
bind(command("tShop", "toggleshoppanel"), m4, "2")
bind(command("sToggle", "dota_smart_camera_toggle"), m4, "2", True)
bind(
command(
"home", "dota_select_courier;dota_ability_execute 0;+camera;dota_courier_burst"
),
m4,
"3",
)
bind(
command(
"secret",
"dota_select_courier;dota_ability_execute 1;+camera;dota_courier_burst",
),
m4,
"3",
True,
)
bind(command("courier", "dota_courier_deliver;dota_courier_burst"), m4, "4")
bind(command("burst", "dota_courier_burst"), m4, "4", True)
bind(command("sCourier", "dota_select_courier"), m4, "5")
bind(command("", ""), m4, "5", True)
bind(command("", ""), m4, "TAB")
bind(command("", ""), m4, "TAB", True)
bind(command("item0", "dota_item_execute 0"), m4, "a")
bind(command("item1", "dota_item_execute 1"), m4, "a", True)
bind(command("item2", "dota_item_execute 2"), m4, "s")
bind(command("item3", "dota_item_execute 3"), m4, "s", True)
bind(command("item4", "dota_item_execute 4"), m4, "d")
bind(command("item5", "dota_item_execute 5"), m4, "d", True)
m5 = bKey("mouse5", b)
bind(command("test", "test"), m5, "1")
item = b.saveForm()
b = loadForm(b.saveForm())
print(item == b.saveForm())
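# Usage sketch (an addition, not part of the original script): write the
# generated alias/bind script to a cfg file that the game can execute, and
# persist the layout with saveForm()/loadForm() via the json module imported
# above. The file names below are placeholders.
if __name__ == "__main__":
    with open("autoexec_binds.cfg", "w") as cfg_file:
        cfg_file.write(b.getData())
    with open("binds.json", "w") as json_file:
        json.dump(b.saveForm(), json_file, indent=2)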
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collect the occupancy dataset.
See the README file for more information.
Author: G.J.J. van den Burg
License: This file is part of TCPD, see the top-level LICENSE file.
Copyright: 2019, The Alan Turing Institute
"""
import argparse
import clevercsv
import hashlib
import json
import os
import sys
import time
from functools import wraps
from urllib.request import urlretrieve
from urllib.error import URLError
# Downsampling factor: keep every 16th observation of the minute-level source data.
SAMPLE = 16
TXT_URL = "https://web.archive.org/web/20191128145102if_/https://raw.githubusercontent.com/LuisM78/Occupancy-detection-data/master/datatraining.txt"
MD5_TXT = "e656cd731300cb444bd10fcd28071e37"
MD5_JSON = "bc6cd9adaf496fe30bf0e417d2c3b0c6"
NAME_TXT = "datatraining.txt"
NAME_JSON = "occupancy.json"
class ValidationError(Exception):
def __init__(self, filename):
message = (
"Validating the file '%s' failed. \n"
"Please raise an issue on the GitHub page for this project "
"if the error persists." % filename
)
super().__init__(message)
def check_md5sum(filename, checksum):
with open(filename, "rb") as fp:
data = fp.read()
h = hashlib.md5(data).hexdigest()
return h == checksum
def validate(checksum):
"""Decorator that validates the target file."""
def validate_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
target = kwargs.get("target_path", None)
if os.path.exists(target) and check_md5sum(target, checksum):
return
out = func(*args, **kwargs)
if not os.path.exists(target):
raise FileNotFoundError("Target file expected at: %s" % target)
if not check_md5sum(target, checksum):
raise ValidationError(target)
return out
return wrapper
return validate_decorator
@validate(MD5_TXT)
def download_txt(target_path=None):
count = 0
while count < 5:
count += 1
try:
urlretrieve(TXT_URL, target_path)
return
except URLError as err:
print(
"Error occurred (%r) when trying to download txt. Retrying in 5 seconds"
% err,
sys.stderr,
)
time.sleep(5)
@validate(MD5_JSON)
def write_json(txt_path, target_path=None):
with open(txt_path, "r", newline="", encoding="ascii") as fp:
reader = clevercsv.reader(
fp, delimiter=",", quotechar='"', escapechar=""
)
rows = list(reader)
header = rows.pop(0)
header.insert(0, "id")
as_dicts = [dict(zip(header, r)) for r in rows]
var_include = ["Temperature", "Humidity", "Light", "CO2"]
time = [x["date"] for x in as_dicts]
time = [time[i] for i in range(0, len(time), SAMPLE)]
data = {
"name": "occupancy",
"longname": "Occupancy",
"n_obs": len(time),
"n_dim": len(var_include),
"time": {
"type": "string",
"format": "%Y-%m-%d %H:%M:%S",
"index": list(range(len(time))),
"raw": time,
},
"series": [],
}
for idx, var in enumerate(var_include, start=1):
lbl = "V%i" % idx
obs = [float(x[var]) for x in as_dicts]
obs = [obs[i] for i in range(0, len(obs), SAMPLE)]
data["series"].append({"label": lbl, "type": "float", "raw": obs})
with open(target_path, "w") as fp:
json.dump(data, fp, indent="\t")
def collect(output_dir="."):
txt_path = os.path.join(output_dir, NAME_TXT)
json_path = os.path.join(output_dir, NAME_JSON)
download_txt(target_path=txt_path)
write_json(txt_path, target_path=json_path)
def clean(output_dir="."):
txt_path = os.path.join(output_dir, NAME_TXT)
json_path = os.path.join(output_dir, NAME_JSON)
if os.path.exists(txt_path):
os.unlink(txt_path)
if os.path.exists(json_path):
os.unlink(json_path)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output-dir", help="output directory to use", default="."
)
parser.add_argument(
"action",
choices=["collect", "clean"],
help="Action to perform",
default="collect",
nargs="?",
)
return parser.parse_args()
def main(output_dir="."):
args = parse_args()
if args.action == "collect":
collect(output_dir=args.output_dir)
elif args.action == "clean":
clean(output_dir=args.output_dir)
if __name__ == "__main__":
main()
| python |
"""
Test CLI
References:
* https://click.palletsprojects.com/en/7.x/testing/
ToDo: expand cli testing
"""
from __future__ import annotations
from typing import Any
from click.testing import CliRunner
from pytest_mock import MockFixture
from alsek import __version__
def test_version(
cli_runner: CliRunner,
mocker: MockFixture,
) -> None:
result = cli_runner.invoke(args=["--version"])
assert result.exit_code == 0
assert __version__ in result.output
def test_help(
cli_runner: CliRunner,
mocker: MockFixture,
) -> None:
result = cli_runner.invoke(args=["--help"])
assert result.exit_code == 0
assert "Start a pool of Alsek workers" in result.output
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import tablib
import pytz
from datetime import datetime
from decimal import Decimal, InvalidOperation
from django.db.utils import IntegrityError
from django.core.management.base import BaseCommand
from django.utils import timezone
from ...models import MinuteData
def make_timestamp(date_string):
"""
A row-operation that converts an Efergy timestamp of the form
"2015-12-31 12:34:56" into a Python datetime object.
"""
try:
return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S')
    except (ValueError, TypeError):
        return None
class Command(BaseCommand):
    help = """Load Efergy Engage minute data directly, like this:
    `python manage.py load_engage_data your_filename.csv`
    """
def add_arguments(self, parser):
parser.add_argument('file_name', nargs='+', type=str)
def handle(self, *args, **options):
file_name = options['file_name'][0]
data = tablib.Dataset()
data.csv = open(file_name).read()
counter = 0
for row in data:
timestamp = timezone.make_aware(
make_timestamp(row[0]), timezone.get_current_timezone())
try:
value = Decimal(row[1])
except InvalidOperation:
value = None
if timestamp and value:
minute = timestamp.hour * 60 + timestamp.minute
try:
MinuteData.objects.create(
# TODO: Obviously, this should be a setting somewhere
timestamp=timestamp.astimezone(
pytz.timezone("America/New_York")),
minute=minute,
watts=value
)
counter += 1
except IntegrityError:
pass
print('Imported {0} new minutes from {1}'.format(counter, file_name))
| python |
# -*- coding: utf-8 -*-
from .eg import eg_hierarchy
| python |
"""Dahua package constants"""
__version__ = '0.0.2-2'
__author__ = "Alexander Ryazanov <[email protected]>"
from .device import *
from .channel import *
| python |
import pygame
from settings import *
class Tile(pygame.sprite.Sprite):
def __init__(self, pos, groups):
super().__init__(groups)
self.image = pygame.image.load('assets/rock.png').convert_alpha()
self.rect = self.image.get_rect(topleft = pos) | python |
# Each entry: id, name, (R, G, B) color
## Cityscapes, KITTI, Virtual KITTI
CITYSCAPES_LABELS = \
[[ 0 , 'unlabeled' , ( 0, 0, 0)],
[ 1 , 'ego vehicle' , ( 0, 0, 0)],
[ 2 , 'rectification border' , ( 0, 0, 0)],
[ 3 , 'out of roi' , ( 0, 0, 0)],
[ 4 , 'static' , ( 0, 0, 0)],
[ 5 , 'dynamic' , (111, 74, 0)],
[ 6 , 'ground' , ( 81, 0, 81)],
[ 7 , 'road' , (128, 64,128)],
[ 8 , 'sidewalk' , (244, 35,232)],
[ 9 , 'parking' , (250,170,160)],
[10 , 'rail track' , (230,150,140)],
[11 , 'building' , ( 70, 70, 70)],
[12 , 'wall' , (102,102,156)],
[13 , 'fence' , (190,153,153)],
[14 , 'guard rail' , (180,165,180)],
[15 , 'bridge' , (150,100,100)],
[16 , 'tunnel' , (150,120, 90)],
[17 , 'pole' , (153,153,153)],
[18 , 'polegroup' , (153,153,153)],
[19 , 'traffic light' , (250,170, 30)],
[20 , 'traffic sign' , (220,220, 0)],
[21 , 'vegetation' , (107,142, 35)],
[22 , 'terrain' , (152,251,152)],
[23 , 'sky' , ( 70,130,180)],
[24 , 'person' , (220, 20, 60)],
[25 , 'rider' , (255, 0, 0)],
[26 , 'car' , ( 0, 0,142)],
[27 , 'truck' , ( 0, 0, 70)],
[28 , 'bus' , ( 0, 60,100)],
[29 , 'caravan' , ( 0, 0, 90)],
[30 , 'trailer' , ( 0, 0,110)],
[31 , 'train' , ( 0, 80,100)],
[32 , 'motorcycle' , ( 0, 0,230)],
[33 , 'bicycle' , (119, 11, 32)],
[34 , 'license plate' , ( 0, 0,142)]]
## SYNTHIA-SF
SYNTHIA =\
[[0 , 'void' , ( 0, 0, 0)],
[1 , 'road' , (128, 64,128)],
[2 , 'sidewalk' , (244, 35,232)],
[3 , 'building' , ( 70, 70, 70)],
[4 , 'wall' , (102,102,156)],
[5 , 'fence' , (190,153,153)],
[6 , 'pole' , (153,153,153)],
[7 , 'traffic light' , (250,170, 30)],
[8 , 'traffic sign' , (220,220, 0)],
[9 , 'vegetation' , (107,142, 35)],
[10 , 'terrain' , (152,251,152)],
[11 , 'sky' , ( 70,130,180)],
[12 , 'person' , (220, 20, 60)],
[13 , 'rider' , (255, 0, 0)],
[14 , 'car' , ( 0, 0,142)],
[15 , 'truck' , ( 0, 0, 70)],
[16 , 'bus' , ( 0, 60,100)],
[17 , 'train' , ( 0, 80,100)],
[18 , 'motorcycle' , ( 0, 0,230)],
[19 , 'bicycle' , (119, 11, 32)],
[20 , 'road lines' , (157,234, 50)],
[21 , 'other' , ( 72, 0, 98)],
[22 , 'road works' , (167,106, 29)]]
## VIPER
VIPER=\
{( 0, 0, 0) : (0 , 'unlabeled' ),
(111, 74, 0) : (1 , 'ambiguous' ),
( 70,130,180) : (2 , 'sky' ),
(128, 64,128) : (3 , 'road' ),
(244, 35,232) : (4 , 'sidewalk' ),
(230,150,140) : (5 , 'railtrack' ),
(152,251,152) : (6 , 'terrain' ),
( 87,182, 35) : (7 , 'tree' ),
( 35,142, 35) : (8 , 'vegetation' ),
( 70, 70, 70) : (9 , 'building' ),
(153,153,153) : (10 , 'infrastructure'),
(190,153,153) : (11 , 'fence' ),
(150, 20, 20) : (12 , 'billboard' ),
(250,170, 30) : (13 , 'trafficlight' ),
(220,220, 0) : (14 , 'trafficsign' ),
(180,180,100) : (15 , 'mobilebarrier' ),
(173,153,153) : (16 , 'firehydrant' ),
(168,153,153) : (17 , 'chair' ),
( 81, 0, 21) : (18 , 'trash' ),
( 81, 0, 81) : (19 , 'trashcan' ),
(220, 20, 60) : (20 , 'person' ),
(255, 0, 0) : (21 , 'animal' ),
(119, 11, 32) : (22 , 'bicycle' ),
( 0, 0,230) : (23 , 'motorcycle' ),
( 0, 0,142) : (24 , 'car' ),
( 0, 80,100) : (25 , 'van' ),
( 0, 60,100) : (26 , 'bus' ),
( 0, 0, 70) : (27 , 'truck' ),
( 0, 0, 90) : (28 , 'trailer' ),
( 0, 80,100) : (29 , 'train' ),
( 0,100,100) : (30 , 'plane' ),
( 50, 0, 90) : (31 , 'boat' )}
| python |
"""FastApi Backend for my Portfolio Website.
This doesn't have much purpose currently, but eventually I want to use this
backend to interact with various Python-based projects I develop.
"""
| python |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Spectroscopy experiment class for resonators."""
from typing import Iterable, Optional, Tuple
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.exceptions import QiskitError
from qiskit.providers import Backend
import qiskit.pulse as pulse
from qiskit_experiments.framework import Options
from qiskit_experiments.library.characterization.spectroscopy import Spectroscopy
from .analysis.resonator_spectroscopy_analysis import ResonatorSpectroscopyAnalysis
class ResonatorSpectroscopy(Spectroscopy):
"""Perform spectroscopy on the readout resonator.
# section: overview
This experiment does spectroscopy on the readout resonator. It applies the following
circuit
.. parsed-literal::
┌─┐
q: ┤M├
└╥┘
c: 1/═╩═
0
where a spectroscopy pulse is attached to the measurement instruction.
Side note: when doing readout resonator spectroscopy, each measured IQ point has a
frequency dependent phase. Close to the resonance, the IQ points start rotating around
    in the IQ plane. This effect must be accounted for in the data processing to produce a
meaningful signal. The default data processing workflow will therefore reduce the two-
dimensional IQ data to one-dimensional data using the magnitude of each IQ point.
# section: warning
Some backends may not have the required functionality to properly support resonator
spectroscopy experiments. The experiment may not work or the resulting resonance
may not properly reflect the properties of the readout resonator.
# section: example
The resonator spectroscopy experiment can be run by doing:
.. code:: python
qubit = 1
spec = ResonatorSpectroscopy(qubit, backend)
exp_data = spec.run().block_for_results()
exp_data.figure(0)
This will measure the resonator attached to qubit 1 and report the resonance frequency
as well as the kappa, i.e. the line width, of the resonator.
# section: analysis_ref
:py:class:`ResonatorSpectroscopyAnalysis`
# section: see_also
qiskit_experiments.library.characterization.qubit_spectroscopy.QubitSpectroscopy
"""
@classmethod
def _default_experiment_options(cls) -> Options:
"""Default option values used for the spectroscopy pulse.
All units of the resonator spectroscopy experiment are given in seconds.
Experiment Options:
amp (float): The amplitude of the spectroscopy pulse. Defaults to 1 and must
be between 0 and 1.
duration (float): The duration in seconds of the spectroscopy pulse.
sigma (float): The standard deviation of the spectroscopy pulse in seconds.
width (float): The width of the flat-top part of the GaussianSquare pulse in
seconds. Defaults to 0.
"""
options = super()._default_experiment_options()
options.amp = 1
options.duration = 480e-9
options.sigma = 60e-9
options.width = 360e-9
return options
def __init__(
self,
qubit: int,
backend: Optional[Backend] = None,
frequencies: Optional[Iterable[float]] = None,
absolute: bool = True,
**experiment_options,
):
"""Initialize a resonator spectroscopy experiment.
A spectroscopy experiment run by setting the frequency of the readout drive.
The parameters of the GaussianSquare spectroscopy pulse can be specified at run-time
through the experiment options.
Args:
qubit: The qubit on which to run readout spectroscopy.
backend: Optional, the backend to run the experiment on.
frequencies: The frequencies to scan in the experiment, in Hz. The default values
range from -20 MHz to 20 MHz in 51 steps. If the ``absolute`` variable is
set to True then a center frequency obtained from the backend's defaults is
added to each value of this range.
absolute: Boolean to specify if the frequencies are absolute or relative to the
resonator frequency in the backend. The default value is True.
            experiment_options: Keyword arguments used to set the experiment options.
Raises:
QiskitError: if no frequencies are given and absolute frequencies are desired and
no backend is given.
"""
analysis = ResonatorSpectroscopyAnalysis()
if frequencies is None:
frequencies = np.linspace(-20.0e6, 20.0e6, 51)
if absolute:
if backend is None:
raise QiskitError(
"Cannot automatically compute absolute frequencies without a backend."
)
center_freq = backend.defaults().meas_freq_est[qubit]
frequencies += center_freq
super().__init__(qubit, frequencies, backend, absolute, analysis, **experiment_options)
@property
def _backend_center_frequency(self) -> float:
"""Returns the center frequency of the experiment.
Returns:
The center frequency of the experiment.
Raises:
QiskitError: If the experiment does not have a backend set.
"""
if self.backend is None:
raise QiskitError("backend not set. Cannot call center_frequency.")
return self.backend.defaults().meas_freq_est[self.physical_qubits[0]]
def _template_circuit(self) -> QuantumCircuit:
"""Return the template quantum circuit."""
circuit = QuantumCircuit(1, 1)
circuit.measure(0, 0)
return circuit
def _schedule(self) -> Tuple[pulse.ScheduleBlock, Parameter]:
"""Create the spectroscopy schedule."""
dt, granularity = self._dt, self._granularity
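        # Convert the pulse parameters from seconds to samples, rounding down to a
        # multiple of the backend's pulse granularity.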
duration = int(granularity * (self.experiment_options.duration / dt // granularity))
sigma = granularity * (self.experiment_options.sigma / dt // granularity)
width = granularity * (self.experiment_options.width / dt // granularity)
qubit = self.physical_qubits[0]
freq_param = Parameter("frequency")
with pulse.build(backend=self.backend, name="spectroscopy") as schedule:
pulse.shift_frequency(freq_param, pulse.MeasureChannel(qubit))
pulse.play(
pulse.GaussianSquare(
duration=duration,
amp=self.experiment_options.amp,
sigma=sigma,
width=width,
),
pulse.MeasureChannel(qubit),
)
pulse.acquire(duration, qubit, pulse.MemorySlot(0))
return schedule, freq_param
def circuits(self):
"""Create the circuit for the spectroscopy experiment.
The circuits are based on a GaussianSquare pulse and a frequency_shift instruction
encapsulated in a measurement instruction.
Returns:
circuits: The circuits that will run the spectroscopy experiment.
"""
sched, freq_param = self._schedule()
circs = []
for freq in self._frequencies:
freq_shift = freq - self._backend_center_frequency if self._absolute else freq
freq_shift = np.round(freq_shift, decimals=3)
sched_ = sched.assign_parameters({freq_param: freq_shift}, inplace=False)
circuit = self._template_circuit()
circuit.add_calibration("measure", self.physical_qubits, sched_)
self._add_metadata(circuit, freq, sched)
circs.append(circuit)
return circs
| python |
# Generated by Django 2.2.5 on 2019-09-25 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0002_squarefootlayout'),
]
operations = [
migrations.AddField(
model_name='squarefootlayout',
name='fill_cols',
field=models.CharField(blank=True, max_length=200),
),
migrations.AddField(
model_name='squarefootlayout',
name='fill_rows',
field=models.CharField(blank=True, max_length=200),
),
]
| python |
from __future__ import absolute_import, division, print_function
from stripe.api_resources.abstract import APIResource
class Mandate(APIResource):
OBJECT_NAME = "mandate"
| python |
from django.contrib.auth import logout
from django.contrib.auth.models import User, auth
from django.shortcuts import render, redirect
from hotels.models import Reservation
from .forms import *
# Create your views here.
def log(request):
if request.method == 'POST':
password = request.POST.get('password')
username = request.POST.get('username')
user = auth.authenticate(request, username=username, password=password)
if user is not None:
auth.login(request, user)
return redirect('/')
else:
return redirect('/login/')
else:
return render(request, 'login.html')
def log_out(request):
logout(request)
return redirect('/')
def registration(request):
if request.method == 'POST':
form = UserForm(request.POST)
if form.is_valid():
new_user = form.save(commit=False)
new_user.set_password(form.cleaned_data['password'])
new_user.save()
return redirect('/login/')
else:
form = UserForm()
return render(request, 'registration.html', {'form': form})
def profile(request):
user = request.user
reservations = Reservation.objects.filter(user=user)
return render(request, 'profile.html', {'reservations': reservations, 'user': user}) | python |
# -*- Mode: Python; tab-width: 8; indent-tabs-mode: nil; python-indent-offset:4 -*-
# vim:set et sts=4 ts=4 tw=80:
# This Source Code Form is subject to the terms of the MIT License.
# If a copy of the ML was not distributed with this
# file, You can obtain one at https://opensource.org/licenses/MIT
# author: JackRed <[email protected]>
# Timothée Couble
from pso import PSO, minimise
from pso_ann import train_ANN_PSO
import train_help
from pso_json import get_boundary_config, decode_args, encode_args
import matplotlib.pyplot as plt
from args import opso_args
def scale_args(args, boundary):
    # Iterate through all arguments and scale each one to lie within its configured bounds
i = 0
for key in boundary:
args[i] = train_help.scale(args[i], boundary[key][0], boundary[key][1])
i += 1
# Round nb_h_layers and nb_neurons_layer to have int values
args[1] = round(args[1])
args[2] = round(args[2])
# Get activation functions
i_activation = round(train_help.scale(args[-1], 0,
len(train_help.ACTIVATIONS) - 1))
activations = [train_help.ACTIVATIONS[i_activation]
for _ in range(args[1] + 1)]
return args[:-1] + [activations]
def fitness_mean(*args):
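    # Train the ANN with PSO several times and average the best scores to smooth out
    # run-to-run stochastic noise, keeping the best-performing PSO instance.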
res = []
best_pso = None
best_score = float("inf")
for i in range(4):
pso, _ = train_ANN_PSO(*args)
res.append(pso.best_global_score)
if pso.best_global_score < best_score:
best_score = pso.best_global_score
best_pso = pso
return sum(res) / len(res), best_pso
def train_PSO_PSO_ANN(inputs, res_ex, boundary, opso_arg, pso_arg,
draw_graph=False):
dim = 11
opso = PSO(dim,
lambda param: fitness_mean(inputs, res_ex, *pso_arg.values(),
*scale_args(param, boundary)),
**opso_arg, comparator=minimise,
min_bound=train_help.MIN_BOUND, max_bound=train_help.MAX_BOUND,
endl="11")
print("\nRunning...\n")
if draw_graph:
opso.set_graph_config(inputs=inputs, res_ex=res_ex, opso=True)
opso.run()
return opso
def main():
args = opso_args().parse_args()
file_name = train_help.name_to_file(args.function)
inputs, res_ex = train_help.read_input(file_name)
opso_arg = decode_args('', 'opso', args.onc)
real_time_graph = args.real_time
boundary = get_boundary_config(args.obc)
pso = train_PSO_PSO_ANN(inputs, res_ex, boundary, **opso_arg,
draw_graph=real_time_graph)
dict_pso = {**train_help.args_to_pso_kwargs(
scale_args(pso.best_position, boundary)),
**opso_arg["pso_arg"]}
train_help.write_activation(dict_pso)
encode_args(args.function, 'pso', **dict_pso)
if not real_time_graph:
pso.set_graph_config(inputs=inputs, res_ex=res_ex, opso=True)
pso.draw_graphs()
plt.show()
if __name__ == '__main__':
main()
| python |
# Test MQTT and Async
# 10 second button monitor
from machine import Pin
import pycom
import time
import uasyncio as asyncio
from my_mqtt import MyMqtt
pycom.heartbeat(False)
class RGB:
def __init__(self):
self.colour = 0x000000
def set(self, colour):
self.colour = colour
pycom.rgbled(self.colour)
rgb = RGB()
async def killer(duration):
await asyncio.sleep(duration)
async def toggle(rgbLED, time_ms):
while True:
await asyncio.sleep_ms(time_ms)
colour = rgb.colour
colour = (colour + 1) % 0xFFFFFF
        rgb.set(colour)  # cycle the RGB LED through colour values
# Starting to link to actual outputs to Sensors and multi threaded
# 1 second delays to prevent overloading MQTT which will then fail
rgb.set(0x200000) # Red
print("test4 version 0.10 2018-08-22")
mq = MyMqtt()
mq.send_value("0", "button")
rgb.set(0x002000) # Green
async def button_monitor():
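    # Poll the button on P10 (pull-up, so pressed == 0): publish "0" while it is
    # released and "1" once pressed; the 1 s sleeps keep MQTT publishes from
    # flooding the broker.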
p_in = Pin('P10', mode=Pin.IN, pull=Pin.PULL_UP)
while True:
# Button not pushed
pycom.rgbled(0xFF8000) # Orange
mq.send_value("0", "button")
await asyncio.sleep_ms(1000)
while p_in() == 1: # Wait for button push
await asyncio.sleep_ms(100)
rgb.set(0x008000) # Green
mq.send_value("0", "button")
await asyncio.sleep_ms(1000)
mq.send_value("1", "button")
await asyncio.sleep_ms(1000)
while p_in() == 0: # Wait for button release
await asyncio.sleep_ms(100)
rgb.set(0x808000) # Yellow
mq.send_value("1", "button")
await asyncio.sleep_ms(1000)
def test(duration):
loop = asyncio.get_event_loop()
duration = int(duration)
if duration > 0:
print("Run test for {:3d} seconds".format(duration))
loop.create_task(toggle(pycom.rgbled, 10))
loop.create_task(button_monitor())
loop.run_until_complete(killer(duration))
loop.close()
test(20)
time.sleep_ms(1000) # Make sure we don't overload the sending of data
mq.send_value("0", "button")
rgb.set(0x201010) # pale pink
print("Test completed")
| python |
#Sobreira Gustavo
#Missing: u, scoreboard and replays
from random import randint
def criar_tabuleiro():
    #Create the 3x3 matrix that holds the game board
for l in range(3):
linha = []
for c in range(3):
linha.append('🟫')
campo.append(linha)
def enumerar_colunas():
print(' COLUNA')
num = 0
print(' ' * 4, end=' ')
for i in range(3):
print(f'{num}', end=' ')
num += 1
print()
def enumerar_linha():
linha = 'INHA'
print(' L')
for l in range(3):
print(f' {linha[l]} {l} ', end=' ')
        # This 'for c' loop is where the board's visuals are drawn; here it takes shape
for c in range(3):
print(f'{campo[l][c]} ', end='')
print()
print(' A')
    # The print above is what breaks the lines; I recommend commenting it out ('#') and running the code to see the difference
def exibir_tabuleiro():
criar_tabuleiro()
alinhar()
enumerar_colunas()
enumerar_linha()
alinhar()
def selecionar_player():
erro = 1
while erro != 0:
escolha = int(input('Antes de começarmos escolha seu símbolo\n'
'[ 1 ] - 🔳\n'
'[ 2 ] - 🔘\n'
'Digite o número referente ao símbolo: '))
if escolha == 1 or escolha == 2:
erro -= 1
return escolha
def verificar_ganhador():
    # Check every row, every column and both diagonals for three equal symbols.
    ganhador = 0
    linhas_possiveis = []
    for i in range(3):
        linhas_possiveis.append([campo[i][0], campo[i][1], campo[i][2]])  # row i
        linhas_possiveis.append([campo[0][i], campo[1][i], campo[2][i]])  # column i
    linhas_possiveis.append([campo[0][0], campo[1][1], campo[2][2]])  # main diagonal
    linhas_possiveis.append([campo[0][2], campo[1][1], campo[2][0]])  # anti-diagonal
    for trio in linhas_possiveis:
        if trio == ['🔳', '🔳', '🔳']:
            ganhador = 1
        elif trio == ['🔘', '🔘', '🔘']:
            ganhador = 2
    return ganhador
def fazer_jogada(rodada):
    #Since if the player picks 'X' the bot necessarily gets 'O' (and vice versa), we have:
if escolha == 1:
simbolo_player = '🔳'
simbolo_bot = '🔘'
else:
simbolo_player = '🔘'
simbolo_bot = '🔳'
    #So the game never always starts with the same player, a randint keeps the choice random
ordem_jogada = 0
if rodada == 0:
ordem_jogada = randint(1, 2)
rodada += 1
while rodada != 10:
if verificar_ganhador() != 0:
if verificar_ganhador() == 1:
print('O jogador 🔳 VENCEU')
else:
print('O jogador 🔘 VENCEU')
break
        #So whenever the drawn number is 2 (even), it is the player's turn
if ordem_jogada % 2 == 0:
erro = 1
ordem_jogada -= 1
            #Avoiding 'break', since it is a Python-specific statement, this loop is controlled with the 'erro' flag instead
            #'erro' only changes when the player makes a valid move
while erro != 0:
linha = int(input('Selecione uma coordenada utilizando apenas os números\n'
'Linha: '))
coluna = int(input('Coluna: '))
if linha in (0, 1, 2) and coluna in (0, 1, 2):
if campo[linha][coluna] == '🟫':
campo[linha][coluna] = simbolo_player
erro -= 1
exibir_tabuleiro()
rodada += 1
else:
print(' =- =- =- =- =- Busque casas vazias -= -= -= -= -= ')
else:
erro = 1
ordem_jogada += 1
while erro != 0:
linha = randint(0, 2)
coluna = randint(0, 2)
if campo[linha][coluna] == '🟫':
campo[linha][coluna] = simbolo_bot
erro -= 1
exibir_tabuleiro()
rodada += 1
if rodada == 10:
print('Deu Velha')
def alinhar():
print('\n')
print('='*40)
print('\n')
campo = []
exibir_tabuleiro()
escolha = selecionar_player()
verificar_ganhador()
fazer_jogada(0)
| python |
# Copyright 2017-2018, Mohammad Haft-Javaherian. ([email protected]).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# References:
# -----------
# [1] Haft-Javaherian, M; Fang, L.; Muse, V.; Schaffer, C.B.; Nishimura,
# N.; & Sabuncu, M. R. (2018) Deep convolutional neural networks for
# segmenting 3D in vivo multiphoton images of vasculature in
# Alzheimer disease mouse models. *arXiv preprint, arXiv*:1801.00880.
# =============================================================================
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import range
import h5py
import time
import scipy.io as io
import sys
from random import shuffle
import itertools as it
# Change isTrain to True if you want to train the network
isTrain = False
# Change isForward to True if you want to test the network
isForward = True
# padSize is the padding around the central voxel to generate the field of view
padSize = ((3, 3), (16, 16), (16, 16), (0, 0))
WindowSize = np.sum(padSize, axis=1) + 1
# pad size around the central voxel to generate the 2D region of interest
corePadSize = 2
# number of epoch to train
nEpoch = 100
# The input h5 file location
if len(sys.argv) > 1:
inputData = sys.argv[1]
else:
inputData = raw_input("Enter h5 input file path (e.g. ../a.h5)> ")
# batch size
if len(sys.argv) > 2:
batch_size = int(sys.argv[2])
else:
batch_size = 1000
# start the TF session
sess = tf.InteractiveSession()
# create placeholders for the input and output nodes
x = tf.placeholder(tf.float32, shape=[None, WindowSize[0], WindowSize[1],
WindowSize[2], WindowSize[3]])
y_ = tf.placeholder(tf.float32, shape=[None, (2 * corePadSize + 1) ** 2, 2])
# Import Data
f = h5py.File(inputData, 'r')
im = np.array(f.get('/im'))
im = im.reshape(im.shape + (1, ))
imSize = im.size
imShape = im.shape
if isTrain:
l = np.array(f.get('/l'))
l = l.reshape(l.shape + (1,))
nc = im.shape[1]
tst = im[:, (nc / 2):(3 * nc / 4), :]
tstL = l[:,(nc / 2):(3 * nc / 4), :]
trn = im[:, 0:(nc / 2), :]
trnL = l[:, 0:(nc / 2), :]
tst = np.pad(tst, padSize, 'symmetric')
trn = np.pad(trn, padSize, 'symmetric')
if isForward:
im = np.pad(im, padSize, 'symmetric')
V = np.ndarray(shape=(imShape), dtype=np.float32)
print("Data loaded.")
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def conv3d(x, W):
return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='VALID')
def max_pool(x, shape):
return tf.nn.max_pool3d(x, ksize=shape,
strides=[1, 2, 2, 2, 1], padding='SAME')
def get_batch(im, l, corePadSize, ID):
""" generate a batch from im and l for training
based on the location of ID entries and core pad size. Note that the ID
is based on no core pad.
"""
l_ = np.ndarray(shape=(len(ID), (2 * corePadSize + 1) ** 2, 2),
dtype=np.float32)
im_ = np.ndarray(shape=(len(ID), WindowSize[0], WindowSize[1], WindowSize[2],
WindowSize[3]), dtype=np.float32)
for i in range(len(ID)):
r = np.unravel_index(ID[i], l.shape)
im_[i, :, :, :] = im[r[0]:(r[0] + WindowSize[0]),
r[1]:(r[1] + WindowSize[1]), r[2]:(r[2] + WindowSize[2]), :]
l_[i, :, 1] = np.reshape(l[r[0],
(r[1] - corePadSize):(r[1] + corePadSize + 1),
(r[2] - corePadSize):(r[2] + corePadSize + 1),:],
(2 * corePadSize + 1) ** 2)
l_[i, :,0] = 1-l_[i, :, 1]
return im_, l_
def get_batch3d_fwd(im, Vshape, ID):
""" generate a batch from im for testing
based on the location of ID entries and core pad size. Note that the ID
is based on no core pad.
"""
im_=np.ndarray(shape=(len(ID),WindowSize[0], WindowSize[1], WindowSize[2]
, WindowSize[3]),dtype=np.float32)
for i in range(len(ID)):
r = np.unravel_index(ID[i],Vshape)
im_[i,:,:,:]=im[r[0]:r[0]+WindowSize[0],r[1]:r[1]+WindowSize[1],
r[2]:r[2]+WindowSize[2],r[3]:r[3]+WindowSize[3]]
return im_
# Define the DeepVess Architecture
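# Three 3x3x3 3-D conv layers + max-pool, then two 1x3x3 conv layers + max-pool,
# followed by two fully connected layers with dropout; the output is a
# (2*corePadSize+1)^2 patch with two classes per voxel.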
W_conv1a = weight_variable([3, 3, 3, 1, 32])
b_conv1a = bias_variable([32])
h_conv1a = tf.nn.relu(conv3d(x, W_conv1a) + b_conv1a)
W_conv1b = weight_variable([3, 3, 3, 32, 32])
b_conv1b = bias_variable([32])
h_conv1b = tf.nn.relu(conv3d(h_conv1a, W_conv1b) + b_conv1b)
W_conv1c = weight_variable([3, 3, 3, 32, 32])
b_conv1c = bias_variable([32])
h_conv1c = tf.nn.relu(conv3d(h_conv1b, W_conv1c) + b_conv1c)
h_pool1 = max_pool(h_conv1c,[1, 1, 2, 2, 1])
W_conv2a = weight_variable([1, 3, 3, 32, 64])
b_conv2a = bias_variable([64])
h_conv2a = tf.nn.relu(conv3d(h_pool1, W_conv2a) + b_conv2a)
W_conv2b = weight_variable([1, 3, 3, 64, 64])
b_conv2b = bias_variable([64])
h_conv2b = tf.nn.relu(conv3d(h_conv2a, W_conv2b) + b_conv2b)
h_pool2 = max_pool(h_conv2b,[1, 1, 2, 2, 1])
W_fc1 = weight_variable([1 * 5 * 5 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 1 * 5 * 5 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 1 * 5 * 5 * 2])
b_fc2 = bias_variable([1 * 5 * 5 * 2])
h_fc1 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
y_conv = tf.reshape(h_fc1, [-1, 1 * 5 * 5, 2])
# loss function over (TP U FN U FP)
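# allButTN is 1 wherever either the prediction or the label is foreground, so true
# negatives are excluded from the masked cross-entropy average below.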
allButTN = tf.maximum(tf.argmax(y_conv, 2), tf.argmax(y_, 2))
cross_entropy = tf.reduce_mean(tf.multiply(tf.cast(allButTN, tf.float32),
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)))
train_step = tf.train.AdamOptimizer(1e-6).minimize(cross_entropy)
correct_prediction = tf.multiply(tf.argmax(y_conv, 2), tf.argmax(y_, 2))
accuracy = tf.divide(tf.reduce_sum(tf.cast(correct_prediction, tf.float32)),
tf.reduce_sum(tf.cast(allButTN, tf.float32)))
sess.run(tf.global_variables_initializer())
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
if isTrain:
file_log = open("model.log", "w")
file_log.write("Epoch, Step, training accuracy, test accuracy, Time (hr) \n")
file_log.close()
start = time.time()
begin = start
trnSampleID = []
for ii in range(0, trnL.shape[0]):
for ij in it.chain(range(corePadSize,
trnL.shape[1] - corePadSize, 2 * corePadSize + 1),
[trnL.shape[1] - corePadSize - 1]):
for ik in it.chain(range(corePadSize,trnL.shape[2]-corePadSize,
2*corePadSize + 1), [trnL.shape[2] - corePadSize - 1]):
trnSampleID.append(np.ravel_multi_index((ii, ij, ik, 0),
trnL.shape))
shuffle(trnSampleID)
tstSampleID = []
for ii in range(0, tstL.shape[0]):
for ij in it.chain(range(corePadSize, tstL.shape[1] - corePadSize,
2 * corePadSize + 1), [tstL.shape[1] - corePadSize - 1]):
for ik in it.chain(range(corePadSize, tstL.shape[2] - corePadSize,
2 * corePadSize + 1), [tstL.shape[2] - corePadSize - 1]):
tstSampleID.append(np.ravel_multi_index((ii, ij, ik, 0),
tstL.shape))
shuffle(tstSampleID)
x_tst,l_tst = get_batch(tst, tstL, corePadSize, tstSampleID[0:batch_size])
for epoch in range(nEpoch):
shuffle(trnSampleID)
for i in range(np.int(np.ceil(len(trnSampleID) / batch_size))):
x1,l1 = get_batch(trn, trnL, corePadSize,
trnSampleID[(i * batch_size):((i + 1) * batch_size)])
train_step.run(feed_dict={x: x1, y_: l1, keep_prob: 0.5})
if i%100 == 99:
train_accuracy = accuracy.eval(feed_dict={
x: x1 , y_: l1 , keep_prob: 1.0})
test_accuracy = accuracy.eval(feed_dict={
x: x_tst , y_: l_tst, keep_prob: 1.0})
end = time.time()
print("epoch %d, step %d, training accuracy %g, test accuracy %g. "
"Elapsed time/sample is %e sec. %f hour to finish."%(epoch, i,
train_accuracy, test_accuracy, (end - start) / 100000,
((nEpoch - epoch) * len(trnSampleID) / batch_size - i)
* (end - start) / 360000))
file_log = open("model.log","a")
file_log.write("%d, %d, %g, %g, %f \n" % (epoch, i, train_accuracy,
test_accuracy, (end-begin) / 3600))
file_log.close()
start = time.time()
if epoch%10 == 9:
save_path = saver.save(sess, "model-epoch" + str(epoch) + ".ckpt")
print("epoch %d, Model saved in file: %s" % (epoch, save_path))
if isForward:
saver.restore(sess, "private/model-epoch29999.ckpt")
print("Model restored.")
vID=[]
for ii in range(0,V.shape[0]):
for ij in it.chain(range(corePadSize, V.shape[1] - corePadSize,
2 * corePadSize + 1), [V.shape[1] - corePadSize - 1]):
for ik in it.chain(range(corePadSize, V.shape[2] - corePadSize,
2 * corePadSize + 1), [V.shape[2] - corePadSize - 1]):
vID.append(np.ravel_multi_index((ii, ij, ik, 0), V.shape))
start = time.time()
for i in range(np.int(np.ceil(len(vID) / batch_size))):
x1 = get_batch3d_fwd(im,imShape, vID[i*batch_size:(i+1)*batch_size])
y1 = np.reshape(y_conv.eval(feed_dict={x:x1,keep_prob: 1.0}),(-1,
(2*corePadSize+1), (2*corePadSize+1),2))
for j in range(y1.shape[0]):
r=np.unravel_index(vID[i * batch_size + j], V.shape)
V[r[0],(r[1]-corePadSize):(r[1]+corePadSize+1),
(r[2]-corePadSize):(r[2]+corePadSize+1),0] = np.argmax(y1[j],axis=2)
if i%100 == 99:
end = time.time()
print("step %d is done. %f min to finish." % (i, (end - start)
/ 60 / (i + 1) * (np.int(np.ceil(len(vID) / batch_size)) - i - 1)))
io.savemat(sys.argv[1][:-3] + '-V_fwd', {'V':np.transpose(np.reshape(V,
imShape[0:3]), (2, 1, 0))})
print(sys.argv[1][:-3] + '-V_fwd.mat is saved.')
| python |
# Adapted from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/precise_bn.py # noqa: E501
# Original licence: Copyright (c) 2019 Facebook, Inc under the Apache License 2.0 # noqa: E501
import logging
import time
import mmcv
import torch
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import Hook
from mmcv.utils import print_log
from torch.nn import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.parallel import DataParallel, DistributedDataParallel
from torch.utils.data import DataLoader
def is_parallel_module(module):
"""Check if a module is a parallel module.
The following 3 modules (and their subclasses) are regarded as parallel
modules: DataParallel, DistributedDataParallel,
MMDistributedDataParallel (the deprecated version).
Args:
module (nn.Module): The module to be checked.
Returns:
bool: True if the input module is a parallel module.
"""
parallels = (DataParallel, DistributedDataParallel,
MMDistributedDataParallel)
if isinstance(module, parallels):
return True
else:
return False
@torch.no_grad()
def update_bn_stats(model, data_loader, num_iters=200, logger=None):
"""Recompute and update the batch norm stats to make them more precise.
During
training both BN stats and the weight are changing after every iteration,
so the running average can not precisely reflect the actual stats of the
current model.
In this function, the BN stats are recomputed with fixed weights, to make
the running average more precise. Specifically, it computes the true
average of per-batch mean/variance instead of the running average.
Args:
model (nn.Module): The model whose bn stats will be recomputed.
data_loader (iterator): The DataLoader iterator.
num_iters (int): number of iterations to compute the stats.
logger (:obj:`logging.Logger` | None): Logger for logging.
Default: None.
"""
model.train()
    assert len(data_loader) >= num_iters, (
        f'length of dataloader {len(data_loader)} must be at least '
        f'the iteration number {num_iters}')
if is_parallel_module(model):
parallel_module = model
model = model.module
else:
parallel_module = model
# Finds all the bn layers with training=True.
bn_layers = [
m for m in model.modules() if m.training and isinstance(m, _BatchNorm)
]
if len(bn_layers) == 0:
print_log('No BN found in model', logger=logger, level=logging.WARNING)
return
print_log(f'{len(bn_layers)} BN found', logger=logger)
# Finds all the other norm layers with training=True.
for m in model.modules():
if m.training and isinstance(m, (_InstanceNorm, GroupNorm)):
print_log(
'IN/GN stats will be updated like training.',
logger=logger,
level=logging.WARNING)
# In order to make the running stats only reflect the current batch, the
# momentum is disabled.
# bn.running_mean = (1 - momentum) * bn.running_mean + momentum *
# batch_mean
# Setting the momentum to 1.0 to compute the stats without momentum.
momentum_actual = [bn.momentum for bn in bn_layers] # pyre-ignore
for bn in bn_layers:
bn.momentum = 1.0
# Note that running_var actually means "running average of variance"
running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]
finish_before_loader = False
prog_bar = mmcv.ProgressBar(len(data_loader))
for ind, data in enumerate(data_loader):
with torch.no_grad():
parallel_module(**data, return_loss=False)
prog_bar.update()
for i, bn in enumerate(bn_layers):
# Accumulates the bn stats.
running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)
            # running_var here accumulates the average of the per-batch variance
running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)
if (ind + 1) >= num_iters:
finish_before_loader = True
break
assert finish_before_loader, 'Dataloader stopped before ' \
f'iteration {num_iters}'
for i, bn in enumerate(bn_layers):
# Sets the precise bn stats.
bn.running_mean = running_mean[i]
bn.running_var = running_var[i]
bn.momentum = momentum_actual[i]
class PreciseBNHook(Hook):
"""Precise BN hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
num_iters (int): Number of iterations to update the bn stats.
Default: 200.
interval (int): Perform precise bn interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, num_iters=200, interval=1):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.num_iters = num_iters
def after_train_epoch(self, runner):
if self.every_n_epochs(runner, self.interval):
# sleep to avoid possible deadlock
time.sleep(2.)
print_log(
f'Running Precise BN for {self.num_iters} iterations',
logger=runner.logger)
update_bn_stats(
runner.model,
self.dataloader,
self.num_iters,
logger=runner.logger)
print_log('BN stats updated', logger=runner.logger)
# sleep to avoid possible deadlock
time.sleep(2.)
| python |
from flask import Flask, jsonify
import RPi.GPIO as GPIO
app = Flask(__name__)
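# Note: the endpoints drive the pin with inverted logic (/off writes HIGH, /on writes
# LOW), which suggests the attached hardware (e.g. a relay module) is active-low.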
@app.route('/off/<int:pin>')
def getOff(pin):
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
state = GPIO.input(pin)
GPIO.output(pin,GPIO.HIGH)
return jsonify({'status':'LOW', 'pin_no':pin})
@app.route('/on/<int:pin>')
def getOn(pin):
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
state = GPIO.input(pin)
GPIO.output(pin,GPIO.LOW)
return jsonify({'status':'HIGH', 'pin_no':pin})
@app.route('/status/<int:pin>')
def getStatus(pin):
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
state = GPIO.input(pin)
if state == 0:
#GPIO.output(pin,GPIO.HIGH)
return jsonify({'status':'HIGH', 'pin_no':pin})
else:
#GPIO.output(pin,GPIO.LOW)
return jsonify({'status':'LOW', 'pin_no':pin})
if __name__ =='__main__':
app.run(host='0.0.0.0', debug=True)
| python |
#!/usr/bin/env python3
# -*-encoding: utf-8-*-
# author: Valentyn Kofanov
from kivy.lang import Builder
from kivy.uix.screenmanager import Screen
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.recycleview import RecycleView
Builder.load_file("style.kv")
CHATS = ["Alex", "Masha", "Petya", "Vasya", "Vilatiy", "Misha", "John", "Michael", "Alexander", "Fedor", "111", "333"]
class RV(RecycleView):
def __init__(self, chats=CHATS, **kwargs):
super(RV, self).__init__(**kwargs)
self.data = [{'text': str(chat)} for chat in chats]
class DialogScreen(Screen):
def refresh(self):
print(self.chat_list.selected.text)
| python |
from data_exploration import explore_over_time, frame_count, generate_summary_plot
from file_contents_gen import get_batches_multi_dir, multi_dir_data_gen
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Activation, Flatten, Dense, Lambda, Dropout
# from tf.keras.layers import InputLayer
from keras.layers import Cropping2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
# from import tf as ktf
# import tensorflow as tf
# import keras
import matplotlib.pyplot as plt
import numpy as np
import keras
# choose the operations to perform
# load_prev_model can be combined with train_model to 'add on' to the knowledge of the network
produce_graph = True
load_prev_model = True
train_model = True # train the model using the data in the dirs variable
summary_plot = False # generate a matplotlib figure that includes plots of steering angle, throttle, braking, etc. and sample images from the 3 cameras
compile_statistics = False # generate statistics that indicate the distribution of the data by steering angle
dirs = \
[
"../data/P3-sim-data-udacity/data",
"../data/P3-sim-data-hard-left-0"
]
for d in dirs:
print('frame count for', d, 'is: ', frame_count(d))
if summary_plot:
    # summarize the first data directory
    images, sw_angles, throttle, brake_input, speeds = explore_over_time(dirs[0], 300)
generate_summary_plot(images, sw_angles, throttle, brake_input, speeds)
if train_model:
model = Sequential() # use the keras Sequential model type
image_shape = (70, 160, 3)# images[0,0,:,:].shape
# model.add(__import__('tensorflow').keras.layers.InputLayer(input_shape=(None, 160, 320, 3)))
# started with the NVIDIA End-to-End SDC network described here: https://devblogs.nvidia.com/deep-learning-self-driving-cars/
# made adjustments to the sizes of the layers by trial and error and used greyscale instead of colour images
model.add(Lambda(lambda x: __import__('tensorflow').image.rgb_to_grayscale(x)))
# crop out parts of the top and bottom of the image, since these parts of the image do not seem necessary
# for steering the car.
model.add(Cropping2D(cropping=( (60,25), (0,0) )))
# use a keras Lambda to resize the image
model.add(Lambda(lambda x: __import__('keras').backend.tf.image.resize_images(x, (50,160))))
# change the range of the data to [-1.0, 1.0]
model.add(Lambda(lambda x: (x / 255.0 - 0.5) * 2))
# add the convolutional layers
model.add(Conv2D(filters=12, kernel_size=5, strides=(1,1), activation='relu'))
model.add(Conv2D(filters=24, kernel_size=5, strides=(2,2), activation='relu'))
model.add(Conv2D(filters=36, kernel_size=5, strides=(2,2), activation='relu'))
model.add(Conv2D(filters=48, kernel_size=3, strides=(1,1), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=3, strides=(1,1), activation='relu'))
# flatten the convolutional layers to connect to the Fully Connected layers
model.add(Flatten())
model.add(Dense(400, activation='relu'))
model.add(Dense(600, activation='relu'))
model.add(Dense(300, activation='relu'))
model.add(Dense(100, activation='relu'))
# use dropout to improve generalization to other data
model.add(Dropout(0.5))
model.add(Dense(1)) #steering wheel angle is the output
# features = images[:,0,:,:]
# labels = sw_angles
opt = keras.optimizers.Adam(lr=0.0001) # use the Adam Optimizer - was successful in P2 and worked well here too
# get the 'generator' for the data
# In the multi_dir_data_gen function, I included an option to split the data into Training and Validation data
# the keras fit function also provides options to split data into training/validation sets
data_gen_all = multi_dir_data_gen(dirs, 64, 0.2, "ALL")
# data_gen_train = multi_dir_data_gen(dirs, 64, 0.2, "TRAIN")
# data_gen_valid = multi_dir_data_gen(dirs, 64, 0.2, "VALIDATION")
model.compile(loss='mse', optimizer=opt)
if load_prev_model:
model = keras.models.load_model('model.h5')
if produce_graph:
print(model.summary())
from keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)
exit()
# I attempted to use model.fit_generator but there were some problems
# using my data generator with custom batch size and the normal fit function from keras
# works well anyway
for features, labels in data_gen_all:
print('features shape: ', features.shape)
print('labels shape: ', labels.shape)
model.fit(features, labels, validation_split=0.2, shuffle=True, epochs=5, batch_size=64)
# save the model for later recall
model.save('model.h5')
if compile_statistics:
#define an array of bin boundaries and an array of counts (initialized to 0)
bins = np.arange(-10.0,10.0,0.1)
counts = np.arange(-10.0,10.0,0.1) * 0.0
# count greater than, less than and equal to 0 steering angles to validate the data augmentation that is built into the generator
count_gt_zero = 0
count_lt_zero = 0
count_eq_zero = 0
# this loop generates the histogram counts
for batch_ctr, images, sw_angles, throttle, brake_input, speeds in get_batches_multi_dir(dirs, 128):
for sw_angle in sw_angles:
if sw_angle > 0.0 or sw_angle < 0.0:
count_lt_zero = count_lt_zero + 1
count_gt_zero = count_gt_zero + 1
else:
count_eq_zero = count_eq_zero + 2
for sw_angle in sw_angles:
histo_loc = np.argmax(bins >= sw_angle)
counts[histo_loc] = counts[histo_loc] + 1
for sw_angle in sw_angles:
histo_loc = np.argmax(bins >= -1.0 * sw_angle)
counts[histo_loc] = counts[histo_loc] + 1
print('count_gt_zero: ', count_gt_zero)
print('count_lt_zero: ', count_lt_zero)
print('count_eq_zero: ', count_eq_zero)
# plot the histogram
fig = plt.figure()
ax=plt.subplot(111)
plt.plot(bins, counts)
ax.set_xticks(np.arange(-10,10,0.1), minor=True)
ax.set_xticks(np.arange(-10,10,1.0), minor=False)
# ax.set_yticks(np.arange(0, np.max(counts)), minor=True)
plt.grid(which='major', axis='both')
plt.grid(which='minor', axis='both')
plt.show()
# model.fit_generator(data_gen_train, validation_data=data_gen_valid, samples_per_epoch=10, epochs=10)
# //steering: -1 to 1
# // throttle 0 to 1
# // brake 0 1
# // speed 0 30
| python |
def print_title():
print('---------------------------')
print(' HELLO WORLD')
print('---------------------------')
print()
def main():
print_title()
name_input = input('What is your name? ')
print('Hello ' + name_input)
if __name__ == '__main__':
main() | python |
#!/usr/bin/python3
"""
Module for the function to_json_string(my_obj) that returns the JSON
representation of an object (string).
"""
import json
def to_json_string(my_obj):
"""
Function that returns the JSON representation of an object.
Args:
        my_obj (str): Source object
Returns:
JSON representation.
"""
return json.dumps(my_obj)
| python |
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from apps.evaluation.serializers.monthlyMeliaEvaluationSerliazer import MonthlyMeliaEvaluationSerliazer
from apps.hotel.models import Hotel
from backend.extraPermissions import IsFoodAndDrinkBoss
from apps.evaluation.models import MonthlyGastronomyEvaluation, MonthlyMeliaEvaluation
from apps.payTime.models import PayTime
from apps.workers.models import Worker
from backend.utils import insertion_sort
def getGastronomyEvaluationOnPayTime(pay_time: PayTime, worker: Worker):
if MonthlyGastronomyEvaluation.objects.filter(payTime__id=pay_time.id,
evaluateWorker__no_interno=worker.no_interno).exists():
model = MonthlyGastronomyEvaluation.objects.get(payTime__id=pay_time.id,
evaluateWorker__no_interno=worker.no_interno)
return model.id
return None
def getMeliaEvaluationOnPayTime(pay_time: PayTime, worker: Worker):
if MonthlyMeliaEvaluation.objects.filter(payTime__id=pay_time.id,
evaluateWorker__no_interno=worker.no_interno).exists():
model = MonthlyMeliaEvaluation.objects.get(payTime__id=pay_time.id,
evaluateWorker__no_interno=worker.no_interno)
return model.id
return None
@api_view(['POST'])
@permission_classes([IsAuthenticated, IsFoodAndDrinkBoss])
def getMonthlyPerformanceEvaluationReport(request):
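    # Build a per-worker report of monthly Melia evaluations for the requested hotel
    # and pay period: evaluated workers come first (sorted), workers without an
    # evaluation are appended at the end.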
data = request.data
try:
hotel = Hotel.objects.get(pk=int(data.get('hotelId')))
payTime = PayTime.objects.get(pk=int(data.get('payTimeId')))
listToOrder, listNone = [], []
for worker in hotel.workers.filter(activo=True):
evalId = getMeliaEvaluationOnPayTime(payTime, worker)
meliaEvaluation = None if evalId is None else MonthlyMeliaEvaluation.objects.get(pk=evalId)
serializer = None if evalId is None else MonthlyMeliaEvaluationSerliazer(meliaEvaluation, many=False).data
newItem = {
'worker': str(worker.nombreCompleto()).title(),
'meliaEvaluation': serializer,
'total': None if meliaEvaluation is None else meliaEvaluation.totalPoints(),
'discount': None if meliaEvaluation is None else meliaEvaluation.getDisscount(),
}
if newItem['meliaEvaluation'] is None:
listNone.append(newItem)
else:
listToOrder.append(newItem)
insertion_sort(listToOrder)
listToReturn = listToOrder + listNone
return Response(listToReturn, status=status.HTTP_200_OK)
except Exception as e:
return Response({"detail": e.args[0]}, status=status.HTTP_400_BAD_REQUEST)
| python |
from json import loads
from fastapi.testclient import TestClient
from os.path import abspath, dirname, join
from main import app
class TestTopicsCRUDAsync:
def test_bearer_token(self):
client = TestClient(app)
        # Please create a new user with the credentials from "credentials.json"
with open(join(dirname(abspath(__file__)), 'data', 'credentials.json'),
mode='r', encoding='utf-8') as f:
example_user = loads(f.read())
data = {
'username': example_user['email'],
'password': example_user['password'],
'grant_type': '', 'scope': '', 'client_id': '', 'client_secret': ''
}
headers = {
'accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
response = client.post(f"/auth/token", data=data, headers=headers)
try:
assert response.status_code == 200
assert isinstance(response.json()['access_token'], str)
except (KeyError, AttributeError) as e:
raise ValueError("There is no user who have already registered with this email address.") from e
class TestTopicsErrorsAsync:
def test_create_user_fail(self):
client = TestClient(app)
data = '{\n "email": "[email protected]",\n "password": "string",\n "is_root": false\n}'
headers = {
'accept': 'application/json',
'Content-Type': 'application/json',
}
client.post(f"/auth/users/", data=data, headers=headers)
response = client.post(f"/auth/users/", data=data, headers=headers)
assert response.status_code == 400
def test_bearer_token_fail(self):
client = TestClient(app)
data = {
'username': 'test', 'password': 'test',
'grant_type': '', 'scope': '', 'client_id': '', 'client_secret': ''
}
headers = {
'accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
response = client.post(f"/auth/token", data=data, headers=headers)
assert response.status_code == 400
assert response.json()['detail'] == 'There is no user who have already registered with this email address.'
| python |
from RecSearch.DataWorkers.Abstract import DataWorkers
from RecSearch.ExperimentSupport.ExperimentData import ExperimentData
import pandas as pd
class Metrics(DataWorkers):
"""
Metric class adds metric data.
"""
# Configs inline with [[NAME]]
@classmethod
def set_config(cls):
additional_config = {'required': {'precedence': {'validate': 'integer(default=40)'}}}
cls.cfg = super().update_config(cls.cfg, additional_config)
def __init__(self, name: str, data_worker_config: dict, Data: ExperimentData):
self.class_name = self.get_classname()
super().__init__(self.class_name, name, data_worker_config, Data)
@classmethod
def get_classname(cls):
return cls.__name__
def get_metrics(self, column_name: str, whos: pd.DataFrame, parameters: dict) -> pd.DataFrame:
"""
        Get metric values for every id in whos.index
:param column_name: output column name
:param whos: who(s) [with related data] to iterate to get metrics
:param parameters: additional parameters
:return: dataframe with column containing metric data for each who in who(s)
"""
df = pd.DataFrame()
for who in whos.itertuples():
data = self.Interface.iget_metric(who._asdict(), **parameters)
df = df.append(pd.Series(data=[v for v in data.values()],
index=['M__' + column_name + k for k in data.keys()], name=who[0]))
return df
def do_work(self):
return self.get_metrics(self.name, self.eval, self.parameters)
Metrics.set_config()
| python |
#!/usr/bin/env python
# coding: utf-8
import logging
import os
import glob
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from apiclient.http import MediaFileUpload
DIRECTORY = '/upload'
SCOPES = [
'https://www.googleapis.com/auth/documents',
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/drive.file'
]
PORT = int(os.environ.get('PORT', 0))
def get_credentials(port: int = 0):
"""Shows basic usage of the Docs API.
Prints the title of a sample document.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('/credentials/token.pickle'):
with open('/credentials/token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
if not os.path.exists('/credentials/credentials.json'):
raise FileNotFoundError(
'credentials.json does not exist. ' +
'Please follow README instruction ' +
'(and go to https://developers.google.com/docs/api/quickstart/python)'
)
flow = InstalledAppFlow.from_client_secrets_file('/credentials/credentials.json', SCOPES)
creds = flow.run_local_server(port=port)
# Save the credentials for the next run
with open('/credentials/token.pickle', 'wb') as token:
pickle.dump(creds, token)
return creds
def upload_images(files, logger):
drive = build('drive', 'v3', credentials=get_credentials(PORT))
uploaded_files = []
file_metadata = {'name': 'photo.png'}
batch = drive.new_batch_http_request()
user_permission = {
'type': 'anyone',
'role': 'reader',
}
logger.info('Uploading images')
for file in files:
logger.info('Uploading %s' % file)
media = MediaFileUpload(file, mimetype='image/png')
file = drive.files().create(body=file_metadata, media_body=media, fields='id').execute()
batch.add(
drive.permissions().create(
fileId=file.get('id'),
body=user_permission,
fields='id',
)
)
uploaded_files.append(file.get('id'))
logger.info('Allowing images access')
batch.execute()
return uploaded_files
def delete_uploaded_files(uploaded_files, logger):
drive = build('drive', 'v3', credentials=get_credentials(PORT))
logger.info('Deleting uploaded images')
for file_id in uploaded_files:
logger.info('Deleting %s' % file_id)
drive.files().delete(fileId=file_id).execute()
def create_document(title, files, logger):
docs = build('docs', 'v1', credentials=get_credentials(PORT))
uploaded_files = upload_images(files, logger)
doc = docs.documents().create(body={'title': title}).execute()
# raise ValueError(doc)
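    # The first request of the batchUpdate zeroes out all page margins so that each
    # inserted image can span the full page.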
requests_list = [{
'updateDocumentStyle': {
'documentStyle': {
'marginTop': {
'magnitude': 0,
'unit': 'PT',
},
'marginBottom': {
'magnitude': 0,
'unit': 'PT',
},
'marginRight': {
'magnitude': 0,
'unit': 'PT',
},
'marginLeft': {
'magnitude': 0,
'unit': 'PT',
},
},
'fields': 'marginTop,marginBottom,marginRight,marginLeft',
},
}]
for file_id in uploaded_files:
requests_list.append({
'insertInlineImage': {
'location': {
'index': 1
},
'uri':
'https://docs.google.com/uc?id=' + file_id,
'objectSize': {
'height': {
'magnitude': 848,
'unit': 'PT'
},
'width': {
'magnitude': 595,
'unit': 'PT'
}
}
}
})
logger.info('Creating document')
docs.documents().batchUpdate(documentId=doc.get('documentId'), body={'requests': requests_list}).execute()
delete_uploaded_files(uploaded_files, logger)
if __name__ == "__main__":
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
files = [file for file in glob.glob(glob.escape(DIRECTORY) + '/**/*', recursive=True)]
for file_path in files:
logger.info("Converting %s" % file_path)
bashCommand = 'convert -quality 100 -density 150 ' + file_path + ' /app/tmp/%04d.png'
os.system(bashCommand)
files_images = sorted(
[file_image for file_image in glob.glob(glob.escape('/app/tmp') + '/**/*', recursive=True)],
reverse=True
)
create_document(title=os.path.basename(file_path), files=files_images, logger=logger)
logger.info("Removing %s" % file_path)
os.remove(file_path)
for file in files_images:
logger.info('Removing %s' % file)
os.remove(file)
logger.info("Done %s" % file_path)
| python |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Wu Tangsheng(lanbaba) <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os, threading, logging
import os.path
from Queue import *
import hashlib
from ossync.lib import helper
from ossync.lib import queue_model
class QueueThread(threading.Thread):
""" 此线程的作用是将bucket,root, path压入要上传的队列,队列元素格式:
"bucket::root::relpath::action::life"
其中action表示文件是新建还是修改还是删除;life表示重入次数
"""
def __init__(self, bucket, dirs, queue, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
self.bucket = bucket
self.queue = queue
self.dirs = dirs
self._terminate = False
self.logger = logging.getLogger('app')
dbpath = 'db/ossync.db'
self.qm = queue_model.QueueModel(dbpath)
def terminate(self):
self._terminate = True
def is_el_queued(self, hashcode):
row = self.qm.get(hashcode)
if row:
return True
return False
def run(self):
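        # Walk every configured directory; for each file not already queued (tracked
        # by its element hash) persist a record through the queue model and push
        # "bucket::root::relpath::action" onto the in-memory upload queue.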
files = {}
for d in self.dirs:
files[d] = list(helper.walk_files(os.path.normpath(d), yield_folders = True))
if len(files) > 0:
self.qm.open()
self.logger.info('Queue path ...')
for i in files:
if len(files[i]) > 0:
for path in files[i]:
relpath = os.path.relpath(path, i) # 相对于root的相对路径
el = self.bucket + '::' + i+ '::' + relpath + '::C'
hashcode = helper.calc_el_md5(i, relpath, self.bucket)
if not self.is_el_queued(hashcode):
data={"root": i, "relpath": relpath, "bucket": self.bucket, "action": 'C', "status": 0, "retries" : 0}
self.qm.save(data)
                            # queue el; el: element of the queue, formatted as "bucket::root::relpath::action"
try:
self.queue.put(el, block = True, timeout = 1)
msg = 'queue element:' + el
#print msg
self.logger.info(msg)
except Full as e:
self.queue.put(None)
self.logger.error(e.message)
self.qm.close()
self.queue.put(None)
#self.queue.join()
return
| python |
import os
import midinormalizer
from mido import MidiFile, MetaMessage
from MusicRoll import *
def iter_midis_in_path(folder_path):
for root, dirs, files in os.walk(folder_path):
for file in files:
if file.endswith(".mid") or file.endswith(".MID"):
yield (os.path.join(root, file), file)
def perform(path):
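    # Normalize a single MIDI file into a MusicRoll and stamp it with the current
    # normalizer md5 so outdated .mrl files can be detected and regenerated.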
print("Processing '{0}'".format(path))
roll = MusicRoll(path, labels = [], tapes = [])
midi = MidiFile(path)
midinormalizer.MidiNormalizer(roll, midi).normalize(chop_loss_percent = 0.002) # 0.2 percent
roll.set_hash(midinormalizer.md5())
roll.dump(self_contained = False)
# from pycallgraph import PyCallGraph
# from pycallgraph.output import GraphvizOutput
if __name__ == "__main__":
# with PyCallGraph(output=GraphvizOutput()):
for path, file in iter_midis_in_path('.'):
roll_name = path[:-4] + '.mrl'
# no music roll file?
if not os.path.isfile(roll_name):
perform(path)
else:
# file is outdated?
old_roll = pickle.load(open(roll_name, 'rb'))
if not (hasattr(old_roll, 'md5') and old_roll.md5 == midinormalizer.md5()):
perform(path)
else:
print("Skipping '{0}'".format(file))
| python |
from ekstep_data_pipelines.audio_transcription.transcription_sanitizers import (
BaseTranscriptionSanitizer,
)
from ekstep_data_pipelines.common.utils import get_logger
LOGGER = get_logger("GujratiTranscriptionSanitizer")
class GujratiSanitizer(BaseTranscriptionSanitizer):
@staticmethod
def get_instance(**kwargs):
return GujratiSanitizer()
def __init__(self, *args, **kwargs):
pass
def sanitize(self, transcription):
pass
| python |
from nose.plugins.attrib import attr
from gilda import ground
from indra.sources import hypothesis
from indra.sources import trips
from indra.statements import *
from indra.sources.hypothesis.processor import HypothesisProcessor, \
parse_context_entry, parse_grounding_entry, get_text_refs
from indra.sources.hypothesis.annotator import statement_to_annotations, \
evidence_to_annotation, get_annotation_text
@attr('nonpublic', 'slow', 'notravis')
def test_process_indra_annnotations():
hp = hypothesis.process_annotations(reader=trips.process_text)
assert hp.statements
for stmt in hp.statements:
print(stmt)
print(stmt.evidence[0])
def test_grounding_annotation():
hp = HypothesisProcessor(annotations=[grounding_annot_example])
hp.extract_groundings()
assert hp.groundings['HCQ'] == {'CHEBI': 'CHEBI:5801'}
assert hp.groundings['Plaquenil'] == {'CHEBI': 'CHEBI:5801'}
@attr('slow')
def test_statement_annotation():
hp = HypothesisProcessor(annotations=[statement_annot_example],
reader=trips.process_text)
hp.extract_statements()
assert len(hp.statements) == 1
stmt = hp.statements[0]
assert stmt.subj.name == 'AMPK'
assert stmt.obj.name == 'STAT3'
context = stmt.evidence[0].context
assert context.location.name == 'nucleus', context
assert context.location.db_refs == {'GO': 'GO:0005634', 'TEXT': 'nucleus'}
assert context.organ.name == 'Liver', context
assert context.organ.db_refs == {'MESH': 'D008099', 'TEXT': 'liver'}
def test_get_text_refs_pmid():
url = 'https://www.ncbi.nlm.nih.gov/pubmed/32196952'
refs = get_text_refs(url)
assert refs.get('PMID') == '32196952', refs
assert refs.get('URL') == url, refs
def test_get_text_refs_pmcid():
url = 'https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7071777/'
refs = get_text_refs(url)
assert refs.get('PMCID') == 'PMC7071777', refs
assert refs.get('URL') == url, refs
def test_get_text_refs_biorxiv():
url = 'https://www.biorxiv.org/content/10.1101/2020.04.16.044016v1'
refs = get_text_refs(url)
assert refs.get('URL') == url, refs
assert refs.get('DOI') == '10.1101/2020.04.16.044016', refs
url = 'https://www.biorxiv.org/content/10.1101/2020.04.16.044016v1.full'
refs = get_text_refs(url)
assert refs.get('URL') == url, refs
assert refs.get('DOI') == '10.1101/2020.04.16.044016', refs
def test_parse_grounding_entry():
entry = '[a and b] -> CHEBI:CHEBI:1234|PUBCHEM:5678'
grounding = parse_grounding_entry(entry)
assert grounding == {'a and b': {'CHEBI': 'CHEBI:1234',
'PUBCHEM': '5678'}}, grounding
def test_parse_invalid_grounding_entry():
entries = ['xxx', '[xxx]->a', 'xxx -> a', 'xxx -> a:1&b:4']
for entry in entries:
assert parse_grounding_entry(entry) is None
def test_parse_context_entry():
context_dict = parse_context_entry('Cell type: antigen presenting cells',
ground, 'antigen presenting cells')
assert len(context_dict) == 1
assert 'cell_type' in context_dict
ref_context = context_dict['cell_type']
assert ref_context.name == 'Antigen-Presenting Cells', ref_context
assert ref_context.db_refs.get('MESH') == 'D000938'
assert ref_context.db_refs.get('TEXT') == 'antigen presenting cells'
def test_parse_invalid_context_entry():
entries = ['xxx: yyy', 'Disease:something', 'xxx']
for entry in entries:
assert parse_context_entry(entry, ground) is None
def test_parse_ungrounded_context_entry():
entry = 'Cell type: CD4+ T-cells'
context_dict = parse_context_entry(entry, ground)
assert len(context_dict['cell_type'].db_refs) == 1, \
context_dict['cell_type'].db_refs
assert context_dict['cell_type'].db_refs['TEXT'] == \
'CD4+ T-cells', context_dict['cell_type'].db_refs
grounding_annot_example = {
'uri': 'https://en.wikipedia.org/wiki/Hydroxychloroquine',
'text': '[Plaquenil] -> CHEBI:CHEBI:5801\n\n[HCQ] -> CHEBI:CHEBI:5801',
'tags': ['gilda'],
'target': [{'source': 'https://en.wikipedia.org/wiki/Hydroxychloroquine'}],
'document': {'title': ['Hydroxychloroquine - Wikipedia']},
}
statement_annot_example = {
'id': '4nBYAmqwEeq1ujf13__Y-w',
'uri': 'https://www.ncbi.nlm.nih.gov/pubmed/32190173',
'text': 'AMPK activates STAT3\nOrgan: liver\nLocation: nucleus',
'tags': [],
}
def test_get_annotation_text():
# Test statement with multiple grounded agents
stmt = Inhibition(
Agent('vemurafenib', db_refs={'CHEBI': 'CHEBI:63637'}),
Agent('BRAF', db_refs={'HGNC': '1097'})
)
annot_text = get_annotation_text(stmt, annotate_agents=True)
assert annot_text == \
'[vemurafenib](https://identifiers.org/CHEBI:63637) inhibits ' \
'[BRAF](https://identifiers.org/hgnc:1097).', annot_text
annot_text = get_annotation_text(stmt, annotate_agents=False)
assert annot_text == 'Vemurafenib inhibits BRAF.', annot_text
# Test statement with ungrounded and None agents
stmt = Phosphorylation(None, Agent('X'))
annot_text = get_annotation_text(stmt, annotate_agents=True)
assert annot_text == 'X is phosphorylated.', annot_text
annot_text = get_annotation_text(stmt, annotate_agents=False)
assert annot_text == 'X is phosphorylated.', annot_text
def test_evidence_to_annot():
# No evidence text
ev = Evidence(source_api='reach')
assert evidence_to_annotation(ev) is None
# No text refs
ev = Evidence(source_api='reach', text='Some text')
assert evidence_to_annotation(ev) is None
# Various text refs
ev = Evidence(source_api='reach', text='Some text',
pmid='12345')
annot = evidence_to_annotation(ev)
assert annot == {'url': 'https://pubmed.ncbi.nlm.nih.gov/12345/',
'target_text': 'Some text',
'tags': ['reach']}, annot
ev = Evidence(source_api='reach', text='Some text',
pmid=None, text_refs={'PMCID': '12345'})
annot = evidence_to_annotation(ev)
assert annot['url'] == 'https://www.ncbi.nlm.nih.gov/pmc/articles/12345/'
ev = Evidence(source_api='reach', text='Some text',
pmid=None, text_refs={'URL': 'https://wikipedia.org'})
annot = evidence_to_annotation(ev)
assert annot['url'] == 'https://wikipedia.org'
def test_statement_to_annotations():
evs = [
# This will get filtered out
Evidence(source_api='reach'),
# This will get added as an annotation
Evidence(source_api='sparser', text='some text 1',
pmid='12345'),
]
stmt = Dephosphorylation(None, Agent('X'), evidence=evs)
annots = statement_to_annotations(stmt)
assert len(annots) == 1
assert annots[0]['target_text'] == 'some text 1'
| python |
from __future__ import absolute_import, print_function, unicode_literals
from xml.dom.minidom import parseString
from jinja2 import Template
from .forward_parameter import ForwardParametersAction
from .interface import Action
from .multi_action import MultiAction
_SYNC_DESCRIPTION_TEMPLATE = Template(""" <hudson.plugins.descriptionsetter.DescriptionSetterBuilder plugin="[email protected]">
<regexp></regexp>
<description>{{ description | escape }}</description>
</hudson.plugins.descriptionsetter.DescriptionSetterBuilder>""")
class MultiSyncAction(Action):
"""
A MultiSync action wraps many sync actions
    in order to generate a coherent description-setting
    build step.
"""
def __init__(self, output_format, children):
self.multi = MultiAction(output_format, children)
self.children = children
self.output_format = output_format
def generate_parameters(self):
return self.multi.generate_parameters()
def generate_build_steps(self):
return self.description() + self.multi.generate_build_steps() + self.generate_parameter_forwarding_step()
def generate_post_build_steps(self):
return self.multi.generate_post_build_steps()
def description(self):
description_lines = ["<div>"]
child_descriptions = "{}".format("<br/>\n".join([child.description() for child in self.children]))
description_lines.append(child_descriptions)
description_lines.append("</div>")
return [_SYNC_DESCRIPTION_TEMPLATE.render(description="\n".join(description_lines))]
def generate_parameter_forwarding_step(self):
"""
This is a terrible hack to get around the fact that
we take structured data from the configuration and
immediately flatten it into XML strings in these
generators. A proper approach would keep the data
structured and, perhaps, do the conversion to XML
        parameter definitions later on, so that we would not
        have to parse names back out of XML here. That would
        challenge a basic assumption of the generators; we can
        revisit it in the future if SJB is still around. A
        minimal parsing illustration is sketched in the
        comments at the end of this module.
"""
parameter_names = []
for parameter in self.generate_parameters():
parameter_name = (
parseString(parameter).
getElementsByTagName("hudson.model.StringParameterDefinition")[0].
getElementsByTagName("name")[0].
childNodes[0].nodeValue
)
if parameter_name in parameter_names:
continue
parameter_names.append(parameter_name)
return ForwardParametersAction(parameter_names).generate_build_steps()
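# Hedged illustration of the XML round-trip described in the docstring of
# generate_parameter_forwarding_step() above; the sample XML is made up for
# demonstration and is not the output of a real generator. Given
#   <hudson.model.StringParameterDefinition><name>REPO</name></hudson.model.StringParameterDefinition>
# the chain
#   parseString(sample).getElementsByTagName("hudson.model.StringParameterDefinition")[0]
#       .getElementsByTagName("name")[0].childNodes[0].nodeValue
# evaluates to "REPO", which is how parameter names are recovered for forwarding.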
| python |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
import attr
from string import Formatter
from ._core import Enum
class EmojiSize(Enum):
"""Used to specify the size of a sent emoji"""
LARGE = "369239383222810"
MEDIUM = "369239343222814"
SMALL = "369239263222822"
class MessageReaction(Enum):
"""Used to specify a message reaction"""
LOVE = "😍"
SMILE = "😆"
WOW = "😮"
SAD = "😢"
ANGRY = "😠"
YES = "👍"
NO = "👎"
@attr.s(cmp=False)
class Mention(object):
"""Represents a @mention"""
#: The thread ID the mention is pointing at
thread_id = attr.ib()
#: The character where the mention starts
offset = attr.ib(0)
#: The length of the mention
length = attr.ib(10)
@attr.s(cmp=False)
class Message(object):
"""Represents a Facebook message"""
#: The actual message
text = attr.ib(None)
#: A list of :class:`Mention` objects
mentions = attr.ib(factory=list, converter=lambda x: [] if x is None else x)
#: A :class:`EmojiSize`. Size of a sent emoji
emoji_size = attr.ib(None)
#: The message ID
uid = attr.ib(None, init=False)
#: ID of the sender
author = attr.ib(None, init=False)
#: Timestamp of when the message was sent
timestamp = attr.ib(None, init=False)
#: Whether the message is read
is_read = attr.ib(None, init=False)
    #: A list of IDs of the people who read the message; works only with :func:`fbchat.Client.fetchThreadMessages`
read_by = attr.ib(factory=list, init=False)
#: A dict with user's IDs as keys, and their :class:`MessageReaction` as values
reactions = attr.ib(factory=dict, init=False)
#: A :class:`Sticker`
sticker = attr.ib(None)
#: A list of attachments
attachments = attr.ib(factory=list, converter=lambda x: [] if x is None else x)
#: A list of :class:`QuickReply`
quick_replies = attr.ib(factory=list, converter=lambda x: [] if x is None else x)
#: Whether the message is unsent (deleted for everyone)
unsent = attr.ib(False, init=False)
@classmethod
def formatMentions(cls, text, *args, **kwargs):
"""Like `str.format`, but takes tuples with a thread id and text instead.
Returns a `Message` object, with the formatted string and relevant mentions.
```
>>> Message.formatMentions("Hey {!r}! My name is {}", ("1234", "Peter"), ("4321", "Michael"))
<Message (None): "Hey 'Peter'! My name is Michael", mentions=[<Mention 1234: offset=4 length=7>, <Mention 4321: offset=24 length=7>] emoji_size=None attachments=[]>
>>> Message.formatMentions("Hey {p}! My name is {}", ("1234", "Michael"), p=("4321", "Peter"))
<Message (None): 'Hey Peter! My name is Michael', mentions=[<Mention 4321: offset=4 length=5>, <Mention 1234: offset=22 length=7>] emoji_size=None attachments=[]>
```
"""
result = ""
mentions = list()
offset = 0
f = Formatter()
field_names = [field_name[1] for field_name in f.parse(text)]
automatic = "" in field_names
i = 0
for (literal_text, field_name, format_spec, conversion) in f.parse(text):
offset += len(literal_text)
result += literal_text
if field_name is None:
continue
if field_name == "":
field_name = str(i)
i += 1
elif automatic and field_name.isdigit():
raise ValueError(
"cannot switch from automatic field numbering to manual field specification"
)
thread_id, name = f.get_field(field_name, args, kwargs)[0]
if format_spec:
name = f.format_field(name, format_spec)
if conversion:
name = f.convert_field(name, conversion)
result += name
mentions.append(
Mention(thread_id=thread_id, offset=offset, length=len(name))
)
offset += len(name)
message = cls(text=result, mentions=mentions)
return message
| python |
from setuptools import setup
setup(
name='pyflask',
version='1.0',
author='liuwill',
author_email='[email protected]',
url='http://www.liuwill.com',
install_requires=[
'flask>=0.12.1',
'Flask-SocketIO>=2.8.6',
'Flask-Cors>=3.0.2',
'Jinja2>=2.9.6'
],
packages=["chat"],
#packages=['']
#py_modules=['foo'],
scripts=["main.py"],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
)
| python |
import torch
import torch.nn as nn
class DEM(nn.Module):
def __init__(self, channel):
""" Detail Emphasis Module """
super(DEM, self).__init__()
self.conv1 = nn.Sequential(nn.ReflectionPad2d(1),
nn.Conv2d(channel, channel, kernel_size=3, stride=1, padding=0),
nn.BatchNorm2d(channel),
nn.ReLU(True))
self.global_path = nn.Sequential(nn.AdaptiveAvgPool2d(1),
nn.Conv2d(channel, channel, kernel_size=1, stride=1, padding=0),
nn.ReLU(True),
nn.Conv2d(channel, channel, kernel_size=1, stride=1, padding=0),
nn.Sigmoid())
def forward(self, x):
"""
inputs :
x : input feature maps(B X C X H X W)
returns :
out : recalibrated feature + input feature
attention: B X C X 1 X 1
"""
out = self.conv1(x)
attention = self.global_path(out)
return out + out * attention.expand_as(out)
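# Minimal usage sketch (for illustration only; the batch size, channel count and
# spatial size below are assumptions, not values from any particular model).
if __name__ == "__main__":
    dem = DEM(channel=64)
    feats = torch.randn(2, 64, 32, 32)  # B x C x H x W
    out = dem(feats)
    print(out.shape)  # torch.Size([2, 64, 32, 32]) -- same shape as the input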
| python |
# encoding: utf-8
"""
test.py
"""
import sys
def data_from_body(body):
if sys.version_info[0] < 3:
return ''.join(chr(_) for _ in body)
# python3
return bytes(body)
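# Hedged example of the intended behaviour (the byte values below are made up):
# a list of integer byte values becomes a byte string on either major Python
# version.
if __name__ == "__main__":
    print(data_from_body([72, 105]))  # b'Hi' on Python 3, 'Hi' on Python 2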
| python |
def spring_summer(): # spring and summer function
global soils
global trees
dead_trees = [[[] for _ in range(N)] for _ in range(N)]
for i in range(N):
for j in range(N):
trees[i][j].sort()
for idx in range(len((trees[i][j]))):
if soils[i][j]>= trees[i][j][idx]:
soils[i][j] -= trees[i][j][idx]
trees[i][j][idx] += 1
else:
dead_trees[i][j].append(idx)
            for idx in range(len(dead_trees[i][j])-1,-1,-1): # when a tree dies only its own cell changes, so spring and summer are handled together cell by cell
temp = trees[i][j][dead_trees[i][j][idx]]
del trees[i][j][dead_trees[i][j][idx]]
soils[i][j] += temp//2
return
delta = [(0,1),(0,-1),(1,0),(-1,0),(1,1),(1,-1),(-1,-1),(-1,1)]
def autumn(): # autumn function
new_trees = [[[] for _ in range(N)] for _ in range(N)]
for i in range(N):
for j in range(N):
for tree in trees[i][j]:
if tree%5 == 0:
for dl in delta:
newi = i + dl[0]
newj = j + dl[1]
if -1<newi<N and -1<newj<N:
new_trees[newi][newj].append(1)
for i in range(N):
for j in range(N):
trees[i][j].extend(new_trees[i][j])
return
def winter(): # winter function
for i in range(N):
for j in range(N):
soils[i][j] +=fertilizer[i][j]
return
N, M, K = map(int, input().split())
fertilizer = [list(map(int, input().split())) for _ in range(N)]
soils = [[5 for _ in range(N)] for _ in range(N)]
trees = [[[] for _ in range(N)] for _ in range(N)]
for _ in range(M):
x, y, z = map(int, input().split())
trees[x-1][y-1].append(z)
for _ in range(K):
spring_summer()
autumn()
winter()
ans = 0
for i in range(N):
for j in range(N):
ans += len(trees[i][j])
print(ans)
| python |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# Define a flask app
app = Flask(__name__)
print(os.getcwd())
labels = ['Middle','Old','Young']
# Keras
from keras.models import load_model
from keras.preprocessing import image
from keras.models import model_from_json
from keras.optimizers import SGD
MODEL_PATH = 'C:/Users/rohan/Desktop/Work/Age_detection_dataset/App/model/model.json'
MODEL_PATH2 = 'C:/Users/rohan/Desktop/Work/Age_detection_dataset/App/model/model.h5'
# opening and store file in a variable
json_file = open('model/model.json','r')
loaded_model_json = json_file.read()
json_file.close()
# use Keras model_from_json to make a loaded model
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights('model/model.h5')
print("Loaded Model from disk")
opt = SGD(lr=0.01)
loaded_model.compile(loss='categorical_crossentropy',optimizer=opt,metrics=['accuracy'])
loaded_model._make_predict_function()
def model_predict(img_path,loaded_model):
images=[]
img = cv2.imread(img_path)
img = cv2.resize(img , (64,64))
images.append(img)
images = np.array(images, dtype="float") / 255.0
    pred = loaded_model.predict(images)  # predict on the full (1, 64, 64, 3) batch
return pred
@app.route('/', methods=['GET'])
def index():
# Main page
return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
# Get the file from post request
f = request.files['file']
# Save the file to ./uploads
basepath = os.path.dirname(__file__)
file_path = os.path.join(
basepath, 'uploads', secure_filename(f.filename))
f.save(file_path)
# Make prediction
preds = model_predict(file_path, loaded_model)
        i = preds.argmax(axis=1)
        vals = np.amax(preds, axis=1)
        perc_vals = vals * 100
        perc_vals_rounded = perc_vals.round(2)
        label_img = labels[int(i[0])]
        result = label_img + ": " + str(perc_vals_rounded[0])
return result
return None
if __name__ == '__main__':
app.run(debug=True)
| python |
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
## PyTorch implementation of CDCK2, CDCK5, CDCK6, speaker classifier models
# CDCK2: base model from the paper 'Representation Learning with Contrastive Predictive Coding'
# CDCK5: CDCK2 with a different decoder
# CDCK6: CDCK2 with a shared encoder and double decoders
# SpkClassifier: a simple NN for speaker classification
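# (A hedged, minimal usage sketch for CDCK2 is appended at the end of this file.)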
class CDCK6(nn.Module):
''' CDCK2 with double decoder and a shared encoder '''
def __init__(self, timestep, batch_size, seq_len):
super(CDCK6, self).__init__()
self.batch_size = batch_size
self.seq_len = seq_len
self.timestep = timestep
self.encoder = nn.Sequential( # downsampling factor = 160
nn.Conv1d(1, 512, kernel_size=10, stride=5, padding=3, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=8, stride=4, padding=2, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True)
)
self.gru1 = nn.GRU(512, 128, num_layers=1, bidirectional=False, batch_first=True)
self.Wk1 = nn.ModuleList([nn.Linear(128, 512) for i in range(timestep)])
self.gru2 = nn.GRU(512, 128, num_layers=1, bidirectional=False, batch_first=True)
self.Wk2 = nn.ModuleList([nn.Linear(128, 512) for i in range(timestep)])
self.softmax = nn.Softmax()
self.lsoftmax = nn.LogSoftmax()
def _weights_init(m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# initialize gru1 and gru2
for layer_p in self.gru1._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru1.__getattr__(p), mode='fan_out', nonlinearity='relu')
for layer_p in self.gru2._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru2.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(_weights_init)
def init_hidden1(self, batch_size): # initialize gru1
#return torch.zeros(1, batch_size, 128).cuda()
return torch.zeros(1, batch_size, 128)
def init_hidden2(self, batch_size): # initialize gru2
#return torch.zeros(1, batch_size, 128).cuda()
return torch.zeros(1, batch_size, 128)
def forward(self, x, x_reverse, hidden1, hidden2):
batch = x.size()[0]
nce = 0 # average over timestep and batch and gpus
        t_samples = torch.randint(self.seq_len//160-self.timestep, size=(1,)).long() # randomly pick time stamps. ONLY DO THIS ONCE FOR BOTH GRU.
# first gru
# input sequence is N*C*L, e.g. 8*1*20480
z = self.encoder(x)
# encoded sequence is N*C*L, e.g. 8*512*128
# reshape to N*L*C for GRU, e.g. 8*128*512
z = z.transpose(1,2)
encode_samples = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
for i in np.arange(1, self.timestep+1):
encode_samples[i-1] = z[:,t_samples+i,:].view(batch,512) # z_tk e.g. size 8*512
forward_seq = z[:,:t_samples+1,:] # e.g. size 8*100*512
output1, hidden1 = self.gru1(forward_seq, hidden1) # output size e.g. 8*100*256
c_t = output1[:,t_samples,:].view(batch, 128) # c_t e.g. size 8*256
pred = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
for i in np.arange(0, self.timestep):
linear = self.Wk1[i]
pred[i] = linear(c_t) # Wk*c_t e.g. size 8*512
for i in np.arange(0, self.timestep):
total = torch.mm(encode_samples[i], torch.transpose(pred[i],0,1)) # e.g. size 8*8
correct1 = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch))) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
# second gru
# input sequence is N*C*L, e.g. 8*1*20480
z = self.encoder(x_reverse)
# encoded sequence is N*C*L, e.g. 8*512*128
# reshape to N*L*C for GRU, e.g. 8*128*512
z = z.transpose(1,2)
encode_samples = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
for i in np.arange(1, self.timestep+1):
encode_samples[i-1] = z[:,t_samples+i,:].view(batch,512) # z_tk e.g. size 8*512
forward_seq = z[:,:t_samples+1,:] # e.g. size 8*100*512
output2, hidden2 = self.gru2(forward_seq, hidden2) # output size e.g. 8*100*256
c_t = output2[:,t_samples,:].view(batch, 128) # c_t e.g. size 8*256
pred = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
for i in np.arange(0, self.timestep):
linear = self.Wk2[i]
pred[i] = linear(c_t) # Wk*c_t e.g. size 8*512
for i in np.arange(0, self.timestep):
total = torch.mm(encode_samples[i], torch.transpose(pred[i],0,1)) # e.g. size 8*8
correct2 = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch))) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1.*batch*self.timestep
nce /= 2. # over two grus
accuracy = 1.*(correct1.item()+correct2.item())/(batch*2) # accuracy over batch and two grus
#print(torch.cat((output1, output2), dim=2).shape)
return accuracy, nce, hidden1, hidden2
def predict(self, x, x_reverse, hidden1, hidden2):
batch = x.size()[0]
# first gru
# input sequence is N*C*L, e.g. 8*1*20480
z1 = self.encoder(x)
# encoded sequence is N*C*L, e.g. 8*512*128
# reshape to N*L*C for GRU, e.g. 8*128*512
z1 = z1.transpose(1,2)
output1, hidden1 = self.gru1(z1, hidden1) # output size e.g. 8*128*256
# second gru
z2 = self.encoder(x_reverse)
z2 = z2.transpose(1,2)
output2, hidden2 = self.gru2(z2, hidden2)
return torch.cat((output1, output2), dim=2) # size (64, seq_len, 256)
#return torch.cat((z1, z2), dim=2) # size (64, seq_len, 512*2)
class CDCK5(nn.Module):
''' CDCK2 with a different decoder '''
def __init__(self, timestep, batch_size, seq_len):
super(CDCK5, self).__init__()
self.batch_size = batch_size
self.seq_len = seq_len
self.timestep = timestep
self.encoder = nn.Sequential( # downsampling factor = 160
nn.Conv1d(1, 512, kernel_size=10, stride=5, padding=3, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=8, stride=4, padding=2, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True)
)
self.gru = nn.GRU(512, 40, num_layers=2, bidirectional=False, batch_first=True)
self.Wk = nn.ModuleList([nn.Linear(40, 512) for i in range(timestep)])
self.softmax = nn.Softmax()
self.lsoftmax = nn.LogSoftmax()
def _weights_init(m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# initialize gru
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(_weights_init)
def init_hidden(self, batch_size):
#return torch.zeros(2*1, batch_size, 40).cuda()
return torch.zeros(2*1, batch_size, 40)
def forward(self, x, hidden):
batch = x.size()[0]
        t_samples = torch.randint(self.seq_len//160-self.timestep, size=(1,)).long() # randomly pick time stamps
# input sequence is N*C*L, e.g. 8*1*20480
z = self.encoder(x)
# encoded sequence is N*C*L, e.g. 8*512*128
# reshape to N*L*C for GRU, e.g. 8*128*512
z = z.transpose(1,2)
nce = 0 # average over timestep and batch
encode_samples = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
for i in np.arange(1, self.timestep+1):
encode_samples[i-1] = z[:,t_samples+i,:].view(batch,512) # z_tk e.g. size 8*512
forward_seq = z[:,:t_samples+1,:] # e.g. size 8*100*512
output, hidden = self.gru(forward_seq, hidden) # output size e.g. 8*100*40
c_t = output[:,t_samples,:].view(batch, 40) # c_t e.g. size 8*40
pred = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
for i in np.arange(0, self.timestep):
decoder = self.Wk[i]
pred[i] = decoder(c_t) # Wk*c_t e.g. size 8*512
for i in np.arange(0, self.timestep):
total = torch.mm(encode_samples[i], torch.transpose(pred[i],0,1)) # e.g. size 8*8
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch))) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1.*batch*self.timestep
accuracy = 1.*correct.item()/batch
return accuracy, nce, hidden
def predict(self, x, hidden):
batch = x.size()[0]
# input sequence is N*C*L, e.g. 8*1*20480
z = self.encoder(x)
# encoded sequence is N*C*L, e.g. 8*512*128
# reshape to N*L*C for GRU, e.g. 8*128*512
z = z.transpose(1,2)
output, hidden = self.gru(z, hidden) # output size e.g. 8*128*40
return output, hidden # return every frame
#return output[:,-1,:], hidden # only return the last frame per utt
class CDCK2(nn.Module):
def __init__(self, timestep, batch_size, seq_len):
super(CDCK2, self).__init__()
self.batch_size = batch_size
self.seq_len = seq_len
self.timestep = timestep
self.encoder = nn.Sequential( # downsampling factor = 160
nn.Conv1d(1, 512, kernel_size=10, stride=5, padding=3, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=8, stride=4, padding=2, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Conv1d(512, 512, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True)
)
self.gru = nn.GRU(512, 256, num_layers=1, bidirectional=False, batch_first=True)
self.Wk = nn.ModuleList([nn.Linear(256, 512) for i in range(timestep)])
self.softmax = nn.Softmax()
self.lsoftmax = nn.LogSoftmax()
def _weights_init(m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# initialize gru
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(_weights_init)
def init_hidden(self, batch_size, use_gpu=True):
if use_gpu: return torch.zeros(1, batch_size, 256).cuda()
else: return torch.zeros(1, batch_size, 256)
def forward(self, x, hidden):
batch = x.size()[0]
        t_samples = torch.randint(self.seq_len//160-self.timestep, size=(1,)).long() # randomly pick time stamps
# input sequence is N*C*L, e.g. 8*1*20480
z = self.encoder(x)
# encoded sequence is N*C*L, e.g. 8*512*128
# reshape to N*L*C for GRU, e.g. 8*128*512
z = z.transpose(1,2)
nce = 0 # average over timestep and batch
encode_samples = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
for i in np.arange(1, self.timestep+1):
encode_samples[i-1] = z[:,t_samples+i,:].view(batch,512) # z_tk e.g. size 8*512
forward_seq = z[:,:t_samples+1,:] # e.g. size 8*100*512
output, hidden = self.gru(forward_seq, hidden) # output size e.g. 8*100*256
c_t = output[:,t_samples,:].view(batch, 256) # c_t e.g. size 8*256
pred = torch.empty((self.timestep,batch,512)).float() # e.g. size 12*8*512
for i in np.arange(0, self.timestep):
linear = self.Wk[i]
pred[i] = linear(c_t) # Wk*c_t e.g. size 8*512
for i in np.arange(0, self.timestep):
total = torch.mm(encode_samples[i], torch.transpose(pred[i],0,1)) # e.g. size 8*8
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0), torch.arange(0, batch))) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1.*batch*self.timestep
accuracy = 1.*correct.item()/batch
return accuracy, nce, hidden
def predict(self, x, hidden):
batch = x.size()[0]
# input sequence is N*C*L, e.g. 8*1*20480
z = self.encoder(x)
# encoded sequence is N*C*L, e.g. 8*512*128
# reshape to N*L*C for GRU, e.g. 8*128*512
z = z.transpose(1,2)
output, hidden = self.gru(z, hidden) # output size e.g. 8*128*256
return output, hidden # return every frame
#return output[:,-1,:], hidden # only return the last frame per utt
class SpkClassifier(nn.Module):
''' linear classifier '''
def __init__(self, spk_num):
super(SpkClassifier, self).__init__()
self.classifier = nn.Sequential(
nn.Linear(256, 512),
nn.BatchNorm1d(512),
nn.ReLU(),
nn.Linear(512, spk_num)
#nn.Linear(256, spk_num)
)
def _weights_init(m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.apply(_weights_init)
def forward(self, x):
x = self.classifier(x)
return F.log_softmax(x, dim=-1)
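# Minimal CPU usage sketch for CDCK2 (an illustration only; the batch size,
# sequence length and timestep below are assumptions that match the shape
# comments above, not values taken from the paper or any training script).
if __name__ == "__main__":
    model = CDCK2(timestep=12, batch_size=8, seq_len=20480)
    audio = torch.randn(8, 1, 20480)  # N*C*L raw waveform
    hidden = model.init_hidden(8, use_gpu=False)
    accuracy, nce, hidden = model(audio, hidden)
    print(accuracy, nce.item())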
| python |
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer
from sqlalchemy.orm import sessionmaker
engine = create_engine("sqlite:///tmp.db")
Base = declarative_base()
class Signature(Base):
__tablename__ = "signature"
X = Column(Integer, primary_key=True)
Y = Column(Integer)
Z = Column(Integer)
class Signature2(Base):
__tablename__ = "signature2"
A = Column(Integer, primary_key=True)
B = Column(Integer)
C = Column(Integer)
Session = sessionmaker(bind=engine)
| python |
#!/usr/bin/python3
from lib.utility.SystemUtility import *
from lib.utility.SessionUtility import *
from lib.utility.DocumentUtility import *
from lib.utility.CustomJSONEncoder import *
| python |
import asyncio
import aiohttp
from asynctest import TestCase
from asynctest.mock import CoroutineMock
from asgard.backends.chronos.impl import ChronosScheduledJobsBackend
from asgard.clients.chronos import ChronosClient
from asgard.conf import settings
from asgard.http.client import http_client
from asgard.models.account import Account
from asgard.models.user import User
from itests.util import USER_WITH_MULTIPLE_ACCOUNTS_DICT, ACCOUNT_DEV_DICT
from tests.utils import with_json_fixture
class ChronosScheduledJobsBackendTest(TestCase):
async def setUp(self):
self.backend = ChronosScheduledJobsBackend()
async def test_get_job_by_id_job_not_found(self):
job_id = "job-not-found"
user = User(**USER_WITH_MULTIPLE_ACCOUNTS_DICT)
account = Account(**ACCOUNT_DEV_DICT)
job = await self.backend.get_job_by_id(job_id, user, account)
self.assertIsNone(job)
async def test_add_namespace_to_job_name(self):
self.backend.client = CoroutineMock(spec=ChronosClient)
self.backend.client.get_job_by_id.return_value = None
user = User(**USER_WITH_MULTIPLE_ACCOUNTS_DICT)
account = Account(**ACCOUNT_DEV_DICT)
job_id = "my-scheduled-job"
await self.backend.get_job_by_id(job_id, user, account)
self.backend.client.get_job_by_id.assert_awaited_with(
f"{account.namespace}-{job_id}"
)
@with_json_fixture("scheduled-jobs/chronos/infra-purge-logs-job.json")
async def test_get_job_by_id_job_exists(self, job_fixture):
job_fixture["name"] = "dev-scheduled-job"
async with http_client as client:
await client.post(
f"{settings.SCHEDULED_JOBS_SERVICE_ADDRESS}/v1/scheduler/iso8601",
json=job_fixture,
)
            # Give Chronos time to register the job and respond to the request below
await asyncio.sleep(1)
user = User(**USER_WITH_MULTIPLE_ACCOUNTS_DICT)
account = Account(**ACCOUNT_DEV_DICT)
job_id = "scheduled-job"
job = await self.backend.get_job_by_id(job_id, user, account)
self.assertEqual(job_id, job.id)
async def test_get_job_by_id_service_unavailable(self):
"""
        For now we let the error propagate.
"""
get_job_by_id_mock = CoroutineMock(
side_effect=aiohttp.ClientConnectionError()
)
self.backend.client = CoroutineMock(spec=ChronosClient)
self.backend.client.get_job_by_id = get_job_by_id_mock
user = User(**USER_WITH_MULTIPLE_ACCOUNTS_DICT)
account = Account(**ACCOUNT_DEV_DICT)
with self.assertRaises(aiohttp.ClientConnectionError):
await self.backend.get_job_by_id("job-id", user, account)
| python |
#!/usr/bin/env python
import logging
import os
import string
import sys
import yaml
from glob import iglob
import django
django.setup()
from foia_hub.models import Agency, Office, Stats, ReadingRoomUrls
logger = logging.getLogger(__name__)
def check_urls(agency_url, row, field):
# Because only some rows have websites, we only want to update if they do.
row_url = row.get(field, None)
# Check if the existing rec has a url & if it doesn't
# match, then we end up with two conflicting records.
# In this case, we need to reaccess website on agency.
if agency_url and (agency_url != row_url):
logger.warning('Two records with the same agency have two diff urls.')
logger.warning('1:%s | 2:%s' % (agency_url, row_url))
logger.warning('Website: %s, was not saved.' % (row_url))
return agency_url
else:
return row_url
def extract_tty_phone(service_center):
""" Extract a TTY phone number if one exists from the service_center
entry in the YAML. """
tty_phones = [p for p in service_center['phone'] if 'TTY' in p]
if len(tty_phones) > 0:
return tty_phones[0]
def extract_non_tty_phone(public_liaison):
""" Extract a non-TTY number if one exists, otherwise use the TTY number.
If there are multiple options, for now pick the first one. Return None if
no phone number """
if 'phone' in public_liaison:
non_tty = [p for p in public_liaison['phone'] if 'TTY' not in p]
if len(non_tty) > 0:
return non_tty[0]
elif len(public_liaison['phone']) > 0:
return public_liaison['phone'][0]
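# Illustrative behaviour (hedged; the numbers below are invented, not from the
# contacts data): given {'phone': ['202-555-0100 (TTY)', '202-555-0199']} the
# function returns '202-555-0199'; with only TTY entries it falls back to the
# first TTY number; with no 'phone' key it returns None.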
def contactable_fields(agency, office_dict):
"""Add the Contactable and USAddress fields to the agency based on values
in the office dictionary. This will be called for both parent and child
agencies/offices (as written in our current data set)"""
agency.phone = office_dict.get('phone')
agency.emails = office_dict.get('emails', [])
agency.fax = office_dict.get('fax')
agency.office_url = office_dict.get('website')
agency.request_form_url = office_dict.get('request_form')
service_center = office_dict.get(
'service_center', {'name': None, 'phone': ['']})
agency.TTY_phone = extract_tty_phone(service_center)
agency.person_name = service_center.get('name')
public_liaison = office_dict.get(
'public_liaison', {'name': None, 'phone': []})
agency.public_liaison_phone = extract_non_tty_phone(public_liaison)
agency.public_liaison_name = public_liaison.get('name')
    address = office_dict.get('address', {})
agency.zip_code = address.get('zip')
agency.state = address.get('state')
agency.city = address.get('city')
agency.street = address.get('street')
agency.address_lines = address.get('address_lines', [])
update_reading_rooms(agency, office_dict)
def add_request_time_statistics(data, agency, office=None):
"""Load stats data about agencies into the database."""
# Delete old stats before adding
Stats.objects.filter(agency=agency, office=office).delete()
if data.get('request_time_stats'):
latest_year = sorted(
data.get('request_time_stats').keys(), reverse=True)[0]
data = data['request_time_stats'].get(latest_year)
if data:
iterator = [('S', 'simple'), ('C', 'complex')]
for arg in iterator:
median = data.get("%s_median_days" % arg[1])
if median:
stat = Stats(
agency=agency,
office=office,
year=int(latest_year),
stat_type=arg[0])
if median == 'less than 1':
stat.median = 1
stat.less_than_one = True
else:
stat.median = median
stat.save()
def update_reading_rooms(contactable, data):
""" This ensures that the reading rooms indicated in `data` are added to
the contactable (agency, office). If the contactable already has reading
rooms, those are deleted first. """
# Delete all existing reading rooms, because we'll re-add them.
contactable.reading_room_urls.all().delete()
for link_text, url in data.get('reading_rooms', []):
rru = ReadingRoomUrls(
content_object=contactable, link_text=link_text, url=url)
rru.save()
def build_abbreviation(agency_name):
""" Given an agency name, guess at an abbrevation. """
abbreviation = ''
for ch in agency_name:
if ch in string.ascii_uppercase:
abbreviation += ch
return abbreviation
def load_agency_fields(agency, data):
""" Loads agency-specific values """
abbreviation = data.get('abbreviation')
if not abbreviation:
abbreviation = build_abbreviation(data.get('name'))
agency.abbreviation = abbreviation
agency.description = data.get('description')
agency.keywords = data.get('keywords')
agency.common_requests = data.get('common_requests', [])
agency.no_records_about = data.get('no_records_about', [])
def load_data(data):
"""
Loads data from each yaml file into the database.
"""
# Load the agency
name = data['name']
slug = Agency.slug_for(name)
a, created = Agency.objects.get_or_create(slug=slug, name=name)
# Load the agency-specific values
load_agency_fields(a, data)
    # If the agency only has a single department, load the contactable fields onto the agency itself
if len(data['departments']) == 1:
dept_rec = data['departments'][0]
contactable_fields(a, dept_rec)
a.save()
add_request_time_statistics(data, a)
# Load agency offices
if len(data['departments']) > 1:
for dept_rec in data['departments']:
# If top-level=True office is saved as agency
if dept_rec.get('top_level'):
sub_agency_name = dept_rec['name']
sub_agency_slug = Agency.slug_for(sub_agency_name)
sub_agency, created = Agency.objects.get_or_create(
slug=sub_agency_slug, name=sub_agency_name)
sub_agency.parent = a
load_agency_fields(sub_agency, dept_rec)
contactable_fields(sub_agency, dept_rec)
sub_agency.save()
add_request_time_statistics(dept_rec, sub_agency)
else:
# Just an office
office_name = dept_rec['name']
office_slug = Office.slug_for(office_name)
full_slug = slug + '--' + office_slug
o, created = Office.objects.get_or_create(
agency=a, slug=full_slug)
o.office_slug = office_slug
o.name = office_name
contactable_fields(o, dept_rec)
o.save()
add_request_time_statistics(dept_rec, a, o)
def process_yamls(folder):
"""
Loops through each agency yaml file and loads it into the database
"""
for item in iglob(os.path.join(folder, '*.yaml')):
data = yaml.load(open(item))
load_data(data)
if __name__ == "__main__":
'''
To run this:
python load_agency_contacts $LOCATION_OF_DATA
    The data is currently a folder of yaml files that is in the main
foia repo. If you were running this locally, it might look something
like this:
python load_agency_contacts.py ~/Projects/code/foia/foia/contacts/data
    # If you want to designate an alternate csv path, specify it as the next
    # argument following the yaml dir; otherwise the script will default to:
    # ../../data/foia-contacts/full-foia-contacts/
'''
yaml_folder = sys.argv[1]
process_yamls(yaml_folder)
| python |
import logging
from re import search
from flask import Blueprint
from flask_restful import Api
from com_cheese_api.cmm.hom.home import Home
from com_cheese_api.usr.user.resource.user import User, Users
from com_cheese_api.usr.user.resource.login import Login
from com_cheese_api.usr.user.resource.signup import SignUp
from com_cheese_api.cop.itm.cheese.resource.cheese import Cheeses, Cheese, CheeseSearch
from com_cheese_api.cop.itm.cheese.model.cheese_dto import CheeseVo
from com_cheese_api.cop.ord.order.resource.order import Order, Orders
from com_cheese_api.cop.ord.order.resource.search import OrderSearch
from com_cheese_api.cop.ord.order.resource.best import GenderBest, AgeBest
from com_cheese_api.cop.rev.review.model.review_dto import ReviewVo
from com_cheese_api.cop.rev.review.resource.review import Review, Reviews
from com_cheese_api.cop.chat.chatbot.resource.chatbot import Chatbot
from com_cheese_api.cop.rec.recommend.resource.recommend import Recommend
home = Blueprint('home', __name__, url_prefix='/api')
# ================================= User =================================
user = Blueprint('user', __name__, url_prefix='/api/user')
users = Blueprint('users', __name__, url_prefix='/api/users')
login = Blueprint('login', __name__, url_prefix='api/login')
signup = Blueprint('signup', __name__, url_prefix='/api/signup')
# ================================= Cheese =================================
cheese = Blueprint('cheese', __name__, url_prefix='/api/cheese')
cheeses = Blueprint('cheeses', __name__, url_prefix='/api/cheeses')
cheese_search = Blueprint('cheese_search', __name__, url_prefix='/api/cheese/search')
# ================================= Order =================================
order = Blueprint('order', __name__, url_prefix='/api/order')
orders = Blueprint('orders', __name__, url_prefix='/api/orders')
order_search = Blueprint('order_search', __name__, url_prefix='/api/order/search')
gender_best = Blueprint('gender_best', __name__, url_prefix='/api/gender_best')
age_best = Blueprint('age_best', __name__, url_prefix='/api/age_best')
# ================================= Review =================================
review = Blueprint('review', __name__, url_prefix='/api/review')
# review_new = Blueprint('review_new', __name__, url_prefix='/api/review_new')
reviews = Blueprint('reviews', __name__, url_prefix='/api/reviews')
# ================================= Chatbot =================================
chatbot = Blueprint('chatbot', __name__, url_prefix='/api/chatbot/')
# ================================= Chatbot =================================
recommend = Blueprint('recommend', __name__, url_prefix='/api/recommend')
api = Api(home)
api = Api(user)
api = Api(users)
api = Api(login)
api = Api(signup)
# api = Api(cheese)
api = Api(cheeses)
api = Api(cheese_search)
api = Api(order)
api = Api(orders)
api = Api(order_search)
api = Api(gender_best)
api = Api(age_best)
api = Api(review)
# api = Api(review_new)
api = Api(reviews)
api = Api(chatbot)
api = Api(recommend)
####################################################################
def initialize_routes(api):
api.add_resource(Home, '/api')
# ================================= User =================================
api.add_resource(User, '/api/user', '/api/user/<user_id>')
api.add_resource(Users, '/api/users')
api.add_resource(Login, '/api/login')
api.add_resource(SignUp, '/api/signup')
# ================================= Cheese =================================
api.add_resource(Cheese, '/api/cheese', '/api/cheese/<cheese_id>')
api.add_resource(Cheeses, '/api/cheeses')
api.add_resource(CheeseSearch, '/api/cheese/search', '/api/cheese/search/<category>')
# ================================= Order =================================
api.add_resource(Order, '/api/order', '/api/order/<user_id>')
api.add_resource(OrderSearch, '/api/order/search/<order_no>')
api.add_resource(Orders, '/api/orders')
# api.add_resource(OrderBest, '/api/best')
api.add_resource(GenderBest, '/api/gender_best')
api.add_resource(AgeBest, '/api/age_best')
# ================================= Review =================================
api.add_resource(Review, '/api/review', '/api/review/<review_no>')
# api.add_resource(ReviewNew, '/api/review_new/')
api.add_resource(Reviews, '/api/reviews')
# ================================= Chatbot =================================
api.add_resource(Chatbot, '/api/chatbot')
# ================================= Chatbot =================================
api.add_resource(Recommend, '/api/recommend', '/api/recommend/<user_id>')
@home.errorhandler(500)
def home_api_error(e):
logging.exception('An error occurred during home request. %s' % str(e))
return 'An internal error occurred.', 500
@user.errorhandler(500)
def user_api_error(e):
logging.exception('An error occurred during user request. %s' % str(e))
return 'An internal error occurred.', 500
@user.errorhandler(500)
def login_api_error(e):
logging.exception('An error occurred during user request. %s' % str(e))
return 'An internal error occurred.', 500
@user.errorhandler(500)
def auth_api_error(e):
logging.exception('An error occurred during user request. %s' % str(e))
return 'An internal error occurred.', 500
@cheeses.errorhandler(500)
def cheese_api_error(e):
logging.exception('An error occurred during cheeses request. %s' % str(e))
return 'An internal error occurred.', 500
@order.errorhandler(500)
def order_api_error(e):
    logging.exception('An error occurred during order request. %s' % str(e))
return 'An internal error occurred.', 500
@review.errorhandler(500)
def review_api_error(e):
    logging.exception('An error occurred during review request. %s' % str(e))
    return 'An internal error occurred.', 500
@chatbot.errorhandler(500)
def chatbot_api_error(e):
    logging.exception('An error occurred during chatbot request. %s' % str(e))
    return 'An internal error occurred.', 500
@recommend.errorhandler(500)
def recommend_api_error(e):
    logging.exception('An error occurred during recommend request. %s' % str(e))
    return 'An internal error occurred.', 500
# ==============================================================
# ==================== =====================
# ==================== TEST =====================
# ==================== =====================
# ==============================================================
# from com_cheese_api.home.api import HomeAPI
# from com_cheese_api.cheese.cheese_api import CheeseAPI
# from com_cheese_api.board.board_api import BoardAPI
# from com_cheese_api.suggest.suggest_api import SuggestAPI
# from com_cheese_api.admin.admin_api import AdminAPI
# from com_cheese_api.login.login_api import LoginAPI
# from com_cheese_api.login.sign_up_api import SignUpAPI
# def initialize_routes(api):
# api.add_resource(HomeAPI, '/api')
# api.add_resource(CheeseAPI, '/api/cheese')
# api.add_resource(BoardAPI, '/api/board')
# api.add_resource(SuggestAPI, '/api/suggest')
# api.add_resource(AdminAPI, '/api/admin')
# api.add_resource(LoginAPI, '/api/login')
# api.add_resource(SignUpAPI, '/api/sign_up')
| python |
from unicon.plugins.iosxe import IosXEServiceList, IosXESingleRpConnection
from .settings import IosXEIec3400Settings
from . import service_implementation as svc
from .statemachine import IosXEIec3400SingleRpStateMachine
class IosXEIec3400ServiceList(IosXEServiceList):
def __init__(self):
super().__init__()
self.reload = svc.Reload
class IosXEIec3400SingleRpConnection(IosXESingleRpConnection):
os = 'iosxe'
platform = 'iec3400'
chassis_type = 'single_rp'
state_machine_class = IosXEIec3400SingleRpStateMachine
subcommand_list = IosXEIec3400ServiceList
settings = IosXEIec3400Settings()
| python |
from typing import Any, Iterable, Optional, TypeVar
from reactivex import Observable, abc
from reactivex.disposable import (
CompositeDisposable,
Disposable,
SerialDisposable,
SingleAssignmentDisposable,
)
from reactivex.scheduler import CurrentThreadScheduler
_T = TypeVar("_T")
def catch_with_iterable_(sources: Iterable[Observable[_T]]) -> Observable[_T]:
"""Continues an observable sequence that is terminated by an
exception with the next observable sequence.
Examples:
>>> res = catch([xs, ys, zs])
>>> res = reactivex.catch(src for src in [xs, ys, zs])
Args:
sources: an Iterable of observables. Thus a generator is accepted.
Returns:
An observable sequence containing elements from consecutive
source sequences until a source sequence terminates
successfully.
"""
sources_ = iter(sources)
def subscribe(
observer: abc.ObserverBase[_T], scheduler_: Optional[abc.SchedulerBase] = None
) -> abc.DisposableBase:
_scheduler = scheduler_ or CurrentThreadScheduler.singleton()
subscription = SerialDisposable()
cancelable = SerialDisposable()
last_exception = None
is_disposed = False
def action(scheduler: abc.SchedulerBase, state: Any = None) -> None:
def on_error(exn: Exception) -> None:
nonlocal last_exception
last_exception = exn
cancelable.disposable = _scheduler.schedule(action)
if is_disposed:
return
try:
current = next(sources_)
except StopIteration:
if last_exception:
observer.on_error(last_exception)
else:
observer.on_completed()
except Exception as ex: # pylint: disable=broad-except
observer.on_error(ex)
else:
d = SingleAssignmentDisposable()
subscription.disposable = d
d.disposable = current.subscribe(
observer.on_next,
on_error,
observer.on_completed,
scheduler=scheduler_,
)
cancelable.disposable = _scheduler.schedule(action)
def dispose() -> None:
nonlocal is_disposed
is_disposed = True
return CompositeDisposable(subscription, cancelable, Disposable(dispose))
return Observable(subscribe)
__all__ = ["catch_with_iterable_"]
| python |
from __future__ import division
import sys, time, csv, h2o
import pandas as pd
import numpy as np
arg = sys.argv
print "Running script:", sys.argv[0]
arg = sys.argv[1:]
print "Arguments passed to script:", arg
load_data_fp = arg[0]
saving_meanImputed_fp = arg[1]
saving_modelImputed_fp = arg[2]
saving_means_fp = arg[3]
saving_models_fp = arg[4]
predictors = arg[5:]
# GWP_lag is treated as an int variable. It has no missings, so no need to impute it.
# But to keep this scripts code simple I impute anything with 'lag' in the var name.
to_impute = [var for var in predictors if 'lag' in var]
h2o.init(min_mem_size_GB=200, max_mem_size_GB = 225)
d = h2o.import_frame(path = load_data_fp)
#######################################################################
print "Making 'time_period' a factor..."
d['time_period'] = d['time_period'].asfactor()
assert d['time_period'].isfactor()
print d.levels(col='time_period')
d.describe()
def impute_data(method = "mean",
to_impute = to_impute,
predictors = predictors):
if method == "mean":
print "Mean imputing missing data for predictors:", to_impute
# find mean for each time period in data for each predictor, save them in a matrix with a col for the mean values of each predictor
# then on holdout use this table to fill in all missing values based on the time period (row) and the variable (col) of this matrix
#if using python module h2o-3.1.0.3131: grouped = data.group_by(["time_period"])
# gm = [grouped.mean(predictor, na="rm").get_frame() for predictor in to_impute]
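        # Roughly the same idea in pandas, as a hedged sketch that is not
        # executed here ('df' stands for this table loaded as a DataFrame):
        #   means = df.groupby("time_period")[to_impute].mean()
        #   for var in to_impute:
        #       df[var] = df[var].fillna(df["time_period"].map(means[var]))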
gm = d["time_period"].unique()
print "Finding means..."
for predictor in to_impute:
gm = gm.cbind(d.group_by(["time_period"], {predictor:["mean", d.names().index(predictor), "rm"]}, order_by = 0))
gm.show()
print "Saving the imputation means to disk..."
h2o.download_csv(gm, filename = saving_means_fp)
# df_py = h2o.as_list(gm)
# Now that's stored for the holdout data, do this a faster way in java for the training data:
for predictor in to_impute:
d.impute(predictor, method='mean', by = ['time_period'], inplace = True)
print "Done imputing", predictor
print "Saving the final mean imputed data to disk..."
h2o.export_file(frame = d, path =saving_meanImputed_fp, force=True)
if method == "model":
# sequentially impute 'newdata', not 'data', so the order of the predictor variables in the loop does not matter
# otherwise, you would be using increasingly imputed data to make predictions as the loop progresses.
newdata = d
# With training data, build a model for each col and predict missing data, save the models, use them on the holdout data to predict all missing data.
for predictor in to_impute:
print "Building model for imputing " + predictor
print "Subsetting the data into missing values for predictor and no missing values for predictor"
na_ind = d[predictor].isna()
not_na_ind = na_ind != 1.0
to_train = d[not_na_ind]
to_predict = d[na_ind]
these_var = [var for var in predictors if var != predictor]
trained = h2o.gbm(x = to_train[these_var],
y = to_train[[predictor]],
ntrees=300,
max_depth=6,
learn_rate=0.2)
print "Saving the imputation tree model for " + predictor
h2o.save_model(trained, dir = saving_models_fp, name = "dl_imputation_model_" + predictor)
print "Imputing the missing " + predictor + " data by predicting with the model..."
predicted = trained.predict(to_predict[these_var])
tofillin = newdata[predictor]
assert len(predicted) == len(tofillin[na_ind])
tofillin[na_ind] = predicted # mutate the column in place
newdata[predictor] = tofillin
print "Saving the final model-imputed data to disk..."
h2o.export_file(frame = d, path =saving_modelImputed_fp, force=True)
def compare_frames(d1 = saving_meanImputed_fp,
d2 = saving_modelImputed_fp,
imputed = to_impute):
print "Comparing the resulting two matrices..."
# Load the saved frames back in
meanI = h2o.import_file(path = d1)
modelI = h2o.import_file(path = d2)
meanIquantiles = h2o.as_list(meanI[imputed].quantile(prob=[0.01,0.1,0.25,0.333,0.5,0.667,0.75,0.9,0.99]))
modelIquantiles = h2o.as_list(modelI[imputed].quantile(prob=[0.01,0.1,0.25,0.333,0.5,0.667,0.75,0.9,0.99]))
meanIcolmeans = [v.mean() for v in meanI[imputed]]
modelIcolmeans = [v.mean() for v in modelI[imputed]]
meanIcolmedians = [v.median() for v in meanI[imputed]]
modelIcolmedians = [v.median() for v in modelI[imputed]]
meanIcolmin = [v.min() for v in meanI[imputed]]
modelIcolmin = [v.min() for v in modelI[imputed]]
# TODO save all this in a csv file
impute_data("mean")
impute_data("model")
# compare_frames()
# Send email
email = False
if(email):
import smtplib
GMAIL_USERNAME = None
GMAIL_PW = None
RECIP = None
SMTP_NUM = None
session = smtplib.SMTP('smtp.gmail.com', SMTP_NUM)
session.ehlo()
session.starttls()
session.login(GMAIL_USERNAME, GMAIL_PW)
headers = "\r\n".join(["from: " + GMAIL_USERNAME,
"subject: " + "Finished running script: " + __file__,
"to: " + RECIP,
"mime-version: 1.0",
"content-type: text/html"])
content = headers + "\r\n\r\n" + "Done running the script.\n Sent from my Python code."
session.sendmail(GMAIL_USERNAME, RECIP, content)
| python |
import hashlib
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMessageBox
from Model.Register import Register
from Model.Values import Values
from Model.dataUtils import sqlconn
class Login_Window(QtWidgets.QMainWindow):
def __init__(self, gui, reg):
super(Login_Window, self).__init__()
self.setupUi(self)
self.retranslateUi(self)
self.gui = gui
self.reg = reg
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(386, 127)
        MainWindow.setWindowIcon(QIcon('logo.png'))
MainWindow.setStyleSheet("background-image:url(logo.jpg)")
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.lineEdit = QtWidgets.QLineEdit(self.centralWidget)
self.lineEdit.setGeometry(QtCore.QRect(250, 24, 100, 24))
self.lineEdit.setText("")
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralWidget)
self.lineEdit_2.setGeometry(QtCore.QRect(250, 54, 100, 24))
self.lineEdit_2.setText("")
self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEdit_2.setObjectName("lineEdit_2")
self.label = QtWidgets.QLabel(self.centralWidget)
self.label.setGeometry(QtCore.QRect(200, 24, 48, 24))
self.label.setTextFormat(QtCore.Qt.AutoText)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralWidget)
self.label_2.setGeometry(QtCore.QRect(200, 54, 48, 24))
self.label_2.setObjectName("label_2")
self.pushButton = QtWidgets.QPushButton(self.centralWidget)
self.pushButton.setGeometry(QtCore.QRect(190, 90, 75, 23))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(self.centralWidget)
self.pushButton_2.setGeometry(QtCore.QRect(290, 90, 75, 23))
self.pushButton_2.setObjectName("pushButton_2")
MainWindow.setCentralWidget(self.centralWidget)
self.pushButton.clicked.connect(self.word_get)
# self.pushButton_2.clicked.connect(MainWindow.close)
self.pushButton_2.clicked.connect(self.register)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "报刊订阅系统"))
self.lineEdit.setPlaceholderText(_translate("MainWindow", "请输入帐号"))
self.lineEdit_2.setPlaceholderText(_translate("MainWindow", "请输入密码"))
self.lineEdit_2.returnPressed.connect(self.word_get)
self.label.setText(_translate("MainWindow", "帐 号"))
self.label_2.setText(_translate("MainWindow", "密 码"))
self.pushButton.setText(_translate("MainWindow", "确定"))
self.pushButton_2.setText(_translate("MainWindow", "注册"))
def register(self):
self.hide()
self.reg.show()
def word_get(self):
connect, cursor = sqlconn()
login_user = self.lineEdit.text()
login_password = self.lineEdit_2.text()
passwd = hashlib.md5(login_password.encode('UTF-8')).hexdigest()
sql_root = "select * from root where usrname='" + login_user + "' and passwd='" + passwd + "'"
sql_user = "select * from user where usrname='" + login_user + "' and passwd='" + passwd + "'"
res_root = cursor.execute(sql_root)
res_user = cursor.execute(sql_user)
if res_root > 0:
Values.IsRootLogin = True
Values.CurrentUser = login_user
self.gui.show()
self.close()
elif res_user > 0:
Values.IsUserLogin = True
Values.CurrentUser = login_user
self.gui.show()
self.close()
else:
QMessageBox.warning(self,
"警告",
"用户名或密码错误!",
QMessageBox.Yes)
self.lineEdit.setFocus()
self.gui.refreshAll()
connect.close()
| python |
#!/bin/envrun
import z3
import circ as ci
print("z3------")
# XOR test case
# (A + B)* ~(AB)
x = z3.Bool('x')
y = z3.Bool('y')
expr = z3.And( # 'z'
z3.Or(x, y),
z3.Not(z3.And(x, y))
)
print(expr)
print("internal-------")
ix, iy = ci.In(), ci.In()
#ox = ci.Out()
xor = ci.Circuit.fromRAW(
ci.And(
ci.Or(ix, iy),
ci.Not(ci.And(ix, iy))))
print(xor)
print(xor.debug())
print(xor.data)
try:
for x in False,True:
for y in False,True:
out = xor.eval(ix= x, iy= y)
print(f"{x},\t{y}\t= {out}")
assert(out['ox'] == (x ^ y))
except ci.InputConflict as e:
    print(e)
| python |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import io
import logging
import os.path as op
import re
import math
import random
import string
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
from fairseq.data import (
ConcatDataset,
Dictionary,
FairseqDataset,
ResamplingDataset,
data_utils as fairseq_data_utils,
)
from fairseq.data.audio.audio_utils import get_fbank, get_waveform
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator
)
from fairseq.data.audio.speech_to_text_dataset import (
get_features_or_waveform,
_collate_frames
)
logger = logging.getLogger(__name__)
class AudioDictDataset(SpeechToTextDataset):
LANG_TAG_TEMPLATE = "<lang:{}>"
def __init__(
self,
split: str,
is_train_split: bool,
data_cfg: S2TDataConfig,
audio_paths: List[str],
n_frames: List[int],
audio_dict,
align_time_min,
align_time_max,
total_time,
src_texts: Optional[List[str]] = None,
tgt_texts: Optional[List[str]] = None,
speakers: Optional[List[str]] = None,
src_langs: Optional[List[str]] = None,
tgt_langs: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
tgt_dict: Optional[Dictionary] = None,
pre_tokenizer=None,
bpe_tokenizer=None,
):
self.split, self.is_train_split = split, is_train_split
self.data_cfg = data_cfg
self.audio_paths, self.n_frames = audio_paths, n_frames
self.n_samples = len(audio_paths)
assert len(n_frames) == self.n_samples > 0
assert src_texts is None or len(src_texts) == self.n_samples
assert tgt_texts is None or len(tgt_texts) == self.n_samples
assert speakers is None or len(speakers) == self.n_samples
assert src_langs is None or len(src_langs) == self.n_samples
assert tgt_langs is None or len(tgt_langs) == self.n_samples
assert ids is None or len(ids) == self.n_samples
assert (tgt_dict is None and tgt_texts is None) or (
tgt_dict is not None and tgt_texts is not None
)
self.src_texts, self.tgt_texts = src_texts, tgt_texts
self.src_langs, self.tgt_langs = src_langs, tgt_langs
self.tgt_dict = tgt_dict
self.check_tgt_lang_tag()
self.ids = ids
self.shuffle = data_cfg.shuffle if is_train_split else False
self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
self.data_cfg.get_feature_transforms(split, is_train_split)
)
# For aligned augmentation
self.align_time_min = align_time_min
self.align_time_max = align_time_max
self.audio_dict = audio_dict
self.audio_dict_size = len(self.audio_dict)
self.total_time = total_time
        # Used in the +AudioDict part of ADA-LM/ADA-RT
self.max_samp_fbank = self.data_cfg.max_samp_fbank
if self.max_samp_fbank is not None:
assert isinstance(self.max_samp_fbank, int) and \
self.max_samp_fbank >= 1
self.num_samp_fbank = self.data_cfg.num_samp_fbank
# Used in aligned masking (target side only w/o audio dict)
self.max_mask_fbank = self.data_cfg.max_mask_fbank
self.num_mask_fbank = self.data_cfg.num_mask_fbank
        # Fraction of utterances in a mini-batch that sampleFbank is applied to
        # (prob should be -1 when sample_fbank is not used)
self.sampleFbank_prob = self.data_cfg.sampleFbank_prob
self.apply_alignAugment = self.data_cfg.apply_alignAugment
self.roberta = None
self.skip_roberta = self.data_cfg.skip_roberta
logger.info('Skip roberta: {}'.format(self.skip_roberta))
if self.apply_alignAugment:
if not self.skip_roberta:
from fairseq.models.roberta import RobertaModel
self.roberta = RobertaModel.from_pretrained(
self.data_cfg.path_roberta, checkpoint_file='model.pt'
)
if self.data_cfg.roberta_fp16:
self.roberta.half()
logger.info('Inference of roberta with dtype: {}'.format(
(next(self.roberta.parameters())).dtype)
)
self.roberta.cuda()
self.roberta.eval()
else:
self.audio_dict_keys = list(self.audio_dict.keys())
self.alignAugment_prob = self.data_cfg.alignAugment_prob
self.alignMask = self.data_cfg.alignMask
self.skip_source = self.data_cfg.skip_source
self.percentMaskedTokens = self.data_cfg.percentMaskedTokens
self.thresholdMaskedTokens = self.data_cfg.thresholdMaskedTokens
if self.alignAugment_prob > 0 and self.alignAugment_prob <= 1:
assert self.thresholdMaskedTokens >= 1
self.random_time_mask_N = self.data_cfg.random_time_mask_N
self.random_time_mask_T = self.data_cfg.random_time_mask_T
self.random_time_mask_p = self.data_cfg.random_time_mask_p
self.random_time_mask_limited = self.data_cfg.random_time_mask_limited
if self.random_time_mask_N is not None \
and self.random_time_mask_T is not None:
self.time_mask_max = self.random_time_mask_N * \
self.random_time_mask_T
self.random_freq_mask_N = self.data_cfg.random_freq_mask_N
self.random_freq_mask_F = self.data_cfg.random_freq_mask_F
        self.random_mask_value = self.data_cfg.random_mask_value  # SpecAugment after ADA
self.align_mask_value = self.data_cfg.align_mask_value
self.pre_tokenizer = pre_tokenizer
self.bpe_tokenizer = bpe_tokenizer
logger.info(self.__repr__())
def __repr__(self):
return (
self.__class__.__name__
+ f'(split="{self.split}", n_samples={self.n_samples}, '
f"prepend_tgt_lang_tag={self.data_cfg.prepend_tgt_lang_tag}, "
f"roberta={self.roberta}, "
f"skip_roberta={self.skip_roberta}, "
f"alignAugment_prob={self.alignAugment_prob}, "
f"self.alignMask={self.alignMask}, "
f"self.skip_source={self.skip_source}, "
f"self.percentMaskedTokens={self.percentMaskedTokens}, "
f"self.thresholdMaskedTokens={self.thresholdMaskedTokens}, "
f"self.random_time_mask_N={self.random_time_mask_N}, "
f"self.random_time_mask_T={self.random_time_mask_T}, "
f"self.random_time_mask_p={self.random_time_mask_p}, "
f"self.random_time_mask_limited={self.random_time_mask_limited}, "
f"self.random_freq_mask_N={self.random_freq_mask_N}, "
f"self.random_freq_mask_F={self.random_freq_mask_F}, "
f"self.random_mask_value={self.random_mask_value}, "
f"self.align_mask_value={self.align_mask_value}, "
f"self.sampleFbank_prob={self.sampleFbank_prob}, "
f"self.max_samp_fbank={self.max_samp_fbank}, "
f"self.num_samp_fbank={self.num_samp_fbank}, "
f"shuffle={self.shuffle}, transforms={self.feature_transforms}, "
)
def _augment_target(self, orig_sentence):
'''
        Augment the target side using the RoBERTa model or random
        replacements drawn from the keys of the audio dictionary
Arguments:
orig_sentence (str): an input transcription
Return:
1. container (List[Tuple(position, word_from_roberta)])
2. updated (str):
                The transcription with words predicted by roberta,
or sampled from the keys of audio dictionary
'''
container, collect_sent = [], []
updated = orig_sentence.split()
positions = random.sample(
range(len(updated)),
min(
max(1, int(len(updated)*self.percentMaskedTokens)),
self.thresholdMaskedTokens
)
)
positions.sort()
if not self.skip_roberta:
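            # ADA-LM: let RoBERTa propose replacements for the masked positions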
with torch.no_grad():
for pos in positions:
sent_list = orig_sentence.split()
sent_list[pos] = '<mask>'
collect_sent.append(' '.join(sent_list))
_info = self.roberta.batch_fill_mask(collect_sent, topk=2)
for pos, info in zip(positions, _info):
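                    # Take the second-ranked prediction when available (likely to avoid
                    # echoing the original token); otherwise fall back to the top one.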
try:
item = info[1][-1].strip()
except:
item = info[0][-1].strip()
if item in string.punctuation:
continue
item = item.upper()
updated[pos] = item
container.append((pos, item))
else:
# ADA-RT
idx_tokens = random.sample(
range(self.audio_dict_size),
len(positions)
)
for pos, tok in zip(positions, idx_tokens):
updated[pos] = self.audio_dict_keys[tok]
container.append((pos, self.audio_dict_keys[tok]))
return container, ' '.join(updated), positions
def _sample_fbank(self,
spectrogram,
transcription,
time_min,
time_max,
scaling
):
'''
        Data augmentation by sampling word-level fbanks from AudioDict.
        Passing the audio_dict to an inner function can be slow, so the
        lookup is done directly here.
'''
align_time_min = time_min.split('-')
align_time_max = time_max.split('-')
# Sample words for sampling fbanks
transp_list = transcription.split()
len_transp_list = len(transp_list)
if int(self.num_samp_fbank) >= 1:
_number_swapped = int(self.num_samp_fbank)
elif float(self.num_samp_fbank) >= 0. and float(self.num_samp_fbank) < 1.:
_number_swapped = math.floor(len_transp_list*self.num_samp_fbank)
else:
_number_swapped = len_transp_list
number_swapped = min(max(1, _number_swapped), int(self.max_samp_fbank))
positions = np.sort(
np.random.choice(range(0, len_transp_list),
size=number_swapped,
replace=False)
)
positions.sort()
collect_fbank_min_pos, collect_fbank_max_pos = [], []
collect_sampled_fbanks = []
for pos in positions:
if transp_list[pos] not in self.audio_dict.keys():
continue
if len(self.audio_dict[transp_list[pos]]) <= 3:
                # Not enough variants for this word
continue
sampled_idx = np.random.choice(
range(len(self.audio_dict[transp_list[pos]])),
replace=False, size=1
)
word_sampled_fbank = self.audio_dict[
transp_list[pos]][sampled_idx[0]
]
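            # An audio-dict entry stores per-segment fbank arrays; concatenate them
            # (skipping the '_id' field) into one replacement chunk.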
sampled_fbank = np.concatenate(
[v for k, v in word_sampled_fbank.items() if k != '_id']
)
fbank_min_pos = int(float(align_time_min[pos]) * scaling)
fbank_max_pos = int(float(align_time_max[pos]) * scaling)
collect_fbank_min_pos.append(fbank_min_pos)
collect_fbank_max_pos.append(fbank_max_pos)
collect_sampled_fbanks.append(sampled_fbank)
if len(collect_fbank_max_pos) == 0:
assert len(collect_fbank_min_pos) == 0
# Words for positions sampled do not exist in AD
return spectrogram
# Update the fbank
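        # Interleave the untouched spectrogram segments with the sampled replacements,
        # then rebuild the full utterance below.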
collect_fbank_max_pos.insert(0, 0)
collect_fbank_min_pos.append(spectrogram.shape[0])
collect_pos = [(max_pos, min_pos) for max_pos, min_pos in
zip(collect_fbank_max_pos, collect_fbank_min_pos)]
collect_sampled_fbanks.append(np.array([])) # to maintain the same length
fbank_updated = []
for idx, ((max_idx, min_idx), fb) in enumerate(
zip(collect_pos, collect_sampled_fbanks)
):
remained_fbank = spectrogram[max_idx:(min_idx), :]
fbank_updated.append(remained_fbank)
if fb.shape[0] == 0:
# because of the "maintain the same length"
continue
else:
fbank_updated.append(fb)
fbank_updated = np.concatenate(fbank_updated)
return fbank_updated
def _ADAMask(self, spectrogram, frames_masked):
'''
        SpecAugment for ADA, extended to limit the amount of random time
        masking according to the number of frames already masked by the
        aligned time masking
        Note:
            mask_value: 0 in a previous version; here it defaults to the
            spectrogram mean (as in SpecAugment) unless random_mask_value is set
'''
distorted = spectrogram.copy()
num_frames = spectrogram.shape[0]
num_freqs = spectrogram.shape[1]
if self.random_mask_value is None:
mask_value = spectrogram.mean()
else:
mask_value = self.random_mask_value
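        # Frequency masking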
for _i in range(self.random_freq_mask_N):
f = np.random.randint(0, self.random_freq_mask_F)
f0 = np.random.randint(0, num_freqs - f)
if f != 0:
distorted[:, f0: f0 + f] = mask_value
if self.random_time_mask_limited:
# Restrict the amount of random time masking given
# the amount of aligned time masking
remained = self.time_mask_max - frames_masked
if remained > 0:
max_time_mask_t = (remained // self.random_time_mask_N)
else:
max_time_mask_t = -1
else:
# Normal specaugment
max_time_mask_t = min(
self.random_time_mask_T,
math.floor(num_frames * self.random_time_mask_p)
)
if max_time_mask_t < 1:
return distorted
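        # Time masking (bounded by max_time_mask_t computed above)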
for _i in range(self.random_time_mask_N):
t = np.random.randint(0, max_time_mask_t)
t0 = np.random.randint(0, num_frames - t)
if t != 0:
distorted[t0 : t0 + t, :] = mask_value
return distorted
def _alignAugment(self, source, index, scaling, align_mask=False, skip_source=False):
'''
Not sure if it is better to pass copies of align_time_min/max and tgt_texts instead
Arguments:
source: fbanks in numpy format
index: index of data instance
scaling: conversion factor between raw audio time and fbank time steps
align_mask: Replace the corresponding fbanks with variable
align_mask_value
skip_source: No aligned masking or
audio dictionary is applied on source side.
It is used in target-only augmentation
Returns:
1. spectrograms (np array)
2. augmented transcriptions (str)
3. number of frames masked in ADA (int)
4. number of tokens replaced in transcriptions (int)
5. number of hits on audio dictionary (int)
'''
aug_info, aug_tp, positions = self._augment_target(self.tgt_texts[index])
align_time_min = self.align_time_min[index].split('-')
align_time_max = self.align_time_max[index].split('-')
frames_masked = 0
hit_audioDict = 0
assert len(aug_tp.split())==len(align_time_min)==len(align_time_max)
if skip_source:
## Only target side augmentation
return source, aug_tp, frames_masked, len(aug_info), 0
# Generate fbanks for augmented words
collect_fbank_min_pos, collect_fbank_max_pos = [], []
collect_sampled_fbanks = []
if self.align_mask_value is None:
align_mask_value = source.mean()
else:
align_mask_value = self.align_mask_value
for pos, word in aug_info:
fbank_min_pos = int(float(align_time_min[pos]) * scaling)
fbank_max_pos = int(float(align_time_max[pos]) * scaling)
if align_mask or word not in self.audio_dict.keys():
# Return masked spectrogram
frames_masked += (fbank_max_pos - fbank_min_pos + 1)
assert frames_masked >= 0
source[fbank_min_pos:(fbank_max_pos+1),:] = align_mask_value
else:
# sample fbanks from AD
hit_audioDict += 1
sampled_idx = np.random.choice(
range(len(self.audio_dict[word])),
replace=False, size=1
)
word_sampled_fbank = self.audio_dict[word][sampled_idx[0]]
sampled_fbank = np.concatenate(
[v for k, v in word_sampled_fbank.items() if k != '_id']
)
collect_fbank_min_pos.append(fbank_min_pos)
collect_fbank_max_pos.append(fbank_max_pos)
collect_sampled_fbanks.append(sampled_fbank)
if not collect_fbank_min_pos and not collect_fbank_max_pos:
# No augmented words exist in AD or no augmented target words
assert hit_audioDict == 0
return source, aug_tp, frames_masked, len(aug_info), hit_audioDict
# Update the fbank
assert len(collect_fbank_min_pos)==len(collect_fbank_max_pos)\
==len(collect_sampled_fbanks)
collect_fbank_max_pos.insert(0, 0)
collect_fbank_min_pos.append(source.shape[0])
collect_pos = [(max_pos, min_pos) for max_pos, min_pos in
zip(collect_fbank_max_pos, collect_fbank_min_pos)]
collect_sampled_fbanks.append(np.array([])) # to maintain the same length
fbank_updated = []
for idx, ((max_idx, min_idx), fb) in enumerate(
zip(collect_pos, collect_sampled_fbanks)
):
remained_fbank = source[max_idx:(min_idx), :]
fbank_updated.append(remained_fbank)
if fb.shape[0] == 0:
# because of the "maintain the same length"
continue
else:
fbank_updated.append(fb)
fbank_updated = np.concatenate(fbank_updated)
return fbank_updated, aug_tp, frames_masked, len(aug_info), hit_audioDict
def __getitem__(
self, index: int
) -> Tuple[int, torch.Tensor, Optional[torch.Tensor]]:
source = get_features_or_waveform(
self.audio_paths[index], need_waveform=self.data_cfg.use_audio_input
)
if self.feature_transforms is not None:
assert not self.data_cfg.use_audio_input
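            # Conversion factor from alignment time units to fbank frame indices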
scaling = source.shape[0] / float(self.total_time[index])
transp_list = self.tgt_texts[index].split()
tgt_texts, align_time_min, align_time_max = None, None, None
if \
self.is_train_split and \
self.apply_alignAugment and \
torch.rand([1]).item() <= float(self.alignAugment_prob) \
:
source, tgt_texts, frames_masked, tokens_masked, hit = \
self._alignAugment(
source, index, scaling,
align_mask=self.alignMask,
skip_source=self.skip_source
)
source = self._ADAMask(source, frames_masked)
else:
if tgt_texts is None:
assert align_time_min is None
assert align_time_max is None
tgt_texts = self.tgt_texts[index]
align_time_min = self.align_time_min[index]
align_time_max = self.align_time_max[index]
if \
self.is_train_split and \
self.audio_dict is not None and \
torch.rand([1]).item() <= self.sampleFbank_prob \
:
## Allow the original fbanks to be used under certain prob
source = self._sample_fbank(
source,
tgt_texts,
align_time_min,
align_time_max,
scaling
)
# Call the standard SpecAugment
source = self.feature_transforms(source)
tokens_masked = hit = 0
source = torch.from_numpy(source).float()
target = None
if self.tgt_texts is not None:
#tokenized = self.tokenize_text(self.tgt_texts[index])
tokenized = self.tokenize_text(tgt_texts)
target = self.tgt_dict.encode_line(
tokenized, add_if_not_exist=False, append_eos=True
).long()
if self.data_cfg.prepend_tgt_lang_tag:
lang_tag = self.LANG_TAG_TEMPLATE.format(self.tgt_langs[index])
lang_tag_idx = self.tgt_dict.index(lang_tag)
target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
return index, source, target, tokens_masked, hit
def collater(self, samples: List[Tuple[int, torch.Tensor, torch.Tensor]]) -> Dict:
if len(samples) == 0:
return {}
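        # Each sample is (index, frames, target, n_tokens_masked, n_audio_dict_hits); see __getitem__.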
indices = torch.tensor([i for i, _, _, _, _ in samples], dtype=torch.long)
frames = _collate_frames(
[s for _, s, _, _, _ in samples], self.data_cfg.use_audio_input
)
tokens_masked = torch.tensor([i for _, _, _, i, _ in samples])
hit = torch.tensor([i for _, _, _, _, i in samples])
ntokens_masked = torch.sum(tokens_masked)
nhit = torch.sum(hit)
n_frames = torch.tensor([s.size(0) for _, s, _, _, _ in samples], dtype=torch.long)
n_frames, order = n_frames.sort(descending=True)
indices = indices.index_select(0, order)
frames = frames.index_select(0, order)
target, target_lengths = None, None
prev_output_tokens = None
ntokens = None
if self.tgt_texts is not None:
target = fairseq_data_utils.collate_tokens(
[t for _, _, t, _, _ in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, order)
target_lengths = torch.tensor(
[t.size(0) for _, _, t, _, _ in samples], dtype=torch.long
).index_select(0, order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[t for _, _, t, _, _ in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=True,
)
prev_output_tokens = prev_output_tokens.index_select(0, order)
ntokens = sum(t.size(0) for _, _, t, _, _ in samples)
out = {
"id": indices,
"net_input": {
"src_tokens": frames,
"src_lengths": n_frames,
"prev_output_tokens": prev_output_tokens,
},
"target": target,
"target_lengths": target_lengths,
"ntokens": ntokens,
"nsentences": len(samples),
"ntokens_masked": ntokens_masked,
"nhit": nhit
}
return out
class AudioDictDatasetCreator(SpeechToTextDatasetCreator):
# mandatory columns
KEY_ID, KEY_AUDIO, KEY_N_FRAMES = "id", "audio", "n_frames"
KEY_TGT_TEXT = "tgt_text"
# optional columns
KEY_SPEAKER, KEY_SRC_TEXT = "speaker", "src_text"
KEY_SRC_LANG, KEY_TGT_LANG = "src_lang", "tgt_lang"
# default values
DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = ""
# columns for alignment info.
KEY_TIME_MIN, KEY_TIME_MAX = "align_time_min", "align_time_max"
KEY_TOTAL_TIME = "total_time"
@classmethod
def _from_list(
cls,
split_name: str,
is_train_split,
samples: List[List[Dict]],
data_cfg: S2TDataConfig,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
audio_dict,
) -> AudioDictDataset:
audio_paths, n_frames, src_texts, tgt_texts, ids = [], [], [], [], []
speakers, src_langs, tgt_langs = [], [], []
align_time_min, align_time_max, total_time = [], [], []
for s in samples:
ids.extend([ss[cls.KEY_ID] for ss in s])
audio_paths.extend(
[op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s]
)
n_frames.extend([int(ss[cls.KEY_N_FRAMES]) for ss in s])
tgt_texts.extend([ss[cls.KEY_TGT_TEXT] for ss in s])
src_texts.extend(
[ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s]
)
speakers.extend([ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s])
src_langs.extend([ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s])
tgt_langs.extend([ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s])
align_time_min.extend([ss[cls.KEY_TIME_MIN] for ss in s])
align_time_max.extend([ss[cls.KEY_TIME_MAX] for ss in s])
total_time.extend([ss[cls.KEY_TOTAL_TIME] for ss in s])
return AudioDictDataset(
split_name,
is_train_split,
data_cfg,
audio_paths,
n_frames,
audio_dict,
align_time_min,
align_time_max,
total_time,
src_texts,
tgt_texts,
speakers,
src_langs,
tgt_langs,
ids,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
)
@classmethod
def from_tsv(
cls,
root: str,
data_cfg: S2TDataConfig,
splits: str,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split: bool,
epoch: int,
seed: int,
audio_dict
) -> AudioDictDataset:
samples = []
_splits = splits.split(",")
for split in _splits:
tsv_path = op.join(root, f"{split}.tsv")
if not op.isfile(tsv_path):
raise FileNotFoundError(f"Dataset not found: {tsv_path}")
with open(tsv_path) as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
samples.append([dict(e) for e in reader])
assert len(samples) > 0
datasets = [
cls._from_list(
name,
is_train_split,
[s],
data_cfg,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
audio_dict
)
for name, s in zip(_splits, samples)
]
if is_train_split and len(_splits) > 1 and data_cfg.sampling_alpha != 1.0:
# temperature-based sampling
size_ratios = cls._get_size_ratios(
_splits, [len(s) for s in samples], alpha=data_cfg.sampling_alpha
)
datasets = [
ResamplingDataset(
d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)
)
for d, r in zip(datasets, size_ratios)
]
return ConcatDataset(datasets)
| python |
from pyrogram import filters
from pyrogram.types import Message
from megumin import megux, Config
from megumin.utils import get_collection
from megumin.utils.decorators import input_str
LOCK_TYPES = ["audio", "link", "video"]
@megux.on_message(filters.command("lock", Config.TRIGGER))
async def lock(c: megux, m: Message):
LOCK = get_collection(f"LOCK {m.chat.id}")
res = input_str(m)
await LOCK.insert_one({"lock": res})
| python |
import yaml
import collections
# Ordered loading of dictionary items in yaml files
# Taken from: SO link: /questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
def yaml_ordered_load(fp):
class OrderedLoader(yaml.Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return collections.OrderedDict(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(fp, OrderedLoader)
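# Illustrative usage (assumes a YAML file on disk):
#   with open("config.yaml") as fp:
#       cfg = yaml_ordered_load(fp)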
| python |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Unit tests for ly_test_tools._internal.pytest_plugin.terminal_report
"""
import os
import pytest
import unittest.mock as mock
import ly_test_tools._internal.pytest_plugin.terminal_report as terminal_report
pytestmark = pytest.mark.SUITE_smoke
class TestTerminalReport(object):
@mock.patch('ly_test_tools._internal.pytest_plugin.failed_test_rerun_command.build_rerun_commands')
def test_AddCommands_MockCommands_CommandsAdded(self, mock_build_commands):
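        # Echo the node ids back unchanged so the written lines can be asserted directly.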
mock_build_commands.side_effect = lambda path, nodes, dir: nodes
mock_reporter = mock.MagicMock()
header = 'This is a header'
test_path = 'Foo'
mock_node_ids = ['a', 'b']
terminal_report._add_commands(mock_reporter, header, test_path, mock_node_ids)
mock_reporter.write_line.assert_has_calls([
mock.call(header),
mock.call('a'),
mock.call('b')
])
@mock.patch('ly_test_tools._internal.pytest_plugin.failed_test_rerun_command.build_rerun_commands')
def test_AddCommands_NoCommands_ErrorWritten(self, mock_build_commands):
mock_reporter = mock.MagicMock()
header = 'This is a header'
test_path = 'Foo'
mock_node_ids = []
terminal_report._add_commands(mock_reporter, header, test_path, mock_node_ids)
calls = mock_reporter.write_line.mock_calls
mock_build_commands.assert_not_called()
assert calls[0] == mock.call(header)
assert 'Error' in calls[1][1][0]
@mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands')
def test_TerminalSummary_NoErrorsNoFailures_EmptyReport(self, mock_add_commands):
mock_report = mock.MagicMock()
mock_report.stats.get.return_value = []
mock_config = mock.MagicMock()
terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)
mock_add_commands.assert_not_called()
mock_report.config.getoption.assert_not_called()
mock_report.section.assert_not_called()
@mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands')
def test_TerminalSummary_ErrorsAndFailures_SectionsAdded(self, mock_add_commands):
mock_report = mock.MagicMock()
mock_node = mock.MagicMock()
mock_node.nodeid = 'something'
mock_report.stats.get.return_value = [mock_node, mock_node]
mock_config = mock.MagicMock()
terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)
assert len(mock_add_commands.mock_calls) == 2
mock_report.config.getoption.assert_called()
mock_report.section.assert_called_once()
@mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands', mock.MagicMock())
@mock.patch('os.path.basename')
def test_TerminalSummary_Failures_CallsWithBasename(self, mock_basename):
mock_report = mock.MagicMock()
mock_node = mock.MagicMock()
mock_base = 'something'
node_id = os.path.join('C:', mock_base)
mock_node.nodeid = node_id
mock_report.stats.get.side_effect = [[mock_node], []] # first item is failure list
mock_config = mock.MagicMock()
terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)
mock_basename.assert_called_with(node_id)
@mock.patch('ly_test_tools._internal.pytest_plugin.terminal_report._add_commands', mock.MagicMock())
@mock.patch('os.path.basename')
def test_TerminalSummary_Errors_CallsWithBasename(self, mock_basename):
mock_report = mock.MagicMock()
mock_node = mock.MagicMock()
mock_base = 'something'
node_id = os.path.join('C:', mock_base)
mock_node.nodeid = node_id
mock_report.stats.get.side_effect = [[], [mock_node]] # second item is error list
mock_config = mock.MagicMock()
terminal_report.pytest_terminal_summary(mock_report, 0, mock_config)
mock_basename.assert_called_with(node_id)
| python |
import platform
from datetime import datetime
from typing import Optional
import discord
from discord.ext import commands
class Stats(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print(f"{self.__class__.__name__} Cog has been loaded\n-----")
@commands.command(name="emojiinfo", aliases=["ei"])
@commands.guild_only()
async def emoji_info(self, ctx, emoji: discord.Emoji = None):
if not emoji:
return await ctx.invoke(self.bot.get_command("help"), entity="emojiinfo")
try:
emoji = await emoji.guild.fetch_emoji(emoji.id)
except discord.NotFound:
return await ctx.send("I could not find this emoji in the given guild.")
is_managed = "Yes" if emoji.managed else "No"
is_animated = "Yes" if emoji.animated else "No"
requires_colons = "Yes" if emoji.require_colons else "No"
creation_time = emoji.created_at.strftime("%I:%M %p %B %d, %Y")
can_use_emoji = (
"Everyone"
if not emoji.roles
else " ".join(role.name for role in emoji.roles)
)
description = f"""
**General:**
**- Name:** {emoji.name}
**- Id:** {emoji.id}
**- URL:** [Link To Emoji]({emoji.url})
**- Author:** {emoji.user.mention}
**- Time Created:** {creation_time}
**- Usable by:** {can_use_emoji}
**Other:**
**- Animated:** {is_animated}
**- Managed:** {is_managed}
**- Requires Colons:** {requires_colons}
**- Guild Name:** {emoji.guild.name}
**- Guild Id:** {emoji.guild.id}
"""
embed = discord.Embed(
title=f"**Emoji Information for:** `{emoji.name}`",
description=description,
colour=discord.Color.blurple(),
)
embed.set_thumbnail(url=emoji.url)
await ctx.send(embed=embed)
@commands.command(name="botinfo", aliases=["bi", "bot", "bot info"])
@commands.guild_only()
    async def info_bot(self, ctx):
        """
        Show information about the bot.
        """
pythonVersion = platform.python_version()
dpyVersion = discord.__version__
serverCount = len(self.bot.guilds)
memberCount = len(set(self.bot.get_all_members()))
mem1 = self.bot.get_user(854230635425693756)
embed = discord.Embed(
title=f"{mem1.name} Stats ",
description=f"{self.bot.user.name} Bot is a MultiPrupose Bot Customised for FRNz COmmunity. Made By <@448740493468106753>",
colour=discord.Color.blurple(),
timestamp=datetime.utcnow(), )
embed.add_field(name="Bot Version:", value=self.bot.version)
embed.add_field(name="Python Version:", value=pythonVersion)
embed.add_field(name="Discord.Py Version", value=dpyVersion)
embed.add_field(name="Total Guilds:", value=serverCount)
embed.add_field(name="Total Users:", value=memberCount)
embed.add_field(name="Bot Made By:", value="<@448740493468106753>")
embed.set_footer(text=f"{message.guild.name} | {self.bot.user.name}")
embed.set_author(name=self.bot.user.name,
icon_url=self.bot.user.avatar.url)
embed.set_thumbnail(url=self.bot.user.avatar.url)
        await ctx.channel.send(embed=embed)
@commands.command(name="userinfo", aliases=["ui", "memberinfo", "mi", "whois"])
@commands.guild_only()
async def info_user(self, ctx, member: Optional[discord.Member]):
"""
        Show information about a user (defaults to the command author).
"""
member1 = member or ctx.author
embed = discord.Embed(title="Member Information",
color=discord.Color.blurple(),
timestamp=datetime.utcnow())
embed.add_field(name="ID", value=f"{member1.id}", inline=False)
embed.add_field(
name="Name", value=f"{member1.name}#{member1.discriminator}")
embed.add_field(name="Top role", value=f"{member1.top_role.mention}")
embed.add_field(name="status",
value=f"{str(member1.activity.type).split('.') if member1.activity else 'N/A'} {member1.activity.name if member1.activity else ''}")
embed.add_field(
name="created at", value=f"{member1.created_at.strftime('%d/%m/%y %H:%M:%S')}")
embed.add_field(
name="Joined at", value=f"{member1.joined_at.strftime('%d/%m/%y %H:%M:%S')}")
embed.add_field(name="Boosted?", value=f"{member1.premium_since}")
await ctx.reply(embed=embed)
@commands.command(name="channelstats", aliases=["cs"])
@commands.guild_only()
async def channel_stats(self, ctx, channel: discord.TextChannel = None):
"""
        Show statistics for the given text channel (defaults to the current one).
"""
channel = channel or ctx.channel
embed = discord.Embed(
title=f"Stats for **{channel.name}**",
description=f"{'Category: {}'.format(channel.category.name) if channel.category else 'This channel is not in a category'}",
color=discord.Color.blurple(),
)
embed.add_field(name="Channel Guild",
value=ctx.guild.name, inline=False)
embed.add_field(name="Channel Id", value=channel.id, inline=False)
embed.add_field(
name="Channel Topic",
value=f"{channel.topic if channel.topic else 'No topic.'}",
inline=False,
)
embed.add_field(name="Channel Position",
value=channel.position, inline=False)
embed.add_field(
name="Channel Slowmode Delay", value=channel.slowmode_delay, inline=False
)
embed.add_field(name="Channel is nsfw?",
value=channel.is_nsfw(), inline=False)
embed.add_field(name="Channel is news?",
value=channel.is_news(), inline=False)
embed.add_field(
name="Channel Creation Time", value=channel.created_at, inline=False
)
embed.add_field(
name="Channel Permissions Synced",
value=channel.permissions_synced,
inline=False,
)
embed.add_field(name="Channel Hash", value=hash(channel), inline=False)
await ctx.message.delete()
await ctx.send(embed=embed)
@commands.command(name="serverinfo", aliases=["guildinfo", "si", "gi"])
@commands.guild_only()
async def server_info(self, ctx):
embed = discord.Embed(title="Server information",
color=discord.Color.blurple(),
timestamp=datetime.utcnow())
embed.set_thumbnail(url=ctx.guild.icon.url)
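        # Count members by presence: online, idle, dnd, offline (in that order)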
statuses = [len(list(filter(lambda m: str(m.status) == "online", ctx.guild.members))),
len(list(filter(lambda m: str(m.status)
== "idle", ctx.guild.members))),
len(list(filter(lambda m: str(m.status) == "dnd", ctx.guild.members))),
len(list(filter(lambda m: str(m.status) == "offline", ctx.guild.members)))]
fields = [("Owner & owner id", f"{ctx.guild.owner}, {ctx.guild.owner.id}", False),
("Server ID", ctx.guild.id, True),
("Created at", ctx.guild.created_at.strftime(
"%d/%m/%Y %H:%M:%S"), True),
("Region", ctx.guild.region, True),
("Members", len(ctx.guild.members), True),
("Humans", len(list(filter(lambda m: not m.bot, ctx.guild.members))), True),
("Bots", len(list(filter(lambda m: m.bot, ctx.guild.members))), True),
("Banned members", len(await ctx.guild.bans()), True),
("Statuses",
f"🟢 {statuses[0]} 🟠 {statuses[1]} 🔴 {statuses[2]} ⚪ {statuses[3]}", True),
("Text channels", len(ctx.guild.text_channels), True),
("Voice channels", len(ctx.guild.voice_channels), True),
("Categories", len(ctx.guild.categories), True),
("Roles", len(ctx.guild.roles), True),
("Invites", len(await ctx.guild.invites()), True),
("\u200b", "\u200b", True)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Stats(bot))
| python |
import urllib.request
import sys
import chardet
from html.parser import HTMLParser
from datetime import datetime
pikabuUrl = 'http://pikabu.ru/top50_comm.php'
startTag = 'profile_commented'
endTag = 'b-sidebar-sticky'
newsTag = 'a'
classTag = 'class'
headers = []
links = []
class MyHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.readData = False
self.weAreIn = False
def handle_starttag(self, tag, attrs):
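        # Enter the commented-posts block when its marker class is seen; once inside,
        # record each link and the header text that follows it.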
for attr in attrs:
if attr[0] == classTag:
if attr[1] == startTag:
self.weAreIn = True
            # Record only the link target (href), not other attribute values
            if tag == newsTag and self.weAreIn and attr[0] == 'href':
                links.append(attr[1])
                self.readData = True
def handle_data(self, data):
if self.readData:
headers.append(data)
self.weAreIn = False
self.readData = False
def proceed():
request = urllib.request.urlopen(pikabuUrl)
content = request.read()
encoding = chardet.detect(content)['encoding']
print('Encoding Website: ' + str(encoding))
print('Encoding Console: ' + str(sys.stdout.encoding))
html = content.decode(encoding)
parser = MyHTMLParser()
parser.feed(html)
def write():
    now = datetime.now()
separator = '-'
timestring = str(now.hour) + separator + str(now.minute) + separator + str(now.second) + separator + str(now.day) + separator +str(now.month) + separator + str(now.year)
filename = str("pikabu " + timestring + '.txt')
outputFile = open(filename, "a")
counter = 1
    for header, link in zip(headers, links):
        finalstr = str(counter) + '. ' + header + ' : ' + link
        outputFile.write(finalstr + "\n")
        counter += 1
        print(finalstr)
outputFile.close()
print ("Saved to: " + filename)
print ("Pikabu Top 50 Comments")
proceed()
write()
input("Press Enter To Exit") | python |
"""
Recipes available to data with tags ['F2', 'IMAGE', 'CAL', 'FLAT']
Default is "makeProcessedFlat".
"""
recipe_tags = {'F2', 'IMAGE', 'CAL', 'FLAT'}
# TODO: This recipe needs serious fixing to be made meaningful to the user.
def makeProcessedFlat(p):
"""
This recipe calls a selection primitive, since K-band F2 flats only have
lamp-off frames, and so need to be treated differently.
Parameters
----------
p : PrimitivesF2 object
A primitive set matching the recipe_tags.
"""
p.prepare()
p.addDQ()
p.addVAR(read_noise=True)
#p.nonlinearityCorrect()
p.ADUToElectrons()
p.addVAR(poisson_noise=True)
p.addToList(purpose='forFlat')
p.getList(purpose='forFlat')
p.makeLampFlat()
p.normalizeFlat()
p.thresholdFlatfield()
p.storeProcessedFlat()
return
_default = makeProcessedFlat
| python |
"""
Notifications
--------------------------------------------
.. NOTE::
Coming soon 🛠
""" | python |