content (string, 0–894k chars) | type (2 distinct values)
---|---
import unittest
from datetime import datetime
from pyopenrec.comment import Comment
class TestComment(unittest.TestCase):
c = Comment()
def test_get_comment(self):
dt = datetime(2021, 12, 21, 0, 0, 0)
data = self.c.get_comment("n9ze3m2w184", dt)
self.assertEqual(200, data["status"])
self.assertIsNotNone(data["url"])
self.assertIsNotNone(data["data"])
def test_get_recent_comment(self):
data = self.c.get_recent_comment("n9ze3m2w184")
self.assertEqual(200, data["status"])
self.assertIsNotNone(data["url"])
self.assertIsNotNone(data["data"])
def test_get_vod_comment(self):
data = self.c.get_vod_comment("e2zw69jmw8o")
self.assertEqual(200, data["status"])
self.assertIsNotNone(data["url"])
self.assertIsNotNone(data["data"])
if __name__ == "__main__":
unittest.main()
|
python
|
from numpy.testing import run_module_suite
import time
import random
import skimage.graph.heap as heap
def test_heap():
_test_heap(100000, True)
_test_heap(100000, False)
def _test_heap(n, fast_update):
# generate random numbers with duplicates
random.seed(0)
a = [random.uniform(1.0, 100.0) for i in range(n // 2)]
a = a + a
t0 = time.perf_counter()
# insert in heap with random removals
if fast_update:
h = heap.FastUpdateBinaryHeap(128, n)
else:
h = heap.BinaryHeap(128)
for i in range(len(a)):
h.push(a[i], i)
if a[i] < 25:
# double-push same ref sometimes to test fast update codepaths
h.push(2 * a[i], i)
if 25 < a[i] < 50:
# pop some to test random removal
h.pop()
# pop from heap
b = []
while True:
try:
b.append(h.pop()[0])
except IndexError:
break
t1 = time.perf_counter()
# verify
for i in range(1, len(b)):
assert(b[i] >= b[i - 1])
return t1 - t0
if __name__ == "__main__":
run_module_suite()
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-25 23:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0014_auto_20181116_0716'),
]
operations = [
migrations.AlterField(
model_name='participant',
name='can_receive_invitations',
field=models.BooleanField(default=False, help_text='Check this box to opt-in and receive email invitations for upcoming experiments'),
),
]
|
python
|
import discord
from discord.ext import commands
import chickensmoothie as cs
class Pet(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.guild_only()
async def pet(self, ctx, link: str = ''): # Pet command
pet = await cs.pet(link) # Get pet data
if pet is None:
embed = discord.Embed(title='Pet', description='An error has occurred while processing pet image.', colour=0xff5252) # Create embed
else:
embed = discord.Embed(title=pet['owner'] + '\'s Pet', colour=0x4ba139) # Create embed
embed.set_image(url=pet['image']) # Set image
initial = True
for key, value in pet.items():
if (key == 'owner' or key == 'pps') and initial:
if key == 'pps':
if not value:
continue
else:
embed.add_field(name='PPS', value='[This pet has "PPS". What\'s that?](http://www.chickensmoothie.com/help/pets#pps)', inline=False)
elif key == 'owner':
value = f'[{pet["owner"]}]({pet["owner_link"]})'
embed.add_field(name=key.capitalize(), value=value, inline=False)
else:
if key == 'image' or key == 'owner_link' or key == 'given_link':
pass
else:
if key == 'id':
key = 'Pet ID'
elif key == 'name':
if value == '':
continue
else:
key = 'Pet\'s name'
elif key == 'age':
key = 'Age'
value = f'{value} days'
elif key == 'given':
if value == '':
continue
else:
key = f'Given to {pet["owner"]} by'
value = f'[{pet["given"]}]({pet["given_link"]})'
else:
key = key.capitalize()
embed.add_field(name=key, value=value, inline=True)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Pet(bot))
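# Loading the cog is left to the bot's entry point; a minimal sketch (the module
# path 'cogs.pet' and the TOKEN variable are assumptions, not part of this file):
#     bot = commands.Bot(command_prefix='!')
#     bot.load_extension('cogs.pet')
#     bot.run(TOKEN)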
|
python
|
# ----------------------------------------
# Created on 3rd Apr 2021
# By the Cached Coder
# ----------------------------------------
'''
This script defines the function required
to get the email ids to send the mail to
from the GForms' responses.
Functions:
getAllResponses():
No inputs
Returns emails, names, and a list of whether
each person wishes to receive the mail or not
'''
# ----------------------------------------
import gspread
import json
# ----------------------------------------
# Function to open sheet and get all responses
def getAllResponses():
# Gets secrets
with open('secrets.json', 'r') as fh:
secrets = json.load(fh)
# Load spreadsheet
gc = gspread.service_account(filename='secrets.json')
sh = gc.open_by_key(secrets['key'])
# Get all entries
worksheet = sh.sheet1
emails = worksheet.col_values(2)[1:]
names = worksheet.col_values(3)[1:]
sendMail = worksheet.col_values(4)[1:]
# Turn sendMail from strings to bools
sendMail = [True if i == 'Yes' else False for i in sendMail]
# Return email and names
return emails, names, sendMail
if __name__ == '__main__':
emails, names, sendMail = getAllResponses()
print(emails)
print(names)
print(sendMail)
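# Expected sheet layout, inferred from the column indices above: row 1 is the
# header, column B holds the email address, column C the name, and column D a
# literal "Yes"/"No" answer for whether the person wants to receive the mail.
# 'secrets.json' is assumed to hold the service-account credentials plus a
# "key" field with the spreadsheet ID.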
|
python
|
import numpy as np
from simulation_api import SimulationAPI
from simulation_control.dm_control.utility import EnvironmentParametrization
from simulation_control.dm_control.utility import SensorsReading
# Check if virtual_arm_environment API works with a given step input
sapi = SimulationAPI()
sapi.step(np.array([0, 0, 0, 0, 0], dtype='float64'))
print(sapi.get_sensors_reading().grip_velp)
print(sapi.export_parameters().object_translate)
# Check if virtual_arm_environment API accepts a manual input
t = {
'object_translate': 6.9,
'object_change_slope': 0.0,
'robot_change_finger_length': 0.0,
'robot_change_joint_stiffness': 0.0,
'robot_change_finger_spring_default': 0.0,
'robot_change_thumb_spring_default': 0.0,
'robot_change_friction': 0.0
}
ep = EnvironmentParametrization(t)
sapi.import_parameters(ep)
print(sapi.export_parameters().object_translate)
# Check if virtual_arm_environment API's run function works
x = np.zeros(shape=(10, 5))
def lmao(last_reward: float, step: int, last_step: bool, readings: SensorsReading) -> float:
return 0.5
sapi.specify_reward_function(lmao)
reward = sapi.run(x)
print(reward)
|
python
|
import numpy as np
def pg(obs, num_particles=100, num_mcmc_iter=2000):
T = len(obs)
X = np.zeros([num_mcmc_iter, T])
params = [] # list of SV_params
# YOUR CODE
return X, params
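# One possible structure for the missing body (a sketch, not the reference
# solution): each of the num_mcmc_iter iterations would
#   1. sample new parameters given the current latent path X[m - 1],
#   2. run a conditional particle filter with num_particles particles over
#      t = 0..T-1, keeping X[m - 1] as the reference trajectory that always
#      survives resampling,
#   3. draw one trajectory from the final particle set, store it in X[m],
#      and append the sampled parameters to params.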
|
python
|
# -*- coding: utf-8 -*-
import sys
import numpy as np
import scipy.io.wavfile
def main():
try:
if len(sys.argv) != 5:
raise ValueError("Invalid arguement count");
if sys.argv[1] == "towave":
toWave(sys.argv[2], sys.argv[3], float(sys.argv[4]))
elif sys.argv[1] == "totextwave":
toTextWave(sys.argv[2], sys.argv[3], float(sys.argv[4]))
else:
raise ValueError("Invalid first argument");
except Exception as ex:
printUsage()
print(ex)
def toWave(inputFilePath, outputFilePath, gain):
with open(inputFilePath, "r") as inputFile:
lines = [line.rstrip('\n') for line in inputFile]
size = int(lines[0])
Fs = int(lines[1])
data = np.zeros((size,), dtype=np.int16)
for i in range(size):
data[i] = int(float(lines[i + 2]) * gain)
scipy.io.wavfile.write(outputFilePath, Fs, data)
def toTextWave(inputFilePath, outputFilePath, gain):
Fs, data = scipy.io.wavfile.read(inputFilePath)
if data.shape != (data.size,):
raise ValueError("Multi-channel wave files are not supported")
data = data * gain
with open(outputFilePath, "w") as outputFile:
outputFile.write(str(data.size) + "\n")
outputFile.write(str(Fs) + "\n")
for i in range(data.size):
outputFile.write(str(data[i]) + "\n")
def printUsage():
print("Convert a wave file to a text wave file:")
print("\tpython textwav.py totextwave input_file_path output_file_path gain")
print("Convert a text wave file to a wave file:")
print("\tpython textwav.py towave input_file_path output_file_path gain\n\n")
if __name__ == "__main__":
main()
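# Example input for the "towave" mode (a sketch based on the format read above:
# line 1 = sample count, line 2 = sample rate, then one sample per line).
# This writes a 1-second 440 Hz sine that can be converted with a gain of 32767:
#
#     Fs = 44100
#     t = np.arange(Fs) / Fs
#     samples = 0.25 * np.sin(2 * np.pi * 440 * t)
#     with open("sine.txt", "w") as f:
#         f.write(f"{samples.size}\n{Fs}\n")
#         for s in samples:
#             f.write(f"{s}\n")
#     # then: python textwav.py towave sine.txt sine.wav 32767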
|
python
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
from PIL import Image
from six.moves import zip
from .utils import download_url, check_integrity
import os
from .vision import VisionDataset
class SBU(VisionDataset):
"""`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
Args:
root (string): Root directory of dataset where tarball
``SBUCaptionedPhotoDataset.tar.gz`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
url = "http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = '9aec147b3488753cf758b4d493422285'
def __init__(self, root, transform=None, target_transform=None, download=True):
super(SBU, self).__init__(root, transform=transform,
target_transform=target_transform)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# Read the caption for each photo
self.photos = []
self.captions = []
file1 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')
file2 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_captions.txt')
for line1, line2 in zip(open(file1), open(file2)):
url = line1.rstrip()
photo = os.path.basename(url)
filename = os.path.join(self.root, 'dataset', photo)
if os.path.exists(filename):
caption = line2.rstrip()
self.photos.append(photo)
self.captions.append(caption)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a caption for the photo.
"""
filename = os.path.join(self.root, 'dataset', self.photos[index])
img = Image.open(filename).convert('RGB')
if self.transform is not None:
img = self.transform(img)
target = self.captions[index]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
"""The number of photos in the dataset."""
return len(self.photos)
def _check_integrity(self):
"""Check the md5 checksum of the downloaded tarball."""
root = self.root
fpath = os.path.join(root, self.filename)
if not check_integrity(fpath, self.md5_checksum):
return False
return True
def download(self):
"""Download and extract the tarball, and download each individual photo."""
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
download_url(self.url, self.root, self.filename, self.md5_checksum)
# Extract file
with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar:
tar.extractall(path=self.root)
# Download individual photos
with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, 'dataset'))
except OSError:
# The images point to public images on Flickr.
# Note: Images might be removed by users at anytime.
pass
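# Minimal usage sketch (assumes the tarball and photos can be fetched into
# ./data; any callable on PIL images would work as the transform):
#
#     dataset = SBU(root="./data", download=True)
#     print(len(dataset))
#     img, caption = dataset[0]   # (PIL.Image, str)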
|
python
|
# ===- test_floats.py ----------------------------------*- python -*-===//
#
# Copyright (C) 2021 GrammaTech, Inc.
#
# This code is licensed under the MIT license.
# See the LICENSE file in the project root for license terms.
#
# This project is sponsored by the Office of Naval Research, One Liberty
# Center, 875 N. Randolph Street, Arlington, VA 22203 under contract #
# N68335-17-C-0700. The content of the information does not necessarily
# reflect the position or policy of the Government and no official
# endorsement should be inferred.
#
# ===-----------------------------------------------------------------===//
import argparse
import gtirb
def create_floats(filename: str):
ir = gtirb.IR()
ir.aux_data["AFloat"] = gtirb.AuxData(0.5, "float")
ir.aux_data["ADouble"] = gtirb.AuxData(2.0, "double")
ir.save_protobuf(filename)
def check_for_floats(filename: str) -> bool:
ir = gtirb.IR.load_protobuf(filename)
f = ir.aux_data["AFloat"]
float_success = f.type_name == "float" and f.data == 0.5
g = ir.aux_data["ADouble"]
double_success = g.type_name == "double" and g.data == 2.0
return float_success and double_success
parser = argparse.ArgumentParser()
parser.add_argument("-w", required=False, type=str)
parser.add_argument("-r", required=False, type=str)
if __name__ == "__main__":
args = parser.parse_args()
if args.w:
create_floats(args.w)
elif args.r:
if check_for_floats(args.r):
exit(0)
else:
exit(1)
|
python
|
# Copyright Tom SF Haines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from direct.showbase import DirectObject
from panda3d.core import *
class Profile(DirectObject.DirectObject):
"""Connects to pstats, if pstats is not running on the local computer it will set a copy running regardless."""
def __init__(self,manager,xml):
self.pstats = None
def go(self):
if (PStatClient.connect()==0):
# No pstat server - create it, then try and connect again...
self.pstats = subprocess.Popen(['pstats'])
# Need to give pstats some time to warm up - use a do-later task...
def tryAgain(task):
PStatClient.connect()
taskMgr.doMethodLater(0.5,tryAgain,'pstats again')
def reload(self,manager,xml):
pass
def destroy(self):
if self.pstats is not None:
self.pstats.kill()
|
python
|
import json
import numpy as np
import boto3
import scipy
import scipy.sparse
from io import BytesIO
import os
ACCESS_KEY = os.environ['ACCESS_KEY']
SECRET_ACCESS_KEY = os.environ['SECRET_ACCESS_KEY']
def getData():
BUCKET = 'personal-bucket-news-ranking'
client = boto3.client('s3',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_ACCESS_KEY
)
FILE_TO_READ = 'csr_articles.npz'
result = client.get_object(Bucket=BUCKET, Key=FILE_TO_READ)
word_articles = scipy.sparse.load_npz(BytesIO(result["Body"].read()))
FILE_TO_READ = 'word_emb.npy'
result = client.get_object(Bucket=BUCKET, Key=FILE_TO_READ)
word_emb = np.load(BytesIO(result["Body"].read()))
FILE_TO_READ = 'word_bias.npy'
result = client.get_object(Bucket=BUCKET, Key=FILE_TO_READ)
word_bias = np.load(BytesIO(result["Body"].read()))
FILE_TO_READ = 'reversed_word_ids.json'
result = client.get_object(Bucket=BUCKET, Key=FILE_TO_READ)
id_to_word = json.loads(result["Body"].read().decode())
FILE_TO_READ = 'mapped_dataset.json'
result = client.get_object(Bucket=BUCKET, Key=FILE_TO_READ)
real_data = json.loads(result["Body"].read().decode())
return word_articles, word_emb, word_bias, id_to_word, real_data
def lambda_handler(event, context):
print(ACCESS_KEY)
publication_emb = np.asarray([1.0440499, 1.0030843, 1.0340449, 0.992087, 1.0509816,
1.0315005, -1.0493797, -1.0198538, 0.9712321, -1.026394,
-0.9687971, 1.0592866, -1.0200703, -1.0423145, 0.9929519,
1.0220934, 1.021279, -1.0265925, 0.9601833, 0.9763889,
1.0109168, -0.9728226, 0.97199583, -1.0237931, -0.9996001,
0.9932069, 0.97966635, -0.98893607, -0.9876815, -0.98812914,
-0.9625895, 0.99879754, 0.9876508, -0.9581506, -0.95436096,
-0.9601925, -1.0134513, -0.98763955, 0.98665, -1.0140482,
1.004904, 0.9894275, -1.0044671, -0.9839679, -0.97082543,
-0.9798079, 0.9926766, -0.97317344, 0.9797, -0.97642475,
-0.99420726, -0.9972062, -1.0104703, 1.0575777, 0.9957696,
-1.0413874, -1.0056863, -1.0151271, -0.99969465, 0.97463423,
-0.98398715, -1.0211866, -1.0128828, -1.0024365, -0.9800189,
1.0457181, 1.0155835, -1.036794, -1.013707, -1.0498024,
-1.0252678, -1.0388161, -0.97501564, 0.97687274, 0.97906756,
1.0536852, 1.0590494, -0.96917725, 1.0247189, -0.9818878,
-1.0417286, -1.0204054, -1.0285249, -1.0329671, 0.9705739,
0.96375024, 0.9891868, 0.9892464, 1.039075, 1.0042666,
0.9786834, 1.0199072, 0.98080486, 0.9698635, -0.99322844,
-0.95841753, -0.99150276, 0.97394156, 0.9976019, -1.0375009],
dtype=np.float32)
publication_bias = 0.99557
publication_emb[1] = event['a']
publication_emb[5] = event['b']
publication_emb[17] = event['c']
publication_emb[34] = event['d']
publication_emb[67] = event['e']
print(publication_emb)
word_articles, word_emb, word_bias, id_to_word, real_data = getData()
print("Data loaded successfully!")
article_embeddings = word_articles.dot(word_emb)
emb_times_publication = np.dot(article_embeddings, publication_emb.reshape(100,1))
article_bias = word_articles.dot(word_bias)
product_with_bias = emb_times_publication + article_bias
word_counts = word_articles.sum(axis=1).reshape(word_articles.shape[0], 1)
final_logits = np.divide(product_with_bias, word_counts) + float(publication_bias)
indices = final_logits.argsort(axis=0)[-75:].reshape(75)
word_logits = np.dot(word_emb, publication_emb.reshape(100,1)) + word_bias
top_articles = word_articles[indices.tolist()[0]]
broadcasted_words_per_article = top_articles.toarray() * word_logits.T
sorted_word_indices = broadcasted_words_per_article.argsort(axis=1)
return_articles = []
i = 0
for idx in indices.tolist()[0]:
current_article = real_data[int(idx)]
current_article['logit'] = float(final_logits[int(idx)])
current_sorted_words = sorted_word_indices[i]
top_words = []
least_words = []
for top_word in current_sorted_words[-10:]:
word = id_to_word[str(top_word)]
top_words.append(word)
for least_word in current_sorted_words[:10]:
word = id_to_word[str(least_word)]
least_words.append(word)
current_article['top_words'] = top_words
current_article['least_words'] = least_words
return_articles.append(current_article)
i += 1
ordered_return_articles = return_articles[::-1]
response = {
"statusCode": 200,
"body": json.dumps(ordered_return_articles)
}
return response
if __name__ == "__main__":
test_event = {
'a': 5,
'b': 6,
'c': 100,
'd': 12,
'e': -123
}
print(lambda_handler(test_event, ''))
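# Scoring summary: with A the sparse (articles x words) count matrix, W the word
# embedding matrix, b the word bias vector and p the publication embedding built
# above, each article's logit is (A @ W @ p + A @ b) / (words in article) +
# publication_bias. The 75 highest-scoring articles are returned, each annotated
# with the ten words contributing most and least to its score (via the per-word
# logits W @ p + b weighted by the article's word counts).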
|
python
|
B = input().strip()
B1 = ''
for b in B:
if b in ['X', 'L', 'C']:
B1 += b
else:
break
if B1 == 'LX':
B1 = 'XL'
B2 = B[len(B1):]
if B2 == 'VI':
B2 = 'IV'
elif B2 == 'I' and B1.endswith('X'):
B1 = B1[:-1]
B2 = 'IX'
if B1 == 'LX':
B1 = 'XL'
print(B1+B2)
|
python
|
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django.conf import settings
import pymongo
from . import permissions
@api_view(['GET'])
def root(request, **kwargs):
permitted_user_types = ['interviewer']
if permissions.check(request, permitted_user_types) != permissions.PASS:
return Response(
{'error': 'Permission denied'},
status.HTTP_403_FORBIDDEN
)
client = pymongo.MongoClient()
db = client[settings.DB_NAME]
token = request.GET.get('token')
cursor = db.users.find({'token': token})
room_cursor = db.rooms.find({'interviewer': cursor[0]['username']})
if room_cursor.count() == 0:
return Response(
{
'error': 'No room found.'
},
status.HTTP_400_BAD_REQUEST
)
room_id = room_cursor[0]['id']
return Response(
{'roomId': room_id},
status.HTTP_200_OK
)
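# Hypothetical request/response for this view (the URL routing is an assumption):
#     GET .../?token=<interviewer token>
#     200 {"roomId": "<id>"}   or   400 {"error": "No room found."}
#     403 {"error": "Permission denied"} when the token does not belong to an interviewer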
|
python
|
import unittest
import sys
import inspect
from unittest.mock import Mock
from io import StringIO
from math import ceil
from damage import Damage
from classes import Paladin
from spells import PaladinSpell
from models.spells.loader import load_paladin_spells_for_level
class PaladinTests(unittest.TestCase):
def setUp(self):
self.name = "Netherblood"
self.level = 3
self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10)
def test_init(self):
""" The __init__ should load/save all the spells for the Paladin"""
spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)]
self.assertNotEqual(len(self.dummy.learned_spells), 0)
for spell in spells:
self.assertIn(spell.name, self.dummy.learned_spells)
char_spell = self.dummy.learned_spells[spell.name]
# find the largest rank in our spells list (the char has the highest rank only)
max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank
self.assertEqual(char_spell.rank, max_rank)
def test_leave_combat(self):
"""
Except the normal behaviour, leave_combat should remove the SOR buff from the pally
and reset his spell cds
"""
self.dummy._in_combat = True
self.dummy.SOR_ACTIVE = True
for spell in self.dummy.learned_spells.values():
spell._cooldown_counter = 100
self.assertTrue(self.dummy.is_in_combat())
self.dummy.leave_combat()
self.assertFalse(self.dummy.is_in_combat())
self.assertFalse(self.dummy.SOR_ACTIVE)
# All cooldowns should be reset
self.assertTrue(all([spell._cooldown_counter == 0 for spell in self.dummy.learned_spells.values()]))
def test_reset_spell_cooldowns(self):
""" The reset_spell_cooldowns goes through every spell and resets its CD"""
for spell in self.dummy.learned_spells.values():
spell._cooldown_counter = 100
self.assertTrue(all([spell._cooldown_counter != 0 for spell in self.dummy.learned_spells.values()]))
self.dummy.reset_spell_cooldowns()
self.assertTrue(all([spell._cooldown_counter == 0 for spell in self.dummy.learned_spells.values()]))
def test_level_up(self):
""" Except the normal behaviour, it should learn new spells for the character """
# empty the learned spells, it's stored as a static variable, which is not good practice but doesn't hurt in the game
Paladin.learned_spells = {}
pl = Paladin(name="fuck a nine to five")
spells_to_learn = [spell.name for spell in load_paladin_spells_for_level(pl.level + 1)]
for spell in spells_to_learn:
self.assertNotIn(spell, pl.learned_spells)
pl._level_up()
for spell in spells_to_learn:
self.assertIn(spell, pl.learned_spells)
def test_level_up_to_level(self):
""" Except the normal behaviour, it should learn new spells for the character """
# empty the learned spells, it's stored as a static variable, which is not good practice but doesn't hurt in the game
Paladin.learned_spells = {}
pl = Paladin(name="fuck a nine to five")
to_level = 4
spells_to_learn = [spell for level in range(2, to_level + 1) for spell in load_paladin_spells_for_level(level)]
for spell in spells_to_learn:
has_not_learned_spell = spell.name not in pl.learned_spells
has_smaller_rank = spell.rank > pl.learned_spells[spell.name].rank if not has_not_learned_spell else False
self.assertTrue(has_not_learned_spell or has_smaller_rank)
pl._level_up(to_level=to_level)
for spell in spells_to_learn:
self.assertIn(spell.name, pl.learned_spells)
def test_lookup_and_handle_new_spells(self):
""" Should look up the available spells for our level and learn them or update our existing ones"""
Paladin.learned_spells = {}
pl = Paladin(name="fuck a nine to five")
print(pl.learned_spells)
pl.level = 3
spells_to_learn = [spell for spell in load_paladin_spells_for_level(pl.level)]
for spell in spells_to_learn:
has_not_learned_spell = spell.name not in pl.learned_spells
has_smaller_rank = spell.rank > pl.learned_spells[spell.name].rank if not has_not_learned_spell else False
self.assertTrue(has_not_learned_spell or has_smaller_rank)
pl._lookup_and_handle_new_spells()
for spell in spells_to_learn:
self.assertIn(spell.name, pl.learned_spells)
def test_learn_new_spell(self):
""" Given a PaladinSpell, add it to the learned_spells dictionary"""
spell = PaladinSpell(name="Too_Alive", rank=5)
expected_message = f'You have learned a new spell - {spell.name}'
self.assertNotIn(spell.name, self.dummy.learned_spells)
try:
output = StringIO()
sys.stdout = output
self.dummy.learn_new_spell(spell)
self.assertIn(expected_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
self.assertIn(spell.name, self.dummy.learned_spells)
def test_lookup_available_spells_to_learn(self):
""" It's a generator function returning a spell that can be learnt for the level """
lev = 3
expected_spells = load_paladin_spells_for_level(lev)
generator = self.dummy._lookup_available_spells_to_learn(lev)
self.assertTrue(inspect.isgenerator(generator))
for spell in expected_spells:
self.assertEqual(vars(next(generator)), vars(spell))
def test_update_spell(self):
""" The update_spell() function updates a spell we already have learned"""
f_spell = PaladinSpell('Spell', rank=1)
s_spell = PaladinSpell('Spell', rank=2)
expected_message = f'Spell {f_spell.name} has been updated to rank {s_spell.rank}!'
self.dummy.learn_new_spell(f_spell)
try:
output = StringIO()
sys.stdout = output
self.dummy.update_spell(s_spell)
self.assertIn(expected_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
# assert that it updated the rank
self.assertEqual(self.dummy.learned_spells[s_spell.name].rank, s_spell.rank)
self.assertGreater(self.dummy.learned_spells[s_spell.name].rank, f_spell.rank)
def test_spell_handler_sor(self):
"""
The spell handler takes spell names and casts the appropriate function
It might work in a bad way since it's not too testable
"""
unsuccessful_message = 'Unsuccessful cast'
sor_success_msg = 'SOR_CASTED'
sor_command_name = 'sor'
# Mock the function that should get called
self.dummy.spell_seal_of_righteousness = lambda x: sor_success_msg
try:
output = StringIO()
sys.stdout = output
result = self.dummy.spell_handler(sor_command_name, None)
self.assertNotIn(unsuccessful_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
# Assert that it called the spell_seal_of_righteousness function
self.assertEqual(result, sor_success_msg)
def test_spell_handler_fol(self):
unsuccessful_message = 'Unsuccessful cast'
fol_success_msg = 'FOL_CASTED'
fol_command_name = 'fol'
# Mock the function that should get called
self.dummy.spell_flash_of_light = lambda x: fol_success_msg
try:
output = StringIO()
sys.stdout = output
result = self.dummy.spell_handler(fol_command_name, None)
self.assertNotIn(unsuccessful_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
# Assert that it called the spell_flash_of_light function
self.assertEqual(result, fol_success_msg)
def test_spell_handler_ms(self):
unsuccessful_message = 'Unsuccessful cast'
ms_success_msg = 'MS_CASTED'
ms_command_name = 'ms'
# Mock the function that should get called
self.dummy.spell_melting_strike = lambda target=None, spell=None: ms_success_msg
try:
output = StringIO()
sys.stdout = output
result = self.dummy.spell_handler(ms_command_name, None)
self.assertNotIn(unsuccessful_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
# Assert that it called the spell_melting_strike function
self.assertEqual(result, ms_success_msg)
def test_spell_handler_invalid_spell(self):
unsuccessful_message = 'Unsuccessful cast'
invalid_command = 'WooHoo'
try:
output = StringIO()
sys.stdout = output
result = self.dummy.spell_handler(invalid_command, None)
self.assertIn(unsuccessful_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
self.assertFalse(result)
def test_spell_seal_of_righteousness(self):
sor: PaladinSpell = self.dummy.learned_spells[Paladin.KEY_SEAL_OF_RIGHTEOUSNESS]
expected_message = f'{self.dummy.name} activates {Paladin.KEY_SEAL_OF_RIGHTEOUSNESS}!'
expected_mana = self.dummy.mana - sor.mana_cost
self.assertFalse(self.dummy.SOR_ACTIVE)
self.assertEqual(self.dummy.SOR_TURNS, 0)
try:
output = StringIO()
sys.stdout = output
self.dummy.spell_seal_of_righteousness(sor)
self.assertIn(expected_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
self.assertTrue(self.dummy.SOR_ACTIVE)
self.assertEqual(self.dummy.SOR_TURNS, 3)
self.assertEqual(self.dummy.mana, expected_mana)
def test_spell_seal_of_righteousness_attack(self):
sor: PaladinSpell = self.dummy.learned_spells[Paladin.KEY_SEAL_OF_RIGHTEOUSNESS]
expected_damage = sor.damage1
self.dummy.spell_seal_of_righteousness(sor)
self.assertTrue(self.dummy.SOR_ACTIVE)
self.assertEqual(self.dummy.SOR_TURNS, 3)
result = self.dummy._spell_seal_of_righteousness_attack()
self.assertEqual(result, expected_damage)
self.assertEqual(self.dummy.SOR_TURNS, 2)
def test_spell_seal_of_righteousness_attack_fade(self):
sor: PaladinSpell = self.dummy.learned_spells[Paladin.KEY_SEAL_OF_RIGHTEOUSNESS]
expected_message = f'{Paladin.KEY_SEAL_OF_RIGHTEOUSNESS} has faded from {self.dummy.name}'
self.dummy.spell_seal_of_righteousness(sor)
self.assertTrue(self.dummy.SOR_ACTIVE)
self.dummy.SOR_TURNS = 1
self.dummy._spell_seal_of_righteousness_attack()
self.assertEqual(self.dummy.SOR_TURNS, 0)
self.assertTrue(self.dummy.SOR_ACTIVE)
# Should fade now and not do any damage on turn end
try:
output = StringIO()
sys.stdout = output
self.dummy.end_turn_update()
self.assertIn(expected_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
self.assertFalse(self.dummy.SOR_ACTIVE)
def test_spell_flash_of_light(self):
import heal
# Nullify the chance to double heal for consistent testing
heal.DOUBLE_HEAL_CHANCE = 0
fol: PaladinSpell = self.dummy.learned_spells[Paladin.KEY_FLASH_OF_LIGHT]
expected_message = f'{self.dummy.name} activates {Paladin.KEY_FLASH_OF_LIGHT}!'
expected_mana = self.dummy.mana - fol.mana_cost
orig_health = 1
self.dummy.health = orig_health
expected_message = f'{fol.name} healed {self.dummy.name} for {fol.heal1}.'
try:
output = StringIO()
sys.stdout = output
self.dummy.spell_flash_of_light(fol)
self.assertIn(expected_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
self.assertEqual(self.dummy.mana, expected_mana)
self.assertEqual(self.dummy.health, orig_health + fol.heal1)
def test_spell_flash_of_light_overheal(self):
import heal
# Nullify the chance to double heal for consistent testing
heal.DOUBLE_HEAL_CHANCE = 0
fol: PaladinSpell = self.dummy.learned_spells[Paladin.KEY_FLASH_OF_LIGHT]
expected_message = f'{fol.name} healed {self.dummy.name} for 0.00 ({fol.heal1:.2f} Overheal).'
expected_mana = self.dummy.mana - fol.mana_cost
orig_health = self.dummy.health
self.dummy.health = orig_health
try:
output = StringIO()
sys.stdout = output
self.dummy.spell_flash_of_light(fol)
self.assertIn(expected_message, output.getvalue())
finally:
sys.stdout = sys.__stdout__
self.assertEqual(self.dummy.mana, expected_mana)
self.assertEqual(self.dummy.health, orig_health) # should have only overhealed
def test_spell_melting_strike(self):
ms: PaladinSpell = self.dummy.learned_spells[Paladin.KEY_MELTING_STRIKE]
expected_mana = self.dummy.mana - ms.mana_cost
expected_message2 = 'Took attack'
expected_message3 = 'Took buff'
take_attack = lambda x, y: print('Took attack')
add_buff = lambda x: print('Took buff')
target = Mock(name="All",
take_attack=take_attack,
add_buff=add_buff)
expected_message = f'{ms.name} damages {target.name} for {ms.damage1:.2f} physical damage!'
try:
output = StringIO()
sys.stdout = output
result = self.dummy.spell_melting_strike(ms, target)
self.assertIn(expected_message, output.getvalue())
self.assertIn(expected_message2, output.getvalue())
self.assertIn(expected_message3, output.getvalue())
finally:
sys.stdout = sys.__stdout__
self.assertTrue(result)
self.assertEqual(expected_mana, self.dummy.mana)
def test_get_auto_attack_damage(self):
""" Applies damage reduction in regard to level and adds the sor_damage
It attaches the sor_damage to the magic_dmg in the Damage class and
returns the sor_dmg explicitly for easy printing"""
sor: PaladinSpell = self.dummy.learned_spells[Paladin.KEY_SEAL_OF_RIGHTEOUSNESS]
self.dummy.spell_seal_of_righteousness(sor)
received_dmg, sor_dmg = self.dummy.get_auto_attack_damage(self.dummy.level)
self.assertTrue(isinstance(received_dmg, Damage))
self.assertTrue(self.dummy.min_damage <= received_dmg.phys_dmg <= self.dummy.max_damage)
self.assertEqual(received_dmg.magic_dmg, sor.damage1)
self.assertEqual(sor_dmg, sor.damage1)
def test_get_auto_attack_damage_higher_level(self):
""" Applies damage reduction in regard to level and adds the sor_damage
It attaches the sor_damage to the magic_dmg in the Damage class and
returns the sor_dmg explicitly for easy printing"""
sor: PaladinSpell = self.dummy.learned_spells[Paladin.KEY_SEAL_OF_RIGHTEOUSNESS]
level_diff = 2
prc_mod = (level_diff * 0.1)
level = self.dummy.level + level_diff
expected_sor_dg = sor.damage1 - (sor.damage1 * prc_mod)
expected_min_dmg = int(self.dummy.min_damage) - (self.dummy.min_damage * prc_mod)
expected_max_dmg = int(self.dummy.max_damage) - (self.dummy.max_damage * prc_mod)
self.dummy.spell_seal_of_righteousness(sor)
received_dmg, sor_dmg = self.dummy.get_auto_attack_damage(level)
self.assertTrue(isinstance(received_dmg, Damage))
self.assertTrue(expected_min_dmg <= received_dmg.phys_dmg <= expected_max_dmg)
self.assertEqual(received_dmg.magic_dmg, expected_sor_dg)
self.assertEqual(sor_dmg, expected_sor_dg)
def test_attack(self):
expected_message2 = 'Took Attack!'
expected_message3 = 'Get_take_attack_damage_repr called!'
victim = Mock(level=self.dummy.level, take_attack=lambda x, y: print(expected_message2),
get_take_attack_damage_repr=lambda x,y: print(expected_message3))
expected_message = f'{self.dummy.name} attacks {victim.name}'
try:
output = StringIO()
sys.stdout = output
self.dummy.attack(victim)
self.assertIn(expected_message, output.getvalue())
self.assertIn(expected_message2, output.getvalue())
self.assertIn(expected_message3, output.getvalue())
finally:
sys.stdout = sys.__stdout__
def test_get_class(self):
""" get_class() returns the class name as a string in lowercase """
expected_result = 'paladin'
self.assertEqual(self.dummy.get_class(), expected_result)
if __name__ == '__main__':
unittest.main()
|
python
|
# -*- coding: UTF-8 -*-
import pika
if __name__ == '__main__':
connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()
channel.exchange_declare(exchange="tang",type="fanout")
message = "You are awsome!"
for i in range(0, 100): # 循环100次发送消息
channel.basic_publish(exchange="tang", routing_key='', body=message + " " + str(i),)
print "sending ", message
|
python
|
import torch.nn as nn
import config
from utils.manager import PathManager
class BaseModel(nn.Module):
def __init__(self,
model_params: config.ParamsConfig,
path_manager: PathManager,
loss_func,
data_source,
**kwargs):
super(BaseModel, self).__init__()
self.LossFunc = loss_func
self.ModelParams = model_params
self.TaskParams = None
self.ImageW = None
self.TaskType = ""
self.DataSource = data_source
self.FusedFeatureDim = None
self.Fusion = None # buildFusion(self, model_params)
self.SeqEmbedPipeline = []
self.ImgEmbedPipeline = []
def _seqEmbed(self, x, lens=None):
for worker in self.SeqEmbedPipeline:
x = worker(x, lens)
return x
def _imgEmbed(self, x):
for worker in self.ImgEmbedPipeline:
x = worker(x)
return x
def _extractEpisodeTaskStruct(self,
support_seqs, query_seqs,
support_imgs, query_imgs):
assert (support_seqs is None) ^ (query_seqs is not None), \
f"[extractEpisodeTaskStruct] 支持集和查询集的序列数据存在性不一致: support: {support_seqs is None}, query:{query_seqs is None}"
assert (support_imgs is None) ^ (query_imgs is not None), \
f"[extractEpisodeTaskStruct] 支持集和查询集的图像数据存在性不一致: support: {support_imgs is None}, query:{query_imgs is None}"
# TODO: 支持更多task的输入类型来提取任务结构参数
if support_seqs is not None:
k = support_seqs.size(1)
n = support_seqs.size(0)
elif support_imgs is not None:
k = support_imgs.size(1)
n = support_imgs.size(0)
else:
assert False, "[extractEpisodeTaskStruct] 序列和图像的支持集都为None,无法提取n,k"
if query_seqs is not None:
qk = query_seqs.size(0)
elif query_imgs is not None:
qk = query_imgs.size(0)
else:
assert False, "[extractEpisodeTaskStruct] 序列和图像的查询集都为None,无法提取qk"
# support img shape: [n, k, 1, w, w]
# query img shape: [qk, 1, w, w]
if support_imgs is not None:
w = support_imgs.size(3)
elif query_imgs is not None:
w = query_imgs.size(2)
else:
w = None
self.TaskParams = config.EpisodeTaskConfig(k, n, qk)
self.ImageW = w
# Changed 3.20: no longer provide a per-class view of the support set; output the whole support batch directly
def embed(self,
support_seqs, query_seqs,
support_lens, query_lens,
support_imgs, query_imgs):
self._extractEpisodeTaskStruct(support_seqs, query_seqs,
support_imgs, query_imgs)
k, n, qk, w = self.TaskParams.k, self.TaskParams.n, self.TaskParams.qk, self.ImageW
# Support/query consistency was already checked while extracting the task structure, so a one-sided check suffices here
if support_seqs is not None:
support_seqs = support_seqs.view(n * k, -1)
support_seqs = self._seqEmbed(support_seqs, support_lens) # .view(n, k, -1) # embed no longer reshapes by default
query_seqs = self._seqEmbed(query_seqs, query_lens)
assert support_seqs.size(1) == query_seqs.size(1), \
"[BaseProtoModel.Embed] Support/query sequences' feature dimension size must match: (%d,%d)" \
% (support_seqs.size(1), query_seqs.size(1))
# Support/query consistency was already checked while extracting the task structure, so a one-sided check suffices here
if support_imgs is not None:
support_imgs = support_imgs.view(n*k, 1, w, w) # single-channel images are assumed by default
support_imgs = self._imgEmbed(support_imgs) # .view(n, k, -1) # embed no longer reshapes by default
query_imgs = self._imgEmbed(query_imgs).squeeze()
assert support_imgs.size(1) == query_imgs.size(1), \
"[BaseProtoModel.Embed] Support/query images' feature dimension size must match: (%d,%d)"\
%(support_imgs.size(1),query_imgs.size(1))
return support_seqs, query_seqs, support_imgs, query_imgs
def forward(self, # forward接受所有可能用到的参数
support_seqs, support_imgs, support_lens, support_labels,
query_seqs, query_imgs, query_lens, query_labels,
loss_func,
**kwargs):
raise NotImplementedError
def name(self):
return "BaseModel"
def test(self, *args, **kwargs):
raise NotImplementedError
def _fuse(self, seq_features, img_features, **kwargs):
return self.Fusion(seq_features, img_features, **kwargs)
def train_state(self, mode=True):
self.TaskType = "Train"
super().train(mode)
def validate_state(self):
self.TaskType = "Validate"
super().eval()
def test_state(self):
self.TaskType = "Test"
super().eval()
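# A concrete subclass would implement forward()/test(): embed the support and
# query sets via embed(), optionally fuse sequence and image features with
# _fuse(), compute per-class scores from the TaskParams (n, k, qk) structure,
# and return the loss from self.LossFunc. The specific metric (prototypes,
# relation scores, etc.) is left to the subclass and is not defined here.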
|
python
|
import pandas as pd
from IPython.display import display_html, Image
import weasyprint as wsp
import matplotlib.pyplot as plt
import os
import math
experiment_pref = 'experiment-log-'
test_file_pref = 'test_file_'
csv_ext = '.csv'
txt_ext = '.txt'
def display_best_values(directory=None):
real_list = []
oracle_list = []
if directory is None:
directory = '/content/CIS-700/results/'
for filename in os.listdir(directory):
if filename.startswith(experiment_pref) and filename.endswith(csv_ext):
fn_split = filename.split(experiment_pref)[1].split(csv_ext)[0].split('-')
if(len(fn_split) == 2):
model = fn_split[0]
training = fn_split[1]
df = pd.read_csv(directory + filename)
best_val_metric_msg = model.capitalize() + '\n\t'
for col in df:
best_val = ''
if col == 'epochs' or col.startswith('Unnamed'):
continue
if col == 'EmbeddingSimilarity':
temp = df.iloc[df[col].argmax()]
else:
temp = df.iloc[df[col].argmin()]
best_val = str(round(temp[col], 4))
epoch = str(round(temp['epochs']))
if(pd.notna(best_val)):
best_val_metric_msg+= col + ': ' + best_val + ' @epoch ' + epoch +'\t'
if training == 'real':
real_list.append(best_val_metric_msg + '\n')
else:
oracle_list.append(best_val_metric_msg + '\n')
print('********************************')
print('\tOracle Training:')
print('********************************')
print(*sorted(oracle_list), sep = "\n")
print('********************************')
print('\tReal Training:')
print('********************************')
print(*sorted(real_list), sep = "\n")
def display_synth_data(directory=None, rows=None):
container = ''
if directory is None:
directory = '/content/CIS-700/results/'
if rows is None:
rows = 5
else:
rows = int(rows)
real_synth_image_path = directory + "real_synth_data.png"
for filename in os.listdir(directory):
if filename.startswith(test_file_pref) and filename.endswith(txt_ext):
fn_split = filename.split(test_file_pref)[1].split(txt_ext)[0].split('_')
if len(fn_split) == 2:
model = fn_split[0]
training = fn_split[1]
df = pd.read_csv(directory + filename, sep="\n", header=None)
df.columns = [model.capitalize() + " " + training.capitalize() + " Synth Data"]
df_styler = df.head(rows).style.set_table_attributes("style='display:inline-block'")
if container != '':
container += '<hr style="width: 400px; margin-left:0;">'
container += df_styler._repr_html_()
if container != '':
file = open(directory + "real_synth_data.html", "w")
file.write(container)
file.close()
display_html(container, raw=True)
'''
#write html as image
html = wsp.HTML(string=container)
html.write_png(real_synth_image_path, optimize_images=False)
display(Image(filename=real_synth_image_path))
#resize image
from PIL import Image
real_synth_image_path = directory + "real_synth_data.png"
img = Image.open(real_synth_image_path)
resized_image = img.resize((1700,1700))
display(resized_image)
'''
def display_metrics(directory=None):
df_list = []
real_df_list = []
oracle_df_list = []
real_labels = []
oracle_labels = []
if directory is None:
directory = '/content/CIS-700/results/'
for filename in os.listdir(directory):
if filename.startswith(experiment_pref) and filename.endswith(csv_ext):
fn_split = filename.split(experiment_pref)[1].split(csv_ext)[0].split('-')
if(len(fn_split) == 2):
model = fn_split[0]
training = fn_split[1]
df = pd.read_csv(directory + filename)
if training == 'real':
df = df.rename(columns={"EmbeddingSimilarity": "EmbSim_" + model.capitalize(), "nll-test": "Nll-Test_" + model.capitalize()})
real_df_list.append(df.set_index('epochs'))
real_labels.append(model)
elif training == 'oracle':
df = df.rename(columns={"EmbeddingSimilarity": "EmbSim_" + model.capitalize(), "nll-test": "Nll-Test_" + model.capitalize(), "nll-oracle": "Nll-Oracle_" + model.capitalize()})
oracle_df_list.append(df.set_index('epochs'))
oracle_labels.append(model)
#TODO Add CFG Training Logic Here
real_results = pd.concat(real_df_list)
oracle_results = pd.concat(oracle_df_list)
# filter results to get separate lists for each metric type under each training
filter_col = [col for col in real_results if col.startswith('EmbSim_') ]
df_list.append(real_results[filter_col])
filter_col = [col for col in real_results if col.startswith('Nll-Test')]
df_list.append(real_results[filter_col])
filter_col = [col for col in oracle_results if col.startswith('EmbSim_')]
df_list.append(oracle_results[filter_col])
filter_col = [col for col in oracle_results if col.startswith('Nll-Test')]
df_list.append(oracle_results[filter_col])
filter_col = [col for col in oracle_results if col.startswith('Nll-Oracle')]
df_list.append(oracle_results[filter_col])
# define number of rows and columns for subplots
nrow = 3
ncol = math.ceil(len(df_list) / nrow)
# make a list of all dataframes
df_title_list = ['Real EmbeddingSimilarites', 'Real NLL-Test', 'Oracle EmbeddingSimilarites', 'Oracle NLL-Test', 'Oracle NLL-Oracle']
# plot counter
count = 0
#build plot
fig, axes = plt.subplots(nrow, ncol)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
for r in range(nrow):
for c in range(ncol):
if count < len(df_list):
df = df_list[count]
if any(col.startswith('EmbSim_') for col in df.columns):
df.columns = df.columns.str.replace(r'^EmbSim_', '', regex=True)
if any(col.startswith('Nll-Test_') for col in df.columns):
df.columns = df.columns.str.replace(r'^Nll-Test_', '', regex=True)
if any(col.startswith('Nll-Oracle_') for col in df.columns):
df.columns = df.columns.str.replace(r'^Nll-Oracle_', '', regex=True)
df.plot(ax=axes[r, c], y=df.columns, kind='line',
title=df_title_list[count], figsize=(20, 10))
count += 1
# save metrics to .png for later use in pdf report
plt.savefig(directory + 'model_metric_charts.png')
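# Typical notebook usage (the default results directory above is assumed):
#     display_best_values()
#     display_synth_data(rows=5)
#     display_metrics()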
|
python
|
'''Collects tweets, embeddings and save to DB'''
from flask_sqlalchemy import SQLAlchemy
from dotenv import load_dotenv
import os
import tweepy
import basilica
from .models import DB, Tweet, User
TWITTER_USERS = ['calebhicks', 'elonmusk', 'rrherr', 'SteveMartinToGo',
'alyankovic', 'nasa', 'sadserver', 'jkhowland', 'austen',
'common_squirrel', 'KenJennings', 'conanobrien',
'big_ben_clock', 'IAM_SHAKESPEARE']
load_dotenv()
API_KEY = os.getenv("API_KEY")
API_SECRET_KEY = os.getenv("API_SECRET_KEY")
BEARER_TOKEN = os.getenv("BEARER_TOKEN")
BASILICA_KEY = os.getenv("BASILICA_KEY")
b = basilica.Connection(BASILICA_KEY)
# Grants authorization
TWITTER_AUTH = tweepy.OAuthHandler(API_KEY, API_SECRET_KEY)
TWITTER = tweepy.API(TWITTER_AUTH)
DB = SQLAlchemy()
user = 'jackblack'
twitter_user = TWITTER.get_user(user)
tweets = twitter_user.timeline(count = 5, exclude_replies=True,
include_rts=False,
tweet_mode = 'extended',)
tweet_text = tweets[0].full_text
embedding = b.embed_sentence(tweet_text, model = 'twitter')
def add_or_update_user(username):
twitter_user = TWITTER.get_user(username)
db_user = (User.query.get(twitter_user.id) or
User(id = twitter_user.id, name = username))
DB.session.add(db_user)
tweets = twitter_user.timeline(count = 3, exclude_replies=True,
include_rts=False,
tweet_mode = 'extended',)
# Get latest tweet ID
if tweets:
db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
embedding = b.embed_sentence(tweet.full_text, model='twitter')
db_tweet = Tweet(id = tweet.id, text=tweet.full_text[:300], embedding=embedding)
db_user.tweets.append(db_tweet)
DB.session.add(db_tweet)
DB.session.commit()
for name in TWITTER_USERS:
try:
twitter_user = TWITTER.get_user(name)
db_user = (User.query.get(twitter_user.id))
# print(twitter_user.id)
tweets = twitter_user.timeline(count=3, exclude_replies=True,
include_rts=False, tweet_mode='extended',
)
# for tweet in tweets:
# print(tweet.text)
except Exception as e:
print(f'Error: {e},\n{name} not found')
else:
DB.session.commit()
def insert_example_user():
for user in TWITTER_USERS[:5]:
add_or_update_user(user)
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['CodeSigningConfigArgs', 'CodeSigningConfig']
@pulumi.input_type
class CodeSigningConfigArgs:
def __init__(__self__, *,
allowed_publishers: pulumi.Input['CodeSigningConfigAllowedPublishersArgs'],
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input['CodeSigningConfigPoliciesArgs']] = None):
"""
The set of arguments for constructing a CodeSigningConfig resource.
:param pulumi.Input['CodeSigningConfigAllowedPublishersArgs'] allowed_publishers: A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below.
:param pulumi.Input[str] description: Descriptive name for this code signing configuration.
:param pulumi.Input['CodeSigningConfigPoliciesArgs'] policies: A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below.
"""
pulumi.set(__self__, "allowed_publishers", allowed_publishers)
if description is not None:
pulumi.set(__self__, "description", description)
if policies is not None:
pulumi.set(__self__, "policies", policies)
@property
@pulumi.getter(name="allowedPublishers")
def allowed_publishers(self) -> pulumi.Input['CodeSigningConfigAllowedPublishersArgs']:
"""
A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below.
"""
return pulumi.get(self, "allowed_publishers")
@allowed_publishers.setter
def allowed_publishers(self, value: pulumi.Input['CodeSigningConfigAllowedPublishersArgs']):
pulumi.set(self, "allowed_publishers", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Descriptive name for this code signing configuration.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input['CodeSigningConfigPoliciesArgs']]:
"""
A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below.
"""
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input['CodeSigningConfigPoliciesArgs']]):
pulumi.set(self, "policies", value)
class CodeSigningConfig(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_publishers: Optional[pulumi.Input[pulumi.InputType['CodeSigningConfigAllowedPublishersArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[pulumi.InputType['CodeSigningConfigPoliciesArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a Lambda Code Signing Config resource. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).
For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html)
## Example Usage
```python
import pulumi
import pulumi_aws as aws
new_csc = aws.lambda_.CodeSigningConfig("newCsc",
allowed_publishers=aws.lambda_.CodeSigningConfigAllowedPublishersArgs(
signing_profile_version_arns=[
aws_signer_signing_profile["example1"]["arn"],
aws_signer_signing_profile["example2"]["arn"],
],
),
policies=aws.lambda_.CodeSigningConfigPoliciesArgs(
untrusted_artifact_on_deployment="Warn",
),
description="My awesome code signing config.")
```
## Import
Code Signing Configs can be imported using their ARN, e.g.
```sh
$ pulumi import aws:lambda/codeSigningConfig:CodeSigningConfig imported_csc arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['CodeSigningConfigAllowedPublishersArgs']] allowed_publishers: A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below.
:param pulumi.Input[str] description: Descriptive name for this code signing configuration.
:param pulumi.Input[pulumi.InputType['CodeSigningConfigPoliciesArgs']] policies: A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CodeSigningConfigArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Lambda Code Signing Config resource. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).
For information about Lambda code signing configurations and how to use them, see [configuring code signing for Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/configuration-codesigning.html)
## Example Usage
```python
import pulumi
import pulumi_aws as aws
new_csc = aws.lambda_.CodeSigningConfig("newCsc",
allowed_publishers=aws.lambda_.CodeSigningConfigAllowedPublishersArgs(
signing_profile_version_arns=[
aws_signer_signing_profile["example1"]["arn"],
aws_signer_signing_profile["example2"]["arn"],
],
),
policies=aws.lambda_.CodeSigningConfigPoliciesArgs(
untrusted_artifact_on_deployment="Warn",
),
description="My awesome code signing config.")
```
## Import
Code Signing Configs can be imported using their ARN, e.g.
```sh
$ pulumi import aws:lambda/codeSigningConfig:CodeSigningConfig imported_csc arn:aws:lambda:us-west-2:123456789012:code-signing-config:csc-0f6c334abcdea4d8b
```
:param str resource_name: The name of the resource.
:param CodeSigningConfigArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CodeSigningConfigArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_publishers: Optional[pulumi.Input[pulumi.InputType['CodeSigningConfigAllowedPublishersArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[pulumi.InputType['CodeSigningConfigPoliciesArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if allowed_publishers is None and not opts.urn:
raise TypeError("Missing required property 'allowed_publishers'")
__props__['allowed_publishers'] = allowed_publishers
__props__['description'] = description
__props__['policies'] = policies
__props__['arn'] = None
__props__['config_id'] = None
__props__['last_modified'] = None
super(CodeSigningConfig, __self__).__init__(
'aws:lambda/codeSigningConfig:CodeSigningConfig',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allowed_publishers: Optional[pulumi.Input[pulumi.InputType['CodeSigningConfigAllowedPublishersArgs']]] = None,
arn: Optional[pulumi.Input[str]] = None,
config_id: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
last_modified: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[pulumi.InputType['CodeSigningConfigPoliciesArgs']]] = None) -> 'CodeSigningConfig':
"""
Get an existing CodeSigningConfig resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['CodeSigningConfigAllowedPublishersArgs']] allowed_publishers: A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the code signing configuration.
:param pulumi.Input[str] config_id: Unique identifier for the code signing configuration.
:param pulumi.Input[str] description: Descriptive name for this code signing configuration.
:param pulumi.Input[str] last_modified: The date and time that the code signing configuration was last modified.
:param pulumi.Input[pulumi.InputType['CodeSigningConfigPoliciesArgs']] policies: A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allowed_publishers"] = allowed_publishers
__props__["arn"] = arn
__props__["config_id"] = config_id
__props__["description"] = description
__props__["last_modified"] = last_modified
__props__["policies"] = policies
return CodeSigningConfig(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowedPublishers")
def allowed_publishers(self) -> pulumi.Output['outputs.CodeSigningConfigAllowedPublishers']:
"""
A configuration block of allowed publishers as signing profiles for this code signing configuration. Detailed below.
"""
return pulumi.get(self, "allowed_publishers")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) of the code signing configuration.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="configId")
def config_id(self) -> pulumi.Output[str]:
"""
Unique identifier for the code signing configuration.
"""
return pulumi.get(self, "config_id")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Descriptive name for this code signing configuration.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> pulumi.Output[str]:
"""
The date and time that the code signing configuration was last modified.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def policies(self) -> pulumi.Output['outputs.CodeSigningConfigPolicies']:
"""
A configuration block of code signing policies that define the actions to take if the validation checks fail. Detailed below.
"""
return pulumi.get(self, "policies")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
python
|
import logging
from contextlib import contextmanager
from unittest import mock
import pytest
import hedwig.conf
from hedwig.backends.base import HedwigPublisherBaseBackend
from hedwig.backends.import_utils import import_module_attr
from hedwig.testing.config import unconfigure
from tests.models import MessageType
try:
# may not be available
from moto import mock_sqs, mock_sns
except ImportError:
pass
def pytest_configure():
logging.basicConfig()
@pytest.fixture
def settings():
"""
Use this fixture to override settings. Changes are automatically reverted
"""
hedwig.conf.settings._ensure_configured()
original_module = hedwig.conf.settings._user_settings
class Wrapped:
# default to the original module, but allow tests to setattr which would override
def __getattr__(self, name):
return getattr(original_module, name)
unconfigure()
hedwig.conf.settings._user_settings = Wrapped()
try:
yield hedwig.conf.settings._user_settings
finally:
unconfigure()
hedwig.conf.settings._user_settings = original_module
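# ---------------------------------------------------------------------------
# Usage sketch for the ``settings`` fixture (illustrative only, not part of
# the original suite).  A test that requests the fixture can assign
# attributes directly and the override is reverted at teardown, mirroring how
# ``_message_factory`` below sets ``HEDWIG_DATA_VALIDATOR_CLASS``:
#
#   def test_with_custom_validator(settings):
#       settings.HEDWIG_DATA_VALIDATOR_CLASS = 'hedwig.validators.jsonschema.JSONSchemaValidator'
#       ...  # code under test now sees the overridden setting
# ---------------------------------------------------------------------------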
@pytest.fixture(name='message_factory', params=['jsonschema', 'protobuf'])
def _message_factory(request, settings):
if request.param == 'jsonschema':
settings.HEDWIG_DATA_VALIDATOR_CLASS = 'hedwig.validators.jsonschema.JSONSchemaValidator'
try:
import jsonschema # noqa
from hedwig.testing.factories.jsonschema import JSONSchemaMessageFactory # noqa
yield JSONSchemaMessageFactory
except ImportError:
pytest.skip("JSON Schema not importable")
if request.param == 'protobuf':
settings.HEDWIG_DATA_VALIDATOR_CLASS = 'hedwig.validators.protobuf.ProtobufValidator'
try:
from tests.protobuf_factory import ProtobufMessageFactory # noqa
def _encode_proto(msg):
return msg.SerializeToString(deterministic=True)
# make maps deterministically ordered
with mock.patch("hedwig.validators.protobuf.ProtobufValidator._encode_proto", side_effect=_encode_proto):
yield ProtobufMessageFactory
except ImportError:
pytest.skip("Protobuf factory not importable")
@pytest.fixture()
def message_data(message_factory):
return message_factory.build(msg_type=MessageType.trip_created)
@pytest.fixture()
def message(message_factory):
return message_factory(msg_type=MessageType.trip_created)
@pytest.fixture()
def message_with_trace(message_factory):
return message_factory(
msg_type=MessageType.trip_created,
metadata__headers__traceparent="00-aa2ada259e917551e16da4a0ad33db24-662fd261d30ec74c-01",
)
@contextmanager
def _mock_boto3():
settings.AWS_REGION = 'us-west-1'
with mock_sqs(), mock_sns(), mock.patch("hedwig.backends.aws.boto3", autospec=True) as boto3_mock:
yield boto3_mock
@pytest.fixture
def mock_boto3():
with _mock_boto3() as m:
yield m
@pytest.fixture()
def sqs_consumer_backend(mock_boto3):
# may not be available
from hedwig.backends import aws
yield aws.AWSSQSConsumerBackend()
@pytest.fixture
def mock_pubsub_v1():
with mock.patch("hedwig.backends.gcp.pubsub_v1", autospec=True) as pubsub_v1_mock:
yield pubsub_v1_mock
@pytest.fixture(params=['aws', 'google'])
def consumer_backend(request):
if request.param == 'aws':
try:
from hedwig.backends.aws import AWSSQSConsumerBackend # noqa
with _mock_boto3():
yield AWSSQSConsumerBackend()
except ImportError:
pytest.skip("AWS backend not importable")
if request.param == 'google':
try:
from hedwig.backends.gcp import GooglePubSubConsumerBackend # noqa
with mock.patch("hedwig.backends.gcp.pubsub_v1"), mock.patch(
"hedwig.backends.gcp.google_auth_default", return_value=(None, "DUMMY")
):
yield GooglePubSubConsumerBackend()
except ImportError:
pytest.skip("Google backend not importable")
@pytest.fixture(
    params=["hedwig.backends.aws.AWSSNSPublisherBackend", "hedwig.backends.gcp.GooglePubSubPublisherBackend"]
)
def publisher_backend(request, mock_boto3):
with mock.patch("hedwig.backends.gcp.pubsub_v1"):
yield import_module_attr(request.param)
@pytest.fixture()
def mock_publisher_backend():
with mock.patch.object(HedwigPublisherBaseBackend, '_publish'):
yield HedwigPublisherBaseBackend()
@pytest.fixture(params=[True, False], ids=["message-attrs", "no-message-attrs"])
def use_transport_message_attrs(request, settings):
settings.HEDWIG_USE_TRANSPORT_MESSAGE_ATTRIBUTES = request.param
yield settings.HEDWIG_USE_TRANSPORT_MESSAGE_ATTRIBUTES
|
python
|
# author: Drew Botwinick, Botwinick Innovations
# license: 3-clause BSD
import os
import sys
# region Daemonize (Linux)
# DERIVED FROM: http://code.activestate.com/recipes/66012-fork-a-daemon-process-on-unix/
# This module is used to fork the current process into a daemon.
# Almost none of this is necessary (or advisable) if your daemon
# is being started by inetd. In that case, stdin, stdout and stderr are
# all set up for you to refer to the network connection, and the fork()s
# and session manipulation should not be done (to avoid confusing inetd).
# Only the chdir() and umask() steps remain as useful.
# References:
# UNIX Programming FAQ
# 1.7 How do I get my program to act like a daemon?
# http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
#
# Advanced Programming in the Unix Environment
# W. Richard Stevens, 1992, Addison-Wesley, ISBN 0-201-56317-7.
def daemonize_linux(stdin='/dev/null', stdout='/dev/null', stderr=None, pid_file=None, working_dir=None):
"""
This forks the current process into a daemon.
The stdin, stdout, and stderr arguments are file names that will be opened and be used to replace
the standard file descriptors in sys.stdin, sys.stdout, and sys.stderr.
These arguments are optional and default to /dev/null. Note that stderr is opened unbuffered, so
if it shares a file with stdout then interleaved output may not appear in the order that you expect.
:param stdin:
:param stdout:
:param stderr:
:param pid_file:
:param working_dir:
"""
# Because you're not reaping your dead children, many of these resources are held open longer than they should.
# Your second children are being properly handled by init(8) -- their parent is dead, so they are re-parented
# to init(8), and init(8) will clean up after them (wait(2)) when they die.
#
# However, your program is responsible for cleaning up after the first set of children. C programs typically
# install a signal(7) handler for SIGCHLD that calls wait(2) or waitpid(2) to reap the children's exit status
# and thus remove its entries from the kernel's memory.
#
# But signal handling in a script is a bit annoying. If you can set the SIGCHLD signal disposition to SIG_IGN
# explicitly, the kernel will know that you are not interested in the exit status and will reap the children
# for you_.
import signal
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
# Do first fork.
try:
# sys.stdout.write("attempting first fork, pid=")
pid = os.fork()
if pid > 0:
# sys.stdout.write("%s\n" % pid)
sys.exit(0) # Exit first parent.
except OSError as e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment.
# sys.stdout.write("attempting to separate from the parent environment\n")
os.chdir('/')
os.umask(0)
os.setsid()
# Do second fork.
try:
# sys.stdout.write("attempting second fork, pid=")
pid = os.fork()
if pid > 0:
# sys.stdout.write("%s\n" % pid)
sys.exit(0) # Exit second parent.
except OSError as e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# sys.stdout.write("\nI am now a daemon -- redirecting stdin, stdout, stderr now -- goodbye terminal\n")
# Redirect standard file descriptors.
if not stderr:
stderr = stdout
    si = open(stdin, 'r')
    so = open(stdout, 'a+')
    se = open(stderr, 'a+b', 0)  # unbuffered writes require binary mode on Python 3
    # this might be a good time to write a PID message to the starting user?
    if pid_file:
        with open(pid_file, 'w+') as f:  # online references don't close this -- is it bad if we do?
            f.write('%s\n' % os.getpid())  # the daemon's own PID (the fork() return value here is 0)
# flush anything that is in the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
# close file descriptors for stdin, stdout, and stderr
os.close(sys.stdin.fileno())
os.close(sys.stdout.fileno())
os.close(sys.stderr.fileno())
# reassign file descriptors for stdin, stdout, and stderr
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
if working_dir:
os.chdir(working_dir)
# ## Why 2 forks?
# The first fork accomplishes two things - allow the shell to return, and allow you to do a setsid().
#
# The setsid() removes yourself from your controlling terminal.
# You see, before, you were still listed as a job of your previous process, and therefore the user might
# accidentally send you a signal. setsid() gives you a new session, and removes the existing controlling terminal.
#
# The problem is, you are now a session leader. As a session leader, if you open a file descriptor that is a terminal,
# it will become your controlling terminal (oops!). Therefore, the second fork makes you NOT be a session leader.
# Only session leaders can acquire a controlling terminal, so you can open up any file you wish without worrying
# that it will make you a controlling terminal.
#
# So - first fork - allow shell to return, and permit you to call setsid()
#
# Second fork - prevent you from accidentally reacquiring a controlling terminal.
# endregion
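# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the log/pid paths and the work loop
# below are placeholder assumptions, and this is Linux-specific).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import time
    daemonize_linux(stdout='/tmp/example_daemon.log',
                    pid_file='/tmp/example_daemon.pid',
                    working_dir='/tmp')
    while True:  # the daemonized process does its real work here
        sys.stdout.write('still alive\n')
        sys.stdout.flush()
        time.sleep(60)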
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 14:57
from __future__ import unicode_literals
import cms.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0016_auto_20160608_1535'),
]
operations = [
migrations.CreateModel(
name='MenuItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=255, unique=True)),
('depth', models.PositiveIntegerField()),
('numchild', models.PositiveIntegerField(default=0)),
('title', models.CharField(max_length=255, verbose_name='Tytuł')),
('menu_id', models.CharField(max_length=255, verbose_name='Menu ID')),
('url', models.URLField(null=True)),
('target', models.CharField(blank=True, choices=[('_blank', 'Open in new window'), ('_self', 'Open in same window'), ('_parent', 'Delegate to parent'), ('_top', 'Delegate to top')], max_length=255, verbose_name='Target')),
('page', cms.models.fields.PageField(null=True, on_delete=django.db.models.deletion.CASCADE, to='cms.Page', verbose_name='Strona')),
],
options={
'abstract': False,
},
),
]
|
python
|
import pandas as pd
#%%
print('hello')
|
python
|
import pygeoip
gip = pygeoip.GeoIP("GeoLiteCity.dat")
res = gip.record_by_addr('192.168.29.160')
if res is None:
    print('No GeoIP record for this address (private/unroutable addresses return None)')
else:
    for key, val in res.items():
        print('%s : %s' % (key, val))
|
python
|
"""
Simple data container for a observable
"""
from tcvx21 import Quantity
import numpy as np
class MissingDataError(Exception):
"""An error to indicate that the observable is missing data"""
pass
class Observable:
def __init__(self, data, diagnostic, observable, label, color, linestyle):
"""Simple container for individual observables"""
try:
self.name = data.observable_name
self.label = label
self.color = color
self.linestyle = linestyle
self.diagnostic, self.observable = diagnostic, observable
self.dimensionality = data.dimensionality
self.check_dimensionality()
self.experimental_hierarchy = data.experimental_hierarchy
self.simulation_hierarchy = getattr(data, "simulation_hierarchy", None)
self._values = Quantity(data["value"][:], data["value"].units)
try:
self._errors = Quantity(data["error"][:], data["error"].units).to(
self._values.units
)
except IndexError:
self._errors = Quantity(
np.zeros_like(self._values), data["value"].units
).to(self._values.units)
self.mask = np.ones_like(self._values).astype(bool)
except (AttributeError, IndexError):
raise MissingDataError(
f"Missing data for {diagnostic}:{observable}. Data available is {data}"
)
def check_dimensionality(self):
raise NotImplementedError()
@property
def values(self) -> Quantity:
"""Returns the observable values, with a mask applied if applicable"""
return self._values[self.mask]
@property
def errors(self) -> Quantity:
"""Returns the observable errors, with a mask applied if applicable"""
return self._errors[self.mask]
@property
def units(self) -> str:
"""Returns the units of the values and errors, as a string"""
return str(self._values.units)
@property
def is_empty(self):
return False
@property
def has_errors(self):
return bool(np.count_nonzero(self.errors))
@property
def compact_units(self) -> str:
"""Units with compact suffix"""
if self.values.check("[length]^-3"):
# Don't convert 10^19 m^-3 to ~10 1/µm^3
return str(self.values.units)
else:
return str(np.max(np.abs(self.values)).to_compact().units)
@property
def npts(self):
"""Returns the number of unmasked observable points"""
return self.values.size
def nan_mask(self):
"""Returns a mask which will remove NaN values"""
return np.logical_and(~np.isnan(self._values), ~np.isnan(self._errors))
def check_attributes(self, other):
self.mask = np.logical_and(self.mask, other.mask)
assert self.color == other.color
assert self.label == other.label
assert self.dimensionality == other.dimensionality
assert self.linestyle == other.linestyle
if hasattr(self, "_positions_rsep"):
assert np.allclose(
self._positions_rsep, other._positions_rsep, equal_nan=True
)
if hasattr(self, "_positions_zx"):
assert np.allclose(self._positions_zx, other._positions_zx, equal_nan=True)
def fill_attributes(self, result):
"""Fills the attributes when copying to make a new object"""
result.mask = self.mask
result.color = self.color
result.label = self.label
result.dimensionality = self.dimensionality
result.linestyle = self.linestyle
if hasattr(self, "xmin") and hasattr(self, "xmax"):
result.xmin, result.xmax, result.ymin, result.ymax = (
self.xmin,
self.xmax,
None,
None,
)
if hasattr(self, "_positions_rsep"):
result._positions_rsep = self._positions_rsep
if hasattr(self, "_positions_zx"):
result._positions_zx = self._positions_zx
def __add__(self, other):
assert type(self) == type(other)
result = object.__new__(self.__class__)
result._values = self._values + other._values
result._errors = np.sqrt(self._errors ** 2 + other._errors ** 2)
self.fill_attributes(result)
result.check_attributes(other)
return result
def __sub__(self, other):
assert type(self) == type(other)
result = object.__new__(self.__class__)
result._values = self._values - other._values
result._errors = np.sqrt(self._errors ** 2 + other._errors ** 2)
self.fill_attributes(result)
result.check_attributes(other)
return result
def __mul__(self, other):
result = object.__new__(self.__class__)
if isinstance(other, (float, Quantity)):
# Scalar multiplication
result._values = self._values * other
result._errors = self._errors * other
self.fill_attributes(result)
else:
assert type(self) == type(other)
result._values = self._values * other._values
result._errors = result._values * np.sqrt(
(self._errors / self._values) ** 2
+ (other._errors / other._values) ** 2
)
self.fill_attributes(result)
result.check_attributes(other)
return result
def __truediv__(self, other):
assert type(self) == type(other)
assert self._values.size == other._values.size
result = object.__new__(self.__class__)
result._values = self._values / other._values
result._errors = result._values * np.sqrt(
(self._errors / self._values) ** 2 + (other._errors / other._values) ** 2
)
self.fill_attributes(result)
result.check_attributes(other)
return result
def trim_to_mask(self, mask):
result = object.__new__(self.__class__)
result._values = self._values[mask]
result._errors = self._errors[mask]
self.fill_attributes(result)
result.mask = np.ones_like(result._values).astype(bool)
if hasattr(self, "_positions_rsep"):
result._positions_rsep = self._positions_rsep[mask]
if hasattr(self, "_positions_zx"):
result._positions_zx = self._positions_zx[mask]
return result
|
python
|
import numpy as np
from scipy.special import loggamma
from scipy.spatial import KDTree
from matplotlib import pyplot as plt
from scipy.optimize import minimize
from mpl_toolkits import mplot3d
from math import frexp
from mpmath import mp, hyper, nstr, hyperu
from exactlearning import BFGS_search, analyse
mp.dps = 16; mp.pretty = True
np.seterr(divide = 'raise')
#twopi = 2*np.pi
#twopi_rec = 1/twopi
#pi_rec = 1/np.pi
## Set the tag here
tag = "Linear_0"
print("*** Loading Data ***")
N_d = 10
logmoments = np.load("logmoments_{}.npy".format(tag))[:N_d]
moments = np.load("moments_{}.npy".format(tag))[:N_d]
s_values = np.load("s_values_{}.npy".format(tag))[:N_d]
real_error = np.load("real_error_{}.npy".format(tag))[:N_d]
imag_error = np.load("imag_error_{}.npy".format(tag))[:N_d]
## Chop up
real_s = np.real(s_values)
imag_s = np.imag(s_values)
real_logm = np.real(logmoments)
imag_logm = np.imag(logmoments)
real_m = np.real(moments)
imag_m = np.imag(moments)
real_log_upper = np.real(np.log(moments + real_error))
real_log_lower = np.real(np.log(moments - real_error))
imag_log_upper = np.imag(np.log(moments + imag_error))
imag_log_lower = np.imag(np.log(moments - imag_error))
## The bounds to work with
real_log_diff = real_log_upper - real_log_lower
imag_log_diff = imag_log_upper - imag_log_lower
### Define a rational/algebraic etc. solution space.
PT = np.array([-1,-2,-1/2,-3/4,3/4,3/2,5/2,2/3,np.sqrt(np.sqrt(2)),1/np.sqrt(np.sqrt(2)),1e-6,1,2,3,4,1/2,1/3,1/4,1/5,np.sqrt(2),np.sqrt(3),1/np.sqrt(2),1/np.sqrt(3),np.pi,1.0/np.pi])
#PT = np.reshape([PT,-PT],2*len(PT))
if(False):
values = []
index_to_drop = []
A = len(constants_dict.keys())
count = 0
for i in constants_dict.keys():
if(constants_dict[i] not in values): values.append(constants_dict[i])
else: index_to_drop.append(i)
count +=1
print(count/A)
for i in index_to_drop:
del constants_dict[i]
PT = np.array(list(constants_dict.values()))
Pkeys = np.array(list(constants_dict.keys()))
np.save("clean_values",PT)
np.save("clean_keys",Pkeys)
PT = np.load("clean_values.npy")
PTkeys = np.load("clean_keys.npy")
reverse_dict = { i:j for i,j in zip(PT,PTkeys)}
PT[0]=1e-7
#from scipy.spatial import KDTree
### Define a point cloud
#points = [[[[[a,b,c,d] for d in PT] for c in PT] for b in PT] for a in PT]
#points = np.array(points)
#points = np.reshape(points,(len(PT)**4,4))
#points_tree = KDTree(points)
N_terms = 13
## Scaled
def fingerprint(p):
ret = np.log(p[0]**2) ## A constant factor
ret += s_values*np.log(p[1]**2) ## C^s for some C, together with previous cover most prefactors
ret += loggamma(p[2]+ p[3]*s_values) ## A flexible gamma
ret += loggamma(p[4] + p[5]*s_values) ## A flexible gamma
hyp = [complex(hyper([p[6]*s+p[7],p[8]+p[9]*s],[p[10]+p[11]*s],p[12])) for s in s_values] ## slow generalised_hypergeom
ret += np.log(hyp)
# s_values**2 * np.log(p[6]**2) #+ s**3 * np.log(p[3]**2) + s**4 * np.log(p[4]**2) ## Strange series temrs
#ret += np.log(1 + p[5]*s_values + p[6]*s_values**2 + p[7]*s_values**3 + p[8]*s_values**4) ## Log of polynomial
return ret
#p0 = np.ones(N_terms) + (0.5- np.random.rand(N_terms))
observations = []
losses = []
def categorical_solve(nits, L_in=None, P_in=None):
C_size = len(PT)
#static = np.array(range(N_terms))
    L = 0.001*np.ones((N_terms, C_size)) if L_in is None else L_in
    C = 0.001*np.ones((N_terms, C_size))
    K = np.random.choice(range(C_size), size=N_terms, replace=True)  # one candidate index per parameter
    p = PT[K]
l = complex_diff(p)
Q = [[ np.exp(-np.abs(K[i]-PT[j]))/l for j in range(C_size)] for i in range(N_terms)]
N = [[ np.exp(-np.abs(K[i]-PT[j])) for j in range(C_size)] for i in range(N_terms)]
L += Q
C += N
## Probability distribution over elements
    if P_in is None:
        P = L/C
        N = np.sum(P, axis=1)
        P = P / N[:, None]
    else:
        P = P_in
N = np.sum(P,axis =1)
#I.e. a n array of differences and sorted list...
## Add in an additional parameter choice which isn't in the list? (Some kind of solver?)
## Add in a routine that sets certain elements of P to zero after they drop below a threshold (number of observations)?
losses = []
for i in range(nits):
power = 1 + i/1000
K = np.array([np.random.choice(range(C_size),replace=True, p = pp) for pp in P])
p = PT[K]
try:
l = complex_diff(p)
except:
l = 100
if(l>100): l = 100
#l = 0.01+np.random.random()
print(l)
if(l<1e-6): return L, P
Q = [[ np.exp(-np.abs(K[i]-PT[j]))/l for j in range(C_size)] for i in range(N_terms)]
N = [[ np.exp(-np.abs(K[i]-PT[j])) for j in range(C_size)] for i in range(N_terms)]
L += Q
C += N
P = L/C
N = np.sum(P,axis =1)
P = (P / N[:,None])**power
N = np.sum(P,axis =1)
P = (P / N[:,None])
#if(i%100==0):
# i = np.transpose(np.argwhere(P<1e-3))
# L[i[0],i[1]] = 0
# P = L/C
# N = np.sum(P,axis =1)
# P = P / N[:,None]
return L, P
if(False):
L, P = categorical_solve(1000)
for i in range(N_terms):
q = np.quantile(P[i],0.75)
m = np.argmax(P[i])
indices = np.where(P[i] > q)
terms = PT[indices]
print("p[{}] ~ ".format(i),terms)
print("Hypothesis: p[{}] ~ {}".format(i,PT[m]))
for i in range(len(PT)):
print(i,PT[i])
for i in P:
plt.bar(range(len(i)),i)
plt.show()
exit()
if(False):
from scipy.stats import norm
def weighted_avg_and_std(values, weights):
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights)
return (average, np.sqrt(variance))
## First carry out a random set of experiments
for i in range(1000):
p0 = np.random.uniform(low=np.amin(PT),high=np.amax(PT),size=N_terms)
score = complex_diff(p0)
observations.append(p0)
losses.append(score)
for k in range(1):
O = np.array(observations)
L = np.array(losses)
## Now for each parameter, derive a normal distribution
MS = np.array([ weighted_avg_and_std(np.transpose(O)[i], 1/L) for i in range(N_terms)])
print(MS)
for i in range(100):
p0 = [ np.random.normal(loc=m,scale = 2*s) for m,s in MS]
score = complex_diff(p0)
observations.append(p0)
losses.append(score)
for k in range(100):
O = np.array(observations)
L = np.array(losses)
## Now for each parameter, derive a normal distribution
MS = np.array([ weighted_avg_and_std(np.transpose(O)[i], 1/L) for i in range(N_terms)])
print(MS)
## Consider the list of solutions weighted by the normals distributions
PT_weights = [ [norm(loc=m,scale=s).pdf(k) for k in PT] for m,s in MS]
PT_weights = [ a/np.sum(a) for a in PT_weights ]
Ps = np.transpose(np.array([ np.random.choice(PT,size=10,p=p) for p in PT_weights ]))
for p in Ps:
score = complex_diff(p)
observations.append(p)
losses.append(score)
print("Best Score:",np.amin(losses))
print("Best Params:",observations[np.argmin(losses)])
print(losses)
print(observations)
#p_test = [1/np.sqrt(2),0.5,0.5,0.25]
## Loop here
#res = points_tree.query(p_test,k=20)
#new_indices = res[1]
#vecs = points[new_indices]
#scores = [complex_diff(p) for p in vecs]
#print(scores)
p0= np.random.random(N_terms)
#p0 = [ 0.51238944,0.97451466,-0.01,0.4491124,0.12458327,0.82568312,0.20801154,0.27429931,0.73933532,0.16679021,0.5342653,0.90349894,0.31334464, 0.68688119]
p0 = [ 0.51238944,0.97451466,0.4491124,0.12458327,0.82568312,0.20801154,0.27429931,0.73933532,0.16679021,0.5342653,0.90349894,0.31334464, 0.68688119]
if(True):
if(True):
popt = BFGS_search(p0)
else:
print("BFGS Disabled")
popt = p0
print("** Searching for Algebraic Identity ***")
## Rational searcher
## Loop here
#res = points_tree.query(popt,k=10)
#new_indices = res[1]
#vecs = points[new_indices]
#scores = [complex_diff(p) for p in vecs]
#best_score = np.argmin(scores)
#print(vecs[best_score],scores[best_score])
analyse(popt)
## Add these "best" solutions to the mix
## This gives us a chance at partially optimising the original solution
PT = np.concatenate((PT,popt))
PTkeys = np.concatenate((PTkeys,["BFGS_param_{}".format(i) for i in range(len(popt))]))
reverse_dict = { i:j for i,j in zip(PT,PTkeys)}
##
## IMPORTANT IDEA
## CONSIDER FIRST ITERATING EACH PARAMETER IN TERMS OF NEARBY SOLUTIOSN WHILE KEEPING THE OTHER TERMS CONSTANT
## IF WE GET ANY HITS THIS IS PROMISING
PT2 = [[k] for k in PT]
value_tree = KDTree(PT2)
CHOICES = []
for i in range(len(popt)):
k_query = 5
nearest_k = value_tree.query([popt[i]],k=k_query)
## Get all the values which are within 0.1
dists = nearest_k[0]
inds = np.argwhere(dists <= 0.1)
elements = nearest_k[1][inds]
choice = [k[0] for k in PT[elements]]
CHOICES.append(choice)
print("p[{}] choose from {}".format(i,choice))
## Set up a score system for the choices
P = np.zeros((len(popt),k_query))
for i in range(len(CHOICES)):
for j in range(len(CHOICES[i])):
P[i,j]+=1
N = np.sum(P,axis =1)
P = (P / N[:,None])
## A probabilistic scoring approach
if(False):
## Assemble all parameter combinations
nits = 10*np.prod([len(ii) for ii in CHOICES])
print("N iterations = {}".format(nits))
## Or run the weighted combinations analysis
print("*** Running Enumeration ***")
l_best =100
for i in range(nits):
K = np.array([np.random.choice(range(k_query),replace=True, p = pp) for pp in P])
p = [ CHOICES[ch][K[ch]] for ch in range(len(CHOICES))]
try:
l = complex_diff(p)
except:
l = 100
if(l<l_best):
l_best=l
print("Best score yet: {} with {}".format(l,p))
exit()
## A Gaussian weighted exploration algorithm
if(True):
l_best =100
observations = []
losses = []
## First carry out a random set of experiments
for i in range(100):
K = np.array([np.random.choice(range(k_query),replace=True, p = pp) for pp in P])
p = [ CHOICES[ch][K[ch]] for ch in range(N_terms)]
try:
l = complex_diff(p)
except:
l = 100
if(l<l_best):
l_best=l
print("Best score yet: {} with {}".format(l,p))
observations.append(p)
losses.append(l)
for k in range(1):
O = np.array(observations)
L = np.array(losses)
## Now for each parameter, derive a normal distribution
MS = np.array([ weighted_avg_and_std(np.transpose(O)[i], 1/L) for i in range(N_terms)])
for i in range(100):
p = [ np.random.normal(loc=m,scale = 2*s) for m,s in MS]
try:
l = complex_diff(p)
except:
l = 100
if(l<l_best):
l_best=l
print("Best score yet: {} with {}".format(l,p))
observations.append(p)
losses.append(l)
for k in range(1000):
O = np.array(observations)
L = np.array(losses)
## Now for each parameter, derive a normal distribution
MS = np.array([ weighted_avg_and_std(np.transpose(O)[i], 1/L) for i in range(N_terms)])
## Consider the list of solutions weighted by the normals distributions
PT_weights = [ [norm(loc=MS[qq][0],scale=MS[qq][1]).pdf(k) for k in CHOICES[qq]] for qq in range(len(MS))]
PT_weights = [ a/np.sum(a) for a in PT_weights ]
p = [ np.random.choice(CHOICES[i],p=PT_weights[i]) for i in range(len(CHOICES)) ]
try:
l = complex_diff(p)
except:
l = 100
if(l<l_best):
l_best=l
print("Best score yet: {} with {}".format(l,p))
print("Translates to: {} with {}".format(l,[reverse_dict[i] for i in p]))
observations.append(p)
losses.append(l)
print("Best Params:",observations[np.argmin(losses)])
##
#WITH THE BEST SCORE, search through the tree of values
#for each parameter get say 5 values?
#Check the lists by eye? I.e.
#"Basic Constant in [0,1,2,3]", then we can see if 0 is a bad suggestion?
#"Blah Blah in ... ",
#From here we can run the above methods of filtering or a direct enumeration if the number of combinations is less than 2 million or so..
#Run on a single data point, collect the best combinations and chec kfor multiple datapoints.
#Consider a method to design splits i.e. as before on the filtering method
#If we have two very high peaks, then reenumerate using those two values only!
ax = plt.axes(projection='3d')
# Data for three-dimensional scattered points
ax.scatter3D(real_s, imag_s, real_logm, c=real_logm, cmap='Reds', label = "Numeric")
ax.scatter3D(real_s, imag_s, np.real(fit), c=np.real(fit), cmap='Greens', label = "Theoretical")
ax.set_xlabel('Re(s)')
ax.set_ylabel('Im(s)')
ax.set_zlabel('$\log Re(E[x^{s-1}])$')
plt.legend()
plt.show()
ax = plt.axes(projection='3d')
# Data for three-dimensional scattered points
ax.scatter3D(real_s, imag_s, imag_logm, c=imag_logm, cmap='Reds', label = "Numeric")
ax.scatter3D(real_s, imag_s, np.imag(fit), c=np.imag(fit), cmap='Greens', label = "Theoretical")
ax.set_xlabel('Re(s)')
ax.set_ylabel('Im(s)')
ax.set_zlabel('$\log Im(E[x^{s-1}])$')
plt.legend()
plt.show()
p_best = popt
|
python
|
#!/usr/bin/env python3
# coding: utf-8
"""
@author: Ping Qiu [email protected]
@last modified by: Ping Qiu
@file:test_find_markers.py
@time:2021/03/16
"""
import sys
sys.path.append('/data/workspace/st/stereopy-release')
from stereo.tools.spatial_pattern_score import *
import pandas as pd
from anndata import AnnData
import numpy as np
np.random.seed(9)
def init(genes=50, cells=20, dtype='dataframe'):
gname = [f'g{i}' for i in range(genes)]
cname = [f'c{i}' for i in range(cells)]
x = np.random.randint(0, 100, (cells, genes))
if dtype == 'anndata':
var = pd.DataFrame(index=gname)
obs = pd.DataFrame(index=cname)
groups = np.random.choice(['1', '2', '3'], cells)
obs['cluster'] = groups
andata = AnnData(x, obs=obs, var=var)
return andata
else:
return pd.DataFrame(x, index=cname, columns=gname)
def test():
andata = init(30, 100, 'anndata')
tmp = SpatialPatternScore(data=andata)
tmp.fit()
print(andata.var)
test()
|
python
|
import timeit
import functools
import numpy as np
def timefunc(number=10000):
def _timefunc(func):
@functools.wraps(func)
def time_func_wrapper(*args, **kwargs):
t0 = timeit.default_timer()
for _ in range(number):
value = func(*args, **kwargs)
t1 = timeit.default_timer()
print("func: {}(args={}, kwargs={}) time: {}".format(func.__name__, str(args), str(kwargs), t1-t0))
return value
return time_func_wrapper
return _timefunc
volumes= [6, 7, 8]
@timefunc(number=100000)
def modify_np(x, i, j):
y = x.copy()
free = volumes[j] - x[j]
spill = min(free, x[i])
y[i] -= spill
y[j] += spill
#y = x
h = hash(y.tostring())
return h
x = np.array([1,2,3])
modify_np(x, 1, 2)
@timefunc(number=100000)
def modify_list(x, i, j):  # same benchmark on a plain Python list, hashing via tuple()
    y = x.copy()
    free = volumes[j] - x[j]
    spill = min(free, x[i])
    y[i] -= spill
    y[j] += spill
    return hash(tuple(y))
x = [1, 2, 3]
modify_list(x, 1, 2)
|
python
|
#!/usr/bin/env python
"""
This module provides File.GetID data access object.
"""
from WMCore.Database.DBFormatter import DBFormatter
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
class GetID(DBFormatter):
"""
File GetID DAO class.
"""
def __init__(self, logger, dbi, owner):
"""
Add schema owner and sql.
"""
DBFormatter.__init__(self, logger, dbi)
self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else ""
self.sql = \
"""
SELECT F.FILE_ID
FROM %sFILES F
""" % ( self.owner )
def execute(self, conn, name, transaction = False):
"""
returns id for a given lfn
"""
sql = self.sql
sql += "WHERE F.LOGICAL_FILE_NAME = :lfn"
binds = {"lfn":name}
result = self.dbi.processData(sql, binds, conn, transaction)
plist = self.formatDict(result)
if len(plist) < 1: return -1
return plist[0]["file_id"]
|
python
|
from typing import List  # for the List[...] annotations below
class Solution:
def fourSumCount(self, nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:
cnt = 0
m = {}
for num1 in nums1:
for num2 in nums2:
m[num1 + num2] = m.get(num1 + num2, 0) + 1
for num3 in nums3:
for num4 in nums4:
cnt += m.get(-(num3 + num4), 0)
return cnt
|
python
|
DEBUG = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SECRET_KEY = 'na2Tei0FoChe3ooloh5Yaec0ji7Aipho'
INSTALLED_APPS=(
'mailrobot',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': False,
'OPTIONS': {
'debug': DEBUG,
},
},
]
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
|
python
|
VERSION = "2.0.113"
VERSION_APP = "1265"
API_KEY = "270072d0fb4811ebacd96f6726fbdbb1"
API_SECRET = "2d0288d0fb4811ebabfbd57e57c6ae64"
ENDPOINT = "https://api.myxplora.com/api"
|
python
|
from .dataset_evaluator import DatasetEvaluator
__all__ = [
'DatasetEvaluator'
]
|
python
|
cities = [
'Tallinn',
'Tartu',
'Narva',
'Kohtla-Jaerve',
'Paernu',
'Viljandi',
'Rakvere',
'Sillamaee',
'Maardu',
'Kuressaare',
'Voru',
'Valga',
'Haapsalu',
'Johvi',
'Paide',
'Keila',
'Kivioli',
'Tapa',
'Polva',
'Jogeva',
'Tueri',
'Elva',
'Rapla',
'Saue',
'Kaerdla'
]
|
python
|
#!/usr/bin/env python3
"""
* Example demonstrating the Position closed-loop servo.
* Tested with Logitech F350 USB Gamepad inserted into Driver Station.
*
* Be sure to select the correct feedback sensor using configSelectedFeedbackSensor() below.
*
* After deploying/debugging this to your RIO, first use the left Y-stick
* to throttle the Talon manually. This will confirm your hardware setup.
* Be sure to confirm that when the Talon is driving forward (green) the
* position sensor is moving in a positive direction. If this is not the case,
* flip the boolean input to the setSensorPhase() call below.
*
* Once you've ensured your feedback device is in-phase with the motor,
* use the button shortcuts to servo to target position.
*
* Tweak the PID gains accordingly.
"""
from ctre import WPI_TalonSRX
import wpilib
class Robot(wpilib.IterativeRobot):
#: Which PID slot to pull gains from. Starting 2018, you can choose from
#: 0,1,2 or 3. Only the first two (0,1) are visible in web-based
#: configuration.
kSlotIdx = 0
    #: Talon SRX/ Victor SPX supports multiple (cascaded) PID loops. For
#: now we just want the primary one.
kPIDLoopIdx = 0
#: set to zero to skip waiting for confirmation, set to nonzero to wait and
#: report to DS if action fails.
kTimeoutMs = 10
def robotInit(self):
self.talon = WPI_TalonSRX(3)
self.joy = wpilib.Joystick(0)
self.loops = 0
self.lastButton1 = False
self.targetPos = 0
# choose the sensor and sensor direction
self.talon.configSelectedFeedbackSensor(
WPI_TalonSRX.FeedbackDevice.CTRE_MagEncoder_Relative,
self.kPIDLoopIdx,
self.kTimeoutMs,
)
# choose to ensure sensor is positive when output is positive
self.talon.setSensorPhase(True)
# choose based on what direction you want forward/positive to be.
# This does not affect sensor phase.
self.talon.setInverted(False)
# set the peak and nominal outputs, 12V means full
self.talon.configNominalOutputForward(0, self.kTimeoutMs)
self.talon.configNominalOutputReverse(0, self.kTimeoutMs)
self.talon.configPeakOutputForward(1, self.kTimeoutMs)
self.talon.configPeakOutputReverse(-1, self.kTimeoutMs)
# Set the allowable closed-loop error, Closed-Loop output will be
# neutral within this range. See Table in Section 17.2.1 for native
# units per rotation.
self.talon.configAllowableClosedloopError(0, self.kPIDLoopIdx, self.kTimeoutMs)
# set closed loop gains in slot0, typically kF stays zero - see documentation */
self.talon.selectProfileSlot(self.kSlotIdx, self.kPIDLoopIdx)
self.talon.config_kF(0, 0, self.kTimeoutMs)
self.talon.config_kP(0, 0.1, self.kTimeoutMs)
self.talon.config_kI(0, 0, self.kTimeoutMs)
self.talon.config_kD(0, 0, self.kTimeoutMs)
# zero the sensor
self.talon.setSelectedSensorPosition(0, self.kPIDLoopIdx, self.kTimeoutMs)
def teleopPeriodic(self):
"""
This function is called periodically during operator control
"""
# get gamepad axis - forward stick is positive
leftYstick = self.joy.getY()
# calculate the percent motor output
motorOutput = self.talon.getMotorOutputPercent()
button1 = self.joy.getRawButton(1)
button2 = self.joy.getRawButton(2)
# deadband gamepad
if abs(leftYstick) < 0.1:
leftYstick = 0
# prepare line to print
sb = []
sb.append("\tOut%%: %.3f" % motorOutput)
sb.append(
"\tPos: %.3fu" % self.talon.getSelectedSensorPosition(self.kPIDLoopIdx)
)
        if not self.lastButton1 and button1:
# Position mode - button just pressed
# 10 Rotations * 4096 u/rev in either direction
self.targetPos = leftYstick * 4096 * 10.0
self.talon.set(WPI_TalonSRX.ControlMode.Position, self.targetPos)
# on button2 just straight drive
if button2:
# Percent voltage mode
self.talon.set(WPI_TalonSRX.ControlMode.PercentOutput, leftYstick)
if self.talon.getControlMode() == WPI_TalonSRX.ControlMode.Position:
            # append more signals to print when in position mode.
sb.append("\terr: %s" % self.talon.getClosedLoopError(self.kPIDLoopIdx))
sb.append("\ttrg: %.3f" % self.targetPos)
# periodically print to console
self.loops += 1
if self.loops >= 10:
self.loops = 0
print(" ".join(sb))
# save button state for on press detect
self.lastButton1 = button1
if __name__ == "__main__":
wpilib.run(Robot)
|
python
|
#!/usr/bin/env python3
from copy import deepcopy
from queue import Queue
from pickle import dump, load
from colorama import Fore, Style
class GoBoard(object):
black = 1
space = 0
white = -1
featureCount = 22
printDic = {space : '.', black : 'B', white : 'W'}
colorDic = {space : Fore.WHITE + Style.BRIGHT, black : Fore.RED + Style.BRIGHT, white : Fore.WHITE + Style.BRIGHT, 'last' : Fore.CYAN + Style.BRIGHT, 'reset' : Style.RESET_ALL}
dxdy = [(1, 0), (-1, 0), (0, 1), (0, -1)]
def __init__(self, size = 19):
if not isinstance(size, int) or size <= 0:
raise Exception('GoBoard: __init__: error: invalid size')
self.__size = size
self.__twoHistory = [None] * 2
self.__lastMove = None
self.__nextColor = GoBoard.black
self.__boardList = self.getEmptyBoardList()
def save(self, filename):
if not isinstance(filename, str):
raise Exception('GoBoard: save: error: invalid filename')
with open(filename, 'wb') as f:
dump(self.__dict__, f, 2)
def load(self, filename):
if not isinstance(filename, str):
raise Exception('GoBoard: load: error: invalid filename')
with open(filename, 'rb') as f:
self.__dict__.update(load(f))
def getNextColor(self):
return self.__nextColor
def skip(self):
self.__nextColor = - self.__nextColor
def getSize(self):
return self.__size
def setBoardList(self, boardList):
if not self.isValidBoardList(boardList):
raise Exception('GoBoard: setBoardList: error: invalid boardList')
self.__boardList = deepcopy(boardList)
def getBoardList(self):
return deepcopy(self.__boardList)
def setSpot(self, x, y, value):
if not isinstance(x, int) or not 0 <= x < self.__size:
raise Exception('GoBoard: setSpot: error: invalid x coordinate')
if not isinstance(y, int) or not 0 <= y < self.__size:
raise Exception('GoBoard: setSpot: error: invalid y coordinate')
if not isinstance(value, int) or not GoBoard.white <= value <= GoBoard.black:
raise Exception('GoBoard: setSpot: error: invalid value')
self.__boardList[x][y] = value
def getSpot(self, x, y):
if not isinstance(x, int) or not 0 <= x < self.__size:
raise Exception('GoBoard: getSpot: error: invalid x coordinate')
if not isinstance(y, int) or not 0 <= y < self.__size:
raise Exception('GoBoard: getSpot: error: invalid y coordinate')
return self.__boardList[x][y]
def printBoard(self):
print(GoBoard.colorDic[GoBoard.space] + '+' + '-' * (self.__size * 2 + 1) + '+')
for i in range(self.__size):
print(GoBoard.colorDic[GoBoard.space] + '|', end = ' ')
for j in range(self.__size):
if self.__lastMove == (i, j):
print(GoBoard.colorDic['last'] + GoBoard.printDic[self.__boardList[i][j]], end = ' ')
else:
print(GoBoard.colorDic[self.__boardList[i][j]] + GoBoard.printDic[self.__boardList[i][j]], end = ' ')
print(GoBoard.colorDic[GoBoard.space] + '|')
print(GoBoard.colorDic[GoBoard.space] + '+' + '-' * (self.__size * 2 + 1) + '+' + Style.RESET_ALL)
def hash(self):
s = ''
for row in self.__boardList:
for spot in row:
s += str(spot + 1)
return int(s, 3)
def setBoardListFromHash(self, h):
if not isinstance(h, int):
raise Exception('GoBoard: setBoardListFromHash: error: invalid hash')
s = ''
while h > 0:
s = str(h % 3) + s
            h //= 3
if len(s) < self.__size ** 2:
s = '0' * (self.__size ** 2 - len(s)) + s
elif len(s) > self.__size ** 2:
raise Exception('GoBoard: setBoardListFromHash: error: invalid hash')
for i in range(self.__size):
for j in range(self.__size):
self.__boardList[i][j] = int(s[i * self.__size + j]) - 1
def bfsFloodFill(self, x, y):
if not isinstance(x, int) or not 0 <= x < self.__size:
raise Exception('GoBoard: bfsFloodFill: error: invalid x coordinate')
if not isinstance(y, int) or not 0 <= y < self.__size:
raise Exception('GoBoard: bfsFloodFill: error: invalid y coordinate')
color = self.__boardList[x][y]
if color == GoBoard.space:
return ([], [])
stonespot = []
libertyspot = []
vis = self.getEmptyBoardList()
que = Queue()
que.put((x, y))
while not que.empty():
cur = que.get()
if not 0 <= cur[0] < self.__size or not 0 <= cur[1] < self.__size or self.__boardList[cur[0]][cur[1]] == - color or vis[cur[0]][cur[1]] == 1:
continue
vis[cur[0]][cur[1]] = 1
if self.__boardList[cur[0]][cur[1]] == GoBoard.space:
libertyspot.append((cur[0], cur[1]))
else:
stonespot.append((cur[0], cur[1]))
for d in GoBoard.dxdy:
que.put((cur[0] + d[0], cur[1] + d[1]))
return (stonespot, libertyspot)
def countLiberty(self):
ret = [[-1] * self.__size for _ in range(self.__size)]
for i in range(self.__size):
for j in range(self.__size):
if ret[i][j] == -1 and self.__boardList[i][j] != GoBoard.space:
bfs = self.bfsFloodFill(i, j)
liberty = len(bfs[1])
for spot in bfs[0]:
ret[spot[0]][spot[1]] = liberty
elif self.__boardList[i][j] == GoBoard.space:
ret[i][j] = 0
return ret
def captureSpot(self, exception = None):
ret = []
mat = self.getEmptyBoardList()
liberty = self.countLiberty()
for i in range(self.__size):
for j in range(self.__size):
if liberty[i][j] == 0 and self.__boardList[i][j] != GoBoard.space:
mat[i][j] = 1
if isinstance(exception, tuple) and len(exception) == 2:
god = self.bfsFloodFill(exception[0], exception[1])
for spot in god[0]:
mat[spot[0]][spot[1]] = 0
elif exception != None:
raise Exception('GoBoard: captureSpot: error: invalid exception')
for i in range(self.__size):
for j in range(self.__size):
if mat[i][j] == 1:
ret.append((i, j))
return ret
def capture(self, exception = None):
spots = self.captureSpot(exception)
for spot in spots:
self.__boardList[spot[0]][spot[1]] = GoBoard.space
def isValidMove(self, x, y, color):
if not isinstance(x, int) or not 0 <= x < self.__size or not isinstance(y, int) or not 0 <= y < self.__size or not isinstance(color, int) or color != GoBoard.white and color != GoBoard.black or self.__boardList[x][y] != GoBoard.space:
return False
for k in GoBoard.dxdy:
i = x + k[0]
j = y + k[1]
if 0 <= i < self.__size and 0 <= j < self.__size and self.__boardList[i][j] == GoBoard.space:
return True
tempBoard = GoBoard(self.__size)
tempBoard.setBoardList(self.__boardList)
tempBoard.setSpot(x, y, color)
tempBoard.capture((x, y))
if len(tempBoard.bfsFloodFill(x, y)[1]) == 0:
return False
if self.__twoHistory[0] == tempBoard.hash():
return False
return True
def move(self, x, y, color):
if not isinstance(x, int) or not 0 <= x < self.__size:
raise Exception('GoBoard: move: error: invalid x coordinate')
if not isinstance(y, int) or not 0 <= y < self.__size:
raise Exception('GoBoard: move: error: invalid y coordinate')
if not isinstance(color, int) or color != GoBoard.white and color != GoBoard.black:
raise Exception('GoBoard: move: error: invalid color')
if self.__boardList[x][y] != GoBoard.space:
raise Exception('GoBoard: move: error: occupied spot')
for k in GoBoard.dxdy:
i = x + k[0]
j = y + k[1]
if 0 <= i < self.__size and 0 <= j < self.__size and self.__boardList[i][j] == GoBoard.space:
self.__boardList[x][y] = color
self.capture()
self.__twoHistory[0], self.__twoHistory[1] = self.__twoHistory[1], self.hash()
self.__nextColor = - color
self.__lastMove = (x, y)
return
tempBoard = GoBoard(self.__size)
tempBoard.setBoardList(self.__boardList)
tempBoard.setSpot(x, y, color)
tempBoard.capture((x, y))
if len(tempBoard.bfsFloodFill(x, y)[1]) == 0:
raise Exception('GoBoard: move: error: invalid move')
if self.__twoHistory[0] == tempBoard.hash():
raise Exception('GoBoard: move: error: reappeared state')
self.__boardList = tempBoard.getBoardList()
self.__twoHistory[0], self.__twoHistory[1] = self.__twoHistory[1], self.hash()
self.__nextColor = - color
self.__lastMove = (x, y)
def isValidBoardList(self, boardList):
if not isinstance(boardList, list) or len(boardList) != self.__size:
return False
for row in boardList:
if not isinstance(row, list) or len(row) != self.__size:
return False
for spot in row:
if not isinstance(spot, int) or not GoBoard.white <= spot <= GoBoard.black:
return False
return True
def getEmptyBoardList(self):
return [[GoBoard.space] * self.__size for _ in range(self.__size)]
def featureColor(self, color):
if not isinstance(color, int) or not GoBoard.white <= color <= GoBoard.black:
raise Exception('GoBoard: featureColor: error: invalid color')
ret = self.getEmptyBoardList()
for i in range(self.__size):
for j in range(self.__size):
if self.__boardList[i][j] == color:
ret[i][j] = 1
return ret
def featureCurrent(self):
return self.featureColor(self.__nextColor)
def featureOpponent(self):
return self.featureColor(- self.__nextColor)
def featureEmpty(self):
return self.featureColor(GoBoard.space)
def featureAllZeros(self):
return self.getEmptyBoardList()
def featureAllOnes(self):
return [[1] * self.__size for _ in range(self.__size)]
def featureFourLiberty(self):
ret = [self.getEmptyBoardList() for _ in range(8)]
liberty = self.countLiberty()
for i in range(self.__size):
for j in range(self.__size):
if self.__boardList[i][j] == self.__nextColor:
if liberty[i][j] == 1:
ret[0][i][j] = 1
elif liberty[i][j] == 2:
ret[1][i][j] = 1
elif liberty[i][j] == 3:
ret[2][i][j] = 1
elif liberty[i][j] >= 4:
ret[3][i][j] = 1
elif self.__boardList[i][j] == - self.__nextColor:
if liberty[i][j] == 1:
ret[4][i][j] = 1
elif liberty[i][j] == 2:
ret[5][i][j] = 1
elif liberty[i][j] == 3:
ret[6][i][j] = 1
elif liberty[i][j] >= 4:
ret[7][i][j] = 1
return ret
def featureIllegal(self):
ret = self.getEmptyBoardList()
for i in range(self.__size):
for j in range(self.__size):
if not self.isValidMove(i, j, self.__nextColor):
ret[i][j] = 1
return ret
def featureFourCapture(self):
ret = [self.getEmptyBoardList() for _ in range(8)]
vis = self.getEmptyBoardList()
for i in range(self.__size):
for j in range(self.__size):
if vis[i][j] == 0 and self.__boardList[i][j] != GoBoard.space:
bfs = self.bfsFloodFill(i, j)
for spot in bfs[0]:
vis[spot[0]][spot[1]] = 1
if len(bfs[1]) == 1:
x = bfs[1][0][0]
y = bfs[1][0][1]
self.__boardList[x][y] = - self.__boardList[i][j]
count = len(self.captureSpot((x, y)))
self.__boardList[x][y] = GoBoard.space
if self.__boardList[i][j] == - self.__nextColor:
if not self.isValidMove(x, y, self.__nextColor):
continue
if count == 1:
ret[0][x][y] = 1
elif count == 2:
ret[1][x][y] = 1
elif count == 3:
ret[2][x][y] = 1
elif count >= 4:
ret[3][x][y] = 1
else:
if count == 1:
ret[4][x][y] = 1
elif count == 2:
ret[5][x][y] = 1
elif count == 3:
ret[6][x][y] = 1
elif count >= 4:
ret[7][x][y] = 1
return ret
def allFeatures(self):
ret = [[[0] * GoBoard.featureCount for _ in range(self.__size)] for _ in range(self.__size)]
tmp = []
tmp.append(self.featureCurrent())
tmp.append(self.featureOpponent())
tmp.append(self.featureEmpty())
tmp.append(self.featureAllZeros())
tmp.append(self.featureAllOnes())
tmp += self.featureFourLiberty()
tmp.append(self.featureIllegal())
tmp += self.featureFourCapture()
for i in range(self.__size):
for j in range(self.__size):
for k in range(GoBoard.featureCount):
ret[i][j][k] = tmp[k][i][j]
return ret
def rPrint(arg):
if isinstance(arg, list):
for item in arg:
rPrint(item)
print()
else:
print(arg, end = ' ')
def test():
board = GoBoard(int(input('Board size: ')))
while True:
color = board.getNextColor()
board.printBoard()
if color == GoBoard.black:
print('Black\'s turn')
else:
print('White\'s turn')
x = input('x: ')
y = input('y: ')
if x == '' and y == '':
board.skip()
else:
board.move(int(x), int(y), color)
board.printBoard()
while True:
feature = input('Feature: ')
if feature == '':
break
if hasattr(board, 'feature' + feature):
rPrint(getattr(board, 'feature' + feature)())
else:
print('Feature not found!')
if __name__ == '__main__':
test()
|
python
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, EqualTo
from wtforms import ValidationError
from myproject.models import User
class loginForm(FlaskForm):
email = StringField('Enter Email', validators=[DataRequired()])
password = PasswordField('Enter Password', validators=[DataRequired()])
submit = SubmitField('Login')
class RegForm(FlaskForm):
email = StringField('Enter Email', validators=[DataRequired(), Email()])
username = StringField('Enter Username', validators=[DataRequired()])
password = PasswordField('Enter Password', validators=[DataRequired(), EqualTo('confirm_password', message='Passwords Must Match')])
confirm_password = PasswordField('Enter Password Again', validators=[DataRequired()])
submit = SubmitField('Sign-Up')
    # WTForms only auto-runs inline validators named ``validate_<fieldname>``
    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email Already Exists')
    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username Taken')
|
python
|
import unittest
from function.piecewise_linear_function import PiecewiseLinearFunction
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_plf(self):
array = [(5, 1), (7, 3), (10, 6), (12, 8)]
f = PiecewiseLinearFunction(array)
assert f.eval(0) == 1
assert f.eval(2) == 1
assert f.eval(5) == 1
assert f.eval(6) == 2
assert f.eval(7) == 3
assert f.eval(8) == 4
assert f.eval(9) == 5
assert f.eval(10) == 6
assert f.eval(11) == 7
assert f.eval(11.5) == 7.5
assert f.eval(12) == 8
assert f.eval(13) == 8
def test_one_tp(self):
array = [(5, 1)]
f = PiecewiseLinearFunction(array)
assert f.eval(0) == 1
assert f.eval(2) == 1
assert f.eval(5) == 1
assert f.eval(6) == 1
assert f.eval(7) == 1
def test_add_tp(self):
array = [(5, 1), (7, 3), (10, 6), (12, 8)]
f = PiecewiseLinearFunction(array)
f.add((15, 5))
assert f.eval(0) == 1
assert f.eval(2) == 1
assert f.eval(5) == 1
assert f.eval(6) == 2
assert f.eval(7) == 3
assert f.eval(8) == 4
assert f.eval(9) == 5
assert f.eval(10) == 6
assert f.eval(11) == 7
assert f.eval(11.5) == 7.5
assert f.eval(12) == 8
assert f.eval(13) == 7
assert f.eval(14) == 6
assert f.eval(15) == 5
f.add_and_clear_forward((9, 5))
assert f.eval(0) == 1
assert f.eval(2) == 1
assert f.eval(5) == 1
assert f.eval(6) == 2
assert f.eval(7) == 3
assert f.eval(8) == 4
assert f.eval(9) == 5
assert f.eval(10) == 5
assert f.eval(11) == 5
assert f.eval(11.5) == 5
assert f.eval(12) == 5
assert f.eval(13) == 5
assert f.eval(14) == 5
assert f.eval(15) == 5
if __name__ == "__main__":
unittest.main()
|
python
|
from collections.abc import Iterable
from collections.abc import Mapping
from xml.dom import minidom
from xml.dom.minidom import Element
import pandas as pd
from trainerroad.Utils.Str import *
class Workout:
def __init__(self):
pass
def add_workout_to_document(self, workouts: Iterable, document: minidom.Document, section, parent_section):
"""
:param workouts:
:param document:
:param section:
:param parent_section:
:return:
"""
workouts_ = workouts[1:]
workouts_shifted = pd.Series(workouts_).shift(1).fillna(-1).tolist()
for index, (current_interval, previous_interval) in enumerate(zip(workouts_, workouts_shifted)):
cooldown = index == len(workouts_) - 1
warmup = index == 0
self.build_workout(document=document, section=section, interval=current_interval,
previous_interval=previous_interval, warmup=warmup,
cooldown=cooldown)
parent_section.appendChild(section)
def build_workout(self, document, section, interval: dict, previous_interval: dict, cooldown=False, warmup=False):
"""
:param previous_interval:
:param document:
:param section:
:param interval:
:param cooldown:
:param warmup:
:return:
"""
end = int(interval.get("End"))
start = int(interval.get("Start"))
power = str(float(interval.get("StartTargetPowerPercent")) / 100)
duration = str(end - start)
is_current_fake = bool(interval.get("IsFake"))
# is_previous_fake = None
previous_power = None
if previous_interval != -1:
# is_previous_fake = bool(previous_interval.get("IsFake"))
previous_power = str(float(previous_interval.get("StartTargetPowerPercent")) / 100)
if cooldown is False and warmup is False:
steady_interval = document.createElement(STEADY_STATE)
steady_interval.setAttribute(DURATION, duration)
steady_interval.setAttribute(POWER, power)
new_interval = steady_interval
# print(f"Power: {power}, Start: {start}, End: {end}, Duration {duration}")
elif cooldown and warmup is False:
cooldown_interval = document.createElement(RAMP) if is_current_fake else document.createElement(
STEADY_STATE)
cooldown_interval.setAttribute(DURATION, duration)
# cooldown_interval.setAttribute(POWER_HIGH, power)
# print(f"is_current_fake: {is_current_fake}")
cooldown_interval.setAttribute(POWER_LOW, power)
if is_current_fake:
cooldown_interval.setAttribute(POWER_HIGH, str(round(float(power) - 0.1, 3)))
else:
cooldown_interval.setAttribute(POWER_HIGH, power)
# print(
# f"Cooldown: Previous Power {previous_power}, Power: {power}, Start: {start}, End: {end}, Duration {duration}")
new_interval = cooldown_interval
elif cooldown is False and warmup:
warmup_interval = document.createElement(WARMUP)
warmup_interval.setAttribute(DURATION, duration)
warmup_interval.setAttribute(POWER_HIGH, power)
warmup_interval.setAttribute(POWER_LOW, power)
new_interval = warmup_interval
# print(f"Warmup Power: {power}, Start: {start}, End: {end}, Duration {duration}")
else:
steady_interval = document.createElement(STEADY_STATE)
steady_interval.setAttribute(DURATION, duration)
steady_interval.setAttribute(POWER, power)
new_interval = steady_interval
# print(f"Power: {power}, Start: {start}, End: {end}, Duration {duration}")
section.appendChild(new_interval)
return section
def add_workout_details(self, details, section: Element, document: minidom.Document):
"""
:param details:
:param section:
:param document:
:return:
"""
workout_name = details.get(WORKOUT_NAME)
description = details.get(WORKOUT_DESC)
author_section = document.createElement(AUTHOR)
author_section.appendChild(document.createTextNode(TRAINER_ROAD))
description_section = document.createElement(DESCRIPTION)
description_section.appendChild(document.createTextNode(description))
name_section = document.createElement(NAME)
name_section.appendChild(document.createTextNode(workout_name))
section.appendChild(author_section)
section.appendChild(description_section)
section.appendChild(name_section)
def convert_workout(self, interval: Iterable, workout_details: Mapping) -> minidom.Document:
"""
:param interval:
:param workout_details:
:return:
"""
document = minidom.Document()
workout_file = document.createElement(WORKOUT_FILE)
workout_section = document.createElement(WORKOUT_STR)
self.add_workout_details(workout_details, document=document, section=workout_file)
self.add_workout_to_document(interval, document=document, section=workout_section, parent_section=workout_file)
document.appendChild(workout_file)
return document
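# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only).  The interval dicts simply mirror
# the keys read by ``build_workout`` above ("Start", "End",
# "StartTargetPowerPercent", "IsFake"); they are not a documented TrainerRoad
# payload, and the first entry is dropped by the ``[1:]`` slice in
# ``add_workout_to_document``.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_intervals = [
        {"Start": 0, "End": 0, "StartTargetPowerPercent": 40, "IsFake": False},      # skipped by [1:]
        {"Start": 0, "End": 300, "StartTargetPowerPercent": 50, "IsFake": False},    # warmup
        {"Start": 300, "End": 900, "StartTargetPowerPercent": 95, "IsFake": False},  # steady block
        {"Start": 900, "End": 1200, "StartTargetPowerPercent": 55, "IsFake": False}, # cooldown
    ]
    example_details = {WORKOUT_NAME: "Example Ride", WORKOUT_DESC: "Generated for illustration"}
    document = Workout().convert_workout(example_intervals, example_details)
    print(document.toprettyxml(indent="  "))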
|
python
|
import torch
import torch.nn as nn
from torch.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
import warnings
class _InputEqualizationObserver(nn.Module):
r"""Observer for tracking the running min/max values of input columns, and
computing the quantization parameters for the overall min/max input values.
Args:
dtype: Quantized data type
qscheme: Quantization scheme
quant_min: Minimum quantization value. If unspecified, it will
follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will
follow the 8-bit setup.
output_obs: For the user to specify what kind of output observer they
would like to use
The running minimum/maximum :math:`x_\text{min/max}` are computed in the
same way as :class:`~torch.quantization.observer.PerChannelMinMaxObserver`,
with the difference that the running min/max values are stored per column.
The qparams are calculated by multiplying the min/max input column values
with the equalization scale, reducing to find the global min/max input
values, and then calculating in the same way as in
:class:`~torch.quantization.observer.MinMaxObserver`
    .. note:: If the running minimum equals the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
quant_min=None, quant_max=None, output_obs=None,
factory_kwargs=None) -> None:
super(_InputEqualizationObserver, self).__init__()
if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
raise TypeError("Input qscheme must be per-tensor")
self.input_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
if output_obs is None:
self.output_obs = MinMaxObserver(dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
else:
self.output_obs = output_obs
self.equalization_scale = torch.empty(0)
def forward(self, x_orig):
        # TODO: Allow for convolutional layers
if not (x_orig.ndim == 2):
raise ValueError("InputEqualizationObserver only supports Linear layers")
return self.input_obs(x_orig)
def get_input_minmax(self):
return (self.input_obs.min_vals, self.input_obs.max_vals)
def set_equalization_scale(self, equalization_scale):
self.equalization_scale = equalization_scale
def calculate_qparams(self):
r"""
Returns the scale/zero_point for the input and weight rows
"""
if self.equalization_scale.nelement() == 0:
            warnings.warn(
                "Must call calculate_equalization_scale and set_equalization_scale "
                "before calling calculate_qparams. "
                "Returning default scale and zero point."
            )
return torch.tensor([1.0]), torch.tensor([0]), torch.tensor([1.0]), torch.tensor([0])
# Calculate qparams for the scaled min/max inputs
# Scale the input by the equalization scale located at the same column
# index
(min_inputs, max_inputs) = self.get_input_minmax()
min_input_scaled = torch.min(torch.mul(min_inputs, self.equalization_scale))
max_input_scaled = torch.max(torch.mul(max_inputs, self.equalization_scale))
(scale_input, zero_point_input) = self.input_obs._calculate_qparams(min_input_scaled, max_input_scaled)
return scale_input, zero_point_input
class _WeightEqualizationObserver(nn.Module):
r"""Observer for tracking the running min/max values of weight columns and
rows, and computing the quantization parameters for the weight rows.
Args:
dtype: Quantized data type
qscheme: Quantization scheme
quant_min: Minimum quantization value. If unspecified, it will
follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will
follow the 8-bit setup.
This observer is made up of 2 PerChannelMinMaxObservers
- weight_col_obs: Used to record the running minimum and maximum of
columns of incoming weight tensors
- weight_row_obs: Used to record the running minimum and maximum of
rows of incoming weight tensors
The running minimum/maximum :math:`w_\text{min/max}` are computed in the
same way as :class:`~torch.quantization.observer.PerChannelMinMaxObserver`.
The qparams are calculated by multiplying the min/max weight row values
with the inverse of the equalization scale, and then calculating in the same
way as in :class:`~torch.quantization.observer.PerChannelMinMaxObserver`
    .. note:: If the running minimum equals the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
def __init__(self, dtype=torch.qint8, qscheme=torch.per_tensor_affine, quant_min=None,
quant_max=None, factory_kwargs=None) -> None:
super(_WeightEqualizationObserver, self).__init__()
self.weight_col_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
self.weight_row_obs = PerChannelMinMaxObserver(ch_axis=0, dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
        self.equalization_scale = torch.empty(0)
        # set in _forward(); remain None until a weight tensor has been observed
        self.min_weights_ind = None
        self.max_weights_ind = None
def forward(self, w_orig):
        # TODO: Allow for convolutional layers
if not (w_orig.ndim == 2):
raise ValueError("WeightEqualizationObserver only supports Linear layers")
return self._forward(w_orig)
def _forward(self, w_orig):
r"""
Calculates the min/max values of each weight column and weight row.
"""
w_orig = self.weight_col_obs(w_orig)
w_orig = self.weight_row_obs(w_orig)
# Calculate the column indices of the min/max weight in each row
num_row, _ = w_orig.shape
min_weights_ind = []
max_weights_ind = []
for i in range(num_row):
min_weights_ind.append(torch.nonzero(w_orig[i] == self.weight_row_obs.min_vals[i])[0][0])
max_weights_ind.append(torch.nonzero(w_orig[i] == self.weight_row_obs.max_vals[i])[0][0])
self.min_weights_ind = torch.tensor(min_weights_ind)
self.max_weights_ind = torch.tensor(max_weights_ind)
return w_orig
def get_weight_col_minmax(self):
return (self.weight_col_obs.min_vals, self.weight_col_obs.max_vals)
def get_weight_row_minmax(self):
return (self.weight_row_obs.min_vals, self.weight_row_obs.max_vals)
def set_equalization_scale(self, equalization_scale):
self.equalization_scale = equalization_scale
def calculate_qparams(self):
r"""
Returns the scale/zero_point for the input and weight rows
"""
if self.equalization_scale.nelement() == 0:
            warnings.warn(
                "Must call calculate_equalization_scale and set_equalization_scale "
                "before calling calculate_qparams. "
                "Returning default scale and zero point."
            )
return torch.tensor([1.0]), torch.tensor([0]), torch.tensor([1.0]), torch.tensor([0])
if self.min_weights_ind is None or self.max_weights_ind is None:
            warnings.warn(
                "Must find the column indices of the minimum and maximum of each "
                "row in the weights before calculating the qparams. "
                "Returning default scale and zero point."
            )
return torch.tensor([1.0]), torch.tensor([0]), torch.tensor([1.0]), torch.tensor([0])
# Calculate the qparams for weights by using the rows
# Scale the weight rows by the reciprocal of the equalization scale
# located at the same column index
(min_weights, max_weights) = self.get_weight_row_minmax()
min_weights_scaled = torch.mul(min_weights, torch.reciprocal(self.equalization_scale[self.min_weights_ind]))
max_weights_scaled = torch.mul(max_weights, torch.reciprocal(self.equalization_scale[self.max_weights_ind]))
(scale_weight, zero_point_weight) = self.weight_row_obs._calculate_qparams(min_weights_scaled, max_weights_scaled)
return scale_weight, zero_point_weight
def calculate_equalization_scale(input_obs: _InputEqualizationObserver,
weight_obs: _WeightEqualizationObserver) -> torch.Tensor:
r""" Calculates the equalization scale and sets the equalization_scale value
in the observers.
Args:
input_obs: Observer that tracks the ranges for the input columns
weight_obs: Observer that tracks the ranges for the weight columns
"""
(min_inputs, max_inputs) = input_obs.get_input_minmax()
(min_weights, max_weights) = weight_obs.get_weight_col_minmax()
if not (min_inputs.shape == min_weights.shape):
raise ValueError(
"Input and Weight must have the same column dimension. " +
f"Found {min_inputs.shape} and {max_inputs.shape} instead."
)
equalization_scale = torch.sqrt((max_weights - min_weights) / (max_inputs - min_inputs))
return equalization_scale
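# A minimal sketch of how the pieces above might be combined for one Linear
# layer; the tensor shapes are illustrative and driving the observers by hand
# like this is an assumption about intended use, not the documented FX workflow.
if __name__ == "__main__":
    x = torch.randn(16, 8)        # activations: (batch, in_features)
    weight = torch.randn(4, 8)    # Linear weight: (out_features, in_features)
    input_obs = _InputEqualizationObserver()
    weight_obs = _WeightEqualizationObserver()
    input_obs(x)
    weight_obs(weight)
    scale = calculate_equalization_scale(input_obs, weight_obs)
    input_obs.set_equalization_scale(scale)
    weight_obs.set_equalization_scale(scale)
    print("input qparams:", input_obs.calculate_qparams())
    print("weight qparams:", weight_obs.calculate_qparams())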
|
python
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
from pyro.distributions.torch import Chi2
from pyro.distributions.torch_distribution import TorchDistribution
from pyro.distributions.util import broadcast_shape
class MultivariateStudentT(TorchDistribution):
"""
Creates a multivariate Student's t-distribution parameterized by degree of
freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale_tril`.
:param ~torch.Tensor df: degrees of freedom
:param ~torch.Tensor loc: mean of the distribution
:param ~torch.Tensor scale_tril: scale of the distribution, which is
a lower triangular matrix with positive diagonal entries
"""
arg_constraints = {
"df": constraints.positive,
"loc": constraints.real_vector,
"scale_tril": constraints.lower_cholesky,
}
support = constraints.real_vector
has_rsample = True
def __init__(self, df, loc, scale_tril, validate_args=None):
dim = loc.size(-1)
assert scale_tril.shape[-2:] == (dim, dim)
if not isinstance(df, torch.Tensor):
df = loc.new_tensor(df)
batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])
event_shape = torch.Size((dim,))
self.df = df.expand(batch_shape)
self.loc = loc.expand(batch_shape + event_shape)
self._unbroadcasted_scale_tril = scale_tril
self._chi2 = Chi2(self.df)
super().__init__(batch_shape, event_shape, validate_args=validate_args)
@lazy_property
def scale_tril(self):
return self._unbroadcasted_scale_tril.expand(
self._batch_shape + self._event_shape + self._event_shape
)
@lazy_property
def covariance_matrix(self):
# NB: this is not covariance of this distribution;
# the actual covariance is df / (df - 2) * covariance_matrix
return torch.matmul(
self._unbroadcasted_scale_tril,
self._unbroadcasted_scale_tril.transpose(-1, -2),
).expand(self._batch_shape + self._event_shape + self._event_shape)
@lazy_property
def precision_matrix(self):
identity = torch.eye(
self.loc.size(-1), device=self.loc.device, dtype=self.loc.dtype
)
return torch.cholesky_solve(identity, self._unbroadcasted_scale_tril).expand(
self._batch_shape + self._event_shape + self._event_shape
)
@staticmethod
def infer_shapes(df, loc, scale_tril):
event_shape = loc[-1:]
batch_shape = broadcast_shape(df, loc[:-1], scale_tril[:-2])
return batch_shape, event_shape
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(MultivariateStudentT, _instance)
batch_shape = torch.Size(batch_shape)
loc_shape = batch_shape + self.event_shape
scale_shape = loc_shape + self.event_shape
new.df = self.df.expand(batch_shape)
new.loc = self.loc.expand(loc_shape)
new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril
if "scale_tril" in self.__dict__:
new.scale_tril = self.scale_tril.expand(scale_shape)
if "covariance_matrix" in self.__dict__:
new.covariance_matrix = self.covariance_matrix.expand(scale_shape)
if "precision_matrix" in self.__dict__:
new.precision_matrix = self.precision_matrix.expand(scale_shape)
new._chi2 = self._chi2.expand(batch_shape)
super(MultivariateStudentT, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()
Z = self._chi2.rsample(sample_shape)
Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)
return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
n = self.loc.size(-1)
y = torch.linalg.solve_triangular(
self.scale_tril, (value - self.loc).unsqueeze(-1), upper=False
).squeeze(-1)
Z = (
self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
+ 0.5 * n * self.df.log()
+ 0.5 * n * math.log(math.pi)
+ torch.lgamma(0.5 * self.df)
- torch.lgamma(0.5 * (self.df + n))
)
return -0.5 * (self.df + n) * torch.log1p(y.pow(2).sum(-1) / self.df) - Z
@property
def mean(self):
m = self.loc.clone()
m[self.df <= 1, :] = float("nan")
return m
@property
def variance(self):
m = self.scale_tril.pow(2).sum(-1) * (self.df / (self.df - 2)).unsqueeze(-1)
m[(self.df <= 2) & (self.df > 1), :] = float("inf")
m[self.df <= 1, :] = float("nan")
return m
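# A minimal usage sketch with illustrative parameters (a demo, not part of the
# library module): draw reparameterized samples and score them.
if __name__ == "__main__":
    d = MultivariateStudentT(df=torch.tensor(4.0),
                             loc=torch.zeros(3),
                             scale_tril=torch.eye(3))
    samples = d.rsample((10,))      # 10 reparameterized draws of dimension 3
    log_p = d.log_prob(samples)     # log density of each draw
    print(samples.shape, log_p.shape)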
|
python
|
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from ._univariate_selection import chi2
from ._univariate_selection import f_classif
from ._univariate_selection import f_oneway
from ._univariate_selection import f_regression
from ._univariate_selection import SelectPercentile
from ._univariate_selection import SelectKBest
from ._univariate_selection import SelectFpr
from ._univariate_selection import SelectFdr
from ._univariate_selection import SelectFwe
from ._univariate_selection import GenericUnivariateSelect
from ._variance_threshold import VarianceThreshold
from ._rfe import RFE
from ._rfe import RFECV
from ._from_model import SelectFromModel
from ._mutual_info import mutual_info_regression, mutual_info_classif
from ._base import SelectorMixin
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectFromModel',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'mutual_info_classif',
'mutual_info_regression',
'SelectorMixin']
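# A minimal usage sketch of the univariate selection tools exported above; the
# iris dataset and k=2 are illustrative and this demo is not part of the package.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    X, y = load_iris(return_X_y=True)
    X_best = SelectKBest(chi2, k=2).fit_transform(X, y)
    print(X_best.shape)  # -> (150, 2)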
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-28 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0026_rename_preprintservice_subjects'),
]
operations = [
migrations.AlterField(
model_name='subject',
name='text',
field=models.CharField(max_length=256),
),
migrations.AlterUniqueTogether(
name='subject',
unique_together=set([('text', 'provider')]),
),
]
|
python
|
import requests
from crawl_service.util.config import CONFIG
def new_session() -> requests.Session:
session = requests.Session()
session.proxies = CONFIG.get('proxies', dict())
return session
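# Minimal usage sketch; the URL is illustrative.
if __name__ == '__main__':
    session = new_session()
    response = session.get('https://example.com')
    print(response.status_code)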
|
python
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_autoscaling_auto_scaling_configuration_actions
short_description: Perform actions on an AutoScalingConfiguration resource in Oracle Cloud Infrastructure
description:
- Perform actions on an AutoScalingConfiguration resource in Oracle Cloud Infrastructure
- For I(action=change_compartment), moves an autoscaling configuration into a different compartment within the same tenancy. For information
about moving resources between compartments, see
L(Moving Resources to a Different Compartment,https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes).
When you move an autoscaling configuration to a different compartment, associated resources such as instance
pools are not moved.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
auto_scaling_configuration_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the autoscaling configuration.
type: str
aliases: ["id"]
required: true
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to move the autoscaling configuration
to.
type: str
required: true
action:
description:
- The action to perform on the AutoScalingConfiguration.
type: str
required: true
choices:
- "change_compartment"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action change_compartment on auto_scaling_configuration
oci_autoscaling_auto_scaling_configuration_actions:
# required
auto_scaling_configuration_id: "ocid1.autoscalingconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
action: change_compartment
"""
RETURN = """
auto_scaling_configuration:
description:
- Details of the AutoScalingConfiguration resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the autoscaling
configuration.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the autoscaling configuration.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
cool_down_in_seconds:
description:
- For threshold-based autoscaling policies, this value is the minimum period of time to wait between scaling actions.
The cooldown period gives the system time to stabilize before rescaling. The minimum value is 300 seconds, which
is also the default. The cooldown period starts when the instance pool reaches the running state.
- For schedule-based autoscaling policies, this value is not used.
returned: on success
type: int
sample: 56
is_enabled:
description:
- Whether the autoscaling configuration is enabled.
returned: on success
type: bool
sample: true
resource:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- The type of resource.
returned: on success
type: str
sample: instancePool
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the resource that is managed by the autoscaling
configuration.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
policies:
description:
- Autoscaling policy definitions for the autoscaling configuration. An autoscaling policy defines the criteria that
trigger autoscaling actions and the actions to take.
returned: on success
type: complex
contains:
capacity:
description:
- The capacity requirements of the autoscaling policy.
returned: on success
type: complex
contains:
max:
description:
- For a threshold-based autoscaling policy, this value is the maximum number of instances the instance pool is allowed
to increase to (scale out).
- For a schedule-based autoscaling policy, this value is not used.
returned: on success
type: int
sample: 56
min:
description:
- For a threshold-based autoscaling policy, this value is the minimum number of instances the instance pool is allowed
to decrease to (scale in).
- For a schedule-based autoscaling policy, this value is not used.
returned: on success
type: int
sample: 56
initial:
description:
- For a threshold-based autoscaling policy, this value is the initial number of instances to launch in the instance pool
immediately after autoscaling is enabled. After autoscaling retrieves performance metrics, the number of
instances is automatically adjusted from this initial number to a number that is based on the limits that
you set.
- For a schedule-based autoscaling policy, this value is the target pool size to scale to when executing the schedule
that's defined in the autoscaling policy.
returned: on success
type: int
sample: 56
id:
description:
- The ID of the autoscaling policy that is assigned after creation.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
policy_type:
description:
- The type of autoscaling policy.
returned: on success
type: str
sample: scheduled
time_created:
description:
- The date and time the autoscaling configuration was created, in the format defined by RFC3339.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
is_enabled:
description:
- Whether the autoscaling policy is enabled.
returned: on success
type: bool
sample: true
execution_schedule:
description:
- The schedule for executing the autoscaling policy.
returned: on success
type: complex
contains:
type:
description:
- The type of execution schedule.
returned: on success
type: str
sample: cron
timezone:
description:
- The time zone for the execution schedule.
returned: on success
type: str
sample: UTC
expression:
description:
- A cron expression that represents the time at which to execute the autoscaling policy.
- "Cron expressions have this format: `<second> <minute> <hour> <day of month> <month> <day of week> <year>`"
- You can use special characters that are supported with the Quartz cron implementation.
- You must specify `0` as the value for seconds.
- "Example: `0 15 10 ? * *`"
returned: on success
type: str
sample: expression_example
resource_action:
description:
- ""
returned: on success
type: complex
contains:
action_type:
description:
- The type of resource action.
returned: on success
type: str
sample: power
action:
description:
- ""
returned: on success
type: str
sample: STOP
rules:
description:
- ""
returned: on success
type: complex
contains:
action:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- The type of action to take.
returned: on success
type: str
sample: CHANGE_COUNT_BY
value:
description:
- To scale out (increase the number of instances), provide a positive value. To scale in (decrease the number of
instances), provide a negative value.
returned: on success
type: int
sample: 56
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
id:
description:
- ID of the condition that is assigned after creation.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
metric:
description:
- ""
returned: on success
type: complex
contains:
metric_type:
description:
- ""
returned: on success
type: str
sample: CPU_UTILIZATION
threshold:
description:
- ""
returned: on success
type: complex
contains:
operator:
description:
- The comparison operator to use. Options are greater than (`GT`), greater than or equal to
(`GTE`), less than (`LT`), and less than or equal to (`LTE`).
returned: on success
type: str
sample: GT
value:
description:
- ""
returned: on success
type: int
sample: 56
time_created:
description:
- The date and time the autoscaling configuration was created, in the format defined by RFC3339.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
max_resource_count:
description:
- The maximum number of resources to scale out to.
returned: on success
type: int
sample: 56
min_resource_count:
description:
- The minimum number of resources to scale in to.
returned: on success
type: int
sample: 56
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"display_name": "display_name_example",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"cool_down_in_seconds": 56,
"is_enabled": true,
"resource": {
"type": "instancePool",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
},
"policies": [{
"capacity": {
"max": 56,
"min": 56,
"initial": 56
},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"policy_type": "scheduled",
"time_created": "2013-10-20T19:20:30+01:00",
"is_enabled": true,
"execution_schedule": {
"type": "cron",
"timezone": "UTC",
"expression": "expression_example"
},
"resource_action": {
"action_type": "power",
"action": "STOP"
},
"rules": [{
"action": {
"type": "CHANGE_COUNT_BY",
"value": 56
},
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"metric": {
"metric_type": "CPU_UTILIZATION",
"threshold": {
"operator": "GT",
"value": 56
}
}
}]
}],
"time_created": "2013-10-20T19:20:30+01:00",
"max_resource_count": 56,
"min_resource_count": 56
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.autoscaling import AutoScalingClient
from oci.autoscaling.models import ChangeAutoScalingCompartmentDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AutoScalingConfigurationActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
change_compartment
"""
@staticmethod
def get_module_resource_id_param():
return "auto_scaling_configuration_id"
def get_module_resource_id(self):
return self.module.params.get("auto_scaling_configuration_id")
def get_get_fn(self):
return self.client.get_auto_scaling_configuration
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_auto_scaling_configuration,
auto_scaling_configuration_id=self.module.params.get(
"auto_scaling_configuration_id"
),
)
def change_compartment(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ChangeAutoScalingCompartmentDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.change_auto_scaling_configuration_compartment,
call_fn_args=(),
call_fn_kwargs=dict(
auto_scaling_configuration_id=self.module.params.get(
"auto_scaling_configuration_id"
),
change_compartment_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
AutoScalingConfigurationActionsHelperCustom = get_custom_class(
"AutoScalingConfigurationActionsHelperCustom"
)
class ResourceHelper(
AutoScalingConfigurationActionsHelperCustom,
AutoScalingConfigurationActionsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
auto_scaling_configuration_id=dict(
aliases=["id"], type="str", required=True
),
compartment_id=dict(type="str", required=True),
action=dict(type="str", required=True, choices=["change_compartment"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="auto_scaling_configuration",
service_client_class=AutoScalingClient,
namespace="autoscaling",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
|
python
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import List, Property, Str
from traitsui.api import View, VGroup, UItem
from traitsui.menu import Action
from pychron.dvc.dvc_irradiationable import DVCAble
class OKButton(Action):
name = 'OK'
enabled_when = 'ok_enabled'
STYLESHEET = 'QLabel {font-size: 14px; color: red}'
class BaseEntry(DVCAble):
value = Str
available = List
error_message = Str
ok_enabled = Property(depends_on='value')
tag = ''
def do(self):
return self._add_loop()
def _get_ok_enabled(self):
if self.value not in self.available:
self.error_message = ''
return True
else:
self.error_message = '{} already exists. Choose another'.format(self.tag)
return False
    def _add_loop(self):
        while 1:
            info = self.edit_traits()
            if info.result:
                db = self.get_database()
                ret = self._add_item()
                if ret is None:
                    return False
                elif ret:
                    return True
                else:
                    return False
            else:
                # the user cancelled the dialog; stop instead of re-opening it forever
                return False
def _add_item(self):
raise NotImplementedError
def _new_view(self, *args, **kw):
for a, v in (('buttons', ['OK', 'Cancel']),
('resizable', True),
('kind', 'livemodal')):
if a not in kw:
kw[a] = v
v = View(*args, **kw)
return v
def traits_view(self):
# style_sheet='QLabel {font-size: 10px} QLineEdit {font-size: 10px}'
a = VGroup(UItem('value'),
UItem('error_message', style='readonly', style_sheet=STYLESHEET))
buttons = [OKButton(), 'Cancel']
return self._new_view(a,
width=400,
title='Add {}'.format(self.tag),
buttons=buttons)
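# A hypothetical subclass sketch showing how BaseEntry is intended to be
# extended; the database call used in _add_item is an assumption for
# illustration, not an API taken from this file.
class ProjectEntry(BaseEntry):
    tag = 'Project'
    def _add_item(self):
        db = self.get_database()
        # hypothetical database call; _add_loop treats a truthy return as
        # success and None/falsy as failure
        return db.add_project(self.value) is not None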
# ============= EOF =============================================
|
python
|
from django.core.management.base import BaseCommand, CommandError
from game.models import *
import settings
from PIL import Image
import random
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
if lv == 1:
v = int(value, 16)*17
return v, v, v
if lv == 3:
return tuple(int(value[i:i+1], 16)*17 for i in range(0, 3))
return tuple(int(value[i:i+lv/3], 16) for i in range(0, lv, lv/3))
class Command(BaseCommand):
args = ''
help = 'Run this command whenever 2 minutes go by.'
def handle(self, *args, **options):
Announcement.objects.all().delete()
all_actions = {}
player_locations = {}
for account in Account.objects.all():
if account.actions != '':
this_accounts_actions = []
actions = account.actions.split(',')
for action in actions:
if action == 'walk':
this_accounts_actions.append('walk-start')
this_accounts_actions.append('walk')
else:
this_accounts_actions.append(action)
if len(this_accounts_actions) < 10:
for i in range(10 - len(this_accounts_actions)):
this_accounts_actions.append('noop')
all_actions[account.id] = this_accounts_actions
account.inactive_turns = 0
else:
all_actions[account.id] = ['noop']*10
account.inactive_turns += 1
account.last_chat_message = account.chat_message
account.chat_message = ''
account.last_actions = account.actions
account.last_col = account.col
account.last_row = account.row
account.last_direction = account.direction
account.actions = ''
account.save()
print 'Action hash setup complete.'
for second in range(10):
for account in Account.objects.all():
this_action_name = all_actions[account.id][second]
if this_action_name != 'walk-start' and this_action_name != 'noop':
# Figure stamina for this action
this_action = get_action_by_name(this_action_name)
account.stamina += this_action['stamina']
if account.stamina > 10:
account.stamina = 10
if this_action_name == 'walk':
if account.direction == 'west':
account.col -= 1
if account.direction == 'east':
account.col += 1
if account.direction == 'north':
account.row -= 1
if account.direction == 'south':
account.row += 1
# Give flags to those who should have them
square = get_object_or_None(Square, col=account.col, row=account.row)
if square != None:
if TILES[square.tile] == 'red-flag' and account.team == 'blue':
account.has_flag = True
if TILES[square.tile] == 'blue-flag' and account.team == 'red':
account.has_flag = True
if (account.has_flag and square.col < 25 and account.team == 'red') or (account.has_flag and square.col >= 25 and account.team == 'blue'):
account.has_flag = False
account.flags_gotten += 1
Announcement.objects.create(text='%s gets a flag for %s' % (account.username, account.get_team_display()))
if this_action_name == 'run':
# Factor this into a function sometime
if account.direction == 'west':
account.col -= 1
if account.direction == 'east':
account.col += 1
if account.direction == 'north':
account.row -= 1
if account.direction == 'south':
account.row += 1
# Give flags to those who should have them
square = get_object_or_None(Square, col=account.col, row=account.row)
if square != None:
if TILES[square.tile] == 'red-flag' and account.team == 'blue':
account.has_flag = True
if TILES[square.tile] == 'blue-flag' and account.team == 'red':
account.has_flag = True
if (account.has_flag and square.col < 25 and account.team == 'red') or (account.has_flag and square.col >= 25 and account.team == 'blue'):
account.has_flag = False
account.flags_gotten += 1
Announcement.objects.create(text='%s gets a flag for %s' % (account.username, account.get_team_display()))
if account.direction == 'west':
account.col -= 1
if account.direction == 'east':
account.col += 1
if account.direction == 'north':
account.row -= 1
if account.direction == 'south':
account.row += 1
# Give flags to those who should have them
square = get_object_or_None(Square, col=account.col, row=account.row)
if square != None:
if TILES[square.tile] == 'red-flag' and account.team == 'blue':
account.has_flag = True
if TILES[square.tile] == 'blue-flag' and account.team == 'red':
account.has_flag = True
if (account.has_flag and square.col < 25 and account.team == 'red') or (account.has_flag and square.col >= 25 and account.team == 'blue'):
account.has_flag = False
account.flags_gotten += 1
Announcement.objects.create(text='%s gets a flag for %s' % (account.username, account.get_team_display()))
if this_action_name in ['north', 'south', 'east', 'west']:
account.direction = this_action_name
if account.col < 1: account.col = 1
if account.col > 48: account.col = 48
if account.row < 1: account.row = 1
if account.row > 73: account.row = 73
account.save()
if account.col not in player_locations:
player_locations[account.col] = {}
if account.row not in player_locations[account.col]:
player_locations[account.col][account.row] = []
if account not in player_locations[account.col][account.row]:
player_locations[account.col][account.row].append(account)
print 'Action resolutions finished'
for row in range(75):
for col in range(50):
if player_locations.has_key(col):
if player_locations[col].has_key(row):
players_in_this_square = player_locations[col][row]
if len(players_in_this_square) >= 2:
seen = {}
for account in players_in_this_square:
for other_account in players_in_this_square:
if account != other_account and (not seen.has_key(str(account.id) + '|' + str(other_account.id))) and (not seen.has_key(str(other_account.id) + '|' + str(account.id))):
if account.team != other_account.team:
if col < 25:
if account.team == 'blue':
account.col = BLUE_START['col']
account.row = BLUE_START['row']
other_account.enemies_tagged += 1
if other_account.team == 'blue':
other_account.col = BLUE_START['col']
other_account.row = BLUE_START['row']
account.enemies_tagged += 1
else:
if account.team == 'red':
account.col = RED_START['col']
account.row = RED_START['row']
other_account.enemies_tagged += 1
if other_account.team == 'red':
other_account.col = RED_START['col']
other_account.row = RED_START['row']
account.enemies_tagged += 1
account.save()
other_account.save()
seen[str(account.id) + '|' + str(other_account.id)] = True
seen[str(other_account.id) + '|' + str(account.id)] = True
squares = Square.objects.order_by('row', 'col')
im = Image.new('RGB', (50, 75), 'black')
for square in squares:
terrain = square.get_terrain_type()
if terrain == 'grass':
color = (102, 188, 83)
elif terrain == 'water':
color = (71, 132, 224)
elif terrain == 'corn':
color = (255, 255, 0)
elif terrain == 'rock':
color = (160, 160, 160)
elif terrain == 'trees':
color = (8, 74, 41)
elif terrain == 'dirt':
color = (205, 115, 32)
elif terrain == 'shrubbery':
color = (8, 74, 41)
elif terrain == 'road':
color = (200, 200, 200)
elif terrain == 'red-flag':
color = (150, 0, 30)
elif terrain == 'blue-flag':
color = (0, 0, 196)
if terrain == 'red-flag' or terrain == 'blue-flag':
im.putpixel((square.col, square.row), color)
im.putpixel((square.col-1, square.row), color)
im.putpixel((square.col-1, square.row-1), color)
im.putpixel((square.col-1, square.row+1), color)
im.putpixel((square.col+1, square.row), color)
else:
im.putpixel((square.col, square.row), color)
for account in Account.objects.filter(inactive_turns__lt=settings.TURNS_TILL_DEACTIVATION):
if account.team == 'red':
color = (255, 0, 0)
elif account.team == 'blue':
color = (0, 0, 255)
im.putpixel((account.col, account.row), color)
im.putpixel((account.col-1, account.row), color)
im.putpixel((account.col+1, account.row), color)
im.putpixel((account.col, account.row-1), color)
im.putpixel((account.col, account.row+1), color)
im = im.resize((250, 375), Image.NEAREST)
im.save('static/images/minimap.png', 'PNG')
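# A hedged sketch of the helper that the "Factor this into a function sometime"
# comment in handle() asks for; it mirrors the duplicated walk/run movement and
# flag-capture logic above and is not wired into handle() here.
def _step_and_check_flag(account):
    if account.direction == 'west':
        account.col -= 1
    if account.direction == 'east':
        account.col += 1
    if account.direction == 'north':
        account.row -= 1
    if account.direction == 'south':
        account.row += 1
    square = get_object_or_None(Square, col=account.col, row=account.row)
    if square is not None:
        if TILES[square.tile] == 'red-flag' and account.team == 'blue':
            account.has_flag = True
        if TILES[square.tile] == 'blue-flag' and account.team == 'red':
            account.has_flag = True
        if (account.has_flag and square.col < 25 and account.team == 'red') or (account.has_flag and square.col >= 25 and account.team == 'blue'):
            account.has_flag = False
            account.flags_gotten += 1
            Announcement.objects.create(text='%s gets a flag for %s' % (account.username, account.get_team_display()))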
|
python
|
'''The user enters an integer: if the number is even the output is (even),
and if it is odd the output is (odd).'''
#input
num=int(input("Enter the number:"))
#logic
if (num%2==0):
print(f"{num} is even")
else:
print(f"{num} is odd")
|
python
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
def copy_dir(source_directory,
target_directory):
"""
Copies files from source_directory to\
target_directory. If target directory doesn't exist\
it will be created.
:param source_directory: source
:type source_directory: str
:param adict: if specified, adict will be printed
:type adict: dict
"""
if os.path.isdir(source_directory):
def deep_copy(source, target):
"""Copies recursively all files from source to destination
"""
names = os.listdir(source)
os.makedirs(target, exist_ok=True)
for name in names:
src_name = os.path.join(source, name)
tgt_name = os.path.join(target, name)
if os.path.isdir(src_name):
# source is a directory
deep_copy(src_name, tgt_name)
else:
# source is a file
print('Copying "{}" to "{}" ...'
.format(src_name, tgt_name))
shutil.copy2(src_name, tgt_name)
# copy files recursively
deep_copy(source_directory,
target_directory)
else:
print('Error. Directory "{}" was not found.'.format(source_directory))
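# A minimal usage sketch; the source and target paths are illustrative.
if __name__ == '__main__':
    copy_dir('/tmp/model-assets', '/tmp/model-assets-copy')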
|
python
|
# Generated by Django 2.2.9 on 2020-01-11 21:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bio_app', '0013_auto_20200111_2035'),
]
operations = [
migrations.AlterField(
model_name='clusters',
name='cluster_id',
field=models.CharField(max_length=255, verbose_name='cluster Id'),
),
]
|
python
|
import pandas as pd
from bokeh.io import show, curdoc
from bokeh.layouts import layout
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.plotting import figure
from bokeh.sampledata.degrees import data
from bokeh.themes import Theme
data = data.set_index('Year')
categories = data.columns.tolist()
categories.reverse()
curdoc().theme = Theme(json={'attrs': {
'Figure': {
'toolbar_location': None,
'outline_line_color': None,
'min_border_right': 10,
},
'Axis': {
'major_tick_in': None,
'minor_tick_out': None,
'minor_tick_in': None,
'axis_line_color': '#CAC6B6',
'major_tick_line_color': '#CAC6B6',
},
'Legend': {
'background_fill_alpha': 0.8,
}
}})
def _make_source_for_year(year):
# Get data out of dataframe for a given year
year_df = pd.DataFrame(data.loc[year]).reset_index()
year_df = year_df.rename(columns={year: 'percent_female', 'index': 'category'})
source = ColumnDataSource(year_df)
return source
def all_for_year(year):
source = _make_source_for_year(year)
bar_opts = dict(y='category', height=0.5)
p = figure(title=str(year), y_range=FactorRange(factors=categories), x_range=(0, 100), tools='')
p.grid.grid_line_color = None
p.hbar(left=0, right='percent_female', color='#AE9E59', legend='Female', source=source, **bar_opts)
p.hbar(left='percent_female', right=100, color='#CAC6B6', legend='Male', source=source, **bar_opts)
return p
def two_categories_over_time():
bar_opts = dict(width=0.3, alpha=0.8)
p = figure(title="Percentage of women graduating over time in two fields.", y_range=(0, 100), tools='')
p.vbar(bottom=0, top=data['Psychology'], x=data.index - 0.2, color='#4F4478', legend='Psychology', **bar_opts)
p.vbar(bottom=0, top=data['Engineering'], x=data.index + 0.2, color='#827F8B', legend='Engineering', **bar_opts)
return p
l = layout([
[all_for_year(1970), all_for_year(2010)],
[two_categories_over_time()],
], sizing_mode='stretch_both')
show(l)
|
python
|
from django import template
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.utils.html import format_html, format_html_join
from core.constants import VIDEO_DURATION_DATA_ATTR_NAME
register = template.Library()
def _get_poster_attribute(video):
if video and video.thumbnail:
return f'poster="{video.thumbnail.url}" ' # trailing space is deliberate
return ''
@register.simple_tag
def render_video(block):
"""Renders a video block (eg in a lesson hero or a case study).
Includes a custom attribute on the video element so we can estimate
page view time in our post-save hook, without clashing with the automatically
added `duration` attribute that a browser may add to <video>.
"""
if not block:
return ''
video_duration = getattr(block['video'], 'duration', 0)
# The default, above, _should_ never be needed because field is mandatory in the CMS
video = block['video']
timestamp_to_allow_poster_image_to_work_on_mobile_safari = '#t=0.1'
sources_data = []
for source in video.sources:
if 'src' in source:
source['src'] += timestamp_to_allow_poster_image_to_work_on_mobile_safari
sources_data.append([flatatt(source)])
sources = format_html_join('\n', '<source{0}>', sources_data)
if video.subtitles:
rendered_subtitles = []
for subtitle_spec in video.subtitles:
rendered_subtitles.append(
render_to_string(
'core/includes/_video_subtitle.html',
subtitle_spec,
)
)
subtitles = '\n'.join(rendered_subtitles)
else:
subtitles = ''
rendered = format_html(
f"""
<video preload="metadata" controls controlsList="nodownload"
{_get_poster_attribute(video)}{VIDEO_DURATION_DATA_ATTR_NAME}="{video_duration}">
{sources}
{subtitles}
Your browser does not support the video tag.
</video>
<div class="video-transcript-container"></div>
"""
)
return rendered
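# A hedged template-side sketch: the tag library name ("video_tags") and the
# context variable are assumptions made for illustration, not taken from this file.
#
#   {% load video_tags %}
#   {% render_video page.hero_video %}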
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import neurovault.apps.statmaps.models
import neurovault.apps.statmaps.storage
class Migration(migrations.Migration):
dependencies = [
('statmaps', '0073_auto_20161111_0033'),
]
operations = [
migrations.AlterField(
model_name='collection',
name='private',
field=models.BooleanField(default=False, verbose_name=b'Accessibility', choices=[(False, b'Public (The collection will be accessible by anyone and all the data in it will be distributed under CC0 license)'), (True, b'Private (The collection will be not listed in the NeuroVault index. It will be possible to shared it with others at a private URL.)')]),
),
migrations.AlterField(
model_name='image',
name='surface_left_file',
field=models.FileField(storage=neurovault.apps.statmaps.storage.DoubleExtensionStorage(), upload_to=neurovault.apps.statmaps.models.upload_img_to, null=True, verbose_name=b'File with the unthresholded LEFT hemisphere fsaverage surface map (.mgh, .curv, .gii)', blank=True),
),
migrations.AlterField(
model_name='image',
name='surface_right_file',
field=models.FileField(storage=neurovault.apps.statmaps.storage.DoubleExtensionStorage(), upload_to=neurovault.apps.statmaps.models.upload_img_to, null=True, verbose_name=b'File with the unthresholded RIGHT hemisphere fsaverage surface map (.mgh, .curv, .gii)', blank=True),
),
]
|
python
|
#!/usr/bin/env python3
#
# Copyright 2021 Xiaomi Corporation (author: Wei Kang)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R get_best_matching_stats_test_py
import unittest
import k2
import torch
class TestGetBestMatchingStats(unittest.TestCase):
def test(self):
s = '[ [ [ 5 1 4 6 ] [ 5 1 2 6 ] [ 5 3 4 6 ] ] ]'
tokens = k2.RaggedInt(s)
scores = torch.tensor([1, 2, 3, 4, 5, 7, 8, 6, 0, 0, 0, 0],
dtype=torch.float32)
counts = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
dtype=torch.int32)
eos = 6
min_token = 1
max_token = 6
max_order = 2
mean, var, counts_out, ngram_order = k2.get_best_matching_stats(
tokens, scores, counts, eos, min_token, max_token, max_order)
mean_ref = torch.tensor([3, 4.5, 3, 4, 3, 4.5, 4.5, 5, 3, 4.5, 3, 4],
dtype=torch.float32)
var_ref = torch.tensor([4, 6.25, 0, 0, 4, 6.25, 5.25, 1, 4, 5.25, 0, 0],
dtype=torch.float32)
counts_out_ref = torch.tensor([2, 2, 1, 1, 2, 2, 0, 2, 2, 0, 1, 1],
dtype=torch.int32)
ngram_order_ref = torch.tensor([2, 2, 1, 2, 2, 2, 0, 1, 2, 0, 1, 2],
dtype=torch.int32)
assert torch.allclose(mean, mean_ref)
assert torch.allclose(var, var_ref)
assert torch.all(torch.eq(counts_out, counts_out_ref))
assert torch.all(torch.eq(ngram_order, ngram_order_ref))
if __name__ == '__main__':
unittest.main()
|
python
|
#!/usr/local/bin/python
import os, sys, time
space = ' '
# read file by filename
def read_file(filename):
f = open(filename, 'r')
content = f.readlines()
f.close()
return content
def handleTemp(temp):
    # extract the content between <TEXT> and </TEXT>, excluding the tags themselves
start = temp.find('<TEXT>') + len('<TEXT>')
end = temp.find('</TEXT>')
return temp[start:end]
def handleDocument(content):
documentid = ''
Text = ''
i = 0
length = len(content)
while i < length:
if '<DOCNO>' in content[i]:
no = content[i].split(' ')
documentid = no[1]
break
            # once the doc id is found, end this loop to avoid unnecessary if-checks
i += 1
while i < length:
if '<TEXT>' in content[i]:
temp = ''
while '</TEXT>' not in content[i]:
# replace the '\n' with space
temp += content[i][:-1] + ' '
i += 1
temp += content[i]
Text += handleTemp(temp)
i += 1
return (documentid, Text)
# split the file into document
def splitDoc(content):
length = len(content)
i = 0
documents = []
while i < length:
if '<DOC>' in content[i]:
# start to get the whole doc
doc = []
doc.append(content[i])
i += 1
while '</DOC>' not in content[i]:
doc.append(content[i])
i += 1
doc.append(content[i])
documents.append(handleDocument(doc))
i += 1
return documents
def getNumber(content):
index = 0
# first number, second number, and the position to be continue
result = []
c = content.split(' ')
    end = len(c[0]) + len(c[1]) + 2  # 2 spaces
    result = [int(c[0]), int(c[1]), end]
    return result
    # NOTE: everything below is unreachable because of the early return above.
    size = len(content)
while index < size:
if content[index] == ' ':
temp = ''
index += 1
while index < size and content[index] != ' ':
temp += content[index]
index += 1
result.append(int(temp))
else:
index += 1
if len(result) == 2:
while index < size and content[index] == ' ':
index += 1
result.append(index)
return result
def mergefile(name):
# data file
print 'mergefile'
file1 = open('cache1_' + name,'r')
file2 = open('cache2_' + name,'r')
file3 = open('cache3_' + name, 'w')
cate3 = open('cache3_' + name + '_category', 'w')
# category file
cate1 = open('cache1_' + name + '_category', 'r').readlines()
cate2 = open('cache2_' + name + '_category', 'r').readlines()
ptr1 = 0
ptr2 = 0
start = 0
while ptr1 < len(cate1) and ptr2 < len(cate2):
# line in category
# term start len
line1 = cate1[ptr1].split(' ')
line2 = cate2[ptr2].split(' ')
t1 = line1[0]
t2 = line2[0]
result = ''
if t1 < t2:
result += t1 + space + str(start) + space
file1.seek(int(line1[1]))
content = file1.read(int(line1[2][:-1]))
file3.write(content)
size = len(content)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr1 += 1
elif t1 > t2:
result += t2 + space + str(start) + space
file2.seek(int(line2[1]))
content = file2.read(int(line2[2][:-1]))
file3.write(content)
size = len(content)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr2 += 1
elif t1 == t2:
# if two terms are the same
result += t1 + space + str(start) + space
file1.seek(int(line1[1]))
content1 = file1.read(int(line1[2][:-1]))
# print content1
space1 = getNumber(content1)
# print space1
# sys.exit(-1)
file2.seek(int(line2[1]))
content2 = file2.read(int(line2[2][:-1]))
# print content2
space2 = getNumber(content2)
data = str(space1[0] + space2[0]) + space
data += str(space1[1] + space2[1]) + space + content1[space1[2]:-1]
data += space + content2[space2[2]:]
file3.write(data)
size = len(data)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr1 += 1
ptr2 += 1
while ptr1 < len(cate1):
line1 = cate1[ptr1].split(' ')
t1 = line1[0]
result = t1 + space + str(start) + space
file1.seek(int(line1[1]))
content = file1.read(int(line1[2][:-1]))
file3.write(content)
size = len(content)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr1 += 1
while ptr2 < len(cate2):
line2 = cate2[ptr2].split(' ')
t2 = line2[0]
result = t2 + space + str(start) + space
file2.seek(int(line2[1]))
content = file2.read(int(line2[2][:-1]))
file3.write(content)
size = len(content)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr2 += 1
file1.close()
file2.close()
file3.close()
cate3.close()
os.remove('cache1_' + name)
os.remove('cache2_' + name)
os.remove('cache1_' + name + '_category')
os.remove('cache2_' + name + '_category')
os.rename('cache3_' + name, 'cache1_' + name)
os.rename('cache3_' + name + '_category', 'cache1_' + name + '_category')
def get_range(nums, move_able):
    # return (index of the smallest value among rows that can still advance, max - min of the current values)
small = 10000
next_val = 10000
next_index = 0
big = -1
index = 0
while index < len(nums):
num = nums[index][0]
if small > num:
small = num
if next_val > num and move_able[index]:
next_val = num
next_index = index
if big < num:
big = num
index += 1
return (next_index, big - small)
def get_min_span(matrix):
    # matrix is a list of lists, one inner list of positions per term
    if len(matrix) == 1:
        return 0
    column = []
    row = len(matrix)
    for i in range(row):
        column.append([matrix[i][0], 0])
    move_able = []
    smallest = 10000
    for i in range(row):
        move_able.append(True)
    while True in move_able:
        next_move = get_range(column, move_able)
        if next_move[1] + 1 == row:
            smallest = next_move[1] + 1
            break
        if smallest > next_move[1] + 1:
            smallest = next_move[1] + 1
        next_val = next_move[0]
        column[next_val][1] += 1
        if len(matrix[next_val]) <= column[next_val][1] + 1:
            move_able[next_val] = False
        if move_able[next_val]:
            column[next_val][0] = matrix[next_val][column[next_val][1]]
    # print matrix, smallest
    return smallest
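# A hedged usage sketch: each inner list is assumed to hold the sorted positions
# of one query term in a document, and get_min_span() is meant to report the
# size of the smallest window covering at least one position of every term.
# The position lists below are illustrative only.
if __name__ == '__main__':
    positions = [[1, 4, 9], [2, 7], [3, 8]]
    print(get_min_span(positions))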
|
python
|
# -*- coding: utf-8 -*-
from icemac.addressbook.interfaces import IPerson
import icemac.addressbook.testing
import pytest
import zope.component.hooks
# Fixtures to set-up infrastructure which are usable in tests,
# see also in ./fixtures.py (which are imported via src/../conftest.py):
@pytest.yield_fixture(scope='function')
def person_with_field_data(personWithFieldDataS):
"""Provide predefined person data, see `personWithFieldDataS`."""
for connection in icemac.addressbook.testing.pyTestStackDemoStorage(
personWithFieldDataS.zodb, 'PersonWithFieldFunction'):
yield connection
# Infrastructure fixtures
@pytest.yield_fixture(scope='session')
def personWithFieldDataS(
addressBookS, FullPersonFactory, PostalAddressFactory, KeywordFactory,
PhoneNumberFactory, EMailAddressFactory, HomepageAddressFactory,
FieldFactory):
"""Create base data used in person tests."""
for connection in icemac.addressbook.testing.pyTestStackDemoStorage(
addressBookS, 'SearchSession'):
address_book = connection.rootFolder['ab']
with zope.component.hooks.site(address_book):
field_name = FieldFactory(
address_book, IPerson, 'TextLine', u'foobar').__name__
icemac.addressbook.conftest._create_person(
address_book, FullPersonFactory, PostalAddressFactory,
KeywordFactory, PhoneNumberFactory, EMailAddressFactory,
HomepageAddressFactory, **{field_name: u'my value'})
yield connection
|
python
|
log_enabled = True
# 1 = print everything (messages of every level)
# 2 = print less (only messages with level 2 or 3)
# 3 = print the least (only level-3 messages)
log_level = 2
def log(message, message_type="info", level=3):
if not log_enabled:
return
log_message_type_symbols = {
"info": "[*]",
"warning": "[!]",
"error": "[x]",
"success": "[+]",
}
    # Errors and warnings are always logged, regardless of level
    if message_type == "error" or message_type == "warning":
print(log_message_type_symbols[message_type], message)
else:
if (level >= log_level):
print(log_message_type_symbols[message_type], message)
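# Example calls (illustrative): with log_level = 2, the success and warning
# messages print, the level-1 info message is filtered out, and errors always
# print regardless of level.
if __name__ == "__main__":
    log("processing finished", "success", level=3)
    log("cache miss", "warning")
    log("verbose detail", "info", level=1)
    log("could not open file", "error", level=1)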
|
python
|
from diary.views import get_next_or_none
from django.urls import reverse
def test_get_next_or_none__last(note):
assert get_next_or_none(note) is None
def test_get_next_or_none__next_exists(note2):
assert get_next_or_none(note2).title == 'My Title'
def test_NoteListView(client, note):
response = client.get(reverse('note-list'))
assert response.status_code == 200
assert b'<div hx-get="/note/create" hx-trigger="load" hx-swap="outerHTML"></div>' in response.content
|
python
|
#
# Control an RFSpace SDR-IP, NetSDR, or CloudIQ.
#
# Example:
# sdr = sdrip.open("192.168.3.125")
# sdr.setrate(32000)
# sdr.setgain(-10)
# sdr.setrun()
# while True:
# buf = sdr.readiq()
# OR buf = sdr.readusb()
#
# Robert Morris, AB1HL
#
import socket
import sys
import os
import numpy
import scipy
import scipy.signal
import threading
import time
import struct
import weakutil
def x8(x):
s = bytearray([x & 0xff])
return s
def x16(x):
# least-significant first
s = bytearray([
x & 0xff,
(x >> 8) & 0xff ])
return s
def x32(x):
# least-significant first
s = bytearray([
x & 0xff,
(x >> 8) & 0xff,
(x >> 16) & 0xff,
(x >> 24) & 0xff ])
return s
# 40-bit frequency in Hz, lsb first
# but argument must be an int
def x40(hz):
s = b""
for i in range(0, 5):
s = s + bytearray([ hz & 0xff ])
hz >>= 8
return s
# turn a char into an int.
# yord[s[i]]
# in python27, s is str, s[i] is str, so call ord().
# in python3, s is bytes, s[i] is int, so no ord().
def yord(x):
if type(x) == int:
return x
else:
return ord(x)
def y16(s):
x = (yord(s[0]) +
(yord(s[1]) << 8))
return x
def y32(s):
x = (yord(s[0]) +
(yord(s[1]) << 8) +
(yord(s[2]) << 16) +
(yord(s[3]) << 24))
return x
# turn 5 bytes from NetSDR into a 40-bit number.
# LSB first.
def y40(s):
hz = (yord(s[0]) +
(yord(s[1]) << 8) +
(yord(s[2]) << 16) +
(yord(s[3]) << 24) +
(yord(s[4]) << 32))
return hz
# turn a byte array into hex digits
def hx(s):
buf = ""
for i in range(0, len(s)):
buf += "%02x " % (yord(s[i]))
return buf
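# A quick sanity sketch for the encoders above: 14,074,000 Hz is 0xd6c090, so
# x40() emits the bytes 90 c0 d6 00 00 (least-significant byte first) and y40()
# recovers the original integer.
assert y40(x40(14074000)) == 14074000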
mu = threading.Lock()
#
# if already connected, return existing SDRIP,
# otherwise a new one.
#
sdrips = { }
def open(ipaddr):
global sdrips, mu
mu.acquire()
if not (ipaddr in sdrips):
sdrips[ipaddr] = SDRIP(ipaddr)
sdr = sdrips[ipaddr]
mu.release()
return sdr
class SDRIP:
def __init__(self, ipaddr):
# ipaddr is SDR-IP's IP address e.g. "192.168.3.123"
self.mode = "usb"
self.ipaddr = ipaddr
self.mu = threading.Lock()
self.lasthz = 0
self.rate = None
self.frequency = None
self.running = False
self.mhz_overload = { }
self.mhz_gain = { }
# 16 or 24
# only 24 seems useful
self.samplebits = 24
# iq? i think only True works.
self.iq = True
self.nextseq = 0
self.reader_pid = None
self.connect()
# "usb" or "fm"
    # maybe only here to be read by weakaudio.py/SDRIP.
def set_mode(self, mode):
self.mode = mode
def connect(self):
# allocate a UDP socket and port for incoming data from the SDR-IP.
self.ds = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.ds.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024*1024)
self.ds.bind(('', 0)) # ask kernel to choose a free port
hostport = self.ds.getsockname() # hostport[1] is port number
# fork() a sub-process to read and buffer the data UDP socket,
# since the Python thread scheduler doesn't run us often enough if
# WSPR is compute-bound in numpy for tens of seconds.
r, w = os.pipe()
self.reader_pid = os.fork()
if self.reader_pid == 0:
os.close(r)
self.reader(w)
os._exit(0)
else:
self.pipe = r
os.close(w)
self.ds.close()
# commands over TCP to port 50000
self.cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.cs.connect((self.ipaddr, 50000))
# only this thread reads from the control TCP socket,
# and appends to self.replies.
self.replies_mu = threading.Lock()
self.replies = [ ]
th = threading.Thread(target=lambda : self.drain_ctl())
th.daemon = True
th.start()
time.sleep(0.1) # CloudIQ
# tell the SDR-IP where to send UDP packets
self.setudp(hostport[1])
# boilerplate
self.setad()
self.setfilter(0)
self.setgain(0)
#self.setgain(-20)
# "SDR-IP"
#print("name: %s" % (self.getitem(0x0001)))
# option 0x02 means reflock board is installed
#oo = self.getitem(0x000A) # Options
#oo0 = yord(oo[0])
#print("options: %02x" % (oo0))
if False:
# set calibration.
# 192.168.3.130 wants + 506
# 192.168.3.131 wants + 525
# (these are with the 10 mhz reflock ocxo, but not locked)
data = b""
data += x8(0) # ignored
if self.ipaddr == "192.168.3.130":
data += x32(80000000 + 506)
elif self.ipaddr == "192.168.3.131":
data += x32(80000000 + 525)
else:
print("sdrip.py: unknown IP address %s for calibration" % (self.ipaddr))
# data += x32(80000000 + 0)
data = None
if data != None:
self.setitem(0x00B0, data)
# A/D Input Sample Rate Calibration
# factory set to 80000000
x = self.getitem(0x00B0)
cal = y32(x[1:5])
print("sdrip %s cal: %s" % (self.ipaddr, cal))
# read the UDP socket from the SDR-IP.
def reader1(self):
while True:
buf = self.ds.recv(4096)
self.packets_mu.acquire()
self.packets.append(buf)
self.packets_mu.release()
# read the data UDP socket in a separate process and
# send the results on the pipe w.
def reader(self, w):
ww = os.fdopen(w, 'wb')
# spawn a thread that just keeps reading from the socket
# and appending packets to packets[].
self.packets = [ ]
self.packets_mu = threading.Lock()
th = threading.Thread(target=lambda : self.reader1())
th.daemon = True
th.start()
# move packets from packets[] to the UNIX pipe.
# the pipe write() calls may block, but it's OK because
# the reader1() thread keeps draining the UDP socket.
while True:
self.packets_mu.acquire()
ppp = self.packets
self.packets = [ ]
self.packets_mu.release()
if len(ppp) < 1:
# we expect 100 pkts/second
# but OSX seems to limit a process to 150 wakeups/second!
# time.sleep(0.005)
time.sleep(0.01)
for pkt in ppp:
try:
ww.write(struct.pack('I', len(pkt)))
ww.write(pkt)
ww.flush()
except:
#sys.stderr.write("sdrip: pipe write failed\n")
os._exit(1)
# consume and record TCP control messages from the NetSDR,
# and notice if it goes away.
def drain_ctl(self):
try:
while True:
reply = self.real_readreply()
if reply != None:
self.replies_mu.acquire()
self.replies.append(reply)
self.replies_mu.release()
except:
print("drain error:", sys.exc_info()[0])
sys.stdout.flush()
pass
sys.stderr.write("sdrip: control connection died\n")
os.kill(self.reader_pid, 9)
# read a 16-bit int from TCP control socket
def read16(self):
x0 = self.cs.recv(1) # least-significant byte
x1 = self.cs.recv(1) # most-significant byte
return (yord(x0) & 0xff) | ((yord(x1) << 8) & 0xff00)
# read a reply from the TCP control socket
# return [ type, item, data ]
def readctl(self):
        lx = self.read16() # overall length and msg type
        mtype = (lx >> 13) & 0x7
        lx &= 0x1fff
        if lx == 2:
# NAK -- but for what?
sys.stderr.write("sdrip: NAK\n")
return None
item = self.read16() # control item
data = b""
xlen = len - 4
while xlen > 0:
dd = self.cs.recv(1)
data += dd
xlen -= 1
return [ mtype, item, data ]
# read one reply from the tcp control socket.
def real_readreply(self):
reply = self.readctl()
if reply == None:
# NAK
return None
# print("reply: %d %04x %s" % (reply[0], reply[1], hx(reply[2])))
# reply[0] is mtype (0=set, 1=get)
# reply[1] is item
        # reply[2] is data
if reply[0] == 1 and reply[1] == 5:
# A/D overload
self.got_overload()
return reply
def got_overload(self):
mhz = self.lasthz // 1000000
self.mhz_overload[mhz] = time.time()
ogain = self.mhz_gain.get(mhz, 0)
gain = ogain - 10
if gain < -30:
gain = -30
self.mhz_gain[mhz] = gain
sys.stderr.write("sdrip: overload mhz=%d %d %d\n" % (mhz, ogain, gain))
# wait for drain thread to see the reply we want.
def readreply(self, item):
self.replies_mu.acquire()
lasti = len(self.replies) - 10 # XXX
lasti = max(0, lasti)
self.replies_mu.release()
while True:
self.replies_mu.acquire()
while lasti < len(self.replies):
reply = self.replies[lasti]
lasti = lasti + 1
if reply[0] == 0 and reply[1] == item:
self.replies_mu.release()
return reply[2]
if len(self.replies) > 20:
self.replies = [ ]
lasti = 0
self.replies_mu.release()
time.sleep(0.01)
# send a Request Control Item, wait for and return the result
def getitem(self, item, extra=None):
try:
self.mu.acquire()
mtype = 1 # type=request control item
buf = b""
buf += x8(4) # overall length, lsb
buf += x8((mtype << 5) | 0) # 0 is len msb
buf += x16(item)
if extra != None:
buf += extra
self.cs.send(buf)
ret = self.readreply(item)
return ret
finally:
self.mu.release()
def setitem(self, item, data):
try:
self.mu.acquire()
mtype = 0 # set item
lx = 4 + len(data)
buf = b""
buf += x8(lx)
buf += x8((mtype << 5) | 0)
buf += x16(item)
buf += data
self.cs.send(buf)
ret = self.readreply(item)
return ret
finally:
self.mu.release()
def print_setup(self):
print(("freq 0: %d" % (self.getfreq(0)))) # 32770 if down-converting
print(("name: %s" % (self.getname())))
print(("serial: %s" % (self.getserial())))
print(("interface: %d" % (self.getinterface())))
# print("boot version: %s" % (self.getversion(0)))
# print("application firmware version: %s" % (self.getversion(1)))
# print("hardware version: %s" % (self.getversion(2)))
# print("FPGA config: %s" % (self.getversion(3)))
print(("rate: %d" % (self.getrate())))
print(("freq 0: %d" % (self.getfreq(0)))) # 32770 if down-converting
print(("A/D mode: %s" % (self.getad(0))))
print(("filter: %d" % (self.getfilter(0))))
print(("gain: %d" % (self.getgain(0))))
print(("fpga: %s" % (self.getfpga())))
print(("scale: %s" % (self.getscale(0))))
# print("downgain: %s" % (self.getdowngain()))
# set Frequency
def setfreq1(self, chan, hz):
hz = int(hz)
data = b""
data += bytearray([chan]) # 1=display, 0=actual receiver DDC
data += x40(hz)
self.setitem(0x0020, data)
self.lasthz = hz
def setfreq(self, hz):
self.setfreq1(0, hz) # DDC
self.setfreq1(1, hz) # display
# a sleep seems to be needed for the case in which
# a NetSDR is switching on the down-converter.
if hz > 30000000 and (self.frequency == None or self.frequency < 30000000):
time.sleep(0.5)
self.frequency = hz
# reduce gain if recently saw overload warning
mhz = hz // 1000000
gain = 0
if mhz in self.mhz_gain:
if time.time() - self.mhz_overload[mhz] > 5 * 60:
self.mhz_overload[mhz] = time.time()
self.mhz_gain[mhz] += 10
if self.mhz_gain[mhz] > 0:
self.mhz_gain[mhz] = 0
gain = self.mhz_gain[mhz]
if mhz <= 4 and gain > -10:
gain = -10
self.mhz_gain[mhz] = gain
self.mhz_overload[mhz] = time.time()
self.setgain(gain)
def getfreq(self, chan):
x = self.getitem(0x0020, x8(chan))
hz = y40(x[1:6])
return hz
# set Receiver State to Run
# only I/Q seems to work, not real.
def setrun(self):
self.running = True
data = b""
if self.iq:
data += x8(0x80) # 0x80=I/Q, 0x00=real
else:
data += x8(0x00) # 0x80=I/Q, 0x00=real
data += x8(0x02) # 1=idle, 2=run
if self.samplebits == 16:
data += x8(0x00) # 80=24 bit continuous, 00=16 bit continuous
else:
data += x8(0x80) # 80=24 bit continuous, 00=16 bit continuous
data += x8(0x00) # unused
self.setitem(0x0018, data)
self.nextseq = 0
# self.print_setup()
# stop receiver
def stop(self):
self.running = False
data = b""
if self.iq:
data += x8(0x80) # 0x80=I/Q, 0x00=real
else:
data += x8(0x00) # 0x80=I/Q, 0x00=real
data += x8(0x01) # 1=idle, 2=run
if self.samplebits == 16:
data += x8(0x00) # 80=24 bit continuous, 00=16 bit continuous
else:
data += x8(0x80) # 80=24 bit continuous, 00=16 bit continuous
data += x8(0x00) # unused
self.setitem(0x0018, data)
# DDC Output Sample Rate
# rate is samples/second
# must be an integer x4 division of 80 million.
# the minimum is 32000.
def setrate(self, rate):
self.rate = rate
data = b""
data += x8(0) # ignored
data += x32(rate)
self.setitem(0x00B8, data)
def getrate(self):
x = self.getitem(0x00B8, x8(0))
rate = y32(x[1:5])
return rate
# A/D Modes
# set dither and A/D gain
def setad(self):
data = b""
data += x8(0) # ignored
# bit zero is dither, bit 1 is A/D gain 1.5
#data += x8(0x3)
data += x8(0x1)
self.setitem(0x008A, data)
# [ dither, A/D gain ]
def getad(self, chan):
x = self.getitem(0x008A, x8(0))
dither = (yord(x[1]) & 1) != 0
gain = (yord(x[1]) & 2) != 0
return [ dither, gain ]
# RF Filter Select
# 0=automatic
# 11=bypass
# 12=block everything (mute)
def setfilter(self, f):
data = b""
data += x8(0) # channel
data += x8(f)
self.setitem(0x0044, data)
def getfilter(self, chan):
x = self.getitem(0x0044, x8(chan))
return yord(x[1])
# RF Gain
# gain is 0, -10, -20 -30 dB
def setgain(self, gain):
data = b""
data += x8(0) # channel 1
data += x8(gain)
self.setitem(0x0038, data)
def getgain(self, chan):
x = self.getitem(0x0038, x8(chan))
return yord(x[1])
# e.g. "NetSDR"
def getname(self):
x = self.getitem(0x0001)
return x
# e.g. "PS000553"
def getserial(self):
x = self.getitem(0x0002)
return x
# 123 means version 1.23
# returns 10 for my NetSDR
def getinterface(self):
x = self.getitem(0x0003)
return y16(x[0:2])
# ID=0 boot code
# ID=1 application firmware
# ID=2 hardware
# ID=3 FPGA configuration
# XXX seems to cause protocol problems, NetSDR sends NAKs or something.
def getversion(self, id):
x = self.getitem(0x0004, x8(id))
if x == None:
# NAK
return None
if id == 3:
return [ yord(x[1]), yord(x[2]) ] # ID, version
else:
return y16(x[1:3]) # version * 100
# [ FPGA config number, FPGA config ID, FPGA revision, descr string ]
# e.g. [1, 1, 7, 'Std FPGA Config \x00']
def getfpga(self):
x = self.getitem(0x000C)
return [ yord(x[0]),
yord(x[1]),
yord(x[2]),
x[3:] ]
# Receiver A/D Amplitude Scale
def getscale(self, chan):
x = self.getitem(0x0023, x8(chan))
return y16(x[1:3])
# VHF/UHF Down Converter Gain
# XXX seems to yield a NAK
def getdowngain(self):
x = self.getitem(0x003A)
auto = yord(x[0])
lna = yord(x[1])
mixer = yord(x[2])
ifout = yord(x[3])
return [ auto, lna, mixer, ifout ]
# Data Output UDP IP and Port Address
# just set the port, not the host address.
def setudp(self, port):
# find host's IP address.
hostport = self.cs.getsockname()
ipaddr = socket.inet_aton(hostport[0]) # yields a four-byte string, wrong order
data = b""
data += bytearray([
ipaddr[3],
ipaddr[2],
ipaddr[1],
ipaddr[0], ])
data += x16(port)
self.setitem(0x00C5, data)
# wait for and decode a UDP packet of I/Q samples.
# returns a buffer with interleaved I and Q float64.
# return an array of complex (real=I, imag=Q).
def readiq(self):
# read from the pipe; a 4-byte length, then the packet.
x4 = os.read(self.pipe, 4)
if len(x4) != 4:
sys.stderr.write("sdrip read from child failed\n")
os._exit(1)
[plen] = struct.unpack("I", x4)
assert plen > 0 and plen < 65536
buf = b""
while len(buf) < plen:
x = os.read(self.pipe, plen - len(buf))
buf = buf + x
# parse SDR-IP header into length, msg type
lx = yord(buf[0])
lx |= (yord(buf[1]) << 8)
mtype = (lx >> 13) & 0x7 # 0x4 is data
lx &= 0x1fff # should == len(buf)
# packet sequence number (wraps to 1, not 0)
seq = yord(buf[2]) | (yord(buf[3]) << 8)
gap = 0
if seq != self.nextseq and (seq != 1 or self.nextseq != 65536):
# one or more packets were lost.
# we'll fill the gap with zeros.
sys.stderr.write("seq oops got=%d wanted=%d\n" % (seq, self.nextseq))
if seq > self.nextseq:
gap = seq - self.nextseq
self.nextseq = seq + 1
if self.samplebits == 16:
samples = numpy.fromstring(buf[4:], dtype=numpy.int16)
else:
s8 = numpy.fromstring(buf[4:], dtype=numpy.uint8)
x0 = s8[0::3]
x1 = s8[1::3]
x2 = s8[2::3]
# top 8 bits, sign-extended from x2
high = numpy.greater(x2, 127)
x3 = numpy.where(high,
numpy.repeat(255, len(x2)),
numpy.repeat(0, len(x2)))
z = numpy.empty([len(x0)*4], dtype=numpy.uint8)
z[0::4] = x0
z[1::4] = x1
z[2::4] = x2
z[3::4] = x3
zz = z.tostring()
#s32 = numpy.fromstring(zz, dtype=numpy.int32)
#samples = s32.astype(numpy.int16)
samples = numpy.fromstring(zz, dtype=numpy.int32)
samples = samples.astype(numpy.float64)
if gap > 0:
            pad = numpy.zeros(len(samples)*gap, dtype=numpy.float64)
samples = numpy.append(pad, samples)
ii1 = samples[0::2]
qq1 = samples[1::2]
cc1 = ii1 + 1j*qq1
return cc1
#
# read from SDR-IP, demodulate as USB.
#
def readusb(self):
iq = self.readiq()
usb = weakutil.iq2usb(iq)
return usb
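# Illustrative sketch (not part of the original driver): how the 24-bit
# little-endian I/Q samples are widened to int32 in readiq(). The byte values
# below are made-up test data.
if __name__ == "__main__":
    demo = bytes([0x01, 0x00, 0x00,   # +1
                  0xff, 0xff, 0xff])  # -1
    s8 = numpy.frombuffer(demo, dtype=numpy.uint8)
    x0, x1, x2 = s8[0::3], s8[1::3], s8[2::3]
    x3 = numpy.where(x2 > 127, 255, 0).astype(numpy.uint8)
    z = numpy.empty(len(x0) * 4, dtype=numpy.uint8)
    z[0::4], z[1::4], z[2::4], z[3::4] = x0, x1, x2, x3
    widened = numpy.frombuffer(z.tobytes(), dtype=numpy.int32)
    print(widened)  # expected: [ 1 -1 ]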
|
python
|
#
# This file is part of Python Client Library for STAC.
# Copyright (C) 2019 INPE.
#
# Python Client Library for STAC is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Python Client Library for STAC."""
from .stac import Stac
from .utils import Catalog, Collection, Item, ItemCollection, Link, Geometry, Provider, Extent
from .version import __version__
__all__ = ('__version__',
'stac', )
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import shutil
from nova.openstack.common import log as logging
from nova.virt.disk import api as disk_api
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
def cache_image(context, target, image_id, user_id, project_id):
if not os.path.exists(target):
libvirt_utils.fetch_image(context, target, image_id,
user_id, project_id)
def inject_into_image(image, key, net, metadata, admin_password,
files, partition, use_cow=False):
try:
disk_api.inject_data(image, key, net, metadata, admin_password,
files, partition, use_cow)
except Exception as e:
LOG.warn(_("Failed to inject data into image %(image)s. "
"Error: %(e)s") % locals())
def unlink_without_raise(path):
try:
os.unlink(path)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
LOG.warn(_("Failed to unlink %(path)s, error: %(e)s") % locals())
def rmtree_without_raise(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
except OSError as e:
LOG.warn(_("Failed to remove dir %(path)s, error: %(e)s") % locals())
def write_to_file(path, contents):
with open(path, 'w') as f:
f.write(contents)
def create_link_without_raise(source, link):
try:
os.symlink(source, link)
except OSError as e:
if e.errno == errno.EEXIST:
return
else:
LOG.warn(_("Failed to create symlink from %(source)s to %(link)s"
", error: %(e)s") % locals())
def random_alnum(count):
import random
import string
chars = string.ascii_uppercase + string.digits
return "".join(random.choice(chars) for _ in range(count))
def map_network_interfaces(network_info, use_ipv6=False):
# TODO(deva): fix assumption that device names begin with "eth"
# and fix assumption about ordering
if not isinstance(network_info, list):
network_info = [network_info]
interfaces = []
for id, (network, mapping) in enumerate(network_info):
address_v6 = None
gateway_v6 = None
netmask_v6 = None
if use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
netmask_v6 = mapping['ip6s'][0]['netmask']
gateway_v6 = mapping['gateway_v6']
interface = {
'name': 'eth%d' % id,
'address': mapping['ips'][0]['ip'],
'gateway': mapping['gateway'],
'netmask': mapping['ips'][0]['netmask'],
'dns': ' '.join(mapping['dns']),
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
interfaces.append(interface)
return interfaces
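# Illustrative sketch only (made-up addresses, not part of the original
# module): the (network, mapping) tuple shape this helper expects.
if __name__ == '__main__':
    fake_network_info = [(None, {
        'ips': [{'ip': '10.0.0.5', 'netmask': '255.255.255.0'}],
        'gateway': '10.0.0.1',
        'dns': ['8.8.8.8', '8.8.4.4'],
    })]
    # expected: [{'name': 'eth0', 'address': '10.0.0.5', 'gateway': '10.0.0.1', ...}]
    print(map_network_interfaces(fake_network_info))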
|
python
|
from __future__ import print_function
import os
import sys
from distutils.dir_util import copy_tree
from pathlib import Path
import pytest
from _pytest.pytester import Testdir
from pytest import ExitCode
from tests.util import RESOURCES
pytest_plugins = "pytester"
NB_VERSION = 4
import shutil
@pytest.fixture
def fake_repo(testdir: Testdir):
copy_tree(RESOURCES.as_posix(), ".")
shutil.rmtree(".deepcov", ignore_errors=True)
return testdir
def test_when_no_xml_then_output_correctly(testdir: Testdir, fake_repo: object):
shutil.rmtree(".deepcov", ignore_errors=True)
hook_recorder = testdir.inline_run()
assert hook_recorder.ret == ExitCode.TESTS_FAILED
assert Path(".deepcov/junit.xml").exists()
assert Path(".deepcov/.coverage").exists()
def test_when_other_xml_then_output_correctly(testdir: Testdir, fake_repo: object):
shutil.rmtree(".deepcov", ignore_errors=True)
hook_recorder = testdir.inline_run("--junit-xml=junit.xml")
assert hook_recorder.ret == ExitCode.TESTS_FAILED
assert Path(".deepcov/junit.xml").exists()
assert Path("junit.xml").exists()
def test_when_trace_present_then_disables_cov(testdir: Testdir, fake_repo: object):
print(os.getcwd())
shutil.rmtree(".deepcov", ignore_errors=True)
assert not Path(".deepcov/junit.xml").exists()
sys.settrace(lambda x, y, z: None)
hook_recorder = testdir.inline_run()
assert hook_recorder.ret == ExitCode.TESTS_FAILED
assert not Path(".deepcov/junit.xml").exists()
def test_when_collect_only_then_no_output(fake_repo: Testdir):
assert not Path(".deepcov/junit.xml").exists()
hook_recorder = fake_repo.inline_run("--co")
assert hook_recorder.ret == ExitCode.OK
assert not Path(".deepcov/junit.xml").exists()
|
python
|
import os
import re
import subprocess
########################################
# Globals ##############################
########################################
g_verbose = False
IGNORE_PATHS = ("/lib/modules",)
########################################
# Functions ############################
########################################
def executable_check(op):
"""Check for existence of a single binary."""
try:
proc = subprocess.Popen([op], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
return False
try:
if not proc.poll():
proc.kill()
proc.wait()
except OSError:
print("WARNING: subprocess '%s' did not terminate properly" % (op))
return True
return True
def executable_find(proposition, default_list, name):
"""Try to find given executable from proposition and default list."""
if proposition:
if not executable_check(proposition):
raise RuntimeError("could not use supplied '%s' executable '%s'" % (name, proposition))
return proposition
ret = executable_search(default_list, name)
if not ret:
raise RuntimeError("suitable '%s' executable not found" % (name))
return ret
def executable_search(op, description=None):
"""Check for existence of binary, everything within the list will be tried."""
checked = []
ret = None
if is_listing(op):
for ii in op:
if not ii in checked:
if executable_check(ii):
ret = ii
break
else:
checked += [ii]
elif isinstance(op, str):
if not op in checked:
if executable_check(op):
ret = op
checked += [op]
else:
raise RuntimeError("weird argument given to executable search: %s" % (str(op)))
if description and is_verbose():
output_message = "Looking for '%s' executable... " % (description)
if ret:
print("%s'%s'" % (output_message, ret))
else:
print("%snot found" % (output_message))
return ret
def file_is_ascii_text(op):
"""Check if given file contains nothing but ASCII7 text."""
if not os.path.isfile(op):
return False
fd = open(op, "rb")
while True:
line = fd.readline()
if 0 >= len(line):
fd.close()
return True
try:
line.decode("ascii")
except UnicodeDecodeError:
fd.close()
return False
def get_indent(op):
"""Get indentation for given level."""
ret = ""
for ii in range(op):
# Would tab be better?
ret += " "
return ret
def is_listing(op):
"""Tell if given parameter is a listing."""
return isinstance(op, (list, tuple))
def is_verbose():
"""Tell if verbose mode is on."""
return g_verbose
def labelify(op):
"""Take string as input. Convert into string that passes as label."""
return re.sub(r'[\/\.]', '_', op)
def listify(lhs, rhs=None):
"""Make a list of one or two elements if reasonable."""
if (lhs is None) and (rhs is None):
return []
if lhs is None:
if is_listing(rhs):
return rhs
return [rhs]
if rhs is None:
if is_listing(lhs):
return lhs
return [lhs]
if is_listing(lhs) and is_listing(rhs):
return lhs + rhs
if is_listing(lhs):
return lhs + [rhs]
if is_listing(rhs):
return [lhs] + rhs
return [lhs, rhs]
def locate(pth, fn, previous_paths=None):
"""Search for given file from given path downward."""
if is_listing(pth):
for ii in pth:
ret = locate(ii, fn, previous_paths)
if ret:
return ret
return None
# If path is not given or is empty, assume current path.
if not pth:
pth = "."
# Initialize previous paths on first execution.
if not previous_paths:
previous_paths = [os.path.realpath(pth)]
# Some specific directory trees would take too much time to traverse.
if pth in IGNORE_PATHS:
return None
# Recurse, expect filesystem errors.
try:
for ii in os.listdir(pth):
ret = os.path.normpath(pth + "/" + ii)
if (isinstance(fn, str) and (ii == fn)) or ((not isinstance(fn, str)) and fn.match(ii)):
if os.path.isfile(ret):
return ret
elif os.path.isdir(ret):
real_path = os.path.realpath(ret)
if not real_path in previous_paths:
ret = locate(ret, fn, previous_paths + [real_path])
if ret:
return ret
except OSError as ee: # Permission denied or the like.
if 13 == ee.errno:
return None
raise ee
return None
def run_command(lst, decode_output=True):
"""Run program identified by list of command line parameters."""
if is_verbose():
print("Executing command: %s" % (" ".join(lst)))
proc = subprocess.Popen(lst, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(proc_stdout, proc_stderr) = proc.communicate()
if decode_output and not isinstance(proc_stdout, str):
proc_stdout = proc_stdout.decode()
if decode_output and not isinstance(proc_stderr, str):
proc_stderr = proc_stderr.decode()
returncode = proc.returncode
if 0 != proc.returncode:
raise RuntimeError("command failed: %i, stderr output:\n%s" % (proc.returncode, proc_stderr))
return (proc_stdout, proc_stderr)
def set_verbose(op):
"""Set verbosity status."""
global g_verbose
g_verbose = op
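# Quick illustrative checks of listify() (added for clarity; not part of the
# original module). Run this file directly to exercise them.
if __name__ == "__main__":
    assert listify(None) == []
    assert listify("a") == ["a"]
    assert listify("a", "b") == ["a", "b"]
    assert listify(["a"], "b") == ["a", "b"]
    assert listify(["a"], ["b", "c"]) == ["a", "b", "c"]
    print("listify() behaves as documented")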
|
python
|
import random
import time
import cv2
from abc import ABC, abstractmethod
from core.dataClasses.frame import Frame
class ImageProcessingInt(ABC):
"""
Base Abstract class aka Interface for Image processing class
"""
@abstractmethod
def process(self, _observer, _scheduler):
"""
Imports video clip and samples it.
Sampled Images are processed and encapsulated into Frame class.
As a result those are emitted to manager by _observer.on_next()
:param _observer: rx.core.typing.Observer
:param _scheduler: rx.core.typing.Scheduler
:return:
"""
        raise NotImplementedError
class ImageProcessing(ImageProcessingInt):
def __init__(self, _path):
self._path = _path
def process(self, _observer, _scheduler):
video = cv2.VideoCapture(self._path)
if not video.isOpened():
_observer.on_error('FILE NOT FOUND OR WRONG CODEC')
# Find OpenCV version
(major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')
if int(major_ver) < 3:
fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
else:
fps = video.get(cv2.CAP_PROP_FPS)
curr_frame = 0
while video.isOpened():
ret, frame = video.read()
if ret:
# TODO change that
f = Frame(curr_frame)
f.time_stamp_ = curr_frame # curr_frame / fps
f.img_ = frame
f.fps_ = fps
_observer.on_next(f)
else:
break
curr_frame += 1
video.release()
_observer.on_completed()
class ImageProcessingMock(ImageProcessingInt):
def __init__(self, _):
self._limit = 120
def process(self, _observer, _scheduler):
for i in range(self._limit):
time.sleep(random.uniform(0.01, 0.05))
# each time "send" processed image by evoking _observer.on_next( /analysed Frame/ ) method
_observer.on_next(Frame(i))
# when process is completed notify Manager by calling _observer.on_completed()
_observer.on_completed()
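# Hypothetical wiring sketch, assuming RxPY (the `rx` package) is the
# observable framework these processors plug into; the import and subscribe
# call below are illustrative, not taken from this project.
if __name__ == "__main__":
    import rx  # assumed dependency, not imported by the original module
    processor = ImageProcessingMock(None)
    rx.create(processor.process).subscribe(
        on_next=lambda frame: print("processed", frame),
        on_completed=lambda: print("all frames emitted"),
    )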
|
python
|
from .attckobject import AttckObject
class AttckTactic(AttckObject):
def __init__(self, attck_obj = None, **kwargs):
'''The AttckTactic class is used to gather information about all Mitre ATT&CK Framework Tactics.
To access this class directly you must first instantiate it and provide the appropriate inputs, but it is easier to use the Attck class wrapper.
Args:
attck_obj ([json]): This should be the raw Mitre ATT&CK json object. Defaults to None, but should be provided
'''
self.attck_obj = attck_obj
self.id = super(AttckTactic, self)._set_id(kwargs)
self.created_by_ref = super(AttckTactic, self)._set_attribute(kwargs, 'created_by_ref')
self.type = super(AttckTactic, self)._set_attribute(kwargs, 'type')
self.name = super(AttckTactic, self)._set_attribute(kwargs, 'name')
self.description = super(AttckTactic, self)._set_attribute(kwargs, 'description')
self.external_reference = super(AttckTactic, self)._set_reference(kwargs)
self.created = super(AttckTactic, self)._set_attribute(kwargs, 'created')
self.modified = super(AttckTactic, self)._set_attribute(kwargs, 'modified')
self.stix = super(AttckTactic, self)._set_attribute(kwargs, 'id')
self.short_name = super(AttckTactic, self)._set_attribute(kwargs, 'x_mitre_shortname')
self.wiki = super(AttckTactic, self)._set_wiki(kwargs)
@property
def techniques(self):
'''Returns all techniques as a list that are related to this tactic'''
from .technique import AttckTechnique
technique_list = []
for item in self.attck_obj['objects']:
if 'kill_chain_phases' in item:
for prop in item['kill_chain_phases']:
if str(prop['phase_name']).lower() == str(self.short_name).lower():
technique_list.append(AttckTechnique(**item))
return technique_list
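# Hypothetical usage sketch (attck_json and tactic_stix_object below are
# placeholders, not defined in this module): each tactic is built from one
# STIX object plus the full ATT&CK JSON so that .techniques can be resolved.
#
#   tactic = AttckTactic(attck_obj=attck_json, **tactic_stix_object)
#   print(tactic.name, tactic.short_name)
#   for technique in tactic.techniques:
#       print(' -', technique.name)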
|
python
|
from sys import argv
script, input_file = argv
def print_all(f):
print(f.read())
# This will reset the position of the pointer to the start
def rewind(f):
f.seek(0)
def print_a_line(line_no,f):
print(line_no, f.readline())
current_file = open(input_file)
print("First print the whole file: ")
print_all(current_file)
print("Now let's rewind, Kind of like a tape: ")
rewind(current_file)
print("Lets print three lines: ")
current_line = 1
print_a_line(current_line,current_file)
current_line += 1
print_a_line(current_line,current_file)
current_line += 1
print_a_line(current_line,current_file)
current_file.close()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Advent of Code 2020
# https://github.com/scorphus/advent-of-code-2020
# Licensed under the BSD-3-Clause license:
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 2020, Pablo S. Blum de Aguiar <[email protected]>
SEA_MONSTER = (
" # ",
"# ## ## ###",
" # # # # # # ",
)
def part1(lines):
tiles = dict(read(lines))
img = assemble(tiles)
return img[0][0][2] * img[0][-1][2] * img[-1][0][2] * img[-1][-1][2]
def part2(lines):
tiles = dict(read(lines))
img = assemble(tiles)
img = concat(img)
monster = [int(seg.replace(" ", "0").replace("#", "1"), 2) for seg in SEA_MONSTER]
monster_length = len(SEA_MONSTER[0])
monster_weight = "".join(SEA_MONSTER).count("#")
monsters = find_sea_monsters(img, monster, monster_length)
return "".join("".join(r) for r in img).count("1") - monsters * monster_weight
def read(lines):
tiles = "".join(lines).split("\n\n")
for title, tile in (t.rstrip().split("\n", maxsplit=1) for t in tiles):
tid = int(title.rstrip(":").split(maxsplit=1)[1])
yield tid, list(parse(tile))
def parse(tile):
tile = [list(r) for r in tile.replace("#", "1").replace(".", "0").splitlines()]
for i in range(8):
yield borders(tile), tile
tile = list(rotate(tile))
if i == 3:
tile = [r[::-1] for r in tile]
def rotate(tile):
for x in range(len(tile[0])):
yield [r[-x - 1] for r in tile]
def borders(tile):
left = int("".join(t[0] for t in tile), 2)
right = int("".join(t[-1] for t in tile), 2)
top = int("".join(tile[0]), 2)
bot = int("".join(tile[-1]), 2)
return left, right, top, bot
def assemble(tiles):
size = int(len(tiles) ** 0.5)
return assemble_dfs(tiles, [[None] * size for _ in range(size)], set())
def assemble_dfs(tiles, img, placed, row=0, col=0):
rc = row, col + 1
if col == len(img) - 1:
rc = row + 1, 0
for tid, tile in tiles.items():
if tid not in placed:
placed.add(tid)
for i, ((left, right, top, bot), ith_tile) in enumerate(tile):
if (row > 0 and img[row - 1][col][1] != top) or (
col > 0 and img[row][col - 1][0] != left
):
continue
img[row][col] = right, bot, tid, i, ith_tile
assemble_dfs(tiles, img, placed, *rc)
if len(placed) == len(tiles):
return img
placed.remove(tid)
def concat(img):
size = len(img) * (len(img[0][0][-1]) - 2)
final_img = [[] for _ in range(size)]
r = 0
for row in img:
for *_, tile in row:
for y, line in enumerate(tile[1:-1]):
final_img[r + y] += line[1:-1]
r += len(tile) - 2
return final_img
def find_sea_monsters(img, monster, monster_length):
for i in range(8):
count = 0
img_dec = [int("".join(row), 2) for row in img]
for r, rows in enumerate(zip(img_dec[:-2], img_dec[1:-1], img_dec[2:]), 1):
for s in range(len(img[0]) - monster_length):
count += all(r & m << s == m << s for r, m in zip(rows, monster))
if count:
return count
img = list(rotate(img)) # pragma: no cover (🤷🏻♂️)
if i == 3: # pragma: no cover (🤷🏻♂️)
img = [r[::-1] for r in img]
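# Tiny illustrative check (added for clarity, not part of the puzzle code):
# rotate() turns a tile 90 degrees counter-clockwise.
if __name__ == "__main__":
    assert list(rotate([[1, 2], [3, 4]])) == [[2, 4], [1, 3]]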
|
python
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API endpoints for scope package."""
from http import HTTPStatus
from flask import jsonify
from flask_restplus import Namespace, Resource, cors
from ..models.scope_package import ScopePackage
from ..schemas.scope_package import ScopePackageSchema
from ..utils.auth import auth
from ..utils.util import cors_preflight
API = Namespace('ScopePackage', description='ScopePackage')
@cors_preflight('GET,OPTIONS')
@API.route('', methods=['GET', 'OPTIONS'])
class ScopePackageResource(Resource):
"""Resource for managing get scope packages."""
@staticmethod
@cors.crossdomain(origin='*')
@auth.require
def get():
"""Get all scope package."""
scope_packages = ScopePackage.find_all()
return jsonify({
'scopePackages': ScopePackageSchema().dump(scope_packages, many=True)
}), HTTPStatus.OK
|
python
|
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for static analysis test of dart packages generated by dart-pkg"""
import argparse
import errno
import json
import multiprocessing
import os
import shutil
import subprocess
import sys
DART_ANALYZE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"dart_analyze.py")
# List of analysis results.
result_list = []
def collect_result(result):
result_list.append(result)
def analyze_entrypoints(dart_sdk, package_root, entrypoints):
cmd = [ "python", DART_ANALYZE ]
cmd.append("--dart-sdk")
cmd.append(dart_sdk)
cmd.append("--entrypoints")
cmd.extend(entrypoints)
cmd.append("--package-root")
cmd.append(package_root)
cmd.append("--show-sdk-warnings")
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('Failed analyzing %s' % entrypoints)
print(e.output)
return e.returncode
return 0
def analyze_package(dart_sdk, package_root, package):
package_name = package[0]
package_entrypoints = package[1]
print('Analyzing dart-pkg %s ' % package_name)
return analyze_entrypoints(dart_sdk, package_root, package_entrypoints)
# Filter entrypoints for files that exist.
def filter_entrypoints(package_name, entrypoints):
result = []
for entrypoint in entrypoints:
if os.path.isfile(entrypoint):
result.append(entrypoint)
else:
print('WARNING: Could not find %s from %s ' % (entrypoint, package_name))
return result
def main():
parser = argparse.ArgumentParser(description='Generate a dart-pkg')
parser.add_argument('--dart-sdk',
action='store',
metavar='dart_sdk',
help='Path to the Dart SDK.')
parser.add_argument('--dart-pkg-dir',
action='store',
metavar='dart_pkg_dir',
help='Directory of dart packages',
required=True)
parser.add_argument('--package-root',
metavar='package_root',
help='packages/ directory',
required=True)
parser.add_argument('package_name',
nargs='?',
default=None)
args = parser.parse_args()
# Make sure we have a Dart SDK.
dart_sdk = args.dart_sdk
if dart_sdk is None:
dart_sdk = os.environ.get('DART_SDK')
if dart_sdk is None:
print "Pass --dart-sdk, or define the DART_SDK environment variable"
return 1
jobs = []
# Determine which packages to analyze
for filename in os.listdir(args.dart_pkg_dir):
if filename.endswith('.entries'):
if not args.package_name or (filename == args.package_name + '.entries'):
with open(os.path.join(args.dart_pkg_dir, filename)) as f:
entrypoints = f.read().splitlines()
package_name = os.path.splitext(filename)[0]
entrypoints = filter_entrypoints(package_name, entrypoints)
if entrypoints != []:
jobs.append([package_name, entrypoints])
# Create a process pool.
pool = multiprocessing.Pool(multiprocessing.cpu_count())
# Spawn jobs.
for job in jobs:
pool.apply_async(analyze_package,
args = (dart_sdk, args.package_root, job, ),
callback = collect_result)
# Wait for them to complete.
    pool.close()
    pool.join()
# Return the error code if any packages failed.
for result in result_list:
if result != 0:
return result
return 0
if __name__ == '__main__':
sys.exit(main())
|
python
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Dict
from lean.components.config.lean_config_manager import LeanConfigManager
from lean.models.api import QCSecurityType
from lean.models.data import MarketHoursDatabaseEntry
class MarketHoursDatabase:
"""The MarketHoursDatabase class handles access to the market-hours-database.json file."""
def __init__(self, lean_config_manager: LeanConfigManager) -> None:
"""Creates a new MarketHoursDatabase instance.
:param lean_config_manager: the LeanConfigManager instance that is used when retrieving the data directory
"""
self._lean_config_manager = lean_config_manager
def get_entry(self, security_type: QCSecurityType, market: str, ticker: str) -> MarketHoursDatabaseEntry:
"""Reads the market hours database and returns the entry for the given data.
An error is raised if the market hours database does not contain an entry matching the given data.
:param security_type: the security type of the data
:param market: the market of the data
:param ticker: the ticker of the data
:return: the market hours database entry for the data
"""
entries = self._get_all_entries()
keys_to_check = [f"{security_type.value}-{market}-{ticker.upper()}", f"{security_type.value}-{market}-[*]"]
for key in keys_to_check:
if key in entries:
return entries[key]
raise ValueError(f"Could not find entry in market hours database, checked following keys: {keys_to_check}")
def _get_all_entries(self) -> Dict[str, MarketHoursDatabaseEntry]:
"""Reads the market hours database and returns all parsed entries by name.
:return: a dict containing all market hours database entries by name
"""
data_dir = self._lean_config_manager.get_data_directory()
market_hours_database_path = data_dir / "market-hours" / "market-hours-database.json"
market_hours_database = json.loads(market_hours_database_path.read_text(encoding="utf-8"))
return {key: MarketHoursDatabaseEntry(**value) for key, value in market_hours_database["entries"].items()}
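# Hypothetical usage sketch (lean_config_manager below is a placeholder that
# would normally come from the CLI's dependency container, and
# QCSecurityType.Equity is assumed to be a valid member of the imported enum):
#
#   market_hours_database = MarketHoursDatabase(lean_config_manager)
#   entry = market_hours_database.get_entry(QCSecurityType.Equity, "usa", "SPY")
#   print(entry)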
|
python
|
from django.shortcuts import render
from django.http import HttpResponse
from django.utils import translation
from challenge.models import Challenge
from codingPage.models import Log, Command
from connections.models import Feedback
def convert_to_code(data):
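    # NOTE: the data string is walked character by character, so this assumes
    # every Command id fits in a single digit (0-9).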
translation = ''
for char in data:
action = Command.objects.get(id=int(char)).action
translation += action + '\n'
return translation
# Create your views here.
def index(request):
# Retrieve latest commands from db
latest_log = Log.objects.latest('sent_datetime')
id_num = latest_log.get_challenge_id()
chal = Challenge.objects.get(id=id_num)
data = Log.objects.get(challenge = chal).data
translation = convert_to_code(data)
payload = {
'title': 'Dashboard',
# Retrieve challenge data from db
'challenge': chal,
'commands': data,
'feedback': Feedback.objects.latest('sent_datetime'),
'translation': translation,
'jsfile': 'dashboard'
}
return render(request, "dashboard.html", payload)
|
python
|
#!/usr/bin/python
import optparse, os, shutil, sys, tempfile, glob, shlex, vcf, pysam
from subprocess import *
import subprocess, math, random
CHUNK_SIZE = 2**20 #1mb
def createOutputHTML (outputF, sampleNames):
ofh = open(outputF, "w")
print outputF
ofh.write( '<html>\n<head>\n<title>Galaxy - CNVKIT VCF Output</title>\n</head>\n<body>\n<p/>\n<ul>\n' )
outputDir='%s_files' % ''.join(outputF.split('.')[:-1])
for sample in sampleNames:
values = sample.split(" ")
sn = values[0]
outVCF = "%s/%s.vcf" % (outputDir, sn)
print "\noutVCF: %s\n" % outVCF
# if os.path.exists(outVCF):
ofh.write('<li><a href="%s">%s</a></li>\n' % ( outVCF, sn ) )
ofh.write( '</ul>\n</body>\n</html>\n' )
ofh.close()
def open_file_from_option( filename, mode = 'rb' ):
if filename:
return open( filename, mode = mode )
return None
def run_cmd ( cmd , descriptor):
stderr_name = tempfile.NamedTemporaryFile( prefix = "cmd_stderr" ).name
proc = Popen( args=cmd, shell=True, stderr=open( stderr_name, 'wb' ) )
exit_code = proc.wait()
if exit_code:
for line in open( stderr_name ):
print >> sys.stderr, line
os.unlink( stderr_name ) #clean up
raise Exception( "Error running command: %s " % descriptor )
os.unlink( stderr_name ) #clean up
def create_additional_bam_copy(inFile, baseline_indir):
fh = open(inFile, "r")
fpath = None
for line in fh:
fpath = line.rstrip("\n")
fh.close()
name = os.path.basename(fpath)
linkname = "%s/%s.dup.bam" % (baseline_indir, name)
os.symlink(fpath, linkname)
# add link to existing file
fh = open(inFile, "a")
fh.write(linkname)
fh.close()
def lineCount(inFile):
    fh = open(inFile, "r")
    counter = 0
    for line in fh:
        counter += 1
    fh.close()
    return counter
def __main__():
parser = optparse.OptionParser()
parser.add_option( '', '--input_dir', dest='input_dir', action='store', type="string", help='Input directory path of BAM files' )
parser.add_option( '', '--input_dir_file', dest='input_dir_file', action='store', type="string", help='Input directory File containing path of BAM files' )
parser.add_option( '', '--input_files', dest='input_files', action='store', type="string", help='Input File list containing path of BAM files' )
parser.add_option( '', '--out-dir', dest='output_dir', action='store', type="string", default=None, help='If specified, the output directory for extra files.' )
parser.add_option( '', '--log', dest='swift_log', action='store', type="string", default=None, help='swift summary output.' )
parser.add_option( '', '--output', dest='outputF', action='store', type="string", default=None, help='mpileup output.' )
parser.add_option( '-p', '--pass_through', dest='pass_through_options', action='append', type="string", help='These options are passed through directly to contra, without any modification.' )
parser.add_option( '-c', '--config', dest='config_file', action="store", type="string", default=None )
parser.add_option( '-t', '--target', dest='bed_file', action="store", type="string", default=None )
parser.add_option( '-r', '--reference_path', dest='reference_path', help="reference file" )
parser.add_option( '', '--percent-bam-files-for-baseline', type="float", dest='percent', help='contra baseline group: percentage of BAMs to use' )
parser.add_option( '', '--baseline-input-bam', dest='group_by_keyword', help='contra baseline group: to group or not to group' )
parser.add_option( '', '--group-field', dest='rg_field', help='contra baseline group: RG field to use for grouping' )
parser.add_option( '', '--keyword-separator', dest='field_separator', help='contra baseline group: RG field separator' )
parser.add_option( '', '--keyword-field-order', dest='field_order', help='contra baseline group: RG field order' )
(options, args) = parser.parse_args()
swift_bin = 'swift'
sites_file = '/opt/galaxy/tools/cnvkit/cnvkit_sites.xml'
tc_file = '/opt/galaxy/tools/swift/tc.data'
swift_file = '/opt/galaxy/tools/cnvkit/cnvkit.swift'
cnvkit_bin = "/mnt/galaxyTools/tools/pymodules/python2.7/bin"
pythonpath = "/mnt/galaxyTools/tools/pymodules/python2.7/lib/python"
r_path = "/mnt/galaxyTools/tools/R/3.2.2/bin/bin"
r_libs = "/mnt/galaxyTools/tools/R/3.2.2/site-library"
r_ld = "/mnt/galaxyTools/tools/R/3.2.2/ld_libs"
if not os.path.exists(options.output_dir):
os.mkdir(options.output_dir)
output_dir = "%s/output" % options.output_dir
inputDirectory = "%s/bams" % options.output_dir
baseline_dir = "%s/baseline" % options.output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
os.mkdir(inputDirectory)
os.mkdir(baseline_dir)
tmp_dir = tempfile.mkdtemp( dir=options.output_dir , prefix='tmp-TOOL-' )
inputLinkedFiles = []
basenames = []
if not options.input_files:
if options.input_dir_file:
infile = open(options.input_dir_file, 'r')
inputDirLine = infile.readline()
inputRealDirectory = inputDirLine.rstrip('\n\r')
elif options.input_dir:
inputRealDirectory = options.input_dir
inputFiles = glob.glob("%s/*.bam" % inputRealDirectory )
# create a link of the BAMs inside the inputs directory
# this is to prevent the Warning of the BAI files being older than BAMs
#for inputF in inputFiles:
# #os.symlink(inputF, "%s/%s" % (inputDirectory, os.path.basename(inputF)))
# inputLinkedFiles.append("%s/%s" % (inputDirectory, os.path.basename(inputF)))
else:
inputFiles = options.input_files.strip(" ").split(" ")
#for inputF in inputFiles:
# #os.symlink(inputF, "%s/%s" % (inputDirectory, os.path.basename(inputF)))
# inputLinkedFiles.append("%s/%s" % (inputDirectory, os.path.basename(inputF)) )
# get the input BAMs into a list
sampleNames = []
groupNames = []
baselineFiles = []
# get the sample name and bam groups if necessary
# for the bam file and store in list in the same order as the Input file list
#### Run the configManta command for each input on the head node since it's a low cost job
for inputF in inputFiles:
samfile = pysam.AlignmentFile(inputF, 'rb')
sn = samfile.header['RG'][0]['SM']
sampleNames.append("%s" % (sn))
# create the output directory for the sample
sample_outD = "%s/%s" % (output_dir, sn)
os.mkdir(sample_outD)
# create symlink for input file
os.symlink(inputF, "%s/%s.bam" % (inputDirectory, sn))
inputLinkedFiles.append("%s/%s.bam" % (inputDirectory, sn))
# get group names if needed and generate the input baseline files
base_param = ""
if options.group_by_keyword is not None and options.rg_field is not None:
if options.group_by_keyword == "all":
value = "all_bam"
else:
value = samfile.header['RG'][0][options.rg_field]
if options.field_separator is not None:
value = value.split(options.field_separator)[int(options.field_order)-1]
fh_base = open("%s/%s.txt" % (baseline_dir, value), 'a')
fh_base.write("%s\n" % inputF)
fh_base.close()
base_param = "-c %s/%s.baseline.txt" % (baseline_dir, value)
if value not in groupNames:
groupNames.append(value)
if "%s/%s.txt" % (baseline_dir, value) not in baselineFiles:
baselineFiles.append("%s/%s.txt" % (baseline_dir, value) )
# pass through options
ptc = ""
if options.pass_through_options:
ptc = ' '.join( options.pass_through_options )
# create the baseline files if needed
baselineInputs = []
if options.group_by_keyword is not None:
# Make sure there are more than 1 BAM file for each Baseline group.
# if there is only 1 input BAM, then make a link with a new name and add to file
for inFile in baselineFiles:
fileCount = lineCount(inFile)
if fileCount < 2:
create_additional_bam_copy(inFile, baseline_dir)
fileCount = 2
## create a new baseline file with only x% of inputs depending on the user spec.
file_qty_to_use = math.ceil(options.percent * fileCount)
if file_qty_to_use < 2:
file_qty_to_use = math.ceil(1 * fileCount)
with open(inFile, "rb") as source:
lines = [line.rstrip("\n") for line in source]
print "FILE QTY TO ISE: %s" % file_qty_to_use
random_choice = random.sample(lines, int(file_qty_to_use))
newBaseline_input = "%s.selected.txt" % inFile
baselineInputs= baselineInputs+random_choice
#baselineInputs.append(newBaseline_input)
with open(newBaseline_input, "wb") as sink:
sink.write("\n".join(random_choice))
print baselineInputs
baseline_bam = ' '.join(baselineInputs)
# prepare baseline command
baseline_cmd = "export R_LIBS_SITE=%s:\$R_LIBS_SITE; export LD_LIBRARY_PATH=%s:\$LD_LIBRARY_PATH; export PATH=%s:%s:\$PATH;export PYTHONPATH=%s:\$PYTHONPATH; cnvkit.py batch %s -n --output-dir %s --output-reference %s/flat_reference.cnn %s" % (r_libs, r_ld, r_path, cnvkit_bin, pythonpath, baseline_bam, baseline_dir, baseline_dir, ptc )
print "baseline_cmd: %s " % baseline_cmd
stderr = tempfile.NamedTemporaryFile( prefix="TOOL-baseline-stderr-", dir=tmp_dir )
stdout = tempfile.NamedTemporaryFile( prefix="TOOL-baseline-stdout-", dir=tmp_dir )
return_code = None
if return_code is None or not return_code:
proc = subprocess.Popen( args=baseline_cmd, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
if stdout:
stderr_target = stdout
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
# prepare tool command
#tool_cmd = "export R_LIBS_SITE=%s:\$R_LIBS_SITE; export LD_LIBRARY_PATH=%s:\$LD_LIBRARY_PATH; export PATH=%s:%s:\$PATH;export PYTHONPATH=%s:\$PYTHONPATH; cnvkit.py batch INPUTBAM -n --output-dir OUTPUTDIR --output-reference REFNAME %s; cnvkit.py call INPUTCNS -y -m threshold -t=-1.1,-0.4,0.3,0.7 -o OUTPUTCALLFILE; cnvkit.py export vcf INPUTCALLFILE -o OUTPUTVCF; cp INT_VCF FINAL_VCF" % (r_libs, r_ld, r_path, cnvkit_bin, pythonpath, ptc)
tool_cmd = "export R_LIBS_SITE=%s:\$R_LIBS_SITE; export LD_LIBRARY_PATH=%s:\$LD_LIBRARY_PATH; export PATH=%s:%s:\$PATH;export PYTHONPATH=%s:\$PYTHONPATH; cnvkit.py batch INPUTBAM --output-dir OUTPUTDIR -r %s/flat_reference.cnn; cnvkit.py call INPUTCNS -y -m threshold -t=-1.1,-0.4,0.3,0.7 -o OUTPUTCALLFILE; cnvkit.py export vcf INPUTCALLFILE -o OUTPUTVCF; cp INT_VCF FINAL_VCF" % (r_libs, r_ld, r_path, cnvkit_bin, pythonpath, baseline_dir)
#if no stderr file is specified, we'll use our own
stderr = tempfile.NamedTemporaryFile( prefix="TOOL-stderr-", dir=tmp_dir )
stdout = tempfile.NamedTemporaryFile( prefix="TOOL-stdout-", dir=tmp_dir )
# prepare command line
swift_params = list()
swift_params.append('-outputdir=' + output_dir)
swift_params.append('-samplenames=\"%s\"' % ",".join(sampleNames))
swift_params.append('-inputfiles=\"%s\"' % ",".join(inputLinkedFiles))
## construct the swift command
swift_cmd = "%s -sites.file %s -tc.file %s %s " % (swift_bin, sites_file, tc_file, swift_file)
cmd = "%s %s %s" % (swift_cmd, ' '.join(swift_params), '-tool_cmd=\"'+tool_cmd+'\"')
print cmd
return_code = None
if return_code is None or not return_code:
proc = subprocess.Popen( args=cmd, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
if stdout:
stderr_target = stdout
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
for vcffile in glob.glob("%s/*.vcf" % output_dir):
shutil.copy(vcffile, options.output_dir)
# create list output files in the HTML output
try:
createOutputHTML(options.outputF, sampleNames)
except Exception, e:
sys.stdout.write("problem while generating final VCF " + str(e))
#try:
# if os.path.exists(tmp_dir):
# shutil.rmtree(tmp_dir)
# #if os.path.exists(output_dir):
# # shutil.rmtree(output_dir)
#except:
# pass
swift_log_files = glob.glob("%s/*.log" % tmp_dir)
cmdSummary = "/opt/galaxy/tools/swift/parse_swift_log.py "
for logF in swift_log_files:
if "swift.log" in logF:
continue
cmdSummary += " -l %s " % logF
cmdSummary += " -o %s" % options.swift_log
return_code = None
stderr = tempfile.NamedTemporaryFile( prefix="TOOL-stderr-", dir=tmp_dir )
stdout = tempfile.NamedTemporaryFile( prefix="TOOL-stdout-", dir=tmp_dir )
if return_code is None or not return_code:
proc = subprocess.Popen( args=cmdSummary, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
if stdout:
stderr_target = stdout
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
if __name__=="__main__":
__main__()
|
python
|
from enum import Enum
class ApiCode(Enum):
SUCCESS = 1000
JINJA2_RENDER_FAILURE = 1001
DEPLOY_START_FAILURE = 1002
DEPLOY_STOP_FAILURE = 1003
DEPLOY_STATUS_FAILURE = 1004
DEPLOY_REPLAY_FAILURE = 1005
GET_FILE_FAILURE = 1006
GET_CONTAINER_FILE_FAILURE = 1007
COMMAND_EXEC_FAILURE = 1008
DEPLOY_REPLAY_FAILURE_STILL_ACTIVE = 1009
FOLDER_ZIP_FAILURE = 1010
DOCKER_DAEMON_NOT_RUNNING = 1011
MISSING_PARAMETER_POST = 1012
GET_LOGS_FAILED = 1013
MAX_DEPLOYMENTS_REACHED = 1014
CONTAINER_UNREACHABLE = 1015
GET_DEPLOYER_NETWORK_FAILED = 1016
CONTAINER_NET_CONNECT_FAILED = 1017
CONTAINER_NET_DISCONNECT_FAILED = 1018
GET_ENV_VAR_FAILURE = 1019
EMPTY_REQUEST_BODY_PROVIDED = 1020
UPLOAD_FILE_FAILURE = 1021
HTTP_HEADER_NOT_PROVIDED = 1022
KUBERNETES_SERVER_ERROR = 1023
UNAUTHORIZED = 1024
INVALID_JSON_PAYLOAD = 1025
SET_ENV_VAR_FAILURE = 1026
FOLDER_UNZIP_FAILURE = 1027
DEPLOYMENTS_FOLDER_CLEANUP_FAILURE = 1028
GENERAL = 1100
|
python
|
import torch
import torch.nn as nn
from torch_geometric.nn import global_mean_pool
import torch.nn.functional as F
from models.nn_utils import chebyshev
class SCNLayer(nn.Module):
def __init__(self, feature_size, output_size, enable_bias=True, k=1):
super().__init__()
self.k = k
self.conv = nn.Linear(k * feature_size, output_size, bias=enable_bias)
def forward(self, L, x):
X = chebyshev(L, x, self.k)
return self.conv(X)
class SuperpixelSCN(nn.Module):
# This model is based on model described by Stefanie Ebli et al. in Simplicial Neural Networks
# Github here https://github.com/stefaniaebli/simplicial_neural_networks?utm_source=catalyzex.com
def __init__(self, num_node_feats, num_edge_feats, num_triangle_feats, output_size, bias=True):
super().__init__()
conv_size = 32
# Degree 0 convolutions.
self.C0_1 = SCNLayer(num_node_feats, conv_size, enable_bias=bias)
self.C0_2 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.C0_3 = SCNLayer(conv_size, conv_size, enable_bias=bias)
# Degree 1 convolutions.
self.C1_1 = SCNLayer(num_edge_feats, conv_size, enable_bias=bias)
self.C1_2 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.C1_3 = SCNLayer(conv_size, conv_size, enable_bias=bias)
# Degree 2 convolutions.
self.C2_1 = SCNLayer(num_triangle_feats, conv_size, enable_bias=bias)
self.C2_2 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.C2_3 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.layer0 = nn.Linear(3 * conv_size, output_size)
self.layer1 = nn.Linear(3 * conv_size, output_size)
self.layer2 = nn.Linear(3 * conv_size, output_size)
self.combined_layer = nn.Linear(output_size * 3, output_size)
def forward(self, simplicialComplex):
X0, X1, X2 = simplicialComplex.unpack_features()
L0, L1, L2 = simplicialComplex.unpack_laplacians()
batch = simplicialComplex.unpack_batch()
out0_1 = nn.LeakyReLU()(self.C0_1(L0, X0))
out0_2 = nn.LeakyReLU()(self.C0_2(L0, out0_1))
out0_3 = nn.LeakyReLU()(self.C0_3(L0, out0_2))
out1_1 = nn.LeakyReLU()(self.C1_1(L1, X1))
out1_2 = nn.LeakyReLU()(self.C1_2(L1, out1_1))
out1_3 = nn.LeakyReLU()(self.C1_3(L1, out1_2))
out2_1 = nn.LeakyReLU()(self.C2_1(L2, X2))
out2_2 = nn.LeakyReLU()(self.C2_2(L2, out2_1))
out2_3 = nn.LeakyReLU()(self.C2_3(L2, out2_2))
out0 = self.layer0(torch.cat([out0_1, out0_2, out0_3], dim=1))
out1 = self.layer1(torch.cat([out1_1, out1_2, out1_3], dim=1))
out2 = self.layer2(torch.cat([out2_1, out2_2, out2_3], dim=1))
out0 = global_mean_pool(out0, batch[0])
out1 = global_mean_pool(out1, batch[1])
out2 = global_mean_pool(out2, batch[2])
out = torch.cat([out0, out1, out2], dim=1)
return F.softmax(self.combined_layer(out), dim=1)
class PRELU(nn.PReLU):
def forward(self, input):
return F.prelu(input, self.weight)
class PlanetoidSCN(nn.Module):
def __init__(self, num_node_feats, output_size, bias=True):
super().__init__()
f_size = output_size
self.layer_n = SCNLayer(num_node_feats, f_size, bias)
self.layer_e = SCNLayer(num_node_feats, f_size, bias)
self.layer_t = SCNLayer(num_node_feats, f_size, bias)
self.f = PRELU()
self.tri_layer = nn.Linear(output_size, output_size)
def forward(self, simplicialComplex, B1, B2):
X0, X1, X2 = simplicialComplex.unpack_features()
L0, L1, L2 = simplicialComplex.unpack_laplacians()
X0[X0 != 0] = 1
X1_in, X1_out = X0[X1[:, 0]], X0[X1[:, 1]]
X1 = torch.logical_and(X1_in, X1_out).float()
X2_i, X2_j, X2_k = X0[X2[:, 0]], X0[X2[:, 1]], X0[X2[:, 2]]
X2 = torch.logical_and(X2_i, torch.logical_and(X2_j, X2_k)).float()
X0 = self.f(self.layer_n(L0, X0))
X1 = self.f(self.layer_e(L1, X1))
X2 = self.f(self.layer_t(L2, X2))
X0 = (X0 + torch.sparse.mm(B1, X1) + torch.sparse.mm(B1, self.tri_layer(torch.sparse.mm(B2, X2)))) / 3
return X0
class FlowSCN(nn.Module):
def __init__(self, num_node_feats, num_edge_feats, num_triangle_feats, output_size, bias=False, f=nn.LeakyReLU()):
super().__init__()
conv_size = 32
self.layer1 = SCNLayer(num_edge_feats, conv_size, enable_bias=bias)
self.layer2 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.layer3 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.layer4 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.mlp1 = nn.Linear(conv_size, conv_size)
self.mlp2 = nn.Linear(conv_size, output_size)
self.f = f
def forward(self, simplicialComplex):
_, X1, _ = simplicialComplex.unpack_features()
_, L1, _ = simplicialComplex.unpack_laplacians()
batch = simplicialComplex.unpack_batch()
X1 = self.f(self.layer1(L1, X1))
X1 = self.f(self.layer2(L1, X1))
X1 = self.f(self.layer3(L1, X1))
X1 = self.f(self.layer4(L1, X1))
X1 = global_mean_pool(X1.abs(), batch[1])
X1 = F.relu(self.mlp1(X1))
return F.softmax(self.mlp2(X1), dim=1)
class TestSCN(nn.Module):
def __init__(self, num_node_feats, num_edge_feats, num_triangle_feats, output_size, bias=False, f=nn.Identity()):
super().__init__()
self.layer1 = SCNLayer(num_node_feats, output_size, enable_bias=bias)
self.layer2 = SCNLayer(num_edge_feats, output_size, enable_bias=bias)
self.layer3 = SCNLayer(num_triangle_feats, output_size, enable_bias=bias)
self.f = f
def forward(self, simplicialComplex):
X0, X1, X2 = simplicialComplex.unpack_features()
L0, L1, L2 = simplicialComplex.unpack_laplacians()
X0 = self.f(self.layer1(L0, X0))
X1 = self.f(self.layer2(L1, X1))
X2 = self.f(self.layer3(L2, X2))
return X0, X1, X2
|
python
|
import os
from base64 import urlsafe_b64decode, urlsafe_b64encode
from pathlib import Path
from github import Github
gh_access_token = ''
def get_project_root():
"""Returns project root folder."""
return Path(__file__).parent
def gh_session():
"""Returns a PyGithub session."""
if gh_access_token:
return Github(gh_access_token)
return Github()
def reverse_enum(f, start=None):
start = start or 0
fl = list(f)
for i in reversed(range(len(fl))):
yield i + start, fl[i]
def norm_path(file_path):
path = file_path.replace(os.sep, '/')
if path.startswith(('a/', 'b/')):
return path[2:]
if path.startswith('/'):
return path[1:]
return path
def b64_encode(string):
encoded = urlsafe_b64encode(bytes(string, 'utf-8'))
return encoded.decode('utf-8').rstrip('=')
def b64_decode(b64_hash):
    padding = -len(b64_hash) % 4
string = b64_hash + ('=' * padding)
return urlsafe_b64decode(string).decode('utf-8')
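# Hedged round-trip example (not part of the original module): b64_encode
# strips the '=' padding that b64_decode restores before decoding.
if __name__ == "__main__":
    token = b64_encode("src/app/main.py@HEAD")  # hypothetical input string
    assert b64_decode(token) == "src/app/main.py@HEAD"
    print(norm_path("a/src/app/main.py"))  # -> src/app/main.py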
|
python
|
""" Test DB.py in isolation.
Call with twisted.trial eg.
trial test_DB.py
In the context of evoke, and by extension in evoke apps,
only init_db and execute are used by external functions.
The aim of the current exercise is to maintain the current interface
whilst rationalising code and introducing an asyncrous interface
"""
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks # , returnValue
from .DB import DB, init_db, execute, aexecute
connect_config = ('', 'root', 'Elgar104')
class InitDBTestCase(unittest.TestCase):
"test init_db including legacy connect_config formats"
def setUp(self):
""
def tearDown(self):
""
def testTupleConnect(self):
"connect using a tuple"
init_db(connect_config)
def testSpaceDelimitedConnect(self):
"connect using a space delimited string"
init_db(' '.join(connect_config))
def testCommaDelimitedConnect(self):
"connect using a space delimited string"
init_db(','.join(connect_config))
def testDBConnect(self):
"connect by passing connect_config direct to DB object"
DB(connect_config)
class ExecuteTestCase(unittest.TestCase):
""" Not full coverage but the least that would
possibly let us know that we have an execute
connection that can handle substitutions.
"""
def setUp(self):
""
init_db(connect_config)
# #### TODO create test table
def tearDown(self):
""
# #### TODO drop test table
def testNothing(self):
"do nothing except setUp and tearDown"
def testCalculation(self):
"run a simple calculation on the database"
sql = 'select 2+3 as res'
l = execute(sql)
self.assertEqual(len(l), 1)
self.assertEqual(l[0]['res'], 5)
def testSubstitution(self):
"run simple calculation on db with parameter substitution"
l = execute('select %s+%s as res', args=(2, 3))
self.assertEqual(len(l), 1)
self.assertEqual(l[0]['res'], 5)
def testInsert(self):
"an INSERT should return its uid"
# assumes existence of test.test table
sql = "insert into test.test(thing) values ('')"
n = execute(sql)
self.assertEqual(type(n), int)
class AsyncExecuteTestCase(unittest.TestCase):
""" Not full coverage but the least that would
possibly let us know that we have an execute
connection that can handle substitutions.
Asyncrous edition
"""
def setUp(self):
""
init_db(connect_config)
def tearDown(self):
""
def testNothing(self):
"do nothing except setUp and tearDown"
def testCalculation(self):
"run a simple calculation on the database"
sql = 'select 2+3 as res'
d = aexecute(sql)
# test the results using a callback function
def testCallback(l):
self.assertEqual(len(l), 1)
self.assertEqual(l[0]['res'], 5)
d.addCallback(testCallback)
# always return a deferred
return d
@inlineCallbacks
def testInlineCallbackCalculation(self):
"""run a simple calculation on the database
Same test as last time, but recast as
an inlineCallback rather than with a
callback function.
"""
sql = 'select 2+3 as res'
l = yield aexecute(sql)
# test the results
self.assertEqual(len(l), 1)
self.assertEqual(l[0]['res'], 5)
@inlineCallbacks
def testInsert(self):
"an INSERT should return its uid"
# assumes existence of test.test table
sql = "insert into test.test(thing) values ('')"
n = yield aexecute(sql)
self.assertEqual(type(n), int)
|
python
|
import platform
from global_graph_parser.G_grammarListener import G_grammarListener
from graphviz import Digraph
class MyListener(G_grammarListener):
"""
There are 2 methods (enter and exit) for each rule of the grammar.
As the walker encounters the node for rule Choice, for example,
it triggers enterChoice(). After the walker visits all children
of the Choice node, it triggers exitChoice().
    NOTE: For our purposes, we can't do anything useful in the enter methods
    (except for enterInit), so we leave them empty. We need to walk down the
    parse tree and store information on the stack before we are able to
    build the graph.
"""
def __init__(self, graph_name):
self.graph_name = graph_name
        self.stack = []  # information gathered while walking between nodes is stored here.
self.count = 0 # needed to number each node
self.g = Digraph(graph_name, filename=graph_name, format='pdf') # initializes graph
        # determine the platform-specific location where the graph will be saved.
path_file = open("path.txt", 'r')
paths = []
for line in path_file:
paths.append(line.strip())
path_file.close()
# Windows
if platform.system() == "Windows":
pass
# macOs
if platform.system() == "Darwin":
self.path = paths[0]
# Linux
if platform.system() == "Linux":
self.path = paths[1]
# Enter a parse tree produced by a Init production.
def enterInit(self, ctx):
self.g.node(str(self.count), label="", shape="circle") # start node
self.count += 1
# Exit a parse tree produced by a Init production.
def exitInit(self, ctx):
node = self.stack.pop()
self.g.edge("0", str(node[1]))
self.g.edge(str(node[2]), str(self.count))
self.g.node(str(self.count), label="", shape="doublecircle") # end node
self.g.view(self.graph_name, self.path, False) # draw the graph
# Enter a parse tree produced by a interaction production.
def enterInteraction(self, ctx):
pass
# Exit a parse tree produced by a interaction production.
def exitInteraction(self, ctx):
node = ['interaction', self.count, self.count]
self.stack.append(node)
self.count += 1
self.g.node(str(node[1]), label=ctx.getText(), shape="rect")
# Enter a parse tree produced by a Sequential production.
def enterSequential(self, ctx):
pass
# Exit a parse tree produced by a Sequential production.
def exitSequential(self, ctx):
right = self.stack.pop()
left = self.stack.pop()
node = ['sequential', left[1], right[2]]
self.stack.append(node)
self.g.edge(str(left[2]), str(right[1]))
# Enter a parse tree produced by a Choice production.
def enterChoice(self, ctx):
pass
# Exit a parse tree produced by a Choice production.
def exitChoice(self, ctx):
right = self.stack.pop()
left = self.stack.pop()
if left[0] == 'choice':
            # When three or more choices are nested, merge them so that they
            # share the same start and end node. If this behaviour is not
            # required, just comment out this if-statement.
node = ['choice', left[1], left[2]]
self.stack.append(node)
self.g.edge(str(left[1]), str(right[1]))
self.g.edge(str(right[2]), str(left[2]))
else:
choice_node_start = str(self.count)
self.count += 1
choice_node_end = str(self.count)
self.count += 1
node = ['choice', choice_node_start, choice_node_end]
self.stack.append(node)
self.g.node(choice_node_start, label="+", shape="diamond")
self.g.edge(choice_node_start, str(left[1]))
self.g.edge(choice_node_start, str(right[1]))
self.g.node(choice_node_end, label="+", shape="diamond")
self.g.edge(str(left[2]), choice_node_end)
self.g.edge(str(right[2]), choice_node_end)
# Enter a parse tree produced by a fork production.
def enterFork(self, ctx):
pass
# Exit a parse tree produced by a fork production.
def exitFork(self, ctx):
right = self.stack.pop()
left = self.stack.pop()
if left[0] == 'fork':
            # When three or more forks are nested, merge them so that they
            # share the same start and end node. If this behaviour is not
            # required, just comment out this if-statement.
node = ['fork', left[1], left[2]]
self.stack.append(node)
self.g.edge(str(left[1]), str(right[1]))
self.g.edge(str(right[2]), str(left[2]))
else:
fork_node_start = str(self.count)
self.count += 1
fork_node_end = str(self.count)
self.count += 1
node = ['fork', fork_node_start, fork_node_end]
self.stack.append(node)
self.g.node(fork_node_start, label="|", shape="square")
self.g.edge(fork_node_start, str(left[1]))
self.g.edge(fork_node_start, str(right[1]))
self.g.node(fork_node_end, label="|", shape="square")
self.g.edge(str(left[2]), fork_node_end)
self.g.edge(str(right[2]), fork_node_end)
# Enter a parse tree produced by a loop production.
def enterLoop(self, ctx):
pass
# Exit a parse tree produced by a loop production.
def exitLoop(self, ctx):
node_to_loop = self.stack.pop()
loop_node_start = str(self.count)
self.count += 1
loop_node_end = str(self.count)
self.count += 1
node = ['loop', loop_node_start, loop_node_end]
self.stack.append(node)
self.g.node(loop_node_start, label="+", shape="diamond")
self.g.edge(loop_node_start, str(node_to_loop[1]))
self.g.node(loop_node_end, label="+", shape="diamond")
self.g.edge(str(node_to_loop[2]), loop_node_end)
self.g.edge(loop_node_end, loop_node_start)
# Enter a parse tree produced by a Parenthesis production.
def enterParenthesis(self, ctx):
pass
# Exit a parse tree produced by a Parenthesis production.
def exitParenthesis(self, ctx):
pass
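# Hedged usage sketch (not part of the original module): the module names
# G_grammarLexer/G_grammarParser and the start rule `init` are assumptions
# based on the generated G_grammarListener imported above, and a path.txt
# file must exist in the working directory for MyListener.__init__ to read.
if __name__ == "__main__":
    from antlr4 import CommonTokenStream, FileStream, ParseTreeWalker
    from global_graph_parser.G_grammarLexer import G_grammarLexer
    from global_graph_parser.G_grammarParser import G_grammarParser
    stream = FileStream("example_global_graph.txt")  # hypothetical input file
    parser = G_grammarParser(CommonTokenStream(G_grammarLexer(stream)))
    tree = parser.init()  # parse starting from the grammar's `init` rule
    ParseTreeWalker().walk(MyListener("example_graph"), tree)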
|
python
|
def async_volunteer_group_adder(volunteer_group, volunteers):
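    # Insert rows directly into the auto-generated M2M "through" table with a
    # single bulk_create, skipping volunteers that are already in the group.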
through_model = volunteers[0].groups.through
dups = through_model.objects.all().filter(volunteergroup_id=volunteer_group.pk).values_list('volunteer_id', flat=True)
data = []
for pk in volunteers.values_list('pk', flat=True):
if pk not in dups:
            data.append(through_model(volunteer_id=pk, volunteergroup_id=volunteer_group.pk))
through_model.objects.bulk_create(data)
print("Completed Added group {}".format(volunteer_group))
|
python
|
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ["post"]
labels = {
"user_name": "Your Name",
"user_email": "Your E-Mail",
"text": "Your Comment",
}
|
python
|
from django.core.management.base import BaseCommand, CommandError
import pandas as pd
import os
import time
import json
from sdap.studies.models import ExpressionStudy, ExpressionData, Database, JbrowseData, Species
from django.core.files import File
from sdap.users.models import User
from django.conf import settings
def sync_study(row):
studies = ExpressionStudy.objects.filter(pmid=row['PubMedID'], technology=parse_values(row['technology']), species=parse_values(row['species']))
if studies.count() == 0:
return False
    if studies.count() > 1:
print("Error : More than one study matching " + row['PubMedID'])
return True
    fields = {
"article": row['article'],
"status": "PUBLIC",
"ome": parse_values(row['ome']),
"experimental_design": parse_values(row['experimental_design']),
"topics": parse_values(row['biological_topics']),
"tissues": parse_values(row['tissue_or_cell']),
"sex": parse_values(row['sex']),
"dev_stage":parse_values(row['developmental_stage']),
"age": parse_values(row['age']),
"antibody": parse_values(row['antibody']),
"mutant": parse_values(row['mutant']),
"cell_sorted": parse_values(row['cell_sorted']),
"keywords": parse_values(row['keywords']),
"samples_count": len(parse_values(row['sample_ID'])),
}
need_update = False
    for key, value in fields.items():
        if getattr(studies[0], key) != value:
            need_update = True
if need_update:
print("Updating " + row['PubMedID'])
        studies.update(**fields)
jbrowse_id = row['RGVID']
for study in studies:
if study.jbrowse_data:
study.jbrowse_data.all().delete()
if "JBrowseStatus" in row and row["JBrowseStatus"] == "yes":
species = Species.objects.get(name=row['species'])
data = JbrowseData(jbrowse_id=jbrowse_id, species=species, study=study)
data.save()
return True
def process_study(row, database, superuser, study_folder):
species_dict = {
'Homo sapiens': '9606',
'Mus musculus': '10090',
'Rattus norvegicus': '10116',
'Bos taurus': '9913',
'Macaca mulatta': '9544',
'Sus scrofa': '9823',
'Gallus gallus': '9031',
'Danio rerio': '7955',
'Canis lupus familiaris': '9615',
}
if Species.objects.filter(name=row['species']).count() == 0:
print(row['species'] + " not in registered species : skipping")
return
if sync_study(row):
return
    fields = {
"article": row['article'],
"pmid": row['PubMedID'],
"status": "PUBLIC",
"ome": parse_values(row['ome']),
"technology": parse_values(row['technology']),
"species": parse_values(row['species']),
"experimental_design": parse_values(row['experimental_design']),
"topics": parse_values(row['biological_topics']),
"tissues": parse_values(row['tissue_or_cell']),
"sex": parse_values(row['sex']),
"dev_stage":parse_values(row['developmental_stage']),
"age": parse_values(row['age']),
"antibody": parse_values(row['antibody']),
"mutant": parse_values(row['mutant']),
"cell_sorted": parse_values(row['cell_sorted']),
"keywords": parse_values(row['keywords']),
"samples_count": len(parse_values(row['sample_ID'])),
"database": database,
"created_by": superuser
}
print("Creating study " + dict["article"])
study = ExpressionStudy(**dict)
study.save()
jbrowse_id = row['RGVID']
if "JBrowseStatus" in row and row["JBrowseStatus"] == "yes":
species = Species.objects.get(name=row['species'])
data = JbrowseData(jbrowse_id=jbrowse_id, species=species, study=study)
data.save()
for path in parse_values(row['path']):
print("Creating file with path: " + path)
if not os.path.exists("/app/loading_data/" + path):
print("Missing file : skipping")
continue
data_dict = {
"name": "data_genelevel",
"species": Species.objects.get(name=row['species']),
"technology": row['technology'],
"study": study,
"created_by": superuser
}
if path.split('/')[-1] != "data_genelevel.txt":
data_dict['name'] = path.split('/')[-1].replace(".txt","").replace("_", " ")
expression_file = ExpressionData(**data_dict)
expression_file.file.save(path.split('/')[-1], File(open(study_folder + path)), save=False)
expression_file.save()
def populate_data(metadata_file, studies_folder):
if not os.path.exists(metadata_file):
print("Error : no metadata.csv file found.")
return
dbs = Database.objects.all()
database = dbs[0]
users = User.objects.filter(username='admin')
superuser = users[0]
df = pd.read_csv(metadata_file, sep=",")
df = df.fillna('')
for index, row in df.iterrows():
process_study(row, database, superuser, studies_folder)
def parse_values(values):
value_list = []
if values:
value_list = values.split("|")
return value_list
class Command(BaseCommand):
help = 'Add new studies to the DB'
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('metadata_file', type=str, help='Path to metadata file', default="/rgv_data/studies/metadata.csv")
parser.add_argument('studies_folder', type=str, help='Folder containing the studies folder', default="/rgv_data/")
def handle(self, *args, **options):
folder = options['studies_folder']
if not folder.endswith('/'):
folder += "/"
populate_data(options['metadata_file'], folder)
|
python
|
"""
The module for loading a dataset from a given file and parsing it into a dictionary.
The offered functions allow to receive a list of pairs of samples with their labels
"""
import random
def load_file(file_name, test_file):
    print(file_name)
    sample_folder = "../samples"
    file_path = sample_folder + "/" + file_name
    with open(file_path, "r") as text_file:
        lines_of_file = text_file.read().split('\n')
    loaded_lines = dict()
    if not test_file:
        for line in lines_of_file:
            elements = line.split(":::")
            if elements[0] == '':
                continue
            if len(elements) >= 3:
                elements_structured = (elements[1].encode('utf8'), elements[2].encode('utf8'), file_name[-6:-4].upper())
            else:
                elements_structured = elements[1].encode('utf8')
            loaded_lines[elements[0]] = elements_structured
    else:
        for line in lines_of_file:
            elements = line.split(" ")
            if elements[0] == '' or len(elements) < 3:
                continue
            loaded_lines[elements[0]] = ' '.join(elements[2:-1])
    return loaded_lines
def load_files_formatted(truth_files, tweet_files):
    if truth_files is not None:
        is_test = False
        truths = []
        for t in truth_files:
            truths = truths + [load_file(t, is_test)]
    else:
        is_test = True
        # truths = load_file("truth_es.txt", False)
    tweets = []
    for t in tweet_files:
        tweets = tweets + [load_file(t, is_test)]
    formatted_data = []
    for i in range(len(tweets)):
        for p in tweets[i]:
            if is_test:
                label = (b'AGAINST', b'MALE', 'ES')
            else:
                label = truths[i][p]
            formatted_data.append((tweets[i][p], label, p))  # (tweet, labels, ID)
    return formatted_data
def load_files_formatted_split(truth_files, tweet_files, train_prop=0.9, test_prop=0.1):
    data = load_files_formatted(truth_files, tweet_files)
    # random shuffle
    random.seed(0xF12ABC12123)  # random, but constant seed
    random.shuffle(data)
    # get proportion and return it
    upper_bound = int(round(len(data) * train_prop))
    return (data[:upper_bound], data[upper_bound:])
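# Hedged usage sketch (not part of the original module): the file names are
# assumptions; any truth/tweet pair living under ../samples in the ":::"
# format described above works the same way.
if __name__ == "__main__":
    train, test = load_files_formatted_split(["truth_es.txt"], ["tweets_es.txt"])
    print(len(train), "training items,", len(test), "held-out items")
    # every item is a (tweet, labels, ID) tuple, as noted in load_files_formatted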
|
python
|
# Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: isolated.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the isolated.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'eJy9Ws1vG0l2H4qWTLc1njbHMyNrvmo1q7E4Q5H6sMdrabJYSqTk9lCklh/22oOB3WoWpd5pdn'
'PZTcmar0OQwy4SLJB7guzmuPf8AbkGOQcIcsklQP6BADkF+b1X1c2WLdvJBogg21WvXr2vevU+'
'qm385fvGFTcMPDuSvdJwFERB/nIyP16d//AwCA49Wealg3G/HLkDGUb2YKiwF36fMWbbkR2FLf'
'mrMVbym8Zl2h1Gjwl1LiMyS5fX5kuKTimmU+rEdFqGQidA/rZhjCTYjyM38OemsPfK2jullECl'
'VrLcSqHm3zUuDe1D+Th0v5FzWeybbuUI0MZ8oWm8rkUMh4EfyvxPjdmBtMPxSA6kH4UQMstCph'
'nxjrZvD8OjIGqdwV/41ylNMV7P3zEMaDP6Hyt9ibFZ5znj4njoBXYvZIWzrXia/8h4XQ8fH5zC'
'RqxYtjWrgVsEy79nXOoFJ74icIERJoD8DeONZKKJTDPOlQSsyHxqXHUCP7JdP3w8UkcZzs0wqh'
'kv6CMO8wUjgT32guDr8TCcu8i4b8TwugLn541cQi7HKMmc1vq268Gs4dwltRbPP9k1jMlRA/Pt'
'Vq3drHc7VrPxuNto79e2rR2rVjVfyxvGzJ7V6HZqZiafMy7cbXZb5lT+opGtVh6a2bW6kbP0se'
'Z/ZkzzseWvP3/UWrn5c7wg9puF1+7989tGzrxgvmbumhnj7zK5WZ7k1/6QEdvB8HTkHh5FYm1l'
'9SeicyRFvbttico4OgpGYUlUPE8wQiigoRwdg4UhuqEUQV9ER24owmA8cqRwgp4UmB4Gx3Lky5'
'4Y+z05AooUlaHtEGHXkRCoKO7LUQgDibXSigEEOxKO7YsDKfoBNgnX5111a7vWaNdE34U3GkYu'
'N2XOQOxrGGZzr5k5jAvGbG4GYwPjK9CNZ7kM5jnTNL7k2RTWZ80p8+b8F4INAz2i8ciH4Ji5Ye'
'Q6IRiPlDJ0E8TI9g+lgFgkhj54KDS5uRDHjImD3SwEu5yCTAEya86lIFnMPzJXjM9z0xDnKsR9'
'28zMl8TEW0QUiHEoWZI2zAxblWJxWQDwnKXd4HfVnDbfMK7wjNTLQ733wW06kYggcynIFCDvmu'
'8lezLmm8B4PcHIaEguBZkC5LI5m+yZMq8B43KCQVQIMpOCEM4l00j2ZM23gGEkGFnsIch0CjIF'
'SM68ZHyauwBdrsM2C7DN+0L7NlskvgzKJGyKC6zodWy9ZvxLhqdki/dBvTj/DxmhAjR5JJ2iZ2'
'OMMDg6JUPD1cKhdNy+KxN3i80srL7wg2iCUDRiWj3Zt8cejgQkaIszHo1Ak52G932nEL9jrkEA'
'mg6tq38M5u/KUJy4uFSQYeCGoesfCptlPBX2SDLrU4kbEQyGY9KYjKeVy82weu+mIBlA3jNvpC'
'BZQD4xPzVuaEjG/BB73pt/Jzbo856sEUGeUGdTENr8uvlOCpIFZB4iVDVkyvwR9rw9f1Ps2U/d'
'wXgg/PHgABcf4eFMtkm0jwJ9A1O8p3LTTOdiCpIBJGdeTUGygFwz32JPyZiLFAC0p6hQRzzP9Z'
'QMo+ew9a8zPCVPWQK/T+b/IsNi6jgQxsLCKUYSgQxEnaNR4AdecOg6tieCEaIaDq1EsRLnpSPX'
'UeDJ0Ih9yQO5TRqN5I1Q2HAczz4FGlxZxk4Rm4MO3RlJe3LWLF7uAguYhswAcpmvdQzJAHLdXE'
'xBsoAsIS5uAIKLoGL+fPGZo4jznQhVpEmFQm0xMn+RLVbgGRmsBHk+nb9OtEbRJPxrolr4KS1q'
'KXHUKS1qCY76cQqSBaRgfmLsa0jGXMWea/M/E43Eg1y/5x67vTEMr+sHnS9wOzwZxbdXB+7+yB'
'7IlBwZeNVq4lVT2qNXodYbKUgWkLz5pvFXGQ2aMm9h0/z8n2dSknAZopif0LmHY8eRYdgfe96p'
'lu0F0sBRADm2vTGd/ZCyqB+pqERqYB5iJ9MvCrefACkpnCA0ICv0UkrRVbl1Riky3S0+qwkkC8'
'iceT0xbta8zdf0BcZN6iqlIdd5rzBuFnLcPiMHBffbyZVVEOJLV/ZvYuNeMDc5HP32pcbV/FWo'
'joX7/7HvBei1eUYvCjab0OudFCQLCIXBLzRk2vwp9nwwv5nSqpxcs7h4FCiqHSXPy0w7rcldTE'
'EygORwoBNIFpD3kPcHGjJjVqgOmP9KdIIIZzqJw24kB6GgmpfKsiFnVGUnXH0dshwq0Yo0sw8P'
'R/LQpuxgCNsZBSGCFBJWa387TAk5AyErZ4ScgZAVCDmXgmQBocLjcw25aFax5y1EpGeFTGyEQ0'
'egdcYUxsXP99spnhfBs3qG50XwrHK1N4FkAXkTVcGyhuTMHeb5fups/MBfXltZSbimmOTAZOcM'
'kxyY7JxhkgOTHWJyMMN90rrxX2XjVW1n/o1nWquFTeNS0l1RSxVKuEwv5FYMLZWe5q8Z077tB6'
'rVmm6pydYPxptw5Wfbta0rCcV9Au1nHn166EZH44MSsMuHyI7+4UTEYXQ6lOFE0v/MZP52Kru7'
'v/WHqQ92FeX9uBF8ID3vCx+XsUN77v1ZybhkfoAM85sMSu9/nEVb8QG3FX8/K3iPE3hia9zvI4'
'uKZaGoIR327MiGn0Vy5BxxoQ13HNgoj9K9yMpP9AZh+U5JvKANOYqiYbhRLveQqr1gCEaxOUjX'
'oRZi+UAJUUasaMke8tzIPVBFt412g6puuL1uYwhy4Po2CkWSCyHjBNZD2ud/gzHkHAQ9FIYO35'
'Ai529wHrgRRSzwREil0HWk+4d+4HnBCZV5dJgubeKkDzoy2oBI9PPJM4KF5KPpxmowDqmOpHDC'
'VO0DtFlY0hYzqG5EIi+qoEL1B1FIc/R7z4gDfo5n4+BHpRcJAWYpW8RCQMfeGIIlchgTQf5Pci'
'T1Uy9wxvRoYceHVIb9A6qnBDwF9ZfthRNT8wFh0RBp6ROlGtLlnUTYR6AlgdK+5QeTtVCFy9Dg'
'EolJoQsGU6reOFNQeJJ+L6DKkMNoMAgQp5RN4J1UHcI5RR8LRtwg96MTchPtQXFr4WCXS441It'
'/xlRdxXuKsdtdqi3Zzp/Og0qoJjPdbzftWtVYVWw+xWBPbzf2HLWv3bkfcbdartVZbVBpVQBud'
'lrXV7TRbbUMsVNrYusArlcZDUfvFfqvWbotmS1h7+3UL1EC+VWl0rFq7KKzGdr1btRq7RQEKot'
'HsGGjD96wO8DrNIrN9fp9o7oi9Wmv7LqaVLatudR4ywx2r0yBmO82WISpiv9LqWNvdeqUl9rut'
'/SZae9KsarW36xVrr1ZFC9UAT1G7X2t0RPtupV4/q6ghmg8atRZJn1ZTbNUgZWWrXiNWrGfVat'
'W2O6TQZLQN40HAOvo5fonBCPaoQZ1K62FRE23Xft4FFhZFtbJX2YV2S6+yCg5mu9uq7ZHUMEW7'
'u9XuWJ1upyZ2m80qG7tda923tmvtTVFvttlg3XYNglQrnQqzBg2YC+sYb3XbFhvOanRqrVZ3n9'
'6PCjjlB7AMpKxgb5Ut3GyQtuQrtWbrIZElO/AJFMWDuzXAW2RUtlaFzNCG1bY7aTQwhBGh0kRP'
'0ajt1q3dWmO7RstNIvPAatcKODCrTQgWM4YPgGmXtaaDglyGGqdct8jnKawdUanet0hyjQ0PaF'
'vaXdhs23e1zfVrj0A2mcMoZy5gtEnA3KIeE/QjjD5k6Id6TNAfY7TF0Mt6TFBqFosMzegxQT/G'
'qMzQeEyjG/QEwVBDjwm6hNGPGPpjPf6Pd/nF4gedAuf/7V14eZJ907WoLYYBMh6HN3pgQuUth4'
'gi1Aci4Nj+qYJ/E/gcVbwAFZBBZRCQ7FEREYeyQI9eCmyEprHap+sDjqmoIZ1J5ogXKDFQscDz'
'dNvPL32KkEspFcUWwhYFN/Szchg4R/T+1e1si4Hb8zmyUzl4z/bHlA5Wi2L1zu2VYhywEf48OU'
'TkF7uoHQMEaD+RXpwcuSAnnyLGcYOBQH0O1oHtfI0o2eMC8FQCAmNQIKTUP3D9caSb5c9WEv28'
'gBrxurSHE5WBsRAOsF/2FhB6VSL2A+EBy9BoIrIPPH6t9KUku1JFzCXJkHKsSuxj9TYjvly7uY'
'ywjVNxfZAFDaL+1dLLiw86zzJjFlSPEj8rginlBrGysrK6zL+dlZUN/n1Eqt/Bz/Lq2vL6amdt'
'fePWHfyW7sQ/j0pi69Sgg0RywnlDwEiryNRRrUg4Cz36x+0Uv1BA6WOJpp3PVyUn8WVrZ9sQ6+'
'vrdya6nJyclFwZ9UvB6LA86jv0hzBK0dOoQJUb9Wb0/EEFs/hI1J7a1IiHmOihWN1AIcdvV6m7'
'wAxx4a1fiCdkmaXCk5IufSZISRG6qVYm5XMoo8f6gJd4e6NbrxcK5+Kxvy+tYHEi09qrZDqUEV'
'EJ+j37NCUbdEVSZwboL0V0rDmeQf84Oi4KFmjzj1XpuBQd0+xlGikklCAOappVeM8ZDddfqOED'
'119fE092ZdQ+DdEI0nIl3HE92Tl7EDtWvdZBHhb9SIvxoj0f96NY0i5y1Gc3IbDzdSj+RCwtLS'
'lIoR+Veid3ETiqcBraVRCffy7W1wriO8Fr9eAkXortVi4jgEJeNPwhk6TLAlVTMSwsJQgqSq1+'
'9vw1SqjR9tXPbt68eXv9s5VJ2NAPcV3ffRpTQTB7lkrpjzvMJaU/TKGMUubDop8CuqCUOK/wYK'
'JD5orpLKbosAMUzjjAzRc6wD372BZP1EGW9JM1oey5HurzlANQNEWkJSiO8sUbXuLm2JdAS748'
'2Rq7HiripQIp1tYW0iyUYQqKFv0QTkPpjlhMmmtMpbpWmy1QKB0QZZZlYoNbL7RB+qWectb+KS'
'pxP1b8XPGXCs+eDa7D9sQaWKcIeK+NImzPHg4RFAGwfAVRPW2Rk2PKTmicySnPpHMVUHUmNTgs'
'/6+ismJFGd2mZF5UZBSUmC18S9n0++VvB2hpjvAvgtb3nW8ppX2/8S0yK/6G837/ZelbKiLIkb'
'//6tGCgbxNb3FqNxGyvRP7FMI/jd/UVIbsU27suYdomyjV4xw0p6JgVihzFTPMiVuRUxCz5Gz9'
'jRwFy0O711PNVXQSxNSk7RypSiWubqgq0hetqOsKSm+HAT1rUfKMty65JVnSwNXza6ACBCP+wV'
'BRVpwWHqFqGPf7CA30PurY6mVSsh9wfba0gLJoobB5BmqoMupXYxd1RwlhTD0LKWcIuWN1oagI'
'YQmvF5uSnh6oxlqyw4QbPVUaJEaBDsCnHtFXif55VyJD2mdYDW00pgmbA8jFlQ7lfcdBhSYO0E'
'YzT9qrWupYh/A5OagYDPp93EsuYnZQJUl114piYW1l9TbFzNVbnZXVjfWVjdVbpZVVmE95N0Iv'
'zZOgO7RDVKOMyfzR2CfV5K0ifX6+XdIXCAGr7YzcIe4PGTxdwNiCkoYIDn4pnUjVPm4YO7vyR3'
'Z/GImqyh7uUxRY7WabL9lS4ZyyrTQIvkGcsfl2SX+52y73AicsP5AH5Yko5Zbs4zr4jizvesGB'
'7T1usgxhmQQqp5gU+GXnKIAbWHGkKfI9VyKJJ1RH8TfDePAkVkh/TdLaSoh/nopQ6gmiRp+3pj'
'SC1KWhimyky1rZcw9GMDAXo6WjaOB9xKN4b4FfJIzEkWMm9D4hbiw+XF4cLC/2Oot3Nxb3Nhbb'
'pcX+oxsot92v5YkbqsdiMtDklODPitq9oGezs94IIStME6f6HRWsenqK7PPVknrH03Hul9jJ0t'
'Ngmatoe+jygcRQVVsrWcvP02Y9YwaLa1X8GqJAhgwO+P3M1nqi3IfUQ74gaJoOpS9Htrpq8TUL'
'k3d6HWWRbpJvzz/wt47fTb49/2mGP2D9NiNak94v9n9wILdnO+MQnXT9YZxfgIg9enCj/xfxko'
'bBOK9jeAS5HQ/Ockwt1NXk4/C0kvFiCpQhEH0Rm4CyBKJPYv8e65Yxf0378vP/lBGNwF/2+QPB'
'sTzbdtpxe0Ud1/ltZ0NvTDox/m4Tqje8CTF+aQwj+k5+ZB9Lfq5PeDJpvTH+/yPcyeKMqIOM2+'
'xn7ae7q6L+Y5xrI/pm+OuzNsoo9XPm6ylQlkCmeTX+APDfdjTQIQ==')))
_INDEX = {
f.name: {
'descriptor': f,
'services': {s.name: s for s in f.service},
}
for f in FILE_DESCRIPTOR_SET.file
}
IsolatedServiceDescription = {
'file_descriptor_set': FILE_DESCRIPTOR_SET,
'file_descriptor': _INDEX[u'isolated.proto']['descriptor'],
'service_descriptor': _INDEX[u'isolated.proto']['services'][u'Isolated'],
}
|
python
|
from aiozk import protocol
from aiozk.exc import TransactionFailed
class Transaction:
"""Transaction request builder"""
def __init__(self, client):
"""
:param client: Client instance
:type client: aiozk.ZKClient
"""
self.client = client
self.request = protocol.TransactionRequest()
def check_version(self, path, version):
"""
Check znode version
:param str path: Znode path
:param int version: Znode version
:return: None
"""
path = self.client.normalize_path(path)
self.request.add(
protocol.CheckVersionRequest(path=path, version=version)
)
def create(
self, path, data=None, acl=None,
ephemeral=False, sequential=False, container=False
):
"""
Create new znode
:param str path: Znode path
:param data: Data to store in node
:type data: str or bytes
:param acl: List of ACLs
:type acl: [aiozk.ACL]
:param bool ephemeral: Ephemeral node type
:param bool sequential: Sequential node type
:param bool container: Container node type
:return: None
:raises ValueError: when *containers* feature is not supported by
Zookeeper server (< 3.5.1)
"""
if container and not self.client.features.containers:
raise ValueError("Cannot create container, feature unavailable.")
path = self.client.normalize_path(path)
acl = acl or self.client.default_acl
if self.client.features.create_with_stat:
request_class = protocol.Create2Request
else:
request_class = protocol.CreateRequest
request = request_class(path=path, data=data, acl=acl)
request.set_flags(ephemeral, sequential, container)
self.request.add(request)
def set_data(self, path, data, version=-1):
"""
Set data to znode
:param str path: Znode path
:param data: Data to store in node
:type data: str or bytes
:param int version: Current version of node
:return: None
"""
path = self.client.normalize_path(path)
self.request.add(
protocol.SetDataRequest(path=path, data=data, version=version)
)
def delete(self, path, version=-1):
"""
Delete znode
:param str path: Znode path
:param int version: Current version of node
:return: None
"""
path = self.client.normalize_path(path)
self.request.add(
protocol.DeleteRequest(path=path, version=version)
)
async def commit(self):
"""
Send all calls in transaction request and return results
:return: Transaction results
:rtype: aiozk.transaction.Result
:raises ValueError: On no operations to commit
"""
if not self.request.requests:
raise ValueError("No operations to commit.")
response = await self.client.send(self.request)
pairs = zip(self.request.requests, response.responses)
result = Result()
for request, reply in pairs:
if isinstance(reply, protocol.CheckVersionResponse):
result.checked.add(self.client.denormalize_path(request.path))
elif isinstance(reply, protocol.CreateResponse):
result.created.add(self.client.denormalize_path(request.path))
elif isinstance(reply, protocol.SetDataResponse):
result.updated.add(self.client.denormalize_path(request.path))
elif isinstance(reply, protocol.DeleteResponse):
result.deleted.add(self.client.denormalize_path(request.path))
return result
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exception, tb):
# propagate error by returning None
if exception:
return
result = await self.commit()
if not result:
raise TransactionFailed
class Result:
"""
Transaction result aggregator
Contains attributes:
- **checked** Set with results of ``check_version()`` methods
- **created** Set with results of ``create()`` methods
- **updated** Set with results of ``set_data()`` methods
- **deleted** Set with results of ``delete()`` methods
"""
def __init__(self):
self.checked = set()
self.created = set()
self.updated = set()
self.deleted = set()
def __bool__(self):
return sum([
len(self.checked),
len(self.created),
len(self.updated),
len(self.deleted),
]) > 0
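# Hedged usage sketch (not part of the original module): the host string and
# znode paths are assumptions. Driving the Transaction through its async
# context manager commits on exit and raises TransactionFailed if nothing
# was applied.
async def _example():
    from aiozk import ZKClient  # assumed top-level export of the client class
    client = ZKClient("localhost:2181")
    await client.start()
    async with Transaction(client) as txn:
        txn.check_version("/config", version=3)
        txn.set_data("/config", b"new payload")
        txn.create("/config/updated", data=b"1", ephemeral=True)
    await client.close()
# e.g. asyncio.run(_example())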
|
python
|
from src.util.config import (
load_config,
save_config,
)
from argparse import ArgumentParser
from typing import Dict
from src.util.default_root import DEFAULT_ROOT_PATH
def make_parser(parser: ArgumentParser):
parser.add_argument(
"--set-node-introducer",
help="Set the introducer for node - IP:Port",
type=str,
nargs="?",
default="",
)
parser.add_argument(
"--set-fullnode-port",
help="Set the port to use for the fullnode",
type=str,
nargs="?",
default="",
)
parser.add_argument(
"--set-log-level",
help="Set the instance log level, Can be CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET",
type=str,
nargs="?",
default="",
)
parser.set_defaults(function=configure)
def help_message():
print("usage: chia configure -flag")
print(
"""
chia configure [arguments] [inputs]
--set-node-introducer [IP:Port] (Set the introducer for node),
--set-fullnode-port [Port] (Set the full node default port, useful for beta testing),
--set-log-level [LogLevel] (Can be CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET)
"""
)
def configure(args, parser):
config: Dict = load_config(DEFAULT_ROOT_PATH, "config.yaml")
change_made = False
if args.set_node_introducer:
try:
if args.set_node_introducer.index(":"):
host, port = (
":".join(args.set_node_introducer.split(":")[:-1]),
args.set_node_introducer.split(":")[-1],
)
config["full_node"]["introducer_peer"]["host"] = host
config["full_node"]["introducer_peer"]["port"] = int(port)
config["introducer"]["port"] = int(port)
print("Node introducer updated.")
change_made = True
except ValueError:
print("Node introducer address must be in format [IP:Port]")
if args.set_fullnode_port:
config["full_node"]["port"] = int(args.set_fullnode_port)
config["full_node"]["introducer_peer"]["port"] = int(args.set_fullnode_port)
config["farmer"]["full_node_peer"]["port"] = int(args.set_fullnode_port)
config["timelord"]["full_node_peer"]["port"] = int(args.set_fullnode_port)
config["wallet"]["full_node_peer"]["port"] = int(args.set_fullnode_port)
config["wallet"]["introducer_peer"]["port"] = int(args.set_fullnode_port)
config["introducer"]["port"] = int(args.set_fullnode_port)
print("Default full node port updated.")
change_made = True
if args.set_log_level:
        if args.set_log_level in ("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"):
config["logging"]["log_level"] = args.set_log_level
print("Logging level updated. Check CHIA_ROOT/log/debug.log")
change_made = True
if change_made:
print("Restart any running chia services for changes to take effect.")
save_config(args.root_path, "config.yaml", config)
else:
help_message()
return 0
|
python
|
from gym.spaces import Box, MultiDiscrete
import logging
import numpy as np
import ray
import ray.experimental.tf_utils
from ray.rllib.agents.ddpg.ddpg_tf_policy import ComputeTDErrorMixin, \
TargetNetworkMixin
from ray.rllib.agents.dqn.dqn_tf_policy import postprocess_nstep_and_prio
from ray.rllib.agents.sac.sac_ensemble_tf_model import SACEnsembleTFModel
from ray.rllib.agents.sac.sac_torch_model import SACTorchModel
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_action_dist import Beta, MultiCategorical, \
DiagGaussian, MultiSquashedGaussian
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_ensemble_policy_template import build_tf_ensemble_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import get_variable, try_import_tf, \
try_import_tfp
tf1, tf, tfv = try_import_tf()
tfp = try_import_tfp()
logger = logging.getLogger(__name__)
def build_sac_ensemble_model(policy, obs_space, action_space, config):
# 2 cases:
# 1) with separate state-preprocessor (before obs+action concat).
# 2) no separate state-preprocessor: concat obs+actions right away.
if config["use_state_preprocessor"]:
num_outputs = 256 # Flatten last Conv2D to this many nodes.
else:
num_outputs = 0
# No state preprocessor: fcnet_hiddens should be empty.
if config["model"]["fcnet_hiddens"]:
logger.warning(
"When not using a state-preprocessor with SAC, `fcnet_hiddens`"
" will be set to an empty list! Any hidden layer sizes are "
"defined via `policy_model.fcnet_hiddens` and "
"`Q_model.fcnet_hiddens`.")
config["model"]["fcnet_hiddens"] = []
# Force-ignore any additionally provided hidden layer sizes.
# Everything should be configured using SAC's "Q_model" and "policy_model"
# settings.
policy.model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework=config["framework"],
model_interface=SACTorchModel
if config["framework"] == "torch" else SACEnsembleTFModel,
name="sac_model",
actor_hidden_activation=config["policy_model"]["fcnet_activation"],
actor_hiddens=config["policy_model"]["fcnet_hiddens"],
critic_hidden_activation=config["Q_model"]["fcnet_activation"],
critic_hiddens=config["Q_model"]["fcnet_hiddens"],
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
alpha=config["alpha"],
target_entropy=config["target_entropy"],
ensemble_size=config["partial_ensemble_size"],
timescale=config["timescale"],
shared_actor=config["shared_actor"],)
policy.target_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework=config["framework"],
model_interface=SACTorchModel
if config["framework"] == "torch" else SACEnsembleTFModel,
name="target_sac_model",
actor_hidden_activation=config["policy_model"]["fcnet_activation"],
actor_hiddens=config["policy_model"]["fcnet_hiddens"],
critic_hidden_activation=config["Q_model"]["fcnet_activation"],
critic_hiddens=config["Q_model"]["fcnet_hiddens"],
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
alpha=config["alpha"],
target_entropy=config["target_entropy"],
ensemble_size=config["partial_ensemble_size"],
timescale=config["timescale"],
shared_actor=config["shared_actor"],)
return policy.model
def slice_loss(x, idx, mode='slice'):
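    # Reduce the per-member loss tensor `x` to the ensemble member at `idx`,
    # either by slicing out that member's row ('slice') or by zeroing every
    # other member with a one-hot mask along axis 1 ('mask').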
xshape = x.shape.as_list()
if mode == 'slice':
begin = [0] * len(xshape)
size = [-1] * len(xshape)
begin[1] = idx
size[1] = 1
return tf.reduce_mean(tf.slice(x, begin, size))
elif mode == 'mask':
        onehot_vec = tf.expand_dims(tf.one_hot(idx, depth=xshape[1]), 0)
if len(xshape) == 3:
onehot_vec = tf.expand_dims(onehot_vec, -1)
masked_x = tf.multiply(x, onehot_vec)
return tf.reduce_mean(tf.reduce_sum(masked_x, axis=1))
else:
raise ValueError
def postprocess_trajectory(policy,
sample_batch,
other_agent_batches=None,
episode=None):
if 'infos' not in sample_batch:
sample_batch['members'] = np.ones_like(sample_batch[SampleBatch.REWARDS]).astype(np.int32)
print("infos field not in sample_batch !!!")
else:
sample_batch['members'] = np.array([info['active_member'] for info in sample_batch['infos']], dtype=np.int32)
return postprocess_nstep_and_prio(policy, sample_batch)
def get_dist_class(config, action_space):
if isinstance(action_space, MultiDiscrete):
return MultiCategorical
else:
if config["normalize_actions"]:
return MultiSquashedGaussian if \
not config["_use_beta_distribution"] else Beta
else:
return DiagGaussian
def get_distribution_inputs_and_class(policy,
model,
obs_batch,
*,
explore=True,
**kwargs):
# Get base-model output.
model_out, state_out = model({
"obs": obs_batch,
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Get action model output from base-model output.
distribution_inputs = model.get_policy_output(model_out)
action_dist_class = get_dist_class(policy.config, policy.action_space)
return distribution_inputs, action_dist_class, state_out
def sac_actor_critic_loss(policy, model, _, train_batch):
# Should be True only for debugging purposes (e.g. test cases)!
deterministic = policy.config["_deterministic_loss"]
model_out_t, _ = model({
"obs": train_batch[SampleBatch.CUR_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
model_out_tp1, _ = model({
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
target_model_out_tp1, _ = policy.target_model({
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Broadcast the action of active ensemble member to all other ensemble members,
# because this action is the one responsible for the transition.
E = policy.config['partial_ensemble_size']
dones = tf.tile(tf.expand_dims(train_batch[SampleBatch.DONES], 1), [1, E])
rewards = tf.tile(tf.expand_dims(train_batch[SampleBatch.REWARDS], 1), [1, E])
member_mat = tf.one_hot(train_batch['members'], depth=E)
# Discrete case.
if model.discrete:
# Get all action probs directly from pi and form their logp.
log_pis_t = tf.nn.log_softmax(model.get_policy_output(model_out_t), -1)
policy_t = tf.math.exp(log_pis_t)
log_pis_tp1 = tf.nn.log_softmax(
model.get_policy_output(model_out_tp1), -1)
policy_tp1 = tf.math.exp(log_pis_tp1)
# Q-values.
q_t = model.get_q_values(model_out_t)
# Target Q-values.
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1)
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(model_out_t)
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1)
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
######################### CROSS ENTROPY #########################
# old:
# q_tp1 -= model.alpha * log_pis_tp1
# new:
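        # Asymmetric case: member i is regularised with the running mean of the
        # log-probs of members 0..i (cumulative sum divided by i+1); otherwise
        # every member uses the plain ensemble average of the log-probs.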
if policy.config["asymmetric"]:
print(f"============ Asymmetric Ensemble===========")
cum_log_pis_tp1 = tf.math.cumsum(log_pis_tp1, axis=1)
arange = tf.range(start=1, limit=E + 1, delta=1, dtype=tf.float32, name='range')
inv_arange = tf.math.divide(1., arange)
w = tf.tile(tf.expand_dims(inv_arange, 1), [1, q_t.shape.as_list()[-1]])
ens_log_pis_tp1 = w * cum_log_pis_tp1
q_tp1 -= model.alpha * ens_log_pis_tp1
else:
beta = 1 / E * tf.ones((E, E), dtype=tf.float32)
q_tp1 -= model.alpha * tf.matmul(beta, log_pis_tp1)
#################################################################
# Actually selected Q-values (from the actions batch).
actions_mat = tf.cast(member_mat, train_batch[SampleBatch.ACTIONS].dtype) * train_batch[SampleBatch.ACTIONS]
actions = tf.reduce_sum(actions_mat, axis=1)
bcast_actions = tf.tile(tf.expand_dims(actions, 1), [1, E])
one_hot = tf.one_hot(bcast_actions, depth=q_t.shape.as_list()[-1])
q_t_selected = tf.reduce_sum(q_t * one_hot, axis=-1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.reduce_sum(twin_q_t * one_hot, axis=-1)
# Discrete case: "Best" means weighted by the policy (prob) outputs.
q_tp1_best = tf.reduce_sum(tf.multiply(policy_tp1, q_tp1), axis=-1)
q_tp1_best_masked = \
(1.0 - tf.cast(dones, tf.float32)) * \
q_tp1_best
# Continuous actions case.
else:
# Sample single actions from distribution.
action_dist_class = get_dist_class(policy.config, policy.action_space)
action_dist_t = action_dist_class(
model.get_policy_output(model_out_t), policy.model)
policy_t = action_dist_t.sample() if not deterministic else \
action_dist_t.deterministic_sample()
log_pis_t = tf.expand_dims(action_dist_t.logp(policy_t, reduce=False), -1)
action_dist_tp1 = action_dist_class(
model.get_policy_output(model_out_tp1), policy.model)
policy_tp1 = action_dist_tp1.sample() if not deterministic else \
action_dist_tp1.deterministic_sample()
log_pis_tp1 = tf.expand_dims(action_dist_tp1.logp(policy_tp1, reduce=False), -1)
# Q-values for the actually selected actions.
ex_member_mat = tf.tile(tf.expand_dims(member_mat, 2), [1, 1, policy_t.shape.as_list()[-1]])
active_actions = tf.reduce_sum(ex_member_mat * train_batch[SampleBatch.ACTIONS], axis=1, keepdims=True)
active_action_mat = tf.tile(active_actions, [1, E, 1])
q_t = model.get_q_values(model_out_t, active_action_mat)
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(
model_out_t, active_action_mat)
# Q-values for current policy in given current state.
q_t_det_policy = model.get_q_values(model_out_t, policy_t)
if policy.config["twin_q"]:
twin_q_t_det_policy = model.get_twin_q_values(
model_out_t, policy_t)
q_t_det_policy = tf.reduce_min(
(q_t_det_policy, twin_q_t_det_policy), axis=0)
# target q network evaluation
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
policy_tp1)
if policy.config["twin_q"]:
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1)
# Take min over both twin-NNs.
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
######################### CROSS ENTROPY #########################
# old:
# q_tp1 -= model.alpha * log_pis_tp1
# new:
if policy.config["asymmetric"]:
print(f"============ Asymmetric Ensemble===========")
arange = tf.range(start=1, limit=E + 1, delta=1, dtype=tf.float32, name='range')
inv_arange = tf.math.divide(1., arange)
w = tf.tile(tf.expand_dims(inv_arange, 1), [1, q_t.shape.as_list()[-1]])
cum_log_pis_tp1 = tf.math.cumsum(log_pis_tp1, axis=1)
ens_log_pis_tp1 = w * cum_log_pis_tp1
q_tp1 -= model.alpha * ens_log_pis_tp1
else:
beta = 1 / E * tf.ones((E, E), dtype=tf.float32)
q_tp1 -= model.alpha * tf.matmul(beta, log_pis_tp1)
#################################################################
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (1.0 - tf.cast(dones, tf.float32)) * q_tp1_best
assert policy.config["n_step"] == 1, "TODO(hartikainen) n_step > 1"
# compute RHS of bellman equation
q_t_selected_target = tf.stop_gradient(
rewards + policy.config["gamma"]**policy.config["n_step"] * q_tp1_best_masked)
# Compute the TD-error (potentially clipped).
base_td_error = tf.abs(q_t_selected - q_t_selected_target)
if policy.config["twin_q"]:
twin_td_error = tf.abs(twin_q_t_selected - q_t_selected_target)
td_error = 0.5 * (base_td_error + twin_td_error)
else:
td_error = base_td_error
crnt_trnng_idx = tf.cast(policy.model.flrd_cntr, tf.int32)
critic_ens_loss = 0.5 * tf.square(q_t_selected_target - q_t_selected)
slice_mode = 'slice'
critic_loss = [slice_loss(critic_ens_loss, crnt_trnng_idx, mode=slice_mode)]
if policy.config["twin_q"]:
twin_c_ens_loss = 0.5 * tf.square(q_t_selected_target - twin_q_t_selected)
critic_loss.append(slice_loss(twin_c_ens_loss, crnt_trnng_idx, mode=slice_mode))
# Alpha- and actor losses.
# Note: In the papers, alpha is used directly, here we take the log.
# Discrete case: Multiply the action probs as weights with the original
# loss terms (no expectations needed).
if model.discrete:
# ens_pis_t = tf.reduce_mean(policy_t, axis=1)
# ens_log_pis_t = tf.log(ens_pis_t)
# alpha_loss = tf.reduce_mean(
# mask *
# tf.reduce_sum(
# tf.multiply(
# tf.stop_gradient(ens_pis_t), -model.log_alpha *
# tf.stop_gradient(ens_log_pis_t + model.target_entropy)),
# axis=-1))
actor_ens_loss = tf.reduce_sum(tf.multiply(policy_t, model.alpha * log_pis_t - tf.stop_gradient(q_t)), axis=-1)
actor_loss = slice_loss(actor_ens_loss, crnt_trnng_idx, mode=slice_mode)
else:
# alpha_loss = -tf.reduce_mean(
# model.log_alpha *
# tf.stop_gradient(log_pis_t + model.target_entropy))
actor_ens_loss = model.alpha * log_pis_t - q_t_det_policy
actor_loss = slice_loss(actor_ens_loss, crnt_trnng_idx, slice_mode)
# save for stats function
policy.policy_t = policy_t
policy.q_t = q_t
policy.td_error = td_error
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
# policy.alpha_loss = alpha_loss
policy.alpha_value = model.alpha
policy.target_entropy = model.target_entropy
# in a custom apply op we handle the losses separately, but return them
# combined in one loss for now
return actor_loss + tf.add_n(critic_loss) # + alpha_loss
def gradients_fn(policy, optimizer, loss):
# Eager: Use GradientTape.
if policy.config["framework"] in ["tf2", "tfe"]:
tape = optimizer.tape
pol_weights = policy.model.policy_variables()
actor_grads_and_vars = list(zip(tape.gradient(
policy.actor_loss, pol_weights), pol_weights))
q_weights = policy.model.q_variables()
if policy.config["twin_q"]:
half_cutoff = len(q_weights) // 2
grads_1 = tape.gradient(
policy.critic_loss[0], q_weights[:half_cutoff])
grads_2 = tape.gradient(
policy.critic_loss[1], q_weights[half_cutoff:])
critic_grads_and_vars = \
list(zip(grads_1, q_weights[:half_cutoff])) + \
list(zip(grads_2, q_weights[half_cutoff:]))
else:
critic_grads_and_vars = list(zip(tape.gradient(
policy.critic_loss[0], q_weights), q_weights))
# alpha_vars = [policy.model.log_alpha]
# alpha_grads_and_vars = list(zip(tape.gradient(
# policy.alpha_loss, alpha_vars), alpha_vars))
# Tf1.x: Use optimizer.compute_gradients()
else:
actor_grads_and_vars = policy._actor_optimizer.compute_gradients(
policy.actor_loss, var_list=policy.model.policy_variables())
q_weights = policy.model.q_variables()
if policy.config["twin_q"]:
half_cutoff = len(q_weights) // 2
base_q_optimizer, twin_q_optimizer = policy._critic_optimizer
critic_grads_and_vars = base_q_optimizer.compute_gradients(
policy.critic_loss[0], var_list=q_weights[:half_cutoff]
) + twin_q_optimizer.compute_gradients(
policy.critic_loss[1], var_list=q_weights[half_cutoff:])
else:
critic_grads_and_vars = policy._critic_optimizer[
0].compute_gradients(
policy.critic_loss[0], var_list=q_weights)
# alpha_grads_and_vars = policy._alpha_optimizer.compute_gradients(
# policy.alpha_loss, var_list=[policy.model.log_alpha])
# Clip if necessary.
if policy.config["grad_clip"]:
clip_func = tf.clip_by_norm
else:
clip_func = tf.identity
# Save grads and vars for later use in `build_apply_op`.
policy._actor_grads_and_vars = [
(clip_func(g), v) for (g, v) in actor_grads_and_vars if g is not None]
policy._critic_grads_and_vars = [
(clip_func(g), v) for (g, v) in critic_grads_and_vars if g is not None]
# policy._alpha_grads_and_vars = [
# (clip_func(g), v) for (g, v) in alpha_grads_and_vars if g is not None]
    grads_and_vars = (
        policy._actor_grads_and_vars
        + policy._critic_grads_and_vars
        # + policy._alpha_grads_and_vars
    )
return grads_and_vars
def apply_gradients(policy, optimizer, grads_and_vars):
actor_apply_ops = policy._actor_optimizer.apply_gradients(
policy._actor_grads_and_vars)
cgrads = policy._critic_grads_and_vars
half_cutoff = len(cgrads) // 2
if policy.config["twin_q"]:
critic_apply_ops = [
policy._critic_optimizer[0].apply_gradients(cgrads[:half_cutoff]),
policy._critic_optimizer[1].apply_gradients(cgrads[half_cutoff:])
]
else:
critic_apply_ops = [
policy._critic_optimizer[0].apply_gradients(cgrads)
]
if policy.config["framework"] in ["tf2", "tfe"]:
# policy._alpha_optimizer.apply_gradients(policy._alpha_grads_and_vars)
assert False, 'implement counter apply op'
return
else:
# alpha_apply_ops = policy._alpha_optimizer.apply_gradients(
# policy._alpha_grads_and_vars,
# global_step=tf1.train.get_or_create_global_step())
return tf.group([actor_apply_ops, policy.model.cntr_inc_op] + critic_apply_ops)
# # alpha_apply_ops = policy._alpha_optimizer.apply_gradients(
# # policy._alpha_grads_and_vars,
# # global_step=tf.train.get_or_create_global_step())
# apply_ops = [actor_apply_ops] + critic_apply_ops
# apply_ops += [policy.model.cntr_inc_op]
#
# # if policy.config["alpha"] is None:
# # apply_ops += [alpha_apply_ops]
# return tf.group(apply_ops)
def stats(policy, train_batch):
return {
# "policy_t": policy.policy_t,
# "td_error": policy.td_error,
"mean_td_error": tf.reduce_mean(policy.td_error),
"actor_loss": tf.reduce_mean(policy.actor_loss),
"critic_loss": tf.reduce_mean(policy.critic_loss),
# "alpha_loss": tf.reduce_mean(policy.alpha_loss),
"alpha_value": tf.reduce_mean(policy.alpha_value),
"target_entropy": tf.constant(policy.target_entropy),
"mean_q": tf.reduce_mean(policy.q_t),
"max_q": tf.reduce_max(policy.q_t),
"min_q": tf.reduce_min(policy.q_t),
"counter": policy.model.cntr,
"floored_counter": policy.model.flrd_cntr,
}
class ActorCriticOptimizerMixin:
def __init__(self, config):
# - Create global step for counting the number of update operations.
# - Use separate optimizers for actor & critic.
if config["framework"] in ["tf2", "tfe"]:
self.global_step = get_variable(0, tf_name="global_step")
self._actor_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["actor_learning_rate"])
self._critic_optimizer = [
tf.keras.optimizers.Adam(
learning_rate=config["optimization"][
"critic_learning_rate"])
]
if config["twin_q"]:
self._critic_optimizer.append(
tf.keras.optimizers.Adam(
learning_rate=config["optimization"][
"critic_learning_rate"]))
self._alpha_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["entropy_learning_rate"])
else:
self.global_step = tf1.train.get_or_create_global_step()
self._actor_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["actor_learning_rate"])
self._critic_optimizer = [
tf1.train.AdamOptimizer(
learning_rate=config["optimization"][
"critic_learning_rate"])
]
if config["twin_q"]:
self._critic_optimizer.append(
tf1.train.AdamOptimizer(
learning_rate=config["optimization"][
"critic_learning_rate"]))
self._alpha_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["entropy_learning_rate"])
def setup_early_mixins(policy, obs_space, action_space, config):
ActorCriticOptimizerMixin.__init__(policy, config)
def setup_mid_mixins(policy, obs_space, action_space, config):
ComputeTDErrorMixin.__init__(policy, sac_actor_critic_loss)
def setup_late_mixins(policy, obs_space, action_space, config):
TargetNetworkMixin.__init__(policy, config)
def validate_spaces(pid, observation_space, action_space, config):
if not isinstance(action_space, (Box, MultiDiscrete)):
raise UnsupportedSpaceException(
"Action space ({}) of {} is not supported for "
"SAC.".format(action_space, pid))
if isinstance(action_space, Box) and len(action_space.shape) != 2:
raise UnsupportedSpaceException(
"Action space ({}) of {} has multiple dimensions "
"{}. ".format(action_space, pid, action_space.shape) +
"Consider reshaping this into a single dimension, "
"using a Tuple action space, or the multi-agent API.")
SACEnsembleTFPolicy = build_tf_ensemble_policy(
name="SACTFPolicy",
get_default_config=lambda: ray.rllib.agents.sac.sac_ensemble.DEFAULT_CONFIG,
make_model=build_sac_ensemble_model,
postprocess_fn=postprocess_trajectory,
action_distribution_fn=get_distribution_inputs_and_class,
loss_fn=sac_actor_critic_loss,
stats_fn=stats,
gradients_fn=gradients_fn,
apply_gradients_fn=apply_gradients,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
mixins=[
TargetNetworkMixin, ActorCriticOptimizerMixin, ComputeTDErrorMixin
],
validate_spaces=validate_spaces,
before_init=setup_early_mixins,
before_loss_init=setup_mid_mixins,
after_init=setup_late_mixins,
obs_include_prev_action_reward=False)
|
python
|
import unittest
from test.testutil import set_default_576_324_videos_for_testing, \
set_default_576_324_videos_for_testing_scaled, \
set_default_cambi_video_for_testing_b, \
set_default_cambi_video_for_testing_10b
from vmaf.core.cambi_feature_extractor import CambiFeatureExtractor, CambiFullReferenceFeatureExtractor
from vmaf.core.cambi_quality_runner import CambiQualityRunner, CambiFullReferenceQualityRunner
from vmaf.tools.misc import MyTestCase
class CambiFeatureExtractorTest(MyTestCase):
def tearDown(self):
if hasattr(self, 'fextractor'):
self.fextractor.remove_results()
super().tearDown()
def test_run_cambi_fextractor(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.6892500624999999, places=4)
self.assertAlmostEqual(results[1]['Cambi_feature_cambi_score'],
0.0014658541666666667, places=4)
def test_run_cambi_fextractor_scaled(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing_scaled()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.9204257916666666, places=4)
self.assertAlmostEqual(results[1]['Cambi_feature_cambi_score'],
0.004251791666666667, places=4)
def test_run_cambi_fextractor_scaled_b(self):
_, _, asset, asset_original = set_default_cambi_video_for_testing_b()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
1.218365, places=4)
def test_run_cambi_fextractor_10b(self):
_, _, asset, asset_original = set_default_cambi_video_for_testing_10b()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.01451, places=4)
def test_run_cambi_fextractor_max_log_contrast(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={'max_log_contrast': 4}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.9182153958333333, places=4)
self.assertAlmostEqual(results[1]['Cambi_feature_cambi_score'],
0.0024499791666667, places=4)
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={'max_log_contrast': 0}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.015840666666666666, places=4)
self.assertAlmostEqual(results[1]['Cambi_feature_cambi_score'],
0.000671125, places=4)
def test_run_cambi_fextractor_full_reference(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = CambiFullReferenceFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_score'],
0.689250, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_source_score'],
0.00146585416, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_full_reference_score'],
0.687784, places=4)
def test_run_cambi_fextractor_full_reference_scaled_ref(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = CambiFullReferenceFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={'src_width': 480, 'src_height': 270}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_score'],
0.689250, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_source_score'],
0.0042517916, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_full_reference_score'],
0.6849983125, places=4)
class CambiQualityRunnerTest(MyTestCase):
def test_run_cambi_runner(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.qrunner = CambiQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_score'],
0.6892500624999999, places=4)
self.assertAlmostEqual(results[1]['Cambi_score'],
0.0014658541666666667, places=4)
def test_run_cambi_runner_scale(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing_scaled()
self.qrunner = CambiQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_score'],
0.9204257916666666, places=4)
self.assertAlmostEqual(results[1]['Cambi_score'],
0.004251791666666667, places=4)
def test_run_cambi_runner_scale_b(self):
_, _, asset, asset_original = set_default_cambi_video_for_testing_b()
self.qrunner = CambiQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_score'],
1.218365, places=4)
def test_run_cambi_runner_10b(self):
_, _, asset, asset_original = set_default_cambi_video_for_testing_10b()
self.qrunner = CambiQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_score'],
0.01451, places=4)
def test_run_cambi_runner_fullref(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.qrunner = CambiFullReferenceQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_FR_score'],
0.687784125, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_score'],
0.68925006249, places=4)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
python
|
import os.path
import sys
base_path = os.path.abspath('..')
aragog_app = os.path.join(base_path, 'app')
sys.path.insert(0, aragog_app)
|
python
|
import os
import sys
from argparse import ArgumentParser
from typing import List, Tuple
from requests import HTTPError # noqa
from adventofcode.config import ROOT_DIR
from adventofcode.scripts.get_inputs import get_input
from adventofcode.util.console import console
from adventofcode.util.input_helpers import get_input_for_day
def add_day():
year, day = _parse_args(sys.argv[1:])
console.print(f'Creating solution day file for year {year} day {day}')
# Solution file
module_path = os.path.join(ROOT_DIR, f'year_{year}')
solution_file = os.path.join(module_path, f'day_{day:02}_{year}.py')
create_module_dir(module_path)
write_solution_template(solution_file, year, day)
# Test file
test_module_path = os.path.abspath(os.path.join(ROOT_DIR, '../../tests', f'year_{year}'))
test_file = os.path.join(test_module_path, f'test_day_{day:02}_{year}.py')
create_module_dir(test_module_path)
write_test_template(test_file, year, day)
# Empty test input
    test_input_module_path = os.path.abspath(os.path.join(ROOT_DIR, '../../tests', f'year_{year}', 'inputs'))
test_file_input = os.path.join(test_input_module_path, f'day_{day:02}.txt')
create_dir(test_input_module_path)
write_template(test_file_input, "")
verify_input_exists(year, day)
def write_solution_template(path: str, year: int, day: int) -> None:
if not os.path.exists(path):
write_template(path, read_solution_template(year, day))
console.print(f'[green]Wrote template to {path}')
else:
console.print(f'[yellow]Did not write template for year {year} day {day}, the file already exists.')
def write_test_template(path: str, year: int, day: int) -> None:
if not os.path.exists(path):
write_template(path, read_test_template(year, day))
console.print(f'[green]Wrote test template to {path}')
else:
console.print(f'[yellow]Did not write test template for year {year} day {day}, the file already exists.')
def create_module_dir(path: str) -> None:
create_dir(path)
if not os.path.exists(init_file := os.path.join(path, '__init__.py')):
        with open(init_file, 'w'):  # create an empty __init__.py so the directory is importable
pass
def create_dir(path: str) -> None:
if not os.path.exists(path):
os.mkdir(path)
def verify_input_exists(year: int, day: int) -> None:
try:
_ = get_input_for_day(year, day)
console.print(f'Input data already exists for year {year} day {day}, skipping download')
return
except FileNotFoundError:
try:
get_input(year, day)
console.print(f'Automatically downloaded input data for year {year} day {day}')
return
except HTTPError as e:
console.print(f'[red]Could not retrieve input data for year {year} day {day} automatically: {e}')
except FileNotFoundError:
console.print(f'[red]Could not retrieve input data for year {year} day {day}: .session not set correctly')
raise ValueError('unknown exception occurred in verify_input_exists')
def _read_solution_template(template_path: str, year: str, day: str) -> str:
with open(template_path) as f:
template = f.read()
template = template.replace('{year}', year)
template = template.replace('{day}', day)
return template
def _read_test_template(template_path: str, year: str, day: str, file_day: str) -> str:
with open(template_path) as f:
template = f.read()
template = template.replace('{year}', year)
template = template.replace('{day}', day)
template = template.replace('{file_day}', file_day)
return template
def read_solution_template(year: int, day: int) -> str:
template_path = os.path.join(ROOT_DIR, 'scripts/templates/day_template.txt')
return _read_solution_template(template_path, str(year), str(day))
def read_test_template(year: int, day: int) -> str:
template_path = os.path.join(ROOT_DIR, 'scripts/templates/test_template.txt')
return _read_test_template(template_path, str(year), str(day), f'{day:02}')
def write_template(filename: str, template: str):
with open(filename, 'w') as f:
f.write(template)
def _parse_args(args: List[str]) -> Tuple[int, int]:
parser = ArgumentParser(description='Add a day')
parser.add_argument('year', type=int, help='The year of the exercise')
parser.add_argument('day', type=int, help='The day of the exercise')
parsed = parser.parse_args(args)
return parsed.year, parsed.day
if __name__ == '__main__':
add_day()
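# Hedged usage note: the script is intended to be run from the command line,
# e.g. `python -m adventofcode.scripts.add_day 2021 5` (the exact module path
# is an assumption). The argument parsing can also be exercised directly:
# year, day = _parse_args(['2021', '5'])   # -> (2021, 5)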
|
python
|
###########################
#
# #327 Rooms of Doom - Project Euler
# https://projecteuler.net/problem=327
#
# Code by Kevin Marciniak
#
###########################
|
python
|
from setuptools import setup
package_name = "simdash"
description = "A web based dashboard for visualizing simulations"
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name=package_name,
description=description,
maintainer="Parantapa Bhattacharya",
maintainer_email="[email protected]",
long_description=long_description,
long_description_content_type="text/markdown",
    packages=[package_name, "%s.database" % package_name, "%s.viz" % package_name],
scripts=["bin/%s" % package_name],
use_scm_version=True,
setup_requires=['setuptools_scm'],
install_requires=[
"click",
"click_completion",
"logbook",
"flask",
"altair",
"pandas",
"toml",
],
url="http://github.com/NSSAC/%s" % package_name,
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
|
python
|
from django.contrib import admin
from django.urls import path, include
from . import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('profile', views.Profile_viewSet)
router.register('posts', views.Post_viewSet)
router.register('users', views.User_viewSet)
urlpatterns = [
path('',views.index, name = 'homepage' ),
path('signup/', views.signup, name='signup'),
path('', include('django.contrib.auth.urls')),
path('api/', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('profile/<username>/info', views.user_profile, name='profile'),
path('project/<post>', views.project_rating, name='project'),
path('profile/<username>/edit', views.edit_profile, name='editprofile'),
path('search/', views.search_project, name='search'),
]
|
python
|
# -*- coding: utf-8 -*-
'''
(c) Copyright 2013 Telefonica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefonica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefonica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
'''
from lettuce import step, world
from common.rest_utils import RestUtils
from common.api_utils import APIUtils
from common.test_utils import TestUtils
from common.mongo_utils import MongoUtils
rest_utils = RestUtils()
test_utils = TestUtils()
api_utils = APIUtils()
mongo_utils = MongoUtils()
@step(u'the DB is working')
def the_db_is_working(step):
mongo_utils.the_db_is_working(step)
@step(u'the DB has stopped working')
def the_db_has_stopped_working(step):
mongo_utils.the_db_has_stopped_working(step)
@step(u'I send to (.*) the data (.*)')
def i_send_to_url_the_data(step, url, data):
rest_utils.send_to_url_the_data(step, url, data)
@step(u'I get a success response of type (\d+) with location (.+):')
def i_get_a_success_response_of_type_with_location(step, status_code, location_index):
rest_utils.get_a_success_response_of_type_with_location(step, status_code, location_index)
@step(u'I get an error response of type (\d+) with error code (\w+)')
def i_get_an_error_response_of_type_with_error_code(step, status_type, error_code):
rest_utils.get_an_error_response_of_type_with_error_code(step, status_type, error_code)
@step(u'I send to (.*) the instance data (\d+):')
def i_send_to_url_the_instance_data(step, url, class_index):
api_utils.send_to_url_the_instance_data(step, url, class_index)
@step(u'a class has already been published with data (\d+):')
def a_class_has_already_been_published_with_data(step, old_class_index):
""" Rest wolrd variables for scenario execution"""
api_utils.a_class_has_already_been_published_with_data(step, old_class_index)
@step(u'a class has not already been published with data (.*):')
def a_class_has_not_already_been_published_with_data(step, old_class_index):
api_utils.a_class_has_not_already_been_published_with_data(step, old_class_index)
@step(u'an instance has already been published with data (\d+):')
def an_instance_has_already_been_published_with_data(step, old_instance_index):
api_utils.an_instance_has_already_been_published_with_data(step, old_instance_index)
@step(u'the response contains the instance data')
def the_response_contains_the_instance_data(step):
api_utils.the_response_contains_the_instance_data()
@step(u'the location returns the instance data')
def the_location_returns_the_instance_data(step):
api_utils.the_url_returns_the_instance_data(step, world.location)
@step(u'I send to (.*) the rule data (\d+):')
def i_send_to_url_the_rule_data(step, url, rule_index):
api_utils.send_to_url_the_rule_data(step, url, rule_index)
@step(u'the response contains the rule data')
def the_response_contains_the_rule_data(step):
api_utils.the_response_contains_the_rule_data()
@step(u'the location returns the rule data')
def the_location_returns_the_rule_data(step):
api_utils.the_url_returns_the_rule_data(step, world.location)
@step(u'the following bindings rules are available (.*):')
def the_following_bindings_rules_are_available(step, operation_index):
api_utils.the_following_bindings_rules_are_avalilabe(step, operation_index)
@step(u'there is a context rule already been published with data (\d+):')
def there_is_a_context_rule_already_been_published_with_data(step, old_rule_index):
api_utils.there_is_a_context_rule_already_been_published_with_data(step, old_rule_index)
@step(u'the following bindings in (\d+) are available for the context rules:')
def and_the_following_bindings_in_bindings_index_are_available_for_the_context_rules(step, binding_index):
api_utils.the_following_bindings_are_available_for_the_context_rules(step, binding_index)
@step(u'I request the resource (.*) with parameters (\d+):')
def request_the_resource_with_parameters(step, url, params_index):
rest_utils.request_the_resource(step, url, params_index)
@step(u'I get a success response of type (\d+) with a result set of size (\d+)')
def get_a_success_response_of_type_with_resultset_of_size(step, status_code, size):
rest_utils.get_a_success_response_of_type_with_resultset_of_size(step, status_code, size)
@step(u'the result set contains the instance (\d+) in position (\d+):')
def the_resultset_contains_instance_in_position(step, instance_index, position):
api_utils.the_resultset_contains_instance_in_position(step, instance_index, position)
@step(u'the result set contains the instance (\d+):')
def the_resultset_contains_instance(step, instance_index):
api_utils.the_resultset_contains_instance_in_position(step, instance_index)
@step(u'And the previous bindings are pusblished for the context (\d+):')
def and_the_previous_bindings_are_pusblished_for_the_context_operation_index(step, context_index):
api_utils.send_to_url_the_rule_data(step, "$base_api_url/$bindings_url", context_index)
@step(u'the exceptionText contains (\d+)')
def the_exceptiontext_contains_exceptiontext(step, exceptionText_index):
api_utils.the_exceptiontext_contains_exceptiontext(step, exceptionText_index)
@step(u'the instance published in position (\d+) has been deleted')
def the_instance_published_has_been_deleted(step, position):
i_delete_url(step, "$base_api_url/$classes_url/$class_name/$instances_url/" + api_utils.get_instance_id(position))
@step(u'I delete resource (\d+):')
def i_delete_resource(step, resource_index):
i_delete_url(step, step.hashes[int(resource_index)]["resource"])
@step(u'I delete (.*)')
def i_delete_url(step, url):
rest_utils.delete_url(step, url)
@step(u'I check the resource (\d+):')
def i_check_the_resource(step, resource_index):
request_the_resource(step, step.hashes[int(resource_index)]["resource"])
@step(u'I request the resource (.*)')
def request_the_resource(step, url):
rest_utils.request_the_resource(step, url)
@step(u'I update (.*) with the user data (\d+):')
def i_update_url_with_the_user_data(step, url, user_data_index):
api_utils.send_to_url_the_user_data(step, url+step.hashes[int(user_data_index)]["username"], user_data_index)
@step(u'the response contains the user data')
def the_response_contains_the_user_data(step):
api_utils.the_response_contains_the_user_data()
@step(u'the location returns the user data')
def the_location_returns_the_user_data(step):
api_utils.the_url_returns_the_user_data(step, world.location)
@step(u'I get a success response of type (\d+)')
def i_get_a_success_response_of_type(step, status_code):
rest_utils.get_a_success_response_of_type(step, status_code)
@step(u'the URL (.*) returns the error code (\d+) with error code (\w+)')
def the_url_returns_an_error_of_type_with_error_code(step, url, status_code, error_code):
api_utils.the_url_returns_an_error_of_type_with_error_code(step, url, status_code, error_code)
@step(u'I send to (.*) the class data (\d+):')
def i_send_to_url_the_class_data(step, url, class_index):
api_utils.send_to_url_the_class_data(step, url, class_index)
@step(u'the DB has no classes already published')
def the_db_has_no_classes_already_published(step):
pass # the database is cleaned before each scenario
@step(u'the user performing the operation is:')
def the_user_performing_the_operation_is(step):
test_utils.reset_world()
world.request_user = step.hashes[0]["username"]
world.request_password = step.hashes[0]["password"]
assert True
|
python
|
from gerais import *
from tkinter import messagebox
# ===================Users==============
# ====Check whether a user exists====
def existeUsuario(dic,chave):
if chave in dic.keys():
return True
else:
return False
# ====Insert a user====
def insereUsuario(dic):
email = input("Digite o email:")
municipio = input("Digite o municipio:")
if existeUsuario(dic, email):
print("Usuario já cadastrado!")
pausa()
else:
nome = input("Digite o nome: ")
dic[email]=(nome, municipio)
print("Dados inseridos com sucesso!")
pausa()
# ====Display a user====
def mostraUsuario(dic,chave):
if existeUsuario(dic,chave):
dados = dic[chave]
print(f"Nome: {dados[0]}")
print(f"Email: {chave}")
print(f"Município: {dados[1]}")
else:
print("Usuario não cadastrada!")
# ====Update a user====
def alteraUsuario(dic,chave):
if existeUsuario(dic,chave):
mostraUsuario(dic,chave)
confirma = input("Tem certeza que deseja alterá-lo? (S/N): ").upper()
if confirma == 'S':
nome = input("Digite o nome: ")
municipio = input("Digite o município: ")
dic[chave]=(nome, municipio)
print("Dados alterados com sucesso!")
pausa()
else:
print("Alteração cancelada!")
pausa()
else:
print("Usuario não cadastrado!")
pausa()
# ====Remove a user====
def removeUsuario(dic,chave):
if existeUsuario(dic,chave):
mostraUsuario(dic,chave)
confirma = input("Tem certeza que deseja apagar? (S/N): ").upper()
if confirma == 'S':
del dic[chave]
print("Dados apagados com sucesso!")
pausa()
else:
print("Exclusão cancelada!")
pausa()
else:
print("Pessoa não cadastrada!")
pausa()
# ====Show all users====
def mostraTodosUsuarios(dic):
print("Relatório: Todas os usuarios\n")
print("EMAIL - NOME - MUNICÍPIO\n")
for email in dic:
tupla = dic[email]
linha = email + " - " + tupla[0] + " - " + tupla[1]
print(linha)
print("")
pausa()
# ======Write data to the file=====
def gravaUsuarios(dic):
arq = open("usuarios.txt", "w")
for email in dic:
tupla = dic[email]
linha = email+";"+tupla[0]+";"+tupla[1]+"\n"
arq.write(linha)
arq.close()
# ======Read data from the file====
def recuperaUsuarios(dic):
if (existe_arquivo("usuarios.txt")):
arq = open("usuarios.txt", "r")
for linha in arq:
linha = linha[:len(linha)-1]
lista = linha.split(";")
nome = lista[1]
email = lista[0]
municipio = lista[2]
dic[email] = (nome, municipio)
# encode("windows-1252").decode("utf-8")
# def verificaMunicipio(municipio):
# ====Users menu====
def menuUsuarios(dicUsuarios):
opc = 0
while ( opc != 6 ):
print("\nGerenciamento de usuarios:\n")
print("1 - Insere Usuario")
print("2 - Altera Usuario")
print("3 - Remove Usuario")
print("4 - Mostra um Usuario")
print("5 - Mostra todos os Usuarios")
print("6 - Sair do menu de Usuarios")
opc = int( input("Digite uma opção: ") )
if opc == 1:
insereUsuario(dicUsuarios)
elif opc == 2:
email = input("Email a ser alterado: ")
alteraUsuario(dicUsuarios, email)
elif opc == 3:
email=input("Email a ser removido: ")
removeUsuario(dicUsuarios, email)
elif opc == 4:
email=input("Email a ser consultado: ")
mostraUsuario(dicUsuarios, email)
pausa()
elif opc == 5:
mostraTodosUsuarios(dicUsuarios)
elif opc == 6:
gravaUsuarios(dicUsuarios)
# ====Insert a user INTERFACE====
def insereUsuarioInterface(dic, email, nome, municipio):
if existeUsuario(dic, email):
print("Usuario já cadastrado!")
messagebox.showinfo("Info", "Usuario já cadastrado!")
else:
dic[email]=(nome, municipio)
print("Dados inseridos com sucesso!")
messagebox.showinfo("Info", "Dados inseridos com sucesso!")
# ====Remove a user INTERFACE====
def removeUsuarioInterface(dic,chave):
if existeUsuario(dic,chave):
del dic[chave]
print("Dados apagados com sucesso!")
messagebox.showinfo("Info", "Dados apagados com sucesso!")
else:
print("Pessoa não cadastrada!")
messagebox.showinfo("Info", "Pessoa não cadastrada!")
# ====Update a user INTERFACE====
def alteraUsuarioInterface(dic, chave, nome, municipio):
if existeUsuario(dic,chave):
mostraUsuario(dic,chave)
dic[chave]=(nome, municipio)
print("Dados alterados com sucesso!")
messagebox.showinfo("Info", "Dados alterados com sucesso!")
else:
print("Usuario não cadastrado!")
messagebox.showinfo("Info", "Usuario não cadastrado!")
# ====Display a user INTERFACE====
def mostraUsuarioInterface(dic,chave):
if existeUsuario(dic,chave):
dados = dic[chave]
return(dados[0], chave, dados[1])
else:
return(False, False, False)
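# Hedged usage sketch: the user "database" is just a dict mapping
# email -> (name, municipality); the GUI helpers operate on it directly.
# The names and e-mail below are illustrative placeholders only.
# usuarios = {}
# insereUsuarioInterface(usuarios, "ana@example.com", "Ana", "Recife")
# mostraUsuarioInterface(usuarios, "ana@example.com")   # -> ("Ana", "ana@example.com", "Recife")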
|
python
|
from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.common.utils import parse_datetime
from waldur_mastermind.marketplace import callbacks, models
from waldur_mastermind.marketplace.tests import factories
@freeze_time('2018-11-01')
class CallbacksTest(test.APITransactionTestCase):
def test_when_resource_is_created_new_period_is_opened(self):
# Arrange
start = parse_datetime('2018-11-01')
plan = factories.PlanFactory()
resource = factories.ResourceFactory(plan=plan)
order_item = factories.OrderItemFactory(
state=models.OrderItem.States.EXECUTING,
resource=resource,
)
# Act
callbacks.resource_creation_succeeded(resource)
# Assert
self.assertTrue(
models.ResourcePlanPeriod.objects.filter(
resource=resource, plan=plan, start=start, end=None
).exists()
)
order_item.refresh_from_db()
self.assertEqual(order_item.state, models.OrderItem.States.DONE)
def test_when_plan_is_changed_old_period_is_closed_new_is_opened(self):
# Arrange
old_start = parse_datetime('2018-10-01')
new_start = parse_datetime('2018-11-01')
old_plan = factories.PlanFactory()
new_plan = factories.PlanFactory()
resource = factories.ResourceFactory(plan=old_plan)
old_period = models.ResourcePlanPeriod.objects.create(
resource=resource, plan=old_plan, start=old_start, end=None
)
order_item = factories.OrderItemFactory(
state=models.OrderItem.States.EXECUTING,
type=models.OrderItem.Types.UPDATE,
resource=resource,
plan=new_plan,
)
# Act
callbacks.resource_update_succeeded(resource)
# Assert
order_item.refresh_from_db()
self.assertEqual(order_item.state, models.OrderItem.States.DONE)
old_period.refresh_from_db()
self.assertEqual(old_period.end, new_start)
self.assertTrue(
models.ResourcePlanPeriod.objects.filter(
resource=resource, plan=new_plan, start=new_start, end=None
).exists()
)
def test_when_resource_is_terminated_old_period_is_closed(self):
# Arrange
start = parse_datetime('2018-10-01')
end = parse_datetime('2018-11-01')
plan = factories.PlanFactory()
resource = factories.ResourceFactory(plan=plan)
period = models.ResourcePlanPeriod.objects.create(
resource=resource, plan=plan, start=start, end=None
)
order_item = factories.OrderItemFactory(
state=models.OrderItem.States.EXECUTING,
type=models.OrderItem.Types.TERMINATE,
resource=resource,
plan=plan,
)
# Act
callbacks.resource_deletion_succeeded(resource)
# Assert
order_item.refresh_from_db()
self.assertEqual(order_item.state, models.OrderItem.States.DONE)
period.refresh_from_db()
self.assertEqual(period.end, end)
def test_when_resource_is_terminated_directly_old_period_is_closed(self):
# Arrange
start = parse_datetime('2018-10-01')
end = parse_datetime('2018-11-01')
plan = factories.PlanFactory()
resource = factories.ResourceFactory(
plan=plan, state=models.Resource.States.ERRED
)
period = models.ResourcePlanPeriod.objects.create(
resource=resource, plan=plan, start=start, end=None
)
# Act
resource.state = models.Resource.States.TERMINATED
resource.save()
# Assert
period.refresh_from_db()
self.assertEqual(period.end, end)
|
python
|
#!/usr/bin/env python3
class TypedList:
''' List-like class that allows only a single type of item '''
def __init__(self, example_element, initial_list = []):
self.type = type(example_element)
if not isinstance(initial_list, list):
raise TypeError("Second argument of TypedList must "
"be a list.")
for element in initial_list:
self.__check(element)
self.elements = initial_list[:]
def __check(self, element):
if type(element) != self.type:
raise TypeError("Attempted to add an element of "
"incorrect type to a typed list.")
def __setitem__(self, i, element):
self.__check(element)
self.elements[i] = element
def __getitem__(self, i):
return self.elements[i]
def __str__(self):
to_string = '{}'.format(self.elements)
return to_string
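# Hedged usage sketch: the example element only fixes the accepted type;
# assignments of any other type raise TypeError.
if __name__ == '__main__':
    tl = TypedList(0, [1, 2, 3])    # accepts ints only
    tl[1] = 42                      # fine: same type as the example element
    print(tl)                       # [1, 42, 3]
    try:
        tl[0] = 'oops'              # wrong type
    except TypeError as error:
        print(error)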
|
python
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List
import torch
from reagent import types as rlt
from reagent.models.base import ModelBase
from reagent.models.fully_connected_network import FullyConnectedNetwork
class FullyConnectedCritic(ModelBase):
def __init__(
self,
state_dim: int,
action_dim: int,
sizes: List[int],
activations: List[str],
use_batch_norm: bool = False,
use_layer_norm: bool = False,
output_dim: int = 1,
):
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim)
self.state_dim = state_dim
self.action_dim = action_dim
assert len(sizes) == len(
activations
), "The numbers of sizes and activations must match; got {} vs {}".format(
len(sizes), len(activations)
)
self.fc = FullyConnectedNetwork(
[state_dim + action_dim] + sizes + [output_dim],
activations + ["linear"],
use_batch_norm=use_batch_norm,
use_layer_norm=use_layer_norm,
)
def input_prototype(self):
# for inference: (batchsize, feature_dim)
return (
rlt.FeatureData(torch.randn(1, self.state_dim)),
rlt.FeatureData(torch.randn(1, self.action_dim)),
)
def forward(self, state: rlt.FeatureData, action: rlt.FeatureData):
assert (
len(state.float_features.shape) == len(action.float_features.shape)
and len(action.float_features.shape) == 2
and (state.float_features.shape[0] == action.float_features.shape[0])
), (
f"state shape: {state.float_features.shape}; action shape: "
f"{action.float_features.shape} not equal to (batch_size, feature_dim)"
)
cat_input = torch.cat((state.float_features, action.float_features), dim=-1)
return self.fc(cat_input)
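# Hedged usage sketch: the dimensions below are illustrative only; it reuses
# the reagent imports already present in this module.
if __name__ == "__main__":
    critic = FullyConnectedCritic(
        state_dim=8, action_dim=2, sizes=[64, 32], activations=["relu", "relu"]
    )
    state, action = critic.input_prototype()
    q_value = critic(state, action)
    print(q_value.shape)  # expected: torch.Size([1, 1])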
|
python
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .models import *
import apiclient
from apiclient.discovery import build
from django.core.mail import send_mail,EmailMessage
from apiclient.errors import HttpError
from oauth2client.tools import argparser
from django.core.cache import cache
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
import os
import httplib2
import sys
#import urllib3
import json
# from music.tasks import send_feedback_email_task
from celery.decorators import task
from celery.utils.log import get_task_logger
from dbms import celery_app
logger = get_task_logger(__name__)
#import classes
from music.classes import *
DEVELOPER_KEY = "AIzaSyC4lxc1NfUV09y_vX9kTiRKvSbK6bc6rP0"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# # This OAuth 2.0 access scope allows for full read/write access to the
# # authenticated user's account.
# YOUTUBE_READ_WRITE_SCOPE = "https://www.googleapis.com/auth/youtube"
# CLIENT_SECRETS_FILE = "client_secrets.json"
# # This variable defines a message to display if the CLIENT_SECRETS_FILE is
# # missing.
# MISSING_CLIENT_SECRETS_MESSAGE = """
# WARNING: Please configure OAuth 2.0
# To make this sample run you will need to populate the client_secrets.json file
# found at:
# %s
# with information from the {{ Cloud Console }}
# {{ https://cloud.google.com/console }}
# For more information about the client_secrets.json file format, please visit:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
# """ % os.path.abspath(os.path.join(os.path.dirname(__file__),
# CLIENT_SECRETS_FILE))
# def get_authenticated_service(args):
# flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
# scope=YOUTUBE_READ_WRITE_SCOPE,
# message=MISSING_CLIENT_SECRETS_MESSAGE)
# #storage = Storage("%s-oauth2.json" % sys.argv[0])
# storage = Storage("subscriptions-oauth2.json")
# credentials = storage.get()
# if credentials is None or credentials.invalid:
# credentials = run_flow(flow, storage, args)
# return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
# http=credentials.authorize(httplib2.Http()))
# # argparser.add_argument("--user", help="ID of the channel to subscribe to.",
# # default="Phaneendra Babu")
# # args = argparser.parse_args()
# #args = argparser.parse_args()
# args=""
# youtube = get_authenticated_service(args)
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
# class Video:
# def __init__(self):
# self.title = ""
# self.id = ""
# self.description = ""
# self.thumbnail_url = ""
# self.thumbnail_width = 0
# self.thumbnail_height = 0
# self.channelTitle = ""
# self.duration = ""
# self.caption = ""
# self.viewCount = 0
# self.likeCount = 0
# class Channel:
# def __init__(self):
# self.title = ""
# self.id = ""
# self.description = ""
# self.thumbnail_url = ""
# self.thumbnail_width = 100
# self.thumbnail_height = 100
# class Playlist:
# def __init__(self):
# self.id=""
# self.title=""
# self.channelId=""
# self.channelTitle=""
# self.thumbnail_url=""
# self.thumbnail_width = 100
# self.thumbnail_height = 100
# class PlayListItem:
# def __init__(self):
# self.playlistId=""
# self.id=""
# self.title=""
# self.description=""
# self.thumbnail_url=""
# self.thumbnail_width=100
# self.thumbnail_height=100
# self.channelTitle=""
# Create your views here.
def home(request):
return render(request, 'music/home.html', {})
def login(request):
m=""
#m=send_feedback_email_task.delay("HI It is our DBMS Project").result
print("Hello")
print("Message: %s" % m)
#print(add.delay(4,5).get())
return render(request, 'music/login.html', {})
def register(request):
return render(request, 'music/register.html', {})
def savedetails(request):
firstname = request.POST["firstname"]
lastname = request.POST["lastname"]
email = request.POST["email"]
mobile = request.POST["mobile"]
username = request.POST["username"]
password = request.POST["password"]
try:
o = Login.objects.get(username=username)
return render(request, 'music/register.html', {'error_message': username + " already taken"})
except (KeyError, Login.DoesNotExist):
l = Login(username=username, password=password)
l.save()
l.detail_set.create(firstname=firstname, lastname=lastname, email=email, mobile=mobile)
return render(request, 'music/login.html', {'error_message': "Account Successfully Registered.Login Here"})
def validate(request):
uname = request.POST["username"]
pwd = request.POST["password"]
try:
user = Login.objects.get(username=uname)
except (KeyError, Login.DoesNotExist):
return render(request, 'music/login.html', {'error_message': "Username is not found in database"})
else:
if pwd == user.password:
# return HttpResponseRedirect('music:user', args=(user.id,))
detail = Detail.objects.get(pk=user.id)
send_mail("Conformation of DBMS Accout","PLease Click Below link to confirm your email you registered on DBMS",
'[email protected]',['[email protected]'],fail_silently=True)
#popular_videos = cache.get_or_set('popular',popular(),100000)
popular_videos=get_popular_videos()
# popular_videos=cache.get('popular_videos')
# if popular_videos is None:
# print("Not cached")
# popular_videos=popular()
# cache.set('popular_videos',popular_videos,600)
#popular_channels = cache.get_or_set('popular_channels',popular_channels(),1000000)
popular_channels_list = popular_channels()
context = {
'id': user.id,
'fullname': detail.firstname + detail.lastname,
'email': detail.email,
'popular_videos': popular_videos,
'popular_channels':popular_channels_list,
}
return render(request, 'music/user.html', context)
else:
return render(request, 'music/login.html', {'error_message': "Incorrect Username,Password Combination"})
def user(request, id):
return render(request, "music/user.html", {'id': id})
def search(request):
query = request.POST["search"]
search_response = youtube.search().list(
q=query,
part="id,snippet",
maxResults=5
).execute()
videos = []
channels = []
# playlists = []
if 'nextPageToken' in search_response:
print(search_response['nextPageToken'])
#channels2,videos2=get_next_page.delay(search_response['nextPageToken']).get()
#videos2=get_next_page.delay(search_response['nextPageToken']).get()
print("got next page")
for search_result in search_response.get("items", []):
# print search_result
# if "snippet" in search_result and "thumbnails" in search_result["snippet"] and "default" in search_result["snippet"]["thumbnails"]:
# print search_result["snippet"]["thumbnails"]["default"]
if search_result["id"]["kind"] == "youtube#video":
v = Video()
if "id" in search_result and "videoId" in search_result["id"]:
v.id = search_result["id"]["videoId"]
get_info(v, search_result)
videos.append(v)
elif search_result["id"]["kind"] == "youtube#channel":
ch = Channel()
get_channel_info(ch, search_result)
channels.append(ch)
return render(request, 'music/search.html', {'query': query, 'videos': videos, 'channels': channels})
def watch(request, id):
related_videos = related(id)
return render(request, 'music/watch.html', {'id': id, 'related_videos': related_videos})
# channel playlists can be obtained from playlist.list or using contentDetails in channel
# channel["contentDetails"]["relatedPlaylists"]["uploads"]
def channel(request,id):
search_response = youtube.playlists().list(
channelId=id,
part="id,snippet"
).execute()
playlists=[]
print("Channel Id : ",id)
for search_result in search_response.get("items",[]):
pl=Playlist()
if "id" in search_result:
pl.id=search_result["id"]
print("Playlist Id : ",pl.id)
pl.title=search_result["snippet"]["title"]
pl.channelId=search_result["snippet"]["channelId"]
pl.channelTitle=search_result["snippet"]["channelTitle"]
pl.thumbnail_url=search_result["snippet"]["thumbnails"]["default"]["url"]
pl.thumbnail_width=search_result["snippet"]["thumbnails"]["default"]["width"]
pl.thumbnail_height=search_result["snippet"]["thumbnails"]["default"]["height"]
playlists.append(pl)
context={
'channel_id':id,
'playlists':playlists,
}
return render(request,'music/channel.html',context)
def playlist(request,id):
search_response = youtube.playlistItems().list(
playlistId=id,
part="id,snippet"
).execute()
playlistItems=[]
for search_result in search_response.get("items",[]):
pli=PlayListItem()
pli.playlistId=search_result["id"]
pli.id=search_result["snippet"]["resourceId"]["videoId"]
pli.title=search_result["snippet"]["title"]
pli.description=search_result["snippet"]["description"]
pli.thumbnail_url=search_result["snippet"]["thumbnails"]["default"]["url"]
pli.thumbnail_width=search_result["snippet"]["thumbnails"]["default"]["width"]
pli.thumbnail_height=search_result["snippet"]["thumbnails"]["default"]["height"]
pli.channelTitle=search_result["snippet"]["channelTitle"]
playlistItems.append(pli)
context={'playlistItems':playlistItems}
print("playlist Id : ",id)
return render(request,'music/playlist.html',context)
def popular():
# youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
# developerKey=DEVELOPER_KEY)
print("Popular Request")
video_response = youtube.videos().list(
chart="mostPopular",
part='id,snippet,statistics,contentDetails',
maxResults=5,
videoCategoryId="10",
).execute()
videos = []
# print(video_response)
# Add each result to the list, and then display the list of matching videos.
for video_result in video_response.get("items", []):
v = Video()
if "id" in video_result:
v.id = video_result["id"]
get_info(v, video_result)
videos.append(v)
# print("Videos:\n", "\n".join(videos), "\n")
return videos
def popular_channels():
print("popular_channels request")
search_response = youtube.channels().list(
categoryId="GCTXVzaWM",
part="snippet,id,contentDetails",
maxResults=5
).execute()
channels=[]
for search_result in search_response.get("items",[]):
ch=Channel()
ch.id=search_result["id"]
ch.description=search_result["snippet"]["description"]
ch.title=search_result["snippet"]["title"]
ch.thumbnail_url=search_result["snippet"]["thumbnails"]["default"]["url"]
# ch.thumbnail_width=search_result["snippet"]["thumbnails"]["default"]["width"]
# ch.thumbnail_height=search_result["snippet"]["thumbnails"]["default"]["height"]
channels.append(ch)
return channels
def related(id):
search_response = youtube.search().list(
type="video",
relatedToVideoId=id,
part="id,snippet",
maxResults=5,
).execute()
videos = []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
v = Video()
if "id" in search_result and "videoId" in search_result["id"]:
v.id = search_result["id"]["videoId"]
get_info(v, search_result)
videos.append(v)
return videos
def get_info(v, video_result):
# if "id" in video_result:
# v.id = video_result["id"]
if "snippet" in video_result:
if "title" in video_result["snippet"]:
v.title = video_result["snippet"]["title"]
if "description" in video_result["snippet"]:
v.description = video_result["snippet"]["description"]
if "thumbnails" in video_result["snippet"]:
if "default" in video_result["snippet"]["thumbnails"]:
if "url" in video_result["snippet"]["thumbnails"]["default"]:
v.thumbnail_url = video_result["snippet"]["thumbnails"]["default"]["url"]
# print(v.thumbnail_url)
if "width" in video_result["snippet"]["thumbnails"]["default"]:
v.thumbnail_width = video_result["snippet"]["thumbnails"]["default"]["width"]
if "height" in video_result["snippet"]["thumbnails"]["default"]:
v.thumbnail_height = video_result["snippet"]["thumbnails"]["default"]["height"]
if "channelTitle" in video_result["snippet"]:
v.channelTitle = video_result["snippet"]["channelTitle"]
if "contentDetails" in video_result:
if "duration" in video_result["contentDetails"]:
v.duration = video_result["contentDetails"]["duration"]
if "caption" in video_result["contentDetails"]:
v.caption = video_result["contentDetails"]["caption"]
if "statistics" in video_result:
if "viewCount" in video_result["statistics"]:
v.viewCount = video_result["statistics"]["viewCount"]
if "likeCount" in video_result["statistics"]:
v.likeCount = video_result["statistics"]["likeCount"]
# channel result in search
def get_channel_info(ch, search_result):
if "id" in search_result:
ch.id = search_result["id"]["channelId"]
if "snippet" in search_result:
if "channelTitle" in search_result["snippet"]:
ch.channelTitle = search_result["snippet"]["channelTitle"]
if "descritption" in search_result["snippet"]:
ch.description = search_result["snippet"]["description"]
if "thumbnails" in search_result["snippet"]:
if "default" in search_result["snippet"]["thumbnails"]:
if "url" in search_result["snippet"]["thumbnails"]["default"]:
ch.thumbnail_url = search_result["snippet"]["thumbnails"]["default"]["url"]
if "width" in search_result["snippet"]["thumbnails"]["default"]:
ch.thumbnail_width = search_result["snippet"]["thumbnails"]["default"]["width"]
if "height" in search_result["snippet"]["thumbnails"]["default"]:
ch.thumbnail_height = search_result["snippet"]["thumbnails"]["default"]["height"]
@task(name="get_next_page")
def get_next_page(token):
print("getting next page")
logger.info("getting next page")
search_response = youtube.search().list(
pageToken=token,
part="id,snippet",
maxResults=5
).execute()
videos = []
channels = []
# playlists = []
for search_result in search_response.get("items", []):
# print search_result
# if "snippet" in search_result and "thumbnails" in search_result["snippet"] and "default" in search_result["snippet"]["thumbnails"]:
# print search_result["snippet"]["thumbnails"]["default"]
if search_result["id"]["kind"] == "youtube#video":
v = Video()
if "id" in search_result and "videoId" in search_result["id"]:
v.id = search_result["id"]["videoId"]
get_info(v, search_result)
videos.append(v)
elif search_result["id"]["kind"] == "youtube#channel":
ch = Channel()
get_channel_info(ch, search_result)
channels.append(ch)
#return channels,videos
tu=(channels,videos)
return tu
@task(name="send_feedback_email_task")
def send_feedback_email_task(message):
"""sends an email when feedback form is filled successfully"""
logger.info("Sent feedback email")
#return send_feedback_email(email, message)
message="HI It is our DBMS Project"
print("Received : ",message)
return message
@task(name="sum_two_numbers")
def add(x, y):
return x + y
def get_popular_videos():
popular_videos=cache.get('popular_videos')
if popular_videos is None:
print("Not cached")
popular_videos=popular()
cache.set('popular_videos',popular_videos,120)
return popular_videos
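# Hedged note: Django's cache API can collapse the get/compute/set pattern above
# into a single call when given a callable, e.g.
# popular_videos = cache.get_or_set('popular_videos', popular, 120)
# (the callable is only invoked on a cache miss, unlike the commented-out
# variant earlier that calls popular() eagerly).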
|
python
|
def BSDriver(LoadCase):
# BoundingSurface J2 with kinematic hardening
# Written by Pedro Arduino, Mar. 22 2019
# Copyright Arduino Computational Geomechanics Group
# Ported into Python/Jupyter Notebook by Justin Bonus, Jul. 2019
#
#
# LoadCase:
# 1 ... proportionally increasing strain
# 2 ... cyclic strain
# 3 ... proportionally increasing stress
# 4 ... cyclic stress
#
# ====== LOADING CASES ==================================================
import numpy as np
from collections import namedtuple
nPoints = 200
## Switch for LoadCases:
## Pseudo-switch created by using python dictionary to hold LoadCase functions
def case_one():
case_one.time = np.linspace(0,1,nPoints+1)
case_one.strain = np.array([ 0.05, -0.015, -0.015, 0.000, 0.000, 0.000 ]).reshape(6,1) * case_one.time
case_one.StressDriven = 0
return case_one
def case_two():
nCycles = 3
omega = 0.15
case_two.time = np.linspace(0,nCycles*2*np.pi/omega,nCycles*nPoints+1);
case_two.strain = np.array([ 0.00, -0.000, -0.000, 0.045, 0.000, 0.000 ]).reshape(6,1) * np.sin( omega*case_two.time )
case_two.StressDriven = 0
return case_two
def case_three():
case_three.time = np.linspace(0,1,nPoints+1)
case_three.stress = np.array([[0.100],
[0.000],
[0.000],
[0.000],
[0.000],
[0.000]])*case_three.time + 0.0*np.array([1,1,1,0,0,0]).reshape(6,1)*np.ones( case_three.time.shape )
case_three.StressDriven = 1
return case_three
def case_four():
nCycles = 3
omega = 0.15
case_four.time = np.linspace(0, nCycles*2*np.pi/omega, nCycles*nPoints+1)
case_four.stress = np.array([[0.000],
[0.000],
[0.000], #.01, .03, -.01, .05, 0, -.02
[0.050],
[0.000],
[0.000]])*np.sin( omega*case_four.time ) + 0.0*np.array([1,1,1,0,0,0]).reshape(6,1)*np.ones( case_four.time.shape )
case_four.StressDriven = 1
return case_four
case_switcher = {
1: case_one,
2: case_two,
3: case_three,
4: case_four
}
case = case_switcher.get(LoadCase, lambda: "Invalid LoadCase")
case() #Runs the LoadCase function. Creates: case.time, case.strain | case.stress, case.StressDriven
time, StressDriven = case.time, case.StressDriven
if StressDriven:
stress = case.stress
strain = np.zeros((6,1)) #initialize empty 6x1 strain numpy array for stress-driven scenario
else:
strain = case.strain
stress = np.zeros((6,1)) #initialize empty 6x1 stress numpy array for strain-driven scenario
Stress0 = np.zeros((6,1)) #Initialize first 'unloading' point
StrainDriven = int(not StressDriven)
# ========================================================================
# ---- MATERIAL PARAMETERS
    # Static Parameters
E = 20 #Elastic Modulus MPa
    v = 0.49 #Poisson's ratio, less than 0.5 to allow compressibility
G = E/(2*(1+v)) #Shear modulus
K = E/(3*(1-2*v)) #Bulk modulus
Kmod = 0 #Isotropic Hardening
Su = 0.061 #Yield stress in 1-D tension test MPa
hh = G #kinematic hardening parameter
mm = 1.0 #kinematic hardening parameter
beta = 0.5 #midpoint integration
RR = np.sqrt(8/3)*Su
    #namedtuple used to organize related variables, similar to a structure
static = namedtuple('StaticParam',['E','v','G','K','Kmod','Su','hh','mm','beta','RR'])
StaticParam = static(E,v,G,K,Kmod,Su,hh,mm,beta,RR)
# ========================================================================
# ---- INITIAL CONDITIONS
# Initialize the state variables
if StrainDriven:
IniStress = -0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
IniStrain = np.linalg.solve(GetCe(StaticParam), IniStress) #Check if GetCe compacts to nxn
elif StressDriven:
IniStress = 0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
IniStrain = 0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
#Structure for IniState (initial state parameters, static) and CurState (changing state parameters)
state = namedtuple('state', ['eP','alphaISO','Stress0', 'Kappa', 'Psi'])
eP = 0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
alphaISO = 0.0
Stress0 = 0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
Kappa = 0.0
Psi = 0.0
IniState = state(eP, alphaISO, Stress0, Kappa, Psi)
# For first iteration
CurStress = IniStress
CurStrain = IniStrain
CurState = IniState
# Variables used for plotting
    alphaISO_plot, j2_plot, j2e_plot, stress_var_plot, stress_var2_plot = [], [], [], [], [] #Initialize list format
alphaISO_plot.append(0) #Python list allows for easy data addition
strain[:,0] = CurStrain.T - IniStrain.T
stress[:,0] = CurStress.T
j2_plot.append(0)
j2e_plot.append(0)
stress_var_plot.append(0)
Stress0[:,0] = CurStress.T
Iter = np.zeros(time.shape)
# ========================================================================
# ---- COMPUTATION CYCLES
if StrainDriven:
#StrainDriven
for i in range(1, (len(strain[0]) )):
NextStrain = strain[:,i] + IniStrain.T
dStrain = strain[:,i] - strain[:, i-1] #Driving variable
#Current BSRadialMap is a function, will be transformed into a class eventually
NextStress, NextState, NextCep = BSRadialMap(dStrain, StaticParam, CurStress, CurState)
# Update Stress, Strain, and State
CurStress = NextStress
CurState = NextState
# Variables created for plotting purposes
alphaISO_plot.append(CurState.alphaISO)
stress = np.append(stress, CurStress, 1)
j2_plot.append(GetJ2(CurStress))
stress_var_plot.append(np.sqrt(2*j2_plot[i])*np.sqrt(3/2)*np.sign(stress[0,i] - stress[1,i]))
stress_var2_plot.append((stress[0,i] - stress[1,i]))
Stress0 = np.append(Stress0, CurState.Stress0, 1)
elif StressDriven:
# StressDriven driver
# set tolerance value for iterative procedure(s)
TOLERANCE = 1e-10
for i in range(0, len(stress[0])-1):
# initialize strain epsilon_{n+1}^{(0)} = eps_{n} using the old state
# (this is the initial approximation for eps_{n+1}
if i == 0:
# special settings for initial values at t_1
NextStrain = np.array([0,0,0,0,0,0]).reshape(6,1)
dStrain = np.array([0,0,0,0,0,0]).reshape(6,1)
CurState = IniState
else:
NextStrain = CurStrain
dStrain = np.array([0,0,0,0,0,0]).reshape(6,1)
NextStress, NextState, Cep = BSRadialMap(dStrain, StaticParam, CurStress, CurState)
RR = stress[:, i].reshape(6,1) - NextStress
RR = RR.reshape(6,1)
RR0 = normS(RR)
# reset iteration counter
kk = 0
# iterate until convergence
while normS(RR)/RR0 > TOLERANCE:
# update strain from eps_{n+1}^{(k)} to eps_{n+1}^{(k+1)}
dStrain = np.linalg.solve(Cep, RR)
NextStrain = NextStrain + dStrain
# compute material response for estimated strain state
# NOTE: the state variables are taken at t_n
NextStress, NextState, Cep = BSRadialMap(dStrain, StaticParam, CurStress, CurState)
#print('NextStress:',NextStress)
#print('Stress0:',NextState.Stress0)
# check for equilibrium
RR = stress[:,i].reshape(6,1) - NextStress
RR = RR.reshape(6,1)
kk = kk + 1
                # emergency exit if procedure does not converge
if kk > 3:
print('procedure slow to converge. Error : ', normS( RR )/RR0)
if kk > 20:
print('procedure did not converge. Error : ', normS( RR )/RR0)
print('YOUR TANGENT Cep IS WRONG', normS( RR )/RR0)
break
Iter[i] = kk
CurStress = NextStress
CurState = NextState
# Update State variables for next step
CurStress = NextStress
CurStrain = NextStrain
CurState = NextState
# Update variables for plotting purposes
strain = np.append(strain, CurStrain, 1)
alphaISO_plot.append(CurState.alphaISO)
j2_plot.append(GetJ2(CurStress))
stress_var_plot.append(np.sqrt(2*j2_plot[i])*np.sqrt(3/2)*np.sign(stress[3,i]))
Stress0 = np.append(Stress0, CurState.Stress0, 1)
DriverOutput = namedtuple('DriverOutput',['StaticParam','time','strain','stress','alphaISO','j2','stress_var','stress_var2', 'Stress0','Iter'])
DriverOutput = DriverOutput(StaticParam, time, strain, stress, alphaISO_plot, j2_plot, stress_var_plot, stress_var2_plot, Stress0, Iter)
return DriverOutput
# =========================================================================
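# Hedged usage sketch: BSDriver relies on helper routines defined elsewhere in
# the notebook (GetCe, BSRadialMap, GetJ2, normS). LoadCase 2 drives a cyclic
# shear strain history, so plotting component 3 gives the hysteresis loop:
# out = BSDriver(2)
# import matplotlib.pyplot as plt
# plt.plot(out.strain[3, :], out.stress[3, :])
# plt.xlabel('engineering shear strain')
# plt.ylabel('shear stress [MPa]')
# plt.show()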
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
from pandapower.control.controller.trafo_control import TrafoController
class ContinuousTapControl(TrafoController):
"""
Trafo Controller with local tap changer voltage control.
INPUT:
**net** (attrdict) - Pandapower struct
**tid** (int) - ID of the trafo that is controlled
**vm_set_pu** (float) - Maximum OLTC target voltage at bus in pu
OPTIONAL:
        **tol** (float, 0.001) - Voltage tolerance band at bus in pu (default: 0.1% = 0.001 pu)
**side** (string, "lv") - Side of the transformer where the voltage is controlled
**trafo_type** (float, "2W") - Trafo type ("2W" or "3W")
**in_service** (bool, True) - Indicates if the controller is currently in_service
**check_tap_bounds** (bool, True) - In case of true the tap_bounds will be considered
**drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped
"""
def __init__(self, net, tid, vm_set_pu, tol=1e-3, side="lv", trafotype="2W", in_service=True,
check_tap_bounds=True, level=0, order=0, drop_same_existing_ctrl=False,
matching_params=None, **kwargs):
if matching_params is None:
matching_params = {"tid": tid, 'trafotype': trafotype}
super().__init__(net, tid=tid, side=side, tol=tol, in_service=in_service,
trafotype=trafotype, level=level, order=order,
drop_same_existing_ctrl=drop_same_existing_ctrl,
matching_params=matching_params, **kwargs)
t = net[self.trafotable]
b = net.bus
if trafotype == "2W":
self.t_nom = t.at[tid, "vn_lv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "lv_bus"], "vn_kv"]
elif side == "lv":
self.t_nom = t.at[tid, "vn_lv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "lv_bus"], "vn_kv"]
elif side == "mv":
self.t_nom = t.at[tid, "vn_mv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "mv_bus"], "vn_kv"]
self.check_tap_bounds = check_tap_bounds
self.vm_set_pu = vm_set_pu
self.trafotype = trafotype
self.tol = tol
def control_step(self, net):
"""
Implements one step of the ContinuousTapControl
"""
delta_vm_pu = net.res_bus.at[self.controlled_bus, "vm_pu"] - self.vm_set_pu
tc = delta_vm_pu / self.tap_step_percent * 100 / self.t_nom
self.tap_pos += tc * self.tap_side_coeff * self.tap_sign
if self.check_tap_bounds:
self.tap_pos = np.clip(self.tap_pos, self.tap_min, self.tap_max)
# WRITE TO NET
if net[self.trafotable].tap_pos.dtype != "float":
net[self.trafotable].tap_pos = net[self.trafotable].tap_pos.astype(float)
net[self.trafotable].at[self.tid, "tap_pos"] = self.tap_pos
def is_converged(self, net):
"""
The ContinuousTapControl is converged, when the difference of the voltage between control steps is smaller
than the Tolerance (tol).
"""
if not net[self.trafotable].at[self.tid, 'in_service']:
return True
vm_pu = net.res_bus.at[self.controlled_bus, "vm_pu"]
self.tap_pos = net[self.trafotable].at[self.tid, 'tap_pos']
difference = 1 - self.vm_set_pu / vm_pu
if self.check_tap_bounds:
if self.tap_side_coeff * self.tap_sign == 1:
if vm_pu < self.vm_set_pu and self.tap_pos == self.tap_min:
return True
elif vm_pu > self.vm_set_pu and self.tap_pos == self.tap_max:
return True
elif self.tap_side_coeff * self.tap_sign == -1:
if vm_pu > self.vm_set_pu and self.tap_pos == self.tap_min:
return True
elif vm_pu < self.vm_set_pu and self.tap_pos == self.tap_max:
return True
return abs(difference) < self.tol
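# Hedged usage sketch: assumes a standard pandapower installation; the bus
# voltages, std_type and voltage set-point below are illustrative values only.
if __name__ == "__main__":
    import pandapower as pp
    net = pp.create_empty_network()
    hv = pp.create_bus(net, vn_kv=110.)
    lv = pp.create_bus(net, vn_kv=20.)
    pp.create_ext_grid(net, bus=hv)
    pp.create_transformer(net, hv_bus=hv, lv_bus=lv, std_type="25 MVA 110/20 kV")
    pp.create_load(net, bus=lv, p_mw=5.)
    ContinuousTapControl(net, tid=0, vm_set_pu=1.02)
    pp.runpp(net, run_control=True)  # controller shifts tap_pos until |vm - set point| < tol
    print(net.res_bus.vm_pu.at[lv], net.trafo.tap_pos.at[0])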
|