# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/initial_release_prep.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import subprocess
import constants
from core.tests import test_utils
import python_utils
from scripts import common
from scripts.release_scripts import initial_release_prep
class InitialReleasePrepTests(test_utils.GenericTestBase):
"""Test the methods for intial release preparation."""
def test_get_mail_message_template(self):
expected_mail_message_template = (
'Hi Sean,\n\n'
'You will need to run these jobs on the backup server:\n\n'
'[List of jobs formatted as: {{Job Name}} (instructions: '
'{{Instruction doc url}}) (Author: {{Author Name}})]\n'
'The specific instructions for jobs are linked with them. '
'The general instructions are as follows:\n\n'
'1. Login as admin\n'
'2. Navigate to the admin panel and then the jobs tab\n'
'3. Run the above jobs\n'
'4. In case of failure/success, please send the output logs for '
'the job to me and the job authors: {{Author names}}\n\n'
'Thanks!\n')
self.assertEqual(
initial_release_prep.get_mail_message_template(),
expected_mail_message_template)
def test_exception_is_raised_if_release_journal_is_not_created(self):
def mock_open_tab(unused_url):
pass
def mock_ask_user_to_confirm(unused_msg):
pass
def mock_input():
return 'n'
def mock_verify_current_branch_name(unused_branch_name):
pass
open_tab_swap = self.swap(
common, 'open_new_tab_in_browser_if_possible', mock_open_tab)
ask_user_swap = self.swap(
common, 'ask_user_to_confirm', mock_ask_user_to_confirm)
input_swap = self.swap(python_utils, 'INPUT', mock_input)
branch_check_swap = self.swap(
common, 'verify_current_branch_name',
mock_verify_current_branch_name)
with open_tab_swap, ask_user_swap, input_swap, branch_check_swap:
with self.assertRaisesRegexp(
Exception,
'Please ensure a new doc is created for the '
'release before starting with the release process.'):
initial_release_prep.main()
def test_get_extra_jobs_due_to_schema_changes(self):
def mock_run_cmd(unused_cmd_tokens):
return (
'"diff --git a/feconf.py b/feconf.py\n'
'--- a/feconf.py\n+++ b/feconf.py\n'
'@@ -36,6 +36,10 @@ POST_COMMIT_STATUS_PRIVATE = \'private\'\n'
' # Whether to unconditionally log info messages.\n'
' DEBUG = False\n \n'
'+# The path for generating release_summary.md '
'file for the current release.\n'
'-CURRENT_MISCONCEPTIONS_SCHEMA_VERSION = 2\n'
'+CURRENT_MISCONCEPTIONS_SCHEMA_VERSION = 1\n')
run_cmd_swap = self.swap(common, 'run_cmd', mock_run_cmd)
with run_cmd_swap:
self.assertEqual(
initial_release_prep.get_extra_jobs_due_to_schema_changes(
'upstream', '1.2.3'), ['SkillMigrationOneOffJob'])
def test_did_supported_audio_languages_change_with_change_in_languages(
self):
all_cmd_tokens = []
mock_constants = {
'SUPPORTED_AUDIO_LANGUAGES': [{
'id': 'en',
'description': 'English',
'relatedLanguages': ['en']}]}
def mock_run_cmd(cmd_tokens):
mock_constants['SUPPORTED_AUDIO_LANGUAGES'].append({
'id': 'ak',
'description': 'Akan',
'relatedLanguages': ['ak']
})
all_cmd_tokens.append(cmd_tokens)
run_cmd_swap = self.swap(common, 'run_cmd', mock_run_cmd)
constants_swap = self.swap(constants, 'constants', mock_constants)
with run_cmd_swap, constants_swap:
self.assertTrue(
initial_release_prep.did_supported_audio_languages_change(
'upstream', '1.2.3'))
self.assertEqual(
all_cmd_tokens, [
[
'git', 'checkout', 'upstream/release-1.2.3',
'--', 'assets/constants.ts'],
['git', 'reset', 'assets/constants.ts'],
['git', 'checkout', '--', 'assets/constants.ts']])
def test_did_supported_audio_languages_change_without_change_in_languages(
self):
all_cmd_tokens = []
mock_constants = {
'SUPPORTED_AUDIO_LANGUAGES': [{
'id': 'en',
'description': 'English',
'relatedLanguages': ['en']}]}
def mock_run_cmd(cmd_tokens):
all_cmd_tokens.append(cmd_tokens)
run_cmd_swap = self.swap(common, 'run_cmd', mock_run_cmd)
constants_swap = self.swap(constants, 'constants', mock_constants)
with run_cmd_swap, constants_swap:
self.assertFalse(
initial_release_prep.did_supported_audio_languages_change(
'upstream', '1.2.3'))
self.assertEqual(
all_cmd_tokens, [
[
'git', 'checkout', 'upstream/release-1.2.3',
'--', 'assets/constants.ts'],
['git', 'reset', 'assets/constants.ts'],
['git', 'checkout', '--', 'assets/constants.ts']])
def test_cut_release_branch_with_correct_version(self):
check_function_calls = {
'open_new_tab_in_browser_if_possible_is_called': False,
'check_call_is_called': False
}
expected_check_function_calls = {
'open_new_tab_in_browser_if_possible_is_called': True,
'check_call_is_called': True
}
def mock_open_tab(unused_url):
check_function_calls[
'open_new_tab_in_browser_if_possible_is_called'] = True
def mock_check_call(unused_cmd_tokens):
check_function_calls['check_call_is_called'] = True
def mock_input():
return '1.2.3'
open_tab_swap = self.swap(
common, 'open_new_tab_in_browser_if_possible',
mock_open_tab)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
input_swap = self.swap(
python_utils, 'INPUT', mock_input)
with open_tab_swap, check_call_swap, input_swap:
initial_release_prep.cut_release_branch()
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_cut_release_branch_with_incorrect_version(self):
check_function_calls = {
'open_new_tab_in_browser_if_possible_is_called': False,
'check_call_is_called': False
}
expected_check_function_calls = {
'open_new_tab_in_browser_if_possible_is_called': True,
'check_call_is_called': False
}
def mock_open_tab(unused_url):
check_function_calls[
'open_new_tab_in_browser_if_possible_is_called'] = True
def mock_check_call(unused_cmd_tokens):
check_function_calls['check_call_is_called'] = True
def mock_input():
return 'invalid'
open_tab_swap = self.swap(
common, 'open_new_tab_in_browser_if_possible',
mock_open_tab)
check_call_swap = self.swap(
subprocess, 'check_call', mock_check_call)
input_swap = self.swap(
python_utils, 'INPUT', mock_input)
with open_tab_swap, check_call_swap, input_swap:
with self.assertRaises(AssertionError):
initial_release_prep.cut_release_branch()
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_function_calls(self):
check_function_calls = {
'open_new_tab_in_browser_if_possible_is_called': False,
'ask_user_to_confirm_is_called': False,
'get_mail_message_template_is_called': False,
'get_extra_jobs_due_to_schema_changes_is_called': False,
'did_supported_audio_languages_change_is_called': False,
'get_remote_alias_is_called': False,
'verify_current_branch_name_is_called': False,
'cut_release_branch_is_called': False
}
expected_check_function_calls = {
'open_new_tab_in_browser_if_possible_is_called': True,
'ask_user_to_confirm_is_called': True,
'get_mail_message_template_is_called': True,
'get_extra_jobs_due_to_schema_changes_is_called': True,
'did_supported_audio_languages_change_is_called': True,
'get_remote_alias_is_called': True,
'verify_current_branch_name_is_called': True,
'cut_release_branch_is_called': True
}
def mock_open_tab(unused_url):
check_function_calls[
'open_new_tab_in_browser_if_possible_is_called'] = True
def mock_ask_user_to_confirm(unused_msg):
check_function_calls['ask_user_to_confirm_is_called'] = True
print_arr = []
def mock_input():
if print_arr[-1] == 'Enter version of previous release.':
return '1.2.3'
return 'y'
def mock_print(msg):
print_arr.append(msg)
def mock_get_mail_message_template():
check_function_calls['get_mail_message_template_is_called'] = True
return 'Mail message for testing.'
def mock_get_extra_jobs_due_to_schema_changes(
unused_remote_alias, unused_previous_release_version):
check_function_calls[
'get_extra_jobs_due_to_schema_changes_is_called'] = True
return []
def mock_did_supported_audio_languages_change(
unused_remote_alias, unused_previous_release_version):
check_function_calls[
'did_supported_audio_languages_change_is_called'] = True
return True
def mock_get_remote_alias(unused_remote_url):
check_function_calls['get_remote_alias_is_called'] = True
def mock_verify_current_branch_name(unused_branch_name):
check_function_calls['verify_current_branch_name_is_called'] = True
def mock_cut_release_branch():
check_function_calls['cut_release_branch_is_called'] = True
open_tab_swap = self.swap(
common, 'open_new_tab_in_browser_if_possible', mock_open_tab)
ask_user_swap = self.swap(
common, 'ask_user_to_confirm', mock_ask_user_to_confirm)
input_swap = self.swap(python_utils, 'INPUT', mock_input)
print_swap = self.swap(python_utils, 'PRINT', mock_print)
mail_msg_swap = self.swap(
initial_release_prep, 'get_mail_message_template',
mock_get_mail_message_template)
get_extra_jobs_swap = self.swap(
initial_release_prep, 'get_extra_jobs_due_to_schema_changes',
mock_get_extra_jobs_due_to_schema_changes)
check_changes_swap = self.swap(
initial_release_prep, 'did_supported_audio_languages_change',
mock_did_supported_audio_languages_change)
get_alias_swap = self.swap(
common, 'get_remote_alias', mock_get_remote_alias)
branch_check_swap = self.swap(
common, 'verify_current_branch_name',
mock_verify_current_branch_name)
cut_branch_swap = self.swap(
initial_release_prep, 'cut_release_branch',
mock_cut_release_branch)
with open_tab_swap, ask_user_swap, input_swap, print_swap:
with mail_msg_swap, get_alias_swap, check_changes_swap:
with get_extra_jobs_swap, branch_check_swap, cut_branch_swap:
initial_release_prep.main()
self.assertEqual(check_function_calls, expected_check_function_calls)
|
python
|
from random import SystemRandom
import pytest
from cacheout import LFUCache
parametrize = pytest.mark.parametrize
random = SystemRandom()
@pytest.fixture
def cache():
_cache = LFUCache(maxsize=5)
return _cache
def assert_keys_evicted_in_order(cache, keys):
"""Assert that cache keys are evicted in the same order as `keys`."""
keys = keys.copy()
for n in range(cache.maxsize, cache.maxsize * 2):
cache.set(n, n)
assert cache.full()
assert keys.pop(0) not in cache
for key in keys:
assert key in cache
def test_lfu_eviction(cache):
"""Test that LFUCache evicts least frequently used set entries first."""
key_counts = [("a", 4), ("b", 3), ("c", 5), ("d", 1), ("e", 2)]
for key, count in key_counts:
cache.set(key, key)
for _ in range(count):
cache.get(key)
sorted_key_counts = sorted(key_counts, key=lambda kc: kc[1])
eviction_order = [kc[0] for kc in sorted_key_counts]
max_access_count = max([kc[1] for kc in sorted_key_counts])
for n in range(len(key_counts)):
cache.set(n, n)
for _ in range(max_access_count + 1):
cache.get(n)
assert cache.full()
assert eviction_order[n] not in cache
for key in eviction_order[(n + 1) :]:
assert key in cache
def test_lfu_get(cache):
"""Test that LFUCache.get() returns cached value."""
for key, value in cache.items():
assert cache.get(key) == value
def test_lfu_clear(cache):
"""Test that LFUCache.clear() resets access counts."""
cache.maxsize = 2
cache.set(1, 1)
cache.set(2, 2)
for _ in range(5):
cache.get(1)
cache.set(3, 3)
assert 2 not in cache
cache.clear()
assert len(cache) == 0
cache.set(1, 1)
cache.set(2, 2)
cache.get(2)
cache.set(3, 3)
assert 1 not in cache
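# A hedged, self-contained sketch of the behavior the tests above rely on:
# keys with fewer recorded accesses are evicted first once maxsize is hit.
def test_lfu_minimal_example():
    c = LFUCache(maxsize=2)
    c.set("a", 1)
    c.set("b", 2)
    c.get("a")     # "a" now has a higher access count than "b"
    c.set("c", 3)  # inserting past maxsize evicts "b", the least used key
    assert "b" not in c
    assert "a" in c and "c" in c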
|
python
|
pessoaslist = []
dicionario = {}
pessoastotal = 0
soma = media = 0
mulheres = []
acimamedia = []
while True:
    dicionario['Nome'] = str(input("Name: "))
    dicionario['Sexo'] = str(input("Sex: [M/F] ")).strip().upper()
    dicionario['Idade'] = int(input("Age: "))
    resp = str(input("Continue? [Y/N] "))
    pessoaslist.append(dicionario.copy())
    pessoastotal += 1
    dicionario.clear()
    if resp.strip().upper() == "N":
        break
for i, v in enumerate(pessoaslist):
    soma += pessoaslist[i]['Idade']
    media = soma / pessoastotal
    if v['Sexo'] == "F":
        mulheres.append(v['Nome'])
print(f'- The group has {pessoastotal} people. \n- The average age is {media:.2f} years. \n- The registered women were {mulheres}')
for v in pessoaslist:
    if v['Idade'] > media:
        acimamedia.append(v)
print(f"- People whose age is above the average: \n{acimamedia}")
|
python
|
from ..utils import Object
class MessageSchedulingStateSendAtDate(Object):
"""
The message will be sent at the specified date
Attributes:
ID (:obj:`str`): ``MessageSchedulingStateSendAtDate``
Args:
send_date (:obj:`int`):
            Date the message will be sent. The date must be within 367 days in the future
Returns:
MessageSchedulingState
Raises:
:class:`telegram.Error`
"""
ID = "messageSchedulingStateSendAtDate"
def __init__(self, send_date, **kwargs):
self.send_date = send_date # int
@staticmethod
def read(q: dict, *args) -> "MessageSchedulingStateSendAtDate":
send_date = q.get('send_date')
return MessageSchedulingStateSendAtDate(send_date)
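# Hedged usage sketch (guarded so it only runs when executed directly):
# rebuild the object from an already-decoded update dict. The timestamp
# value below is made up.
if __name__ == '__main__':
    state = MessageSchedulingStateSendAtDate.read({'send_date': 1735689600})
    print(state.send_date)  # -> 1735689600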
|
python
|
'''Develop a program that loads the data of a triangle.
Implement a class with methods to initialize the attributes, print the value of the
longest side, and the type of triangle it is (equilateral, isosceles, or scalene).'''
import os
class Triangulo():
    def __init__(self, lado1, lado2, lado3):
        self.lado1 = int(lado1)
        self.lado2 = int(lado2)
        self.lado3 = int(lado3)
    def es_triangulo(self):
        # The sum of every pair of sides must exceed the remaining side.
        if (self.lado1 + self.lado2) > self.lado3 and (self.lado1 + self.lado3) > \
                self.lado2 and (self.lado2 + self.lado3) > self.lado1:
            return True
        else:
            return False
    def que_soy(self):
        if self.lado1 == self.lado2 == self.lado3:
            return "Equilateral"
        elif self.lado1 != self.lado2 != self.lado3 and self.lado1 != self.lado3:
            return "Scalene"
        else:
            return "Isosceles"
    def mayor_lado(self):
        if self.que_soy() != "Equilateral":
            return str(max([self.lado1, self.lado2, self.lado3]))
        else:
            return "\tAll sides are equal"
if __name__ == "__main__":
    os.system("cls")
    mensaje = ""
    lados = dict()
    print("\t*****************************************")
    print("\tLet's load the 3 sides of a triangle\n")
    print("\t*****************************************")
    while True:
        print(mensaje)
        lista_lados = input("\n\tEnter the three sides separated by commas:").split(",")
        if len(lista_lados) == 3:
            lado1, lado2, lado3 = lista_lados
        else:
            mensaje = '''\tERROR! Invalid values were entered!
            Please enter them again!'''
            continue
        if lado1 and lado2 and lado3:
            lado1, lado2, lado3 = [lado.strip() for lado in [lado1, lado2, lado3]]
            if (lado1 + lado2 + lado3).isdigit():
                lados[lado1] = "lado1"
                lados[lado2] = "lado2"
                lados[lado3] = "lado3"
                un_triangulo = Triangulo(lado1, lado2, lado3)
                if un_triangulo.es_triangulo():
                    if un_triangulo.que_soy() != "Equilateral":
                        print(f"\tThe longest side is: {lados[un_triangulo.mayor_lado()]}\n")
                    else:
                        print(un_triangulo.mayor_lado())
                    print(f"\tI am a {type(un_triangulo).__name__} {un_triangulo.que_soy()}")
                else:
                    mensaje = '''\tERROR! The values entered do not form a triangle:
                    the sum of any two sides must exceed the third side!
                    Please load them again!'''
                    continue
            else:
                mensaje = '''\tERROR! You must enter numbers only!
                Please load them again!'''
                continue
        else:
            mensaje = '''\tERROR! You must enter the three values separated by commas!
            Please load them again! E.g.: value1, value2, value3'''
            continue
        opcion = input("\tContinue loading? (Enter = yes, N = exit)")
        if opcion.lower() == "n":
            break
    print("\n\t*********************")
    print("\tThank you! See you soon")
    print("\t*********************")
|
python
|
import ldap
import pandas
import datetime
from ldap_paged_search import LdapPagedSearch
host = 'ldaps://example.com:636'
username = 'domain\\username'
password = 'password'
baseDN = 'DC=example,DC=com'
filter = "(&(objectCategory=computer))"
#attributes = ['dn']
attributes = ['*']
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
l = LdapPagedSearch(host, username, password, maxPages=0, pageSize=1000)
results = l.search(baseDN, filter, attributes = attributes)
computers = []
for computer in results:
    dn = computer[0]
    attrs = computer[1]
    try:
        operatingSystem = attrs['operatingSystem'][0]
    except KeyError:
        operatingSystem = False
    try:
        operatingSystemServicePack = attrs['operatingSystemServicePack'][0]
    except KeyError:
        operatingSystemServicePack = False
    hostname = attrs['cn'][0]
    try:
        fqdn = attrs['dNSHostName'][0]
    except KeyError:
        fqdn = False
    whenCreated = attrs['whenCreated'][0]
    try:
        # lastLogonTimestamp is a Windows FILETIME: 100-ns ticks since 1601-01-01.
        lastLogonTimestamp = datetime.datetime.utcfromtimestamp(
            (int(attrs['lastLogonTimestamp'][0]) - 116444736000000000) / 10000000)
    except KeyError:
        lastLogonTimestamp = False
    try:
        description = attrs['description'][0]
    except KeyError:
        description = False
    GUID = attrs['objectGUID'][0]
    computers.append((dn, hostname, fqdn, operatingSystem, operatingSystemServicePack,
                      whenCreated, lastLogonTimestamp, description, GUID))
comp = pandas.DataFrame(computers)
comp.columns = ['dn','hostname','fqdn','operatingSystem','operatingSystemServicePack','whenCreated','lastLogonTimestamp','description','GUID']
windows = comp[comp['operatingSystem'] != "Mac OS X"]
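# Worked check of the FILETIME conversion used above, with a made-up
# timestamp: FILETIME counts 100-nanosecond ticks since 1601-01-01 UTC,
# so subtracting 116444736000000000 rebases to the Unix epoch and
# dividing by 10**7 converts ticks to seconds.
ft = 132537600000000000  # hypothetical FILETIME value
print(datetime.datetime.utcfromtimestamp((ft - 116444736000000000) / 10**7))
# -> 2020-12-30 00:00:00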
|
python
|
"""Custom pandas accessors."""
import numpy as np
import plotly.graph_objects as go
from vectorbt import defaults
from vectorbt.root_accessors import register_dataframe_accessor
from vectorbt.utils import checks
from vectorbt.utils.widgets import CustomFigureWidget
from vectorbt.generic.accessors import Generic_DFAccessor
@register_dataframe_accessor('ohlcv')
class OHLCV_DFAccessor(Generic_DFAccessor): # pragma: no cover
"""Accessor on top of OHLCV data. For DataFrames only.
Accessible through `pd.DataFrame.vbt.ohlcv`."""
def __init__(self, obj, column_names=None, freq=None):
if not checks.is_pandas(obj): # parent accessor
obj = obj._obj
self._column_names = column_names
Generic_DFAccessor.__init__(self, obj, freq=freq)
def plot(self,
display_volume=True,
candlestick_kwargs={},
bar_kwargs={},
fig=None,
**layout_kwargs):
"""Plot OHLCV data.
Args:
display_volume (bool): If `True`, displays volume as bar chart.
candlestick_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Candlestick`.
bar_kwargs (dict): Keyword arguments passed to `plotly.graph_objects.Bar`.
fig (plotly.graph_objects.Figure): Figure to add traces to.
**layout_kwargs: Keyword arguments for layout.
Example:
```py
import vectorbt as vbt
import yfinance as yf
yf.Ticker("BTC-USD").history(period="max").vbt.ohlcv.plot()
```
"""
column_names = defaults.ohlcv['column_names'] if self._column_names is None else self._column_names
open = self._obj[column_names['open']]
high = self._obj[column_names['high']]
low = self._obj[column_names['low']]
close = self._obj[column_names['close']]
# Set up figure
if fig is None:
fig = CustomFigureWidget()
candlestick = go.Candlestick(
x=self.index,
open=open,
high=high,
low=low,
close=close,
name='OHLC',
yaxis="y2",
xaxis="x"
)
candlestick.update(**candlestick_kwargs)
fig.add_trace(candlestick)
if display_volume:
volume = self._obj[column_names['volume']]
            marker_colors = np.empty(volume.shape, dtype=object)  # dtype=np.object was removed in NumPy 1.24
marker_colors[(close.values - open.values) > 0] = 'green'
marker_colors[(close.values - open.values) == 0] = 'lightgrey'
marker_colors[(close.values - open.values) < 0] = 'red'
bar = go.Bar(
x=self.index,
y=volume,
marker_color=marker_colors,
marker_line_width=0,
name='Volume',
yaxis="y",
xaxis="x"
)
bar.update(**bar_kwargs)
fig.add_trace(bar)
fig.update_layout(
yaxis2=dict(
domain=[0.33, 1]
),
yaxis=dict(
domain=[0, 0.33]
)
)
fig.update_layout(
showlegend=True,
xaxis_rangeslider_visible=False,
xaxis_showgrid=True,
yaxis_showgrid=True
)
fig.update_layout(**layout_kwargs)
return fig
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: npu_utilization.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import telemetry_top_pb2 as telemetry__top__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='npu_utilization.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x15npu_utilization.proto\x1a\x13telemetry_top.proto\"C\n\x1bNetworkProcessorUtilization\x12$\n\x0enpu_util_stats\x18\x01 \x03(\x0b\x32\x0c.Utilization\"q\n\x0bUtilization\x12\x12\n\nidentifier\x18\x01 \x02(\t\x12\x13\n\x0butilization\x18\x02 \x01(\r\x12\x1c\n\x07packets\x18\x03 \x03(\x0b\x32\x0b.PacketLoad\x12\x1b\n\x06memory\x18\x04 \x03(\x0b\x32\x0b.MemoryLoad\"\xba\x01\n\nMemoryLoad\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61verage_util\x18\x02 \x01(\r\x12\x14\n\x0chighest_util\x18\x03 \x01(\r\x12\x13\n\x0blowest_util\x18\x04 \x01(\r\x12\x1e\n\x16\x61verage_cache_hit_rate\x18\x05 \x01(\r\x12\x1e\n\x16highest_cache_hit_rate\x18\x06 \x01(\r\x12\x1d\n\x15lowest_cache_hit_rate\x18\x07 \x01(\r\"\xa2\x01\n\nPacketLoad\x12\x12\n\nidentifier\x18\x01 \x02(\t\x12\x0c\n\x04rate\x18\x02 \x01(\x04\x12\'\n\x1f\x61verage_instructions_per_packet\x18\x03 \x01(\r\x12&\n\x1e\x61verage_wait_cycles_per_packet\x18\x04 \x01(\r\x12!\n\x19\x61verage_cycles_per_packet\x18\x05 \x01(\r:W\n\x18jnpr_npu_utilization_ext\x12\x17.JuniperNetworksSensors\x18\x0c \x01(\x0b\x32\x1c.NetworkProcessorUtilization')
,
dependencies=[telemetry__top__pb2.DESCRIPTOR,])
JNPR_NPU_UTILIZATION_EXT_FIELD_NUMBER = 12
jnpr_npu_utilization_ext = _descriptor.FieldDescriptor(
name='jnpr_npu_utilization_ext', full_name='jnpr_npu_utilization_ext', index=0,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)
_NETWORKPROCESSORUTILIZATION = _descriptor.Descriptor(
name='NetworkProcessorUtilization',
full_name='NetworkProcessorUtilization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='npu_util_stats', full_name='NetworkProcessorUtilization.npu_util_stats', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=113,
)
_UTILIZATION = _descriptor.Descriptor(
name='Utilization',
full_name='Utilization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='identifier', full_name='Utilization.identifier', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='utilization', full_name='Utilization.utilization', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packets', full_name='Utilization.packets', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='Utilization.memory', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=228,
)
_MEMORYLOAD = _descriptor.Descriptor(
name='MemoryLoad',
full_name='MemoryLoad',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='MemoryLoad.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_util', full_name='MemoryLoad.average_util', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='highest_util', full_name='MemoryLoad.highest_util', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lowest_util', full_name='MemoryLoad.lowest_util', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_cache_hit_rate', full_name='MemoryLoad.average_cache_hit_rate', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='highest_cache_hit_rate', full_name='MemoryLoad.highest_cache_hit_rate', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lowest_cache_hit_rate', full_name='MemoryLoad.lowest_cache_hit_rate', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=231,
serialized_end=417,
)
_PACKETLOAD = _descriptor.Descriptor(
name='PacketLoad',
full_name='PacketLoad',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='identifier', full_name='PacketLoad.identifier', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rate', full_name='PacketLoad.rate', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_instructions_per_packet', full_name='PacketLoad.average_instructions_per_packet', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_wait_cycles_per_packet', full_name='PacketLoad.average_wait_cycles_per_packet', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_cycles_per_packet', full_name='PacketLoad.average_cycles_per_packet', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=420,
serialized_end=582,
)
_NETWORKPROCESSORUTILIZATION.fields_by_name['npu_util_stats'].message_type = _UTILIZATION
_UTILIZATION.fields_by_name['packets'].message_type = _PACKETLOAD
_UTILIZATION.fields_by_name['memory'].message_type = _MEMORYLOAD
DESCRIPTOR.message_types_by_name['NetworkProcessorUtilization'] = _NETWORKPROCESSORUTILIZATION
DESCRIPTOR.message_types_by_name['Utilization'] = _UTILIZATION
DESCRIPTOR.message_types_by_name['MemoryLoad'] = _MEMORYLOAD
DESCRIPTOR.message_types_by_name['PacketLoad'] = _PACKETLOAD
DESCRIPTOR.extensions_by_name['jnpr_npu_utilization_ext'] = jnpr_npu_utilization_ext
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NetworkProcessorUtilization = _reflection.GeneratedProtocolMessageType('NetworkProcessorUtilization', (_message.Message,), {
'DESCRIPTOR' : _NETWORKPROCESSORUTILIZATION,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:NetworkProcessorUtilization)
})
_sym_db.RegisterMessage(NetworkProcessorUtilization)
Utilization = _reflection.GeneratedProtocolMessageType('Utilization', (_message.Message,), {
'DESCRIPTOR' : _UTILIZATION,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:Utilization)
})
_sym_db.RegisterMessage(Utilization)
MemoryLoad = _reflection.GeneratedProtocolMessageType('MemoryLoad', (_message.Message,), {
'DESCRIPTOR' : _MEMORYLOAD,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:MemoryLoad)
})
_sym_db.RegisterMessage(MemoryLoad)
PacketLoad = _reflection.GeneratedProtocolMessageType('PacketLoad', (_message.Message,), {
'DESCRIPTOR' : _PACKETLOAD,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:PacketLoad)
})
_sym_db.RegisterMessage(PacketLoad)
jnpr_npu_utilization_ext.message_type = _NETWORKPROCESSORUTILIZATION
telemetry__top__pb2.JuniperNetworksSensors.RegisterExtension(jnpr_npu_utilization_ext)
# @@protoc_insertion_point(module_scope)
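# Hedged usage sketch of the generated message classes (guarded so it only
# runs when this module is executed directly): 'identifier' is a required
# proto2 field, so it must be set before serializing.
if __name__ == '__main__':
    u = Utilization(identifier='npu0', utilization=42)
    payload = u.SerializeToString()
    print(Utilization.FromString(payload).identifier)  # -> 'npu0'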
|
python
|
#!/usr/bin/env python
import fire
from lib.dsv.commands.dsv_command import DSVCommand
if __name__ == '__main__':
fire.Fire(DSVCommand)
|
python
|
import predpy
from predpy.predpy import *
#from predpy.predpy import predpy
from predpy.predpy import cleandata
from predpy.predpy import galgraphs
|
python
|
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import humps
import json
from hsfs import util
class FeatureGroupCommit:
def __init__(
self,
commitid=None,
commit_date_string=None,
rows_inserted=None,
rows_updated=None,
rows_deleted=None,
committime=None,
type=None,
items=None,
count=None,
href=None,
):
self._commitid = commitid
self._commit_date_string = commit_date_string
self._rows_inserted = rows_inserted
self._rows_updated = rows_updated
self._rows_deleted = rows_deleted
@classmethod
def from_response_json(cls, json_dict):
json_decamelized = humps.decamelize(json_dict)
if json_decamelized["count"] >= 1:
return [cls(**commit_dto) for commit_dto in json_decamelized["items"]]
return cls(**json_decamelized)
def update_from_response_json(self, json_dict):
json_decamelized = humps.decamelize(json_dict)
_ = json_decamelized.pop("type")
_ = json_decamelized.pop("href")
_ = json_decamelized.pop("committime")
self.__init__(**json_decamelized)
return self
def json(self):
return json.dumps(self, cls=util.FeatureStoreEncoder)
def to_dict(self):
return {
"commitID": self._commitid,
"commitDateString": self._commit_date_string,
"rowsInserted": self._rows_inserted,
"rowsUpdated": self._rows_updated,
"rowsDeleted": self._rows_deleted,
}
@property
def commitid(self):
return self._commitid
@property
def commit_date_string(self):
return self._commit_date_string
@property
def rows_inserted(self):
return self._rows_inserted
@property
def rows_updated(self):
return self._rows_updated
@property
def rows_deleted(self):
return self._rows_deleted
@commitid.setter
def commitid(self, commitid):
self._commitid = commitid
@commit_date_string.setter
def commit_date_string(self, commit_date_string):
self._commit_date_string = commit_date_string
@rows_inserted.setter
def rows_inserted(self, rows_inserted):
self._rows_inserted = rows_inserted
@rows_updated.setter
def rows_updated(self, rows_updated):
self._rows_updated = rows_updated
@rows_deleted.setter
def rows_deleted(self, rows_deleted):
self._rows_deleted = rows_deleted
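# Hedged usage sketch (guarded; the values below are made up): build a
# commit DTO directly and inspect the camelCase payload that to_dict() emits.
if __name__ == "__main__":
    fg_commit = FeatureGroupCommit(
        commitid=1, commit_date_string="20200101000000",
        rows_inserted=10, rows_updated=2, rows_deleted=0,
    )
    print(fg_commit.to_dict())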
|
python
|
from rest_framework import serializers
from rest_framework.reverse import reverse
from onadata.apps.fsforms.models import FieldSightXF
from onadata.apps.logger.models import XForm
from onadata.libs.utils.decorators import check_obj
class XFormListSerializer(serializers.ModelSerializer):
class Meta:
model = XForm
fields = ('id', 'title')
|
python
|
import numpy as np
from torch import nn, tensor
import torch
from torch.autograd import Variable
class time_loss(nn.Module):
def __init__(self, margin=0.1, dist_type = 'l2'):
super(time_loss, self).__init__()
self.margin = margin
self.dist_type = dist_type
if dist_type == 'l2':
self.dist = nn.MSELoss(reduction='sum')
if dist_type == 'cos':
self.dist = nn.CosineSimilarity(dim=0)
if dist_type == 'l1':
self.dist = nn.L1Loss()
def forward(self, feat, label1):
feat_size = feat.size()[1]
feat_num = feat.size()[0]
label_num = len(label1.unique())
feat = feat.chunk(label_num, 0)
#loss = Variable(.cuda())
for i in range(label_num):
center1 = torch.mean(feat[i], dim=0)
if self.dist_type == 'l2' or self.dist_type == 'l1':
if i == 0:
dist = max(0, abs(self.dist(center1, center1)))
else:
dist += max(0, abs(self.dist(center1, center1)))
elif self.dist_type == 'cos':
if i == 0:
dist = max(0, 1-self.dist(center1, center1))
else:
dist += max(0, 1-self.dist(center1, center1))
return dist
|
python
|
from types import FunctionType
import pygame
from pygame.locals import *
from pygame.font import Font
from pygameMenuPro.event import Event
COLOR_BLACK = Color(0, 0, 0)
COLOR_WHITE = Color(255, 255, 255)
class InputManager:
def __init__(self):
self.last_checked_input = []
self.last_mouse_position:list[tuple[int,int]] = []
self.mouse_clicked = (False,False,False)
self.mouse_wheel = (0,0)
def check_input(self) -> int:
for event in pygame.event.get():
if(event.type == pygame.QUIT):
pygame.quit()
exit(0)
elif(event.type == KEYDOWN):
self.last_checked_input.append(event.key)
return event.key
elif(event.type == MOUSEWHEEL):
self.mouse_wheel = (event.x, event.y)
self.last_mouse_position.append(pygame.mouse.get_pos())
self.mouse_clicked = pygame.mouse.get_pressed()
return 0
def reset(self):
self.reset_last_checked()
self.reset_last_mouse_position()
self.reset_mouse_wheel()
def reset_last_checked(self):
self.last_checked_input.clear()
def reset_last_mouse_position(self):
self.last_mouse_position.clear()
def reset_mouse_wheel(self):
self.mouse_wheel = (0,0)
class FontManager:
def __init__(self, fonts: dict[str, Font] = {}):
pygame.font.init()
self._fonts = fonts
def add_font(self, font_str: str, font: Font):
self._fonts[font_str] = font
def get_font(self, font_str: str):
return self._fonts.get(font_str, None)
def set_default_option(self, font: Font):
"""
set the default option font
"""
self.add_font('default_option_font', font)
def set_default_highlight(self, font: Font):
self.add_font('default_highlight_font', font)
def set_default_title(self, font: Font):
self.add_font('default_title_font', font)
def draw_text(self, text: str, font_str: str, color: Color = Color(255, 255, 255)):
font = self._fonts[font_str]
lines = text.splitlines()
maxline = max(lines, key=len)
        surface = pygame.Surface((font.size(maxline)[0], int(font.get_height() * 1.25 * len(lines))), pygame.SRCALPHA, 32)
for i, line in enumerate(lines):
line_surf = font.render(line, True, color)
text_rect = line_surf.get_rect()
text_rect.centerx = surface.get_rect().centerx
text_rect.top = i * font.get_height() * 1.25
surface.blit(line_surf, text_rect.topleft)
surface.convert_alpha()
return surface
class Option:
# static attribute to check the input
input = InputManager()
# static attribute manage the user fonts
font = FontManager()
clock = pygame.time.Clock()
def __init__(self, text: str, font_str: str = 'default_option_font', color: Color = COLOR_WHITE, event=None):
self.add = AddExtention(self)
self._event = event
if(self._event == None):
self._event = Event()
self.text = text
self._pos = None
self._font_str = font_str
self._activation_keys: list[int] = [K_RETURN]
self.color = color
self.rect = None
def is_selected(self):
"""
        returns true iff one of the activation keys is in Option.input.last_checked_input
"""
return len(list(set(Option.input.last_checked_input) & set(self._activation_keys))) > 0
def on_select(self):
"""
will be called when is_selected is true
"""
self._event.post_event('on_select', self)
def on_active(self):
"""
will be called when this option is the current active option in the menu
"""
self._event.post_event('on_active', self)
if(self.is_selected()):
self.on_select()
def on_deactive(self):
"""
will be called before the next option is being activated
"""
self._event.post_event('on_deactive', self)
def draw(self, surface:pygame.Surface, pos):
surf = self.render()
self.rect = surface.blit(surf, (self._pos[0] - surf.get_width()//2, self._pos[1]))
def render(self):
return Option.font.draw_text(self.text, self._font_str, color=self.color)
class AddExtention():
def __init__(self, option: Option):
self._option = option
def option(self):
return self._option
def highlight(self, font_str='default_highlight_font'):
"""
Add a Highlight decorator
"""
self._regular_font_str = self._option._font_str
def highlight_me(option: Option):
option._font_str = font_str
def dont_highlight_me(option: Option):
option._font_str = self._regular_font_str
self._option.add.active_listener(highlight_me)\
.add.deactive_listener(dont_highlight_me)
return self._option
def input(self, input):
"""
add input decorator
"""
head = self._option.text
setattr(self._option, 'input_output', input)
self._option.left = K_LEFT
self._option.right = K_RIGHT
self._option.input_output = input
self._option.text = head + ' ' + str(self._option.input_output)
def update_text_with_input(option: Option):
option.text = head + ' ' + str(self._option.input_output)
self._option.add.active_listener(update_text_with_input)
return self._option
def menu(self, surface: pygame.Surface, title_pos: tuple[int, int], title_font_str: str = 'default_title_font', options: list[Option] = [], background_color=COLOR_BLACK, cursor: pygame.Surface = None):
"""
convert this option to a menu.
The menu title will be same as the option text
"""
self._option = Menu(self.option(), surface, title_pos,
title_font_str, options, background_color, cursor)
return self._option
def mouse_menu(self, surface: pygame.Surface, title_pos: tuple[int, int], title_font_str: str = 'default_title_font', options: list[Option] = [], background_color=COLOR_BLACK, cursor: pygame.Surface = None):
"""
convert this option to a mouse menu
The menu title will be same as the option text
The options of this menu will be activated by mouse hover,
and selected by mouse click.
"""
self._option = MouseMenu(self.option(), surface, title_pos, title_font_str, options, background_color, cursor)
return self._option
def select_listener(self, func: FunctionType):
"""
will be called inside on_select()
"""
self.option()._event.subscribe('on_select', func)
return self.option()
def active_listener(self, func: FunctionType):
"""
will be called inside on_active()
"""
self.option()._event.subscribe('on_active', func)
return self.option()
def deactive_listener(self, func: FunctionType):
"""
will be called inside on_deactive()
"""
self.option()._event.subscribe('on_deactive', func)
return self.option()
def activation_key(self, key: int):
"""
add another activation key to this option
"""
self.option()._activation_keys.append(key)
return self.option()
class Menu(Option):
def __init__(self, option: Option, surface: pygame.Surface, title_pos: tuple[int, int], title_font_str: str = 'default_title_font', options: list[Option] = [], background_color=COLOR_BLACK, cursor: pygame.Surface = None):
super().__init__(option.text, option._font_str, option.color, option._event)
# private:
self._option = option
self._surface = surface
self._title_pos = title_pos
self._options = options
self._background_color = background_color
# public:
self.title_font_str = title_font_str
self.run_display = False
self.state = 0
self.up = K_UP
self.down = K_DOWN
self.quit = K_ESCAPE
self.cursor = cursor
self.cursor_offset = 0
def activate_display_menu(_):
Option.input.reset_last_checked()
self.display_menu()
self.add.select_listener(activate_display_menu)
def display_menu(self):
"""
Run this display. It can be called from another menu and "hide" this menu.
        Practically, this will stop the current menu loop and start this menu's loop.
"""
self.run_display = True
while(self.run_display):
self._surface.fill(self._background_color)
# draw title:
title_surf = Option.font.draw_text(self.text, self.title_font_str)
self._surface.blit(title_surf, (self._title_pos[0] - title_surf.get_width()//2, self._title_pos[1]))
# checking input:
k = self.input.check_input()
self.update_state(k)
if(len(self._options) > 0):
# activate selected option:
if(self.state >= 0):
self._options[self.state].on_active()
# draw options:
last_height = Option.font.get_font(
self.title_font_str).get_height() + self._title_pos[1]
for option in self.get_options():
option._pos = (self._title_pos[0], last_height)
option.draw(self._surface, option._pos)
text_height = option.rect.height
last_height = option._pos[1] + text_height
# draw cursor:
if(self.cursor != None):
selected_option = self._options[self.state]
option_font_size = Option.font.get_font(
selected_option._font_str).size(selected_option.text)
self._surface.blit(self.cursor, (selected_option.rect.left + self.cursor_offset, selected_option.rect.top))
# reset input list:
Option.input.reset()
# refresh:
pygame.display.update()
Option.clock.tick(60)
def update_state(self, k: int):
"""
This method is being called once in every menu's main loop iteration.
You shouldn't modify this unless you know what you do
"""
if(k > 0):
if(k == self.quit):
self.run_display = False
if(len(self._options) > 0):
if(k == self.up):
self._options[self.state].on_deactive()
self.state -= 1
elif(k == self.down):
self._options[self.state].on_deactive()
self.state += 1
self.state %= len(self._options)
def add_option(self, option: Option, index: int = -1):
"""
Add an option to this menu. it can be Menu as well...
"""
if(index == -1):
self._options.append(option)
else:
self._options.insert(index, option)
def set_options(self, options: list[Option]):
"""
Set the options list to this menu. The list can contain other menus.
The state of this menu will be reset to 0
"""
self.state = 0
self._options = options
return self
def get_options(self):
"""
Returns the option list of this menu
"""
return self._options
def __getattr__(self, name:str):
return self._option.__getattribute__(name)
class MouseMenu(Menu):
def __init__(self, option: Option, surface: pygame.Surface, title_pos: tuple[int, int], title_font_str: str = 'default_title_font', options: list[Option] = [], background_color=COLOR_BLACK, cursor: pygame.Surface = None):
super().__init__(option, surface, title_pos, title_font_str, options=options, background_color=background_color, cursor=cursor)
self.state = -1
def update_state(self, k: int):
some_option_active = False
for i, option in enumerate(self._options):
rect = option.rect
if(rect != None):
if(len(Option.input.last_mouse_position)>0 and rect.collidepoint(Option.input.last_mouse_position[-1])):
if(self.state != i and self.state >= 0):
self._options[self.state].on_deactive()
some_option_active = True
self.state = i
if(not some_option_active):
if(self.state >= 0):
self._options[self.state].on_deactive()
self.state = -1
def set_options(self, options: list[Option]):
super().set_options(options)
def select_with_mouse(option:Option):
if(Option.input.mouse_clicked[0]):
option.on_select()
for option in self._options:
option.add.active_listener(select_with_mouse)
return self
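# Hedged usage sketch (all names below are hypothetical): register default
# fonts, wrap two options in a keyboard-driven menu, and run its loop.
# Assumes a working pygame display; ESC quits the menu loop.
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    Option.font.set_default_option(Font(None, 32))
    Option.font.set_default_highlight(Font(None, 40))
    Option.font.set_default_title(Font(None, 48))
    start = Option('Start').add.highlight()
    quit_option = Option('Quit').add.highlight()
    main_menu = Option('Main Menu').add.menu(screen, (320, 40), options=[start, quit_option])
    main_menu.display_menu()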
|
python
|
# Define player object
class Player:
def __init__(self, name, house):
self.name = name
self.house = house
self.hasNumber = False
# Method for setting users phonenumbers
def setPhonenumber(self, phoneNumber):
self.phoneNumber = phoneNumber
self.hasNumber = True
# 12B
LF12B = [
"Phill",
"Dave",
"Jake",
"Pat",
"Gabe",
"Evan"
]
# 1308
WARD1308 = [
"Jason",
"Zack",
"Jack",
"JC"
]
# 13C
LF13C = [
"Lynnanne",
"Tori",
"Alyssa",
"Ollie"
]
# 12A
LF12A = [
"Ashley",
"Carly",
"Lauren",
"Alexa",
"Gabby",
"Lexi",
"Steph"
]
# Array of all houses which contain people in each house
houses = [LF12B, LF13C, LF12A, WARD1308]
houseNames = ['Lower Fulton 12B','Lower Fulton 13C', 'Lower Fulton 12A', 'Ward 1308']
# Empty people array to be appended to
people = []
# Adds each person from each house to the list of people
for index, house in enumerate(houses):
    for name in house:
        people.append(Player(name, houseNames[index]))
for i in people:
print(i.name + ' Representing ' + i.house)
|
python
|
# ===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import argparse
import bench
from cuml import LogisticRegression
parser = argparse.ArgumentParser(description='cuML logistic '
'regression benchmark')
parser.add_argument('--no-fit-intercept', dest='fit_intercept',
action='store_false', default=True,
help="Don't fit intercept")
parser.add_argument('--solver', default='qn', choices=('qn', 'owl'),
help='Solver to use.')
parser.add_argument('--linesearch-max-iter', type=int, default=50,
help='Maximum iterations per solver outer iteration')
parser.add_argument('--maxiter', type=int, default=100,
help='Maximum iterations for the iterative solver')
parser.add_argument('-C', dest='C', type=float, default=1.0,
help='Regularization parameter')
parser.add_argument('--tol', type=float, default=1e-10,
help='Tolerance for solver. Default is 1e-10.')
params = bench.parse_args(parser)
# Load generated data
X_train, X_test, y_train, y_test = bench.load_data(params)
params.n_classes = y_train[y_train.columns[0]].nunique()
# Create our classifier object
clf = LogisticRegression(penalty='l2', C=params.C,
linesearch_max_iter=params.linesearch_max_iter,
fit_intercept=params.fit_intercept, verbose=params.verbose,
tol=params.tol,
max_iter=params.maxiter, solver=params.solver)
# Time fit and predict
fit_time, _ = bench.measure_function_time(clf.fit, X_train, y_train, params=params)
y_pred = clf.predict(X_train)
train_acc = 100 * bench.accuracy_score(y_pred, y_train)
predict_time, y_pred = bench.measure_function_time(
clf.predict, X_test, params=params)
test_acc = 100 * bench.accuracy_score(y_pred, y_test)
bench.print_output(library='cuml', algorithm='log_reg',
stages=['training', 'prediction'], params=params,
functions=['LogReg.fit', 'LogReg.predict'],
times=[fit_time, predict_time], metric_type='accuracy[%]',
metrics=[train_acc, test_acc], data=[X_train, X_test],
alg_instance=clf)
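# Hedged usage note: a minimal invocation using only the flags defined in
# this file (bench.parse_args is assumed to contribute its own dataset
# loading arguments on top of these; the script name is hypothetical):
#
#     python cuml_log_reg.py --solver qn --maxiter 100 -C 1.0 --tol 1e-10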
|
python
|
from data.station_number_data import StationNumberData
from states_machine.state_context import State
from states_machine.states.on_which_sum import OnWhichSum
class StationNumber(State):
def __init__(self):
pass
def income_handle(self) -> None:
# TODO:
#
print(f"income_handle: StationNumber")
def outcome_handle(self) -> None:
result = None
print(f"outcome_handle: StationNumber")
try:
self.text_machine.start_transaction()
word = self.text_machine.move()
if word in (StationNumberData.number_words + StationNumberData.number_nums):
if self.text_machine.move() in StationNumberData.main_words:
# TODO: Add to state machine info!!!
print("*"*10 + " StationNumber " + "*"*10)
print(self.text_machine.get_current())
if word.isnumeric():
result = word
else:
for i in range(0, len(StationNumberData.number_words)):
if word == StationNumberData.number_words[i]:
result = i + 1
break
print("*" * 10 + " StationNumber " + "*" * 10)
self.text_machine.commit()
else:
self.text_machine.rollback()
else:
self.text_machine.rollback()
        except Exception:
            print(" StationNumber: exception ")
            self.text_machine.rollback()
finally:
if not self.text_machine.has_finished():
self.text_machine.rollback()
print(f" StationNumber wants to change the state of the context OnWhichSum.")
if result is not None:
self.context.add_result(result, 0)
self.context.transition_to(OnWhichSum())
|
python
|
class Transformer:
def transform(self, message):
yield from map(self.map, (message,))
def map(self, message):
raise NotImplementedError('Transformer is an abstract class.')
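# Minimal sketch of a concrete subclass (UpperCaseTransformer is
# hypothetical): override map() and let transform() drive it.
class UpperCaseTransformer(Transformer):
    def map(self, message):
        return message.upper()

if __name__ == '__main__':
    for out in UpperCaseTransformer().transform('hello'):
        print(out)  # -> HELLO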
|
python
|
import socket
import struct
from struct import *
import sys
def get_mac_addr(bytes_addr):
    # Format raw MAC bytes as AA:BB:CC:DD:EE:FF (helper was called below
    # but missing from this file).
    return ':'.join('{:02x}'.format(b) for b in bytes_addr).upper()
def ethernet_head(raw_data):
    dest, src, prototype = struct.unpack('! 6s 6s H', raw_data[:14])
    dest_mac = get_mac_addr(dest)
    src_mac = get_mac_addr(src)
    proto = socket.htons(prototype)
    data = raw_data[14:]
    return dest_mac, src_mac, proto, data
def get_ip(addr):
return '.'.join(map(str, addr))
def main():
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
while True:
raw_data, addr = s.recvfrom(65535)
eth = ethernet_head(raw_data)
print('\nEthernet Frame:')
print('Destination: {}, Source: {}, Protocol: {}'.format(eth[0], eth[1], eth[2]))
        if eth[2] == 8:
            # eth is (dest_mac, src_mac, proto, data); parse the payload.
            ipv4 = ipv4_head(eth[3])
            print('\t - ' + 'IPv4 Packet:')
            print('\t\t - ' + 'Version: {}, Header Length: {}, TTL: {},'.format(ipv4[0], ipv4[1], ipv4[2]))
            print('\t\t - ' + 'Protocol: {}, Source: {}, Target: {}'.format(ipv4[3], ipv4[4], ipv4[5]))
def tcp_head(raw_data):
(src_port, dest_port, sequence, acknowledgment, offset_reserved_flags) = struct.unpack('! H H L L H', raw_data[:14])
offset = (offset_reserved_flags >> 12) * 4
flag_urg = (offset_reserved_flags & 32) >> 5
flag_ack = (offset_reserved_flags & 16) >> 4
flag_psh = (offset_reserved_flags & 8) >> 3
flag_rst = (offset_reserved_flags & 4) >> 2
flag_syn = (offset_reserved_flags & 2) >> 1
flag_fin = offset_reserved_flags & 1
data = raw_data[offset:]
return src_port, dest_port, sequence, acknowledgment, flag_urg, flag_ack, flag_psh, flag_rst, flag_syn, flag_fin, data
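# Hedged self-check for tcp_head() using a hand-built header: source port
# 80, destination port 12345, SYN set, data offset 5 (a 20-byte header).
if __name__ == '__main__':
    hdr = struct.pack('! H H L L H', 80, 12345, 0, 0, (5 << 12) | 2)
    print(tcp_head(hdr + b'\x00' * 6 + b'payload')[:2])  # -> (80, 12345)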
def ipv4_head(raw_data):
version_header_length = raw_data[0]
version = version_header_length >> 4
header_length = (version_header_length & 15) * 4
ttl, proto, src, target = struct.unpack('! 8x B B 2x 4s 4s', raw_data[:20])
data = raw_data[header_length:]
src = get_ip(src)
target = get_ip(target)
return version, header_length, ttl, proto, src, target, data
HOST = socket.gethostbyname(socket.gethostname())
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP)
s.bind((HOST, 0))
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
s.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
while True:
print(s.recvfrom(65565))
#
# s.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
|
python
|
import unittest
import random
import numpy as np
import pyquil
from cirq import GridQubit, LineQubit, X, Y, Z, PauliSum, PauliString
from openfermion import (
QubitOperator,
IsingOperator,
FermionOperator,
get_interaction_operator,
get_fermion_operator,
jordan_wigner,
qubit_operator_sparse,
)
from zquantum.core.measurement import ExpectationValues
from zquantum.core.utils import RNDSEED, create_object
from zquantum.core.interfaces.mock_objects import MockAnsatz
from zquantum.core.testing import create_random_qubitop, create_random_isingop
from zquantum.core.circuit import build_uniform_param_grid
from ._utils import (
generate_random_qubitop,
get_qubitop_from_coeffs_and_labels,
evaluate_qubit_operator,
get_qubitop_from_matrix,
reverse_qubit_order,
expectation,
change_operator_type,
evaluate_operator_for_parameter_grid,
get_fermion_number_operator,
get_diagonal_component,
get_polynomial_tensor,
qubitop_to_paulisum,
)
class TestQubitOperator(unittest.TestCase):
def test_build_qubitoperator_from_coeffs_and_labels(self):
# Given
test_op = QubitOperator(((0, "Y"), (1, "X"), (2, "Z"), (4, "X")), 3.0j)
coeffs = [3.0j]
labels = [[2, 1, 3, 0, 1]]
# When
build_op = get_qubitop_from_coeffs_and_labels(coeffs, labels)
# Then
self.assertEqual(test_op, build_op)
    def test_qubitop_matrix_conversion(self):
# Given
m = 4
n = 2 ** m
TOL = 10 ** -15
random.seed(RNDSEED)
A = np.array([[random.uniform(-1, 1) for x in range(n)] for y in range(n)])
# When
A_qubitop = get_qubitop_from_matrix(A)
A_qubitop_matrix = np.array(qubit_operator_sparse(A_qubitop).todense())
test_matrix = A_qubitop_matrix - A
# Then
for row in test_matrix:
for elem in row:
self.assertEqual(abs(elem) < TOL, True)
def test_generate_random_qubitop(self):
# Given
nqubits = 4
nterms = 5
nlocality = 2
max_coeff = 1.5
fixed_coeff = False
# When
qubit_op = generate_random_qubitop(
nqubits, nterms, nlocality, max_coeff, fixed_coeff
)
# Then
self.assertEqual(len(qubit_op.terms), nterms)
for term, coefficient in qubit_op.terms.items():
for i in range(nlocality):
self.assertLess(term[i][0], nqubits)
self.assertEqual(len(term), nlocality)
self.assertLessEqual(np.abs(coefficient), max_coeff)
# Given
fixed_coeff = True
# When
qubit_op = generate_random_qubitop(
nqubits, nterms, nlocality, max_coeff, fixed_coeff
)
# Then
self.assertEqual(len(qubit_op.terms), nterms)
for term, coefficient in qubit_op.terms.items():
self.assertEqual(np.abs(coefficient), max_coeff)
def test_evaluate_qubit_operator(self):
# Given
qubit_op = QubitOperator("0.5 [] + 0.5 [Z1]")
expectation_values = ExpectationValues([0.5, 0.5])
# When
value_estimate = evaluate_qubit_operator(qubit_op, expectation_values)
# Then
self.assertAlmostEqual(value_estimate.value, 0.5)
def test_evaluate_operator_for_parameter_grid(self):
# Given
ansatz = MockAnsatz(4, 2)
grid = build_uniform_param_grid(1, 2, 0, np.pi, np.pi / 10)
backend = create_object(
{
"module_name": "zquantum.core.interfaces.mock_objects",
"function_name": "MockQuantumSimulator",
}
)
op = QubitOperator("0.5 [] + 0.5 [Z1]")
previous_layer_parameters = [1, 1]
# When
(
parameter_grid_evaluation,
optimal_parameters,
) = evaluate_operator_for_parameter_grid(
ansatz, grid, backend, op, previous_layer_params=previous_layer_parameters
)
# Then (for brevity, only check first and last evaluations)
self.assertIsInstance(parameter_grid_evaluation[0]["value"].value, float)
self.assertEqual(parameter_grid_evaluation[0]["parameter1"], 0)
self.assertEqual(parameter_grid_evaluation[0]["parameter2"], 0)
self.assertIsInstance(parameter_grid_evaluation[99]["value"].value, float)
self.assertEqual(
parameter_grid_evaluation[99]["parameter1"], np.pi - np.pi / 10
)
self.assertEqual(
parameter_grid_evaluation[99]["parameter2"], np.pi - np.pi / 10
)
self.assertEqual(len(optimal_parameters), 4)
self.assertEqual(optimal_parameters[0], 1)
self.assertEqual(optimal_parameters[1], 1)
def test_reverse_qubit_order(self):
# Given
op1 = QubitOperator("[Z0 Z1]")
op2 = QubitOperator("[Z1 Z0]")
# When/Then
self.assertEqual(op1, reverse_qubit_order(op2))
# Given
op1 = QubitOperator("Z0")
op2 = QubitOperator("Z1")
# When/Then
self.assertEqual(op1, reverse_qubit_order(op2, n_qubits=2))
self.assertEqual(op2, reverse_qubit_order(op1, n_qubits=2))
def test_expectation(self):
"""Check <Z0> and <Z1> for the state |100>"""
# Given
wf = pyquil.wavefunction.Wavefunction([0, 1, 0, 0, 0, 0, 0, 0])
op1 = QubitOperator("Z0")
op2 = QubitOperator("Z1")
# When
exp_op1 = expectation(op1, wf)
exp_op2 = expectation(op2, wf)
# Then
self.assertAlmostEqual(-1, exp_op1)
self.assertAlmostEqual(1, exp_op2)
def test_change_operator_type(self):
# Given
operator1 = QubitOperator("Z0 Z1", 4.5)
operator2 = IsingOperator("Z0 Z1", 4.5)
operator3 = IsingOperator()
operator4 = IsingOperator("Z0", 0.5) + IsingOperator("Z1", 2.5)
# When
new_operator1 = change_operator_type(operator1, IsingOperator)
new_operator2 = change_operator_type(operator2, QubitOperator)
new_operator3 = change_operator_type(operator3, QubitOperator)
new_operator4 = change_operator_type(operator4, QubitOperator)
# Then
self.assertEqual(IsingOperator("Z0 Z1", 4.5), new_operator1)
self.assertEqual(QubitOperator("Z0 Z1", 4.5), new_operator2)
self.assertEqual(QubitOperator(), new_operator3)
self.assertEqual(
QubitOperator("Z0", 0.5) + QubitOperator("Z1", 2.5), new_operator4
)
def test_get_fermion_number_operator(self):
# Given
n_qubits = 4
n_particles = None
correct_operator = get_interaction_operator(
FermionOperator(
"""
0.0 [] +
1.0 [0^ 0] +
1.0 [1^ 1] +
1.0 [2^ 2] +
1.0 [3^ 3]
"""
)
)
# When
number_operator = get_fermion_number_operator(n_qubits)
# Then
self.assertEqual(number_operator, correct_operator)
# Given
n_qubits = 4
n_particles = 2
correct_operator = get_interaction_operator(
FermionOperator(
"""
-2.0 [] +
1.0 [0^ 0] +
1.0 [1^ 1] +
1.0 [2^ 2] +
1.0 [3^ 3]
"""
)
)
# When
number_operator = get_fermion_number_operator(n_qubits, n_particles)
# Then
self.assertEqual(number_operator, correct_operator)
class TestOtherUtils(unittest.TestCase):
def test_get_diagonal_component_polynomial_tensor(self):
fermion_op = FermionOperator("0^ 1^ 2^ 0 1 2", 1.0)
fermion_op += FermionOperator("0^ 1^ 2^ 0 1 3", 2.0)
fermion_op += FermionOperator((), 3.0)
polynomial_tensor = get_polynomial_tensor(fermion_op)
diagonal_op, remainder_op = get_diagonal_component(polynomial_tensor)
self.assertTrue((diagonal_op + remainder_op) == polynomial_tensor)
diagonal_qubit_op = jordan_wigner(get_fermion_operator(diagonal_op))
remainder_qubit_op = jordan_wigner(get_fermion_operator(remainder_op))
for term in diagonal_qubit_op.terms:
for pauli in term:
self.assertTrue(pauli[1] == "Z")
for term in remainder_qubit_op.terms:
is_diagonal = True
for pauli in term:
if pauli[1] != "Z":
is_diagonal = False
break
self.assertFalse(is_diagonal)
def test_get_diagonal_component_interaction_op(self):
fermion_op = FermionOperator("1^ 1", 0.5)
fermion_op += FermionOperator("2^ 2", 0.5)
fermion_op += FermionOperator("1^ 2^ 0 3", 0.5)
diagonal_op, remainder_op = get_diagonal_component(
get_interaction_operator(fermion_op)
)
self.assertTrue(
(diagonal_op + remainder_op) == get_interaction_operator(fermion_op)
)
diagonal_qubit_op = jordan_wigner(diagonal_op)
remainder_qubit_op = jordan_wigner(remainder_op)
for term in diagonal_qubit_op.terms:
for pauli in term:
self.assertTrue(pauli[1] == "Z")
is_diagonal = True
for term in remainder_qubit_op.terms:
for pauli in term:
if pauli[1] != "Z":
is_diagonal = False
break
self.assertFalse(is_diagonal)
def test_qubitop_to_paulisum_identity_operator(self):
# Given
qubit_operator = QubitOperator("", 4)
# When
paulisum = qubitop_to_paulisum(qubit_operator)
# Then
self.assertEqual(paulisum.qubits, ())
self.assertEqual(paulisum, PauliSum() + 4)
def test_qubitop_to_paulisum_z0z1_operator(self):
# Given
qubit_operator = QubitOperator("Z0 Z1", -1.5)
expected_qubits = (GridQubit(0, 0), GridQubit(1, 0))
expected_paulisum = (
PauliSum()
+ PauliString(Z.on(expected_qubits[0]))
* PauliString(Z.on(expected_qubits[1]))
* -1.5
)
# When
paulisum = qubitop_to_paulisum(qubit_operator)
# Then
self.assertEqual(paulisum.qubits, expected_qubits)
self.assertEqual(paulisum, expected_paulisum)
def test_qubitop_to_paulisum_setting_qubits(self):
# Given
qubit_operator = QubitOperator("Z0 Z1", -1.5)
expected_qubits = (LineQubit(0), LineQubit(5))
expected_paulisum = (
PauliSum()
+ PauliString(Z.on(expected_qubits[0]))
* PauliString(Z.on(expected_qubits[1]))
* -1.5
)
# When
paulisum = qubitop_to_paulisum(qubit_operator, qubits=expected_qubits)
# Then
self.assertEqual(paulisum.qubits, expected_qubits)
self.assertEqual(paulisum, expected_paulisum)
def test_qubitop_to_paulisum_more_terms(self):
# Given
qubit_operator = (
QubitOperator("Z0 Z1 Z2", -1.5)
+ QubitOperator("X0", 2.5)
+ QubitOperator("Y1", 3.5)
)
expected_qubits = (LineQubit(0), LineQubit(5), LineQubit(8))
expected_paulisum = (
PauliSum()
+ (
PauliString(Z.on(expected_qubits[0]))
* PauliString(Z.on(expected_qubits[1]))
* PauliString(Z.on(expected_qubits[2]))
* -1.5
)
+ (PauliString(X.on(expected_qubits[0]) * 2.5))
+ (PauliString(Y.on(expected_qubits[1]) * 3.5))
)
# When
paulisum = qubitop_to_paulisum(qubit_operator, qubits=expected_qubits)
# Then
self.assertEqual(paulisum.qubits, expected_qubits)
self.assertEqual(paulisum, expected_paulisum)
|
python
|
"""UseCase for updating a metric entry's properties."""
import logging
from argparse import Namespace, ArgumentParser
from typing import Final, Optional
import jupiter.command.command as command
from jupiter.domain.adate import ADate
from jupiter.use_cases.metrics.entry.update import MetricEntryUpdateUseCase
from jupiter.framework.update_action import UpdateAction
from jupiter.framework.base.entity_id import EntityId
LOGGER = logging.getLogger(__name__)
class MetricEntryUpdate(command.Command):
"""UseCase for updating a metric entry's properties."""
_command: Final[MetricEntryUpdateUseCase]
def __init__(self, the_command: MetricEntryUpdateUseCase) -> None:
"""Constructor."""
self._command = the_command
@staticmethod
def name() -> str:
"""The name of the command."""
return "metric-entry-update"
@staticmethod
def description() -> str:
"""The description of the command."""
return "Update a metric entry"
def build_parser(self, parser: ArgumentParser) -> None:
"""Construct a argparse parser for the command."""
parser.add_argument("--id", dest="ref_id", required=True, help="The id of the metric")
parser.add_argument("--collection-time", dest="collection_time", required=False,
help="The time at which a metric should be recorded")
parser.add_argument("--value", dest="value", required=False, type=float,
help="The value for the metric")
parser.add_argument("--notes", dest="notes", required=False, type=str,
help="A note for the metric")
parser.add_argument("--clear-notes", dest="clear_notes", default=False,
action="store_const", const=True, help="Clear the notes")
def run(self, args: Namespace) -> None:
"""Callback to execute when the command is invoked."""
ref_id = EntityId.from_raw(args.ref_id)
collection_time = UpdateAction.change_to(ADate.from_str(args.collection_time)) \
if args.collection_time is not None else UpdateAction.do_nothing()
value = UpdateAction.change_to(args.value) if args.value is not None else UpdateAction.do_nothing()
notes: UpdateAction[Optional[str]]
if args.clear_notes:
notes = UpdateAction.change_to(None)
elif args.notes is not None:
notes = UpdateAction.change_to(args.notes)
else:
notes = UpdateAction.do_nothing()
self._command.execute(MetricEntryUpdateUseCase.Args(
ref_id=ref_id, collection_time=collection_time, value=value, notes=notes))
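# Example invocation of this command (the argument values are illustrative):
#   metric-entry-update --id 42 --value 80.5 --notes "morning weight"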
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
USD Opinion Editor widget implementations
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "[email protected]"
from Qt.QtCore import *
from Qt.QtWidgets import *
from pxr import Usd
from pxr.UsdQt._bindings import _DisplayGroupProxy, _PrimProxy, _AttributeProxy, _MetadataProxy
from pxr.UsdQt.opinionStackModel import _AttributeHandler, _PrimMetadataHandler
from pxr.UsdQt import valueDelegate, opinionStackModel
from artellapipe.libs.usd.core import usdqtutils
class OpinionEditor(QWidget, object):
    def __init__(self, delegate=None, parent=None):
super(OpinionEditor, self).__init__(parent=parent)
self._menu_bar = QMenuBar()
self._layout = QVBoxLayout()
self.setLayout(self._layout)
self._filter_line_edit = QLineEdit()
self._view = usdqtutils.SelectionEditTreeView()
        item_delegate = delegate if delegate else valueDelegate.ValueDelegate()
self._view.setItemDelegate(item_delegate)
self._view.setEditTriggers(
QAbstractItemView.CurrentChanged | QAbstractItemView.SelectedClicked | QAbstractItemView.EditKeyPressed)
self._view.setSelectionMode(QAbstractItemView.ExtendedSelection)
self._splitter = QSplitter(Qt.Vertical, self)
self._layout.addWidget(self._menu_bar)
self._layout.addWidget(self._filter_line_edit)
self._layout.addWidget(self._splitter)
self._splitter.addWidget(self._view)
self._setup_actions()
self._setup_options_menu()
self._setup_edit_menu()
self._setup_option_view_widget()
@property
def view(self):
return self._view
def launch_opinions_viewer(self, prim, handler):
self._opinion_viewer.launch(opinionStackModel.OpinionStackModel(prim, handler))
def set_source_model(self, model):
self._view.setModel(model)
self.reset_column_spanned()
def reset_column_spanned(self):
for index in self._traverse_all_descendents(QModelIndex()):
if type(index.internalPointer()) in (_DisplayGroupProxy, _PrimProxy):
self._view.setFirstColumnSpanned(index.row(), index.parent(), True)
def _traverse_all_descendents(self, index):
for i in range(self._view.model().rowCount(index)):
child_index = self._view.model().index(i, 0, index)
yield child_index
for descendent in self._traverse_all_descendents(child_index):
yield descendent
def _setup_actions(self):
pass
def _setup_options_menu(self):
self._options_menu = QMenu('Options')
self._menu_bar.addMenu(self._options_menu)
def _setup_edit_menu(self):
self._edit_menu = QMenu('Edit')
self._menu_bar.addMenu(self._edit_menu)
def _setup_option_view_widget(self):
self._opinion_viewer = OpinionStackWidget()
self._options_menu.hide()
self._splitter.addWidget(self._opinion_viewer)
class OpinionStackWidget(QWidget, object):
def __init__(self, parent=None):
super(OpinionStackWidget, self).__init__(parent=parent)
self._toolbar = QToolBar()
self._toolbar.addWidget(QLabel('Opinion Stack'))
self._toolbar.addSeparator()
self._show_all_action = self._toolbar.addAction('Show All')
self._show_all_action.setCheckable(True)
self._close_action = self._toolbar.addAction('Close')
self._show_all_action.toggled.connect(self._on_show_all_toggled)
self._close_action.triggered.connect(self._on_close)
self._opinion_filter = opinionStackModel.OpinionStackFilter()
self._view = QTreeView()
self._view.setModel(self._opinion_filter)
self._layout = QVBoxLayout()
self.setLayout(self._layout)
self._layout.addWidget(self._toolbar)
self._layout.addWidget(self._view)
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding)
def launch(self, model):
self._opinion_filter.setSourceModel(model)
self.show()
def close_(self):
self.hide()
self._opinion_filter.setSourceModel(None)
def _on_show_all_toggled(self, checked):
self._opinion_filter.SetShowFullStack(checked)
def _on_close(self):
self.close_()
class OpinionController(QObject, object):
def __init__(self, model, editor, parent=None):
super(OpinionController, self).__init__(parent)
self._model = model
self._editor = editor
self._editor.view.doubleClicked.connect(self._on_double_clicked)
def reset_prims(self, prims):
self._model.ResetPrims(prims)
        self._editor.reset_column_spanned()
def _on_double_clicked(self, index):
proxy = self._model.GetProxyForIndex(index)
if type(proxy) == _AttributeProxy:
if proxy.GetSize() == 1:
attributes = proxy.GetAttributes()
attribute = attributes[0]
self._editor.launch_opinions_viewer(
attribute.GetPrim(), _AttributeHandler(attribute.GetName(), Usd.TimeCode.Default()))
elif type(proxy) == _MetadataProxy:
if proxy.GetSize() == 1:
objects = proxy.GetObjects()
obj = objects[0]
if type(obj) == Usd.Prim:
self._editor.launch_opinions_viewer(obj, _PrimMetadataHandler(proxy.GetName()))
|
python
|
# 000
# 999
#
a=['one','two','three','four']
b=range(5)
c=(9,-1,2)
#one 0 9
#...
#four 4 2
S = [4,5,3]
def next_value(current, S):
"[0,0,0] -> next one [1,0,0]"
N = current[:]
i = 0
N[i] += 1
while N[i]==S[i]:
N[i] = 0
i += 1
if i==len(N):
break
N[i] += 1
return N
c =[0,0,0]
for i in range(60):
print(c)
c = next_value(c,S)
def product(S):
    """Yield every index combination for digit limits S, odometer style."""
    N = [0]*len(S)
    while True:
        yield N[:]
        # advance the odometer; stop once every position has wrapped around
        i = 0
        N[i] += 1
        while N[i]==S[i]:
            N[i] = 0
            i += 1
            if i==len(N):
                return
            N[i] += 1
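# Demo of the completed generator above: it yields the same 60 combinations
# as the manual next_value() loop and then stops on its own.
for combo in product(S):
    print(combo)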
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of the pyfixp project hosted at https://github.com/chipmuenk/pyfixp
#
# Copyright © Christian Muenker
# Licensed under the terms of the MIT License
# (see file LICENSE.txt in root directory for details)
"""
Store the version number here for setup.py and pyfixp.py
"""
__version__ = '0.9.0'
|
python
|
class Card():
def __init__(self, id: int, img: str, owner, position: int):
""" Card defines a card.
Args:
id (int): card id (incremental int)
img (str): path of the card img
owner (int): card owner player, or None if in the deck/used
position (int): when it is played, the position to vote
removed (bool): if the card is used in this turn, so removed before next
"""
self.id = id
self.img = img
self.owner = owner
self.position = position
self.removed = False
def __str__(self):
text = str(self.id) + " "
text += self.img + " "
#text += str(self.owner) + " "
text += str(self.position) + " "
return text
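# Minimal usage sketch of the class above (the values are illustrative):
# card = Card(id=1, img="cards/ace.png", owner=None, position=0)
# print(card)  # -> "1 cards/ace.png 0 "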
|
python
|
from django.urls import path
from .views import (
RideListView,
RideDetailView,
RideCreateView,
RideUpdateView,
RideDeleteView,
OwnerRideListView,
ShareCreateView,
SharePickRideListView,
ShareRideListView,
DriverListView,
ShareUpdateView
)
from . import views
from .models import Ride, Share
from django.contrib.auth import views as auth_views
app_name = 'ride'
urlpatterns = [
path('', auth_views.LoginView.as_view(template_name='users/login.html')),
path('home/', views.home, name='ride-home'),
# path('about/', views.about, name='ride-about'),
path('owner/', views.owner, name='owner-home'),
path('sharer/', views.sharer, name='sharer-home'),
path('driver/', views.driver, name='driver-home'),
path('owner/request/', RideCreateView.as_view(), name='owner-request'),
path('driver/request/', DriverListView.as_view(), name='driver-request'),
path('owner/view/', OwnerRideListView.as_view(), name='owner-view'),
path('ride/<int:pk>/', RideDetailView.as_view(), name='ride-detail'),
path('owner/view/<int:pk>/update/', RideUpdateView.as_view(), name='owner-update'),
path('owner/view/<int:pk>/delete/', RideDeleteView.as_view(), name='owner-delete'),
path('driver/<int:ride_id>/confirm/', views.driver_confirm, name='driver-confirm'),
path('driver/<int:ride_id>/complete/', views.driver_complete, name='driver-complete'),
path('sharer/request/', ShareCreateView.as_view(), name='share-request'),
path('sharer/list/', SharePickRideListView.as_view(), name='share-list'),
path('sharer/<int:ride_id>/join/', views.share_join, name='share-join'),
path('sharer/<int:ride_id>/cancel/', views.share_cancel, name='share-cancel'),
# path('sharer/view/', ShareRideListView.as_view(), name='share-view'),
path('sharer/view/', views.share_view, name='share-view'),
path('sharer/<int:pk>/update/', ShareUpdateView.as_view(), name='share-update'),
path('driver/view/', views.driver_view, name='driver-view'),
]
|
python
|
##############################################################
# Fixing mismatches in the parsing of doccano output
# The script also does not claim to be a reusable solution,
# instead it merely adapts the doccano output files to correspond
# to the entity split style of LitBank.
# Input: pandas dataframe, filename
# Output: pandas dataframe
##############################################################
import pandas as pd
def fix_ner_label(df):
'''
The exported files contain a label represented by a number (id).
We replace those ids with the respective text of the label.
'''
for label in ['B-3','B-4','B-5','B-6','B-7','B-8','B-9','B-10','B-11','B-12','B-13','B-14']:
df = df.replace([label], 'B-PERSON')
for label in ['I-3','I-4','I-5','I-6','I-7','I-8','I-9','I-10','I-11','I-12','I-13','I-14']:
df = df.replace([label], 'I-PERSON')
for label in ['B-18','B-19','B-20','B-21','B-22','B-23','B-24','B-25','B-26','B-27','B-28','B-29']:
df = df.replace([label], 'B-PERX')
for label in ['I-18','I-19','I-20','I-21','I-22','I-23','I-24','I-25','I-26','I-27','I-28','I-29']:
df = df.replace([label], 'I-PERX')
return df
def fix_titles(df):
    '''
    Titles such as "Mr." are separated into "Mr" and ".".
    We correct this by merging them back together (also applies to St. and Dr.).
    '''
list_titles_Mr = df[df['original_word']=='Mr'].index.tolist()
list_titles_Mrs = df[df['original_word']=='Mrs'].index.tolist()
list_titles_St = df[df['original_word']=='St'].index.tolist()
list_titles_Dr = df[df['original_word']=='Dr'].index.tolist()
list_titles = list_titles_Mr + list_titles_Mrs + list_titles_St + list_titles_Dr #todo
list_to_drop = []
for i in list_titles:
if df['original_word'].iloc[i+1] == '.':
current_title = df.loc[i,'original_word']
df.loc[i,'original_word'] = current_title + "."
list_to_drop.append(i+1)
else:
continue
    df = df.drop(df.index[list_to_drop])
return df
def add_layer(df):
'''
For the cases where a token is both a (part of) PERSON and PERX entity,
we need a separate column, in which we can add the second entity type.
By default the second layer is O.
'''
df['m_ner'] = 'O'
return df
def fix_parsing(df, filename):
'''
    The parsing sometimes has led to different tokens, which we fix in this step.
'''
list_to_drop = []
if filename == "AliceInWonderland.jsonl":
for i in df[df['original_word']=="’"].index.tolist():
if df['original_word'].iloc[i+1] == 'll':
df.loc[i,'original_word'] = "’ll"
list_to_drop.append(i+1)
elif df['original_word'].iloc[i+1] == 've':
df.loc[i,'original_word'] = "’ve"
list_to_drop.append(i+1)
elif df['original_word'].iloc[i+1] == 's':
df.loc[i,'original_word'] = "’s"
list_to_drop.append(i+1)
elif df['original_word'].iloc[i-1] == 'Ma' and df['original_word'].iloc[i+1] == 'am':
df.loc[i,'original_word'] = "Ma’am"
list_to_drop.append(i-1)
list_to_drop.append(i+1)
elif df['original_word'].iloc[i+1] == 're':
df.loc[i,'original_word'] = "’re"
list_to_drop.append(i+1)
elif df['original_word'].iloc[i+1] == 'm':
df.loc[i,'original_word'] = "’m"
list_to_drop.append(i+1)
else:
continue
for i in df[df['original_word']=="wouldn"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["would","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="couldn"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["could","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="didn"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["did","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="thing."].index.tolist():
df.loc[i,'original_word'] = "thing"
df.loc[i+1,'original_word'] = "."
elif filename == "Emma.jsonl":
for i in df[df['original_word']=="_them_"].index.tolist():
fixed_words = ["_","them","_"]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="consciousness."].index.tolist():
fixed_words = ["consciousness","."]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="large."].index.tolist():
fixed_words = ["large","."]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="_We_"].index.tolist():
fixed_words = ["_","We","_"]
df.loc[i,'original_word'] = fixed_words
elif filename == "Frankenstein.jsonl":
list_issues = df[df['original_word']=="R"].index.tolist()
for i in list_issues:
if df['original_word'].iloc[i+1] == '.':
df.loc[i,'original_word'] = "R."
list_to_drop.append(i+1)
else:
continue
elif filename == "DavidCopperfield.jsonl":
for i in df[df['original_word']=="o"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 'clock':
df.loc[i,'original_word'] = "o’clock"
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="don"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["do","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="Don"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["Do","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="’"].index.tolist():
if df['original_word'].iloc[i+1] == 's':
df.loc[i,'original_word'] = "’s"
list_to_drop.append(i+1)
else:
continue
for i in df[df['original_word']=="couldn"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["could","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
elif filename == "PrideAndPrejudice.jsonl":
for i in df[df['original_word'].str.match('_[a-zA-Z]+_')==True].index.tolist():
fixed_words = ["_",df.loc[i,'original_word'][1:-1],"_"]
df.loc[i,'original_word'] = fixed_words
elif filename == "Ulysses.jsonl":
for i in df[df['original_word'].str.match('—[a-zA-Z]+')==True].index.tolist():
fixed_words = ["—",df.loc[i,'original_word'][1:]]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="’"].index.tolist():
if df['original_word'].iloc[i+1] == 's':
df.loc[i,'original_word'] = "’s"
list_to_drop.append(i+1)
else:
continue
for i in df[df['original_word']=="hasn"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["has","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="isn"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["is","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="can"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["ca","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="don"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["do","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="Isn"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["Is","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="’"].index.tolist():
if df['original_word'].iloc[i+1] == 're':
df.loc[i,'original_word'] = "’re"
list_to_drop.append(i+1)
elif df['original_word'].iloc[i+1] == 'm':
df.loc[i,'original_word'] = "’m"
list_to_drop.append(i+1)
elif df['original_word'].iloc[i+1] == 'll':
df.loc[i,'original_word'] = "’ll"
list_to_drop.append(i+1)
else:
continue
for i in df[df['original_word']=="won"].index.tolist():
if df['original_word'].iloc[i+1] == '’' and df['original_word'].iloc[i+2] == 't':
fixed_words = ["wo","n’t"]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
list_to_drop.append(i+2)
else:
continue
for i in df[df['original_word']=="...."].index.tolist():
if df['original_word'].iloc[i+1] == 'He':
df.loc[i,'original_word'] = "..."
else:
continue
for i in df[df['original_word']=="g"].index.tolist():
if df['original_word'].iloc[i+1] == '.':
df.loc[i,'original_word'] = "g."
list_to_drop.append(i+1)
else:
continue
for i in df[df['original_word']=="p"].index.tolist():
if df['original_word'].iloc[i+1] == '.':
df.loc[i,'original_word'] = "p."
list_to_drop.append(i+1)
else:
continue
for i in df[df['original_word']=="i"].index.tolist():
if df['original_word'].iloc[i+1] == '.':
df.loc[i,'original_word'] = "i."
list_to_drop.append(i+1)
else:
continue
elif filename == "HuckleberryFinn.jsonl":
for i in df[df['original_word']=="sumf'n"].index.tolist():
if df['original_word'].iloc[i+1] == '.':
fixed_words = ["sumf","'","n."]
df.loc[i,'original_word'] = fixed_words
list_to_drop.append(i+1)
else:
continue
elif filename == "Dracula.jsonl":
for i in df[df['original_word']=="_3"].index.tolist():
fixed_words = ["_","3"]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="Bistritz._"].index.tolist():
fixed_words = ["Bistritz",".","_"]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="P"].index.tolist():
if df['original_word'].iloc[i+1] == '.':
df.loc[i,'original_word'] = "P."
list_to_drop.append(i+1)
else:
continue
for i in df[df['original_word']=="_Mem._"].index.tolist():
fixed_words = ["_","Mem",".","_"]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="``"].index.tolist():
df.loc[i,'original_word'] = '"'
for i in df[df['original_word']=="''"].index.tolist():
df.loc[i,'original_word'] = '"'
for i in df[df['original_word']=="Friend."].index.tolist():
fixed_words = ["Friend","."]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="_4"].index.tolist():
fixed_words = ["_","4"]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word']=="May._"].index.tolist():
fixed_words = ["May",".","_"]
df.loc[i,'original_word'] = fixed_words
elif filename == "VanityFair.jsonl":
for i in df[df['original_word']=="``"].index.tolist():
df.loc[i,'original_word'] = '"'
for i in df[df['original_word']=="''"].index.tolist():
df.loc[i,'original_word'] = '"'
elif filename == "OliverTwist.jsonl":
for i in df[df['original_word'].str.match("'[a-zA-Z]+")==True].index.tolist():
if df['original_word'].iloc[i] in ["'s","'em","'ll","'S"]:
continue
else:
fixed_words = ["'",df.loc[i,'original_word'][1:]]
df.loc[i,'original_word'] = fixed_words
for i in df[df['original_word'].str.match('_[a-zA-Z]+_')==True].index.tolist():
fixed_words = ["_",df.loc[i,'original_word'][1:-1],"_"]
df.loc[i,'original_word'] = fixed_words
elif filename == "TheCallOfTheWild.jsonl":
for i in df[df['original_word']=="'m"].index.tolist():
if df['original_word'].iloc[i-1] == "'":
list_to_drop.append(i-1)
for i in df[df['original_word']=="'Frisco"].index.tolist():
fixed_words = ["'","Frisco"]
df.loc[i,'original_word'] = fixed_words
    df = df.drop(df.index[list_to_drop])
df = df.assign(original_word=df['original_word']).explode('original_word')
df = df.reset_index(drop=True)
return df
def fix_inconsistencies(df, filename):
'''
Fixes the inconsistencies between LitBank and Dekker et al.,
which originate from the use of the raw texts from LitBank.
(E.g. occurrence of the tokens "glasses" "!", which does not exist in Dekker et al.)
'''
if filename == "MobyDick.jsonl":
df = df.drop(df.index[[381,382,539]])
df = df.reset_index(drop=True)
elif filename == "Frankenstein.jsonl":
df.loc[2302,'original_word'] = '"'
df.loc[2308,'original_word'] = '"'
return df
def fix_all_in_one(df, filename):
'''
Run all steps by calling one function
'''
df = fix_ner_label(df)
df = fix_parsing(df, filename)
df = fix_titles(df)
df = add_layer(df)
df = df.reset_index(drop=True)
df = fix_inconsistencies(df, filename)
return df
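# Usage sketch (the dataframe comes from the parsed doccano export; the file
# name below is one of the titles handled in fix_parsing above):
# df = fix_all_in_one(df, "AliceInWonderland.jsonl")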
|
python
|
# -*- coding: utf-8 -*-
# @Date : 2017-07-18 13:26:17
# @Author : lileilei
'''
Wrappers for importing test interfaces and test cases
'''
import xlrd
def pasre_inter(filename):  # import the interface definitions
file=xlrd.open_workbook(filename)
me=file.sheets()[0]
nrows=me.nrows
ncol=me.ncols
project_name=[]
model_name=[]
interface_name=[]
interface_url=[]
interface_meth=[]
interface_par=[]
interface_header=[]
interface_bas=[]
jiekou_bianhao=[]
interface_type=[]
for i in range(2,nrows):
jiekou_bianhao.append(me.cell(i,0).value)
project_name.append(me.cell(i,2).value)
model_name.append(me.cell(i,3).value)
interface_name.append(me.cell(i,1).value)
interface_url.append(me.cell(i,4).value)
interface_type.append(me.cell(i,5).value)
interface_header.append(me.cell(i,6).value)
interface_meth.append(me.cell(i,7).value)
interface_par.append(me.cell(i,8).value)
interface_bas.append(me.cell(i,9).value)
return jiekou_bianhao,interface_name,project_name,model_name,interface_url,\
interface_header,interface_meth,interface_par,interface_bas,interface_type
# Import test cases
def paser_interface_case(filename):
file=xlrd.open_workbook(filename)
me=file.sheets()[0]
nrows=me.nrows
ncol=me.ncols
project_name=[]
model_name=[]
interface_name=[]
interface_url=[]
interface_meth=[]
interface_par=[]
interface_header=[]
interface_bas=[]
jiekou_bianhao=[]
interface_type=[]
is_save_result=[]
yilai_is=[]
yilai=[]
yilai_ziduan=[]
is_cha_data=[]
data_sql=[]
paser_base=[]
for i in range(2,nrows):
jiekou_bianhao.append(me.cell(i,0).value)
project_name.append(me.cell(i,2).value)
model_name.append(me.cell(i,3).value)
interface_name.append(me.cell(i,1).value)
interface_url.append(me.cell(i,4).value)
interface_type.append(me.cell(i,5).value)
interface_header.append(me.cell(i,6).value)
interface_meth.append(me.cell(i,7).value)
interface_par.append(me.cell(i,8).value)
interface_bas.append(me.cell(i,9).value)
is_save_result.append(me.cell(i,10).value)
yilai_is.append(me.cell(i,11).value)
yilai.append(me.cell(i,12).value)
yilai_ziduan.append(me.cell(i,13).value)
is_cha_data.append(me.cell(i,14).value)
data_sql.append(me.cell(i,15).value)
paser_base.append(me.cell(i,16).value)
return jiekou_bianhao,interface_name,project_name,model_name,interface_url,\
interface_header,interface_meth,interface_par,interface_bas,interface_type,\
is_save_result,yilai_is,yilai,yilai_ziduan,is_cha_data,data_sql,paser_base
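# Usage sketch (the workbook names are illustrative; both helpers expect the
# data rows to start at spreadsheet row index 2, as coded above):
# interfaces = pasre_inter('interfaces.xls')
# cases = paser_interface_case('cases.xls')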
|
python
|
# Create a function named same_name() that has two parameters named your_name and my_name.
# If our names are identical, return True. Otherwise, return False.
def same_name(your_name, my_name):
if your_name == my_name:
return True
else:
return False
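# Example calls (the names are illustrative):
print(same_name("Colby", "Colby"))  # => True
print(same_name("Tina", "Amber"))   # => False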
|
python
|
import numpy as np
import json
def _reject_outliers(data, m=5.):
    d = np.abs(data - np.median(data))
    mdev = np.median(d)
    # guard against mdev == 0: an all-zeros score keeps every element and
    # avoids indexing the array with a bare boolean scalar
    s = d / mdev if mdev else np.zeros(len(d))
    return data[s < m]
def reject(input_):
for line in input_:
d = json.loads(line)
print(_reject_outliers(np.array(d)).tolist())
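if __name__ == "__main__":
    # Minimal demo with made-up numbers: the extreme 50.0 is rejected.
    sample = np.array([1.0, 1.1, 0.9, 1.05, 50.0])
    print(_reject_outliers(sample).tolist())  # [1.0, 1.1, 0.9, 1.05]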
|
python
|
# -*- coding: utf-8 -*-
from django.template import loader
from django.utils import formats
from django.utils.text import Truncator
import django_tables2 as tables
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from ..html import AttributeDict, Icon
def merge_attrs(base_attrs, attrs):
"""
Merge attrs based in attribute dict.
"""
td = AttributeDict(base_attrs.get('td', {}))
th = AttributeDict(base_attrs.get('th', {}))
# merge td
for key, value in attrs.get('td', {}).items():
td.attr(key, value)
# merge th
for key, value in attrs.get('th', {}).items():
th.attr(key, value)
return {'td': td, 'th': th}
class CheckBoxColumn(tables.CheckBoxColumn):
BASE_ATTRS = {
'th': {"width": "40px"},
'td': {"class": "center"}
}
def __init__(self, attrs=None, orderable=False, **extra):
attrs = merge_attrs(CheckBoxColumn.BASE_ATTRS, attrs or {})
super(CheckBoxColumn, self).__init__(attrs=attrs, orderable=orderable, **extra)
@property
def header(self):
default = {'type': 'checkbox'}
general = self.attrs.get('input')
specific = self.attrs.get('th__input')
attrs = AttributeDict(default, **(specific or general or {}))
attrs.update({"class": "ace"})
return mark_safe('<label><input %s/><span class="lbl"></span></label>' % attrs.as_html())
def render(self, value, bound_column):
default = {
'type': 'checkbox',
'name': bound_column.name,
'value': value
}
general = self.attrs.get('input')
specific = self.attrs.get('td__input')
attrs = AttributeDict(default, **(specific or general or {}))
attrs.update({"class": "ace", "data-toggle": "checkbox"})
return mark_safe('<label><input %s/><span class="lbl"></span></label>' % attrs.as_html())
class BooleanColumn(tables.BooleanColumn):
BASE_ATTRS = {'th': {'width': '100px'}, 'td': {'class': 'center'}}
DEFAULT_ICONS = ['check-circle', 'times-circle']
def __init__(self, null=False, attrs=None, orderable=False, **kwargs):
attrs = merge_attrs(BooleanColumn.BASE_ATTRS, attrs or {})
super(BooleanColumn, self).__init__(null=null, attrs=attrs, orderable=orderable, **kwargs)
def get_icon(self, value):
index = int(not value)
text = self.yesno[index]
text = BooleanColumn.DEFAULT_ICONS[index] if text in (u'✔', u'✘', None, '') else text
attrs = AttributeDict({'class': 'bigger-130 ace-icon'})
attrs.add_class('green' if value else 'red')
return Icon(text, attrs=attrs)
def render(self, value, record, bound_column):
icon = self.get_icon(value)
return icon.as_html()
class IntegerColumn(tables.Column):
def __init__(self, format="%d", *args, **kwargs):
self.format = format
super(IntegerColumn, self).__init__(*args, **kwargs)
def render(self, value):
return self.format % int(value)
class IdColumn(IntegerColumn):
BASE_ATTRS = {"th": {"width": "100px"}, "td": {"class": "center"}}
def __init__(self, verbose_name=_("Code"), format="%05d", attrs=None, *args, **kwargs):
attrs = merge_attrs(IdColumn.BASE_ATTRS, attrs or {})
super(IdColumn, self).__init__(verbose_name=verbose_name, format=format, attrs=attrs, *args, **kwargs)
class ChoiceColumn(tables.Column):
def __init__(self, conf, *args, **kwargs):
super(ChoiceColumn, self).__init__(*args, **kwargs)
self._conf = conf
def _get_conf(self):
conf = {}
for key, value in self._conf.items():
attrs = AttributeDict()
conf[key] = {}
for k, v in value.items():
if k == 'icon':
icon = Icon(v) if isinstance(v, basestring) else v
conf[key][k] = icon.as_html()
elif k == 'color':
attrs.add_class(v)
else:
attrs.attr(k, v)
conf[key]['attrs'] = attrs
return conf
conf = property(_get_conf)
def render(self, value, record, bound_column):
v = getattr(record, bound_column.accessor)
conf = self.conf.get(v, None)
# add a tip text
attrs = conf.get('attrs', AttributeDict())
attrs.add_class('tip')
attrs.attr('title', value)
template = loader.get_template_from_string("<span {{ attrs }}>{{ value }}</span>")
return template.render(loader.Context({
'value': conf.get('icon', None) or value,
'attrs': attrs.as_html()
}))
class CheckBoxColumn(tables.CheckBoxColumn):
def __init__(self, attrs=None, accessor=tables.A("pk"), orderable=False, **extra):
attrs = attrs or {}
attrs.update({
"th": {"width": "40px", "class": "center"},
"td": {"class": "center"}
})
super(CheckBoxColumn, self).__init__(attrs=attrs, accessor=accessor, orderable=False, **extra)
@property
def header(self):
default = {'type': 'checkbox'}
general = self.attrs.get('input')
specific = self.attrs.get('th__input')
attrs = AttributeDict(default, **(specific or general or {}))
attrs.update({"class": "ace"})
return mark_safe('<label><input %s/><span class="lbl"></span></label>' % attrs.as_html())
def render(self, value, bound_column): # pylint: disable=W0221
default = {
'type': 'checkbox',
'name': bound_column.name,
'value': value
}
general = self.attrs.get('input')
specific = self.attrs.get('td__input')
attrs = AttributeDict(default, **(specific or general or {}))
attrs.update({"class": "ace", "data-toggle": "checkbox"})
return mark_safe('<label><input %s/><span class="lbl"></span></label>' % attrs.as_html())
class LinkColunmWithPerm(tables.LinkColumn):
def __init__(self, perm_name, *args, **kwargs):
self.perm_name = perm_name
kwargs["args"] = kwargs.get("args", None) or [tables.A("pk")] # url args
return super(LinkColunmWithPerm, self).__init__(*args, **kwargs)
def render(self, value, record, bound_column):
if self.perm_name == "":
return value
else:
return super(LinkColunmWithPerm, self).render(value, record, bound_column)
class CurrencyColumn(tables.Column):
def render(self, value):
return formats.number_format(value, decimal_pos=2)
class TruncateCharsColumn(tables.Column):
def __init__(self, length=30, *args, **kwargs):
self.length = length
super(TruncateCharsColumn, self).__init__(*args, **kwargs)
def render(self, value):
return Truncator(value).chars(self.length)
class TruncateWordsColumn(tables.Column):
def __init__(self, words=5, *args, **kwargs):
self.words = words
super(TruncateWordsColumn, self).__init__(*args, **kwargs)
def render(self, value):
return Truncator(value).words(self.words, truncate=' ...')
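# Illustrative table definition using the columns above (the table and its
# fields are hypothetical, not part of this module):
# class OrderTable(tables.Table):
#     id = IdColumn()
#     active = BooleanColumn()
#     total = CurrencyColumn(verbose_name=_("Total"))
#     notes = TruncateWordsColumn(words=8)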
|
python
|
"""
Implement a Caesar cipher, both encoding and decoding. The key is an integer from 1 to 25.
This cipher rotates the letters of the alphabet (A to Z).
The encoding replaces each letter with the 1st to 25th next letter in the alphabet (wrapping Z to A).
So key 2 encrypts "HI" to "JK", but key 20 encrypts "HI" to "BC".
This simple "mono-alphabetic substitution cipher" provides almost no security,
because an attacker who has the encoded message can either use frequency analysis to guess the key,
or just try all 25 keys.
***Problem found at: https://github.com/karan/Projects/blob/master/README.md#security***
"""
from exceptions import InvalidKeyException, InvalidModeException
LETTERS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def caesar_cipher(key, string, mode):
"""
A Caesarean cipher is used on a string given the provided key.
:param key: int in range [1, 25]
:param string: string to encode/decode
:param mode: selection of encryption (1) or decryption (2)
"""
# validate input
if type(key) != int:
raise TypeError("Input param 'key' was not type of int")
if type(mode) != int:
raise TypeError("Input param 'mode' was not type of int")
if type(string) != str:
raise TypeError("Input param 'string' was not type of str")
if key < 1 or key > 25:
raise InvalidKeyException
if string == "":
return ""
if mode < 1 or mode > 2:
raise InvalidModeException
# perform cipher
result = ""
string = string.upper()
if mode == 1:
for character in string:
if str.isalpha(character):
encrypted_char = encrypt(key, character)
result += LETTERS[encrypted_char]
else:
result += character
else:
for character in string:
if str.isalpha(character):
decrypted_char = decrypt(key, character)
result += LETTERS[decrypted_char]
else:
result += character
return result
def encrypt(key, letter):
return (LETTERS.index(letter) + key) % 26
def decrypt(key, letter):
return (LETTERS.index(letter) - key) % 26
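if __name__ == "__main__":
    # Quick demo of the docstring's example: key 2 maps "HI" to "JK"
    # (mode 1 encrypts, mode 2 decrypts).
    print(caesar_cipher(2, "HI", 1))  # -> "JK"
    print(caesar_cipher(2, "JK", 2))  # -> "HI"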
|
python
|
import re
from collections import Counter
ihaveadream = open('ihaveadream.txt', 'r')
read_dream = ihaveadream.read()
split = read_dream.split()
# strip non-letters and lowercase so the counts are case-insensitive
just_words = [re.sub('[^a-zA-Z]+', '', i).lower() for i in split]
just_long_words = [word for word in just_words if len(word) > 3]
# count each long word and print them by descending frequency
word_freq = Counter(just_long_words)
for word, freq in word_freq.most_common():
    print(word + ':' + str(freq))
|
python
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
import os
import sys
from glob import glob
import re
import json
import random
import shutil
import codecs
dones = [x for x in open("done.txt",'r').read().split("\n") if len(x)]
correct = json.loads(open("bad-corrected.json",'r').read())
# print(correct)
box = sys.argv[1]
print("DOING BOX: ",box)
chrome_options = Options()
# chrome_options.add_argument("--headless")
chrome_options.add_argument("safebrowsing-disable-extension-blacklist")
chrome_options.add_argument("--safebrowsing-disable-download-protection")
chrome_options.add_experimental_option("prefs", {'safebrowsing.enabled': 'false'})
filenames = [x.split(".")[0].split("/") for x in str(open("canonical_filename_order.txt",'r').read()).split("\n") if len(x)]
filenames = [x[1] for x in filenames if x[1] not in dones and x[1] in correct and correct[x[1]] != "NO"]
# filenames = [x for x in correct if correct[x] != "NO"]
print(filenames)
print("NUM STUFF IN BOX: ",len(filenames))
#filenames = [x.split("/")[1].split(".")[0] for x in str(open("canonical_filename_order.txt",'r').read()).split("\n") if len(x)]
def init_driver(path=os.path.join(os.getcwd(),"chromedriver")):
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=path)
return driver
def parse_info_html(html):
url = html.split('href="')[1].split('">')[0]
creator = html.split('creator-link\">')[1].split('</a>')[0]
date = html.split('Date:</dt><dd class="search-result__value">')[1].split("</dd>")[0]
desc = html.split('">')[2].split('</a>')[0]
return url,desc,creator,date
def parse_accession_number(html):
return html.split('Accession number:')[1].split('object__attributes-value">')[1].split('</dd>')[0]
driver = init_driver()
time.sleep(3)
for idx,fname in enumerate(filenames):
print("now processing ",fname)
entry = ("no description","no date","no accession number","no object id")
iurl = correct[fname]
print(iurl)
try:
        driver.get(iurl)
time.sleep(2)
desc = driver.find_elements_by_class_name("object__title")[0].get_attribute('innerHTML')
date = driver.find_elements_by_class_name("object__date")[0].find_elements_by_class_name("level-3")[0].get_attribute('innerHTML')
obj = driver.find_elements_by_class_name("object")[1].get_attribute('innerHTML')
acc = parse_accession_number(obj)
print(desc,date,acc,iurl)
entry = (desc,date,acc,iurl.split("/")[-1])
    except Exception:
        print("ERROR: could not scrape metadata for", fname)
        print(sys.exc_info())
codecs.open("out/"+box+".txt",'a+',encoding='utf8').write(fname+"\t"+entry[0]+"\t"+entry[1]+"\t"+entry[2]+"\t"+entry[3]+"\n")
|
python
|
from future.utils import with_metaclass
from openpyxl_templates.exceptions import OpenpyxlTemplateException
from openpyxl_templates.utils import OrderedType, Typed
class TemplatedWorkbookNotSet(OpenpyxlTemplateException):
def __init__(self, templated_sheet):
super(TemplatedWorkbookNotSet, self).__init__(
"The sheet '%s' has no assosiated workbook. This should be done automatically by the TemplatedWorkbook."
% templated_sheet.sheetname
)
class WorksheetDoesNotExist(OpenpyxlTemplateException):
def __init__(self, templated_sheet):
super(WorksheetDoesNotExist, self).__init__(
"The workbook has no sheet '%s'." % templated_sheet.sheetname
)
class SheetnameNotSet(OpenpyxlTemplateException):
def __init__(self):
super(SheetnameNotSet, self).__init__(
"Sheetname not specified. This should be done automatically by the TemplatedWorkbook.")
class TemplatedWorksheet(with_metaclass(OrderedType)):
_sheetname = Typed("_sheetname", expected_type=str, allow_none=True)
active = Typed("active", expected_type=bool, value=False)
_workbook = None
template_styles = None
# order = ... # TODO: Add ordering to sheets either through declaration on workbook or here
def __init__(self, sheetname=None, active=None):
self._sheetname = sheetname or self._sheetname
self.active = active if active is not None else self.active
@property
def exists(self):
return self.sheetname in self.workbook
@property
def empty(self):
if not self.exists:
return True
return not bool(len(self.worksheet._cells))
@property
def worksheet(self):
if not self.exists:
self.workbook.create_sheet(self.sheetname)
return self.workbook[self.sheetname]
@property
def workbook(self):
if not self._workbook:
raise TemplatedWorkbookNotSet(self)
return self._workbook
@workbook.setter
def workbook(self, workbook):
self._workbook = workbook
@property
def sheet_index(self):
try:
return self.workbook.sheetnames.index(self.sheetname)
except ValueError:
raise WorksheetDoesNotExist(self)
def write(self, data):
        raise NotImplementedError()
# 'self.sheet_template.write(self.worksheet, self.templated_workbook.styles, data)
def read(self):
        raise NotImplementedError()
def remove(self):
if self.exists:
del self.workbook[self.sheetname]
# def activate(self):
# self.workbook.active = self.sheet_index
@property
def sheetname(self):
if not self._sheetname:
raise SheetnameNotSet()
return self._sheetname
@sheetname.setter
def sheetname(self, value):
self._sheetname = value
def __str__(self):
return self._sheetname or self.__class__.__name__
def __repr__(self):
return str(self)
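# Minimal usage sketch, assuming a plain openpyxl Workbook (illustrative only;
# in practice a TemplatedWorkbook assigns the workbook automatically):
# from openpyxl import Workbook
# sheet = TemplatedWorksheet(sheetname="Data")
# sheet.workbook = Workbook()
# ws = sheet.worksheet  # creates the "Data" sheet on first access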
|
python
|
import hashlib, hmac, time
def compute_signature(message, secret):
message = message.encode('utf-8')
timestamp = str(int(time.time()*100)).encode('ascii')
hashdata = message + timestamp
signature = hmac.new(secret.encode('ascii'),
hashdata,
hashlib.sha256).hexdigest()
return {
'message': message,
'signature': signature,
'timestamp': timestamp
}
def verify_signature(signed_message, secret):
timestamp = signed_message['timestamp']
expected_signature = signed_message['signature']
message = signed_message['message']
hashdata = message + timestamp
signature = hmac.new(secret.encode('ascii'),
hashdata,
hashlib.sha256).hexdigest()
    # constant-time comparison avoids leaking information through timing
    return hmac.compare_digest(signature, expected_signature)
signed_msg = compute_signature('Hello World', 'very_secret')
print(
verify_signature(signed_msg, 'very_secret')
)
signed_msg['message'] = b'Hello Boat'
print(
verify_signature(signed_msg, 'very_secret')
)
|
python
|
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
TEASERSWRAP_ALLOW_CHILDREN = getattr(
settings,
'TEASERS_TEASERSWRAP_ALLOW_CHILDREN',
True
)
TEASERSWRAP_PLUGINS = getattr(
settings,
'TEASERS_TEASERSWRAP_PLUGINS',
[
'TeaserPlugin',
]
)
TEASERSWRAP_FIELDSETS = getattr(
settings,
'TEASERS_TEASERSWRAP_FIELDSETS',
[
(_('content'), {
'classes': ['section'],
'fields': [
'name',
],
}),
(_('settings'), {
'classes': [
'section',
'collapse',
],
'fields': [
'css_class',
'width',
],
}),
]
)
TEASERSWRAP_CSS_CLASSES = getattr(
settings,
'TEASERS_TEASERSWRAP_CSS_CLASSES',
[
('', _('None')),
]
)
TEASERSWRAP_HEIGHTS = getattr(
settings,
'TEASERS_TEASERSWRAP_HEIGHTS',
[
('', _('auto')),
]
)
TEASERSWRAP_WIDTHS = getattr(
settings,
'TEASERS_TEASERSWRAP_WIDTHS',
[
('', _('auto')),
]
)
TEASER_FIELDSETS = getattr(
settings,
'TEASERS_TEASER_FIELDSETS',
[
(_('Page (auto content)'), {
'classes': ['section'],
'fields': [
'link_cms',
],
}),
(_('Content'), {
'classes': ['section'],
'fields': [
'name',
'body',
'filer_image',
'filer_icon',
],
}),
]
)
TEASER_ALLOW_CHILDREN = getattr(
settings,
'TEASERS_TEASER_ALLOW_CHILDREN',
False
)
TEASER_PLUGINS = getattr(
settings,
'TEASERS_TEASER_PLUGINS',
[]
)
TEASER_LINK_MODEL = getattr(
settings,
'TEASERS_TEASER_LINK_MODEL',
None
)
TEASER_LINK_FIELDS = getattr(
settings,
'TEASERS_TEASER_LINK_FIELDS',
None
)
TEASER_PAGE_INFO_MODELS = getattr(
settings,
'TEASERS_TEASER_PAGE_INFO_MODELS',
None
)
|
python
|
import torch
from .models_1d import *
from .models_2d import *
from .models_3d import *
def M1D(config):
if config.model_module == 'V2SD':
model = V2StochasticDepth(n=config.channels,
proba_final_layer=config.proba_final_layer,
sdrop=config.sdrop,
use_raw_wave=config.use_raw_wave,
avr_w0_path=config.avr_w0_path)
elif config.model_module == "V2S":
model = ModelIafossV2S(n=config.channels,
sdrop=config.sdrop,
use_raw_wave=config.use_raw_wave,
avr_w0_path=config.avr_w0_path)
elif config.model_module == "Model1DCNNGEM":
model = Model1DCNNGEM(initial_channnels=config.channels)
elif config.model_module == 'V2SDCBAM':
model = V2SDCBAM(n=config.channels,
proba_final_layer=config.proba_final_layer,
reduction=config.reduction,
CBAM_SG_kernel_size=config.CBAM_SG_kernel_size
)
return model
def M2D(config):
if config.model_module == 'resnet34':
model = Model_2D(encoder=config.encoder,
use_raw_wave=config.use_raw_wave,
avrSpecDir=config.inputDataFolder)
return model
def M3D(config):
if config.model_module == "M3D":
fold = config.fold
config.model_module = config.model_1D
model_1d = M1D(config)
if config.model_1D_pretrain_dir is not None and fold is not None:
path = f"{config.model_1D_pretrain_dir}/Fold_{fold}_best_model.pth"
print("Loading model from path: ", path)
checkpoint = torch.load(path, map_location='cuda:0')
model_1d.load_state_dict(checkpoint['model_state_dict'])
model_1d.use_raw_wave = False
config.model_module = config.model_2D
model_2d = M2D(config)
if config.model_2D_pretrain_dir is not None and fold is not None:
path = f"{config.model_2D_pretrain_dir}/Fold_{fold}_best_model.pth"
print("Loading model from path: ", path)
checkpoint = torch.load(path, map_location='cuda:0')
model_2d.load_state_dict(checkpoint['model_state_dict'])
model_2d.use_raw_wave = False
model = Combined1D2D(model_1d, model_2d,
emb_1d=config.model_1D_emb,
emb_2d=config.model_2D_emb,
first=config.first,
ps=config.ps,
avrSpecDir=config.inputDataFolder)
model.freeze_conv(req_grad=False)
config.model_module = "M3D"
return model
def getModel(config):
if config.model_module in ['resnet34']:
return M2D(config)
elif config.model_module in ['M3D']:
return M3D(config)
else:
return M1D(config)
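# Illustrative sketch of driving the factory (the attribute names mirror what
# M2D() reads above; the config object itself is an assumption):
# from types import SimpleNamespace
# config = SimpleNamespace(model_module='resnet34', encoder='resnet34',
#                          use_raw_wave=True, inputDataFolder='./input')
# model = getModel(config)  # dispatches to M2D for 'resnet34'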
|
python
|
"""Tests for the :mod:`campy.datastructures.sparsegrid` module."""
|
python
|
# -*- coding: utf-8 -*-
#
# GOM-Script-Version: 6.2.0-6
# Anja Cramer / RGZM
# Timo Homburg / i3mainz
# Laura Raddatz / i3mainz
# Informationen von atos-Projektdateien (Messdaten: *.amp / *.session)
# 2020/2021
import gom
import xml, time, os, random
import math
import datetime
## Indicates if only properties for which a URI has been defined in the JSON dict should be considered for the TTL export .
#includeonlypropswithuri=False
includeonlypropswithuri=True
# python script version
script_name = "atos-v62_3dcap_metadata.py"
script_label = "ATOS v6.2 3DCAP Metadata Script"
github_release = "0.1.2"
####################### TTL Export #############################
## Mapping of datatypes present in the JSON dictionary to datatypes present in the TTL file .
datatypes={"float":"xsd:float","double":"xsd:double","str":"xsd:string","date":"xsd:date","int":"xsd:integer","bool":"xsd:boolean","NoneType":"xsd:string", "dateTime":"xsd:dateTime", "list":"xsd:list"}
## Namespace for classes defined in the resulting ontology model .
ontologynamespace="http://objects.mainzed.org/ont#"
## Prefix name for the data namespace .
dataprefix="ex"
# variable python script
script_uri=str(dataprefix)+":"+script_name
## Prefix name for the class namespace .
ontologyprefix="giga"
toolnamespace="http://objects.mainzed.org/tool/"
toolpropnamespace="http://objects.mainzed.org/tool/atos/62/"
## Namespace for instances defined in the TTL export .
datanamespace="http://objects.mainzed.org/data/"
## Namespace for the exif vocabulary .
exifnamespace="http://www.w3.org/2003/12/exif/"
## Namespace for the ontology of units of measure (om) .
om="http://www.ontology-of-units-of-measure.org/resource/om-2/"
## Prefix name for the rdfs namespace .
rdfs='http://www.w3.org/2000/01/rdf-schema#'
##Prefix name for the gigamesh namespace
giganamespace="http://www.gigamesh.eu/ont#"
# Prefix name for prov-o namespace .
provnamespace = "http://www.w3.org/ns/prov#"
#atos 2016
referencepointid="reference_point_id"
globalreferencepointid="point_id"
## Provenance dictionary: Might be used to change the provenance vocabulary .
provenancedict_prov_o={
"entity":"prov:Entity",
"activity":"prov:Activity",
"agent":"prov:Agent",
"used":"prov:used",
"person":"foaf:Person"
}
## Provenance dictionary cidoc crm: Might be used to change the provenance vocabulary .
provenancedict_crmdig={
"entity":"http://www.cidoc-crm.org/cidoc-crm/D1",
"activity":"http://www.cidoc-crm.org/cidoc-crm/D11",
"agent":"prov:Agent",
"used":"prov:used",
"person":"http://www.cidoc-crm.org/cidoc-crm/D21"
}
sensorTypeToClass={
"ATOS III Rev.01": str(ontologyprefix)+":StructuredLightScanner",
"ATOS Core": str(ontologyprefix)+":StructuredLightScanner",
"ATOS II (first generation)": str(ontologyprefix)+":StructuredLightScanner",
"ATOS III Rev.02": str(ontologyprefix)+":StructuredLightScanner",
}
provenancedict=provenancedict_prov_o
## Key for the german label as present in the JSON dictionary .
germanlabel="key_deu"
## Key for the english label as present in the JSON dictionary .
englishlabel="key_eng"
artifactURI=None
## Header for the TTL export which includes all necessary namespaces.
ttlstringhead="@prefix "+str(ontologyprefix)+": <"+str(ontologynamespace)+"> .\n@prefix geo: <http://www.opengis.net/ont/geosparql#> .\n@prefix "+str(dataprefix)+": <"+str(datanamespace)+"> .\n@prefix foaf: <http://xmlns.com/foaf/0.1/> .\n@prefix prov: <http://www.w3.org/ns/prov-o/> .\n@prefix rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> . \n@prefix om:<http://www.ontology-of-units-of-measure.org/resource/om-2/> .\n@prefix rdfs:<http://www.w3.org/2000/01/rdf-schema#> . \n@prefix owl:<http://www.w3.org/2002/07/owl#> . \n@prefix i3atos:<http://www.i3mainz.de/metadata/atos#> . \n@prefix dc:<http://purl.org/dc/terms/> .\n@prefix i3data:<http://www.i3mainz.de/data/grabbauten/> . \n@prefix i3:<http://www.i3mainz.de/ont#> . \n@prefix xsd:<http://www.w3.org/2001/XMLSchema#> . \n"
## Generates a random string in UUID format (hex groups of 8-4-4-4-12).
def generate_uuid():
    random_string = ''
    random_str_seq = "0123456789abcdef"
    uuid_format = [8, 4, 4, 4, 12]
    for n in uuid_format:
        for i in range(0, n):
            random_string += str(random_str_seq[random.randint(0, len(random_str_seq) - 1)])
        # separate the groups with a dash; the trailing one is stripped below
        random_string += '-'
    return random_string[:-1]
## Turns the first character of a String to uppercase .
# @param s The string to modify .
# @return a String with the first character in uppercase
def first_upper(s):
if len(s) == 0:
return s
else:
return s[0].upper() + s[1:]
## Turns the first character of a String to lowercase .
# @param s The string to modify .
# @return a String with the first character to lowercase
def first_lower(s):
if len(s) == 0:
return s
else:
return s[0].lower() + s[1:]
## Reads a TTL file, splits its header and body and merges it to the internal TTL set .
# @param filepath The filepath of the TTL file to read .
# @param ttlstring The set of triples to append to .
def readInputTTL(filepath,ttlstring):
    global ttlstringhead
    file1 = open(filepath, 'r')
    Lines = file1.readlines()
    for line in Lines:
        if line.startswith("@"):
            ttlstringhead+=line+"\n"
        else:
            ttlstring.add(line)
    file1.close()
## Extracts the ID of a previously created object to extend its provenance hierarchy .
# @param ttlstring The set of triples to append to .
# @param filterclass The class to use for filtering .
# @return a set of filtered triples
def filterLastId(ttlstring,filterclass):
concernedtriples=set()
for triple in ttlstring:
for filt in filterclass:
if filt in triple:
concernedtriples.add(triple)
    if len(concernedtriples)==0:
        return None
    if len(concernedtriples)==1:
        triple = next(iter(concernedtriples))
        if "rdf:type" in triple:
            return triple.split(" ")[0]
    return None
## Reads an artifact description given in a text file and converts its information to TTL .
# @param filepath the path of the text file to process
# @param ttlstring the set of triples to store the result in
def readInputTXTForArtifactDescription(filepath,ttlstring):
    global artifactURI
    file1 = open(filepath, 'r')
firstline = file1.readline()
if ";" in firstline:
entities=firstline.split(";")
if len(entities)>2:
if entities[0].startswith("http") and entities[2].startswith("http"):
ttlstring.add("<"+entities[0]+"> rdf:type <"+entities[2]+"> .\n")
ttlstring.add("<"+entities[0]+"> rdfs:label \""+entities[1]+" Artifact\"@en .\n")
ttlstring.add("<"+entities[2]+"> rdf:type owl:Class .\n")
ttlstring.add("<"+entities[2]+"> rdfs:subClassOf prov:Entity .\n")
elif entities[0].startswith("http") and not entities[2].startswith("http"):
ttlstring.add("<"+entities[0]+"> rdf:type "+entities[2]+" .\n")
ttlstring.add("<"+entities[0]+"> rdfs:label \""+entities[1]+" Artifact\"@en .\n")
ttlstring.add(entities[2]+" rdf:type owl:Class .\n")
ttlstring.add(entities[2]+" rdfs:subClassOf prov:Entity .\n")
elif not entities[0].startswith("http") and not entities[2].startswith("http"):
ttlstring.add("<"+datanamespace+entities[0]+"> rdf:type "+entities[2]+" .\n")
ttlstring.add("<"+datanamespace+entities[0]+"> rdfs:label \""+entities[1]+" Artifact\"@en .\n")
ttlstring.add(entities[2]+" rdf:type owl:Class .\n")
ttlstring.add(entities[2]+" rdfs:subClassOf prov:Entity .\n")
else:
ttlstring.add("<"+datanamespace+entities[0]+"> rdf:type <"+entities[2]+"> .\n")
ttlstring.add("<"+datanamespace+entities[0]+"> rdfs:label \""+entities[1]+" Artifact\"@en .\n")
ttlstring.add("<"+entities[2]+"> rdf:type owl:Class .\n")
ttlstring.add("<"+entities[2]+"> rdfs:subClassOf prov:Entity .\n")
else:
if entities[0].startswith("http"):
ttlstring.add("<"+entities[0]+"> rdf:type giga:Artifact .\n")
else:
ttlstring.add("<"+datanamespace+entities[0]+"> rdf:type giga:Artifact .\n")
if entities[0].startswith("http"):
artifactURI=entities[0]
else:
artifactURI=datanamespace+entities[0]
file1.close()
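# Expected first-line format of the artifact description file (hypothetical
# example values):
#   http://example.org/data/artifact1;Amphora;http://example.org/ont#Vessel
# i.e. "<artifact URI or local id>;<label>;<class URI or prefixed name>";
# lines with fewer than three fields are typed as giga:Artifact instead.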
## Reads an instance present in a JSON representation and appends its TTL representation to the triple set.
# @param jsonobj The JSON object to process
# @param id The id of the current instance
# @param classs The class of the current instance
# @param labelprefix A string prefix to be prepended to a rdfs:label expression
# @param ttlstring The set of triples to append to
def exportInformationFromIndAsTTL(jsonobj,id,classs,labelprefix,ttlstring):
for info in jsonobj:
#print(classs)
#print(jsonobj[info])
        if "value" not in jsonobj[info] or jsonobj[info]["value"] is None or jsonobj[info]["value"]=="":
continue
propuri=str(ontologyprefix)+":"+first_lower(str(info)).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","").replace("[","_").replace("]","")
if "uri" in jsonobj[info]:
#print(jsonobj[info]["uri"])
if jsonobj[info]["uri"].startswith("http"):
propuri="<"+str(jsonobj[info]["uri"][0:jsonobj[info]["uri"].rfind("#")]+"#"+first_lower(jsonobj[info]["uri"][jsonobj[info]["uri"].rfind("#")+1:]))+">"
elif ":" in jsonobj[info]["uri"]:
propuri=str(jsonobj[info]["uri"][0:jsonobj[info]["uri"].rfind(":")]+":"+first_lower(jsonobj[info]["uri"][jsonobj[info]["uri"].rfind(":")+1:]))
else:
propuri=str(ontologyprefix)+":"+first_lower(str(info)).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","").replace("[","_").replace("]","")
ttlstring.add(str(propuri)+" rdfs:isDefinedBy <"+str(toolpropnamespace)+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","").replace("[","_").replace("]","")+"> .\n")
#print("Propuri: "+propuri)
#print(jsonobj[info]["value"])
#print(isinstance(jsonobj[info]["value"],list))
if isinstance(jsonobj[info]["value"],list):
for val in jsonobj[info]["value"]:
ttlstring=handleProperty(jsonobj,info,id,labelprefix,propuri,classs,ttlstring,val,str(ontologyprefix)+":"+first_upper(str(info)).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","").replace("[","_").replace("]",""))
else:
ttlstring=handleProperty(jsonobj,info,id,labelprefix,propuri,classs,ttlstring,jsonobj[info]["value"],str(ontologyprefix)+":"+first_upper(str(info)).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","").replace("[","_").replace("]",""))
#print ("ttlstring")
return ttlstring
## Processes a given property depending on its type.
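# @param jsonobj the JSON object holding the property description
# @param info the key of the property within jsonobj
# @param id the id of the instance the property belongs to
# @param labelprefix a string prefix to be prepended to rdfs:label expressions
# @param propuri the URI of the property to create
# @param classs the class of the instance the property belongs to
# @param ttlstring the set of triples to append to
# @param inputvalue the value of the property to serialize
# @param propclass the class URI used for created measurement/enumeration individuals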
def handleProperty(jsonobj,info,id,labelprefix,propuri,classs,ttlstring,inputvalue,propclass):
if "unit" in jsonobj[info] and jsonobj[info]["unit"]!=None and jsonobj[info]["unit"]!="":
ttlstring.add(str(propuri)+" rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(propuri)+" rdfs:domain "+str(classs)+" .\n")
ttlstring.add(str(propuri)+" rdfs:range om:Measure .\n")
if englishlabel in jsonobj[info] and jsonobj[info][englishlabel]!=None and str(jsonobj[info][englishlabel])!="" and str(jsonobj[info][englishlabel]).strip()!="...":
ttlstring.add(str(propuri)+" rdfs:label \""+str(jsonobj[info][englishlabel]).replace("\"","'")+"\"@en .\n")
if labelprefix=="":
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+" rdfs:label \""+str(jsonobj[info][englishlabel]).replace("\"","'")+" \"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value rdfs:label \""+str(jsonobj[info][englishlabel]).replace("\"","'")+" Measurement Value \"@en .\n")
else:
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+" rdfs:label \""+str(jsonobj[info][englishlabel]).replace("\"","'")+" ("+str(labelprefix)+")\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value rdfs:label \""+str(jsonobj[info][englishlabel]).replace("\"","'")+" Measurement Value ("+str(labelprefix)+")\"@en .\n")
if germanlabel in jsonobj[info] and jsonobj[info][germanlabel]!=None and str(jsonobj[info][germanlabel])!="" and str(jsonobj[info][germanlabel])!="...":
ttlstring.add(str(propuri)+" rdfs:label \""+str(jsonobj[info][germanlabel]).replace("\"","'")+"\"@de .\n")
if labelprefix=="":
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+" rdfs:label \""+str(jsonobj[info][germanlabel]).replace("\"","'")+" \"@de .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value rdfs:label \""+str(jsonobj[info][germanlabel]).replace("\"","'")+" Messwert \"@de .\n")
else:
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+" rdfs:label \""+str(jsonobj[info][germanlabel]).replace("\"","'")+" ("+str(labelprefix)+")\"@de .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value rdfs:label \""+str(jsonobj[info][germanlabel]).replace("\"","'")+" Messwert ("+str(labelprefix)+")\"@de .\n")
if "measurementclass" in jsonobj[info] and jsonobj[info]["measurementclass"]!=None and str(jsonobj[info]["measurementclass"])!="":
if ":" in jsonobj[info]["measurementclass"]:
if jsonobj[info]["measurementclass"].startswith("http"):
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdf:type owl:Class .\n")
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdfs:label \""+jsonobj[info]["measurementclass"].replace("\"","'")+"\"@en .\n")
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdfs:subClassOf om:Quantity .\n")
else:
ttlstring.add(str(ontologyprefix)+":"+jsonobj[info]["measurementclass"].replace(" ","")+" rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":"+jsonobj[info]["measurementclass"].replace(" ","")+" rdfs:label \""+jsonobj[info]["measurementclass"].replace("\"","'")+"\"@en .\n")
ttlstring.add(str(ontologyprefix)+":"+jsonobj[info]["measurementclass"].replace(" ","")+" rdfs:subClassOf om:Quantity .\n")
else:
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdf:type owl:Class .\n")
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdfs:label \""+jsonobj[info]["measurementclass"].replace("\"","'")+"\"@en .\n")
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdfs:subClassOf om:Quantity .\n")
else:
ttlstring.add(propclass+" rdf:type owl:Class .\n")
ttlstring.add(propclass+" rdfs:label \""+propclass.replace("_"," ").replace(ontologyprefix+":","")+"\"@en .\n")
ttlstring.add(propclass+" rdfs:subClassOf om:Quantity .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+" rdf:type "+propclass+" .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value rdf:type om:Measure .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+" om:hasValue "+str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value .\n")
#print(jsonobj[info]["unit"])
if jsonobj[info]["unit"].startswith("http"):
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value om:hasUnit <"+str(jsonobj[info]["unit"])+"> .\n")
ttlstring.add("<"+str(jsonobj[info]["unit"])+"> rdf:type om:UnitOfMeasure .\n")
ttlstring.add("<"+str(jsonobj[info]["unit"])+"> rdfs:label \""+jsonobj[info]["unit"].replace("\"","'")+"\"@en .\n")
elif ":" in jsonobj[info]["unit"]:
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value om:hasUnit "+str(jsonobj[info]["unit"].replace(" ",""))+" .\n")
ttlstring.add(str(jsonobj[info]["unit"].replace(" ",""))+" rdf:type om:UnitOfMeasure .\n")
ttlstring.add(str(jsonobj[info]["unit"].replace(" ",""))+" rdfs:label \""+jsonobj[info]["unit"].replace("\"","'")+"\" .\n")
else:
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value om:hasUnit \""+str(jsonobj[info]["unit"])+"\" .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+"_value om:hasNumericalValue \""+str(inputvalue).replace("\\","\\\\")+"\"^^"+str(datatypes[jsonobj[info]["value_type"]])+" .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+" "+str(propuri)+" "+str(dataprefix)+":"+str(id)+"_"+str(info).replace(" ","").replace("[","_").replace("]","").replace("(","").replace(")","")+" .\n")
elif "value_type" in jsonobj[info] and jsonobj[info]["value_type"]=="enumeration":
ttlstring.add(str(propuri)+" rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(propuri)+" rdfs:domain "+str(classs)+" .\n")
if "measurementclass" in jsonobj[info] and jsonobj[info]["measurementclass"]!=None and str(jsonobj[info]["measurementclass"])!="":
if ":" in jsonobj[info]["measurementclass"]:
ttlstring.add(str(ontologyprefix)+":"+jsonobj[info]["measurementclass"].replace(" ","")+" rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":"+jsonobj[info]["measurementclass"].replace(" ","")+" rdfs:label \""+jsonobj[info]["measurementclass"].replace("\"","'")+"\"@en .\n")
ttlstring.add(str(ontologyprefix)+":"+jsonobj[info]["measurementclass"].replace(" ","")+" rdfs:subClassOf "+str(ontologyprefix)+":Enumeration .\n")
ttlstring.add(str(ontologyprefix)+":"+jsonobj[info]["measurementclass"].replace(" ","")+"_"+inputvalue+" rdf:type "+str(ontologyprefix)+":"+jsonobj[info]["measurementclass"].replace(" ","")+" .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+" "+str(propuri)+" "+str(ontologyprefix)+":"+jsonobj[info]["measurementclass"]+"_"+str(inputvalue)+" .\n")
else:
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdf:type owl:Class .\n")
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdfs:label \""+jsonobj[info]["measurementclass"].replace("\"","'")++"\"@en .\n")
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"> rdfs:subClassOf "+str(ontologyprefix)+":Enumeration .\n")
ttlstring.add("<"+jsonobj[info]["measurementclass"].replace(" ","")+"_"+inputvalue+"> rdf:type <"+jsonobj[info]["measurementclass"].replace(" ","")+"> .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+" "+str(propuri)+" <"+jsonobj[info]["measurementclass"].replace(" ","")+"_"+str(inputvalue).replace(" ","")+"> .\n")
else:
            classuri=str(ontologyprefix)+":"+str(propuri).replace(str(ontologyprefix)+":","").capitalize()
            ttlstring.add(classuri+" rdf:type owl:Class .\n")
            ttlstring.add(classuri+" rdfs:subClassOf "+str(ontologyprefix)+":Enumeration .\n")
            ttlstring.add(classuri+"_"+str(inputvalue).replace(" ","")+" rdf:type "+classuri+" .\n")
            ttlstring.add(str(dataprefix)+":"+str(id)+" "+str(propuri)+" "+classuri+"_"+str(inputvalue).replace(" ","")+" .\n")
else:
if propuri=="http://www.w3.org/2000/01/rdf-schema#label" or propuri=="rdfs:label" or propuri=="http://www.w3.org/2000/01/rdf-schema#comment" or propuri=="rdfs:comment":
ttlstring.add(str(propuri)+" rdf:type owl:AnnotationProperty .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+" "+str(propuri)+" \""+str(inputvalue)+"\" .\n")
else:
ttlstring.add(str(propuri)+" rdf:type owl:DatatypeProperty .\n")
ttlstring.add(str(propuri)+" rdfs:domain "+str(classs)+" .\n")
if englishlabel in jsonobj[info] and jsonobj[info][englishlabel]!=None and str(jsonobj[info][englishlabel])!="" and str(jsonobj[info][englishlabel])!="...":
ttlstring.add(str(propuri)+" rdfs:label \""+str(jsonobj[info][englishlabel]).replace("\"","'")+"\"@en .\n")
if germanlabel in jsonobj[info] and jsonobj[info][germanlabel]!=None and str(jsonobj[info][germanlabel])!="" and str(jsonobj[info][germanlabel])!="...":
ttlstring.add(str(propuri)+" rdfs:label \""+str(jsonobj[info][germanlabel]).replace("\"","'")+"\"@de .\n")
ttlstring.add(str(propuri)+" rdfs:range "+str(datatypes[jsonobj[info]["value_type"]])+" .\n")
ttlstring.add(str(dataprefix)+":"+str(id)+" "+str(propuri)+" \""+str(inputvalue).replace("\\","\\\\")+"\"^^"+str(datatypes[jsonobj[info]["value_type"]])+" .\n")
#print("handled Property")
return ttlstring
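## A refactoring sketch (not wired into the functions above): the repeated
## .replace() chains that turn labels and keys into URI-safe local names could
## be factored into a single helper such as this one.
def sanitize_localname(s):
    # strip spaces and parentheses and map square brackets to underscores,
    # mirroring the inline replace chains used throughout this script
    return (str(s).replace(" ","").replace("[","_").replace("]","")
                  .replace("(","").replace(")",""))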
## Converts a preformatted dictionary to a set of triples.
# @param dict the dictionary to export from (note: shadows the built-in dict type)
# @param measurementToExport the index of the single measurement to export, or None to export all measurements
# @param ttlstring the set of triples to append to; a new set is created if None is given
def exportToTTL(dict,measurementToExport,ttlstring):
    #print("entered exportToTTL")
projectid=str(generate_uuid())
userid=str(generate_uuid())
projlabelkey="prj_n"
projects="projects"
projkey="measurement_series"
userkey="user_keywords"
mesheskey="meshes"
meshprocessingkey="processing"
calibkey="calibration"
sensorskey="sensors"
sensorinformationkey="calibration"
meshinfokey="mesh_information"
globalrefpointkey="global_referencepoints"
refpointkey="referencepoints"
globalrefpointinfo="global_referencepoints_information"
projinfokey="project_information"
measurmentserieskey = "measurement_series_information"
measurementskey="measurements"
measurementinformation="measurement_properties"
messungkey="messung"
applicationkey="applications"
capturingdevice="capturing_device"
mssetup="measurement_setup"
calsetup="cal_setup"
calobject="cal_object"
calproperties="cal_properties"
mscheck="measurement_check"
softwareid="ATOS2016"
labelprefix=""
projectname=""
if ttlstring==None:
ttlstring=set()
ttlstring.add(str(ontologyprefix)+":Mesh rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Mesh rdfs:subClassOf geo:Geometry .\n")
ttlstring.add(str(ontologyprefix)+":Mesh rdfs:label \"Mesh\"@en .\n")
ttlstring.add(str(ontologyprefix)+":IntermediateMeshResult rdfs:subClassOf "+str(ontologyprefix)+":Mesh .\n")
ttlstring.add(str(ontologyprefix)+":IntermediateMeshResult rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":IntermediateMeshResult rdfs:label \"Intermediate Mesh Result\"@en .\n")
ttlstring.add("rdfs:label rdf:type owl:AnnotationProperty .\n")
ttlstring.add(str(ontologyprefix)+":Tool rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Tool rdfs:label \"Tool\"@en .\n")
ttlstring.add(str(ontologyprefix)+":Tool rdfs:subClassOf "+provenancedict.get("entity")+" .\n")
ttlstring.add(str(ontologyprefix)+":CapturingDevice rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":CapturingDevice rdfs:label \"capturing device\"@en .\n")
ttlstring.add(str(ontologyprefix)+":CapturingDevice rdfs:subClassOf "+str(ontologyprefix)+":Tool .\n")
ttlstring.add(str(ontologyprefix)+":Scanner rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Scanner rdfs:label \"scanner\"@en .\n")
ttlstring.add(str(ontologyprefix)+":Scanner rdfs:subClassOf "+str(ontologyprefix)+":CapturingDevice .\n")
ttlstring.add(str(ontologyprefix)+":Sensor rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Sensor rdfs:label \"Sensor\"@en .\n")
ttlstring.add(str(ontologyprefix)+":Software rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Software rdfs:label \"software\"@en .\n")
ttlstring.add(str(ontologyprefix)+":Software rdfs:subClassOf "+provenancedict.get("entity")+" .\n")
ttlstring.add(str(ontologyprefix)+":Verification rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Verification rdfs:label \"verification\"@en .\n")
ttlstring.add(str(ontologyprefix)+":Verification rdfs:subClassOf "+provenancedict.get("entity")+" .\n")
ttlstring.add(str(ontologyprefix)+":Setup rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Setup rdfs:label \"setup\"@en .\n")
ttlstring.add(str(ontologyprefix)+":Setup rdfs:subClassOf "+provenancedict.get("entity")+" .\n")
ttlstring.add(str(ontologyprefix)+":StructuredLightScanner rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":StructuredLightScanner rdfs:label \"structured light scanner\"@en .\n")
ttlstring.add(str(ontologyprefix)+":StructuredLightScanner rdfs:subClassOf "+str(ontologyprefix)+":Scanner .\n")
ttlstring.add(str(ontologyprefix)+":CalibrationObject rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":CalibrationObject rdfs:label \"calibration object\"@en .\n")
ttlstring.add(str(ontologyprefix)+":CalibrationObject rdfs:subClassOf "+str(ontologyprefix)+":Tool .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementSetup rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementSetup rdfs:label \"measurement setup\"@en .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementSetup rdfs:subClassOf "+str(ontologyprefix)+":Setup .\n")
ttlstring.add(str(ontologyprefix)+":CalibrationSetup rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":CalibrationSetup rdfs:label \"calibration setup\"@en .\n")
ttlstring.add(str(ontologyprefix)+":CalibrationSetup rdfs:subClassOf "+str(ontologyprefix)+":Setup .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementCheck rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementCheck rdfs:label \"measurement check\"@en .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementCheck rdfs:subClassOf "+str(ontologyprefix)+":Verification .\n")
ttlstring.add(str(ontologyprefix)+":Algorithm rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Algorithm rdfs:label \"Algorithm\"@en .\n")
ttlstring.add(str(ontologyprefix)+":Algorithm rdfs:subClassOf "+provenancedict.get("agent")+" .\n")
ttlstring.add(provenancedict.get("entity")+" rdf:type owl:Class .\n")
ttlstring.add(provenancedict.get("person")+" rdf:type owl:Class .\n")
ttlstring.add(provenancedict.get("person")+" rdfs:label \"Person\".\n")
ttlstring.add(provenancedict.get("person")+" rdfs:subClassOf "+provenancedict.get("agent")+" .\n")
ttlstring.add(provenancedict.get("entity")+" rdfs:subClassOf owl:Thing .\n")
ttlstring.add("owl:Thing rdf:type owl:Class .\n")
ttlstring.add("owl:Thing rdfs:label \"Thing\" .\n")
ttlstring.add(provenancedict.get("entity")+" rdfs:label \"Entity\".\n")
ttlstring.add(provenancedict.get("agent")+" rdf:type owl:Class .\n")
ttlstring.add(provenancedict.get("agent")+" rdfs:label \"Agent\".\n")
ttlstring.add(provenancedict.get("agent")+" rdfs:subClassOf owl:Thing .\n")
ttlstring.add(provenancedict.get("activity")+" rdf:type owl:Class .\n")
ttlstring.add(provenancedict.get("activity")+" rdfs:label \"Activity\".\n")
ttlstring.add(provenancedict.get("activity")+" rdfs:subClassOf owl:Thing .\n")
ttlstring.add("dc:creator rdf:type owl:ObjectProperty .\n")
ttlstring.add("dc:creator rdfs:domain "+str(ontologyprefix)+":Mesh .\n")
ttlstring.add("dc:creator rdfs:range foaf:Person .\n")
ttlstring.add("prov:wasDerivedFrom rdf:type owl:ObjectProperty .\n")
ttlstring.add("prov:wasDerivedFrom rdfs:range "+provenancedict.get("entity")+" .\n")
ttlstring.add("prov:wasDerivedFrom rdfs:domain "+provenancedict.get("entity")+" .\n")
ttlstring.add("prov:wasInformedBy rdf:type owl:ObjectProperty .\n")
ttlstring.add("prov:wasInformedBy rdfs:range "+provenancedict.get("activity")+" .\n")
ttlstring.add("prov:wasInformedBy rdfs:domain "+provenancedict.get("activity")+" .\n")
ttlstring.add("prov:wasInvalidatedBy rdf:type owl:ObjectProperty .\n")
ttlstring.add("prov:wasInvalidatedBy rdfs:range "+provenancedict.get("activity")+" .\n")
ttlstring.add("prov:wasInvalidatedBy rdfs:domain "+provenancedict.get("activity")+" .\n")
ttlstring.add("prov:wasGeneratedBy rdf:type owl:ObjectProperty .\n")
ttlstring.add("prov:wasGeneratedBy rdfs:range "+provenancedict.get("activity")+" .\n")
ttlstring.add("prov:wasGeneratedBy rdfs:domain "+provenancedict.get("entity")+" .\n")
ttlstring.add("prov:actedOnBehalfOf rdf:type owl:ObjectProperty .\n")
ttlstring.add("prov:actedOnBehalfOf rdfs:range "+provenancedict.get("agent")+" .\n")
ttlstring.add("prov:actedOnBehalfOf rdfs:domain "+provenancedict.get("agent")+" .\n")
ttlstring.add("prov:wasAttributedTo rdf:type owl:ObjectProperty .\n")
ttlstring.add("prov:wasAttributedTo rdfs:range "+provenancedict.get("agent")+" .\n")
ttlstring.add("prov:wasAttributedTo rdfs:domain "+provenancedict.get("entity")+" .\n")
ttlstring.add("prov:used rdf:type owl:ObjectProperty .\n")
ttlstring.add("prov:used rdfs:range "+provenancedict.get("entity")+" .\n")
ttlstring.add("prov:used rdfs:domain "+provenancedict.get("activity")+" .\n")
ttlstring.add("prov:wasAssociatedWith rdf:type owl:ObjectProperty .\n")
ttlstring.add("prov:wasAssociatedWith rdfs:range "+provenancedict.get("agent")+" .\n")
ttlstring.add("prov:wasAssociatedWith rdfs:domain "+provenancedict.get("entity")+" .\n")
ttlstring.add("om:hasNumericalValue rdf:type owl:DatatypeProperty .\n")
ttlstring.add("om:hasNumericalValue rdfs:range xsd:integer .\n")
ttlstring.add("om:hasNumericalValue rdfs:domain om:Measure .\n")
ttlstring.add("om:hasNumericalValue rdfs:label \"has numerical value\"@en .\n")
ttlstring.add("om:hasValue rdf:type owl:ObjectProperty .\n")
ttlstring.add("om:hasValue rdfs:label \"has value\"@en .\n")
ttlstring.add("om:hasUnit rdf:type owl:ObjectProperty .\n")
ttlstring.add("om:hasUnit rdfs:label \"has unit\"@en .\n")
ttlstring.add("om:hasUnit rdfs:domain om:Measure .\n")
ttlstring.add("om:hasUnit rdfs:range om:UnitOfMeasure .\n")
ttlstring.add("geo:asWKT rdf:type owl:DatatypeProperty .\n")
ttlstring.add("geo:asWKT rdfs:label \"asWKT\"@en .\n")
ttlstring.add("om:Quantity rdf:type owl:Class .\n")
ttlstring.add("om:Quantity rdfs:label \"Quantity\".\n")
ttlstring.add("om:Quantity rdfs:subClassOf owl:Thing .\n")
ttlstring.add("om:Measure rdf:type owl:Class .\n")
ttlstring.add("om:Measure rdfs:label \"Measure\".\n")
ttlstring.add("om:Measure rdfs:subClassOf owl:Thing .\n")
ttlstring.add("om:UnitOfMeasure rdf:type owl:Class .\n")
ttlstring.add("om:UnitOfMeasure rdfs:label \"Unit Of Measure\".\n")
ttlstring.add("om:UnitOfMeasure rdfs:subClassOf owl:Thing .\n")
ttlstring.add(str(ontologyprefix)+":calibration rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":calibration rdfs:range "+str(ontologyprefix)+":Calibration .\n")
ttlstring.add(str(ontologyprefix)+":calibration rdfs:domain "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":sensor rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":sensor rdfs:range "+str(ontologyprefix)+":Sensor .\n")
ttlstring.add(str(ontologyprefix)+":sensor rdfs:domain "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":calibrationsetup rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":calibrationsetup rdfs:range "+str(ontologyprefix)+":Setup .\n")
ttlstring.add(str(ontologyprefix)+":calibrationsetup rdfs:domain "+str(ontologyprefix)+":Calibration .\n")
ttlstring.add(str(ontologyprefix)+":calibrationobject rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":calibrationobject rdfs:range "+str(ontologyprefix)+":CalibrationObject .\n")
ttlstring.add(str(ontologyprefix)+":calibrationobject rdfs:domain "+str(ontologyprefix)+":Calibration .\n")
ttlstring.add(str(ontologyprefix)+":capturingdevice rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":capturingdevice rdfs:range "+str(ontologyprefix)+":Tool .\n")
ttlstring.add(str(ontologyprefix)+":capturingdevice rdfs:domain "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":globalReferencePoint rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":globalReferencePoint rdfs:range "+str(ontologyprefix)+":GRP .\n")
ttlstring.add(str(ontologyprefix)+":globalReferencePoint rdfs:domain "+str(ontologyprefix)+":MeasurementSeries .\n")
ttlstring.add(str(ontologyprefix)+":referencePoint rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":referencePoint rdfs:range "+str(ontologyprefix)+":ReferencePoint .\n")
ttlstring.add(str(ontologyprefix)+":referencePoint rdfs:domain "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":partOf rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":partOf rdfs:range "+str(ontologyprefix)+":MeasurementCheck.\n")
ttlstring.add(str(ontologyprefix)+":partOf rdfs:range "+str(ontologyprefix)+":MeasurementSetup.\n")
ttlstring.add(str(ontologyprefix)+":partOf rdfs:domain "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":usedBy rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":usedBy rdfs:range "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":usedBy rdfs:domain "+str(ontologyprefix)+":CapturingDevice .\n")
ttlstring.add(str(ontologyprefix)+":setup rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":setup rdfs:range "+str(ontologyprefix)+":MeasurementSetup .\n")
ttlstring.add(str(ontologyprefix)+":setup rdfs:domain "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":verification rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":verification rdfs:range "+str(ontologyprefix)+":MeasurementCheck .\n")
ttlstring.add(str(ontologyprefix)+":verification rdfs:domain "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":measurementSeries rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":measurementSeries rdfs:range "+str(ontologyprefix)+":MeasurementSeries .\n")
ttlstring.add(str(ontologyprefix)+":measurementSeries rdfs:domain "+str(ontologyprefix)+":MeasurementProject .\n")
ttlstring.add(str(ontologyprefix)+":measurement rdf:type owl:ObjectProperty .\n")
ttlstring.add(str(ontologyprefix)+":measurement rdfs:range "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(ontologyprefix)+":measurement rdfs:domain "+str(ontologyprefix)+":MeasurementSeries .\n")
ttlstring.add(str(ontologyprefix)+":Calibration rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":CalibrationObject rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Measurement rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":Measurement rdfs:label \"Measurement\".\n")
ttlstring.add(str(ontologyprefix)+":Measurement rdfs:subClassOf prov:Entity .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementSeries rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementSeries rdfs:label \"Measurement Series\".\n")
ttlstring.add(str(ontologyprefix)+":MeasurementSeries rdfs:subClassOf prov:Entity .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementProjectMetadata rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementProjectMetadata rdfs:label \"Measurement Project Metadata\".\n")
ttlstring.add(str(ontologyprefix)+":MeasurementProjectMetadata rdfs:subClassOf prov:Entity .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementProject rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":MeasurementProject rdfs:label \"Measurement Project\".\n")
ttlstring.add(str(ontologyprefix)+":MeasurementProject rdfs:subClassOf prov:Entity .\n")
ttlstring.add(str(ontologyprefix)+":ReferencePoint rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":ReferencePoint rdfs:label \"reference point\".\n")
ttlstring.add(str(ontologyprefix)+":ReferencePoint rdfs:subClassOf geo:Point . geo:Point rdfs:subClassOf geo:Geometry . geo:Geometry rdfs:subClassOf prov:Entity .\n")
ttlstring.add(str(ontologyprefix)+":GRP rdf:type owl:Class .\n")
ttlstring.add(str(ontologyprefix)+":GRP rdfs:label \"global reference point\".\n")
ttlstring.add(str(ontologyprefix)+":GRP rdfs:subClassOf "+str(ontologyprefix)+":ReferencePoint .\n")
ttlstring.add(str(ontologyprefix)+":Calibration rdfs:subClassOf prov:Entity .\n")
ttlstring.add(str(ontologyprefix)+":Calibration rdfs:label \"Calibration\".\n")
ttlstring.add(str(dataprefix)+":metadata_calculation_activity rdf:type "+provenancedict.get("activity")+" . \n")
ttlstring.add(str(ontologyprefix)+":GRP_calculation_algorithm rdf:type "+str(ontologyprefix)+":Algorithm . \n")
for pro in dict[projects]:
#print(projkey)
#print (pro[projinfokey])
if projinfokey in pro:
if "prj_n" in pro[projinfokey]:
labelprefix=pro[projinfokey]["prj_n"]["value"]
projectname=pro[projinfokey]["prj_n"]["value"]
ttlstring.add(str(dataprefix)+":"+str(projectid)+" rdf:type "+str(ontologyprefix)+":MeasurementProject .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_metadata rdf:type "+str(ontologyprefix)+":MeasurementProjectMetadata .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_metadata prov:wasGeneratedBy "+str(dataprefix)+":metadata_calculation_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_metadata prov:wasAttributedTo "+str(dataprefix)+":"+script_name+".\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+" "+str(dataprefix)+":metadata "+str(dataprefix)+":"+str(projectid)+"_metadata .\n")
#print(pro[projinfokey])
ttlstring=exportInformationFromIndAsTTL(pro[projinfokey],projectid,str(ontologyprefix)+":MeasurementProject",labelprefix,ttlstring)
ttlstring.add(str(dataprefix)+":"+str(userid)+" rdf:type foaf:Person, "+provenancedict.get("agent")+" .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+" dc:creator "+str(dataprefix)+":"+str(userid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(userid)+" rdfs:label \"Creator of "+str(labelprefix)+"\" .\n")
#print(pro[applicationkey])
if applicationkey in pro:
for appl in pro[applicationkey]:
if "script_name" in appl and "value" in appl["script_name"] and appl["script_name"]["value"]==script_name:
ttlstring.add(str(dataprefix)+":"+script_name+" rdf:type "+str(ontologyprefix)+":Software .\n")
ttlstring.add(str(dataprefix)+":"+script_name+" rdfs:label \""+str(script_label)+"\"@en .\n")
ttlstring=exportInformationFromIndAsTTL(appl,script_name,str(ontologyprefix)+":Software",labelprefix,ttlstring)
else:
if "PROJECT.TYPE" in appl and "PROJECT.VERSION" in appl:
softwareid=str(appl["PROJECT.TYPE"]["value"]).strip().replace(" ","_")+"_"+str(appl["PROJECT.VERSION"]["value"]).strip().replace(" ","_").replace(".","_").replace("-","_")
elif "application_name" in appl and "application_build_information.version" in appl:
softwareid=str(appl["application_name"]["value"]).strip().replace(" ","_")+"_"+str(appl["application_build_information.version"]["value"]).strip().replace(" ","_").replace(".","_").replace("-","_")
else:
softwareid="ATOS2016"
ttlstring.add(str(dataprefix)+":"+softwareid+" rdf:type "+str(ontologyprefix)+":Software .\n")
ttlstring.add(str(dataprefix)+":"+softwareid+" rdfs:label \""+str(softwareid).replace("_"," ")+"\"@en .\n")
ttlstring=exportInformationFromIndAsTTL(appl,softwareid,str(ontologyprefix)+":Software",labelprefix,ttlstring)
if projkey in pro:
for msindex, project in enumerate(pro[projkey]):
#print(project)
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" rdf:type "+str(ontologyprefix)+":MeasurementSeries .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+" prov:wasDerivedFrom "+str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+" "+str(ontologyprefix)+":measurementSeries "+str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" rdfs:label \"Measurement Series "+str(msindex)+" for "+str(labelprefix)+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" prov:wasAttributedTo "+str(dataprefix)+":"+str(userid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" prov:wasGeneratedBy "+str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_activity prov:wasAssociatedWith "+str(dataprefix)+":"+str(userid)+" .\n")
if artifactURI!=None:
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_activity prov:used "+artifactURI+" .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_activity rdf:type prov:Activity .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_activity rdfs:label \"MS "+str(msindex)+" Activity ("+str(labelprefix)+")\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_activity rdfs:label \" Messreihe "+str(msindex)+" ("+str(labelprefix)+")\"@de .\n")
if measurmentserieskey in project:
#print(project[measurmentserieskey])
ttlstring=exportInformationFromIndAsTTL(project[measurmentserieskey],str(projectid)+"_ms_"+str(msindex),str(ontologyprefix)+":MeasurementSeries",labelprefix,ttlstring)
if measurementToExport==None:
#print ("measurementToExport==None:")
if projkey in project:
#print (project[projinfokey])
if projinfokey in project:
if "prj_n" in project[projinfokey]:
                                labelprefix=project[projinfokey]["prj_n"]["value"]+" Measurement Series "+str(msindex)
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" rdf:type "+str(ontologyprefix)+":MeasurementSeries, prov:Entity .\n")
#print(project[projinfokey])
ttlstring=exportInformationFromIndAsTTL(project[measurmentserieskey],projectid+"_ms_"+str(msindex),str(ontologyprefix)+":MeasurementSeries",labelprefix,ttlstring)
ttlstring.add(str(dataprefix)+":"+str(userid)+" rdf:type foaf:Person, "+provenancedict.get("agent")+" .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+" dc:creator "+str(dataprefix)+":"+str(userid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(userid)+" rdfs:label \"Creator of "+str(labelprefix)+"\" .\n")
#print(ttlstring)
if userkey in project:
ttlstring=exportInformationFromIndAsTTL(project[userkey],userid,"foaf:Person",labelprefix,ttlstring)
#print(ttlstring)
#print(project[globalrefpointkey])
if globalrefpointkey in project and refpointkey in project[globalrefpointkey]:
for index, grp in enumerate(project[globalrefpointkey][refpointkey]):
if "point_id" in grp:
index = grp["point_id"]["value"]
#print (index)
elif "r_id" in grp:
index = grp["r_id"]["value"]
#print (index)
grpid=str(projectid)+"_ms_"+str(msindex)+"_grp"+str(index)
#print (grpid)
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" "+str(ontologyprefix)+":globalReferencePoint "+str(dataprefix)+":"+str(grpid)+" . \n")
ttlstring.add(str(dataprefix)+":"+str(grpid)+" rdf:type "+str(ontologyprefix)+":GRP .\n")
ttlstring.add(str(dataprefix)+":"+str(grpid)+" rdfs:label \"GRP"+str(index)+" ( Measurement Series "+str(msindex)+")\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(grpid)+" rdfs:label \"GRP"+str(index)+" ( Messreihe "+str(msindex)+")\"@de .\n")
ttlstring.add(str(dataprefix)+":"+str(grpid)+" prov:wasGeneratedBy "+str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp_calculation_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp_calculation_activity prov:wasAssociatedWith "+str(ontologyprefix)+":GRP_calculation_algorithm. \n")
ttlstring.add(str(ontologyprefix)+":GRP_calculation_algorithm prov:actedOnBehalfOf "+str(dataprefix)+":"+str(userid)+" . \n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp_calculation_activity rdf:type "+provenancedict.get("activity")+" .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp_calculation_activity rdfs:label \"GRP Calculation Activity\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp_calculation_activity rdfs:label \"GRP Berechnung\"@de .\n")
#print("265:"+str(project[globalrefpointkey]))
#print("266: "+str(grp))
ttlstring=exportInformationFromIndAsTTL(grp,grpid,str(ontologyprefix)+":GRP",labelprefix+" MS "+str(msindex)+" GRP"+str(index),ttlstring)
if "r_x" in grp and "r_y" in grp and "r_z" in grp:
ttlstring.add(str(dataprefix)+":"+str(grpid)+" geo:asWKT \"POINT("+str(grp["r_x"]["value"])+" "+str(grp["r_y"]["value"])+" "+str(grp["r_z"]["value"])+")\"^^geo:wktLiteral .\n")
elif "coordinate.x" in grp and "coordinate.y" in grp and "coordinate.z" in grp:
ttlstring.add(str(dataprefix)+":"+str(grpid)+" geo:asWKT \"POINT("+str(grp["coordinate.x"]["value"])+" "+str(grp["coordinate.y"]["value"])+" "+str(grp["coordinate.z"]["value"])+")\"^^geo:wktLiteral .\n")
if sensorskey in project:
for seindex, sensor in enumerate(project[sensorskey]):
sensorid=str(projectid)+"_sensor_"+str(seindex)
calibid=str(sensorid)+"_calibration"
mscheckid=str(sensorid)+"_mscheck"
capturedevid=str(sensorid)+"_capturingdevice"
ttlstring.add(str(dataprefix)+":"+str(sensorid)+" rdf:type "+str(ontologyprefix)+":Sensor, "+provenancedict.get("entity")+" .\n")
ttlstring.add(str(dataprefix)+":"+str(sensorid)+" rdfs:label \"Sensor "+str(seindex)+" from "+str(projectname)+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(sensorid)+" prov:wasDerivedFrom "+str(dataprefix)+":"+str(projectid)+" .\n")
if capturingdevice in sensor:
if "sensor_type" in sensor[capturingdevice] and sensor[capturingdevice]["sensor_type"]["value"] in sensorTypeToClass:
ttlstring.add(str(dataprefix)+":"+str(capturedevid)+" rdf:type "+str(sensorTypeToClass[sensor[capturingdevice]["sensor_type"]["value"]])+" .\n")
else:
ttlstring.add(str(dataprefix)+":"+str(capturedevid)+" rdf:type "+str(ontologyprefix)+":CapturingDevice .\n")
ttlstring.add(str(dataprefix)+":"+str(sensorid)+" "+str(ontologyprefix)+":capturingdevice "+str(dataprefix)+":"+str(capturedevid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(mscheckid)+" "+str(ontologyprefix)+":partOf "+str(dataprefix)+":"+str(sensorid)+"_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(capturedevid)+" rdfs:label \""+labelprefix+"Sensor "+str(seindex)+" Capturing Device\"@en .\n")
ttlstring=exportInformationFromIndAsTTL(sensor[capturingdevice],capturedevid,str(ontologyprefix)+":CapturingDevice",labelprefix+" Sensor "+str(seindex)+" Caturing Device",ttlstring)
if calibkey in sensor:
ttlstring.add(str(dataprefix)+":"+str(sensorid)+" "+str(ontologyprefix)+":calibration "+str(dataprefix)+":"+str(calibid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+" rdfs:label \"Sensor "+str(seindex)+" Calibration\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+" rdf:type "+str(ontologyprefix)+":Calibration .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+"_activity rdf:type prov:Activity .\n")
if labelprefix=="":
ttlstring.add(str(dataprefix)+":"+str(calibid)+"_activity rdfs:label \"MS "+str(seindex)+" Measurement "+str(msindex)+" Calibration Activity \"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+"_activity rdfs:label \"Sensor "+str(seindex)+" Messvorgang "+str(msindex)+" Kalibrierung \"@de .\n")
else:
ttlstring.add(str(dataprefix)+":"+str(calibid)+"_activity rdfs:label \"MS "+str(seindex)+" Measurement "+str(msindex)+" Calibration Activity ("+str(labelprefix)+")\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+"_activity rdfs:label \"Sensor "+str(seindex)+" Messvorgang "+str(msindex)+" Kalibrierung ("+str(labelprefix)+")\"@de .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+"_activity prov:wasAssociatedWith "+str(dataprefix)+":"+str(userid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+" rdf:type "+str(ontologyprefix)+":Calibration .\n")
if calobject in sensor[calibkey]:
calobjid=""
calobjname=""
if "calibration_object_name" in sensor[calibkey][calobject]:
#print(messung[calibkey][calobject])
calobjid=str(sensor[calibkey][calobject]["calibration_object_name"]["value"]).replace(" ","")+"_calibration_object"
calobjname=str(sensor[calibkey][calobject]["calibration_object_name"]["value"])
else:
calobjid=str(sensorid)+"_calibration_object"
ttlstring.add(str(dataprefix)+":"+str(calibid)+" "+str(ontologyprefix)+":calibrationobject "+str(dataprefix)+":"+str(calobjid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(calobjid)+" rdfs:label \""+labelprefix+" Sensor "+str(seindex)+" Calibration Object"+"\" .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+"_activity prov:used "+str(dataprefix)+":"+str(calobjid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(calobjid)+" rdf:type "+str(ontologyprefix)+":CalibrationObject .\n")
ttlstring=exportInformationFromIndAsTTL(sensor[calibkey][calobject],calobjid,str(ontologyprefix)+":CalibrationObject",calobjname+" Calibration Object",ttlstring)
if calsetup in sensor[calibkey]:
calsetupid=str(sensorid)+"_calibration_setup"
ttlstring.add(str(dataprefix)+":"+str(calsetupid)+" rdf:type "+str(ontologyprefix)+":CalibrationSetup .\n")
ttlstring.add(str(dataprefix)+":"+str(calsetupid)+" rdfs:label \""+labelprefix+" Sensor "+str(seindex)+" Calibration Setup"+"\" .\n")
ttlstring.add(str(dataprefix)+":"+str(calibid)+" "+str(ontologyprefix)+":calibrationsetup "+str(dataprefix)+":"+str(calsetupid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(calsetupid)+" "+str(ontologyprefix)+":partOf "+str(dataprefix)+":"+str(calibid)+"_activity .\n")
ttlstring=exportInformationFromIndAsTTL(sensor[calibkey][calsetup],calsetupid,str(ontologyprefix)+":CalibrationSetup",labelprefix+" Sensor "+str(seindex)+" Calibration Setup",ttlstring)
if calproperties in sensor[calibkey]:
ttlstring=exportInformationFromIndAsTTL(sensor[calibkey][calproperties],calibid,str(ontologyprefix)+":Calibration",labelprefix+" Sensor "+str(seindex)+" Calibration",ttlstring)
ttlstring=exportInformationFromIndAsTTL(sensor[calibkey],calibid,str(ontologyprefix)+":Calibration",labelprefix+" Sensor "+str(seindex)+" Calibration",ttlstring)
#print(ttlstring)
if measurementskey in project:
for index, messung in enumerate(project[measurementskey]):
#print(index)
if measurementToExport==None or measurementToExport==index:
messungid=str(projectid)+"_ms_"+str(msindex)+"_measurement"+str(index)
calibid=str(messungid)+"_calibration"
capturedevid=str(messungid)+"_capturingdevice"
mssetupid=str(messungid)+"_mssetup"
mscheckid=str(messungid)+"_mscheck"
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" "+str(ontologyprefix)+":measurement "+str(dataprefix)+":"+str(messungid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+" prov:wasDerivedFrom "+str(dataprefix)+":"+str(messungid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+" rdf:type "+str(ontologyprefix)+":Measurement .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+" rdfs:label \"MS "+str(msindex)+" Measurement "+str(index)+" for "+str(labelprefix)+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+" prov:wasAttributedTo "+str(dataprefix)+":"+str(userid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+" prov:wasGeneratedBy "+str(dataprefix)+":"+str(messungid)+"_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_activity prov:wasAssociatedWith "+str(dataprefix)+":"+str(userid)+" .\n")
if artifactURI!=None:
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_activity prov:used "+artifactURI+" .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_activity rdf:type prov:Activity .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_activity prov:used "+str(dataprefix)+":"+softwareid+" .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_activity rdfs:label \"MS "+str(msindex)+" Measurement "+str(index)+" Activity ("+str(labelprefix)+")\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_activity rdfs:label \"Messreihe "+str(msindex)+" Messvorgang "+str(index)+" ("+str(labelprefix)+")\"@de .\n")
if measurementinformation in messung and "sensor_id" in messung[measurementinformation] and "value" in messung[measurementinformation]["sensor_id"]:
ttlstring.add(str(dataprefix)+":"+str(messungid)+" "+str(ontologyprefix)+":sensor "+str(dataprefix)+":"+str(projectid)+"_sensor_"+str(messung[measurementinformation]["sensor_id"]["value"])+" .\n")
if mssetup in messung:
ttlstring.add(str(dataprefix)+":"+str(mssetupid)+" rdf:type "+str(ontologyprefix)+":MeasurementSetup .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+" "+str(ontologyprefix)+":setup "+str(dataprefix)+":"+str(mssetupid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(mssetupid)+" "+str(ontologyprefix)+":partOf "+str(dataprefix)+":"+str(messungid)+"_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(mssetupid)+" rdfs:label \""+labelprefix+" MS "+str(msindex)+" Measurement "+str(index)+" Setup\"@en .\n")
ttlstring=exportInformationFromIndAsTTL(messung[mssetup],mssetupid,str(ontologyprefix)+":MeasurementSetup",labelprefix+" MS "+str(msindex)+" Measurement "+str(index)+" Setup",ttlstring)
if mscheck in messung:
ttlstring.add(str(dataprefix)+":"+str(mscheckid)+" rdf:type "+str(ontologyprefix)+":MeasurementCheck .\n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+" "+str(ontologyprefix)+":verification "+str(dataprefix)+":"+str(mscheckid)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(mscheckid)+" prov:used "+str(dataprefix)+":"+str(messungid)+"_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(mscheckid)+" rdfs:label \""+labelprefix+" MS "+str(msindex)+" Measurement "+str(index)+" Measurement Check\"@en .\n")
ttlstring=exportInformationFromIndAsTTL(messung[mscheck],mscheckid,str(ontologyprefix)+":MeasurementCheck",labelprefix+" MS "+str(msindex)+" Measurement "+str(index)+" Measurement Check",ttlstring)
                            if measurementinformation in messung:
                                ttlstring=exportInformationFromIndAsTTL(messung[measurementinformation],messungid,str(ontologyprefix)+":Measurement",labelprefix+" MS "+str(msindex)+" Measurement "+str(index),ttlstring)
#print(messung)
index2=0
index2oid = 0
messungindex=index
if refpointkey in messung:
for index,rp in enumerate(messung[refpointkey]):
if "r_id" in rp:
index2 = rp["r_id"]["value"]
elif "reference_point_id" in rp:
index2 = rp["reference_point_id"]["value"]
else:
index2 = "_noid_" + str(index2oid)
index2oid+=1
#print("aaa:"+str(rp))
rpuri=str(messungid)+"_rp"+str(index2)
ttlstring.add(str(dataprefix)+":"+str(messungid)+" "+str(ontologyprefix)+":referencePoint "+str(dataprefix)+":"+str(rpuri)+" . \n")
ttlstring.add(str(dataprefix)+":"+str(rpuri)+" rdf:type "+str(ontologyprefix)+":ReferencePoint .\n")
ttlstring.add(str(dataprefix)+":"+str(rpuri)+" rdfs:label \"RP"+str(index2)+" ("+str(labelprefix)+" MS "+str(msindex)+" Measurement "+str(messungindex)+")\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(rpuri)+" rdfs:label \"RP"+str(index2)+" ("+str(labelprefix)+" Messreihe "+str(msindex)+" Messung "+str(messungindex)+")\"@de .\n")
                                    ttlstring=exportInformationFromIndAsTTL(rp,rpuri,str(ontologyprefix)+":ReferencePoint",labelprefix+" MS "+str(msindex)+" Measurement "+str(messungindex)+" RP"+str(index2),ttlstring)
if "r_x" in rp and "r_y" in rp and "r_z" in rp:
### atos v6.2
ttlstring.add(str(dataprefix)+":"+str(rpuri)+" geo:asWKT \"POINT("+str(rp["r_x"]["value"])+" "+str(rp["r_y"]["value"])+" "+str(rp["r_z"]["value"])+")\"^^geo:wktLiteral .\n")
###atos 2016
elif "reference_point_coordinate.x" in rp and "reference_point_coordinate.y" in rp and "reference_point_coordinate.z" in rp:
ttlstring.add(str(dataprefix)+":"+str(rpuri)+" geo:asWKT \"POINT("+str(rp["reference_point_coordinate.x"]["value"])+" "+str(rp["reference_point_coordinate.y"]["value"])+" "+str(rp["reference_point_coordinate.z"]["value"])+")\"^^geo:wktLiteral .\n")
#print(rp)
ttlstring.add(str(dataprefix)+":"+str(rpuri)+" prov:wasGeneratedBy "+str(dataprefix)+":"+str(messungid)+"_activity . \n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_activity rdfs:label \"MS "+str(msindex)+" Measurement "+str(index)+" Activity\"@en. \n")
ttlstring.add(str(dataprefix)+":"+str(rpuri)+" prov:wasAttributedTo "+str(dataprefix)+":"+str(messungid)+"_algorithm . \n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_algorithm rdfs:label \"MS "+str(msindex)+" Measurement "+str(index)+" Algorithm\"@en. \n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_algorithm rdf:type "+str(ontologyprefix)+":Algorithm . \n")
ttlstring.add(str(dataprefix)+":"+str(messungid)+"_algorithm prov:actedOnBehalfOf "+str(dataprefix)+":"+str(userid)+" . \n")
#print(rpuri)
if measurementToExport==None and index2!=None:
#print("grp loop")
if globalrefpointkey in project:
if refpointkey in project[globalrefpointkey]:
for index, point in enumerate(project[globalrefpointkey][refpointkey]):
if referencepointid in rp and globalreferencepointid in point and rp[referencepointid]["value"]==point[globalreferencepointid]["value"]:
if "point_id" in point:
index = point["point_id"]["value"]
#print (index)
elif "r_id" in point:
index = point["r_id"]["value"]
#print (index)
#print(str(rp[referencepointid]["value"])+" - "+str(point[globalreferencepointid]["value"]))
#print(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp"+str(index)+" prov:wasDerivedFrom "+str(dataprefix)+":"+str(rpuri)+" . \n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp"+str(index)+" prov:wasDerivedFrom "+str(dataprefix)+":"+str(rpuri)+" . \n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp_calculation_activity rdf:type prov:Activity . \n")
ttlstring.add(str(dataprefix)+":"+str(projectid)+"_ms_"+str(msindex)+"_grp_calculation_activity prov:used "+str(dataprefix)+":"+str(rpuri)+" . \n")
#print("next")
if mesheskey in pro:
for index, mesh in enumerate(pro[mesheskey]):
meshid=str(projectid)+"_mesh_"+str(index)
ttlstring.add(str(dataprefix)+":"+str(meshid)+" rdf:type "+str(ontologyprefix)+":Mesh, "+provenancedict.get("entity")+" .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+" rdfs:label \"Mesh "+str(meshid)+" from "+str(projectname)+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+" prov:wasDerivedFrom "+str(dataprefix)+":"+str(projectid)+" .\n")
lastprocid=""
if meshprocessingkey in mesh:
#print(mesh[meshprocessingkey])
for indexprocstep, procstep in enumerate(mesh[meshprocessingkey]):
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(0)+"_activity rdf:type prov:Activity .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(0)+"_activity rdfs:label \"Mesh Creation Activity "+str(0)+": "+str(procstep["processname"]["value"])+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(0)+"_activity rdfs:label \"Mesherstellungsschritt "+str(0)+": ("+str(procstep["processname"]["value"])+")\"@de .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(0)+"_activity prov:used "+str(dataprefix)+":"+str(projectid)+" .\n")
if "setup" in procstep:
ttlstring=exportInformationFromIndAsTTL(procstep["setup"],str(meshid)+"_creation_"+str(0)+"_activity","prov:Activity","Mesh Creation Activity "+procstep["processname"]["value"],ttlstring)
if "postprocessing" in procstep:
for indexpostproc, postproc in enumerate(procstep["postprocessing"]):
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity rdf:type prov:Activity .\n")
if "processname" in postproc and "value" in postproc["processname"]:
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity rdfs:label \"Mesh Creation Activity "+str(indexpostproc+1)+": "+str(postproc["processname"]["value"])+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity rdfs:label \"Mesherstellungsschritt "+str(indexpostproc+1)+": "+str(postproc["processname"]["value"])+"\"@de .\n")
else:
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity rdfs:label \"Mesh Creation Activity "+str(indexpostproc+1)+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity rdfs:label \"Mesherstellungsschritt "+str(indexpostproc+1)+"\"@de .\n")
if indexpostproc==0:
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity prov:wasInformedBy "+str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc)+"_activity .\n")
if indexpostproc!=0:
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc+1)+" prov:wasDerivedFrom "+str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc+1)+" prov:wasGeneratedBy "+str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity .\n")
else:
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity prov:wasInformedBy "+str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc)+"_activity .\n")
if indexpostproc!=0:
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc+1)+" prov:wasDerivedFrom "+str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc)+" prov:wasInvalidatedBy "+str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc+1)+" prov:wasGeneratedBy "+str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity prov:used "+str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc)+" .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc+1)+" rdfs:label \"Mesh "+str(meshid)+" Intermediate Result "+str(indexpostproc)+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc+1)+" rdfs:label \"Mesh "+str(meshid)+" Zwischenergebnis "+str(indexpostproc)+"\"@en .\n")
ttlstring.add(str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc+1)+" rdf:type "+str(ontologyprefix)+":IntermediateMeshResult .\n")
lastprocid=str(dataprefix)+":"+str(meshid)+"_intermediate_"+str(indexpostproc+1)
if "processname" in postproc and "value" in postproc["processname"]:
ttlstring=exportInformationFromIndAsTTL(postproc,str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity","prov:Activity","Mesh Creation Activity "+str(indexpostproc)+": "+str(postproc["processname"]["value"])+" ",ttlstring)
else:
ttlstring=exportInformationFromIndAsTTL(postproc,str(meshid)+"_creation_"+str(indexpostproc+1)+"_activity","prov:Activity","Mesh Creation Activity "+str(indexpostproc)+" ",ttlstring)
else:
ttlstring.add(str(dataprefix)+":"+str(meshid)+" prov:wasGeneratedBy "+str(dataprefix)+":"+str(meshid)+"_creation_"+str(0)+"_activity .\n")
if lastprocid!="":
ttlstring.add(str(dataprefix)+":"+str(meshid)+" owl:sameAs "+str(lastprocid)+" .\n")
                if meshinfokey in mesh:
                    ttlstring=exportInformationFromIndAsTTL(mesh[meshinfokey],meshid,str(ontologyprefix)+":Mesh",labelprefix+" Mesh Attribute ",ttlstring)
return ttlstring
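# Minimal calling sketch (assuming `metadata` is shaped like the dictionary
# assembled below, i.e. {"projects": [...]}):
#   triples = exportToTTL(metadata, None, None)  # export everything
#   triples = exportToTTL(metadata, 0, triples)  # add only measurement 0
# The returned set can be joined and prepended with ttlstringhead before
# writing the final TTL file.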
####################################################################################################
#
######### Method for storing the script information
# metadata of the python script
def script_version():
    # timestamp
    now = datetime.datetime.now()
    now_string = now.strftime("%Y-%m-%dT%H:%M:%S")
    # define the dictionary of script metadata
dic_script = {}
dic_script["github"]={}
dic_script["github"]["key_deu"]="GitHub Repository"
dic_script["github"]["key_eng"]="GitHub Repository"
dic_script["github"]["value"]="http://github.com/i3mainz/3dcap-md-gen"
dic_script["github"]["value_type"]="str"
dic_script["github"]["uri"]="http://www.wikidata.org/entity/Q364"
dic_script["github"]["from_application"]="false"
dic_script["github_release"]={}
dic_script["github_release"]["key_deu"]="GitHub Release"
dic_script["github_release"]["key_eng"]="GitHub Release"
dic_script["github_release"]["value"]=github_release
dic_script["github_release"]["value_type"]="str"
dic_script["github_release"]["uri"]="http://www.wikidata.org/entity/Q20631656"
dic_script["github_release"]["from_application"]="false"
dic_script["script_name"]={}
dic_script["script_name"]["key_deu"]="Python Skript Name"
dic_script["script_name"]["key_eng"]="Python Script name"
dic_script["script_name"]["value"]=script_name
dic_script["script_name"]["value_type"]="str"
dic_script["script_name"]["uri"]="http://www.wikidata.org/entity/Q15955723"
dic_script["script_name"]["from_application"]="false"
dic_script["start_time_script"]={}
dic_script["start_time_script"]["key_deu"]="Skriptausführungszeit"
dic_script["start_time_script"]["key_eng"]="Script execution time"
dic_script["start_time_script"]["value"]=now_string
dic_script["start_time_script"]["value_type"]="dateTime"
dic_script["start_time_script"]["uri"]=provnamespace+"startedAtTime"
dic_script["start_time_script"]["from_application"]="false"
return dic_script
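# Illustrative shape of the dictionary returned by script_version() (values are examples):
#   {"github": {"key_deu": "GitHub Repository", "key_eng": "GitHub Repository",
#               "value": "http://github.com/i3mainz/3dcap-md-gen", "value_type": "str",
#               "uri": "http://www.wikidata.org/entity/Q364", "from_application": "false"},
#    "start_time_script": {"value": "2020-01-01T12:00:00", "value_type": "dateTime", ...},
#    ...}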
######################## GET METADATA ###############################
######################## PROJECTS ###############################
######## project / measurement series ###############
input_folder = r"F:\3d-data\atos-v62_project"
for root, dirs, files in os.walk(input_folder):
for atos_file in files:
if os.path.splitext(atos_file)[-1]==".session":
gom.script.sys.load_session (
files=[root + "/" + atos_file],
mode=gom.List ('delete', ['append', 'delete']),
remember_file_name=True)
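# Loads every *.session file found below input_folder into the running application;
# the selected mode 'delete' (chosen from ['append', 'delete']) replaces the
# currently loaded session instead of appending to it.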
dic_prj ={}
list_prj=[]
prj = 0
while prj < 1:
dic_dig = {}
list_projects = []
#### application ####
dic_dig_app = {}
list_app = []
#### project ####
dic_dig_project = {}
## Creates a dictionary / JSON object from the information provided by the respective software.
# @param beschreibung the description of the parameter of the software in German
# @param description the description of the parameter of the software in English
# @param keyword the keyword under which the software exposes the parameter value
# @param einheit the unit to be associated with the software parameter
# @param uri the URI to be associated with the software parameter
# @param measurementclass the measurement class URI to be associated with the parameter
# @param application indicates if the parameter is a software parameter or an external parameter
def infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
dir["value"] = gom.app.get(keyword)
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
dir["value_type"] = type(gom.app.get(keyword)).__name__
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
# if keyword == 'PROJECT.DATE', convert the date string to ISO format
if keyword == 'PROJECT.DATE':
value_new = dir["value"]
capturetime = time.strptime(value_new, "%m/%d/%y")
dir["value"] = (time.strftime("%Y-%m-%d",capturetime))
dir["value_type"] = "date"
# store in dictionary
if dir["value"] != None:
if len(str(dir["value"])) != 0:
dic_dig_app[keyword]= {}
dic_dig_app[keyword]= dir
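# Example of an entry infos_app() stores for the project date (values illustrative):
#   dic_dig_app['PROJECT.DATE'] == {"value": "2014-05-07", "value_type": "date",
#       "key_deu": "Aktuelles Datum", "from_application": "true"}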
def infos_project (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
if keyword == 'acquisition_technology':
dir["value"] = 'fringe projection'
dir["value_type"] = type(dir["value"]).__name__
elif keyword == 'project_name':
try:
value = gom.app.get('ACTUAL_SESSION_FILE')
dir["value"] = (value.split("\\")[-1].replace(".session",""))
dir["value_type"] = type(dir["value"]).__name__
except Exception:
dir["value"] = None
dir["value_type"] = type(dir["value"]).__name__
else:
dir["value"] = gom.app.get(keyword)
dir["value_type"] = type(gom.app.get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if keyword == 'PROJECT.DATE':
value_new = dir["value"]
capturetime = time.strptime(value_new, "%m/%d/%y")
dir["value"] = (time.strftime("%Y-%m-%d",capturetime))
dir["value_type"] = "date"
# store in dictionary
if dir["value"] != None:
if len(str(dir["value"])) != 0:
dic_dig_project[keyword]= {}
dic_dig_project[keyword]= dir
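# Example: for keyword 'project_name' the value is derived from the session file path,
# e.g. "D:\\scans\\V14_022a.session" -> "V14_022a" (path illustrative).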
############ get values #############
# Aktuelles Datum
beschreibung = "Aktuelles Datum"
description = None
keyword = 'PROJECT.DATE'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Applikationsname
beschreibung = "Applikationsname"
description = "Application name"
keyword = 'PROJECT.TYPE'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Applikationsversion
beschreibung = "Applikationsversion"
description = "Application version"
keyword = 'PROJECT.VERSION'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Heimatverzeichnis
beschreibung = "Heimatverzeichnis"
description = "Home directory"
keyword = 'HOME'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Projektverzeichnis
beschreibung = "Projektverzeichnis"
description = "Project directory"
keyword = 'PROJECTDIR'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Sitzungsname (komplett)
beschreibung = "Sitzungsname (komplett)"
description = "Session name (complete)"
keyword = 'ACTUAL_SESSION_FILE'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_project (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Projektname (komplett)
beschreibung = "Projektname"
description = "Project name"
keyword = 'project_name'
einheit = None
uri= 'rdfs:label'
measurementclass = None
application = "false"
infos_project (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Softwaresprache
beschreibung = "Softwaresprache"
description = "Software language"
keyword = 'LANGUAGE'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Softwareverzeichnis
beschreibung = "Softwareverzeichnis"
description = "Software directory"
keyword = 'SOFTWAREDIR'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Verzeichnis für temporäre Daten
beschreibung = "Verzeichnis für temporäre Daten"
description = "Temporary directory"
keyword = 'TEMPDIR'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_app (beschreibung, description, keyword, einheit, uri, measurementclass, application)
### MEASUREMENT SERIES (in atos v6.2 project information) #############################################################################################
p = 0
while p < len( gom.app.projects):
#### measurement series ####
dic_project = {}
#### measurement_series_information ####
dic_prj_info = {}
##### calibration / sensor ID
sensor_id = 0
## Creates a dictionary / JSON object from the information provided by the respective software.
# @param beschreibung the description of the parameter of the software in German
# @param description the description of the parameter of the software in English
# @param keyword the keyword under which the software exposes the parameter value
# @param einheit the unit to be associated with the software parameter
# @param uri the URI to be associated with the software parameter
# @param measurementclass the measurement class URI to be associated with the parameter
# @param application indicates if the parameter is a software parameter or an external parameter
def infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
dir["value"] = gom.app.projects[p].get(keyword)
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
dir["value_type"] = type(gom.app.projects[p].get(keyword)).__name__
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_prj_info[keyword] = {}
dic_prj_info[keyword] = dir
if not includeonlypropswithuri:
dic_prj_info[keyword] = {}
dic_prj_info[keyword] = dir
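# Note: when includeonlypropswithuri is set (defined earlier in the script), only
# entries that carry a "uri" key are kept. Sketch: 'prj_corner_mask' (uri=None) is
# skipped, while 'prj_ellipse_quality' (uri set below) is stored.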
############ get values #############
# Eckmaskierung
beschreibung = "Eckmaskierung"
description = "Corner mask size"
keyword = 'prj_corner_mask'
einheit = om+"percent"
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Ellipsenqualität
beschreibung = "Ellipsenqualität"
description = "Ellipse quality"
keyword = 'prj_ellipse_quality'
einheit = om+"pixel"
uri=ontologynamespace+"EllipseQuality"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Expansionskoeffizient
beschreibung = "Expansionskoeffizient"
description = "Expansion coefficient"
keyword = 'prj_ref_frame_exp_coeff'
einheit = None
uri= ontologynamespace+"ExpansionCoefficient"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kalibrierungstemperatur Rahmen
beschreibung = "Kalibrierungstemperatur Rahmen"
description = "Frame calibration temperature"
keyword = 'prj_ref_frame_cal_temperature'
einheit = om+"degreeCelsius"
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Feinausrichtung
beschreibung = "Feinausrichtung"
description = "Alignment"
keyword = 'prj_alignment'
einheit = None
uri=ontologynamespace+"Alignment"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Lichtfaktorkalibrierungs-Umgebung
beschreibung = "Lichtfaktorkalibrierungs-Umgebung"
description = "Light factor calibration enviroment"
keyword = 'prj_light_factor_calibration_environment'
einheit = None
uri=ontologynamespace+"LightFactorCalibrationEnvironment"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Max. Lichtänderung
beschreibung = "Max. Lichtänderung"
description = None
keyword = 'prj_max_lighting'
einheit = "Grauwerte" #gehen von 0 bis 100
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Max. Sekunden zwischen Lichtfaktorkalibrierung
beschreibung = "Max. Sekunden zwischen Lichtfaktorkalibrierung"
description = None
keyword = 'prj_max_sec_between_light_factor_calibration'
einheit = om+"seconds-Time"
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Max. Sensorbewegung
beschreibung = "Max. Sensorbewegung"
description = "Max. sensor movement"
keyword = 'prj_max_movement'
einheit = om+"pixel"
uri= ontologynamespace+ "MaximumSensorMovement"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Messtemperatur
beschreibung = "Messtemperatur"
description = "Measurement temperature"
keyword = 'prj_measurement_temperature'
einheit = om+"degreeCelsius"
uri=ontologynamespace+"MeasurementTemperature"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Min. Modulationsschwelle
beschreibung = "Min. Modulationsschwelle"
description = None
keyword = 'prj_mod_threshold'
einheit = "Grauwerte" #gehen von 0 bis 255
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Nah zum Sensor
beschreibung = "Nah zum Sensor"
description = "Close to sensor"
keyword = 'prj_depth_min'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Phasensteps
beschreibung = "Phasensteps"
description = "Phase steps"
keyword = 'prj_phase_steps'
einheit = None
uri=ontologynamespace+"numberOfPhaseSteps"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Projekt Basisverzeichnis
beschreibung = "Projekt Basisverzeichnis"
description = None
keyword = 'prj_directory'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Projektname
beschreibung = "Projektname"
description = "Project name"
keyword = 'prj_n'
uri= 'rdfs:label'
einheit = None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Referenzpunktfarbe
beschreibung = "Referenzpunktfarbe"
description = None
keyword = 'prj_ref_color'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Referenzpunktgröße
beschreibung = "Referenzpunktgröße"
description = None
keyword = 'prj_ref_type'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Referenzpunktvorlage-Name
beschreibung = "Referenzpunktvorlage-Name"
description = None
keyword = 'prj_ref_frame_template_name'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Bewegungskontrolle an?
beschreibung = "Status: Bewegungskontrolle an?"
description = "State: enable movement check?"
keyword = 'prj_check_movement'
einheit = None
uri=ontologynamespace+"movementControlActivated"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Daten von einer Kamera?
beschreibung = "Status: Daten von einer Kamera?"
description = None
keyword = 'prj_one_cam'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Feinausrichtung berechnen?
beschreibung = "Status: Feinausrichtung berechnen?"
description = "State: alignment computed?"
keyword = 'prj_aligned'
einheit = None
uri=ontologynamespace+"areMeasurementsAligned"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Lichtkontrolle an?
beschreibung = "Status: Lichtkontrolle an?"
description = "State: enable lighting check?"
keyword = 'prj_check_lighting'
einheit = None
uri=ontologynamespace+"lightControlActivated"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Punkte an Glanzstellen berechnen?
beschreibung = "Status: Punkte an Glanzstellen berechnen?"
description = "State: use shiny points?"
keyword = 'prj_shiny_points'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Punkte bei starken Grauwertübergängen verwendet?
beschreibung = "Status: Punkte bei starken Grauwertübergängen verwendet?"
description = None
keyword = 'prj_col_trans'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Ref.-Punktgröße verwendet?
beschreibung = "Status: Ref.-Punktgröße verwendet?"
description = None
keyword = 'prj_use_ref_type'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Referenzpunkte einsammeln?
beschreibung = "Status: Referenzpunkte einsammeln?"
description = None
keyword = 'prj_add_ref'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Transformationskontrolle an?
beschreibung = "Status: Transformationskontrolle an?"
description = "State: enable transformation check?"
keyword = 'prj_check_trafo'
einheit = None
uri=ontologynamespace+"transformationCheck"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: alle Entfaltungschecks verwendet? (ATOS II / III)?
beschreibung = "Status: alle Entfaltungschecks verwendet? (ATOS II / III)?"
description = None
keyword = 'prj_use_all_unwrap_checks'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: kleinste Modulationsmaske verwendet (nur ATOS III)?
beschreibung = "Status: kleinste Modulationsmaske verwendet (nur ATOS III)?"
description = None
keyword = 'prj_smallest_mod_mask'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: lange Dreiecke verwendet?
beschreibung = "Status: lange Dreiecke verwendet?"
description = "State: use long triangles?"
keyword = 'prj_long_triangles'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: starkes Umgebungslicht?
beschreibung = "Status: starkes Umgebungslicht?"
description = "State: strong ambient light?"
keyword = 'prj_ambient_light'
einheit = None
uri=ontologynamespace+"strongAmbientLight"
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Tiefenbeschränkung
beschreibung = "Tiefenbeschränkung"
description = None
keyword = 'prj_depth'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Vorschau-Raster
beschreibung = "Vorschau-Raster"
description = None
keyword = 'prj_raster'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Weiter entfernt vom Sensor
beschreibung = "Weiter entfernt vom Sensor"
description = None
keyword = 'prj_depth_max'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Abteilung
beschreibung = gom.app.projects[p].get ('d_department')
description = "Abteilung"
keyword = 'c_department'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Ausrichtung
beschreibung = gom.app.projects[p].get ('d_alignment')
description = "Ausrichtung"
keyword = 'c_alignment'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Bauteil
beschreibung = gom.app.projects[p].get ('d_part')
description = "Part"
keyword = 'c_part'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Chargen-Nr.
beschreibung = gom.app.projects[p].get ('d_charge_nr')
description = "Charge number"
keyword = 'c_charge_nr'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Datenstand
beschreibung = gom.app.projects[p].get ('d_version')
description = "Version"
keyword = 'c_version'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Datum
beschreibung = gom.app.projects[p].get ('d_date')
description = "Date"
keyword = 'c_date'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Firma
beschreibung = gom.app.projects[p].get ('d_company')
description = "Company"
keyword = 'c_company'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kommentar 1
beschreibung = gom.app.projects[p].get ('d_comment1')
description = "Comment 1"
keyword = 'c_comment1'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kommentar 2
beschreibung = gom.app.projects[p].get ('d_comment2')
description = "Comment 2"
keyword = 'c_comment2'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kommentar 3
beschreibung = gom.app.projects[p].get ('d_comment3')
description = "Comment 3"
keyword = 'c_comment3'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Ort
beschreibung = gom.app.projects[p].get ('d_location')
description = "Location"
keyword = 'c_location'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Projekt
beschreibung = gom.app.projects[p].get ('d_project')
description = "Project"
keyword = 'c_project'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Prüfer
beschreibung = gom.app.projects[p].get ('d_inspector')
description = "Inspector"
uri= None
keyword = 'c_inspector'
einheit = None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# System
beschreibung = gom.app.projects[p].get ('d_system')
description = "System"
keyword = 'c_system'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Teile-Nr.
beschreibung = gom.app.projects[p].get ('d_part_nr')
description = "Part number"
keyword = 'c_part_nr'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_projects (beschreibung, description, keyword, einheit, uri, measurementclass, application)
### GLOBAL REFERENCE POINTS #############################################################################################
list_grp = []
grp = 0
while grp < len(gom.app.projects[p].gref_points):
dic_grp = {}
## Creates a dictionary / JSON object from the information provided by the respective software.
# @param beschreibung the description of the parameter of the software in German
# @param description the description of the parameter of the software in English
# @param keyword the keyword under which the software exposes the parameter value
# @param einheit the unit to be associated with the software parameter
# @param uri the URI to be associated with the software parameter
# @param measurementclass the measurement class URI to be associated with the parameter
# @param application indicates if the parameter is a software parameter or an external parameter
def infos_grp(beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
dir["value"] = gom.app.projects[p].gref_points[grp].get(keyword)
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
dir["value_type"] = type(gom.app.projects[p].gref_points[grp].get(keyword)).__name__
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_grp[keyword]= {}
dic_grp[keyword]= dir
if not includeonlypropswithuri:
dic_grp[keyword]= {}
dic_grp[keyword]= dir
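# Example of a stored global reference point entry (coordinate value illustrative):
#   dic_grp['r_x'] == {"value": 12.345, "value_type": "float", "key_deu": "x-Koordinate",
#       "key_eng": "x-coordinate", "unit": om+"millimetre",
#       "uri": ontologynamespace+'xCoordinate', "from_application": "true"}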
############ get values #############
# Punkt ID
beschreibung = "Punkt ID"
description = "Point ID"
keyword = 'r_id'
einheit = None
uri= ontologynamespace+'PointID'
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Punktekennzeichnung (Messung)
beschreibung = "Punktekennzeichnung (Messung)"
description = "Point flags (measurement)"
keyword = 'r_state'
einheit = None
uri= ontologynamespace+'PointMeasurementState'
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Punktekennzeichnung (Projekt)
beschreibung = "Punktekennzeichnung (Projekt)"
description = "Point flags (project)"
keyword = 'r_pstate'
einheit = None
uri=ontologynamespace+'PointProjectState'
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Punkt ist gemeinsamer Referenzpunkt?
beschreibung = "Status: Punkt ist gemeinsamer Referenzpunkt?"
description = "State: point is common ref.point?"
keyword = 'r_common'
einheit = None
uri=ontologynamespace+'commonReferencepoint'
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: selektiert?
beschreibung = "Status: selektiert?"
description = "State: selected?"
keyword = 'selected'
einheit = None
uri=None
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# x-coordinate
beschreibung = "x-Koordinate"
description = "x-coordinate"
keyword = 'r_x'
einheit = om+"millimetre"
uri=ontologynamespace+'xCoordinate'
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# y-coordinate
beschreibung = "y-Koordinate"
description = "y-coordinate"
keyword = 'r_y'
einheit = om+"millimetre"
uri=ontologynamespace+'yCoordinate'
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# z-coordinate
beschreibung = "z-Koordinate"
description = "z-coordinate"
keyword = 'r_z'
einheit = om+"millimetre"
uri=ontologynamespace+'zCoordinate'
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Point deviation
beschreibung = "Punktabweichung"
description = "Point deviation"
keyword = 'r_dev'
einheit = om+"millimetre"
uri=ontologynamespace+'PointMeasurementDeviation'
measurementclass = None
application = "true"
infos_grp (beschreibung, description, keyword, einheit, uri, measurementclass, application)
if len(dic_grp) > 0:
list_grp.append(dic_grp)
grp = grp + 1
### MEASUREMENTS #############################################################################################
list_sensors = []
temp_list_cal_time = [] # only the calibration timestamp goes in here, for a later lookup
list_measurements = []
m = 0
while m < len(gom.app.projects[p].measurements):
dic_sensor = {}
dic_measurement = {}
dic_measurement_sensor = {}
dic_measurement_cal = {}
dic_measurement_setup ={}
dic_measurement_check ={}
dic_measurement_info = {}
dic_measurement_cal_calobject = {}
dic_measurement_cal_calsetup = {}
dic_measurement_cal_calresults = {}
### Information added manually ###
# recording technology
# only if measurements actually exist
beschreibung = "Aufnahmeverfahren"
description = "acquisition technology"
keyword = 'acquisition_technology'
einheit = None
uri= ontologynamespace+"AcquisitionTechnology"
measurementclass = ontologynamespace+"FringeProjection"
application = "false"
infos_project (beschreibung, description, keyword, einheit, uri, measurementclass, application)
## Creates a dictionary / JSON object from the information provided by the respective software.
# @param beschreibung the description of the parameter of the software in German
# @param description the description of the parameter of the software in English
# @param keyword the keyword under which the software exposes the parameter value
# @param einheit the unit to be associated with the software parameter
# @param uri the URI to be associated with the software parameter
# @param measurementclass the measurement class URI to be associated with the parameter
# @param application indicates if the parameter is a software parameter or an external parameter
def measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application, value=None):
dir = {}
dir.clear()
# time values
if keyword == 'm_cal_time':
value = gom.app.projects[p].measurements[m].get(keyword)
if value != None:
capturetime = time.strptime(value, "%a %b %d %H:%M:%S %Y")
dir["value"] = (time.strftime("%Y-%m-%dT%H:%M:%S",capturetime))
else:
dir["value"] = None
elif keyword =='theoretical_measuring_point_distance':
dir["value"] = value
else:
dir["value"] = gom.app.projects[p].measurements[m].get(keyword)
if keyword == 'm_cal_time':
dir["value_type"] = "dateTime"
elif keyword =='theoretical_measuring_point_distance':
dir["value_type"] = type(value).__name__
else:
dir["value_type"] = type(gom.app.projects[p].measurements[m].get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_sensor[keyword]= {}
dic_measurement_sensor[keyword]= dir
if not includeonlypropswithuri:
dic_measurement_sensor[keyword] = {}
dic_measurement_sensor[keyword] = dir
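# Example of the m_cal_time conversion performed above (date illustrative):
#   "Mon Mar 03 10:15:30 2014" -> "2014-03-03T10:15:30"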
def measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
if keyword == "invert m_one_cam":
dir["value"] = gom.app.projects[p].measurements[m].get('m_one_cam')
if dir["value"] == True:
dir["value"] = False
if dir["value"] == False:
dir["value"] = True
dir["value_type"] = type(gom.app.projects[p].measurements[m].get('m_one_cam')).__name__
elif keyword == "invert m_shiny_points":
dir["value"] = gom.app.projects[p].measurements[m].get('m_shiny_points')
if dir["value"] == True:
dir["value"] = False
if dir["value"] == False:
dir["value"] = True
dir["value_type"] = type(gom.app.projects[p].measurements[m].get('m_shiny_points')).__name__
else:
dir["value"] = gom.app.projects[p].measurements[m].get(keyword)
dir["value_type"] = type(gom.app.projects[p].measurements[m].get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_setup[keyword] = {}
dic_measurement_setup[keyword] = dir
if not includeonlypropswithuri:
dic_measurement_setup[keyword] = {}
dic_measurement_setup[keyword] = dir
def measurements_check (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
if keyword == "invert m_one_cam":
dir["value"] = gom.app.projects[p].measurements[m].get('m_one_cam')
if dir["value"] == True:
dir["value"] = False
if dir["value"] == False:
dir["value"] = True
dir["value_type"] = type(gom.app.projects[p].measurements[m].get('m_one_cam')).__name__
elif keyword == "invert m_shiny_points":
dir["value"] = gom.app.projects[p].measurements[m].get('m_shiny_points')
if dir["value"] == True:
dir["value"] = False
if dir["value"] == False:
dir["value"] = True
dir["value_type"] = type(gom.app.projects[p].measurements[m].get('m_shiny_points')).__name__
else:
dir["value"] = gom.app.projects[p].measurements[m].get(keyword)
dir["value_type"] = type(gom.app.projects[p].measurements[m].get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_check[keyword] = {}
dic_measurement_check[keyword] = dir
if not includeonlypropswithuri:
dic_measurement_check[keyword] = {}
dic_measurement_check[keyword] = dir
def infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
if keyword == "invert_m_one_cam":
dir["value"] = gom.app.projects[p].measurements[m].get('m_one_cam')
if dir["value"] == True:
dir["value"] = False
if dir["value"] == False:
dir["value"] = True
dir["value_type"] = type(gom.app.projects[p].measurements[m].get('m_one_cam')).__name__
elif keyword == "invert_m_shiny_points":
dir["value"] = gom.app.projects[p].measurements[m].get('m_shiny_points')
if dir["value"] == True:
dir["value"] = False
if dir["value"] == False:
dir["value"] = True
dir["value_type"] = type(gom.app.projects[p].measurements[m].get('m_shiny_points')).__name__
elif keyword == "invert_m_col_trans":
dir["value"] = gom.app.projects[p].measurements[m].get('m_col_trans')
if dir["value"] == True:
dir["value"] = False
if dir["value"] == False:
dir["value"] = True
dir["value_type"] = type(gom.app.projects[p].measurements[m].get('m_col_trans')).__name__
elif keyword == "adapted_m_trafo_mode":
dir["value"] = gom.app.projects[p].measurements[m].get('m_trafo_mode')
if dir["value"] == "automatic":
dir["value"] = "reference_points"
dir["value_type"] = type(gom.app.projects[p].measurements[m].get('m_trafo_mode')).__name__
else:
dir["value"] = gom.app.projects[p].measurements[m].get(keyword)
dir["value_type"] = type(gom.app.projects[p].measurements[m].get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_info[keyword] = {}
dic_measurement_info[keyword] = dir
if not includeonlypropswithuri:
dic_measurement_info[keyword] = {}
dic_measurement_info[keyword] = dir
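# Example of the inverted keywords handled above (values illustrative): if
# m_shiny_points is True ("use shiny points"), 'invert_m_shiny_points' stores False
# ("avoid shiny points"), and vice versa; 'adapted_m_trafo_mode' maps the value
# "automatic" to "reference_points".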
def cal_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
if keyword == 'm_cal_time':
value = gom.app.projects[p].measurements[m].get(keyword)
if value != None:
capturetime = time.strptime(value, "%a %b %d %H:%M:%S %Y")
dir["value"] = (time.strftime("%Y-%m-%dT%H:%M:%S",capturetime))
else:
dir["value"] = None
else:
dir["value"] = gom.app.projects[p].measurements[m].get(keyword)
if keyword == 'm_cal_time':
dir["value_type"] = "dateTime"
else:
dir["value_type"] = type(gom.app.projects[p].measurements[m].get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_cal[keyword]= {}
dic_measurement_cal[keyword]= dir
if not includeonlypropswithuri:
dic_measurement_cal[keyword]= {}
dic_measurement_cal[keyword]= dir
def cal_measurements_calobject (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
if keyword == 'm_cal_time':
value = gom.app.projects[p].measurements[m].get(keyword)
if value != None:
capturetime = time.strptime(value, "%a %b %d %H:%M:%S %Y")
dir["value"] = (time.strftime("%Y-%m-%dT%H:%M:%S",capturetime))
else:
dir["value"] = None
else:
dir["value"] = gom.app.projects[p].measurements[m].get(keyword)
if keyword == 'm_cal_time':
dir["value_type"] = "dateTime"
else:
dir["value_type"] = type(gom.app.projects[p].measurements[m].get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_cal_calobject[keyword]= {}
dic_measurement_cal_calobject[keyword]= dir
if not includeonlypropswithuri:
dic_measurement_cal_calobject[keyword]= {}
dic_measurement_cal_calobject[keyword]= dir
def cal_measurements_calsetup (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
if keyword == 'm_cal_time':
value = gom.app.projects[p].measurements[m].get(keyword)
if value != None:
capturetime = time.strptime(value, "%a %b %d %H:%M:%S %Y")
dir["value"] = (time.strftime("%Y-%m-%dT%H:%M:%S",capturetime))
else:
dir["value"] = None
else:
dir["value"] = gom.app.projects[p].measurements[m].get(keyword)
if keyword == 'm_cal_time':
dir["value_type"] = "dateTime"
else:
dir["value_type"] = type(gom.app.projects[p].measurements[m].get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_cal_calsetup[keyword]= {}
dic_measurement_cal_calsetup[keyword]= dir
if not includeonlypropswithuri:
dic_measurement_cal_calsetup[keyword]= {}
dic_measurement_cal_calsetup[keyword]= dir
def cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
#print ("-----")
#print (keyword)
if keyword == 'm_cal_time':
value = gom.app.projects[p].measurements[m].get(keyword)
#print (value)
if value != None:
capturetime = time.strptime(value, "%a %b %d %H:%M:%S %Y")
dir["value"] = (time.strftime("%Y-%m-%dT%H:%M:%S",capturetime))
dir["value_type"] = "dateTime"
else:
dir["value"] = value
dir["value_type"] = type(value).__name__
else:
dir["value"] = gom.app.projects[p].measurements[m].get(keyword)
dir["value_type"] = type(gom.app.projects[p].measurements[m].get(keyword)).__name__
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_cal_calresults[keyword]= {}
dic_measurement_cal_calresults[keyword]= dir
if not includeonlypropswithuri:
dic_measurement_cal_calresults[keyword]= {}
dic_measurement_cal_calresults[keyword]= dir
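# Design note: the helper functions above (measurements_sensor, measurements_setup,
# measurements_check, infos_measurements, cal_measurements, cal_measurements_calobject,
# cal_measurements_calsetup, cal_measurements_calresults) are structurally identical
# and differ mainly in the dictionary they fill.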
############ get values #############
# Aktuelle Kameratemperatur
beschreibung = "Aktuelle Kameratemperatur"
description = None
keyword = 'm_x2006_cam_temp_act'
einheit = om+"degreeCelsius"
uri = None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Anzahl der Belichtungen
beschreibung = "Anzahl der Belichtungen"
description = "Number of shutter times"
keyword = 'm_shutter_times'
einheit = None
uri= ontologynamespace+"numberOfShutterTimes"
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Anzahl der Punkte
beschreibung = "Anzahl der Punkte"
description = None
keyword = 'm_points'
einheit = None
uri= ontologynamespace+"numberOfPoints"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Anzahl der Referenzpunkte
beschreibung = "Anzahl der Referenzpunkte"
description = None
keyword = 'm_num_rp'
einheit = None
uri= ontologynamespace+"numberOfReferencePoints"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Anzahl der Transformationspunkte
beschreibung = "Anzahl der Transformationspunkte"
description = "Number of reference points"
keyword = 'm_num_trafo_points'
einheit = None
uri= ontologynamespace+"numberOfTransformationPoints"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit 0
beschreibung = "Belichtungszeit 0"
description = "Shutter time"
keyword = 'm_shutter_time0'
einheit = om+"seconds-Time"
uri= exifnamespace+'exposureTime'
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit 1
beschreibung = "Belichtungszeit 1"
description = "Shutter time"
keyword = 'm_shutter_time1'
einheit = om+"seconds-Time"
uri= exifnamespace+'exposureTime'
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit 2
beschreibung = "Belichtungszeit 2"
description = "Shutter time"
keyword = 'm_shutter_time2'
einheit = om+"seconds-Time"
uri= exifnamespace+'exposureTime'
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit 3
beschreibung = "Belichtungszeit 3"
description = "Shutter time"
keyword = 'm_shutter_time3'
einheit = om+"seconds-Time"
uri= exifnamespace+'exposureTime'
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit 4
beschreibung = "Belichtungszeit 4"
description = "Shutter time"
keyword = 'm_shutter_time4'
einheit = om+"seconds-Time"
uri= exifnamespace+'exposureTime'
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit 5
beschreibung = "Belichtungszeit 5"
description = "Shutter time"
keyword = 'm_shutter_time5'
einheit = om+"seconds-Time"
uri= exifnamespace+'exposureTime'
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit 6
beschreibung = "Belichtungszeit 6"
description = "Shutter time"
keyword = 'm_shutter_time6'
einheit = om+"seconds-Time"
uri= exifnamespace+'exposureTime'
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit Referenzpunkte
beschreibung = "Belichtungszeit Referenzpunkte"
description = "Reference points shutter time"
keyword = 'm_ref_shutter'
einheit = om+"seconds-Time"
uri= ontologynamespace+"ExposureTimeForReferencePoints"
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Belichtungszeit Tapelinienbeleuchtung
beschreibung = "Belichtungszeit Tapelinienbeleuchtung"
description = None
keyword = 'm_gray_shutter'
einheit = om+"seconds-Time"
uri= None
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# for ATOS II, "width" becomes "length"
if gom.app.projects[p].measurements[m].get('m_sensor') == "atos 2":
# Breite des Messvolumens
beschreibung = "Länge Messvolumen"
description = None
keyword = 'm_vol_width'
einheit = om+"millimetre"
uri=ontologynamespace+"MeasuringVolumeLength"
measurementclass = None
application = "true"
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application)
else:
# Breite des Messvolumens
beschreibung = "Breite des Messvolumens"
description = None
keyword = 'm_vol_width'
einheit = om+"millimetre"
uri=ontologynamespace+"MeasuringVolumeDepth"
measurementclass = None
application = "true"
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Eckmaskierung
beschreibung = "Eckmaskierung"
description = "Corner mask size"
keyword = 'm_corner_mask'
einheit = om+"percent"
uri= ontologynamespace+"CornerMask"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Fehler während der Automatisierungsprotokoll-Ausführung
beschreibung = "Fehler während der Automatisierungsprotokoll-Ausführung"
description = None
keyword = 'm_automation_error'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Gültige Kameratemperatur
beschreibung = "Gültige Kameratemperatur"
description = None
keyword = 'm_x2006_cam_temp_valid'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Gültige Lampenaufwärmung
beschreibung = "Gültige Lampenaufwärmung"
description = None
keyword = 'm_x2006_lamp_warmup_valid'
einheit = None
uri= None
measurementclass = None
application = "true"
measurements_check (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Höhe des Messvolumens
beschreibung = "Höhe des Messvolumens"
description = None
keyword = 'm_vol_height'
einheit = om+"millimetre"
uri=ontologynamespace+"MeasuringVolumeWidth"
measurementclass = None
application = "true"
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kamera Betriebstemperatur
beschreibung = "Kamera Betriebstemperatur"
description = None
keyword = 'm_x2006_cam_temp_nom'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kamerakennung (links)
beschreibung = "Kamerakennung (links)"
description = None
keyword = 'm_camera_identifier_left'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kamerakennung (rechts)
beschreibung = "Kamerakennung (rechts)"
description = None
keyword = 'm_camera_identifier_right'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kameratemperatur in Ordnung
beschreibung = "Kameratemperatur in Ordnung"
description = None
keyword = 'm_x2006_cam_temp_ok'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kameratyp
beschreibung = "Kameratyp"
description = None
keyword = 'm_sensor_camera_type'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Kamerawinkel
beschreibung = "Kamerawinkel"
description = None
keyword = 'm_camera_angle'
einheit = om+"radian"
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Lichtfaktor
beschreibung = "Lichtfaktor"
description = None
keyword = 'm_x2006_light_factor'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Lichtintensität
beschreibung = "Lichtintensität"
description = None
keyword = 'm_x2006_brightness'
einheit = om+"percent"
uri= ontologynamespace+"LightIntensity"
measurementclass = None
application = "true"
measurements_setup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Lichtänderung (linkes Bild)
beschreibung = "Lichtänderung (linkes Bild)"
description = None
keyword = 'm_lighting_left'
einheit = None
uri= None
measurementclass = None
application = "true"
measurements_check (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Lichtänderung (rechtes Bild)
beschreibung = "Lichtänderung (rechtes Bild)"
description = None
keyword = 'm_lighting_right'
einheit = None
uri= None
measurementclass = None
application = "true"
measurements_check (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Länge Messvolumen
## adjust for ATOS II
if gom.app.projects[p].measurements[m].get('m_sensor') == "atos 2":
beschreibung = "Tiefe Messvolumen"
description = None
keyword = 'm_vol_length'
einheit = om+"millimetre"
uri=ontologynamespace+"MeasuringVolumeDepth"
measurementclass = None
application = "true"
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application)
else:
beschreibung = "Länge Messvolumen"
description = None
keyword = 'm_vol_length'
einheit = om+"millimetre"
uri=ontologynamespace+"MeasuringVolumeLength"
measurementclass = None
application = "true"
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Messbeleuchtung
beschreibung = "Messbeleuchtung"
description = None
keyword = 'm_measure_light'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Messung-Abweichung
beschreibung = "Messung-Abweichung"
description = None
keyword = 'm_dev'
einheit = om+"millimetre"
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Min. Modulationsschwelle (linkes Bild)
beschreibung = "Min. Modulationsschwelle (linkes Bild)"
description = "Min. Mask threshold (left image)"
keyword = 'm_mod_left'
einheit = "Grauwerte"
uri= ontologynamespace+"MinimumFringeContrastLeftImage"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
## Min. Modulationsschwelle (rechtes Bild) #### CAUTION: WRONG VALUE HERE! IT EQUALS "NAH ZUM SENSOR" (close to sensor)
#gom.app.projects['V14_022a.amp'].measurements['M1'].get ('m_mod_right')
# Nah zum Sensor
beschreibung = "Nah zum Sensor"
description = None
keyword = 'm_depth_min'
einheit = om+"millimetre"
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Name der Messung
beschreibung = "Name der Messung"
description = "Measurement name"
keyword = 'm_name'
einheit = None
uri= rdfs+"label"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Punkt ID - does not work !!!
# beschreibung = "Punkt ID"
# keyword = 'r_id' , 0
# infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Punktekennzeichnung (Messung) - does not work !!!
# beschreibung = "Punktekennzeichnung (Messung)"
# keyword = 'r_state',0
# infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Punktekennzeichnung (Projekt) - does not work !!!
# beschreibung = "Punktekennzeichnung (Projekt)"
# keyword = 'r_pstate',0
# infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Referenzpunkt-Lichtquelle
beschreibung = "Referenzpunkt-Lichtquelle"
description = None
keyword = 'm_ref_point_source'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Referenzpunktbeleuchtung
beschreibung = "Referenzpunktbeleuchtung"
description = None
keyword = 'm_reference_light'
einheit = om+"seconds-Time"
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Sekunden seit der letzten Lichtfaktorkalibrierung
beschreibung = "Sekunden seit der letzten Lichtfaktorkalibrierung"
description = None
keyword = 'm_x2006_seconds_since_light_factor_calibration'
einheit = om+"seconds-Time"
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Sensorbewegung (linkes Bild)
beschreibung = "Sensorbewegung (linkes Bild)"
description = None
keyword = 'm_movement_left'
einheit = None
uri= None
measurementclass = None
application = "true"
measurements_check (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Sensorbewegung (rechtes Bild)
beschreibung = "Sensorbewegung (rechtes Bild)"
description = None
keyword = 'm_movement_right'
einheit = None
uri= None
measurementclass = None
application = "true"
measurements_check (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Sensorkennung
beschreibung = "Sensorkennung"
description = "Sensor identifier"
keyword = 'm_sensor_identifier'
einheit = None
uri= ontologynamespace+"serialNumber"
measurementclass="http://www.wikidata.org/entity/Q1198578"
application = "true"
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Sensormodell
beschreibung = "Sensormodell"
description = None
keyword = 'm_sensor'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Daten von einzelner Kamera?
beschreibung = "Status: Daten von einzelner Kamera?"
description = "State: data from one camera only?"
keyword = 'm_one_cam'
einheit = None
uri= ontologynamespace+"useTripleScanPoints"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Daten von einzelner Kamera?
# ** INVERTED **
beschreibung = "Status: Triple-Scan-Punkte vermeiden?"
description = "State: Avoid Triple Scan points?"
keyword = "invert_m_one_cam"
einheit = None
uri= ontologynamespace+"avoidTripleScanPoints"
measurementclass = None
application = "true, inverted value from keyword= m_one_cam"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Kamera- und Sensorkennung gültig?
beschreibung = "Status: Kamera- und Sensorkennung gültig?"
description = None
keyword = 'm_camera_sensor_identifier_valid'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Lichtfaktorkalibrierung?
beschreibung = "Status: Lichtfaktorkalibrierung?"
description = None
keyword = 'm_x2006_light_factor_calibrated'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Messung transformiert?
beschreibung = "Status: Messung transformiert?"
description = None
keyword = 'm_transformed'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Projektorkalibrierung verwenden?
beschreibung = "Status: Projektorkalibrierung verwenden?"
description = None
keyword = 'm_proj_calib'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: Punkt ist gemeinsamer Referenzpunkt? - does not work !!!
# beschreibung = "Status: Punkt ist gemeinsamer Referenzpunkt?"
# keyword = 'r_common', 0
# infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: compute points on shiny areas?
beschreibung = "Status: Punkte an Glanzstellen berechnen?"
description = "State: use shiny points?"
keyword = 'm_shiny_points'
einheit = None
uri=ontologynamespace+"usePointsOnShinyAreas"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: compute points on shiny areas?
# ** INVERTED **
beschreibung = "Status: Punkte auf Glanzstellen vermeiden?"
description = "State: Avoid points on shiny surfaces?"
keyword = 'invert_m_shiny_points'
einheit = None
uri=ontologynamespace+"avoidPointsOnShinyAreas"
measurementclass = None
application = "true, inverted value from keyword= m_shiny_points"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: compute points at strong gray-value transitions?
beschreibung = "Status: Punkte bei starken Grauwertunterschieden berechnen?"
description = "State: use points at strong color transitions?"
keyword = 'm_col_trans'
einheit = None
uri=ontologynamespace+"useStrongColorTransitionsPoints"
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: compute points at strong gray-value transitions?
# ** INVERTED **
beschreibung = "Status: Punkte bei starken Helligkeitsunterschieden vermeiden?"
description = "State: Avoid points at strong brightness differences?"
keyword = 'invert_m_col_trans'
einheit = None
uri= ontologynamespace+"avoidStrongBrightnessDifferencePoints"
measurementclass = None
application = "true, inverted value from keyword= m_col_trans"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: tape line illumination used?
beschreibung = "Status: Tapelinienbeleuchtung verwendet?"
description = None
keyword = 'm_use_gray_shutter'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: use smallest modulation mask (ATOS III only)?
beschreibung = "Status: kleinste Modulationsmaske verwenden (nur ATOS III) verwendet?"
description = None
keyword = 'm_smallest_mod_mask'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Status: long triangles used?
beschreibung = "Status: lange Dreiecke verwendet?"
description = None
keyword = 'm_long_tri'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: selected? - does not work !!!
# beschreibung = "Status: selektiert?"
# keyword = 'selected',0
# infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Depth restriction
beschreibung = "Tiefenbeschränkung"
description = None
keyword = 'm_depth'
einheit = om+"millimetre"
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Transformation matrix of the measurement
beschreibung = "Transformationsmatrix der Messung"
description = None
keyword = 'm_trafo_matrix'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Transformation mode of the measurement
beschreibung = "Transformationsmodus der Messung"
description = "Measurement transformation mode"
keyword = 'm_trafo_mode'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Transformation mode of the measurement
# ** ADAPTED **
beschreibung = "Transformationsmethode"
description = "Transformation method"
keyword = 'adapted_m_trafo_mode'
einheit = None
uri= ontologynamespace+"measurementTransformationMethod"
measurementclass = None
application = "true, adapted value from keyword= m_trafo_mode"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Remaining lamp warm-up time
beschreibung = "Verbleibende Lampenaufwärmzeit"
description = None
keyword = 'm_x2006_lamp_warmup_rem_sec'
einheit = om+"seconds-Time"
uri= None
measurementclass = None
application = "true"
measurements_check (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Preview grid
beschreibung = "Vorschau-Raster"
description = None
keyword = 'm_raster'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Farther away from the sensor
beschreibung = "Weiter entfernt vom Sensor"
description = None
keyword = 'm_depth_max'
einheit = om+"millimetre"
uri= None
measurementclass = None
application = "true"
infos_measurements (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Number of cameras
beschreibung = "Anzahl der Kameras"
description = "Number of cameras"
keyword = 'm_cal_num_cameras'
einheit = None
uri = ontologynamespace+"numberOfCameras"
measurementclass = None
application = "true"
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Number of scale bars
beschreibung = "Anzahl der Maßstäbe"
description = "Number of scales"
keyword = 'm_cal_num_scales'
einheit = None
uri=ontologynamespace+"numberOfScales"
measurementclass = None
application = "true"
cal_measurements_calobject (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Acquisition mode
beschreibung = "Aufnahmemodus"
description = None
keyword = 'm_cal_snap_mode'
einheit = None
uri= None
measurementclass = None
application = "true"
cal_measurements_calsetup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Expansion coefficient
beschreibung = "Ausdehnungskoeffizient"
description = None
keyword = 'm_cal_exp_coeff'
einheit = None
uri= None
measurementclass = None
application = "true"
cal_measurements_calobject (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Width of the measurement volume
# adapt for ATOS II
### HERE
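# Note (assumption inferred from the checks below): older ATOS II projects seem
# to expose the calibrated volume only via the m_vol_* keywords, while newer
# sensors fill m_cal_sensor_*; hence the fallback branch.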
if (gom.app.projects[p].measurements[m].get('m_sensor') == "atos 2") and (gom.app.projects[p].measurements[m].get('m_cal_sensor_width') == None):
beschreibung = "Länge des Messvolumens"
description = "Measurement volume length"
keyword = 'm_vol_width'
einheit = om+"millimetre"
uri=ontologynamespace+"CalibrationVolumeLength"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
else:
beschreibung = "Länge des Messvolumens"
description = "Measurement volume length"
keyword = 'm_cal_sensor_width'
einheit = om+"millimetre"
uri=ontologynamespace+"CalibrationVolumeDepth"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Calibration date
beschreibung = "Datum der Kalibrierung"
description = "Calibration date"
keyword = 'm_cal_time'
einheit = None
uri = ontologynamespace+"CalibrationDate"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Height of the measurement volume
if (gom.app.projects[p].measurements[m].get('m_sensor') == "atos 2") and (gom.app.projects[p].measurements[m].get('m_cal_sensor_width') == None):
beschreibung = "Höhe des Messvolumens"
description = "Measurement volume height"
keyword = 'm_vol_height'
einheit = om+"millimetre"
uri=ontologynamespace+"CalibrationVolumeWidth"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
else:
beschreibung = "Höhe des Messvolumens"
description = "Measurement volume height"
keyword = 'm_cal_sensor_height'
einheit = om+"millimetre"
uri=ontologynamespace+"CalibrationVolumeWidth"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Height variance
beschreibung = "Höhenänderung"
description = "Height variance"
keyword = 'm_cal_var_height'
einheit = om+"millimetre"
uri= ontologynamespace+"HeightVariance"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Identification point ID
beschreibung = "Identifizierungspunkt-ID"
description = None
keyword = 'm_cal_obj_id'
einheit = None
uri= None
measurementclass = None
application = "true"
cal_measurements_calobject (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Calibration deviation
beschreibung = "Kalibrierabweichung"
description = "Calibration deviation"
keyword = 'm_cal_residual'
einheit = om+"millimetre"
uri= ontologynamespace+"CalibrationDeviation"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Calibration object
beschreibung = "Kalibrierobjekt"
description = None
keyword = 'm_cal_obj_type'
einheit = None
uri= None
measurementclass = None
application = "true"
cal_measurements_calobject (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Light intensity
beschreibung = "Lichtintensität"
description = None
keyword = 'm_cal_light_intensity'
einheit = None
uri= None
measurementclass = None
application = "true"
cal_measurements_calsetup (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Max. angle
beschreibung = "Max. Winkel"
description = None
keyword = 'm_cal_max_angle'
einheit = om+"radian"
uri= None
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Scale deviation
beschreibung = "Maßstabsabweichung"
description = "scale deviation"
keyword = 'm_cal_scale_dev'
einheit = om+"millimetre"
uri=ontologynamespace+"ScaleDeviation"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Measurement temperature
beschreibung = "Messtemperatur"
description = "Measurement temperature"
keyword = 'm_cal_temp'
einheit = om+"degreeCelsius"
uri= ontologynamespace+"CalibrationTemperature"
measurementclass = None
application = "true"
cal_measurements_calobject (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Min. angle
beschreibung = "Min. Winkel"
description = None
keyword = 'm_cal_min_angle'
einheit = om+"radian"
uri= None
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Name
beschreibung = "Name"
description = "Name"
keyword = 'm_cal_obj_name'
einheit = None
uri= ontologynamespace+"calibrationObjectName"
measurementclass = None
application = "true"
cal_measurements_calobject (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Lens
beschreibung = "Objektiv"
description = "Camera lenses"
keyword = 'm_cal_lense'
einheit = om+"millimetre"
measurementclass="http://www.wikidata.org/entity/Q193540"
uri= ontologynamespace+"FocalLengthCamera"
application = "true"
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Retro calibration
beschreibung = "Retro-Kalibrierung"
description = None
keyword = 'm_cal_retro'
einheit = None
uri= None
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Quick calibration
beschreibung = "Schnellkalibrierung"
description = None
keyword = 'm_cal_quick'
einheit = None
uri= ontologynamespace+"isQuickCalibrated"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Depth of the measurement volume
### adapt for ATOS II
if (gom.app.projects[p].measurements[m].get('m_sensor') == "atos 2") and (gom.app.projects[p].measurements[m].get('m_cal_sensor_width') == None):
beschreibung = "Tiefe des Messvolumens"
description = "Measurement volume depth"
keyword = 'm_vol_length'
einheit = om+"millimetre"
uri=ontologynamespace+"CalibrationVolumeDepth"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
else:
beschreibung = "Tiefe des Messvolumens"
description = "Measurement volume depth"
keyword = 'm_cal_sensor_depth'
einheit = om+"millimetre"
uri=ontologynamespace+"CalibrationVolumeLength"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Triangulation angle
beschreibung = "Triangulationswinkel"
description = "Camera angle"
keyword = 'm_cal_cam_angle'
einheit = om+"radian"
uri=ontologynamespace+"CameraAngle"
measurementclass = None
application = "true"
cal_measurements_calresults (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Certification temperature
beschreibung = "Zertifizierungstemperatur"
description = "Certification temperature"
keyword = 'm_cal_ref_temp'
einheit = om+"degreeCelsius"
uri=ontologynamespace+"ReferenceTemperature"
measurementclass = None
application = "true"
cal_measurements_calobject (beschreibung, description, keyword, einheit, uri, measurementclass, application)
#### additional_infos ####
#### info that cannot be read out via the script ####
# image_width
def additional_infos_sensor (beschreibung, description, value, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
dir["value"] = value
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
dir["value_type"] = type(dir["value"]).__name__
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_sensor[keyword]= {}
dic_measurement_sensor[keyword]= dir
if not includeonlypropswithuri:
dic_measurement_sensor[keyword]= {}
dic_measurement_sensor[keyword]= dir
def additional_infos (beschreibung, description, value, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
dir["value"] = value
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if keyword == "acquisition_time":
dir["value_type"] = "dateTime"
else:
dir["value_type"] = type(dir["value"]).__name__
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_info[keyword]= {}
dic_measurement_info[keyword]= dir
if not includeonlypropswithuri:
dic_measurement_info[keyword]= {}
dic_measurement_info[keyword]= dir
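# The three additional_info* builders in this section differ only in the target
# dictionary they write to. A minimal sketch of a single generic builder
# (hypothetical helper, not part of the original script) could replace them:
#
#   def additional_info_generic (target, beschreibung, description, value, keyword, einheit, uri, measurementclass, application):
#       dir = {"value": value, "key_deu": beschreibung}
#       if description != None:
#           dir["key_eng"] = description
#       if uri != None:
#           dir["uri"] = uri
#       if measurementclass != None:
#           dir["measurementclass"] = measurementclass
#       dir["value_type"] = "dateTime" if keyword == "acquisition_time" else type(value).__name__
#       if einheit != None:
#           dir["unit"] = einheit
#       if application != None:
#           dir["from_application"] = application
#       if value != None and len(str(value)) != 0:
#           if not includeonlypropswithuri or "uri" in dir:
#               target[keyword] = dir
#
#   # e.g. additional_info_generic(dic_measurement_setup, ...) would stand in for additional_info_setup(...)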
def additional_info_setup (beschreibung, description, value, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
dir["value"] = value
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if keyword == "acquisition_time":
dir["value_type"] = "dateTime"
else:
dir["value_type"] = type(dir["value"]).__name__
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_measurement_setup[keyword]= {}
dic_measurement_setup[keyword]= dir
if not includeonlypropswithuri:
dic_measurement_setup[keyword]= {}
dic_measurement_setup[keyword]= dir
# For the image width and the sensor type it must be clear which scanner was used
# if the sensor type is ATOS II, the following is written
if (gom.app.projects[p].measurements[m].get('m_sensor') == "atos 2") and (gom.app.projects[p].measurements[m].get('m_sensor_identifier') == None):
# sensor_type
beschreibung = "Sensortyp"
description = "Sensor type"
value = "ATOS II (first generation)"
keyword = "sensor_type"
einheit = None
uri = ontologynamespace+"sensorType"
measurementclass = None
application = "false"
additional_infos_sensor (beschreibung, description, value, keyword, einheit, uri, measurementclass, application)
# Image width
beschreibung = "Bildbreite"
description = "Image width"
value = int("1280")
keyword = "image_width"
einheit = om+"pixel"
uri = exifnamespace+"imageWidth"
measurementclass = None
dic = dic_measurement_info
application = "false"
additional_info_setup (beschreibung, description, value, keyword, einheit, uri, measurementclass, application)
# Image height
beschreibung = "Bildhöhe"
description = "Image height"
value = int("1024")
keyword = "image_height"
einheit = om+"pixel"
uri = exifnamespace+"imageHeight"
measurementclass = None
dic = dic_measurement_info
application = "false"
additional_info_setup (beschreibung, description, value, keyword, einheit, uri, measurementclass, application)
# the calibrated measurement volume is empty for ATOS II; it could be taken over
# from the predefined one, but the predefined one would first need a new uri....
beschreibung = "Bildhöhe"
description = "Image height"
value = int("1024")
keyword = "image_height"
einheit = om+"pixel"
uri = exifnamespace+"imageHeight"
measurementclass = None
dic = dic_measurement_info
application = "false"
additional_info_setup (beschreibung, description, value, keyword, einheit, uri, measurementclass, application)
# if the serial number is "D08061" it is an ATOS III Rev. 01 and the following is written:
if gom.app.projects[p].measurements[m].get ('m_sensor_identifier') == "D08061":
# sensor_type
beschreibung = "Sensortyp"
description = "Sensor type"
value = "ATOS III Rev.01"
keyword = "sensor_type"
einheit = None
uri = ontologynamespace+"sensorType"
measurementclass = None
application = "false"
additional_infos_sensor (beschreibung, description, value, keyword, einheit, uri, measurementclass, application)
# Image width
beschreibung = "Bildbreite"
description = "Image width"
value = int("2048")
keyword = "image_width"
einheit = om+"pixel"
uri = exifnamespace+"imageWidth"
measurementclass = None
dic = dic_measurement_info
application = "false"
additional_info_setup (beschreibung, description, value, keyword, einheit, uri, measurementclass, application)
# Image height
beschreibung = "Bildhöhe"
description = "Image height"
value = int("2048")
keyword = "image_height"
einheit = om+"pixel"
uri = exifnamespace+"imageHeight"
measurementclass = None
dic = dic_measurement_info
application = "false"
additional_info_setup (beschreibung, description, value, keyword, einheit, uri, measurementclass, application)
# Acquisition time
m_file = gom.app.projects[p].get ('prj_directory') + "\\measurements\\" + gom.app.projects[p].measurements[m].get ('m_name') + ".atos"
capturetime = time.ctime(os.path.getmtime(m_file))
capturetime = time.strptime(capturetime, "%a %b %d %H:%M:%S %Y")
capturetime = (time.strftime("%Y-%m-%dT%H:%M:%S",capturetime))
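# e.g. an mtime rendered as "Thu Jun 13 14:21:05 2019" becomes "2019-06-13T14:21:05"
# (hypothetical timestamp; datetime.fromtimestamp(os.path.getmtime(m_file)).isoformat()
# would be a shorter equivalent, modulo fractional seconds)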
beschreibung = "Aufnahmezeit"
description = "Acquisition time"
value = capturetime
keyword = "acquisition_time"
einheit = None
uri = ontologynamespace+'acquisitionTime'
measurementclass = None
dic = dic_measurement_info
application = "false"
additional_infos (beschreibung, description, value, keyword, einheit, uri, measurementclass, application)
######################## REFERENCE POINTS #############################################################################################
# individual reference points
list_refpoints = []
rp = 0
while rp < len(gom.app.projects[p].measurements[m].ref_points):
dic_refpoint = {}
def infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application):
dir = {}
dir.clear()
dir["value"] = gom.app.projects[p].measurements[m].ref_points[rp].get(keyword)
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
dir["value_type"] = type(gom.app.projects[p].measurements[m].ref_points[rp].get(keyword)).__name__
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_refpoint[keyword]= {}
dic_refpoint[keyword]= dir
if not includeonlypropswithuri:
dic_refpoint[keyword]= {}
dic_refpoint[keyword]= dir
############ get values #############
# Point ID
beschreibung = "Punkt ID"
description = "Point ID"
keyword = 'r_id'
einheit = None
uri = ontologynamespace+'PointID'
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Point flags (measurement)
beschreibung = "Punktekennzeichnung (Messung)"
description = "Point flags (measurement)"
keyword = 'r_state'
einheit = None
uri= ontologynamespace+'PointMeasurementState'
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Point flags (project)
beschreibung = "Punktekennzeichnung (Project)"
description = "Point flags (project)"
keyword = 'r_pstate'
einheit = None
uri= ontologynamespace+'PointProjectState'
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: point is a common reference point?
beschreibung = "Status: Punkt ist gemeinsamer Referenzpunkt?"
description = "State: point is common ref.point?"
keyword = 'r_common'
einheit = None
uri= ontologynamespace+'commonReferencepoint'
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: selected?
beschreibung = "Status: selektiert?"
description = "State: selected?"
keyword = 'selected'
einheit = None
uri= None
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# x-coordinate
beschreibung = "x-Koordinate"
description = "x-coordinate"
keyword = 'r_x'
einheit = om+"millimetre"
uri= ontologynamespace+'xCoordinate'
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# y-coordinate
beschreibung = "y-Koordinate"
description = "y-coordinate"
keyword = 'r_y'
einheit = om+"millimetre"
uri= ontologynamespace+'yCoordinate'
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# z-coordinate
beschreibung = "z-Koordinate"
description = "z-coordinate"
keyword = 'r_z'
einheit = om+"millimetre"
uri= ontologynamespace+'zCoordinate'
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Point deviation
beschreibung = "Punktabweichung"
description = "Point deviation"
keyword = 'r_dev'
einheit = om+"millimetre"
uri= ontologynamespace+'PointMeasurementDeviation'
measurementclass = None
application = "true"
infos_refpoints (beschreibung, description, keyword, einheit, uri, measurementclass, application)
if len(dic_refpoint) > 0:
list_refpoints.append(dic_refpoint)
rp = rp + 1
if len(dic_measurement_cal_calobject) > 0:
dic_measurement_cal["cal_object"] = dic_measurement_cal_calobject
if len(dic_measurement_cal_calsetup) > 0:
dic_measurement_cal["cal_setup"] = dic_measurement_cal_calsetup
if len(dic_measurement_cal_calresults) > 0:
dic_measurement_cal["cal_properties"] = dic_measurement_cal_calresults
### theoretical measuring point distance
if 'sensor_type' in dic_measurement_sensor:
st = (dic_measurement_sensor['sensor_type']['value'])
## "ATOS III Rev.01"
if st == "ATOS III Rev.01":
value = None
mv_length = gom.app.projects[p].measurements[m].get('m_vol_length')
# MV 30
if 27 < mv_length < 33:
value = 0.02
# MV 65
elif 58 < mv_length < 72 :
value = 0.03
# MV 100
elif 90 < mv_length < 110:
value = 0.05
# MV 150
elif 135 < mv_length < 165:
value = 0.07
# MV 300
elif 270 < mv_length < 330:
value = 0.15
# MV 500
elif 450 < mv_length < 550:
value = 0.25
# MV 1000
elif 900 < mv_length < 1100:
value = 0.50
# MV 1500
elif 1350 < mv_length < 1650:
value = 0.75
# MV 2000
elif 1800 < mv_length < 2200:
value = 1.00
beschreibung = 'Theoretischer Messpunktabstand'
description = 'theoretical measuring point distance'
keyword = 'theoretical_measuring_point_distance'
einheit = om+"millimetre"
uri=ontologynamespace+'TheoreticalMeasuringPointDistance'
measurementclass = None
application = 'derived from the used measuring volume'
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application, value)
elif st == "ATOS II (first generation)":
value = None
# here the width has to be queried instead; otherwise the logic is the same
mv_length = gom.app.projects[p].measurements[m].get('m_vol_width')
# MV 100
if 90 < mv_length < 110:
value = 0.07
# MV 35
elif 31 < mv_length < 39:
value = 0.03
beschreibung = 'Theoretischer Messpunktabstand'
description = 'theoretical measuring point distance'
keyword = 'theoretical_measuring_point_distance'
einheit = om+"millimetre"
uri=ontologynamespace+'TheoreticalMeasuringPointDistance'
measurementclass = None
application = 'derived from the used measuring volume'
measurements_sensor (beschreibung, description, keyword, einheit, uri, measurementclass, application, value)
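# Worked example for the tables above: an ATOS III Rev.01 volume length of
# 150 mm falls into the MV 150 bracket (135 < 150 < 165), so the theoretical
# measuring point distance is recorded as 0.07 mm.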
dic_sensor["calibration"] = dic_measurement_cal
dic_sensor["capturing_device"] = dic_measurement_sensor
########### calibration NEW ##############
# build a temporary comparison list of all calibration times; if a time is not yet in "temp_list_cal_time", add it and create a new "sensor" entry
if "m_cal_time" in dic_measurement_cal_calresults:
cal_time_new = dic_measurement_cal_calresults["m_cal_time"]["value"]
else:
cal_time_new = None
# check whether the current calibration time is already in the list
# if it is, grab the sensor_id and add it to the current measurement
if cal_time_new in temp_list_cal_time:
for s in list_sensors:
if "calibration" in s:
if "cal_properties" in s["calibration"]:
if "m_cal_time" in s["calibration"]["cal_properties"]:
if "value" in s["calibration"]["cal_properties"]["m_cal_time"]:
cal_time_store = s["calibration"]["cal_properties"]["m_cal_time"]["value"] # times already stored in the temp list
if cal_time_store == cal_time_new: # if they match ...
dic_measurement_info["sensor_id"] = s["capturing_device"]["sensor_id"] # grab the sensor_id and add it to the current measurement
# if the calibration time is not yet in the list,
# create a new sensor_id and add it to the sensor information / calibrations
if cal_time_new not in temp_list_cal_time:
#print ("noch nicht drin")
temp_list_cal_time.append(cal_time_new)
# add the sensor_id to dic_sensor
dic_s = {}
dic_s["value"] = sensor_id
dic_s["key_deu"] = "Sensor ID"
dic_s["from_application"] = "false"
dic_s["key_eng"] = "sensor ID"
dic_s["uri"]= ontologynamespace+'sensor_id'
dic_s["value_type"] = type(dic_s["value"]).__name__
dic_sensor["capturing_device"]["sensor_id"] = dic_s
# add the sensor_id to the measurement
dic_measurement_info["sensor_id"] = dic_s
# increment sensor_id
sensor_id += 1
# add everything to list_sensors
list_sensors.append(dic_sensor)
#### Merge Dictionaries #####
if len(dic_measurement_setup) > 0:
dic_measurement["measurement_setup"] = dic_measurement_setup
if len(dic_measurement_check) > 0:
dic_measurement["measurement_check"] = dic_measurement_check
if len(dic_measurement_info) > 0:
dic_measurement["measurement_properties"] = dic_measurement_info
if len(list_refpoints) > 0:
dic_measurement["referencepoints"] = list_refpoints
if len(dic_measurement) > 0:
list_measurements.append(dic_measurement)
m = m + 1
if len(dic_prj_info) > 0:
dic_project["measurement_series_information"] = dic_prj_info
if len(list_measurements) > 0:
dic_project["measurements"] = list_measurements
dic_project["sensors"] = list_sensors
dic_gp2 = {}
if len(list_grp) > 0:
dic_gp2["referencepoints"] = list_grp
if len(dic_gp2) > 0:
dic_project["global_referencepoints"] = dic_gp2
if len(dic_project) > 0:
list_projects.append(dic_project)
p = p + 1
###################### MESHES ######################
list_meshes = []
me = 0
while me < len( gom.app.meshes):
dic_mesh = {}
dic_mesh_info = {}
list_mesh_processing = []
dic_mesh_processing = {}
str_mesh_processing_poly = None
dic_mesh_processing_poly = {}
dic_mesh_processing_poly_setup = {}
list_mesh_processing_poly_post = []
dic_mesh_processing_poly_post = {}
def infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application, value=None):
dir = {}
dir.clear()
if keyword == "comment":
c = gom.app.meshes[me].get(keyword)
dir["value"] = c.replace("\n",", ")
elif value == None:
dir["value"] = gom.app.meshes[me].get(keyword)
else:
dir["value"] = value
dir["key_deu"] = beschreibung
if description!=None:
dir["key_eng"] = description
if uri!=None:
dir["uri"] = uri
if measurementclass!=None:
dir["measurementclass"] = measurementclass
if value == None:
dir["value_type"] = type(gom.app.meshes[me].get(keyword)).__name__
else:
dir["value_type"] = type(value).__name__
if einheit!=None:
dir["unit"] = einheit
if application!=None:
dir["from_application"] = application
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_mesh_info[keyword]= {}
dic_mesh_info[keyword]= dir
if not includeonlypropswithuri:
dic_mesh_info[keyword]= {}
dic_mesh_info[keyword]= dir
###############################################
# Number of triangles
beschreibung = "Anzahl der Dreiecke"
description = "Number of triangles"
keyword = "num_triangles"
einheit = None
uri= giganamespace+"TotalNumberOfFaces"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Number of points
beschreibung = "Anzahl der Punkte"
description = "Number of points"
keyword = "num_points"
einheit = None
uri=giganamespace+"TotalNumberOfVertices"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Exposure time unit
beschreibung = "Belichtungszeiteinheit"
description = None
keyword = "u_shutter"
einheit = om+"seconds-Time"
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Area
beschreibung = "Fläche"
description = "Area"
keyword = "area"
einheit = om+"squareMillimetre"
uri=giganamespace+"TotalArea"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Area (selected)
beschreibung = "Fläche (selektiert)"
description = None
keyword = "selected_area"
einheit = om+"squareMillimetre"
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Area unit
beschreibung = "Flächeneinheit"
description = None
keyword = "u_area"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Inactive reason
beschreibung = "Inaktiv-Grund"
description = None
keyword = "inact_reason"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Comment
beschreibung = "Kommentar"
description = "comment"
keyword = "comment"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Length unit
beschreibung = "Längeneinheit"
description = None
keyword = "u"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Material thickness
beschreibung = "Materialstärke"
description = None
keyword = "off"
einheit = om+"millimetre"
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Max. X limit
beschreibung = "Max. X-Grenze"
description = "Max. X limit"
keyword = "boundb_maxx"
einheit = om+"millimetre"
uri= giganamespace+"MaximumXCoordinate"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Max. Y limit
beschreibung = "Max. Y-Grenze"
description = "Max. Y limit"
keyword = "boundb_maxy"
einheit = om+"millimetre"
uri= giganamespace+"MaximumYCoordinate"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Max. Z limit
beschreibung = "Max. Z-Grenze"
description = "Max. Z limit"
keyword = "boundb_maxz"
einheit = om+"millimetre"
uri= giganamespace+"MaximumZCoordinate"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Min. X limit
beschreibung = "Min. X-Grenze"
description = "Min. X limit"
keyword = "boundb_minx"
einheit = om+"millimetre"
uri= giganamespace+"MinimumXCoordinate"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Min. Y limit
beschreibung = "Min. Y-Grenze"
description = "Min. Y limit"
keyword = "boundb_miny"
einheit = om+"millimetre"
uri= giganamespace+"MinimumYCoordinate"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Min. Z limit
beschreibung = "Min. Z-Grenze"
description = "Min. Z limit"
keyword = "boundb_minz"
einheit = om+"millimetre"
uri= giganamespace+"MinimumZCoordinate"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Name
beschreibung = "Name"
description = "Name"
keyword = "n"
einheit = None
uri= rdfs+"label"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Point index
# gom.app.meshes["V14_022a_1_1"].get ("id", 0)
# Report unit
beschreibung = "Reporteinheit"
description = None
keyword = "u_figure"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: measure opposite side?
beschreibung = "Status: Gegenseite messen?"
description = None
keyword = "uoff"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: reference?
beschreibung = "Status: Referenz?"
description = None
keyword = "is_ref"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: active?
beschreibung = "Status: aktiv?"
description = None
keyword = "is_active"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: inactive?
beschreibung = "Status: inaktiv?"
description = None
keyword = "is_inact"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# State: selected?
beschreibung = "Status: selektiert?"
description = None
keyword = "selected"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Temperature unit
beschreibung = "Temperatureinheit"
description = None
keyword = "u_temp"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Type
beschreibung = "Typ"
description = None
keyword = "type"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Directory path
beschreibung = "Verzeichnispfad"
description = None
keyword = "n_path"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Volume
beschreibung = "Volumen"
description = "volume"
keyword = "volume"
einheit = om+"cubicMillimetre"
uri=ontologynamespace+"volume"
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Volume unit
beschreibung = "Volumeneinheit"
description = None
keyword = "u_volume"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Angle unit
beschreibung = "Winkeleinheit"
description = None
keyword = "u_angle"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
# Time unit
beschreibung = "Zeiteinheit"
description = None
keyword = "u_time"
einheit = None
uri= None
measurementclass = None
application = "true"
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application)
##### CALCULATIONS #####
# computed value
# average 3D point resolution per mm²
beschreibung= "durchschnittliche Auflösung"
description= "average resolution"
keyword= "average_resolution"
einheit = "1/mm²"
uri=None
measurementclass=None
application= "script-based calculation"
anz_punkte_netz_ar = gom.app.meshes[me].get ("num_points")
flaeche_netz_ar = gom.app.meshes[me].get ("area")
value = anz_punkte_netz_ar / flaeche_netz_ar
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application, value)
# computed value
# average point distance
beschreibung= "durchschnittlicher Punktabstand"
description= "average point distance"
keyword= "average_point_distance_mist"
einheit= om+"millimetre"
uri=ontologynamespace +"AveragePointDistance"
measurementclass=None
application= "script-based calculation"
anz_punkte_netz= gom.app.meshes[me].get ("num_points")
flaeche_netz= gom.app.meshes[me].get ("area")
value= 1/math.sqrt(anz_punkte_netz / flaeche_netz)
infos_meshes (beschreibung, description, keyword, einheit, uri, measurementclass, application, value)
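# Worked example for the two derived values above (hypothetical numbers):
# 1000000 points on an area of 10000 mm² give an average resolution of
# 100 points/mm² and an average point distance of 1/sqrt(100) = 0.1 mm.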
#### POLYGONIZATION ####
#### metadata from the mesh comment ####
c = gom.app.meshes[me].get ("comment")
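# The mesh comment is expected to carry "key = value" lines, e.g.
# (hypothetical content):
#   poly_raster = 1:2
#   ref_points = automatic
# Each line is split on "=" below and spaces are stripped from key and value.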
for poly in c.split("\n"):
#print ("-")
#print (poly)
liste = poly.split("=")
#print (liste)
if len(liste) == 2:
if (liste[0].replace(" ","")) == "poly_raster":
dir = {}
dir.clear()
dir["value"] = "polygonisation"
dir["value_type"] = type(dir["value"]).__name__
dic_mesh_processing["processname"] = dir
# poly_raster
keyword = liste[0].replace(" ","")
dir = {}
dir.clear()
dir["value"] = liste[1].replace(" ","")
dir["key_deu"] = "Polygonisierungsraster"
dir["key_eng"] = "polygonisation raster"
dir["value_type"] = type(dir["value"]).__name__
dir["uri"] = ontologynamespace+"polyRaster"
dir["from_application"] = "True, part value from keyword=’comment’"
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_mesh_processing_poly_setup[keyword]= {}
dic_mesh_processing_poly_setup[keyword]= dir
if not includeonlypropswithuri:
dic_mesh_processing_poly_setup[keyword]= {}
dic_mesh_processing_poly_setup[keyword]= dir
if dir["value"] != "1:1":
#smooth
dic_mesh_processing_poly_post_s = {}
# process
dir = {}
dir.clear()
dir["value"] = "smooth"
dir["value_type"] = type(dir["value"]).__name__
dic_mesh_processing_poly_post_s["processname"] = {}
dic_mesh_processing_poly_post_s["processname"] = dir
# automatic
dir = {}
dir.clear()
dir["value"] = True
dir["value_type"] = type(dir["value"]).__name__
dir["from_application"] = "False"
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_mesh_processing_poly_post_s["automatic"]= {}
dic_mesh_processing_poly_post_s["automatic"]= dir
if not includeonlypropswithuri:
dic_mesh_processing_poly_post_s["automatic"]= {}
dic_mesh_processing_poly_post_s["automatic"]= dir
if len(dic_mesh_processing_poly_post_s) > 0:
list_mesh_processing_poly_post.append(dic_mesh_processing_poly_post_s)
#thin
dic_mesh_processing_poly_post_t = {}
# process
dir = {}
dir.clear()
dir["value"] = "thin"
dir["value_type"] = type(dir["value"]).__name__
dic_mesh_processing_poly_post_t["processname"] = {}
dic_mesh_processing_poly_post_t["processname"] = dir
# automatic
dir = {}
dir.clear()
dir["value"] = True
dir["value_type"] = type(dir["value"]).__name__
dir["from_application"] = "False"
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_mesh_processing_poly_post_t["automatic"]= {}
dic_mesh_processing_poly_post_t["automatic"]= dir
if not includeonlypropswithuri:
dic_mesh_processing_poly_post_t["automatic"]= {}
dic_mesh_processing_poly_post_t["automatic"]= dir
if len(dic_mesh_processing_poly_post_t) > 0:
list_mesh_processing_poly_post.append(dic_mesh_processing_poly_post_t)
if len(list_mesh_processing_poly_post) > 0:
dic_mesh_processing["postprocessing"] = list_mesh_processing_poly_post
elif (liste[0].replace(" ","")) == "ref_points":
dic_mesh_processing_poly_post_r = {}
# process
dir = {}
dir.clear()
dir["value"] = "reference points"
dir["value_type"] = type(dir["value"]).__name__
dic_mesh_processing_poly_post_r["processname"] = {}
dic_mesh_processing_poly_post_r["processname"] = dir
# automatic
dir = {}
dir.clear()
dir["value"] = True
dir["value_type"] = type(dir["value"]).__name__
dir["from_application"] = "False"
dic_mesh_processing_poly_post_r["automatic"] = {}
dic_mesh_processing_poly_post_r["automatic"] = dir
# ref_points
keyword = liste[0].replace(" ","")
dir = {}
dir.clear()
dir["value"] = liste[1].replace(" ","")
dir["key_deu"] = "automatische Bearbeitung von Referenzpunkten"
dir["key_eng"] = "automatic postprocessing of referenzpoints"
dir["value_type"] = type(liste[1].replace(" ","")).__name__
dir["from_application"] = "True, part value from keyword=’comment’"
dir["uri"] = ontologynamespace+"refpoints"
if dir["value"] != None:
if len(str(dir["value"])) != 0:
if includeonlypropswithuri and "uri" in dir:
dic_mesh_processing_poly_post_r[keyword]= {}
dic_mesh_processing_poly_post_r[keyword]= dir
if not includeonlypropswithuri:
dic_mesh_processing_poly_post_r[keyword]= {}
dic_mesh_processing_poly_post_r[keyword]= dir
if len(dic_mesh_processing_poly_post_r) > 0:
list_mesh_processing_poly_post.append(dic_mesh_processing_poly_post_r)
if len(dic_mesh_processing_poly_setup) > 0:
dic_mesh_processing["setup"] = dic_mesh_processing_poly_setup
if len(dic_mesh_processing) > 0:
list_mesh_processing.append(dic_mesh_processing)
if len(dic_mesh_info) > 0:
dic_mesh["mesh_information"] = dic_mesh_info
if len(dic_mesh_processing) > 0:
dic_mesh["processing"] = list_mesh_processing
if len(dic_mesh) > 0:
list_meshes.append(dic_mesh)
me = me + 1
if len(list_meshes) > 0:
dic_dig["meshes"] = list_meshes
if len(dic_dig_app) > 0:
list_app.append(dic_dig_app)
if len(script_version()) > 0:
list_app.append(script_version())
if len(list_app) > 0:
dic_dig["applications"] = list_app
if len(dic_dig_project) > 0:
dic_dig["project_information"] = dic_dig_project
if len(list_projects) > 0:
dic_dig["measurement_series"] = list_projects
if len(dic_dig) > 0:
list_prj.append(dic_dig)
prj = prj + 1
if len(list_prj) > 0:
dic_prj["projects"]=list_prj
######################## PROJECTS END ###############################
############### EXPORTS #################
## output files
newfiles = (gom.app.get ("ACTUAL_SESSION_FILE")).split(".")[0]
if len(newfiles) == 0:
newfiles = gom.app.projects[0].get ("prj_directory") + "/" + (gom.app.projects[0].get ("prj_n")).split(".")[0]
out_file_txt = newfiles + ".txt"
out_file_json = newfiles + ".json"
out_file_ttl = newfiles + ".ttl"
#### json ####
out_json = open(out_file_json , "w")
#out_json.write(json.dumps(dic_prj))  # minimal sketch: the original hand-rolled quote-fixing write was garbled; json.dumps would need "import json" at the top
out_json.close()
#### dictionary in textfile ####
out_txt = open(out_file_txt , "w")
out_txt.write(str(dic_prj).replace("True","true").replace("False","false").decode("string_escape"))
out_txt.close()
######## ttl ########
ttlstring=set()
fertiges_ttl = exportToTTL(dic_prj, None, ttlstring)
text_file = open(out_file_ttl, "w")
text_file.write(ttlstringhead)
for item in fertiges_ttl:
text_file.write("%s" % item)
text_file.close()
#######################
######
print (atos_file)
print ("fertsch :-) :-)")
|
python
|
# Generated by Django 2.2.2 on 2019-09-19 12:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('dcodex', '0001_initial'),
('dcodex_bible', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='DayOfYear',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('day_of_week', models.IntegerField(choices=[(0, 'Sunday'), (1, 'Monday'), (2, 'Tuesday'), (3, 'Wednesday'), (4, 'Thursday'), (5, 'Friday'), (6, 'Saturday')])),
('period', models.CharField(choices=[('E', 'Easter'), ('P', 'Pentecost'), ('F', 'Feast of the Cross'), ('L', 'Lent'), ('G', 'Great Week')], max_length=1)),
('week', models.CharField(max_length=15)),
('weekday_number', models.CharField(max_length=32)),
('earliest_date', models.CharField(max_length=15)),
('latest_date', models.CharField(max_length=15)),
],
options={
'verbose_name_plural': 'Days of year',
},
),
migrations.CreateModel(
name='Lection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='LectionarySystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='LectionInSystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_on_day', models.IntegerField(default=0)),
('day_of_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dcodex_lectionary.DayOfYear')),
('lection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dcodex_lectionary.Lection')),
('system', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dcodex_lectionary.LectionarySystem')),
],
options={
'ordering': ['day_of_year', 'order_on_day'],
},
),
migrations.CreateModel(
name='LectionaryVerse',
fields=[
('verse_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dcodex.Verse')),
('unique_string', models.CharField(default='', max_length=20)),
('bible_verse', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dcodex_bible.BibleVerse')),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
bases=('dcodex.verse',),
),
migrations.AddField(
model_name='lectionarysystem',
name='lections',
field=models.ManyToManyField(through='dcodex_lectionary.LectionInSystem', to='dcodex_lectionary.Lection'),
),
migrations.CreateModel(
name='Lectionary',
fields=[
('manuscript_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='dcodex.Manuscript')),
('system', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dcodex_lectionary.LectionarySystem')),
],
options={
'verbose_name_plural': 'Lectionaries',
},
bases=('dcodex.manuscript',),
),
migrations.AddField(
model_name='lection',
name='verses',
field=models.ManyToManyField(to='dcodex_lectionary.LectionaryVerse'),
),
]
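# Usage note (not part of the generated migration): with the app added to
# INSTALLED_APPS, this schema is applied via the standard command, e.g.
#   python manage.py migrate dcodex_lectionary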
|
python
|
for i in range(0, 10, 2):
print(f"i is now {i}")
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2019 FairwindsOps Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import logging
import semver
import sys
import os
from typing import List
from .hooks import Hook
from .config import Config
from .repository import Repository
from .chart import Chart, ChartResult
from .secrets import Secret
from .helm.client import get_helm_client
from .yaml.handler import Handler as yaml_handler
from .meta import __version__ as reckoner_version
from .exception import MinimumVersionException, ReckonerCommandException, NoChartsToInstall, ReckonerException
from io import BufferedReader
class Course(object):
"""
Description:
- Top level class for the attributes of the course.yml file
- Parses yaml file into various Reckoner classes
Arguments:
- file (File)
Attributes:
- config: Instance of Config()
- helm: Instance of HelmClient()
- charts: List of Chart() instances
- repositories: List of Repository() instances
"""
def __init__(self, course_file: BufferedReader):
"""
Parse course.yml contents into instances.
"""
self.config = Config()
try:
self._dict = yaml_handler.load(course_file)
except Exception as err:
raise ReckonerException("Error loading the course file: {}".format(err))
try:
self.helm = get_helm_client(helm_arguments=self.config.helm_args)
except Exception as e:
raise ReckonerException("Helm Client Failed to initialize: {}".format(e))
self._repositories = []
self._charts = []
for name, repository in self._dict.get('repositories', {}).items():
repository['name'] = name
self._repositories.append(Repository(repository, self.helm))
# Send entire dictionary of the secret as kwargs
self._secrets = []
for secret in self._dict.get('secrets', []):
self._secrets.append(Secret(**secret))
for name, chart in self._dict.get('charts', {}).items():
self._set_chart_repository(chart)
self._charts.append(Chart({name: chart}, self.helm))
# Parsing and prepping hooks from course.yml
self._hooks = self._dict.get('hooks', {})
self._pre_install_hook = Hook(
self.hooks.get(
'pre_install',
[]
),
'Course pre install',
self.config.course_base_directory
)
self._post_install_hook = Hook(
self.hooks.get(
'post_install',
[]
),
'Course post install',
self.config.course_base_directory
)
self._init_hook = Hook(
self.hooks.get(
'init',
[]
),
'Course Init',
self.config.course_base_directory
)
# Run Init hook before we do anything other than parse the course
self.init_hook.run()
if self.config.update_repos:
for repo in self._repositories:
# Skip install of repo if it is git based since it will be installed from the chart class
if repo.git is None:
logging.debug("Installing repository: {}".format(repo))
repo.install(chart_name=repo._repository['name'], version=repo._repository.get('version'))
else:
logging.debug("Skipping git install of repository to later be installed at the chart level: {}".format(repo))
self.helm.repo_update()
try:
self._compare_required_versions()
except MinimumVersionException as e:
logging.error(e)
sys.exit(1)
# HACK: This logic is here to try to replace a named reference to a helm repo defined in the main
# course repositories. This is because we want to pass the git repo and some
# other data to the chart (since the chart also tries to install the repo)
#
# NOTE: The real fix here would be to unify the way repositories are installed and managed instead
#       of running the install() function twice from the course.yml and from the charts.yml.
#
# IF: chart has a repository definition that is a string reference, then find that
# reference (if exists) from the main repositories for the course and replace the
# string definition with the repositories setting.
def _set_chart_repository(self, chart: dict):
"""_set_chart_repository will convert the string reference of a
repository into the dictionary configuration of that repository
or, if None, or if the string isn't in the repositories section,
it will leave it alone."""
if isinstance(chart.get('repository', None), str) and chart['repository'] in [x.name for x in self.repositories]:
logging.debug('Found a reference to a repository installed via repositories section of course, replacing reference.')
chart['repository'] = self._dict['repositories'][chart['repository']]
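# Illustration (hypothetical course data): with
#   repositories: {"my-repo": {"url": "https://example.com/charts"}}
# a chart entry {"repository": "my-repo"} is rewritten in place to the dict
# {"url": "https://example.com/charts", "name": "my-repo"} ("name" is added
# when repositories are parsed in __init__); any other value is left as-is.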
def __str__(self):
return str(self._dict)
@property
def repositories(self):
""" Course repositories """
return self._repositories
@property
def secrets(self):
""" Secrets Defined in the Chart """
return self._secrets
@property
def namespace_management(self):
""" The default namespace manager block from the course if it exists
Otherwise, returns {} """
_namespace_management = self._dict.get('namespace_management')
if _namespace_management is None:
return {}
else:
return _namespace_management.get('default', {})
def __getattr__(self, key):
return self._dict.get(key)
@property
def hooks(self):
return self._hooks
@property
def charts(self):
""" List of Chart() instances """
return self._charts
@property
def pre_install_hook(self):
return self._pre_install_hook
@property
def post_install_hook(self):
return self._post_install_hook
@property
def init_hook(self):
return self._init_hook
def merge_secrets_into_environment(self) -> None:
"""
Accepts no arguments
Returns None
Loops over list of secrets and merges the name:values into the environment
Throws ReckonerException if an environment variable of the same name already exists
"""
for secret in self.secrets:
if secret.name in os.environ.keys():
raise ReckonerException(
f"Found Secret {secret.name} with the same name as existing environment variable. "
"Secrets may not have the same name as and existing environment variable"
)
try:
os.environ[secret.name] = secret.value
except Exception as e:
logging.error(f"Error retrieving value of secret {secret.name}")
logging.debug(traceback.format_exc())
raise e
def __run_command_for_charts_list(self, command: str, charts: list) -> List[ChartResult]:
results = []
self.merge_secrets_into_environment()
for chart in charts:
namespace = chart.namespace or self.namespace
logging.info(f"Running '{command}' on {chart.release_name} in {namespace}")
try:
getattr(chart, command)(
default_namespace=self.namespace,
default_namespace_management=self.namespace_management,
context=self.context
)
except (Exception, ReckonerCommandException) as e:
logging.debug(traceback.format_exc())
if type(e) == ReckonerCommandException:
logging.error(e.stderr)
if type(e) == Exception:
logging.error(e)
logging.error(f'ERROR: {command} Failed on {chart.release_name}')
if not self.config.continue_on_error:
logging.error(str(e))
raise ReckonerCommandException(
f"Stopping '{command}' for chart due to an error!"
" Some of your requested actions may not have been"
" completed!") from None
finally:
# Always grab any results in the chart results
results.append(chart.result)
return results
def install_charts(self, charts_to_install: list) -> List[ChartResult]:
"""
For a list of charts_to_install, run the `install` method on each chart instance.
Accepts list of `Chart()`
Returns list of `ChartResult()`
"""
return self.__run_command_for_charts_list('install', charts_to_install)
def update_charts(self, charts_to_update: list) -> List[ChartResult]:
"""
For a list of charts_to_update, run the `update` method on each chart instance.
Accepts list of `Chart()`
Returns list of `ChartResult()`
"""
return self.__run_command_for_charts_list('update', charts_to_update)
def template_charts(self, charts_to_template: list) -> List[ChartResult]:
"""
        For a list of charts_to_template, run the `template` method on each chart instance
Accepts list of `Chart()`
Returns list of `ChartResult()`
"""
return self.__run_command_for_charts_list('template', charts_to_template)
def get_chart_manifests(self, charts_to_manifest: list) -> List[ChartResult]:
"""
        For a list of charts_to_manifest, run the `get_manifest` method on each chart instance
Accepts list of `Chart()`
Returns list of `ChartResult()`
"""
return self.__run_command_for_charts_list('get_manifest', charts_to_manifest)
def diff_charts(self, charts_to_diff: list) -> List[ChartResult]:
"""
        For a list of charts_to_diff, run the `diff` method on each chart instance
Accepts list of `Chart()`
Returns list of `ChartResult()`
"""
return self.__run_command_for_charts_list('diff', charts_to_diff)
def only_charts(self, charts_requested: list) -> List[str]:
"""
        Accepts the list of requested charts, compares that to the course and
        returns the intersection. Logs a warning if a chart is requested but not
        present in the course.
"""
self._only_charts = []
# NOTE: Unexpected feature here: Since we're iterating on all charts
# in the course to find the ones the user has requested, a
# byproduct is that the --only's will always be run in the order
# defined in the course.yml. No matter the order added to via
# command line arguments.
for chart in self.charts:
if chart.release_name in charts_requested:
self._only_charts.append(chart)
charts_requested.remove(chart.release_name)
else:
logging.debug(
'Skipping {} in course.yml, not found '
'in your requested charts list'.format(chart.release_name)
)
# If any items remain in charts requested - warn that we didn't find them
self._warn_about_missing_requested_charts(charts_requested)
if len(self._only_charts) == 0:
raise NoChartsToInstall(
'None of the charts you requested ({}) could be '
'found in the course list. Verify you are using the '
'release-name and not the chart name.'.format(', '.join(charts_requested))
)
return self._only_charts
def template(self, charts_requested_to_template: list) -> List[str]:
"""
Accepts charts_requested_to_template, an iterable of the names of the charts
to template. This method compares the charts in the argument to the
charts in the course and calls Chart.template()
"""
# return the text of the charts templating
results = self.template_charts(self.only_charts(charts_requested_to_template))
return results
def get_manifests(self, charts_manifests_requested: list) -> List[str]:
"""
Accepts charts_manifests_requested, an iterable of the names of the charts
to get manifests for. This method compares the charts in the argument to the
charts in the course and calls Chart.get_manifest()
"""
# return the text of the charts templating
results = self.get_chart_manifests(self.only_charts(charts_manifests_requested))
return results
def diff(self, chart_diffs_requested: list) -> List[str]:
"""
Accepts chart_diffs_requested, an iterable of the names of the charts
to get manifests for. This method compares the charts in the argument to the
charts in the course and calls Chart.diff()
"""
# return the text of the charts templating
results = self.diff_charts(self.only_charts(chart_diffs_requested))
return results
def plot(self, charts_requested_to_install: list) -> List[ChartResult]:
"""
Accepts charts_to_install, an iterable of the names of the charts
to install. This method compares the charts in the argument to the
charts in the course and calls Chart.install()
"""
# return the results of the charts installation, exit on error to prevent post install hook run
self.pre_install_hook.run()
results = self.install_charts(self.only_charts(charts_requested_to_install))
for chart in results:
            if chart.failed and not self.config.continue_on_error:
logging.error("Not running Course post_install hook due to a chart install error!")
return results
self.post_install_hook.run()
return results
def update(self, charts_requested_to_update: list) -> List[ChartResult]:
"""
Accepts charts_to_update, an iterable of the names of the charts
to update. This method compares the charts in the argument to the
charts in the course and calls Chart.update()
"""
# return the results of the charts update, exit on error to prevent post install hook run
self.pre_install_hook.run()
results = self.update_charts(self.only_charts(charts_requested_to_update))
        for chart in results:
            if chart.failed and not self.config.continue_on_error:
                logging.error("Not running Course post_install hook due to a chart update error!")
return results
self.post_install_hook.run()
return results
def _warn_about_missing_requested_charts(self, charts_which_were_not_found):
if charts_which_were_not_found:
for missing_chart in charts_which_were_not_found:
logging.warning(
'Could not find {} in course.yml'.format(missing_chart)
)
logging.warning('Some of the requested charts were not found in '
'your course.yml')
def _compare_required_versions(self):
"""
Compare installed versions of helm and reckoner to the minimum versions
required by the course.yml
Accepts no arguments
"""
if self.minimum_versions is None:
return True
helm_minimum_version = self.minimum_versions.get('helm', '0.0.0')
reckoner_minimum_version = self.minimum_versions.get('reckoner', '0.0.0')
logging.debug("Helm Minimum Version is: {}".format(helm_minimum_version))
logging.debug("Helm Installed Version is {}".format(self.helm.version))
logging.debug("Reckoner Minimum Version is {}".format(reckoner_minimum_version))
logging.debug("Reckoner Installed Version is {}".format(reckoner_version))
r1 = semver.compare(reckoner_version, reckoner_minimum_version)
if r1 < 0:
raise MinimumVersionException("reckoner Minimum Version {} not met.".format(reckoner_minimum_version))
r2 = semver.compare(self.helm.version, helm_minimum_version)
if r2 < 0:
raise MinimumVersionException("helm Minimum Version {} not met.".format(helm_minimum_version))
return True
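# Illustrative sketch (added; not part of the course implementation above): the
# version gate in _compare_required_versions() relies on semver.compare(a, b),
# which returns a negative number when a < b. The same check as a standalone
# helper, reusing this module's existing `semver` import:
def _meets_minimum(installed: str, minimum: str) -> bool:
    """Return True when `installed` satisfies the `minimum` semver requirement."""
    return semver.compare(installed, minimum) >= 0
if __name__ == '__main__':
    assert _meets_minimum('3.2.1', '3.0.0')
    assert not _meets_minimum('0.9.0', '1.0.0')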
|
python
|
import re
import ast
import discord
from redbot.core import Config
from redbot.core import commands
from redbot.core import checks
defaults = {"Prefixes": {}}
class PrefixManager(commands.Cog):
"""Used to set franchise and role prefixes and give to members in those franchises or with those roles"""
def __init__(self):
self.config = Config.get_conf(self, identifier=1234567891, force_registration=True)
self.config.register_guild(**defaults)
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(manage_guild=True)
async def addPrefixes(self, ctx, *prefixes_to_add):
"""Add the prefixes and corresponding GM name.
Arguments:
prefixes_to_add -- One or more prefixes in the following format:
\t"['<gm_name>','<prefix>']"
Each prefix should be separated by a space.
Examples:
\t[p]addPrefixes "['Adammast','OCE']"
\t[p]addPrefixes "['Adammast','OCE']" "['Shamu','STM']"
"""
addedCount = 0
try:
for prefixStr in prefixes_to_add:
prefix = ast.literal_eval(prefixStr)
await ctx.send("Adding prefix: {0}".format(repr(prefix)))
prefixAdded = await self.add_prefix(ctx, *prefix)
if prefixAdded:
addedCount += 1
finally:
await ctx.send("Added {0} prefixes(s).".format(addedCount))
await ctx.send("Done.")
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(manage_guild=True)
async def addPrefix(self, ctx, gm_name: str, prefix: str):
"""Add a single prefix and corresponding GM name."""
prefixAdded = await self.add_prefix(ctx, gm_name, prefix)
if(prefixAdded):
await ctx.send("Done.")
else:
await ctx.send("Error adding prefix: {0}".format(prefix))
@commands.command(aliases=["listPrefixes", "prefixes"])
@commands.guild_only()
async def getPrefixes(self, ctx):
"""Get all prefixes in the prefix dictionary"""
prefixes = await self._prefixes(ctx)
if(len(prefixes.items()) > 0):
message = "```Prefixes:"
for key, value in prefixes.items():
message += "\n\t{0} = {1}".format(key, value)
message += "```"
await ctx.send(message)
else:
await ctx.send(":x: No prefixes are set in the dictionary")
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(manage_guild=True)
async def removePrefix(self, ctx, gm_name: str):
"""Remove a single prefix. The GM will no longer have a prefix in the dictionary"""
prefixRemoved = await self.remove_prefix(ctx, gm_name)
if(prefixRemoved):
await ctx.send("Done.")
else:
await ctx.send("Error removing prefix for {0}".format(gm_name))
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(manage_guild=True)
async def clearPrefixes(self, ctx):
"""Clear the prefix dictionary"""
prefixes = await self._prefixes(ctx)
try:
prefixes.clear()
await self._save_prefixes(ctx, prefixes)
await ctx.send(":white_check_mark: All prefixes have been removed from dictionary")
        except Exception:
await ctx.send(":x: Something went wrong when trying to clear the prefix dictionary")
@commands.command()
@commands.guild_only()
async def lookupPrefix(self, ctx, gm_name: str):
"""Gets the prefix corresponding to the GM's franchise"""
prefix = await self._get_gm_prefix(ctx, gm_name)
if(prefix):
await ctx.send("Prefix for {0} = {1}".format(gm_name, prefix))
return
await ctx.send(":x: Prefix not found for {0}".format(gm_name))
def _find_role(self, ctx, role_id):
guild = ctx.message.guild
roles = guild.roles
for role in roles:
if role.id == role_id:
return role
raise LookupError('No role with id: {0} found in server roles'.format(role_id))
@commands.command()
@commands.guild_only()
@checks.admin_or_permissions(manage_nicknames=True)
async def removeNicknames(self, ctx, *userList):
"""Removes any nickname from every member that can be found from the userList"""
empty = True
removed = 0
notFound = 0
message = ""
for user in userList:
try:
member = await commands.MemberConverter().convert(ctx, user)
if member in ctx.guild.members:
await member.edit(nick=None)
removed += 1
empty = False
            except Exception:
if notFound == 0:
message += "Couldn't find:\n"
message += "{0}\n".format(user)
notFound += 1
if empty:
message += ":x: Nobody found from list"
else:
message += ":white_check_mark: Removed nicknames from everyone that was found from list"
if notFound > 0:
message += ". {0} user(s) were not found".format(notFound)
if removed > 0:
message += ". {0} user(s) had their nickname removed".format(removed)
await ctx.send(message)
async def add_prefix(self, ctx, gm_name: str, prefix: str):
prefixes = await self._prefixes(ctx)
proper_gm_name = self._get_proper_gm_name(ctx, gm_name)
# Validation of input
# There are other validations we could do, but don't
# - that there aren't extra args
errors = []
if not proper_gm_name:
errors.append("GM not found with name {0}.".format(gm_name))
if not prefix:
errors.append("Prefix not found from input for GM {0}.".format(gm_name))
if errors:
await ctx.send(":x: Errors with input:\n\n "
"* {0}\n".format("\n * ".join(errors)))
return
try:
prefixes[proper_gm_name] = prefix
        except Exception:
return False
await self._save_prefixes(ctx, prefixes)
return True
async def remove_prefix(self, ctx, gm_name: str):
prefixes = await self._prefixes(ctx)
try:
del prefixes[gm_name]
        except KeyError:
await ctx.send("{0} does not have a prefix.".format(gm_name))
return False
await self._save_prefixes(ctx, prefixes)
return True
def _get_proper_gm_name(self, ctx, gm_name):
guild = ctx.message.guild
roles = guild.roles
for role in roles:
try:
gmNameFromRole = re.findall(r'(?<=\().*(?=\))', role.name)[0]
if gmNameFromRole.lower() == gm_name.lower():
return gmNameFromRole
            except Exception:
continue
async def _get_gm_prefix(self, ctx, gm_name):
prefixes = await self._prefixes(ctx)
try:
return prefixes[self._get_proper_gm_name(ctx, gm_name)]
        except Exception:
return None
async def _get_franchise_prefix(self, ctx, franchise_role):
prefixes = await self._prefixes(ctx)
try:
gm_name = re.findall(r'(?<=\().*(?=\))', franchise_role.name)[0]
return prefixes[gm_name]
        except Exception:
raise LookupError('GM name not found from role {0}'.format(franchise_role.name))
async def _prefixes(self, ctx):
return await self.config.guild(ctx.guild).Prefixes()
async def _save_prefixes(self, ctx, prefixes):
await self.config.guild(ctx.guild).Prefixes.set(prefixes)
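# Illustrative sketch (added): addPrefixes parses each shell-quoted argument with
# ast.literal_eval, so "['Adammast','OCE']" becomes a real two-element list.
if __name__ == '__main__':
    gm_name, prefix = ast.literal_eval("['Adammast','OCE']")
    assert (gm_name, prefix) == ('Adammast', 'OCE')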
|
python
|
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.Results.test_PhirhozElement
.. moduleauthor:: Hendrix Demers <[email protected]>
Tests for the module `PhirhozElement`.
"""
# Script information for the file.
__author__ = "Hendrix Demers ([email protected])"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion informations for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
import unittest
import logging
# Third party modules.
# Local modules.
# Project modules
import pymcxray.FileFormat.Results.PhirhozElement as PhirhozElement
# Globals and constants variables.
class TestPhirhozElement(unittest.TestCase):
"""
TestCase class for the module `PhirhozElement`.
"""
def setUp(self):
"""
Setup method.
"""
unittest.TestCase.setUp(self)
def tearDown(self):
"""
Teardown method.
"""
unittest.TestCase.tearDown(self)
def testSkeleton(self):
"""
First test to check if the testcase is working with the testing framework.
"""
#self.fail("Test if the testcase is working.")
        self.assertTrue(True)
def test_readFromLine(self):
"""
        Tests for method `readFromLine`.
"""
line, phirhozElementRef = getLineAndReference()
phirhozElement = PhirhozElement.PhirhozElement()
phirhozElement.readFromLine(line)
        self.assertEqual(phirhozElementRef.symbol, phirhozElement.symbol)
        self.assertEqual(phirhozElementRef.weightFraction, phirhozElement.weightFraction)
        self.assertEqual(phirhozElementRef.isIonizationShell_K, phirhozElement.isIonizationShell_K)
        self.assertEqual(phirhozElementRef.isIonizationShell_L, phirhozElement.isIonizationShell_L)
        self.assertEqual(phirhozElementRef.isIonizationShell_M, phirhozElement.isIonizationShell_M)
#self.fail("Test if the testcase is working.")
def getLineAndReference():
line = "Au, 100.0000000 % Ionization shells 0 1 1"
phirhozElementRef = PhirhozElement.PhirhozElement()
phirhozElementRef.symbol = 'Au'
phirhozElementRef.weightFraction = 1.0
phirhozElementRef.isIonizationShell_K = False
phirhozElementRef.isIonizationShell_L = True
phirhozElementRef.isIonizationShell_M = True
return line, phirhozElementRef
if __name__ == '__main__': #pragma: no cover
logging.getLogger().setLevel(logging.DEBUG)
from pymcxray.Testings import runTestModuleWithCoverage
runTestModuleWithCoverage(__file__, withCoverage=False)
|
python
|
from json import loads
from random import choice
from .base_menti_question import BaseMentiQuestion
QUOTES_PATH = 'quotes.json'
def get_quotes(filename=QUOTES_PATH):
    try:
        with open(filename) as f:
            return f.read()
    except OSError:
        # Fall back to an empty JSON list so json.loads() still succeeds.
        return '[]'
class MentiText(BaseMentiQuestion):
max_text_len = 140
needed_attr = []
def __init__(self, params=None):
super().__init__(params)
self.dictionary = loads(get_quotes())
def flood(self):
quote = choice(self.dictionary)
response = "{}: \"{}\"".format(quote.get("name", ''),
quote.get("quote", ''))
return response[:self.max_text_len]
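# Illustrative usage sketch (added): quotes.json is expected to hold a JSON list
# of objects with "name" and "quote" keys, e.g.
#   [{"name": "Ada Lovelace", "quote": "The Analytical Engine weaves algebraic patterns."}]
# With such a file in place:
#   bot = MentiText()
#   bot.flood()  # -> 'Ada Lovelace: "The Analytical Engine weaves algebraic patterns."'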
|
python
|
# -*- coding: utf-8 -*-
from .wiserapi import WiserBaseAPI, _convert_case
class SetPoint:
def __init__(self):
self.time = None
self.temperature = None
class Schedule(WiserBaseAPI):
"""Represnts the /Schedule object in the Restful API"""
def __init__(self, *args, **kwargs):
# Defining default values
self.id = None
self.type = None # "Heating"
self.monday = None
self.tuesday = None
self.wednesday = None
self.thursday = None
self.friday = None
        self.saturday = None
        self.sunday = None
# "Sunday": {
# "SetPoints": [
# {
# "Time": 700,
# "DegreesC": 200
# },
# {
# "Time": 900,
# "DegreesC": 180
# },
# {
# "Time": 1600,
# "DegreesC": 210
# },
# {
# "Time": 2300,
# "DegreesC": -200
# }
# ]
# },
super(Schedule, self).__init__(*args, **kwargs)
# def _load_attributes(self, *args, **kwargs):
# pass
# # Set attributes of object from CamelCase
# #print("LOAD custom schedule data!!")
# #for attr in kwargs.keys():
# #setattr(self, _convert_case(attr), kwargs[attr])
#
|
python
|
def lagrange(vec_x, vec_f, x=0):
    tam, res = len(vec_x), 0
    L = [0] * tam
    for i in range(tam):
        L[i] = 1
        for j in range(tam):
            if j != i:
                L[i] = L[i] * (x - vec_x[j]) / (vec_x[i] - vec_x[j])
    for k in range(tam):
        res = res + vec_f[k] * L[k]
    print(res)
x = [0.81, 0.83, 0.86]    # x samples
y = [16, 9, 0.24, 2.94]   # f(x) samples
x_bar = 0.84              # the point to interpolate at
lagrange(x, y, x_bar)
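# Illustrative check (added): a Lagrange interpolant reproduces the sampled values
# exactly at the interpolation nodes, so this prints 3.0.
lagrange([0.0, 1.0, 2.0], [1.0, 3.0, 7.0], 1.0)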
|
python
|
import os
import pandas as pd
import numpy as np
import shutil
cwd = os.getcwd()
sysname = os.path.basename(cwd)
print(sysname)
if os.path.isdir(cwd+'/lmbl/'):
shutil.rmtree(cwd+'/lmbl/')
os.mkdir('lmbl')
coor = os.path.join(cwd, 'coordinates')
lmbl = os.path.join(cwd, 'lmbl')
os.chdir(lmbl)
for filename in os.listdir(coor):
other_name=[]
other_X = []
other_Y = []
other_Z = []
C_X = []
C_Y = []
C_Z = []
count_C = 0
pos_C = []
print(filename)
    with open(os.path.join(coor, filename)) as f:
        lines = f.readlines()
    # obtain the number of C atoms and the coordinates of the other atoms
for i in range(1,len(lines)):
lines_ele = lines[i].strip().split(',')
ele = lines_ele[0]
if ele == sysname:
count_C+=1
pos_C.append(i)
continue
else:
other_name.append(lines_ele[0])
other_X.append(lines_ele[1])
other_Y.append(lines_ele[2])
other_Z.append(lines_ele[3])
len_others=len(lines)-1-count_C
    # obtain the coordinates of the C atoms
for j in range(0,count_C):
lines_C = lines[pos_C[j]].strip().split(',')
C_X.append(lines_C[1])
C_Y.append(lines_C[2])
C_Z.append(lines_C[3])
#calculate dist between C and other atoms
#dist between first C and other atoms
print(C_X)
CX = C_X[0]
CY = C_Y[0]
CZ = C_Z[0]
pair=[]
dist=[]
for k in range(0,len_others):
Xdiff = float(other_X[k]) - float(CX)
Ydiff = float(other_Y[k]) - float(CY)
Zdiff = float(other_Z[k]) - float(CZ)
d = np.sqrt(np.square(Xdiff)+np.square(Ydiff)+np.square(Zdiff))
pair.append(sysname + '-' + other_name[k])
dist.append(d)
if count_C > 1:
for l in range(1,count_C):
Xdiff = float(C_X[l]) - float(CX)
Ydiff = float(C_Y[l]) - float(CY)
Zdiff = float(C_Z[l]) - float(CZ)
d = np.sqrt(np.square(Xdiff) + np.square(Ydiff) + np.square(Zdiff))
pair.append(sysname + '-' + sysname)
dist.append(d)
weight=[]
for dis in dist:
w = 1 / dis
weight.append(w)
weight_sum=sum(weight)
frac = [ w / weight_sum for w in weight]
df = pd.DataFrame({'pair':pair,'dist':dist,'weight':weight,'weight_sum':weight_sum,'frac':frac})
df.to_csv(filename,index=False)
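# Illustrative note (added): the 'frac' column is plain inverse-distance weighting,
# frac_i = (1/d_i) / sum_j(1/d_j). For distances [1.0, 2.0, 4.0] the weights are
# [1.0, 0.5, 0.25] and the fractions are [4/7, 2/7, 1/7].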
|
python
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Servlet for Content Security Policy violation reporting.
See http://www.html5rocks.com/en/tutorials/security/content-security-policy/
for more information on how this mechanism works.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import webapp2
import logging
class CSPReportPage(webapp2.RequestHandler):
"""CSPReportPage serves CSP violation reports."""
def post(self):
        logging.error('CSP Violation: %s', self.request.body)
|
python
|
import datetime
from pathlib import Path
from typing import Iterator, Any
import pytest
import google_takeout_parser.parse_json as prj
@pytest.fixture(scope="function")
def tmp_path_f(
request: Any, tmp_path_factory: pytest.TempPathFactory
) -> Iterator[Path]:
"""
Create a new tempdir every time this runs
"""
# request is a _pytest.fixture.SubRequest, function that called this
assert isinstance(request.function.__name__, str), str(request)
assert request.function.__name__.strip(), str(request)
tmp_dir = tmp_path_factory.mktemp(request.function.__name__, numbered=True)
yield tmp_dir
def test_parse_activity_json(tmp_path_f: Path) -> None:
contents = '[{"header": "Discover", "title": "7 cards in your feed", "time": "2021-12-13T03:04:05.007Z", "products": ["Discover"], "locationInfos": [{"name": "At this general area", "url": "https://www.google.com/maps/@?api=1&map_action=map¢er=lat,lon&zoom=12", "source": "From your Location History", "sourceUrl": "https://www.google.com/maps/timeline"}], "subtitles": [{"name": "Computer programming"}, {"name": "Computer Science"}, {"name": "PostgreSQL"}, {"name": "Technology"}]}]'
fp = tmp_path_f / "file"
fp.write_text(contents)
res = list(prj._parse_json_activity(fp))
assert res[0] == prj.Activity(
header="Discover",
title="7 cards in your feed",
time=datetime.datetime(
2021, 12, 13, 3, 4, 5, 7000, tzinfo=datetime.timezone.utc
),
description=None,
titleUrl=None,
subtitles=[
("Computer programming", None),
("Computer Science", None),
("PostgreSQL", None),
("Technology", None),
],
locationInfos=[
(
"At this general area",
"https://www.google.com/maps/@?api=1&map_action=map¢er=lat,lon&zoom=12",
"From your Location History",
"https://www.google.com/maps/timeline",
),
],
details=[],
products=["Discover"],
)
def test_parse_likes_json(tmp_path_f: Path) -> None:
contents = """[{"contentDetails": {"videoId": "J1tF-DKKt7k", "videoPublishedAt": "2015-10-05T17:23:15.000Z"}, "etag": "GbLczUV2gsP6j0YQgTcYropUbdY", "id": "TExBNkR0bmJaMktKY2t5VFlmWE93UU5BLkoxdEYtREtLdDdr", "kind": "youtube#playlistItem", "snippet": {"channelId": "UCA6DtnbZ2KJckyTYfXOwQNA", "channelTitle": "Sean B", "description": "\\u30b7\\u30e5\\u30ac\\u30fc\\u30bd\\u30f3\\u30b0\\u3068\\u30d3\\u30bf\\u30fc\\u30b9\\u30c6\\u30c3\\u30d7 \\nSugar Song and Bitter Step\\n\\u7cd6\\u6b4c\\u548c\\u82e6\\u5473\\u6b65\\u9a5f\\nUNISON SQUARE GARDEN\\n\\u7530\\u6df5\\u667a\\u4e5f\\n\\u8840\\u754c\\u6226\\u7dda\\n\\u5e7b\\u754c\\u6230\\u7dda\\nBlood Blockade Battlefront ED\\nArranged by Maybe\\nScore:https://drive.google.com/open?id=0B9Jb1ks6rtrWSk1hX1U0MXlDSUE\\nThx~~", "playlistId": "LLA6DtnbZ2KJckyTYfXOwQNA", "position": 4, "publishedAt": "2020-07-05T18:27:32.000Z", "resourceId": {"kind": "youtube#video", "videoId": "J1tF-DKKt7k"}, "thumbnails": {"default": {"height": 90, "url": "https://i.ytimg.com/vi/J1tF-DKKt7k/default.jpg", "width": 120}, "high": {"height": 360, "url": "https://i.ytimg.com/vi/J1tF-DKKt7k/hqdefault.jpg", "width": 480}, "medium": {"height": 180, "url": "https://i.ytimg.com/vi/J1tF-DKKt7k/mqdefault.jpg", "width": 320}, "standard": {"height": 480, "url": "https://i.ytimg.com/vi/J1tF-DKKt7k/sddefault.jpg", "width": 640}}, "title": "[Maybe]Blood Blockade Battlefront ED \\u30b7\\u30e5\\u30ac\\u30fc\\u30bd\\u30f3\\u30b0\\u3068\\u30d3\\u30bf\\u30fc\\u30b9\\u30c6\\u30c3\\u30d7 Sugar Song and Bitter Step"}, "status": {"privacyStatus": "public"}}]"""
fp = tmp_path_f / "file"
fp.write_text(contents)
res = list(prj._parse_likes(fp))
assert res == [
prj.LikedYoutubeVideo(
title="[Maybe]Blood Blockade Battlefront ED シュガーソングとビターステップ "
"Sugar Song and Bitter Step",
desc="シュガーソングとビターステップ \n"
"Sugar Song and Bitter Step\n"
"糖歌和苦味步驟\n"
"UNISON SQUARE GARDEN\n"
"田淵智也\n"
"血界戦線\n"
"幻界戰線\n"
"Blood Blockade Battlefront ED\n"
"Arranged by Maybe\n"
"Score:https://drive.google.com/open?id=0B9Jb1ks6rtrWSk1hX1U0MXlDSUE\n"
"Thx~~",
link="https://youtube.com/watch?v=J1tF-DKKt7k",
dt=datetime.datetime(2020, 7, 5, 18, 27, 32, tzinfo=datetime.timezone.utc),
)
]
def test_parse_app_installs(tmp_path_f: Path) -> None:
contents = """[{"install": {"doc": {"documentType": "Android Apps", "title": "Discord - Talk, Video Chat & Hang Out with Friends"}, "firstInstallationTime": "2020-05-25T03:11:53.055Z", "deviceAttribute": {"manufacturer": "motorola", "deviceDisplayName": "motorola moto g(7) play"}, "lastUpdateTime": "2020-08-27T02:55:33.259Z"}}]"""
fp = tmp_path_f / "file"
fp.write_text(contents)
res = list(prj._parse_app_installs(fp))
assert res == [
prj.PlayStoreAppInstall(
title="Discord - Talk, Video Chat & Hang Out with Friends",
dt=datetime.datetime(
2020, 5, 25, 3, 11, 53, 55000, tzinfo=datetime.timezone.utc
),
device_name="motorola moto g(7) play",
)
]
def test_location_old(tmp_path_f: Path) -> None:
contents = '{"locations": [{"timestampMs": "1512947698030", "latitudeE7": 351324213, "longitudeE7": -1122434441, "accuracy": 10}]}'
fp = tmp_path_f / "file"
fp.write_text(contents)
res = list(prj._parse_location_history(fp))
assert res == [
prj.Location(
lng=-112.2434441,
lat=35.1324213,
dt=datetime.datetime(
2017, 12, 10, 23, 14, 58, tzinfo=datetime.timezone.utc
),
accuracy=10,
),
]
def test_location_new(tmp_path_f: Path) -> None:
contents = '{"locations": [{"latitudeE7": 351324213, "longitudeE7": -1122434441, "accuracy": 10, "deviceTag": -80241446968629135069, "deviceDesignation": "PRIMARY", "timestamp": "2017-12-10T23:14:58.030Z"}]}'
fp = tmp_path_f / "file"
fp.write_text(contents)
res = list(prj._parse_location_history(fp))
assert res == [
prj.Location(
lng=-112.2434441,
lat=35.1324213,
dt=datetime.datetime(
2017, 12, 10, 23, 14, 58, 30000, tzinfo=datetime.timezone.utc
),
accuracy=10,
),
]
def test_chrome_history(tmp_path_f: Path) -> None:
contents = '{"Browser History": [{"page_transition": "LINK", "title": "sean", "url": "https://sean.fish", "client_id": "W1vSb98l403jhPeK==", "time_usec": 1617404690134513}]}'
fp = tmp_path_f / "file"
fp.write_text(contents)
res = list(prj._parse_chrome_history(fp))
assert res == [
prj.ChromeHistory(
title="sean",
url="https://sean.fish",
dt=datetime.datetime(
2021, 4, 2, 23, 4, 50, 134513, tzinfo=datetime.timezone.utc
),
),
]
|
python
|
from __future__ import annotations
from typing import TYPE_CHECKING
import discord
from discord.ext import typed_commands
from .exceptions import NotGuildOwner, OnlyDirectMessage
if TYPE_CHECKING:
from discord.ext.commands import core
def dm_only() -> core._CheckDecorator:
def predicate(ctx: core._CT, /) -> bool:
if not isinstance(ctx.channel, discord.DMChannel):
            raise OnlyDirectMessage('This command can only be used in private messages.')
return True
return typed_commands.check(predicate)
def is_guild_owner() -> core._CheckDecorator:
def predicate(ctx: core._CT, /) -> bool:
if ctx.guild is None:
raise typed_commands.NoPrivateMessage(
'This command cannot be used in private messages.'
)
if ctx.guild.owner != ctx.author:
            raise NotGuildOwner('You do not own this guild.')
return True
return typed_commands.check(predicate)
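# Illustrative usage sketch (added; `lockdown` is a hypothetical command): the
# checks are applied like any other discord.ext check decorator.
#
# @typed_commands.command()
# @is_guild_owner()
# async def lockdown(ctx):
#     await ctx.send('Only the guild owner can run this.')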
|
python
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe for updating the go_deps asset."""
DEPS = [
'checkout',
'infra',
'recipe_engine/context',
'recipe_engine/properties',
'recipe_engine/python',
'run',
'vars',
]
def RunSteps(api):
api.vars.setup()
checkout_root = api.checkout.default_checkout_root
api.checkout.bot_update(checkout_root=checkout_root)
skia_dir = checkout_root.join('skia')
with api.context(cwd=skia_dir, env=api.infra.go_env):
script = skia_dir.join('infra', 'bots', 'update_go_deps.py')
api.run(api.python, 'Update Asset', script=script)
def GenTests(api):
builder = 'Housekeeper-Nightly-UpdateGoDEPS'
yield (
api.test(builder) +
api.properties(buildername=builder,
repository='https://skia.googlesource.com/skia.git',
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]')
)
|
python
|
import logging
import numpy as np
import tensorflow as tf
from MultiHeadAttention import *
class GAN:
def __init__(self, input_shapes, embedding_size, question_padding):
#input shapes is in the order context, answers, noise
self.input_shapes = input_shapes
self.embedding_size = embedding_size
        self.question_padding = question_padding
        self.generator = self.generator_model()
        self.discriminator = self.discriminator_model()
        self.adversarial = self.adversarial_model()
        self.compiled = False
def generator_model(self):
c_in = tf.keras.layers.Input(self.input_shapes[0], name = 'c_in')
a_in = tf.keras.layers.Input(self.input_shapes[1], name = 'a_in')
r_in = tf.keras.layers.Input(self.input_shapes[2], name = 'r_in')
c_h = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(200))(c_in)
a_h = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(100))(a_in)
h1 = tf.keras.layers.Concatenate()([c_h, a_h, r_in])
h2 = tf.keras.layers.RepeatVector(self.question_padding)(h1)
h3 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(200, return_sequences = True))(h2)
q_out = tf.keras.layers.GRU(self.embedding_size, return_sequences = True)(h3)
        # q_out has shape (batch, question_padding, embedding_size)
return tf.keras.models.Model([c_in, a_in, r_in], q_out)
def discriminator_model(self):
q_in = tf.keras.layers.Input((self.question_padding, self.embedding_size))
h1 = tf.keras.layers.Conv1D(128, 3, activation='relu')(q_in)
h2 = tf.keras.layers.Conv1D(128, 3, activation='relu')(h1)
h3 = MultiHeadAttention(128, 16, output_dense=False)(h2)
h4 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64, return_sequences = True))(h3)
h5 = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(64))(h4)
k_out = tf.keras.layers.Dense(1, activation = 'sigmoid')(h5)
return tf.keras.models.Model(q_in, k_out)
def adversarial_model(self):
c_in = tf.keras.layers.Input(self.input_shapes[0], name = 'c_in')
a_in = tf.keras.layers.Input(self.input_shapes[1], name = 'a_in')
r_in = tf.keras.layers.Input(self.input_shapes[2], name = 'r_in')
q_out = self.generator([c_in, a_in, r_in])
k_out = self.discriminator(q_out)
return tf.keras.models.Model([c_in, a_in, r_in], k_out)
def compile(self, optimizer=tf.keras.optimizers.Adam(), adversarial_loss='binary_crossentropy', question_loss='cosine_similarity'):
self.optimizer = optimizer
self.adversarial_loss = adversarial_loss
self.question_loss = question_loss
        # BinaryAccuracy matches the discriminator's single sigmoid output.
        self.discriminator.compile(optimizer=self.optimizer, loss=self.adversarial_loss, metrics=[tf.keras.metrics.BinaryAccuracy()])
        self.generator.compile(optimizer=self.optimizer, loss=self.question_loss)
        self.adversarial.compile(optimizer=self.optimizer, loss=self.adversarial_loss, metrics=[tf.keras.metrics.BinaryAccuracy()])
self.compiled = True
def fit(self, loader, epochs, generator_epochs = 1, discriminator_epochs = 1, question_epochs = 1, pretrain_epochs = 1):
if not self.compiled:
raise AssertionError("Model must be compiled first.")
for i in range(pretrain_epochs):
logging.info(f'Pretrain Epoch {i + 1}:')
dl = 0
self.generator.trainable = False
self.discriminator.trainable = True
for b, j in enumerate(np.random.permutation(len(loader))):
c, a, q = loader[j]
r = np.random.normal(0, 1, size = [len(a), self.input_shapes[2][0]])
oq = self.generator.predict_on_batch([c, a, r])
fake_labels = np.zeros([len(a)])
true_labels = np.ones_like(fake_labels)
qs, labels = np.concatenate([q, oq]), np.concatenate([true_labels, fake_labels])
for _ in range(discriminator_epochs):
dl = self.discriminator.train_on_batch(qs, labels)
loader.on_epoch_end()
logging.info(f"Discriminator loss - {dl}")
for i in range(epochs):
logging.info(f'Epoch {i + 1}:')
ql = gl = dl = 0
for b, j in enumerate(np.random.permutation(len(loader))):
c, a, q = loader[j]
r = np.random.normal(0, 1, size = [len(a), self.input_shapes[2][0]])
self.generator.trainable = True
self.discriminator.trainable = False
for _ in range(question_epochs):
ql = self.generator.train_on_batch([c, a, r], q)
for _ in range(generator_epochs):
gl = self.adversarial.train_on_batch([c, a, r], np.ones(len(a)))
if discriminator_epochs:
self.generator.trainable = False
self.discriminator.trainable = True
oq = self.generator.predict_on_batch([c, a, r])
fake_labels = np.zeros([len(a)])
true_labels = np.ones_like(fake_labels)
qs, labels = np.concatenate([q, oq]), np.concatenate([true_labels, fake_labels])
for _ in range(discriminator_epochs):
dl = self.discriminator.train_on_batch(qs, labels)
if b % 500 == 0:
logging.info(f"Batch {b}/{len(loader)}")
logging.info(f"Question loss - {ql}")
logging.info(f"Generator loss - {gl}")
logging.info(f"Discriminator loss - {dl}")
loader.on_epoch_end()
self.generator.save('models/generator.h5', save_format = 'h5')
self.discriminator.save('models/discriminator.h5', save_format = 'h5')
self.adversarial.save('models/adversarial.h5', save_format = 'h5')
def load(self, load_directory):
load_dir = load_directory.rstrip('/')
custom_objects = {'MultiHeadAttention': MultiHeadAttention}
self.generator = tf.keras.models.load_model(f'{load_dir}/generator.h5', custom_objects=custom_objects)
self.discriminator = tf.keras.models.load_model(f'{load_dir}/discriminator.h5', custom_objects=custom_objects)
self.adversarial = self.adversarial_model()
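# Illustrative usage sketch (added; the shapes below are assumptions, not taken
# from the source):
#
# if __name__ == '__main__':
#     gan = GAN(input_shapes=[(None, 300), (None, 300), (100,)],
#               embedding_size=300, question_padding=20)
#     gan.compile()
#     gan.fit(loader, epochs=10)  # `loader` yields (context, answer, question) batches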
|
python
|
"""A Yelp-powered Restaurant Recommendation Program"""
from abstractions import *
from data import ALL_RESTAURANTS, CATEGORIES, USER_FILES, load_user_file
from ucb import main, trace, interact
from utils import distance, mean, zip, enumerate, sample
from visualize import draw_map
##################################
# Phase 2: Unsupervised Learning #
##################################
def find_closest(location, centroids):
"""Return the centroid in centroids that is closest to location.
If multiple centroids are equally close, return the first one.
>>> find_closest([3.0, 4.0], [[0.0, 0.0], [2.0, 3.0], [4.0, 3.0], [5.0, 5.0]])
[2.0, 3.0]
"""
# BEGIN Question 3
    # This list comprehension measures the distance from `location` to every
    # centroid, enumerates those distances, finds the smallest one, and uses
    # its index to pick out the closest centroid.
    return centroids[min(enumerate([distance(location, centroid) for centroid in centroids]), key=lambda x: x[1])[0]]
# END Question 3
def group_by_first(pairs):
"""Return a list of pairs that relates each unique key in the [key, value]
pairs to a list of all values that appear paired with that key.
Arguments:
pairs -- a sequence of pairs
>>> example = [ [1, 2], [3, 2], [2, 4], [1, 3], [3, 1], [1, 2] ]
>>> group_by_first(example)
[[2, 3, 2], [2, 1], [4]]
"""
keys = []
for key, _ in pairs:
if key not in keys:
keys.append(key)
return [[y for x, y in pairs if x == key] for key in keys]
def group_by_centroid(restaurants, centroids):
"""Return a list of clusters, where each cluster contains all restaurants
nearest to a corresponding centroid in centroids. Each item in
restaurants should appear once in the result, along with the other
restaurants closest to the same centroid.
"""
# BEGIN Question 4
stack = []
for rest in restaurants:
closest_centroid = find_closest(restaurant_location(rest), centroids)
stack.append([closest_centroid, rest])
return group_by_first(stack)
# END Question 4
def find_centroid(cluster):
"""Return the centroid of the locations of the restaurants in cluster."""
# BEGIN Question 5
centx, centy = 0, 0
for rest in cluster:
[restx, resty] = restaurant_location(rest)
centx, centy = restx + centx, resty + centy
return [centx / len(cluster), centy / len(cluster)]
# END Question 5
def k_means(restaurants, k, max_updates=100):
"""Use k-means to group restaurants by location into k clusters."""
assert len(restaurants) >= k, 'Not enough restaurants to cluster'
old_centroids, n = [], 0
# Select initial centroids randomly by choosing k different restaurants
centroids = [restaurant_location(r) for r in sample(restaurants, k)]
while old_centroids != centroids and n < max_updates:
old_centroids = centroids
# BEGIN Question 6
clusters = group_by_centroid(restaurants, old_centroids)
centroids = [find_centroid(cluster) for cluster in clusters]
# END Question 6
n += 1
return centroids
################################
# Phase 3: Supervised Learning #
################################
def find_predictor(user, restaurants, feature_fn):
"""Return a rating predictor (a function from restaurants to ratings),
for a user by performing least-squares linear regression using feature_fn
on the items in restaurants. Also, return the R^2 value of this model.
Arguments:
user -- A user
restaurants -- A sequence of restaurants
feature_fn -- A function that takes a restaurant and returns a number
"""
reviews_by_user = {review_restaurant_name(review): review_rating(review)
for review in user_reviews(user).values()}
xs = [feature_fn(r) for r in restaurants]
ys = [reviews_by_user[restaurant_name(r)] for r in restaurants]
# BEGIN Question 7
b, a, r_squared = 0, 0, 0
mxs, mys = mean(xs), mean(ys)
S_xx, S_yy, S_xy = 0, 0, 0
S_xx = sum([(x - mxs) ** 2 for x in xs])
S_yy = sum([(y - mys) ** 2 for y in ys])
S_xy = sum([(x - mxs) * (y - mys) for x, y in zip(xs, ys)])
b = S_xy / S_xx
a = mean(ys) - b * mean(xs)
r_squared = S_xy ** 2 / (S_xx * S_yy)
# END Question 7
def predictor(restaurant):
return b * feature_fn(restaurant) + a
return predictor, r_squared
def best_predictor(user, restaurants, feature_fns):
"""Find the feature within feature_fns that gives the highest R^2 value
for predicting ratings by the user; return a predictor using that feature.
Arguments:
user -- A user
restaurants -- A list of restaurants
feature_fns -- A sequence of functions that each takes a restaurant
"""
reviewed = user_reviewed_restaurants(user, restaurants)
# BEGIN Question 8
stack = []
for feat_fn in feature_fns:
stack.append([*find_predictor(user, reviewed, feat_fn)])
    return max(stack, key=lambda x: x[1])[0]
# END Question 8
def rate_all(user, restaurants, feature_fns):
"""Return the predicted ratings of restaurants by user using the best
predictor based on a function from feature_fns.
Arguments:
user -- A user
restaurants -- A list of restaurants
feature_fns -- A sequence of feature functions
"""
predictor = best_predictor(user, ALL_RESTAURANTS, feature_fns)
reviewed = user_reviewed_restaurants(user, restaurants)
# BEGIN Question 9
ratings = {}
for rest in restaurants:
rest_name = restaurant_name(rest)
ratings[rest_name] = user_rating(user, rest_name) if rest in reviewed else predictor(rest)
return ratings
# END Question 9
def search(query, restaurants):
"""Return each restaurant in restaurants that has query as a category.
Arguments:
query -- A string
restaurants -- A sequence of restaurants
"""
# BEGIN Question 10
return [rest for rest in restaurants if query in restaurant_categories(rest)]
# END Question 10
def feature_set():
"""Return a sequence of feature functions."""
return [lambda r: mean(restaurant_ratings(r)),
restaurant_price,
lambda r: len(restaurant_ratings(r)),
lambda r: restaurant_location(r)[0],
lambda r: restaurant_location(r)[1]]
@main
def main(*args):
import argparse
parser = argparse.ArgumentParser(
description='Run Recommendations',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('-u', '--user', type=str, choices=USER_FILES,
default='test_user',
metavar='USER',
help='user file, e.g.\n' +
'{{{}}}'.format(','.join(sample(USER_FILES, 3))))
parser.add_argument('-k', '--k', type=int, help='for k-means')
parser.add_argument('-q', '--query', choices=CATEGORIES,
metavar='QUERY',
help='search for restaurants by category e.g.\n'
'{{{}}}'.format(','.join(sample(CATEGORIES, 3))))
parser.add_argument('-p', '--predict', action='store_true',
help='predict ratings for all restaurants')
parser.add_argument('-r', '--restaurants', action='store_true',
help='outputs a list of restaurant names')
args = parser.parse_args()
# Output a list of restaurant names
if args.restaurants:
print('Restaurant names:')
for restaurant in sorted(ALL_RESTAURANTS, key=restaurant_name):
print(repr(restaurant_name(restaurant)))
exit(0)
# Select restaurants using a category query
if args.query:
restaurants = search(args.query, ALL_RESTAURANTS)
else:
restaurants = ALL_RESTAURANTS
# Load a user
assert args.user, 'A --user is required to draw a map'
user = load_user_file('{}.dat'.format(args.user))
# Collect ratings
if args.predict:
ratings = rate_all(user, restaurants, feature_set())
else:
restaurants = user_reviewed_restaurants(user, restaurants)
names = [restaurant_name(r) for r in restaurants]
ratings = {name: user_rating(user, name) for name in names}
# Draw the visualization
if args.k:
centroids = k_means(restaurants, min(args.k, len(restaurants)))
else:
centroids = [restaurant_location(r) for r in restaurants]
draw_map(centroids, restaurants, ratings)
|
python
|
import subprocess
import ansiconv
import sys
from django.conf import settings
from django.http import StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import View
from fabric_bolt.projects.models import Deployment
from fabric_bolt.projects.signals import deployment_finished
from fabric_bolt.projects.views import StageSubPageMixin
from .. import backend
class DeploymentOutputStream(StageSubPageMixin, View):
"""
Deployment view does the heavy lifting of calling Fabric Task for a Project Stage
"""
def output_stream_generator(self):
if backend.get_task_details(self.project, self.object.task.name) is None:
return
try:
process = subprocess.Popen(
backend.build_command(self.project, self.object, self.request.session),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable=getattr(settings, 'SHELL', '/bin/bash'),
)
all_output = ''
yield '<link rel="stylesheet" type="text/css" href="/static/css/console-style.css">'
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() is not None:
break
all_output += nextline
nextline = '<span class="output-line">{}</span>'.format(ansiconv.to_html(nextline))
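                # Pad each chunk so intermediate browser/proxy buffers flush promptly.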
yield nextline + ' '*1024
sys.stdout.flush()
self.object.status = self.object.SUCCESS if process.returncode == 0 else self.object.FAILED
yield '<span id="finished" style="display:none;">{}</span> {}'.format(self.object.status, ' '*1024)
self.object.output = all_output
self.object.save()
deployment_finished.send(self.object, deployment_id=self.object.pk)
        except Exception as e:
            message = "An error occurred: " + str(e)
            yield '<span class="output-line">{}</span>'.format(message) + ' ' * 1024
            yield '<span id="finished" style="display:none;">failed</span> {}'.format(' ' * 1024)
def get(self, request, *args, **kwargs):
self.object = get_object_or_404(
Deployment,
stage=self.stage,
pk=int(kwargs['pk']),
status=Deployment.PENDING
)
resp = StreamingHttpResponse(self.output_stream_generator())
return resp
|
python
|
import json
import asyncio
import sys
from joplin_api import JoplinApi
from httpx import Response
import pprint
import difflib
import logging
with open('.token','r') as f:
token = f.readline()
joplin = JoplinApi(token)
async def search(query):
res = await joplin.search(query,field_restrictions='title')
logging.info(res)
# await joplin.client.aclose()
titles = [item['title'] for item in res.json()['items']]
return titles
async def new_folder(name):
"""Returns folder's id to be used as 'parent_id' for notes"""
res = await joplin.create_folder(folder=name)
return res.json()['id']
async def new_note(title, body, folder_id, tags=None):
    assert isinstance(title, str)
    assert isinstance(body, str)
    assert isinstance(folder_id, str)
    kwargs = {}
    if tags:
        assert isinstance(tags, list)
        kwargs['tags'] = ', '.join(tags)
    await joplin.create_note(title=title, body=body, parent_id=folder_id, **kwargs)
async def tag_id(title):
    "Fetches a tag's id given its title."
    res = await joplin.search(title, item_type='tag')
    data = res.json()['items'][0]
    return data['id']
async def notes_by_tag(title):
"Lists all note (as dics) for a given tags"
return (await joplin.get_tags_notes(await tag_id(title))).json()['items']
async def update_note(note,tags=None):
""" Uploads note to serve.
Required item or they will be erased: 'author', 'source_url', 'is_todo'
if 'is_todo' in addition: 'todo_due', 'todo_completed'
All other items are ignored.
:param note: note data as dict
:param tags: list of tag titles to replace current tags. If None current tags are kept
"""
assert isinstance(note, dict), note
id = note.pop('id')
title = note.pop('title')
body = note.pop('body')
pid = note.pop('parent_id')
    # fetch the tags from the server; they are not returned by `get_note`
if tags:
note['tags'] = ', '.join(tags)
else:
tags = (await joplin.get_notes_tags(id)).json()
note['tags'] = ', '.join([t['title'] for t in tags])
# see https://github.com/foxmask/joplin-api/blob/master/joplin_api/core.py
res = await joplin.update_note(id,title, body, pid, **note)
assert res.status_code == 200, res
async def edit_notes(editor, tag_title, logger):
    """ Applies a function to every note with the given tag and uploads the changes.
    :param editor: function accepting a note data dict and returning those items that changed
    :param tag_title: notes with a tag of this title will be processed
    """
notes = await notes_by_tag(tag_title)
edits = [(await editor(n)) for n in notes]
differ = difflib.Differ()
for edit, note in zip(edits, notes):
if edit:
# log diff
for k,v in edit.items():
logger.info(f"Updating '{k}' for note {note['id']}.")
diff = differ.compare(note[k].splitlines(), edit[k].splitlines())
for d in diff:
if not d.startswith(' '):
logger.info(d)
# update server
note.update(edit)
await update_note(note)
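# Illustrative usage sketch (added; `upper_title` is a hypothetical editor):
# uppercase the title of every note tagged 'todo' and push the changes back.
#
# async def upper_title(note):
#     new_title = note['title'].upper()
#     return {'title': new_title} if new_title != note['title'] else None
#
# asyncio.run(edit_notes(upper_title, 'todo', logging.getLogger(__name__)))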
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 30/11/2021 22:19
# @Author : Mr. Y
# @Site :
# @File : similarity_indicators.py
# @Software: PyCharm
import numpy as np
np.seterr(divide='ignore',invalid='ignore')
import time
import os
from utils import Initialize
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
def Calculation_AUC(MatrixAdjacency_Train, MatrixAdjacency_Test, Matrix_similarity, MaxNodeNum):
AUC_TimeStart = time.perf_counter()
print(' Calculation AUC......')
AUCnum = 672400
Matrix_similarity = np.triu(Matrix_similarity - Matrix_similarity * MatrixAdjacency_Train)
Matrix_NoExist = np.ones(MaxNodeNum) - MatrixAdjacency_Train - MatrixAdjacency_Test - np.eye(MaxNodeNum)
Test = np.triu(MatrixAdjacency_Test)
NoExist = np.triu(Matrix_NoExist)
Test_num = len(np.argwhere(Test == 1))
NoExist_num = len(np.argwhere(NoExist == 1))
Test_rd = [int(x) for index, x in enumerate((Test_num * np.random.rand(1, AUCnum))[0])]
NoExist_rd = [int(x) for index, x in enumerate((NoExist_num * np.random.rand(1, AUCnum))[0])]
TestPre = Matrix_similarity * Test
NoExistPre = Matrix_similarity * NoExist
TestIndex = np.argwhere(Test == 1)
Test_Data = np.array([TestPre[x[0], x[1]] for index, x in enumerate(TestIndex)]).T
NoExistIndex = np.argwhere(NoExist == 1)
NoExist_Data = np.array([NoExistPre[x[0], x[1]] for index, x in enumerate(NoExistIndex)]).T
Test_rd = np.array([Test_Data[x] for index, x in enumerate(Test_rd)])
NoExist_rd = np.array([NoExist_Data[x] for index, x in enumerate(NoExist_rd)])
n1, n2 = 0, 0
for num in range(AUCnum):
if Test_rd[num] > NoExist_rd[num]:
n1 += 1
elif Test_rd[num] == NoExist_rd[num]:
n2 += 0.5
else:
n1 += 0
auc = float(n1 + n2) / AUCnum
    print(' AUC = %f' % auc)
AUC_TimeEnd = time.perf_counter()
print(' AUCTime:%f s' % (AUC_TimeEnd - AUC_TimeStart))
return auc
def AA(MatrixAdjacency_Train):
similarity_StartTime = time.perf_counter()
logTrain = np.log(sum(MatrixAdjacency_Train))
logTrain = np.nan_to_num(logTrain)
logTrain.shape = (logTrain.shape[0], 1)
MatrixAdjacency_Train_Log = MatrixAdjacency_Train / logTrain
MatrixAdjacency_Train_Log = np.nan_to_num(MatrixAdjacency_Train_Log)
Matrix_similarity = np.dot(MatrixAdjacency_Train, MatrixAdjacency_Train_Log)
similarity_EndTime = time.perf_counter()
print(" SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime))
return Matrix_similarity
def Jaccavrd(MatrixAdjacency_Train):
similarity_StartTime = time.perf_counter()
Matrix_similarity = np.dot(MatrixAdjacency_Train, MatrixAdjacency_Train)
deg_row = sum(MatrixAdjacency_Train)
deg_row.shape = (deg_row.shape[0], 1)
deg_row_T = deg_row.T
tempdeg = deg_row + deg_row_T
temp = tempdeg - Matrix_similarity
Matrix_similarity = Matrix_similarity / temp
similarity_EndTime = time.perf_counter()
print(" SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime))
return Matrix_similarity
def RWR(MatrixAdjacency_Train):
similarity_StartTime = time.perf_counter()
Parameter = 0.85
    Matrix_TransitionProbability = MatrixAdjacency_Train / sum(MatrixAdjacency_Train)
    Matrix_EYE = np.eye(MatrixAdjacency_Train.shape[0])
    Temp = Matrix_EYE - Parameter * Matrix_TransitionProbability.T
INV_Temp = np.linalg.inv(Temp)
Matrix_RWR = (1 - Parameter) * np.dot(INV_Temp, Matrix_EYE)
Matrix_similarity = Matrix_RWR + Matrix_RWR.T
similarity_EndTime = time.perf_counter()
print(" SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime))
return Matrix_similarity
def Cn(MatrixAdjacency_Train):
similarity_StartTime = time.perf_counter()
Matrix_similarity = np.dot(MatrixAdjacency_Train, MatrixAdjacency_Train)
similarity_EndTime = time.perf_counter()
print(" SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime))
return Matrix_similarity
def Salton(MatrixAdjacency_Train):
    similarity_StartTime = time.perf_counter()
similarity = np.dot(MatrixAdjacency_Train, MatrixAdjacency_Train)
deg_row = sum(MatrixAdjacency_Train)
deg_row.shape = (deg_row.shape[0], 1)
deg_row_T = deg_row.T
tempdeg = np.dot(deg_row, deg_row_T)
temp = np.sqrt(tempdeg)
Matrix_similarity = np.nan_to_num(similarity / temp)
    similarity_EndTime = time.perf_counter()
print(" SimilarityTime: %f s" % (similarity_EndTime - similarity_StartTime))
return Matrix_similarity
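# Illustrative usage sketch (added): score a toy 4-node graph with common
# neighbours and evaluate AUC; the train/test adjacency matrices share one shape.
if __name__ == '__main__':
    A_train = np.array([[0, 1, 1, 0],
                        [1, 0, 1, 0],
                        [1, 1, 0, 0],
                        [0, 0, 0, 0]], dtype=float)
    A_test = np.array([[0, 0, 0, 1],
                       [0, 0, 0, 0],
                       [0, 0, 0, 1],
                       [1, 0, 1, 0]], dtype=float)
    Calculation_AUC(A_train, A_test, Cn(A_train), MaxNodeNum=4)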
|
python
|
"""
"""
try:
from stage_check import Output
except ImportError:
import Output
try:
from stage_check import OutputAssetState
except ImportError:
import OutputAssetState
def create_instance():
return OutputAssetStateText()
class OutputAssetStateText(OutputAssetState.Base, Output.Text):
"""
"""
def __init__(self):
super().__init__()
def amend_interim_result(self, entry, status=None):
try:
message = Output.populate_format(entry, entry["test_format"])
except KeyError:
message = "NO FORMAT PROVIDED..."
if status != Output.Status.OK:
self.message_list.append(message)
self.message_list.append(f" text: {entry['text']}")
return True
    def amend_test_result(self, entry_tests, stats):
        return self.test_result_to_text(entry_tests, stats)
|
python
|
from django.urls import re_path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
re_path(r'^$',views.account,name='account'),
re_path(r'^account/',views.account,name='account'),
re_path(r'^home/',views.home,name='home'),
re_path(r'^search/', views.search_results, name='search_results')
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
python
|
# coding: utf-8
# flake8: noqa E266
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from flask_env import MetaFlaskEnv
from celery.schedules import crontab
class base_config(object, metaclass=MetaFlaskEnv):
"""Default configuration options."""
ENV_PREFIX = 'APP_'
SITE_NAME = 'TES API Server for Azure Compute'
SECRET_KEY = 'secrets'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://username:password@host:5432/dbname'
CELERY_RESULT_BACKEND = 'redis://key@host:6379'
CELERY_BROKER_URL = 'redis://key@host:6379'
SUPPORTED_LOCALES = ['en']
#
## Common
#
COMPUTE_BACKEND = "mock" # among ['mock', 'aks', 'batch']
STORAGE_ACCOUNT_NAME = ''
STORAGE_ACCOUNT_KEY = ''
PRIVATE_DOCKER_REGISTRY_URL = None # "myregistry.azurecr.io"
PRIVATE_DOCKER_REGISTRY_USERNAME = "username"
PRIVATE_DOCKER_REGISTRY_PASSWORD = "password"
FILETRANSFER_CONTAINER_IMAGE = 'azuretes.azurecr.io/tesazure/container-filetransfer:latest' # FIXME: change this to the public version when available
#
## Logging - App Insights / OpenCensus.
#
APPINSIGHTS_DISABLE_REQUEST_LOGGING = True
APPINSIGHTS_DISABLE_TRACE_LOGGING = False
APPINSIGHTS_DISABLE_EXCEPTION_LOGGING = False
APPINSIGHTS_INSTRUMENTATIONKEY = None
#
## AAD User verification and task restrictions
#
AAD_VERIFY = False
AAD_AUDIENCE = 'aad-client-id'
AAD_TENANT_ID = 'aad-tenant-id'
AAD_JWKS_URI = 'https://login.microsoftonline.com/common/discovery/v2.0/keys'
# Choices among [None, 'per-user', 'per-tenant']
# Anything other than None requires AAD_VERIFY to be True
TASK_ACCESS_RESTRICTIONS = None
#
## Backend - Batch
#
BATCH_ACCOUNT_NAME = ''
BATCH_ACCOUNT_KEY = ''
BATCH_ACCOUNT_URL = ''
BATCH_STORAGE_TMP_CONTAINER_NAME = 'batchtmp'
BATCH_STORAGE_FILESHARE_NAME = 'batchfiles'
BATCH_POOL_DEDICATED_NODE_COUNT = 0
BATCH_POOL_LOW_PRIORITY_NODE_COUNT = 1
BATCH_NODE_ADMIN_USERNAME = None
BATCH_NODE_ADMIN_PASSWORD = None
BATCH_AUTOPOOL_KEEPALIVE = False
#
## Key Vault
#
KEYVAULT_URL = None
KEYVAULT_SECRETS_PREFIX = "TESAZURE-"
AZURE_CLIENT_ID = None
AZURE_SECRET = None
AZURE_TENANT = None
#
## Background tasks
#
# Used by celery beat scheduler container
CELERY_BEAT_SCHEDULE = {
"cleanup_tasks": {
"task": "tesazure.jobs.cleanup_tasks",
"schedule": crontab(minute='*/5')
}
}
TASK_BACKEND_CLEANUP_HOURS = 24
TASK_DATABASE_CLEANUP_HOURS = 48
TASK_EXECUTION_TIMEOUT_HOURS = 12
#
## Workflow engine-specific
#
CROMWELL_STORAGE_CONTAINER_NAME = "cromwell"
class dev_config(base_config, metaclass=MetaFlaskEnv):
"""Development configuration options."""
ENV_PREFIX = 'APP_'
ASSETS_DEBUG = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://tesadmin:testpassword@postgres:5432/tesapi'
CELERY_RESULT_BACKEND = 'redis://redis:6379'
CELERY_BROKER_URL = 'redis://redis:6379'
ADMIN_USER_NAME = "sshdebug"
ADMIN_USER_PASSWORD = "testUser!!12345"
POOL_LOW_PRIORITY_NODE_COUNT = 1
BATCH_OVERRIDE_POOL_ID = None
SQLALCHEMY_ECHO = False
class test_config(base_config, metaclass=MetaFlaskEnv):
"""Testing configuration options."""
ENV_PREFIX = 'APP_'
ENV = 'test'
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///memory'
COMPUTE_BACKEND = 'mock'
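# Illustrative note (added): because of MetaFlaskEnv and ENV_PREFIX = 'APP_', any
# option above can be overridden from the environment, e.g.
#   export APP_COMPUTE_BACKEND=batch
#   export APP_BATCH_ACCOUNT_NAME=mybatchaccount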
|
python
|
You are given an array nums of n positive integers.
You can perform two types of operations on any element of the array any number of times:
If the element is even, divide it by 2.
For example, if the array is [1,2,3,4], then you can do this operation on the last element, and the array will be [1,2,3,2].
If the element is odd, multiply it by 2.
For example, if the array is [1,2,3,4], then you can do this operation on the first element, and the array will be [2,2,3,4].
The deviation of the array is the maximum difference between any two elements in the array.
Return the minimum deviation the array can have after performing some number of operations.
Example 1:
Input: nums = [1,2,3,4]
Output: 1
Explanation: You can transform the array to [1,2,3,2], then to [2,2,3,2], then the deviation will be 3 - 2 = 1.
Example 2:
Input: nums = [4,1,5,20,3]
Output: 3
Explanation: You can transform the array after two operations to [4,2,5,5,3], then the deviation will be 5 - 2 = 3.
Example 3:
Input: nums = [2,10,8]
Output: 3
Constraints:
n == nums.length
2 <= n <= 10^5
1 <= nums[i] <= 10^9
Solution:
from typing import List
from sortedcontainers import SortedList

class Solution:
    def minimumDeviation(self, nums: List[int]) -> int:
        # Double every odd number; afterwards every element can only shrink by halving.
        for i in range(len(nums)):
            if nums[i] % 2 != 0:
                nums[i] *= 2
        nums = SortedList(nums)
        result = nums[-1] - nums[0]
        # Keep halving the maximum while it is even, tracking the best deviation seen.
        while True:
            min_value = nums[0]
            max_value = nums[-1]
            if max_value % 2 == 0:
                nums.pop()
                nums.add(max_value // 2)
                result = min(result, nums[-1] - nums[0])
            else:
                result = min(result, max_value - min_value)
                break
        return result
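# Quick checks (added) against the worked examples above:
# Solution().minimumDeviation([1, 2, 3, 4])     -> 1
# Solution().minimumDeviation([4, 1, 5, 20, 3]) -> 3
# Solution().minimumDeviation([2, 10, 8])       -> 3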
|
python
|
import pandas as pd
import numpy as np
from unittest import TestCase
from unittest.mock import MagicMock, PropertyMock
from gtfs_kit.feed import Feed
from representation.gtfs_metadata import GtfsMetadata
from representation.gtfs_representation import GtfsRepresentation
from usecase.process_stops_count_by_type_for_gtfs_metadata import (
process_stops_count_by_type_for_gtfs_metadata,
LOCATION_TYPE,
)
class TestProcessStopsCountByTypeForGtfsMetadata(TestCase):
def test_process_stops_count_with_none_gtfs_representation_should_raise_exception(
self,
):
self.assertRaises(
TypeError, process_stops_count_by_type_for_gtfs_metadata, None
)
def test_process_stops_count_with_invalid_gtfs_representation_should_raise_exception(
self,
):
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = str
self.assertRaises(
TypeError,
process_stops_count_by_type_for_gtfs_metadata,
mock_gtfs_representation,
)
def test_process_stops_count_with_missing_files(self):
mock_dataset = MagicMock()
mock_dataset.__class__ = Feed
mock_metadata = MagicMock()
mock_metadata.__class__ = GtfsMetadata
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = GtfsRepresentation
type(mock_gtfs_representation).dataset = mock_dataset
type(mock_gtfs_representation).metadata = mock_metadata
under_test = process_stops_count_by_type_for_gtfs_metadata(
mock_gtfs_representation
)
self.assertIsInstance(under_test, GtfsRepresentation)
mock_metadata.stops_count_by_type.assert_not_called()
def test_process_stops_count_with_missing_fields(self):
mock_stops = PropertyMock(return_value=pd.DataFrame({}))
mock_dataset = MagicMock()
mock_dataset.__class__ = Feed
type(mock_dataset).stops = mock_stops
mock_metadata = MagicMock()
mock_metadata.__class__ = GtfsMetadata
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = GtfsRepresentation
type(mock_gtfs_representation).dataset = mock_dataset
type(mock_gtfs_representation).metadata = mock_metadata
under_test = process_stops_count_by_type_for_gtfs_metadata(
mock_gtfs_representation
)
self.assertIsInstance(under_test, GtfsRepresentation)
mock_metadata.stops_count_by_type.assert_not_called()
def test_process_stops_count_execution_with_every_stop_type(
self,
):
mock_stops = PropertyMock(
return_value=pd.DataFrame({LOCATION_TYPE: [0, 2, 1, 0, 0, 1, 0, 0, np.nan]})
)
mock_dataset = MagicMock()
mock_dataset.__class__ = Feed
type(mock_dataset).stops = mock_stops
mock_metadata = MagicMock()
mock_metadata.__class__ = GtfsMetadata
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = GtfsRepresentation
type(mock_gtfs_representation).dataset = mock_dataset
type(mock_gtfs_representation).metadata = mock_metadata
under_test = process_stops_count_by_type_for_gtfs_metadata(
mock_gtfs_representation
)
self.assertIsInstance(under_test, GtfsRepresentation)
mock_stops.assert_called()
self.assertEqual(
mock_metadata.stops_count_by_type, {"stop": 6, "station": 2, "entrance": 1}
)
def test_process_stops_count_execution_with_some_stop_types(
self,
):
mock_stops = PropertyMock(
return_value=pd.DataFrame({LOCATION_TYPE: [0, 1, 0, 0, 1, 0, 0, np.nan]})
)
mock_dataset = MagicMock()
mock_dataset.__class__ = Feed
type(mock_dataset).stops = mock_stops
mock_metadata = MagicMock()
mock_metadata.__class__ = GtfsMetadata
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = GtfsRepresentation
type(mock_gtfs_representation).dataset = mock_dataset
type(mock_gtfs_representation).metadata = mock_metadata
under_test = process_stops_count_by_type_for_gtfs_metadata(
mock_gtfs_representation
)
self.assertIsInstance(under_test, GtfsRepresentation)
mock_stops.assert_called()
self.assertEqual(mock_metadata.stops_count_by_type, {"stop": 6, "station": 2})
def test_process_stops_count_execution_with_empty_stop_types(
self,
):
mock_stops = PropertyMock(return_value=pd.DataFrame({LOCATION_TYPE: []}))
mock_dataset = MagicMock()
mock_dataset.__class__ = Feed
type(mock_dataset).stops = mock_stops
mock_metadata = MagicMock()
mock_metadata.__class__ = GtfsMetadata
mock_gtfs_representation = MagicMock()
mock_gtfs_representation.__class__ = GtfsRepresentation
type(mock_gtfs_representation).dataset = mock_dataset
type(mock_gtfs_representation).metadata = mock_metadata
under_test = process_stops_count_by_type_for_gtfs_metadata(
mock_gtfs_representation
)
self.assertIsInstance(under_test, GtfsRepresentation)
mock_stops.assert_called()
mock_metadata.stops_count_by_type.assert_not_called()
|
python
|
from cryptography.utils import CryptographyDeprecationWarning
import warnings
warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)
from qc_qubosolv.solver import Solver
|
python
|
from sqlalchemy import create_engine
engine = create_engine('sqlite:///todo.db?check_same_thread=False')
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from datetime import datetime
Base = declarative_base()
class task(Base):
__tablename__ = 'task'
id = Column(Integer, primary_key=True)
task = Column(String, default='')
    deadline = Column(Date, default=datetime.today)  # pass the callable so the default is evaluated per insert
def __repr__(self):
return self.task
Base.metadata.create_all(engine)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker(bind=engine)
session = Session()
from datetime import datetime,timedelta
today=datetime.today().date()
rows=session.query(task).filter(task.deadline==today).all()
while (True):
print("1) Today's tasks")
print("2) Week's tasks")
print("3) All tasks")
print("4) Missed tasks")
print("5) Add task")
print("6) Delete task")
print("0) Exit")
n = int(input())
if n == 0:
print("Bye!")
        break
if n == 1:
count = 1
tasks = session.query(task).filter(task.deadline == datetime.today().date()).all()
print("Today {0} {1}:".format(datetime.today().day, datetime.today().strftime('%b')))
for task_today in tasks:
print("{0}. {1}".format(count, task_today))
count += 1
if count == 1:
print("Nothing to do!")
if n == 2:
for i in range(7):
count = 1
tasks = session.query(task).filter(task.deadline == datetime.today().date() + timedelta(days=i)).all()
print("{2} {0} {1}:".format((datetime.today() + timedelta(days=i)).day,
(datetime.today() + timedelta(days=i)).strftime('%b'),
(datetime.today() + timedelta(days=i)).strftime('%A')))
for task_week in tasks:
print("{0}. {1}".format(count, task_week))
count += 1
if count == 1:
print("Nothing to do!")
print()
if n == 3:
count = 1
tasks = session.query(task).all()
print("All tasks:")
for task_x in tasks:
print("{0}. {1}".format(count, task_x))
count += 1
if count == 1:
print("Nothing to do!")
if n == 4:
missed_tasks = session.query(task).filter(task.deadline < datetime.today().date()).all()
print("Missed activities:")
count = 1
for missed_task in missed_tasks:
print("{0}. {1}".format(count, missed_task))
count += 1
if count == 1:
print("Nothing is missed!")
print()
if n == 5:
print("Enter activity")
activity = input()
print("Enter deadline")
activity_deadline_str = input()
activity_deadline = datetime.strptime(activity_deadline_str, '%Y-%m-%d').date()
new_task = task(task=activity, deadline=activity_deadline)
session.add(new_task)
session.commit()
print("The task has been added!")
    if n == 6:
        tasks = session.query(task).all()
        if not tasks:
            print("Nothing to delete!")
        else:
            print("Choose the number of the task you want to delete:")
            count = 1
            for task_x in tasks:
                print("{0}. {1}".format(count, task_x))
                count += 1
            n = int(input())
            session.delete(tasks[n - 1])
            session.commit()
            print("The task has been deleted!")
|
python
|
n = int(input())
res = ""
for i in range(1, n + 1):
    a, b = map(int, input().split())
    # Sum of consecutive integers from a to b: all three original sign cases
    # reduce to the same closed form (the product is always even).
    res += "Scenario #" + str(i) + ":\n" + str((a + b) * (b - a + 1) // 2)
    if i != n:
        res += "\n\n"
print(res, end="")
|
python
|
import torch
import torch.nn as nn
def FocalLoss(logits, labels, inverse_normed_freqs):
    # Binary focal loss with gamma=2, weighted per label by inverse_normed_freqs.
    labels = labels.type(torch.float32)
probs = torch.sigmoid(logits)
pt = (1 - labels) * (1 - probs) + labels * probs
log_pt = torch.log(pt)
floss = - (1 - pt)**2 * log_pt
floss_weighted = floss * inverse_normed_freqs
return torch.mean(floss_weighted)
def FocalLoss2(logits, labels, inverse_normed_freqs):
labels = labels.type(torch.float32)
probs = torch.sigmoid(logits)
pt = (1 - labels) * (1 - probs) + labels * probs
log_pt = torch.log(pt)
floss = - (1 - pt)**2 * log_pt
alpha = inverse_normed_freqs.repeat(labels.shape[0]).view((labels.shape[0],-1))
weights = (1 - labels) * (1 - alpha) + labels * alpha
floss_weighted = floss * weights
return torch.mean(floss_weighted)
def FocalLoss3(logits, labels, weights_0):
batch_size = labels.shape[0]
num_labels = labels.shape[1] # should be 9
weights_1 = 1/num_labels - weights_0
labels = labels.type(torch.float32)
probs = torch.sigmoid(logits)
pt = (1 - labels) * (1 - probs) + labels * probs
log_pt = torch.log(pt)
floss = - (1 - pt)**2 * log_pt
alpha_0 = weights_0.repeat(batch_size).view((batch_size,-1))
alpha_1 = weights_1.repeat(batch_size).view((batch_size,-1))
weights = (1 - labels) * alpha_0 + labels * alpha_1
floss_weighted = floss * weights
return torch.mean(floss_weighted)
def BCELoss(logits, labels, inverse_normed_freqs=None):
loss_fct = nn.BCEWithLogitsLoss()
num_labels = labels.shape[1]
# loss = loss_fct(logits.view(-1, num_labels).double(), labels.view(-1, self.num_labels).double())
loss = loss_fct(logits.double(), labels.double())
return loss
def SoftmaxFocalLoss(logits, labels, inverse_normed_freqs):
labels = labels.type(torch.float32)
m = nn.Softmax(dim=1)
probs = m(logits)
logprobs = torch.log(probs)
logprobs = (1 - probs)**2 * logprobs
logyhat_for_gold = labels * logprobs * inverse_normed_freqs
logyhat_for_gold_summed = torch.sum(logyhat_for_gold, dim=1)
return torch.mean(-logyhat_for_gold_summed)
def SoftmaxLoss(logits, labels, inverse_normed_freqs):
labels = labels.type(torch.float32)
m = nn.Softmax(dim=1)
probs = m(logits)
logyhat_for_gold = labels * torch.log(probs)
logyhat_for_gold_summed = torch.sum(logyhat_for_gold, dim=1)
return torch.mean(-logyhat_for_gold_summed)
def SoftmaxWeightedLoss(logits, labels, inverse_normed_freqs):
labels = labels.type(torch.float32)
m = nn.Softmax(dim=1)
probs = m(logits)
logyhat_for_gold = labels * torch.log(probs) * inverse_normed_freqs
logyhat_for_gold_summed = torch.sum(logyhat_for_gold, dim=1)
return torch.mean(-logyhat_for_gold_summed)
def NormalizedLogSoftmaxLoss(logits, labels, inverse_normed_freqs):
labels = labels.type(torch.float32)
m = nn.Softmax(dim=1)
probs = m(logits)
logyhat_for_gold = labels * torch.log(probs)
logyhat_for_gold_normalized_summed = torch.sum(logyhat_for_gold / labels.sum(dim=1).reshape((-1,1)), dim=1)
return torch.mean(-logyhat_for_gold_normalized_summed)
def LogNormalizedSoftmaxLoss(logits, labels, inverse_normed_freqs):
labels = labels.type(torch.float32)
m = nn.Softmax(dim=1)
probs = m(logits)
yhat_for_gold = labels * probs
yhat_for_gold_normalized = torch.sum(yhat_for_gold / labels.sum(dim=1).reshape((-1,1)),dim=1)
logyhat_for_gold_normalized = torch.log(yhat_for_gold_normalized)
return torch.mean(-logyhat_for_gold_normalized)
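# --- Added smoke test (my assumption: logits/labels are (batch, num_labels) and
# inverse_normed_freqs is a per-label weight vector; the numbers are made up).
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(4, 9)
    labels = torch.randint(0, 2, (4, 9))
    freqs = torch.rand(9)
    print("focal:", FocalLoss(logits, labels, freqs).item())
    print("bce:", BCELoss(logits, labels).item())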
|
python
|
import sys
import inspect
import torch
#from torch_geometric.utils import scatter_
from torch_scatter import scatter
special_args = [
'edge_index', 'edge_index_i', 'edge_index_j', 'size', 'size_i', 'size_j'
]
__size_error_msg__ = ('All tensors which should get mapped to the same source '
'or target nodes must be of same size in dimension 0.')
is_python2 = sys.version_info[0] < 3
getargspec = inspect.getargspec if is_python2 else inspect.getfullargspec
class MessagePassing(torch.nn.Module):
def __init__(self, aggr='sum', flow='source_to_target'):
super(MessagePassing, self).__init__()
self.aggr = aggr
assert self.aggr in ['sum', 'mean', 'max']
self.flow = flow
assert self.flow in ['source_to_target', 'target_to_source']
self.__message_args__ = getargspec(self.message)[0][1:]
self.__special_args__ = [(i, arg)
for i, arg in enumerate(self.__message_args__)
if arg in special_args]
self.__message_args__ = [
arg for arg in self.__message_args__ if arg not in special_args
]
self.__update_args__ = getargspec(self.update)[0][2:]
def propagate(self, edge_index, size=None, dim=0, **kwargs):
dim = 1 # aggregate messages wrt nodes for batched_data: [batch_size, nodes, features]
size = [None, None] if size is None else list(size)
assert len(size) == 2
i, j = (0, 1) if self.flow == 'target_to_source' else (1, 0)
ij = {"_i": i, "_j": j}
message_args = []
for arg in self.__message_args__:
if arg[-2:] in ij.keys():
tmp = kwargs.get(arg[:-2], None)
if tmp is None: # pragma: no cover
message_args.append(tmp)
else:
idx = ij[arg[-2:]]
if isinstance(tmp, tuple) or isinstance(tmp, list):
assert len(tmp) == 2
if tmp[1 - idx] is not None:
if size[1 - idx] is None:
size[1 - idx] = tmp[1 - idx].size(dim)
if size[1 - idx] != tmp[1 - idx].size(dim):
raise ValueError(__size_error_msg__)
tmp = tmp[idx]
if tmp is None:
message_args.append(tmp)
else:
if size[idx] is None:
size[idx] = tmp.size(dim)
if size[idx] != tmp.size(dim):
raise ValueError(__size_error_msg__)
tmp = torch.index_select(tmp, dim, edge_index[idx])
message_args.append(tmp)
else:
message_args.append(kwargs.get(arg, None))
size[0] = size[1] if size[0] is None else size[0]
size[1] = size[0] if size[1] is None else size[1]
kwargs['edge_index'] = edge_index
kwargs['size'] = size
for (idx, arg) in self.__special_args__:
if arg[-2:] in ij.keys():
message_args.insert(idx, kwargs[arg[:-2]][ij[arg[-2:]]])
else:
message_args.insert(idx, kwargs[arg])
update_args = [kwargs[arg] for arg in self.__update_args__]
out = self.message(*message_args)
#out = scatter(self.aggr, out, edge_index[i], dim, dim_size=size[i])
out = scatter(out, edge_index[i], dim=dim, dim_size=size[i], reduce=self.aggr)
out = self.update(out, *update_args)
return out
def message(self, x_j): # pragma: no cover
return x_j
def update(self, aggr_out): # pragma: no cover
return aggr_out
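# --- Added illustrative subclass (not in the original file): the smallest layer one
# can build on this MessagePassing base, relaying node features with mean aggregation.
class MeanConv(MessagePassing):
    def __init__(self):
        super(MeanConv, self).__init__(aggr='mean')
    def message(self, x_j):
        # x_j carries the features of the source node of each edge
        return x_j
    def update(self, aggr_out):
        return aggr_out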
|
python
|
from requests import post,get
from requests_oauthlib import OAuth1
from flask import request
from os import path
from json import dumps, loads
from time import sleep
dir_path = path.dirname(path.realpath(__file__))
def read_env(file=".env"):
read_file = open(dir_path + "/" + file, "r")
split_file = [r.strip().split("=",maxsplit=1) for r in read_file.readlines()]
key,value = zip(*split_file)
return dict(zip(key,value))
def read_params(text):
textList = [item.split("=",maxsplit=1) for item in text.split("&")]
key,value = zip(*textList)
return dict(zip(key,value))
config = read_env()
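# Expected .env layout (inferred from the keys read below; values are placeholders):
# client_key=...
# client_secret=...
# discord_webhook=https://discord.com/api/webhooks/...
# timeout=60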
def send_Webhook(url,data):
return post(url,headers={"content-type":"application/json"},data=data)
def writeFile(fileName,data):
file = open(fileName, "w")
file.write(data)
file.close()
def keepToken():
r = post(url="https://api.twitter.com/oauth/access_token?oauth_token={}&oauth_verifier={}".format(request.args.get("oauth_token"),request.args.get("oauth_verifier")))
params = read_params(r.text)
resource_owner_key = params["oauth_token"]
resource_owner_secret = params["oauth_token_secret"]
screen_name = params["screen_name"]
writeFile(dir_path + "/token.json",dumps({"oauth_token": resource_owner_key, "oauth_secret": resource_owner_secret, "screen_name": screen_name}))
def reqToken():
client_key = config['client_key']
client_secret = config['client_secret']
oauth = OAuth1(client_key, client_secret=client_secret)
r = post(url="https://api.twitter.com/oauth/request_token", auth=oauth)
return read_params(r.text)["oauth_token"]
def getNewImage():
token = open(dir_path + "/token.json", "r")
token_ = loads(token.read())
token.close()
oauth = OAuth1(client_key=config['client_key'], client_secret=config['client_secret'], resource_owner_key=token_["oauth_token"], resource_owner_secret=token_["oauth_secret"])
r = get("https://api.twitter.com/1.1/favorites/list.json?screen_name=" + token_["screen_name"] + "&include_entities=true&tweet_mode=extended",auth=oauth)
response = r.json()
if ("latest" not in token_):
token_["latest"] = response[1]["id"]
    for tweet in response:  # renamed from `post` to avoid shadowing requests.post
        if ("media" in tweet["entities"] and tweet["id"] > token_["latest"]):
            for media in tweet["entities"]["media"]:
                send_Webhook(config["discord_webhook"],dumps({"content":media["media_url_https"]}))
    token_["latest"] = response[0]["id"]
writeFile(dir_path + "/token.json",dumps(token_))
sleep(float(config['timeout']))
|
python
|
import gmaps2geojson
writer = gmaps2geojson.Writer()
writer.query("2131 7th Ave, Seattle, WA 98121", "900 Poplar Pl S, Seattle, WA 98144")
writer.query("900 Poplar Pl S, Seattle, WA 98144", "219 Broadway E, Seattle, WA 98102")
writer.save("example.geojson")
|
python
|
"""Этот модуль запускает работу. Работа должна быть асинхронной, модуль запускает и управляет потоками внутри(?)"""
import asyncio
import time
import random
from ss_server_handler import new_order_handler
from PBM_main import TodaysOrders
from controllers_handler import qr_code_alarm, oven_alarm
from settings import QT_DISH_PER_ORDER
def start_testing(equipment_status):
"""Тут вызываем методы контролеров по тестированию оборудования"""
status = equipment_status
equipment_data = {}
return status, equipment_data
async def ss_server(today_orders):
"""Это курутина запускает сервер, мониторит и обрабатывает сообщения от SS
ВОПРОС: запуск сервера должен быть отдельно или тут?
- уведомление о новом заказе
- запрос оставшегося времени работы
"""
# эмуляция поступления заказа
while True:
print("Работает ss_server", time.time())
n = random.randint(1, 50)
print("SS Ждет", n)
await asyncio.sleep(n)
new_order = {"refid": (23 + n), "dishes": [(2, 4, 6, 7), (1, 2, 4, 5)]}
await new_order_handler(new_order, today_orders, QT_DISH_PER_ORDER)
async def controllers_alert_handler(today_orders):
"""Эта курутина обрабатывает уведомления от контроллеров: отказ оборудования и qr код
Можно тут запустить методы мониторинга Арсения."""
while True:
print("Переключились в контролеры", time.time())
qr_code = asyncio.create_task(qr_code_alarm(today_orders))
oven_alarm_id = asyncio.create_task(oven_alarm(today_orders))
await asyncio.gather(qr_code, oven_alarm_id)
# при приостановке нужно заблокировать qr код
async def cooking(today_orders):
"""Эта курутина обеспеивает вызов методов по приготовлению блюд и другой важной работе"""
while True:
print("Работает cooking", time.time())
print("В списке на выдачу", today_orders.orders_requested_for_delivery)
if today_orders.is_cooking_paused:
await today_orders.cooking_pause_handler()
# print("Приостанавливаем работу")
# await asyncio.sleep(10)
elif today_orders.orders_requested_for_delivery:
await today_orders.dish_delivery()
elif today_orders.current_dishes_proceed.keys():
print("Начинаем готовить")
_, current_dish = today_orders.current_dishes_proceed.popitem()
await current_dish.start_dish_cooking(today_orders)
else:
print("Dancing 3 secs")
await asyncio.sleep(3)
print()
async def create_cooking_tasks(today_orders):
"""В этой функции курутины формируются в таски"""
ss_task = asyncio.create_task(ss_server(today_orders))
controllers_task = asyncio.create_task(controllers_alert_handler(today_orders))
cooking_task = asyncio.create_task(cooking(today_orders))
await asyncio.gather(ss_task, controllers_task, cooking_task)
def start_cooking(equipment_data):
"""Эта функция инициирует все необходимое для работы. Пока создание экземпляра класса TodaysOrders"""
today_orders = TodaysOrders()
if today_orders:
asyncio.run(create_cooking_tasks(today_orders))
def pause_cooking():
today_orders.pause_cooking = True
def start_cooking_after_pause():
today_orders.pause_cooking = False
if __name__ == "__main__":
equipment_data = {}
today_orders = start_cooking(equipment_data)
if today_orders:
asyncio.run(create_cooking_tasks())
|
python
|
import uuid
from datetime import datetime
from pydantic import BaseModel, EmailStr, Field
class UserBase(BaseModel):
id: uuid.UUID = Field(default_factory=uuid.uuid4)
username: str
email: EmailStr
class Config:
arbitrary_types_allowed = True
class UserCreate(UserBase):
register_date: datetime = Field(default_factory=datetime.now)
password: str
class UserUpdate(BaseModel):
password: str
class UserInDBBase(UserBase):
register_date: datetime = Field(default_factory=datetime.now)
class User(UserInDBBase):
pass
class UserInDB(UserInDBBase):
hashed_password: str
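# --- Added usage sketch (not part of the original module); EmailStr needs the
# `email-validator` package, and the values below are placeholders.
# user = UserCreate(username="alice", email="alice@example.com", password="s3cret")
# print(user.id, user.register_date)  # both filled in by the default factories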
|
python
|
valores = []
for contador in range(0, 5):
    valor = int(input('Enter a value: '))
    if contador == 0 or valor > valores[-1]:
        valores.append(valor)
        print('Added at the end of the list...')
    else:
        posicao = 0
        while posicao < len(valores):
            if valor <= valores[posicao]:
                valores.insert(posicao, valor)
                print(f'Added at position {posicao} of the list...')
                break
            posicao += 1
print('-=' * 30)
print(f'The values entered, in order, were {valores}')
|
python
|
import os, sys, cStringIO
class Image_Fonts():
    def Fonts_Defaults(self):
        fontlist = [
            '/usr/share/fonts/truetype/freefont/FreeSerif.ttf',
            '/usr/share/fonts/truetype/freefont/FreeSans.ttf',
            '/usr/share/fonts/truetype/freefont/FreeMono.ttf'
        ]
        fontpath = '.'
        for f in fontlist:
            if os.path.exists(f):
                fontpath = fontpath + ':' + os.path.dirname(f)
                self.Font = os.path.abspath(f)
                break
        os.environ["GDFONTPATH"] = fontpath  # use the computed path, not the literal string
        try:
            self.Font
        except AttributeError:  # a missing attribute raises AttributeError, not NameError
            print "no fonts found"
            sys.exit(1)
|
python
|
import itertools
from django.http import HttpRequest, HttpResponse
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from django.utils.text import slugify
from .jalali import Gregorian, datetime_to_str, mounth_number_to_name, en_to_fa_numbers
# create unique slug.
def unique_slug(title, max_length, model_name):
slug_candidate = slug_original = slugify(title, allow_unicode=True)[:max_length - 2]
for i in itertools.count(1):
if not model_name.objects.filter(slug=slug_candidate).exists():
break
slug_candidate = f'{slug_original}-{i}'
return slug_candidate
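# Illustrative call (added; `Article` is a hypothetical model with a `slug` field):
# slug = unique_slug("My first post", max_length=50, model_name=Article)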
def to_jalali(time):
time = timezone.localtime(time)
calender_to_str = datetime_to_str(time)
jalali_format = Gregorian(calender_to_str).persian_tuple()
month_name = mounth_number_to_name(jalali_format[1])
combined_str = "{} {} {}, ساعت {}:{}".format(
jalali_format[2],
month_name,
jalali_format[0],
time.hour,
time.minute,
)
final_str = en_to_fa_numbers(combined_str)
return final_str
# being author/staff/superuser
def check_author_staff_superuser(request: HttpRequest) -> bool:
if request.user.is_authenticated:
if (
request.user.is_superuser
or request.user.is_staff
or request.user.is_author
):
return True
return False
# being owner of that article/staff/superuser
def check_owner_staff_superuser(request: HttpRequest, article) -> HttpRequest:
    if request.user.is_authenticated:
        if (request.user.is_superuser
                or request.user.is_staff
                or (request.user.is_author
                    and article.author == request.user)
        ):
request.is_ok = True
return request
raise PermissionDenied
def check_staff_superuser(request: HttpRequest) -> bool:
if request.user.is_authenticated:
if (
request.user.is_staff
or request.user.is_superuser
):
return True
return False
|
python
|
from hask.lang import build_instance
from hask.lang import sig
from hask.lang import H
from hask.lang import t
from hask.Control.Applicative import Applicative
from hask.Control.Monad import Monad
from .Foldable import Foldable
from .Functor import Functor
class Traversable(Foldable, Functor):
"""
Functors representing data structures that can be traversed from left to
right.
Dependencies:
Foldable, Functor
Attributes:
traverse, sequenceA, mapM, sequence
Minimal complete definition:
traverse
"""
@classmethod
def make_instance(typeclass, cls, traverse, sequenceA=None, mapM=None,
sequence=None):
attrs = {"traverse":traverse, "sequenceA":sequenceA, "mapM":mapM,
"sequence":sequence}
build_instance(Traversable, cls, attrs)
return
@sig(H[(Applicative, "f"), (Traversable, "t")]/
(H/ "a" >> t("f", "b")) >> t("t", "a") >> t("f", t("t", "b")))
def traverse(f, t):
"""
``traverse :: (Traversable t, Applicative f) => (a -> f b) -> t a -> f (t b)``
    Map each element of a structure to an action, evaluate these actions
    from left to right, and collect the results. For a version that ignores
    the results see traverse_.
"""
return Traversable[t].traverse(f, t)
@sig(H[(Applicative, "f"), (Traversable, "t")]/
t("t", t("f", "a")) >> t("f", t("t", "a")))
def sequenceA(t):
"""
``sequenceA :: (Traversable t, Applicative f) => t (f a) -> f (t a)``
    Evaluate each action in the structure from left to right, and collect
    the results. For a version that ignores the results see sequenceA_.
"""
return Traversable[t].sequenceA(t)
@sig(H[(Monad, "m"), (Traversable, "t")]/
(H/ "a" >> t("m", "b")) >> t("t", "a") >> t("m", t("t", "b")))
def mapM(f, m):
"""
``mapM :: (Traversable t, Monad m) => (a -> m b) -> t a -> m (t b)``
Map each element of a structure to a monadic action, evaluate these actions
from left to right, and collect the results. For a version that ignores the
results see mapM_.
"""
    return Traversable[m].mapM(f, m)
@sig(H[(Monad, "m"), (Traversable, "t")]/
t("t", t("m", "a")) >> t("m", t("t", "a")))
def sequence(t):
"""
``sequence :: (Traversable t, Monad m) => t (m a) -> m (t a)``
Evaluate each monadic action in the structure from left to right, and
collect the results. For a version that ignores the results see sequence_.
"""
return Traversable[t].sequence(t)
#=============================================================================#
# Utility functions
@sig(H[(Applicative, "f"), (Traversable, "t")]/
t("t", "a") >> (H/ "a" >> t("f", "b")) >> t("f", t("t", "b")))
def for1(t, f):
"""
``for1 :: (Traversable t, Applicative f) => t a -> (a -> f b) -> f (t b)``
for1 is traverse with its arguments flipped. For a version that ignores the
results see for1_.
"""
return traverse(f, t)
@sig(H[(Monad, "m"), (Traversable, "t")]/
t("t", "a") >> (H/ "a" >> t("m", "b")) >> t("m", t("t", "b")))
def forM(t, f):
"""
``forM :: (Traversable t, Monad m) => t a -> (a -> m b) -> m (t b)``
forM is mapM with its arguments flipped. For a version that ignores the
results see forM_.
"""
return mapM(f, t)
@sig(H[(Traversable, "t")]/ (H/ "a" >> "b" >> ("a", "c")) >> "a" >> t("t", "b")
>> ("a", t("t", "c")))
def mapAccumL(f, a, tb):
"""
``mapAccumL :: Traversable t => (a -> b -> (a, c)) -> a -> t b -> (a, t c)``
The mapAccumL function behaves like a combination of fmap and foldl; it
applies a function to each element of a structure, passing an accumulating
parameter from left to right, and returning a final value of this
accumulator together with the new structure.
"""
raise NotImplementedError()
@sig(H[(Traversable, "t")]/ (H/ "a" >> "b" >> ("a", "c")) >> "a" >> t("t", "b")
>> ("a", t("t", "c")))
def mapAccumR(f, a, tb):
"""
``mapAccumR :: Traversable t => (a -> b -> (a, c)) -> a -> t b -> (a, t c)``
The mapAccumR function behaves like a combination of fmap and foldr; it
applies a function to each element of a structure, passing an accumulating
parameter from right to left, and returning a final value of this
accumulator together with the new structure.
"""
raise NotImplementedError()
|
python
|
from random import choice
from xkcdpass import xkcd_password
class XKCD:
delimiters_numbers = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
delimiters_full = ["!", "$", "%", "^", "&", "*", "-", "_", "+", "=",
":", "|", "~", "?", "/", ".", ";"] + delimiters_numbers
def __init__(self, filename: str):
self.wordlist = xkcd_password.generate_wordlist(
wordfile=filename, valid_chars="[a-z]", min_length=4, max_length=10,
)
def weak(self):
# 2 words, no separators between words
return xkcd_password.generate_xkcdpassword(self.wordlist, numwords=2, delimiter="", )
def normal(self):
# 3 words, random CAPITALIZATION, random number as separator between words
return xkcd_password.generate_xkcdpassword(
self.wordlist, numwords=3, case="random", random_delimiters=True, valid_delimiters=self.delimiters_numbers
)
def strong(self):
# Same as normal_pwd, but 4 words
return xkcd_password.generate_xkcdpassword(
self.wordlist, numwords=4, case="random", random_delimiters=True, valid_delimiters=self.delimiters_full
)
def custom(self, count: int, separators: bool, prefixes: bool):
"""
Custom password generation
:param count: number of words in password
:param separators: bool, whether words must be separated with delimiters
:param prefixes: bool, whether there must be chars from delimiters list in front and in back
:return: generated custom password
"""
pwd = xkcd_password.generate_xkcdpassword(
self.wordlist, numwords=count, case="random", delimiter="",
random_delimiters=separators, valid_delimiters=self.delimiters_full
)
if prefixes == separators:
return pwd
elif separators and not prefixes:
return pwd[1:-1]
elif prefixes and not separators:
return f"{choice(self.delimiters_full)}{pwd}{choice(self.delimiters_full)}"
|
python
|
# -*- coding: utf-8 -*-
'''
Created on : Thursday 18 Jun, 2020 : 00:47:36
Last Modified : Sunday 22 Aug, 2021 : 23:52:53
@author : Adapted by Rishabh Joshi from Original ASAP Pooling Code
Institute : Carnegie Mellon University
'''
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import GCNConv, global_mean_pool, GATConv
from torch_scatter import scatter_mean, scatter_max
import pdb
import math, pdb
import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_scatter import scatter_add, scatter_max
from torch_geometric.nn import GCNConv
from torch_geometric.utils import add_remaining_self_loops, remove_self_loops, softmax
from torch_geometric.nn.pool.topk_pool import topk
from torch_sparse import coalesce
from torch_sparse import transpose
from torch_sparse import spspmm
# torch.set_num_threads(1)
def StAS(index_A, value_A, index_S, value_S, device, N, kN):
r"""StAS: a function which returns new edge weights for the pooled graph using the formula S^{T}AS"""
index_A, value_A = coalesce(index_A, value_A, m=N, n=N)
index_S, value_S = coalesce(index_S, value_S, m=N, n=kN)
index_B, value_B = spspmm(index_A, value_A, index_S, value_S, N, N, kN)
index_St, value_St = transpose(index_S, value_S, N, kN)
index_B, value_B = coalesce(index_B, value_B, m=N, n=kN)
index_E, value_E = spspmm(index_St.cpu(), value_St.cpu(), index_B.cpu(), value_B.cpu(), kN, N, kN)
return index_E.to(device), value_E.to(device)
def graph_connectivity(device, perm, edge_index, edge_weight, score, ratio, batch, N):
r"""graph_connectivity: is a function which internally calls StAS func to maintain graph connectivity"""
kN = perm.size(0)
perm2 = perm.view(-1, 1)
# mask contains uint8 mask of edges which originate from perm (selected) nodes
mask = (edge_index[0]==perm2).sum(0, dtype=torch.uint8)
# create the S
S0 = edge_index[1][mask].view(1, -1)
S1 = edge_index[0][mask].view(1, -1)
index_S = torch.cat([S0, S1], dim=0)
value_S = score[mask].detach().squeeze()
save_index_S = index_S.clone()
save_value_S = value_S.clone()
# relabel for pooling ie: make S [N x kN]
n_idx = torch.zeros(N, dtype=torch.long)
n_idx[perm] = torch.arange(perm.size(0))
index_S[1] = n_idx[index_S[1]]
# create A
index_A = edge_index.clone()
if edge_weight is None:
value_A = value_S.new_ones(edge_index[0].size(0))
else:
value_A = edge_weight.clone()
fill_value=1
index_E, value_E = StAS(index_A, value_A, index_S, value_S, device, N, kN)
index_E, value_E = remove_self_loops(edge_index=index_E, edge_attr=value_E)
index_E, value_E = add_remaining_self_loops(edge_index=index_E, edge_weight=value_E,
fill_value=fill_value, num_nodes=kN)
return index_E, value_E, save_index_S, save_value_S
class ASAP_Pooling(torch.nn.Module):
def __init__(self, in_channels, ratio, dropout_att=0, negative_slope=0.2):
super(ASAP_Pooling, self).__init__()
self.in_channels = in_channels
self.ratio = ratio
self.negative_slope = negative_slope
self.dropout_att = dropout_att
self.lin_q = Linear(in_channels, in_channels)
self.gat_att = Linear(2*in_channels, 1)
self.gnn_score = LEConv(self.in_channels, 1) # gnn_score: uses LEConv to find cluster fitness scores
self.gnn_intra_cluster = GCNConv(self.in_channels, self.in_channels) # gnn_intra_cluster: uses GCN to account for intra cluster properties, e.g., edge-weights
self.reset_parameters()
def reset_parameters(self):
self.lin_q.reset_parameters()
self.gat_att.reset_parameters()
self.gnn_score.reset_parameters()
self.gnn_intra_cluster.reset_parameters()
def forward(self, x, edge_index, edge_weight=None, batch=None):
x2 = x.clone(); edge_index2 = edge_index.clone(); batch2 = batch.clone()
if batch is None:
batch = edge_index.new_zeros(x.size(0))
# NxF
x = x.unsqueeze(-1) if x.dim() == 1 else x
# Add Self Loops
fill_value = 1
num_nodes = scatter_add(batch.new_ones(x.size(0)), batch, dim=0)
edge_index, edge_weight = add_remaining_self_loops(edge_index=edge_index, edge_weight=edge_weight,
fill_value=fill_value, num_nodes=num_nodes.sum())
N = x.size(0) # total num of nodes in batch
# ExF
x_pool = self.gnn_intra_cluster(x=x, edge_index=edge_index, edge_weight=edge_weight)
x_pool_j = x_pool[edge_index[1]]
x_j = x[edge_index[1]]
#---Master query formation---
# NxF
X_q, _ = scatter_max(x_pool_j, edge_index[0], dim=0)
# NxF
M_q = self.lin_q(X_q)
# ExF
M_q = M_q[edge_index[0].tolist()]
score = self.gat_att(torch.cat((M_q, x_pool_j), dim=-1))
score = F.leaky_relu(score, self.negative_slope)
score = softmax(score, edge_index[0], num_nodes=num_nodes.sum())
att_wts = score.clone()
# Sample attention coefficients stochastically.
score = F.dropout(score, p=self.dropout_att, training=self.training)
# ExF
v_j = x_j * score.view(-1, 1)
#---Aggregation---
# NxF
out = scatter_add(v_j, edge_index[0], dim=0)
#---Cluster Selection
# Nx1
fitness = torch.sigmoid(self.gnn_score(x=out, edge_index=edge_index)).view(-1)
perm = topk(x=fitness, ratio=self.ratio, batch=batch)
x = out[perm] * fitness[perm].view(-1, 1)
#---Maintaining Graph Connectivity
batch = batch[perm]
edge_index, edge_weight, S_index, S_weight = graph_connectivity(
device = x.device,
perm=perm,
edge_index=edge_index,
edge_weight=edge_weight,
score=score,
ratio=self.ratio,
batch=batch,
N=N)
return x, edge_index, edge_weight, batch, perm, S_index, S_weight, att_wts
def __repr__(self):
return '{}({}, ratio={})'.format(self.__class__.__name__, self.in_channels, self.ratio)
def readout(x, batch):
x_mean = scatter_mean(x, batch, dim=0)
x_max, _ = scatter_max(x, batch, dim=0)
return torch.cat((x_mean, x_max), dim=-1)
class ASAP_Pool(torch.nn.Module):
'''
Code Modified by Rishabh Joshi
Original code from http://github.com/malllabiisc/ASAP
'''
def __init__(self, config, strat_or_da):
#def __init__(self, dataset, num_layers, hidden, ratio=0.8, **kwargs):
super(ASAP_Pool, self).__init__()
ratio = config.ratio # 0.8
if strat_or_da == 'da':
node_features = len(config.da_lbl2id)
else:
node_features = len(config.negotiation_lbl2id)
num_features = node_features
#node_features = config.node_feats # num_strat
hidden = config.graph_hidden # 64
dropout_att = config.graph_drop # 0.0
num_layers = config.graph_layers # 3
self.graph_model= config.graph_model # 'gcn'
if type(ratio)!=list:
ratio = [ratio for i in range(num_layers)]
if not config.node_embed:
self.embeddings = torch.nn.Embedding(num_features, num_features, padding_idx=-1) # Embeddings for the strategies (num_features is num_strategies)
self.embeddings.weight = torch.nn.Parameter(torch.FloatTensor(np.diag(np.diag(np.ones((num_features, num_features)))))) # diag matrix of 1 hot
node_features = num_features
self.embeddings.weight.requires_grad = True
# TODO NO TRAIN
else:
self.embeddings = torch.nn.Embedding(num_features, node_features, padding_idx=-1) # Embeddings for the strategies (num_features is num_strategies)
if self.graph_model == 'gcn':
self.conv1 = GCNConv(node_features, hidden)
elif self.graph_model == 'gat':
self.conv1 = GATConv(node_features, hidden, heads = config.num_heads)
else:
raise NotImplementedError
self.pool1 = ASAP_Pooling(in_channels=hidden*config.num_heads, ratio=ratio[0], dropout_att=dropout_att)
self.convs = torch.nn.ModuleList()
self.pools = torch.nn.ModuleList()
for i in range(num_layers - 1):
if self.graph_model == 'gcn':
self.convs.append(GCNConv(hidden, hidden))
elif self.graph_model == 'gat':
self.convs.append(GATConv(hidden, hidden, heads = config.num_heads))
else:
raise NotImplementedError
self.pools.append(ASAP_Pooling(in_channels=hidden * config.num_heads, ratio=ratio[i], dropout_att=dropout_att))
self.lin1 = Linear(2*hidden * config.num_heads, hidden) # 2*hidden due to readout layer
self.lin2 = Linear(hidden, num_features - 1) # projection layer -> -1 for <start>
self.reset_parameters()
self.strat_or_da = strat_or_da
self.undirected = config.undirected
self.self_loops = config.self_loops
self.num_heads = config.num_heads
try:
if 'only_hidden' in config:
self.only_hidden = config.only_hidden
else:
self.only_hidden = False
except:
self.only_hidden = False
@property
def device(self) -> torch.device:
return next(self.parameters()).device
def set_embeddings(self, np_embedding):
#self.embeddings.weight.data.copy_(torch.from_numpy(np_embedding))
assert np_embedding.shape == self.embeddings.weight.shape
self.embeddings.weight = torch.nn.Parameter(torch.FloatTensor(np_embedding))
def reset_parameters(self):
self.conv1.reset_parameters()
self.pool1.reset_parameters()
for conv, pool in zip(self.convs, self.pools):
conv.reset_parameters()
pool.reset_parameters()
self.lin1.reset_parameters()
self.lin2.reset_parameters()
#def forward(self, data, return_extra = False):
# data = data['input_graph']
# x, edge_index, batch = data.x, data.edge_index, data.batch # x is num_graph x 1
# x = self.embeddings(x.squeeze(1)) # added # x is num_graph x node_feats / 22
# x = F.relu(self.conv1(x, edge_index)); #import pdb; pdb.set_trace() # x: num_graph x 64, 2 x 21252 -> more dense, whwereas x graphs goes down
# x, edge_index, edge_weight, batch, perm, S_index, S_weight, att_wts = self.pool1(x=x, edge_index=edge_index, edge_weight=None, batch=batch)
# save_perm = perm.clone()
# xs = readout(x, batch)
# for conv, pool in zip(self.convs, self.pools):
# if self.graph_model == 'gcn':
# x = F.relu(conv(x=x, edge_index=edge_index, edge_weight=edge_weight))
# elif self.graph_model == 'gat':
# x = F.relu(conv(x=x, edge_index=edge_index))
# else:
# raise NotImplementedError
# x, edge_index, edge_weight, batch, perm, _, _, _ = pool(x=x, edge_index=edge_index, edge_weight=edge_weight, batch=batch) # IGNORING S OF FUTURE LAYERS
# xs += readout(x, batch)
# x = F.relu(self.lin1(xs))
# if self.only_hidden:
# return x
# x = F.dropout(x, p=0.0, training=self.training)
# x = self.lin2(x)
# #out = F.log_softmax(x, dim=-1)
# # x is logits
# # dont need mask here to calculate loss
# logitloss = self.logitcriterion(x, data.y)
# return logitloss, 0.0, x, 0.0, (S_index, S_weight, att_wts, save_perm)
def forward(self, feats, utt_mask, return_extra = True):
# CREATE GRAPH DATA HERE
#pdb.set_trace()
from torch_geometric.data import Batch
num_conv=feats.shape[0]
data_list = []
for i in range(num_conv):
data_list += self.convert_strategyvec_to_graph(feats[i], utt_mask[i])
#data_list = self.convert_strategyvec_to_graph(feats)
batch_graph = Batch.from_data_list(data_list).to(feats.device)
num_utt = feats.shape[1]
num_strategies = feats.shape[2]
x, edge_index, batch = batch_graph.x, batch_graph.edge_index, batch_graph.batch
#x, edge_index, batch = data.x, data.edge_index, data.batch # x is num_graph x 1
#pdb.set_trace()
# if torch.max(x) > self.num_strat:
# pdb.set_trace()#print (x.squeeze(1))
x = self.embeddings(x.squeeze(1)) # added # x is num_graph x node_feats / 22
if self.graph_model == 'gcn':
x = F.relu(self.conv1(x, edge_index)); # import pdb; pdb.set_trace() # x: num_graph x 64, 2 x 21252 -> more dense, whwereas x graphs goes down
else:
# THIS PART#################
#pdb.set_trace()
#x, gat_attn_wts = self.conv1(x, edge_index, return_attention_weights=True)
x = self.conv1(x, edge_index)
gat_attn_wts = self.conv1.attention_score
x = F.relu(x)
x, edge_index, edge_weight, batch, perm, S_index, S_weight, att_wts = self.pool1(x=x, edge_index=edge_index, edge_weight=None, batch=batch)
save_perm = perm.clone()
xs = readout(x, batch)
for conv, pool in zip(self.convs, self.pools):
if self.graph_model == 'gcn':
x = F.relu(conv(x=x, edge_index=edge_index, edge_weight=edge_weight))
elif self.graph_model == 'gat':
x = F.relu(conv(x=x, edge_index=edge_index))
else:
raise NotImplementedError
x, edge_index, edge_weight, batch, perm, _, _, _ = pool(x=x, edge_index=edge_index, edge_weight=edge_weight, batch=batch) # IGNORING S OF FUTURE LAYERS
xs += readout(x, batch)
x = F.relu(self.lin1(xs))
if self.only_hidden:
return x
x = F.dropout(x, p=0.0, training=self.training)
logits = self.lin2(x)#.view(1, num_utt, -1)
#outputs, _ = self.gru(feats, None)
#logits = self.projection_layer(self.relu(outputs))
#logits = logits[:, -1, :].view(1, -1)
if return_extra:
if self.graph_model == 'gat':
return logits, batch_graph.y, (S_index, S_weight, att_wts, save_perm, batch_graph, gat_attn_wts)
else:
return logits, batch_graph.y, (S_index, S_weight, att_wts, save_perm, batch_graph, gat_attn_wts)
return logits, batch_graph.y, None
def convert_strategyvec_to_graph(self, strategies_vec, utt_maski):
'''
Takes a strategies vector and converts it to a list of torch_geometric.data Data items
'''
from torch_geometric.data import Data
#pdb.set_trace()
device = strategies_vec.device
graph_data = []
adj_x, adj_y = [], []
# skip for time step 0
# lower triangle useful
total_rows = 0
for i in range(len(strategies_vec)):
#adj_y.append(np.array(strategies_vec[i + 1]))
num_strategies_in_turn = int(torch.sum(strategies_vec[i]))
new_matrix = np.zeros((total_rows + num_strategies_in_turn, total_rows + num_strategies_in_turn))#.to(device)
new_strategies = np.zeros((total_rows + num_strategies_in_turn, 1))#, dtype=torch.long).to(device)
if i != 0:
new_matrix[: total_rows, : total_rows] = adj_x[i - 1]['matrix'] # copy prev matrix
new_strategies[: total_rows] = adj_x[i - 1]['strategies']
curr_row = total_rows
##stdinturn=0
for stidx, sval in enumerate(strategies_vec[i]):
if sval == 0: continue
new_strategies[curr_row, 0] = stidx
# new_strategies.append(stidx)
new_matrix[curr_row, : total_rows] = 1 # connecting to all in lower half except self
##new_matrix[curr_row, total_rows + stdinturn] = 1
##stdinturn+=1
curr_row += 1
total_rows = curr_row
#new_matrix = torch.LongTensor(new_matrix).to(device)
#new_strategies = torch.LongTensor(new_strategies).to(device)
adj_x.append({
'matrix': new_matrix,
'strategies': new_strategies
})
x = torch.LongTensor(new_strategies).to(device) # (num_strategies, 1) for now. Later will do embedding lookup
edge_index = self.get_edge_index_from_adj_matrix(torch.LongTensor(new_matrix).to(device))
#y = torch.FloatTensor(np.array(strategies_vec[i + 1]).reshape(1, -1))
#y = torch.FloatTensor(np.array(strategies_vec[i]).reshape(1, -1))
try:
y = strategies_vec[i+1, :-1].reshape(1, -1) # -1 for start
except:
y = strategies_vec[0, :-1].reshape(1, -1)#None
#y= None
graph_data.append(Data(x=x, edge_index=edge_index, y=y))
#if i+2 == len(strategies_vec) or utt_maski[i+2] == 0: break
if i+1 == len(strategies_vec) or utt_maski[i+1] == 0: break
return graph_data
def get_edge_index_from_adj_matrix(self, adj_matrix):
from torch_geometric.utils.sparse import dense_to_sparse
from torch_geometric.utils.undirected import to_undirected
from torch_geometric.utils.loop import add_self_loops
edge_index, edge_value = dense_to_sparse(adj_matrix)
undirected = self.undirected
self_loops = self.self_loops
if edge_index.shape[1] != 0 and undirected:
edge_index = to_undirected(edge_index)
if edge_index.shape[1] != 0 and self_loops:
edge_index, _ = add_self_loops(edge_index)
return edge_index
def __repr__(self):
return self.__class__.__name__
import torch
from torch.nn import Parameter
from torch_geometric.utils import remove_self_loops, add_self_loops
from torch_scatter import scatter_add
from torch_geometric.nn.inits import uniform
class LEConv(torch.nn.Module):
r"""Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, in_channels, out_channels, bias=True):
super(LEConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.lin1 = torch.nn.Linear(in_channels, out_channels, bias=bias)
self.lin2 = torch.nn.Linear(in_channels, out_channels, bias=bias)
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
self.lin1.reset_parameters()
self.lin2.reset_parameters()
def forward(self, x, edge_index, edge_weight=None, size=None):
""""""
num_nodes = x.shape[0]
h = torch.matmul(x, self.weight)
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1),),
dtype=x.dtype,
device=edge_index.device)
edge_index, edge_weight = remove_self_loops(edge_index=edge_index, edge_attr=edge_weight)
deg = scatter_add(edge_weight, edge_index[0], dim=0, dim_size=num_nodes) # + 1e-10
h_j = edge_weight.view(-1, 1) * h[edge_index[1]]
aggr_out = scatter_add(h_j, edge_index[0], dim=0, dim_size=num_nodes)
out = (deg.view(-1, 1) * self.lin1(x) + aggr_out) + self.lin2(x)
edge_index, edge_weight = add_self_loops(edge_index=edge_index, edge_weight=edge_weight, num_nodes=num_nodes)
return out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
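# --- Added shape smoke test (not part of the original file): 4 nodes, 2 directed
# edges, 8 input channels scored down to 1 value per node.
if __name__ == "__main__":
    conv = LEConv(8, 1)
    x = torch.randn(4, 8)
    edge_index = torch.tensor([[0, 1], [2, 3]])
    print(conv(x, edge_index).shape)  # torch.Size([4, 1])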
|
python
|
# License: MIT
# Author: Karl Stelzner
import os
import sys
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
# numpy removed random_integers; shim it with randint (inclusive upper bound)
def random_integers(low, high):
    return np.random.randint(low, high + 1)
from PIL import Image
from torch.utils.data._utils.collate import default_collate
import json
def progress_bar(count, total, status=''):
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
sys.stdout.flush()
def make_sprites(n=50000, height=64, width=64):
images = np.zeros((n, height, width, 3))
counts = np.zeros((n,))
print('Generating sprite dataset...')
for i in range(n):
num_sprites = random_integers(0, 2)
counts[i] = num_sprites
for j in range(num_sprites):
pos_y = random_integers(0, height - 12)
pos_x = random_integers(0, width - 12)
scale = random_integers(12, min(16, height-pos_y, width-pos_x))
cat = random_integers(0, 2)
sprite = np.zeros((height, width, 3))
if cat == 0: # draw circle
center_x = pos_x + scale // 2.0
center_y = pos_y + scale // 2.0
for x in range(height):
for y in range(width):
dist_center_sq = (x - center_x)**2 + (y - center_y)**2
if dist_center_sq < (scale // 2.0)**2:
sprite[x][y][cat] = 1.0
elif cat == 1: # draw square
sprite[pos_x:pos_x + scale, pos_y:pos_y + scale, cat] = 1.0
else: # draw square turned by 45 degrees
center_x = pos_x + scale // 2.0
center_y = pos_y + scale // 2.0
for x in range(height):
for y in range(width):
if abs(x - center_x) + abs(y - center_y) < (scale // 2.0):
sprite[x][y][cat] = 1.0
images[i] += sprite
if i % 100 == 0:
progress_bar(i, n)
images = np.clip(images, 0.0, 1.0)
return {'x_train': images[:4 * n // 5],
'count_train': counts[:4 * n // 5],
'x_test': images[4 * n // 5:],
'count_test': counts[4 * n // 5:]}
class Sprites(Dataset):
def __init__(self, directory, n=50000, canvas_size=64,
train=True, transform=None):
np_file = 'sprites_{}_{}.npz'.format(n, canvas_size)
full_path = os.path.join(directory, np_file)
if not os.path.isfile(full_path):
            os.makedirs(directory, exist_ok=True)  # create the data directory if it does not exist
gen_data = make_sprites(n, canvas_size, canvas_size)
np.savez(full_path, **gen_data)
data = np.load(full_path)
self.transform = transform
self.images = data['x_train'] if train else data['x_test']
self.counts = data['count_train'] if train else data['count_test']
def __len__(self):
return self.images.shape[0]
def __getitem__(self, idx):
img = self.images[idx]
if self.transform is not None:
img = self.transform(img).float()
return img, self.counts[idx]
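# --- Added usage sketch (not part of the original file): torchvision's ToTensor is
# an assumed transform choice, not required by the class.
# from torchvision import transforms
# ds = Sprites('./data', n=100, canvas_size=64, train=True, transform=transforms.ToTensor())
# img, count = ds[0]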
class Clevr(Dataset):
def __init__(self, directory, train=True, transform=None):
self.images_path = directory + 'images/train/'
self.filenames = os.listdir(self.images_path)
json_path = directory + 'scenes/CLEVR_train_scenes.json'
with open(json_path) as json_file:
data = json.load(json_file)
self.labels = data['scenes']
self.n = len(self.filenames)
self.transform = transform
def __len__(self):
return self.n
def _name2idx(self, key, value):
if key == 'shape':
if value == 'cube':
return 0
elif value == 'cylinder':
return 1
elif value == 'sphere':
return 2
elif key == 'size':
if value == 'small':
return 0
elif value == 'large':
return 1
elif key == 'material':
if value == 'metal':
return 0
elif value == 'rubber':
return 1
elif key == 'color':
if value == 'red':
return 0
elif value == 'blue':
return 1
elif value == 'purple':
return 2
elif value == 'gray':
return 3
elif value == 'cyan':
return 4
elif value == 'brown':
return 5
elif value == 'yellow':
return 6
elif value == 'green':
return 7
elif key == '3d_coords':
return (np.array(value) + 3)/6
else:
return value
def __getitem__(self, idx):
#Image
imgpath = os.path.join(self.images_path, self.filenames[idx])
img = Image.open(imgpath)
if self.transform is not None:
img = self.transform(img).float()
#Label
image_idx = self.labels[idx]['image_index']
assert image_idx == idx
objects = self.labels[idx]['objects']
num_objects = len(objects)
assert num_objects != 0
keys = objects[0].keys()
label = {k:[] for k in keys}
for i in range(num_objects):
for k in keys:
label[k].append(self._name2idx(k, objects[i][k]))
for k in keys:
t = label[k]
label[k] = torch.as_tensor(t)
return img, label
class ClevrRela(Dataset):
def __init__(self, directory, train=True, transform=None):
self.images_path = directory + 'images/'
self.filenames = os.listdir(self.images_path)
json_path = directory + 'CLEVR_scenes.json'
with open(json_path) as json_file:
data = json.load(json_file)
self.labels = data['scenes']
self.n = len(self.filenames)
self.transform = transform
def __len__(self):
return self.n
def _name2idx(self, key, value):
if key == 'shape':
if value == 'cube':
return 0
elif value == 'cylinder':
return 1
elif value == 'sphere':
return 2
elif key == 'size':
if value == 'small':
return 0
elif value == 'large':
return 1
elif key == 'material':
if value == 'metal':
return 0
elif value == 'rubber':
return 1
elif key == 'color':
if value == 'red':
return 0
elif value == 'blue':
return 1
elif value == 'purple':
return 2
elif value == 'gray':
return 3
elif value == 'cyan':
return 4
elif value == 'brown':
return 5
elif value == 'yellow':
return 6
elif value == 'green':
return 7
elif key == '3d_coords':
return (np.array(value) + 3)/6
else:
return value
def __getitem__(self, idx):
#Image
imgpath = os.path.join(self.images_path, self.filenames[idx])
img = Image.open(imgpath)
if self.transform is not None:
img = self.transform(img).float()
#Label
image_idx = self.labels[idx]['image_index']
assert image_idx == idx
objects = self.labels[idx]['objects']
num_objects = len(objects)
assert num_objects != 0
keys = objects[0].keys()
label = {k:[] for k in keys}
for i in range(num_objects):
for k in keys:
label[k].append(self._name2idx(k, objects[i][k]))
for k in keys:
t = label[k]
label[k] = torch.as_tensor(t)
label['relation'] = torch.as_tensor(self.labels[idx]['relationships'])
return img, label
########################################################################
#
# ADDED
#
#########################################################################
class MultiObjectDataset(Dataset):
def __init__(self, data_path, train, split=0.9, transform = None):
super().__init__()
# Load data
data = np.load(data_path, allow_pickle=True)
# Rescale images and permute dimensions
x = np.asarray(data['x'], dtype=np.float32) / 255
x = np.transpose(x, [0, 3, 1, 2]) # batch, channels, h, w
# Get labels
try:
labels = data['labels'].item()
except:
labels = data['labels']
print(type(labels))
# Split train and test
split = int(split * len(x))
if train:
indices = range(split)
else:
indices = range(split, len(x))
# From numpy/ndarray to torch tensors (labels are lists of tensors as
# they might have different sizes)
self.x = torch.from_numpy(x[indices])
try:
labels.pop('text', None)
labels.pop('vertices', None)
except:
print("No text to pop !")
self.labels = self._labels_to_tensorlist(labels, indices)
@staticmethod
def _labels_to_tensorlist(labels, indices):
out = {k: [] for k in labels.keys()}
for i in indices:
for k in labels.keys():
t = labels[k][i]
t = torch.as_tensor(t)
out[k].append(t)
return out
def __getitem__(self, index):
x = self.x[index]
try:
labels = {k: self.labels[k][index] for k in self.labels.keys()}
except:
labels = self.labels
return x, labels
def __len__(self):
return self.x.size(0)
class MultiObjectDataLoader(DataLoader):
def __init__(self, *args, **kwargs):
assert 'collate_fn' not in kwargs
kwargs['collate_fn'] = self.collate_fn
super().__init__(*args, **kwargs)
@staticmethod
def collate_fn(batch):
# The input is a batch of (image, label_dict)
_, item_labels = batch[0]
keys = item_labels.keys()
# Max label length in this batch
# max_len[k] is the maximum length (in batch) of the label with name k
# If at the end max_len[k] is -1, labels k are (probably all) scalars
max_len = {k: -1 for k in keys}
# If a label has more than 1 dimension, the padded tensor cannot simply
# have size (batch, max_len). Whenever the length is >0 (i.e. the sequence
# is not empty, store trailing dimensions. At the end if 1) all sequences
# (in the batch, and for this label) are empty, or 2) this label is not
# a sequence (scalar), then the trailing dims are None.
trailing_dims = {k: None for k in keys}
# Make first pass to get shape info for padding
for _, labels in batch:
for k in keys:
try:
max_len[k] = max(max_len[k], len(labels[k]))
if len(labels[k]) > 0:
trailing_dims[k] = labels[k].size()[1:]
except TypeError: # scalar
pass
# For each item in the batch, take each key and pad the corresponding
# value (label) so we can call the default collate function
pad = MultiObjectDataLoader._pad_tensor
for i in range(len(batch)):
for k in keys:
if trailing_dims[k] is None:
continue
if k == 'relation':
size = [max_len[k], max_len[k]] + list(trailing_dims[k])[1:]
batch[i][1][k] = MultiObjectDataLoader._pad_tensor_relation(batch[i][1][k], size, value = 0.)
else:
size = [max_len[k]] + list(trailing_dims[k])
batch[i][1][k] = pad(batch[i][1][k], size)
return default_collate(batch)
@staticmethod
def _pad_tensor(x, size, value=None):
assert isinstance(x, torch.Tensor)
input_size = len(x)
if value is None:
value = float('nan')
# Copy input tensor into a tensor filled with specified value
# Convert everything to float, not ideal but it's robust
out = torch.zeros(*size, dtype=torch.float)
out.fill_(value)
if input_size > 0: # only if at least one element in the sequence
out[:input_size] = x.float()
return out
@staticmethod
def _pad_tensor_relation(x, size, value=None):
assert isinstance(x, torch.Tensor)
input_size = x.shape[:2]
if value is None:
value = float('nan')
# Copy input tensor into a tensor filled with specified value
# Convert everything to float, not ideal but it's robust
out = torch.zeros(*size, dtype=torch.float)
out.fill_(value)
#if input_size > 0: # only if at least one element in the sequence
out[:input_size[0], :input_size[1],:] = x.float()
return out
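# --- Added illustrative check of the padding protocol above (not in the original
# file): variable-length 'size' labels are padded to the batch maximum with NaN.
if __name__ == "__main__":
    img = torch.zeros(3, 8, 8)
    batch = [(img, {'size': torch.tensor([0, 1])}),
             (img, {'size': torch.tensor([1])})]
    imgs, labels = MultiObjectDataLoader.collate_fn(batch)
    print(labels['size'])  # second row is padded: [1., nan]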
|
python
|
import random
import numpy as np
class Particle():
    def __init__(self):
        self.position = np.array([(-1) ** (bool(random.getrandbits(1))) * random.random() * 50,
                                  (-1) ** (bool(random.getrandbits(1))) * random.random() * 50])
        self.pbest_position = self.position
        self.pbest_value = float('inf')
        self.velocity = np.array([0.0, 0.0])
    def __str__(self):
        # __str__ must return a string, not print it
        return "I am at {} and my pbest is {}".format(self.position, self.pbest_position)
    def move(self):
        self.position = self.position + self.velocity
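# --- Added usage sketch (not part of the original class): a full PSO loop would set
# the velocity from pbest/gbest terms; here we just nudge one particle by hand.
if __name__ == "__main__":
    p = Particle()
    p.velocity = np.array([1.0, -0.5])
    p.move()
    print(p)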
|
python
|
import sys
import os
import logging
import shutil
import inspect
from . import config
logger = logging.getLogger('carson')
_init = None
_debug = False
def initLogging(debug=False):
global _init, _debug
if _init is not None:
return
_init = True
_debug = debug
formatter = logging.Formatter('{asctime} {levelname[0]} {name} {message}', style='{')
handlers = [logging.StreamHandler(sys.stdout)]
ldir, lfile = config.get('log_dir'), config.get('log_file')
if ldir and lfile:
handlers.append(logging.FileHandler(os.path.join(ldir, lfile)))
for handler in handlers:
logger.addHandler(handler)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.setLevel(logging.DEBUG if debug else logging.INFO)
if not debug:
basic = logging.Formatter('{message}', style='{', datefmt='%m-%d %H:%M:%S')
handlers[0].setFormatter(basic)
def is_debug_enabled():
return logger.isEnabledFor(logging.DEBUG)
def log(lvl, *args, **kwargs):
msg = ''
if args and isinstance(args[0], str):
msg += f'{args[0]}'
args = tuple(args[1:])
logger.log(lvl, msg, *args, **kwargs)
def error(*args, **kwargs):
log(logging.ERROR, *args, **kwargs)
def warning(*args, **kwargs):
log(logging.WARNING, *args, **kwargs)
def info(*args, **kwargs):
log(logging.INFO, *args, **kwargs)
def debug(*args, **kwargs):
log(logging.DEBUG, *args, **kwargs)
COLS, ROWS = shutil.get_terminal_size((120, 80))
COLS -= 35
NoneType = type(None)
def logobject(obj, name=None, logger=print, multi_line_doc=False):
debug = logger
if hasattr(debug, 'debug'):
debug = debug.debug
debug(f'{"=" * 5} {name or "logobj"} {"=" * COLS * 2}'[:COLS])
otype = type(obj)
otname = f'{otype.__module__}.{otype.__name__}'
debug(f'obj {otname}')
try:
debug(f'file: {inspect.getfile(otype)}')
except TypeError:
pass
doc = (
inspect.getdoc(otype)
or inspect.getcomments(otype)
or inspect.getcomments(obj)
or 'No doc or coment'
)
if '\n' in doc:
doc = '\n'.join(f' {ln}' for ln in doc.split('\n'))
debug(doc)
gentle_items = {
'aiohttp.client_reqrep.ClientResponse': ['ok']
}
members = [
(attr, getattr(obj, attr))
for attr in dir(obj)
if not attr.startswith('__')
and attr not in gentle_items.get(otname, [])
]
gutter = max(20, max(len(attr) for attr, val in members) if members else 20)
is_a_funcs = [
(name[2:], func)
for name in dir(inspect)
if name.startswith('is')
and (func := getattr(inspect, name)) # noqa
and inspect.isfunction(func) # noqa
]
for attr, val in members:
        val = 'gentle' if attr in gentle_items.get(otname, []) else val
line = f'{attr: <{gutter}}'
val_type = type(val)
mname = val_type.__module__
tname = val_type.__name__ if val_type.__name__ not in ('builtin_function_or_method',) else ''
type_desc = f'{mname}.' if mname != 'builtins' else ''
type_desc += tname
if val_type in (NoneType, bool, int):
line += repr(val)
debug(line[:COLS])
continue
        if val_type in (str,) or type_desc in ('yarl.URL',):
line += f'{str(val)!r}'
debug(line[:COLS])
continue
isables = ', '.join(name for name, func in is_a_funcs if func(val))
if isables:
line += f'({isables}) '
if type_desc not in isables:
line += type_desc + ' '
if isinstance(val, dict):
line += '{'
entries = []
for dkey, dval in val.items():
parts = []
for part in (dkey, dval):
if isinstance(part, (NoneType, str, int)):
parts.append(repr(part))
else:
parts.append(type(part).__name__)
entries.append(':'.join(parts))
line += ', '.join(entries)
line += '}'
elif isinstance(val, (list, set, tuple)):
line += '('
line += ', '.join(
repr(part)
if isinstance(part, (NoneType, str, int))
else type(part).__name__
for part in val
)
line += ')'
else:
doc = (
inspect.getdoc(val)
or inspect.getcomments(val)
or ''
).strip()
if doc:
doc = doc.split('\n')
line += ': ' + doc[0]
doc = doc[1:] if multi_line_doc else []
while doc:
if line[:COLS].strip():
debug(line[:COLS])
line = f'{" ": <{gutter}}' + doc[0]
doc = doc[1:]
debug(line[:COLS])
debug(f'{"=" * 50}')
|
python
|
import sys
from setuptools import setup, find_packages
version = '0.1.1'
install_requires = [
'acme>=0.29.0',
'certbot>=1.1.0',
'azure-mgmt-resource',
'azure-mgmt-network',
'azure-mgmt-dns>=3.0.0',
'PyOpenSSL>=19.1.0',
'setuptools', # pkg_resources
'zope.interface'
]
if sys.version_info < (2, 7):
install_requires.append('mock<1.1.0')
else:
install_requires.append('mock')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='certbot-azure',
version=version,
description="Azure plugin for Certbot client",
url='https://github.com/dlapiduz/certbot-azure',
author="Diego Lapiduz",
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
    keywords=['certbot', 'azure', 'app_gateway', 'azure_dns'],
entry_points={
'certbot.plugins': [
'azure-agw = certbot_azure.azure_agw:Installer',
'dns-azure = certbot_azure.dns_azure:Authenticator',
],
},
)
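# A short sketch (run after installing this package) of how the Certbot plugin
# entry points declared above can be enumerated with pkg_resources:
#   import pkg_resources
#   for ep in pkg_resources.iter_entry_points('certbot.plugins'):
#       print(ep.name, '->', ep.module_name)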
|
python
|
import json
import jwt
from fastapi import Depends, HTTPException, Path, Query
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from fastapi.security.utils import get_authorization_scheme_param
from pydantic import BaseModel
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN, HTTP_404_NOT_FOUND
from . import enums
from .factory import app
auth_schema = HTTPBearer()
async def jwt_required(
request: Request, token: HTTPAuthorizationCredentials = Depends(auth_schema)
):
credentials_exception = HTTPException(HTTP_401_UNAUTHORIZED)
try:
payload = jwt.decode(token.credentials, app.admin_secret)
user_id = payload.get("user_id")
if user_id is None:
raise credentials_exception
except jwt.PyJWTError:
raise credentials_exception
request.scope["user_id"] = user_id
return user_id
async def jwt_optional(request: Request):
authorization: str = request.headers.get("Authorization")
scheme, credentials = get_authorization_scheme_param(authorization)
if credentials:
try:
payload = jwt.decode(credentials, app.admin_secret)
user_id = payload.get("user_id")
request.scope["user_id"] = user_id
return user_id
except jwt.PyJWTError:
pass
return
class QueryItem(BaseModel):
    page: int = 1
    size: int = 10
    sort: dict = {}
    where: dict = {}
    with_: dict = {}
class Config:
fields = {"with_": "with"}
def get_query(query=Query(...)):
query = json.loads(query)
return QueryItem.parse_obj(query)
def get_model(resource: str = Path(...)):
model = app.models.get(resource)
return model
async def parse_body(request: Request, resource: str = Path(...)):
body = await request.json()
resource = await app.get_resource(resource, exclude_pk=True, exclude_m2m_field=False)
resource_fields = resource.resource_fields.keys()
ret = {}
for key in resource_fields:
v = body.get(key)
if v is not None:
ret[key] = v
return ret, resource_fields
async def get_current_user(user_id=Depends(jwt_required)):
user = await app.user_model.get_or_none(pk=user_id)
if not user:
raise HTTPException(HTTP_404_NOT_FOUND)
return user
class PermissionsChecker:
def __init__(self, action: enums.PermissionAction):
self.action = action
async def __call__(self, resource: str = Path(...), user=Depends(get_current_user)):
if not app.permission or user.is_superuser:
return
if not user.is_active:
raise HTTPException(status_code=HTTP_403_FORBIDDEN)
has_permission = False
await user.fetch_related("roles")
for role in user.roles:
if await role.permissions.filter(model=resource, action=self.action):
has_permission = True
break
if not has_permission:
raise HTTPException(status_code=HTTP_403_FORBIDDEN)
read_checker = PermissionsChecker(action=enums.PermissionAction.read)
create_checker = PermissionsChecker(action=enums.PermissionAction.create)
update_checker = PermissionsChecker(action=enums.PermissionAction.update)
delete_checker = PermissionsChecker(action=enums.PermissionAction.delete)
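# A minimal sketch (the router, path, and response shape are assumptions, not
# part of the original module) showing how the checkers above attach to a
# route as FastAPI dependencies:
from fastapi import APIRouter

router = APIRouter()


@router.get("/{resource}/list", dependencies=[Depends(read_checker)])
async def list_resource(model=Depends(get_model), query: QueryItem = Depends(get_query)):
    # get_model resolves the path parameter to a registered model;
    # get_query parses the JSON-encoded `query` parameter into a QueryItem.
    return {"page": query.page, "size": query.size}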
|
python
|
import time
from itertools import chain
from opentrons import instruments, labware, robot
from opentrons.instruments import pipette_config
def _sleep(seconds):
if not robot.is_simulating():
time.sleep(seconds)
def load_pipettes(protocol_data):
pipettes = protocol_data.get('pipettes', {})
pipettes_by_id = {}
for pipette_id, props in pipettes.items():
model = props.get('model')
mount = props.get('mount')
config = pipette_config.load(model)
pipettes_by_id[pipette_id] = instruments._create_pipette_from_config(
config=config,
mount=mount)
return pipettes_by_id
def load_labware(protocol_data):
data = protocol_data.get('labware', {})
loaded_labware = {}
for labware_id, props in data.items():
slot = props.get('slot')
model = props.get('model')
display_name = props.get('display-name')
if slot == '12':
if model == 'fixed-trash':
# pass in the pre-existing fixed-trash
loaded_labware[labware_id] = robot.fixed_trash
else:
# share the slot with the fixed-trash
loaded_labware[labware_id] = labware.load(
model,
slot,
display_name,
share=True
)
else:
loaded_labware[labware_id] = labware.load(
model,
slot,
display_name
)
return loaded_labware
def _get_location(loaded_labware, command_type, params, default_values):
labwareId = params.get('labware')
if not labwareId:
# not all commands use labware param
return None
well = params.get('well')
labware = loaded_labware.get(labwareId)
if not labware:
        raise ValueError(
            'Command tried to use labware "{}", but that ID does not exist '
            'in protocol\'s "labware" section'.format(labwareId))
# default offset from bottom for aspirate/dispense commands
offset_default = default_values.get(
'{}-mm-from-bottom'.format(command_type))
# optional command-specific value, fallback to default
offset_from_bottom = params.get(
'offsetFromBottomMm', offset_default)
if offset_from_bottom is None:
# not all commands use offsets
return labware.wells(well)
return labware.wells(well).bottom(offset_from_bottom)
def _get_pipette(command_params, loaded_pipettes):
pipetteId = command_params.get('pipette')
return loaded_pipettes.get(pipetteId)
# TODO (Ian 2018-08-22) once Pipette has more sensible way of managing
# flow rate value (eg as an argument in aspirate/dispense fns), remove this
def _set_flow_rate(
pipette_model, pipette, command_type, params, default_values):
"""
Set flow rate in uL/mm, to value obtained from command's params,
or if unspecified in command params, then from protocol's "default-values".
"""
default_aspirate = default_values.get(
'aspirate-flow-rate', {}).get(pipette_model)
default_dispense = default_values.get(
'dispense-flow-rate', {}).get(pipette_model)
flow_rate_param = params.get('flow-rate')
if flow_rate_param is not None:
if command_type == 'aspirate':
pipette.set_flow_rate(
aspirate=flow_rate_param,
dispense=default_dispense)
return
if command_type == 'dispense':
pipette.set_flow_rate(
aspirate=default_aspirate,
dispense=flow_rate_param)
return
pipette.set_flow_rate(
aspirate=default_aspirate,
dispense=default_dispense
)
# C901 code complexity is due to long elif block, ok in this case (Ian+Ben)
def dispatch_commands(protocol_data, loaded_pipettes, loaded_labware): # noqa: C901 E501
subprocedures = [
p.get('subprocedure', [])
for p in protocol_data.get('procedure', [])]
default_values = protocol_data.get('default-values', {})
flat_subs = chain.from_iterable(subprocedures)
for command_item in flat_subs:
command_type = command_item.get('command')
params = command_item.get('params', {})
pipette = _get_pipette(params, loaded_pipettes)
pipette_model = protocol_data\
.get('pipettes', {})\
.get(params.get('pipette'), {})\
.get('model')
location = _get_location(
loaded_labware, command_type, params, default_values)
volume = params.get('volume')
if pipette:
# Aspirate/Dispense flow rate must be set each time for commands
# which use pipettes right now.
# Flow rate is persisted inside the Pipette object
# and is settable but not easily gettable
_set_flow_rate(
pipette_model, pipette, command_type, params, default_values)
if command_type == 'delay':
wait = params.get('wait', 0)
if wait is True:
# TODO Ian 2018-05-14 pass message
robot.pause()
else:
_sleep(wait)
elif command_type == 'blowout':
pipette.blow_out(location)
elif command_type == 'pick-up-tip':
pipette.pick_up_tip(location)
elif command_type == 'drop-tip':
pipette.drop_tip(location)
elif command_type == 'aspirate':
pipette.aspirate(volume, location)
elif command_type == 'dispense':
pipette.dispense(volume, location)
elif command_type == 'touch-tip':
pipette.touch_tip(location)
def execute_protocol(protocol):
loaded_pipettes = load_pipettes(protocol)
loaded_labware = load_labware(protocol)
dispatch_commands(protocol, loaded_pipettes, loaded_labware)
return {
'pipettes': loaded_pipettes,
'labware': loaded_labware
}
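# A minimal protocol sketch (the field values are illustrative assumptions
# inferred from the keys this module reads) showing the expected structure:
example_protocol = {
    'pipettes': {'p1': {'model': 'p300_single_v1', 'mount': 'left'}},
    'labware': {'plate': {'slot': '1', 'model': '96-flat', 'display-name': 'Plate'}},
    'default-values': {},
    'procedure': [{'subprocedure': [
        {'command': 'delay', 'params': {'wait': 1}},
    ]}],
}
# execute_protocol(example_protocol)  # requires a connected or simulated robot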
|
python
|
#!/usr/bin/python
import sys
def int2bin(num, width):
spec = '{fill}{align}{width}{type}'.format(
fill='0', align='>', width=width, type='b')
return format(num, spec)
def hex2bin(hex_str, width):
    integer = int(hex_str, 16)
    return int2bin(integer, width)
def sendBinary(send):
binary = '# ' + '\t branch target: ' + \
str(send[0]) + '\t branch taken: ' + str(send[1]) + '\n'
binary += '0001_' + hex2bin(send[0], 64) + '_' + \
hex2bin(send[1], 1) + '_' + hex2bin('0', 31)
return binary
def recvBinary(recv):
binary = '# ' + '\tpc: ' + str(recv[0]) + '\t instr: ' + str(recv[1]) + '\n'
binary += '0010_' + hex2bin(recv[0], 64) + '_' + hex2bin(recv[1], 32)
return binary
def tr_done():
binary = '# Done' + '\n'
binary += '0011_' + hex2bin(str(0), 64) + '_' + hex2bin(str(0), 32)
return binary
name = str(sys.argv[1])
infile = open(name + ".spike", "r")
outfile = open(name + ".tr", "w")
outfile.write("# Trace format: recv (4bit)_pc (64 bit)_instruction(32 bits)\n")
outfile.write("# send (4bit)_branchtarget(64 bit)_branchtaken(1 bit)_padding(31 bits)\n")
msg = []
lines = infile.readlines()
infile.close()
pc_list = [line.split()[2] for line in lines if "core" in line] + ["0x0000000000000000"]
pc_idx = 0
jal_op = "1101111"
jalr_op = "1100111"
branch_op = "1100011"
# TODO: More elegant solution
skip_unbooted = True
boot_pc = "0x0000000080000124"
msg.append(("send", ["0x0000000080000124", "0"]))
for i in range(len(lines) - 2):
line = lines[i].rstrip("\n\r").split()
reg_line = lines[i+1].rstrip("\n\r").split()
if(len(line) != 0):
if("ecall" in line):
break
if(line[0] == "core" and line[2][:2] == "0x"):
pc = line[2]
pc_idx = pc_idx + 1
if skip_unbooted and boot_pc != pc:
continue
skip_unbooted = False
next_pc = pc_list[pc_idx]
instr_hex = line[3][1:-1]
opcode = hex2bin(instr_hex[-2:], 8)[1:]
if opcode == jal_op or opcode == jalr_op or opcode == branch_op:
branch_target = next_pc
if int(branch_target, 16) == int(pc, 16) + 4:
branch_taken = '0'
else:
branch_taken = '1'
msg.append(("recv", [pc, instr_hex]))
msg.append(("send", [branch_target, branch_taken]))
else:
msg.append(("recv", [pc, instr_hex]))
for i in msg:
if i[0] == "send":
outfile.write(sendBinary(i[1]) + '\n')
else:
outfile.write(recvBinary(i[1]) + '\n')
outfile.write(tr_done() + '\n')
outfile.close()
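# Sanity examples for the bit-formatting helpers above:
#   int2bin(5, 8)    -> '00000101'
#   hex2bin('1f', 8) -> '00011111'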
|
python
|
"""tests rio_tiler.sentinel2"""
import os
from unittest.mock import patch
import pytest
import rasterio
from rio_tiler.errors import InvalidBandName
from rio_tiler_pds.errors import InvalidMODISProduct
from rio_tiler_pds.modis.aws import MODISASTRAEAReader
MODIS_AST_BUCKET = os.path.join(
os.path.dirname(__file__), "fixtures", "astraea-opendata"
)
MCD43A4_SCENE = "MCD43A4.A2017200.h21v11.006.2017209030811"
MOD11A1_SCENE = "MOD11A1.A2020250.h20v11.006.2020251085003"
MYD11A1_SCENE = "MYD11A1.A2008110.h16v12.006.2015345131628"
MOD13A1_SCENE = "MOD13A1.A2020049.h14v04.006.2020066002045"
MYD13A1_SCENE = "MYD13A1.A2020153.h30v10.006.2020170024036"
@pytest.fixture(autouse=True)
def testing_env_var(monkeypatch):
"""Set fake env to make sure we don't hit AWS services."""
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "jqt")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "rde")
monkeypatch.delenv("AWS_PROFILE", raising=False)
monkeypatch.setenv("AWS_CONFIG_FILE", "/tmp/noconfigheere")
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", "/tmp/noconfighereeither")
monkeypatch.setenv("GDAL_DISABLE_READDIR_ON_OPEN", "EMPTY_DIR")
def mock_rasterio_open(band):
"""Mock rasterio Open."""
assert band.startswith("s3://astraea-opendata")
band = band.replace("s3://astraea-opendata", MODIS_AST_BUCKET)
return rasterio.open(band)
@patch("rio_tiler.io.cogeo.rasterio")
def test_AWS_MODISASTRAEAReader(rio):
"""Test MODIS (ASTRAEA) Reader product."""
rio.open = mock_rasterio_open
with pytest.raises(InvalidMODISProduct):
with MODISASTRAEAReader("MOD00A4.A2017006.h21v11.006.2017018074804"):
pass
with MODISASTRAEAReader(MCD43A4_SCENE) as modis:
assert modis.minzoom == 4
assert modis.maxzoom == 9
assert modis.bands == (
"B01",
"B01qa",
"B02",
"B02qa",
"B03",
"B03qa",
"B04",
"B04qa",
"B05",
"B05qa",
"B06",
"B06qa",
"B07",
"B07qa",
)
assert modis._get_band_url("B1") == modis._get_band_url("B01")
assert modis._get_band_url("B01") == (
"s3://astraea-opendata/MCD43A4.006/21/11/2017200/MCD43A4.A2017200.h21v11.006.2017209030811_B01.TIF"
)
with MODISASTRAEAReader(MOD11A1_SCENE) as modis:
assert modis.minzoom == 4
assert modis.maxzoom == 9
assert modis.bands == (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B09",
"B10",
"B11",
"B12",
)
assert modis._get_band_url("B01") == (
"s3://astraea-opendata/MOD11A1.006/20/11/2020250/MOD11A1.A2020250.h20v11.006.2020251085003_LSTD_B01.TIF"
)
with MODISASTRAEAReader(MYD11A1_SCENE) as modis:
assert modis.minzoom == 4
assert modis.maxzoom == 9
assert modis.bands == (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B09",
"B10",
"B11",
"B12",
)
assert modis._get_band_url("B01") == (
"s3://astraea-opendata/MYD11A1.006/16/12/2008110/MYD11A1.A2008110.h16v12.006.2015345131628_LSTD_B01.TIF"
)
with MODISASTRAEAReader(MOD13A1_SCENE) as modis:
assert modis.minzoom == 4
assert modis.maxzoom == 9
assert modis.bands == (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B09",
"B10",
"B11",
"B12",
)
assert modis._get_band_url("B01") == (
"s3://astraea-opendata/MOD13A1.006/14/04/2020049/MOD13A1.A2020049.h14v04.006.2020066002045_NDVI_B01.TIF"
)
with MODISASTRAEAReader(MYD13A1_SCENE) as modis:
assert modis.scene_params.get("scene") == MYD13A1_SCENE
assert len(modis.bounds) == 4
assert modis.minzoom == 4
assert modis.maxzoom == 9
assert modis.bands == (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B09",
"B10",
"B11",
"B12",
)
with pytest.raises(InvalidBandName):
modis._get_band_url("granule")
assert modis._get_band_url("B01") == (
"s3://astraea-opendata/MYD13A1.006/30/10/2020153/MYD13A1.A2020153.h30v10.006.2020170024036_NDVI_B01.TIF"
)
metadata = modis.info(bands="B01")
assert metadata["band_descriptions"] == [(1, "B01")]
metadata = modis.metadata(bands=("B01", "B02"))
assert metadata["band_descriptions"] == [(1, "B01"), (2, "B02")]
stats = modis.stats(bands="B05")
assert len(stats.items()) == 1
assert stats["B05"]["pc"]
tile_z = 8
tile_x = 219
tile_y = 141
data, mask = modis.tile(tile_x, tile_y, tile_z, bands="B01")
assert data.shape == (1, 256, 256)
assert mask.shape == (256, 256)
|
python
|
import FWCore.ParameterSet.Config as cms
from RecoTracker.DebugTools.TrackAlgoCompareUtil_cfi import *
|
python
|
# -*- coding: utf-8 -*-
# @Time : 2020/1/23 10:10
# @Author : jwh5566
# @Email : [email protected]
# @File : test1.py
# import sys
# print('number of arguments: ', len(sys.argv))
# print('argument list: ', str(sys.argv))
# a = 10
# if a > 0:
#     print(a, "is a positive number")
# print("this line is always printed")
#
# a = -10
# if a > 0:
#     print(a, "is a positive number")
# a = 10
# if a > 50:
# print("a is greater than 50")
# elif a == 10:
# print("a is equal to 10")
# else:
# print("a is negative")
# numbers = [6, 5, 3, 8, 4, 2, 5, 4, 11]
# sum = 0
# for i in numbers:
# sum += i
# print("The sum is: ", sum)
# for i in range(5):
# print("The number is", i)
# a = 10
# sum = 0
# i = 1
# while i <= a:
# sum += i
# i += 1
# print("The sum is", sum)
# numbers = [10, 25, 54, 86, 89, 11, 33, 22]
# new_numbers = filter(lambda x: (x%2 == 0), numbers)
# print(type(new_numbers))
# print(list(new_numbers))
# my_List = [1, 5, 4, 6, 8, 11, 3, 12]
# new_list = list(map(lambda x: x*2, my_List))
# print(new_list)
a = 35
b = 57
try:
c = a + b
print("the value of c is: ", c)
d = b / 0
print("the value d is ", d)
except ZeroDivisionError:
    print("division by zero is not possible")
print("out of try ... except block")
|
python
|
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import HtmlXPathSelector
from ..items import NewsItem
from datetime import datetime
import pandas as pd
import re
class SabahSpider(CrawlSpider):
name = "sabah"
allowed_domains = ["sabah.com.tr"]
def __init__(self, yearmonth='', *args, **kwargs):
super(SabahSpider, self).__init__(*args, **kwargs)
# http://www.sabah.com.tr/timeline/2017/05/20
        begin = pd.Timestamp(yearmonth + "-01")
        end = begin + pd.DateOffset(months=1) - pd.DateOffset(days=1)
date_inds = [re.findall("[0-9]{4}/[0-9]{2}/[0-9]{2}", d.date().isoformat().replace("-","/"))[0] for d in pd.date_range(begin,end)]
self.start_urls = ["http://www.sabah.com.tr/timeline/%s" % d for d in date_inds]
rules = (
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//div[@class="masonryFrame"]/div[@class="box"]//a',)), callback="parse_items", follow=True),
)
def parse_items(self, response):
hxs = HtmlXPathSelector(response)
item = NewsItem()
item["link"] = response.request.url
item["lang"] = "tr"
item["source"] = "sabah"
category = hxs.xpath("//div[contains(@class,'haber-header')]/header/span[contains(@class,'category')]//text()").extract()
date_time = hxs.xpath("//div[contains(@class,'haber-header')]/div[contains(@class,'info')]/time/text()").extract()
item["author"] = ""
title = hxs.xpath("//div[contains(@class,'haber-header')]/header/h1/text()").extract()
intro = hxs.xpath("//div[contains(@class,'haber-header')]/header/h2/text()").extract()
new_content = hxs.xpath("//div[contains(@class,'content')]/div/p/text()").extract()
#
# Processing outputs
item["intro"] = ' '.join(intro)
item["title"] = ' '.join(title)
new_content = ' '.join(new_content)
new_content = re.sub('\n',' ',new_content)
item["content"] = re.sub('\s{2,}',' ',new_content)
item["category"] = '|'.join(category)
item["date_time"] = " ".join(date_time)
return(item)
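# Typical invocation from inside the Scrapy project (the output filename is
# illustrative):
#   scrapy crawl sabah -a yearmonth=2017-05 -o sabah_201705.json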
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenAppXwbsssQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenAppXwbsssQueryResponse, self).__init__()
self._a = None
@property
def a(self):
return self._a
@a.setter
def a(self, value):
self._a = value
def parse_response_content(self, response_content):
response = super(AlipayOpenAppXwbsssQueryResponse, self).parse_response_content(response_content)
if 'a' in response:
self.a = response['a']
|
python
|
'''
--------------------
standard_parsers.py
--------------------
This module contains a set of commands initializing standard
:py:class:`argparse.ArgumentParser` objects with standard sets of pre-defined
options. The idea here is that I have a standard basic parser with set syntax
but can also have a 'cluster parser' with a set of pre-defined cluster oriented
options that can be added to the standard basic parser for scripts that need
cluster support, etc etc
.. moduleauthor:: Nick Schurch <[email protected]>
:module_version: 1.3
:created_on: 2013-04-08
----------------
'''
__version__ = "1.3"
import argparse, tempfile
import script_options.custom_callables as cc
def standard_parser(ver, prog, usage, description=None,
epilog=None, tmpdir=True, infile=True, infiletype="txt",
outfile=True):
'''Set up a command line parser with standard options.
Depending on the options supplied to the function the standard options
include an input file, an output file, a log file, a temporary directory
a verbosity switch and standard :py:class:`argparse.ArgumentParser` version
and help switches.
Returns a :py:class:`argparse.ArgumentParser` instance and two lists; the
first is a list of the positional arguments and their defaults (which
should be None), the second is a list of keyword arguments and their
defaults. These lists are used by :ref:`standard_logging.py
<standard_logging_autodocs>` to give clarity to the logged script options.
Note, the destination in the resulting namespace (and in the first value of
tuples in the keyword arguments lists) is the same as the long option for
    all optional arguments; let's try to keep it that way!
'''
# use these to store some metadata on the options added to the parser.
# These will be lists of tuples with the argument name, and default value.
pos_args = []
kw_args = []
# setup the argparse parser
formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(prog=prog,
usage=usage % prog,
description=description,
epilog=epilog,
formatter_class=formatter,
add_help=False)
reqarggroup = parser.add_argument_group('Standard Options (required)')
########## input file #########
if infile:
infilehelp = "Specify an input %s file (inc. path if different from " \
"the current working directory) to be consumed by this " \
"script." % infiletype
reqarggroup.add_argument('infile',
action = 'store',
type = cc.input_file,
help = infilehelp
)
pos_args.append(('infile',None))
########## output file #########
if outfile:
outfilehelp = "Specify an output file (inc. path if different from " \
"the current working directory) to be generated by " \
"this script."
reqarggroup.add_argument('outfile',
action = 'store',
type = cc.output_file,
help = outfilehelp
)
pos_args.append(('outfile',None))
########## log file #########
loghelp = "Specify a log file (inc. path if different from the current " \
"working directory) of the log file generated by this script."
reqarggroup.add_argument('-l', '--log',
action = 'store',
dest='log',
required=True,
type = cc.output_file,
help = loghelp
)
optarggroup = parser.add_argument_group('Standard Options (optional)')
kw_args.append(('log','log',None))
########## tmpdir #########
if tmpdir:
tmpdirhelp = "Specify a directory to use as a temp dir for this " \
"script. If --tmpdir is listed without a directory, or " \
"is omitted entirely, then a system-generated tmpdir " \
"will be used."
tmp_dir = tempfile.mkdtemp()
optarggroup.add_argument('--tmpdir',
nargs = '?',
action = 'store',
dest='tmpdir',
const = tmp_dir,
default = tmp_dir,
type = cc.output_path,
help = tmpdirhelp
)
kw_args.append(('tmpdir','tmpdir',tmp_dir))
########## version, verbose, help #########
optarggroup.add_argument('--version',
action = 'version',
dest = 'version',
version = '%(prog)s'+' %s' % str(ver)
)
optarggroup.add_argument('-v', '--verbose',
action = 'store_true',
dest = 'verbose',
help = 'Verbosity switch for logging and warnings'
)
kw_args.append(('verbose','verbose',False))
optarggroup.add_argument('-h', '--help',
action = 'help',
help = "Show this help message and exit"
)
return(parser, pos_args, kw_args)
def get_std_req_group(parser):
""" Returns the 'Standard Options (required)' argument group from the
standard parser. """
for group in parser._action_groups:
if group.title=="Standard Options (required)":
return(group)
return(None)
def get_std_opt_group(parser):
""" Returns the 'Standard Options (optional)' argument group from the
standard parser. """
for group in parser._action_groups:
if group.title=="Standard Options (optional)":
return(group)
return(None)
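if __name__ == '__main__':
    # A minimal usage sketch; the version and usage strings are illustrative.
    parser, pos_args, kw_args = standard_parser(
        '1.0', 'example_script', '%s [options] infile outfile',
        description='Demonstration of the standard parser.')
    parser.print_help()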
|
python
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
def conv(x, out_channel, kernel_size, stride=1, dilation=1):
x = slim.conv2d(x, out_channel, kernel_size, stride, rate=dilation,activation_fn=None)
return x
def global_avg_pool2D(x):
with tf.variable_scope(None, 'global_pool2D'):
        n, h, w, c = x.get_shape().as_list()
x = slim.avg_pool2d(x, (h,w), stride=1)
return x
def global_context_module(x,squeeze_depth,fuse_method='add',attention_method='att',scope=None):
assert fuse_method in ['add','mul']
assert attention_method in ['att','avg']
with tf.variable_scope(scope,"GCModule"):
if attention_method == 'avg':
context = global_avg_pool2D(x)#[N,1,1,C]
else:
n,h,w,c=x.get_shape().as_list()
context_mask = conv(x,1,1)# [N, H, W,1]
context_mask = tf.reshape(context_mask,shape=tf.convert_to_tensor([tf.shape(x)[0], -1, 1]))# [N, H*W, 1]
context_mask=tf.transpose(context_mask,perm=[0,2,1])# [N, 1, H*W]
context_mask = tf.nn.softmax(context_mask,axis=2)# [N, 1, H*W]
input_x = tf.reshape(x, shape=tf.convert_to_tensor([tf.shape(x)[0], -1,c]))# [N,H*W,C]
context=tf.matmul(context_mask,input_x)# [N, 1, H*W] x [N,H*W,C] =[N,1,C]
context=tf.expand_dims(context,axis=1)#[N,1,1,C]
context=conv(context,squeeze_depth,1)
context=slim.layer_norm(context)
context=tf.nn.relu(context)
context=conv(context,c,1)#[N,1,1,C]
if fuse_method=='mul':
context=tf.nn.sigmoid(context)
out=context*x
else:
out=context+x
return out
if __name__=='__main__':
inputs=tf.placeholder(tf.float32,shape=[None,64,64,128])
input_array=np.ones((1,64,64,128),dtype=np.float32)
out=global_context_module(inputs,squeeze_depth=16)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
output=sess.run([out],feed_dict={inputs:input_array})
print(output[0].shape)
|
python
|
from functools import reduce
from typing import Optional
import numpy as np
def find_common_elements(*indices: np.ndarray) -> np.ndarray:
""" Returns array with unique elements common to *all* indices
or the first index if it's the only one. """
common_elements = reduce(np.intersect1d, indices[1:], indices[0])
return common_elements
def flatten_extra_dim(data: Optional[np.ndarray]) -> Optional[np.ndarray]:
""" Removes extra dimension if it is equal to one.
It's different from np.squeeze in that it operates only on the last axis.
:return: reshaped view of the original array or None if input is None. """
if data is not None and data.shape[-1] == 1:
return data.reshape(data.shape[:-1])
return data
def atleast_n_dimensions(data: np.ndarray, ndim: int) -> np.ndarray:
""" Return a view with extra dimensions to the array if necessary,
such that the result has the required number of dimensions."""
while data.ndim < ndim:
data = np.expand_dims(data, axis=-1)
return data
def atleast_2d(data: np.ndarray) -> np.ndarray:
return atleast_n_dimensions(data, ndim=2)
def atleast_4d(data: np.ndarray) -> np.ndarray:
return atleast_n_dimensions(data, ndim=4)
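# Quick self-checks for the helpers above (expected shapes worked out by hand):
if __name__ == "__main__":
    assert np.array_equal(
        find_common_elements(np.array([1, 2, 3]), np.array([2, 3, 4])),
        np.array([2, 3]))
    assert atleast_2d(np.array([1, 2, 3])).shape == (3, 1)
    assert flatten_extra_dim(np.ones((4, 1))).shape == (4,)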
|
python
|
from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Skill
from core.models import Location
from core.models import Job
# from core.models import Job
from job import serializers
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
# from job import serializers
# class TagViewSet(viewsets.GenericViewSet,
# mixins.ListModelMixin,
# mixins.CreateModelMixin):
# """Manage tags in the database"""
# authentication_classes = (TokenAuthentication,)
# permission_classes = (IsAuthenticated,)
# queryset = Tag.objects.all()
# serializer_class = serializers.TagSerializer
#
# def get_queryset(self):
# """Return objects for current authenticated user only"""
# return self.queryset.filter(user=self.request.user).order_by('-name')
#
# def perform_create(self, serializer):
# """Create a new tag"""
# serializer.save(user = self.request.user)
class SkillViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Manage tags in the database"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
queryset = Skill.objects.all()
serializer_class = serializers.SkillSerializer
def get_queryset(self):
"""Return objects for current authenticated user only"""
return self.queryset.filter(user=self.request.user).order_by('-name')
def perform_create(self, serializer):
"""Create a new tag"""
serializer.save(user = self.request.user)
class LocationViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Manage tags in the database"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
queryset = Location.objects.all()
serializer_class = serializers.LocationSerializer
def get_queryset(self):
"""Return objects for current authenticated user only"""
return self.queryset.filter(user=self.request.user).order_by('-name')
def perform_create(self, serializer):
"""Create a new tag"""
serializer.save(user = self.request.user)
class JobViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Manage tags in the database"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
queryset = Job.objects.all()
serializer_class = serializers.JobSerializer
def get_queryset(self):
"""Return objects for current authenticated user only"""
filter_backends = [filters.SearchFilter, DjangoFilterBackend]
search_fields = ['job_title']
filterset_fields = ['experience', 'sponsorship']
return self.queryset.filter(user=self.request.user) #.order_by('-name')
def perform_create(self, serializer):
"""Create a new job info post"""
serializer.save(user = self.request.user)
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return serializers.JobDetailSerializer
return self.serializer_class
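# Example request that JobViewSet's search/filter configuration supports
# (the URL prefix is an assumption about the project's routing):
#   GET /api/job/jobs/?search=engineer&experience=2&sponsorship=True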
|
python
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='clus']/h2[@class='nomargin title_sp']",
'price' : "//h2[@class='nomargin']/font",
'category' : "//div[@class='head_title_center']/div[@class='sp_detai']/a",
'description' : "//div[@class='clus']/div[@id='user_post_view']/p",
'images' : "//div[@class='box_nullstyle']/a[@class='lightbox']/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'vanphongphamanhkhoa.com'
allowed_domains = ['vanphongphamanhkhoa.com']
start_urls = ['http://vanphongphamanhkhoa.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/view_product+-\d+/']), 'parse_item'),
Rule(LinkExtractor(allow=['/products-\d+/[a-zA-Z0-9-]+/($|\d\d?$)']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
python
|
from extractors import archive_extractor, ertflix_extractor, star_extractor, megatv_extractor
from downloaders import m3u8_downloader, alpha_downloader
import sys
if __name__ == "__main__":
url = sys.argv[1]
if "ertflix.gr" in url:
extractor = ertflix_extractor.ErtflixExtractor(url)
stream_data = extractor.obtain_data()
m3u8_downloader.Downloader(stream_data).download()
elif "archive.ert.gr" in url:
extractor = archive_extractor.ArchiveExtractor(url)
stream_data = extractor.obtain_data()
m3u8_downloader.Downloader(stream_data).download()
elif "alphatv.gr" in url:
alpha_downloader.AlphaDownloader(url).download()
elif "star.gr" in url:
extractor = star_extractor.StarExtractor(url)
stream_data = extractor.obtain_data()
m3u8_downloader.Downloader(stream_data).download()
elif "megatv.com" in url:
extractor = megatv_extractor.MegatvExtractor(url)
stream_data = extractor.obtain_data()
m3u8_downloader.Downloader(stream_data).download()
|
python
|
#!/usr/bin/env python2
# PYTHON_ARGCOMPLETE_OK
import sys
import agoat._config
import agoat.run_disasms
import agoat.indexer
import agoat.diagnostic
import agoat.keywordsearcher
# set up command-line completion, if argcomplete module is installed
try:
import argcomplete
import argparse
parser = argparse.ArgumentParser(prog=sys.argv[0], description='agoat search')
_subpsrs = parser.add_subparsers(dest='command', help='commands')
agoat.run_disasms.build_argument_parser(_subpsrs.add_parser('disasm'))
agoat.indexer.build_argument_parser(_subpsrs.add_parser('index'))
agoat.diagnostic.build_argument_parser(_subpsrs.add_parser('list'))
agoat.keywordsearcher.build_argument_parser(_subpsrs.add_parser('query'))
argcomplete.autocomplete(parser)
except ImportError:
pass
USAGE = "usage: %s {disasm,index,list,query} [-h|--version]\n"
def main(argv):
if len(argv) == 1 or argv[1] in ("-h", "--help"):
sys.stdout.write(USAGE % argv[0])
return
elif argv[1] == "--version":
sys.stdout.write("%s version %s\n" % (argv[0], agoat._config.VERSION))
return
cmd = argv[1]
argv = argv[:]
del argv[1]
argv[0] = argv[0] + " " + cmd
if cmd == 'disasm':
agoat.run_disasms.main(argv)
elif cmd == 'index':
agoat.indexer.main(argv)
elif cmd == 'list':
agoat.diagnostic.main(argv)
elif cmd == 'query':
agoat.keywordsearcher.main(argv)
else:
sys.exit("unknown command: %s" % cmd)
if __name__ == '__main__':
main(sys.argv)
|
python
|
import os
import math
import pygame
class Lava(pygame.sprite.Sprite):
def __init__(self, x, y, width, height):
"""
Create a platform sprite. Note that these platforms are designed to be very wide and not very tall.
It is required that the width is greater than or equal to the height. It is recommended to make height 50 or less.
Best visual effects are when the width is a multiple of the height.
Args:
x: The x coordinate of the platform
y: The y coordinate of the platform
width: The width of the platform. Must be greater than or equal to the height
height: The height of the platform. Recommended to be 50 or less.
"""
super().__init__()
self.image = self.create_image(os.path.join("assets", "lava.png"), width, height)
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
        self.width = width
def create_image(self, image_location, width, height):
"""
Create the image for this sprite by using one base image and tiling it horizontally. Note that vertical tiling has not been implemented.
Args:
image_location: A string representing the file location for the image
width: The width of the output image in pixels
height: The height of the output image in pixels
Returns:
A surface representing the output image.
"""
tile_image = pygame.image.load(image_location).convert_alpha()
# The tile is a square and the height is expected to be smaller than the width
tile_width = height
tile_height = height
tile_image = pygame.transform.scale(tile_image, (tile_width, tile_height))
# The self.image attribute expects a Surface, so we can manually create one and "blit" the tile image onto the surface (i.e. paint an image onto a surface).
# We use list comprehension to quickly make the blits_data list of tuples (each tuple has the tile image, and the X and Y coordinates)
# Don't know what list comprehension is? Go look it up on the Internet. That's what all professional software engineers do ;)
image = pygame.Surface((width, height))
blits_data = [(tile_image, (tile_width * i, 0)) for i in range(math.ceil(width / tile_width))]
image.blits(blits_data)
return image
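# A minimal usage sketch (assumes a pygame display is initialised, which
# convert_alpha() requires, and that assets/lava.png exists):
#   pygame.init()
#   screen = pygame.display.set_mode((800, 600))
#   lava = Lava(0, 550, 800, 50)
#   screen.blit(lava.image, lava.rect)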
|
python
|
def pal(a):
    s = list(str(a))
    w = []
    i = 1
    while i <= len(s):
        w.append(s[-i])
        i = i + 1
    if w == s:
        print("it is palindrome")
    else:
        print("it is not palindrome")
string = input("enter the string: ")
pal(string)
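# Example runs:
#   pal("level") -> prints "it is palindrome"
#   pal("hello") -> prints "it is not palindrome"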
|
python
|
from aoc import data
from collections import Counter, defaultdict
parser = int
def part1(inputData, days=80):
fish = Counter(inputData)
for _ in range(days):
newFish = fish.pop(0, 0)
fish = Counter({k-1: v for k, v in fish.items()}) + Counter({6: newFish, 8:newFish})
return sum(fish.values())
def part2(inputData):
return part1(inputData, days=256)
if __name__ == "__main__":
inputData = data(parser=parser,delimiter=',')
print(part1(inputData))
print(part2(inputData))
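    # Known sample from Advent of Code 2021 day 6, handy as a quick check:
    #   part1([3, 4, 3, 1, 2]) == 5934
    #   part2([3, 4, 3, 1, 2]) == 26984457539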
|
python
|