Dataset schema (one row per source file; ⌀ marks nullable columns):
hexsha: string (len 40) | size: int64 (6 to 782k) | ext: string (7 classes) | lang: string (1 class)
max_stars_repo_path: string (len 4 to 237) | max_stars_repo_name: string (len 6 to 72) | max_stars_repo_head_hexsha: string (len 40) | max_stars_repo_licenses: list | max_stars_count: int64 (1 to 53k) ⌀ | max_stars_repo_stars_event_min_datetime: string (len 24) ⌀ | max_stars_repo_stars_event_max_datetime: string (len 24) ⌀
max_issues_repo_path: string (len 4 to 184) | max_issues_repo_name: string (len 6 to 72) | max_issues_repo_head_hexsha: string (len 40) | max_issues_repo_licenses: list | max_issues_count: int64 (1 to 27.1k) ⌀ | max_issues_repo_issues_event_min_datetime: string (len 24) ⌀ | max_issues_repo_issues_event_max_datetime: string (len 24) ⌀
max_forks_repo_path: string (len 4 to 184) | max_forks_repo_name: string (len 6 to 72) | max_forks_repo_head_hexsha: string (len 40) | max_forks_repo_licenses: list | max_forks_count: int64 (1 to 12.2k) ⌀ | max_forks_repo_forks_event_min_datetime: string (len 24) ⌀ | max_forks_repo_forks_event_max_datetime: string (len 24) ⌀
content: string (len 6 to 782k) | avg_line_length: float64 (2.75 to 664k) | max_line_length: int64 (5 to 782k) | alphanum_fraction: float64 (0 to 1)
hexsha: dbdf4681c8315c30d07f86ec062381cf7da9e8d5 | size: 1,342 | ext: py | lang: Python
max_stars: spider/Config_2019.py @ iecasszyjy/tweet_search-master (e4978521a39964c22ae46bf35d6ff17710e8e6c6), licenses ["MIT"], stars: null
max_issues: spider/Config_2019.py @ iecasszyjy/tweet_search-master (e4978521a39964c22ae46bf35d6ff17710e8e6c6), licenses ["MIT"], issues: 2 (2021-03-31T18:54:16.000Z to 2021-12-13T19:49:08.000Z)
max_forks: spider/Config_2019.py @ iecasszyjy/tweet_search-master (e4978521a39964c22ae46bf35d6ff17710e8e6c6), licenses ["MIT"], forks: null
# -*- coding:utf-8 -*-
# Configuration file for the got module, the MongoDB database and the Redis database
import os
import sys
import pymongo
# import redis
def get_noau_config():
    # got module
if sys.version_info[0] < 3:
# print('import got')
import got
else:
# print('import got333333333333333333333333333')
# import got
import got3 as got
    # MongoDB database
    # client = pymongo.MongoClient(os.environ['MONGOHOST'], 27017, connect=False)
    # client = pymongo.MongoClient('mongodb://3.220.111.222:27017/')
    client = pymongo.MongoClient('3.220.111.222', 27017, connect=False)
client.admin.authenticate("aircas", "aircas@2018", mechanism='SCRAM-SHA-1')
# db = client.natural_disaster
db = client['2019HongKong_protest']
# db.authenticate(name='aircas',password='aircas@2018')
    # Redis database
# r = redis.StrictRedis(host=os.environ['REDISHOST'], port=6379, db=0)
# r = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
return got, db
def getMongoClient():
client = pymongo.MongoClient('3.220.111.222', 27017, connect=False)
client.admin.authenticate("aircas", "aircas@2018", mechanism='SCRAM-SHA-1')
return client
def closeMongoClient(client):
client.close()
def getGot():
    # got module
if sys.version_info[0] < 3:
import got
else:
import got3 as got
return got
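# Minimal usage sketch (the collection name below is illustrative, not from the original code):
#   got, db = get_noau_config()
#   collection = db['tweets']  # any collection in the 2019HongKong_protest database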
avg_line_length: 27.958333 | max_line_length: 81 | alphanum_fraction: 0.655738
hexsha: 0edd2dee96976c08dc40974f087649b10764b73a | size: 6,196 | ext: py | lang: Python
max_stars: sample/rpz-sensor/python3/tsl2572.py @ yappy/RpzIrSensor (8ff11ad594e81380975ff84e2394ff0834d380cf), licenses ["MIT"], stars: null
max_issues: sample/rpz-sensor/python3/tsl2572.py @ yappy/RpzIrSensor (8ff11ad594e81380975ff84e2394ff0834d380cf), licenses ["MIT"], issues: null
max_forks: sample/rpz-sensor/python3/tsl2572.py @ yappy/RpzIrSensor (8ff11ad594e81380975ff84e2394ff0834d380cf), licenses ["MIT"], forks: null
#!/usr/bin/env python3
"""
TSL2572 Control Module via I2C
2018/11/15
"""
import smbus
import time
class TSL2572:
AGAIN_0_16 = 0
AGAIN_1 = 1
AGAIN_8 = 2
AGAIN_16 = 3
AGAIN_120 = 4
ATIME_50MS = 0xED
ATIME_200MS = 0xB6
ATIME_600MS = 0x24
def __init__(self, i2c_addr):
self.i2c_addr = i2c_addr
self.i2c = smbus.SMBus(1)
self.ch0 = 0
self.ch1 = 0
self.lux = 0
self.again = TSL2572.AGAIN_8
self.atime = TSL2572.ATIME_200MS
    # I2C: read `length` bytes starting at addr
def read_address(self, addr, length):
addr = addr | 0xA0
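        # 0xA0 = command-select bit (0x80) | auto-increment protocol (0x20) in the
        # TSL2572 command register, so multi-byte reads advance through registers.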
try:
return self.i2c.read_i2c_block_data(self.i2c_addr, addr, length)
except IOError:
return [0 for i in range(length)]
# I2C write data to addr
def write_address(self, addr, data):
addr = addr | 0xA0
self.i2c.write_i2c_block_data(self.i2c_addr, addr, data)
# Read ID and return True if success
def id_read(self):
data = self.read_address(0x12, 1)
if data[0] == 0x34: # Return true if TSL25721 (3.3V). Change to 0x3D if TSL25723 (1.8V).
return True
return False
def set_atime(self, atime):
self.write_address(0x1, [atime])
def set_again(self, again):
if TSL2572.AGAIN_0_16 == again:
self.write_address(0xD, [0x4])
self.write_address(0xF, [0])
elif TSL2572.AGAIN_1 == again:
self.write_address(0xD, [0])
self.write_address(0xF, [0])
elif TSL2572.AGAIN_8 == again:
self.write_address(0xD, [0])
self.write_address(0xF, [0x1])
elif TSL2572.AGAIN_16 == again:
self.write_address(0xD, [0])
self.write_address(0xF, [0x2])
elif TSL2572.AGAIN_120 == again:
self.write_address(0xD, [0])
self.write_address(0xF, [0x3])
# Read status register
# Return avalid, aint as 0 or 1
def read_status(self):
data = self.read_address(0x13, 1)
avalid = data[0] & 0x1
aint = (data[0] & 0x10) >> 4
return avalid, aint
# One time ALS integration and update ch0 and ch1
def als_integration(self):
self.write_address(0x0, [0x1]) # Stop ALS integration
self.set_again(self.again)
self.set_atime(self.atime)
self.write_address(0x0, [0x3]) # Start ALS integration
# Check status every 10ms
while True:
avalid, aint = self.read_status()
if avalid==1 and aint==1:
self.write_address(0x0, [0x1]) # Stop ALS integration
break
else:
time.sleep(0.01)
data = self.read_address(0x14, 4)
self.ch0 = (data[1] << 8) | data[0]
self.ch1 = (data[3] << 8) | data[2]
# One time lux measurement
# Run ALS integration with auto again/atime and calculate lux
# Select below again/atime automatically based on default measurement result
# again, atime, scale, max count
# 0.16, 50, 0.04, 19456
# 1, 200, 1, 65535 (Default)
# 8, 200, 8, 65535
# 120, 200, 120, 65535
# 120, 600, 360, 65535
def meas_single(self):
if not self.id_read():
return False
self.again = TSL2572.AGAIN_1
self.atime = TSL2572.ATIME_200MS
self.als_integration()
if max([self.ch0, self.ch1]) == 65535:
self.again = TSL2572.AGAIN_0_16
self.atime = TSL2572.ATIME_50MS
self.als_integration()
elif max([self.ch0, self.ch1]) < 100:
self.again = TSL2572.AGAIN_120
self.atime = TSL2572.ATIME_600MS
self.als_integration()
elif max([self.ch0, self.ch1]) < 300 :
self.again = TSL2572.AGAIN_120
self.atime = TSL2572.ATIME_200MS
self.als_integration()
elif max([self.ch0, self.ch1]) < 3000:
self.again = TSL2572.AGAIN_8
self.atime = TSL2572.ATIME_200MS
self.als_integration()
self.write_address(0x0, [0x0]) # Sleep
self.calc_lux()
return True
    # Calculate lux from ch0/ch1, then update self.lux
def calc_lux(self):
if TSL2572.ATIME_50MS == self.atime:
t = 50
elif TSL2572.ATIME_200MS == self.atime:
t = 200
elif TSL2572.ATIME_600MS == self.atime:
t = 600
if TSL2572.AGAIN_0_16 == self.again:
g = 0.16
elif TSL2572.AGAIN_1 == self.again:
g = 1
elif TSL2572.AGAIN_8 == self.again:
g = 8
elif TSL2572.AGAIN_16 == self.again:
g = 16
elif TSL2572.AGAIN_120 == self.again:
g = 120
cpl = (t * g)/60
lux1 = (self.ch0 - 1.87*self.ch1) / cpl
lux2 = (0.63*self.ch0 - self.ch1) / cpl
self.lux = max([0, lux1, lux2])
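        # Worked example with illustrative values: atime = 200 ms and gain = 1 give
        # cpl = (200 * 1) / 60 ≈ 3.33; for ch0 = 1000, ch1 = 200:
        # lux1 = (1000 - 1.87*200) / 3.33 ≈ 187.8, lux2 = (0.63*1000 - 200) / 3.33 ≈ 129.0,
        # so self.lux ≈ 187.8.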
# Print atime, again setting and ch0/ch1 data
def print_reg(self):
if TSL2572.ATIME_50MS == self.atime:
print(' ADC Time : 50ms')
elif TSL2572.ATIME_200MS == self.atime:
print(' ADC Time : 200ms')
elif TSL2572.ATIME_600MS == self.atime:
print(' ADC Time : 600ms')
if TSL2572.AGAIN_0_16 == self.again:
print(' ADC Gain : 0.16')
elif TSL2572.AGAIN_1 == self.again:
print(' ADC Gain : 1')
elif TSL2572.AGAIN_8 == self.again:
print(' ADC Gain : 8')
elif TSL2572.AGAIN_16 == self.again:
print(' ADC Gain : 16')
elif TSL2572.AGAIN_120 == self.again:
print(' ADC Gain : 120')
print(' ch0 : 0x{:X}'.format(self.ch0))
print(' ch1 : 0x{:X}'.format(self.ch1))
# Print lux
def print_meas(self):
print( ' Lux : {:.1f}lux'.format(self.lux))
def main():
tsl2572 = TSL2572(0x39)
if tsl2572.id_read():
tsl2572.meas_single()
tsl2572.print_reg()
tsl2572.print_meas()
else:
print('ID Read Failed')
if __name__ == '__main__':
main()
avg_line_length: 29.932367 | max_line_length: 96 | alphanum_fraction: 0.551323
hexsha: 1600965384d4954b6c890ff800eb7a1c2fbb9595 | size: 701 | ext: py | lang: Python
max_stars: solutions/MFA/webserver/src/deep_speaker/encode.py @ naetimus/bootcamp (0182992df7c54012944b51fe9b70532ab6a0059b), licenses ["Apache-2.0"], stars: 1 (2021-04-06T06:13:20.000Z to 2021-04-06T06:13:20.000Z)
max_issues: solutions/MFA/webserver/src/deep_speaker/encode.py @ naetimus/bootcamp (0182992df7c54012944b51fe9b70532ab6a0059b), licenses ["Apache-2.0"], issues: null
max_forks: solutions/MFA/webserver/src/deep_speaker/encode.py @ naetimus/bootcamp (0182992df7c54012944b51fe9b70532ab6a0059b), licenses ["Apache-2.0"], forks: null
import numpy as np
import random
from deep_speaker.audio import read_mfcc
from deep_speaker.batcher import sample_from_mfcc
from deep_speaker.constants import SAMPLE_RATE, NUM_FRAMES
from deep_speaker.conv_models import DeepSpeakerModel
from deep_speaker.test import batch_cosine_similarity
def voc_to_vec(voc):
np.random.seed(123)
random.seed(123)
model = DeepSpeakerModel()
model.m.load_weights('/app/src/deep_speaker/checkpoints/ResCNN_triplet_training_checkpoint_265.h5', by_name=True)
mfcc = sample_from_mfcc(read_mfcc(voc, SAMPLE_RATE), NUM_FRAMES)
predict = model.m.predict(np.expand_dims(mfcc, axis=0))
    vec = list(map(float, predict.tolist()[0]))
return vec
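# Hypothetical usage (the path is illustrative): vec = voc_to_vec('/path/to/sample.wav')
# returns the speaker embedding as a plain list of floats (512 dimensions for the
# reference deep-speaker ResCNN checkpoint loaded above).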
avg_line_length: 33.380952 | max_line_length: 117 | alphanum_fraction: 0.7903
hexsha: 16c17e40745319deae0b17ecebfaee869a182247 | size: 1,062 | ext: py | lang: Python
max_stars: examples/flask_caching.py @ xinetzone/dash-tests (cd4526caa2f9d906915c31370b3487bdcef92aa4), licenses ["Apache-2.0"], stars: 1 (2022-03-01T07:38:32.000Z to 2022-03-01T07:38:32.000Z)
max_issues: examples/flask_caching.py @ xinetzone/dash-tests (cd4526caa2f9d906915c31370b3487bdcef92aa4), licenses ["Apache-2.0"], issues: 12 (2021-07-13T12:33:36.000Z to 2021-07-14T05:25:19.000Z)
max_forks: examples/flask_caching.py @ xinetzone/dash-book (1f624e87e2aa02c9931318918df969e44bdd2c07), licenses ["Apache-2.0"], forks: null
import datetime
import os
from dash import dcc, html
from dash.dependencies import Input, Output
from flask_caching import Cache
from app import app
cache = Cache(app.server, config={
# try 'filesystem' if you don't want to setup redis
'CACHE_TYPE': 'redis',
'CACHE_REDIS_URL': os.environ.get('REDIS_URL', '')
})
app.config.suppress_callback_exceptions = True
timeout = 20
layout = html.Div([
html.Div(id='flask-cache-memoized-children'),
dcc.RadioItems(
id='flask-cache-memoized-dropdown',
options=[
{'label': 'Option {}'.format(i), 'value': 'Option {}'.format(i)}
for i in range(1, 4)
],
value='Option 1'
),
html.Div('Results are cached for {} seconds'.format(timeout))
])
@app.callback(
Output('flask-cache-memoized-children', 'children'),
Input('flask-cache-memoized-dropdown', 'value'))
@cache.memoize(timeout=timeout) # in seconds
def render(value):
return 'Selected "{}" at "{}"'.format(
value, datetime.datetime.now().strftime('%H:%M:%S')
)
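# Note on the memoized callback above: selecting the same radio option again within
# the 20-second timeout returns the cached string, so the displayed timestamp only
# changes once the cache entry expires and render() runs again.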
avg_line_length: 27.947368 | max_line_length: 76 | alphanum_fraction: 0.644068
hexsha: bc7333e25a48def30179d76d4ea3dfb0cdb7dabd | size: 21 | ext: py | lang: Python
max_stars: __init__.py @ dustinmaurer/options-strategy-backtester (1974bfd672d163a39f928208f36a8470a99e1a48), licenses ["MIT"], stars: null
max_issues: __init__.py @ dustinmaurer/options-strategy-backtester (1974bfd672d163a39f928208f36a8470a99e1a48), licenses ["MIT"], issues: null
max_forks: __init__.py @ dustinmaurer/options-strategy-backtester (1974bfd672d163a39f928208f36a8470a99e1a48), licenses ["MIT"], forks: 1 (2021-04-11T07:18:55.000Z to 2021-04-11T07:18:55.000Z)
from . import option
avg_line_length: 10.5 | max_line_length: 20 | alphanum_fraction: 0.761905
hexsha: 4c4069bc986efb5251f07ea9fc42f08bfe83c3a3 | size: 855 | ext: py | lang: Python
max_stars: chord_rec/datasets/vec_datasets.py @ TianxueHu/ChordSymbolRec (d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac), licenses ["Unlicense", "MIT"], stars: null
max_issues: chord_rec/datasets/vec_datasets.py @ TianxueHu/ChordSymbolRec (d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac), licenses ["Unlicense", "MIT"], issues: null
max_forks: chord_rec/datasets/vec_datasets.py @ TianxueHu/ChordSymbolRec (d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac), licenses ["Unlicense", "MIT"], forks: null
from torch.utils.data import Dataset
import numpy as np
class Vec45Dataset(Dataset):
def __init__(self, note_vec_seq, chord_seq, vocab):
'Initialization'
self.note_vec_seq = note_vec_seq
self.chord_seq = chord_seq
self.vocab = vocab
def __len__(self):
'Get the total length of the dataset'
return len(self.note_vec_seq)
def __getitem__(self, index):
'Generates one sample of data'
# Select sample
return self.note_vec_seq[index], self.vec_encode(self.chord_seq[index])
def encode(self, x):
return self.vocab.stoi[x]
def vec_encode(self, x):
return np.vectorize(self.encode)(x)
def decode(self, x):
return self.vocab.itos[x]
def vec_decode(self, x):
return np.vectorize(self.decode)(x)
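# Minimal usage sketch (names are illustrative; `vocab` is any object exposing the
# `stoi`/`itos` mappings that the encode/decode helpers above assume):
#   ds = Vec45Dataset(note_vec_seq, chord_seq, vocab)
#   notes, encoded_chords = ds[0]  # chord labels are integer-encoded via vocab.stoi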
avg_line_length: 25.147059 | max_line_length: 79 | alphanum_fraction: 0.625731
hexsha: 4c6eade565b287a3e519871de24a1eb1cee990c9 | size: 3,010 | ext: py | lang: Python
max_stars: test/test_npu/test_network_ops/test_dim_arange.py @ Ascend/pytorch (39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc), licenses ["BSD-3-Clause"], stars: 1 (2021-12-02T03:07:35.000Z to 2021-12-02T03:07:35.000Z)
max_issues: test/test_npu/test_network_ops/test_dim_arange.py @ Ascend/pytorch (39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc), licenses ["BSD-3-Clause"], issues: 1 (2021-11-12T07:23:03.000Z to 2021-11-12T08:28:13.000Z)
max_forks: test/test_npu/test_network_ops/test_dim_arange.py @ Ascend/pytorch (39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc), licenses ["BSD-3-Clause"], forks: null
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestDimArange(TestCase):
def generate_data(self, min_d, max_d, shape, dtype):
input_x = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input = torch.from_numpy(input_x)
return npu_input
def cpu_op_exec(self, input_x, dim):
output = torch._dim_arange(input_x, dim)
output = output.numpy().astype(np.int32)
return output
def npu_op_exec(self, input_x, dim):
input1 = input_x.to("npu")
output = torch._dim_arange(input1, dim)
output = output.to("cpu")
output = output.numpy()
return output
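    # For reference: torch._dim_arange(x, d) behaves like torch.arange(x.size(d)),
    # e.g. a (3, 4, 5) tensor with dim=1 yields [0, 1, 2, 3].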
def test_dim_arange_3_4_5_0_float32(self, device):
input_x1 = self.generate_data(-1, 1, (3, 4, 5), np.float32)
cpu_output = self.cpu_op_exec(input_x1, 1)
npu_output = self.npu_op_exec(input_x1, 1)
self.assertRtolEqual(cpu_output, npu_output)
def test_dim_arange_30_40_50_0_float32(self, device):
input_x1 = self.generate_data(-1, 1, (30, 40, 50), np.float32)
cpu_output = self.cpu_op_exec(input_x1, 0)
npu_output = self.npu_op_exec(input_x1, 0)
self.assertRtolEqual(cpu_output, npu_output)
def test_dim_arange_10_10_10_10_10_10_10_2_float32(self, device):
input_x1 = self.generate_data(-1, 1, (10, 10, 10, 10, 10, 10), np.float32)
cpu_output = self.cpu_op_exec(input_x1, 2)
npu_output = self.npu_op_exec(input_x1, 2)
self.assertRtolEqual(cpu_output, npu_output)
def test_dim_arange_7_13_22_193_45_2_float16(self, device):
input_x1 = self.generate_data(-1, 1, (7, 13, 22, 193, 45, 2), np.float16)
cpu_output = self.cpu_op_exec(input_x1, 2)
npu_output = self.npu_op_exec(input_x1, 2)
self.assertRtolEqual(cpu_output, npu_output)
def test_dim_arange_7_13_22_float16(self, device):
input_x1 = self.generate_data(-1, 1, (7, 13, 22), np.float16)
cpu_output = self.cpu_op_exec(input_x1, 0)
npu_output = self.npu_op_exec(input_x1, 0)
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestDimArange, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
avg_line_length: 41.232877 | max_line_length: 82 | alphanum_fraction: 0.701661
hexsha: 9121defb61945cae4cade362dbee1b1b6c95fa15 | size: 21,859 | ext: py | lang: Python
max_stars: deprecated/examples/deepFM/distribute_base.py @ hutuxian/FleetX (843c7aa33f5a14680becf058a3aaf0327eefafd4), licenses ["Apache-2.0"], stars: 170 (2020-08-12T12:07:01.000Z to 2022-03-07T02:38:26.000Z)
max_issues: deprecated/examples/deepFM/distribute_base.py @ hutuxian/FleetX (843c7aa33f5a14680becf058a3aaf0327eefafd4), licenses ["Apache-2.0"], issues: 195 (2020-08-13T03:22:15.000Z to 2022-03-30T07:40:25.000Z)
max_forks: deprecated/examples/deepFM/distribute_base.py @ hutuxian/FleetX (843c7aa33f5a14680becf058a3aaf0327eefafd4), licenses ["Apache-2.0"], forks: 67 (2020-08-14T02:07:46.000Z to 2022-03-28T10:05:33.000Z)
#!/usr/bin/python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import os
import re
import commands
import logging
import time
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
import py_reader_generator as py_reader
from paddle.fluid.contrib.utils import multi_download, HDFSClient
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
class FleetDistRunnerBase(object):
"""
Distribute training base class:
This class abstracts the training process into several major steps:
1. input_data: input data of network, this function should be realized by user
2. net: network definition, this function should be defined by user
3. run_pserver: run pserver node in distribute environment
4. run_trainer: run trainer, choose the way of training network according to requirement params
5. run_infer: prediction based on the trained model
6. py_reader: using py_reader method get data, this function should be realized by user
7. dataset_reader: using dataset method get data, this function should be realized by user
8. runtime_main: program entry, get the environment parameters, decide which function to call
"""
def input_data(self, params):
"""
Function input_data: Definition of input data format in the network
Args:
:params: the hyper parameters of network
Returns:
defined by users
"""
raise NotImplementedError(
"input_data should be implemented by child classes.")
def net(self, inputs, params):
"""
Function net: Definition of network structure
Args:
        :inputs: input data, e.g. dataset and labels, defined by function: self.input_data
:params: the hyper parameters of network
Returns:
evaluation parameter, defined by users
"""
raise NotImplementedError(
"net should be implemented by child classes.")
def runtime_main(self, params):
"""
Function runtime_main: the entry point for program running
Args:
:params params: the hyper parameters of network
"""
# Step1: get the environment variable, mainly related to network communication parameters
params.role = os.getenv("TRAINING_ROLE")
logger.info("Training role: {}".format(params.role))
params.current_id = int(os.getenv("PADDLE_TRAINER_ID"))
logger.info("Current Id: {}".format(params.current_id))
params.trainers = int(os.getenv("PADDLE_TRAINERS_NUM"))
logger.info("Trainer num: {}".format(params.trainers))
params.pserver_ports = os.getenv("PADDLE_PORT")
logger.info("Pserver ports: {}".format(params.pserver_ports))
params.pserver_ip = os.getenv("PADDLE_PSERVERS")
logger.info("Pserver IP: {}".format(params.pserver_ip))
params.current_endpoint = os.getenv("POD_IP", "localhost") + ":" + params.pserver_ports
params.cpu_num = os.getenv("CPU_NUM")
logger.info("output path: {}".format(params.model_path))
# Step2: decide communication mode between PSERVER & TRAINER
# recommended mode: pyreader + sync_mode / dataset + async_mode
self.strategy = DistributeTranspilerConfig()
if params.sync_mode == "sync":
self.strategy.sync_mode = True
self.strategy.runtime_split_send_recv = False
self.async_mode = False
params.batch_size = int(params.batch_size / params.trainers)
elif params.sync_mode == "half_async":
self.strategy.sync_mode = False
self.async_mode = False
self.strategy.runtime_split_send_recv = False
elif params.sync_mode == "async" or params.is_dataset_train:
self.strategy.sync_mode = False
self.async_mode = True
self.strategy.runtime_split_send_recv = True
# Step3: Configure communication IP and ports
if params.is_local_cluster:
for port in params.pserver_ports.split(","):
params.pserver_endpoints.append(':'.join(
[params.pserver_ip, port]))
else:
for ip in params.pserver_ip.split(","):
params.pserver_endpoints.append(':'.join(
[ip, params.pserver_ports]))
params.endpoints = ",".join(params.pserver_endpoints)
if params.role == "TRAINER" and params.current_id == 0:
params.is_first_trainer = True
if params.is_local_cluster:
params.train_files_path = "data/train_data"
params.test_files_path = "data/test_data"
        # Step4: According to the TRAINING_ROLE parameter, decide which method to run
train_result = {}
infer_result = {}
if params.role == "PSERVER":
self.run_pserver(params)
elif params.role == "TRAINER":
if params.is_dataset_train:
train_result = self.run_dataset_trainer(params)
elif params.is_pyreader_train:
train_result = self.run_pyreader_trainer(params)
else:
raise ValueError("Please choice training role for current node : PSERVER / TRAINER")
# Step5: If the role is first trainer, after training, perform verification on the test data
result = dict()
if params.is_first_trainer:
infer_result = self.run_infer(params)
result[0] = dict()
result[0]['loss'] = infer_result['loss']
result[0]['auc'] = infer_result['auc']
result[1] = train_result[0]['time']
elif params.role == "TRAINER" and params.current_id != 0:
result[1] = train_result[0]['time']
logger.info(str(result))
logger.info("Distribute train success!")
def run_pserver(self, params):
"""
Function run_pserver: Operation method of parameter server
Args
:params the hyper parameters of network
Returns:
None
"""
logger.info("run pserver")
role = role_maker.UserDefinedRoleMaker(
current_id=params.current_id,
role=role_maker.Role.SERVER,
worker_num=params.trainers,
server_endpoints=params.pserver_endpoints)
fleet.init(role)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
reader = None
inputs = self.input_data(params)
if params.is_pyreader_train:
reader = self.py_reader(params)
inputs = fluid.layers.read_file(reader)
elif not params.is_dataset_train:
raise ValueError("Program must has Date feed method: is_pyreader_train / is_dataset_train")
loss, auc = self.net(inputs, params)
optimizer = fluid.optimizer.SGD(learning_rate=params.learning_rate,
regularization=fluid.regularizer.L2DecayRegularizer(params.reg))
optimizer = fleet.distributed_optimizer(optimizer, self.strategy)
optimizer.minimize(loss)
fleet.init_server()
logger.info("PServer init success!")
with open("pserver_train.proto", 'w') as f:
f.write(str(fluid.default_main_program()))
with open("pserver_startup.proto", 'w') as f:
f.write(str(fluid.default_startup_program()))
fleet.run_server()
def run_dataset_trainer(self, params):
"""
Function run_dataset_trainer: Operation method of training node
Args:
:params params: the hyper parameters of network
Returns
:train_result: the dict of training log
"""
logger.info("run trainer")
role = role_maker.UserDefinedRoleMaker(
current_id=params.current_id,
role=role_maker.Role.WORKER,
worker_num=params.trainers,
server_endpoints=params.pserver_endpoints)
fleet.init(role)
inputs = self.input_data(params)
# Replace it with your network evaluation index,
loss, auc = self.net(inputs, params)
# define the optimizer for your model
optimizer = fluid.optimizer.SGD(learning_rate=params.learning_rate,
regularization=fluid.regularizer.L2DecayRegularizer(params.reg))
optimizer = fleet.distributed_optimizer(optimizer, self.strategy)
optimizer.minimize(loss)
logger.info("Program construction complete")
exe = fluid.Executor(fluid.CPUPlace())
fleet.init_worker()
exe.run(fleet.startup_program)
with open(str(params.current_id) + "_trainer_train.proto", 'w') as f:
f.write(str(fleet.main_program))
with open(str(params.current_id) + "_trainer_startup.proto", 'w') as f:
f.write(str(fleet.startup_program))
train_result = {}
        # Notice: neither the dataset method nor the py_reader method uses feed={dict} to input data
# Paddle Fluid enter data by variable name
# When we do the definition of the reader, the program has established the workflow
logger.info("run dataset train")
dataset = self.dataset_reader(inputs, params)
file_list = [str(params.train_files_path) + "/%s" % x
for x in os.listdir(params.train_files_path)]
if params.is_local_cluster:
file_list = fleet.split_files(file_list)
logger.info("file list: {}".format(file_list))
logger.info('----------------------NO.%s trainer ready----------------' % (params.current_id))
for epoch in range(params.epochs):
dataset.set_filelist(file_list)
start_time = time.clock()
# Notice: function train_from_dataset does not return fetch value
exe.train_from_dataset(program=fleet.main_program, dataset=dataset,
fetch_list=[auc], fetch_info=['auc'],
print_period=100, debug=False)
end_time = time.clock()
self.record_time(epoch, train_result, end_time - start_time)
self.record_memory(epoch, train_result)
logger.info("epoch %d finished, use time=%d\n" % ((epoch), end_time - start_time))
if params.is_first_trainer and params.test:
model_path = str(params.model_path) + '/trainer_' + str(params.current_id) + '_epoch_' + str(epoch)
fleet.save_persistables(executor=exe, dirname=model_path)
if params.is_first_trainer:
train_method = '_dataset_train'
log_path = str(params.log_path + '/' + str(params.current_id) + train_method + '.log')
with open(log_path, 'w+') as f:
f.write(str(train_result))
model_path = str(params.model_path + '/final' + train_method)
fleet.save_persistables(executor=exe, dirname=model_path)
logger.info("Train Success!")
fleet.stop_worker()
return train_result
def run_pyreader_trainer(self, params):
"""
Function run_trainer: Operation method of training node
Args:
:params params: the hyper parameters of network
Returns
:train_result: the dict of training log
"""
logger.info("run trainer")
role = role_maker.UserDefinedRoleMaker(
current_id=params.current_id,
role=role_maker.Role.WORKER,
worker_num=params.trainers,
server_endpoints=params.pserver_endpoints)
fleet.init(role)
exe = fluid.Executor(fluid.CPUPlace())
inputs = self.input_data(params)
reader = self.py_reader(params)
inputs = fluid.layers.read_file(reader)
# Replace it with your network evaluation index,
loss, auc = self.net(inputs, params)
# define the optimizer for your model
optimizer = fluid.optimizer.SGD(learning_rate=params.learning_rate,
regularization=fluid.regularizer.L2DecayRegularizer(params.reg))
optimizer = fleet.distributed_optimizer(optimizer, self.strategy)
optimizer.minimize(loss)
logger.info("Program construction complete")
fleet.init_worker()
exe.run(fleet.startup_program)
with open(str(params.current_id) + "_trainer_train.proto", 'w') as f:
f.write(str(fleet.main_program))
with open(str(params.current_id) + "_trainer_startup.proto", 'w') as f:
f.write(str(fleet.startup_program))
train_result = {}
        # Notice: neither the dataset method nor the py_reader method uses feed={dict} to input data
# Paddle Fluid enter data by variable name
# When we do the definition of the reader, the program has established the workflow
train_generator = py_reader.CriteoDataset()
file_list = [str(params.train_files_path) + "/%s" % x
for x in os.listdir(params.train_files_path)]
if params.is_local_cluster:
file_list = fleet.split_files(file_list)
logger.info("file list: {}".format(file_list))
train_reader = paddle.batch(
paddle.reader.shuffle(
train_generator.train(file_list),
buf_size=params.batch_size * 100
), batch_size=params.batch_size)
reader.decorate_paddle_reader(train_reader)
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = int(params.cpu_num)
build_strategy = fluid.BuildStrategy()
build_strategy.async_mode = self.async_mode
if params.sync_mode == 'async':
build_strategy.memory_optimize = False
if int(params.cpu_num) > 1:
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
compiled_prog = fluid.compiler.CompiledProgram(
fleet.main_program).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy, exec_strategy=exec_strategy)
logger.info('----------------------NO.%s trainer ready----------------' % (params.current_id))
for epoch in range(params.epochs):
reader.start()
start_time = time.clock()
batch_id = 0
try:
while True:
loss_val, auc_val = exe.run(program=compiled_prog, fetch_list=[loss.name, auc.name])
loss_val = np.mean(loss_val)
auc_val = np.mean(auc_val)
if batch_id % 100 == 0 and batch_id != 0:
logger.info("TRAIN --> pass: {} batch: {} loss: {} auc: {}"
.format(epoch, batch_id, loss_val / params.batch_size, auc_val))
batch_id += 1
except fluid.core.EOFException:
reader.reset()
end_time = time.clock()
if params.test and params.is_first_trainer:
model_path = str(params.model_path) + '/trainer_' + str(params.current_id) + '_epoch_' + str(epoch)
fleet.save_persistables(executor=exe, dirname=model_path)
train_result = self.record_time(epoch, train_result, end_time - start_time)
train_result = self.record_memory(epoch, train_result)
logger.info("epoch %d finished, use time=%d\n" % ((epoch + 1), end_time - start_time))
if params.is_first_trainer:
train_method = '_pyreader_train'
log_path = str(params.log_path + '/' + str(params.current_id) + train_method + '.log')
with open(log_path, 'w+') as f:
f.write(str(train_result))
model_path = str(params.model_path + '/final' + train_method)
fleet.save_persistables(executor=exe, dirname=model_path)
logger.info("Train Success!")
fleet.stop_worker()
return train_result
def run_infer(self, params):
"""
Function run_infer: Operation method of training node
Args:
:params params: the hyper parameters of network
Returns
        :infer_result, type: dict, records the evaluation metrics and program resource usage
"""
place = fluid.CPUPlace()
dataset = py_reader.CriteoDataset()
file_list = [str(params.test_files_path) + "/%s" % x
for x in os.listdir(params.test_files_path)]
test_reader = paddle.batch(
dataset.test(file_list), batch_size=params.batch_size)
startup_program = fluid.framework.Program()
test_program = fluid.framework.Program()
def set_zero(var_name):
param = fluid.global_scope().var(var_name).get_tensor()
param_array = np.zeros(param._get_dims()).astype("int64")
param.set(param_array, place)
with fluid.framework.program_guard(test_program, startup_program):
with fluid.unique_name.guard():
inputs = self.input_data(params)
loss, auc_var = self.net(inputs, params)
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=inputs, place=place)
train_method = ''
if params.is_pyreader_train:
train_method = '_pyreader_train/'
else:
train_method = '_dataset_train/'
model_path = params.model_path + '/final' + train_method
fluid.io.load_persistables(
executor=exe,
dirname=model_path,
main_program=fluid.default_main_program())
auc_states_names = ['_generated_var_0', '_generated_var_1', '_generated_var_2', '_generated_var_3']
for name in auc_states_names:
set_zero(name)
run_index = 0
L = []
infer_auc = 0
for batch_id, data in enumerate(test_reader()):
loss_val, auc_val = exe.run(test_program,
feed=feeder.feed(data),
fetch_list=[loss, auc_var])
infer_auc = auc_val
run_index += 1
L.append(loss_val / params.batch_size)
if batch_id % 1000 == 0:
print("TEST --> batch: {} loss: {} auc: {}".format(
batch_id, loss_val / params.batch_size, auc_val))
infer_loss = np.mean(L)
infer_auc = np.mean(infer_auc)
infer_result = {}
infer_result['loss'] = infer_loss
infer_result['auc'] = infer_auc
logger.info(str(infer_result))
logger.info("Inference complete")
return infer_result
def py_reader(self, params):
"""
Function py_reader: define the data read method by fluid.layers.py_reader
help: https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/layers_cn/io_cn.html#py-reader
Args:
:params params: the hyper parameters of network
Returns:
defined by user
"""
raise NotImplementedError(
"py_reader should be implemented by child classes.")
def dataset_reader(self, inputs, params):
"""
Function dataset_reader: define the data read method by fluid.dataset.DatasetFactory
help: https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/dataset_cn.html#fluid-dataset
Args:
        :params inputs: input data, e.g. dataset and labels, defined by function: self.input_data
:params params: the hyper parameters of network
Returns:
defined by user
"""
raise NotImplementedError(
"dataset_reader should be implemented by child classes.")
def record_time(self, epoch, train_result, time):
"""
record the operation time
"""
train_result[epoch] = {}
train_result[epoch]['time'] = time
return train_result
def record_memory(self, epoch, train_result):
info = process_info()
logger.info(info)
train_result[epoch]['memory'] = info['mem']
train_result[epoch]['cpu'] = info['cpu']
train_result[epoch]['rss'] = info['rss']
train_result[epoch]['vsa'] = info['vsa']
return train_result
def process_info():
pid = os.getpid()
res = commands.getstatusoutput('ps aux|grep ' + str(pid))[1].split('\n')[0]
p = re.compile(r'\s+')
l = p.split(res)
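    # ps aux column order: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND,
    # which is why l[2] is cpu, l[3] is mem, l[4] is vsa (VSZ) and l[5] is rss below.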
info = {'user': l[0],
'pid': l[1],
'cpu': l[2],
'mem': l[3],
'vsa': l[4],
'rss': l[5], }
return info
avg_line_length: 41.795411 | max_line_length: 115 | alphanum_fraction: 0.613386
hexsha: e6a062d0572ef02718e8fbdebe1537c58e1ff6a3 | size: 838 | ext: py | lang: Python
max_stars: Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/maf_print_chroms.py @ poojavade/Genomics_Docker (829b5094bba18bbe03ae97daf925fee40a8476e8), licenses ["Apache-2.0"], stars: 1 (2019-07-29T02:53:51.000Z to 2019-07-29T02:53:51.000Z)
max_issues: Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/maf_print_chroms.py @ poojavade/Genomics_Docker (829b5094bba18bbe03ae97daf925fee40a8476e8), licenses ["Apache-2.0"], issues: 1 (2021-09-11T14:30:32.000Z to 2021-09-11T14:30:32.000Z)
max_forks: Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/maf_print_chroms.py @ poojavade/Genomics_Docker (829b5094bba18bbe03ae97daf925fee40a8476e8), licenses ["Apache-2.0"], forks: 2 (2016-12-19T02:27:46.000Z to 2019-07-29T02:53:54.000Z)
#!/usr/bin/python2.7
"""
Read a maf from stdin and print the chromosome number for each alignment. It
searches for 'chr' in each alignment block src, and may not be robust if other
src formats are used.
NOTE: See 'align_print_template.py' for a more general variation of this
program.
usage: %prog refindex [options]
"""
from __future__ import division
import sys
from bx.cookbook import doc_optparse
from bx.align import maf
from optparse import OptionParser
def __main__():
# Parse command line arguments
options, args = doc_optparse.parse( __doc__ )
try:
refindex = int( args[0] )
except:
doc_optparse.exit()
maf_reader = maf.Reader( sys.stdin )
for m in maf_reader:
c = m.components[ refindex ].src
print c[ c.rfind( "chr" ) + 3 : ]
if __name__ == "__main__": __main__()
avg_line_length: 22.052632 | max_line_length: 78 | alphanum_fraction: 0.695704
hexsha: e6d3f8b600851c189da362904eab887decd97dd9 | size: 443 | ext: py | lang: Python
max_stars: zencad/examples/Models/organizer/assembly.py @ Spiritdude/zencad (4e63b1a6306dd235f4daa2791b10249f7546c95b), licenses ["MIT"], stars: 5 (2018-04-11T14:11:40.000Z to 2018-09-12T19:03:36.000Z)
max_issues: zencad/examples/Models/organizer/assembly.py @ Spiritdude/zencad (4e63b1a6306dd235f4daa2791b10249f7546c95b), licenses ["MIT"], issues: null
max_forks: zencad/examples/Models/organizer/assembly.py @ Spiritdude/zencad (4e63b1a6306dd235f4daa2791b10249f7546c95b), licenses ["MIT"], forks: null
#!/usr/bin/env python3
import case
import organizer
from zencad import *
w = 27
h = 20
l = 64
t = 1.5
r = 27 / 2 - 4
z = 1
s = 0.965
d = 5
d2 = 5
m = 3
n = 5
st = organizer.organizer(m, n, w, h, l, t, d, d2)
cs = case.case(w, h, l, t, r, z, s)
ucs = union(
[
cs.translate(t * 1.5 + (w + t) * i, 0, t + (h + t) * j)
for i in range(0, m)
for j in range(0, n)
]
)
disp(ucs)
disp(st, color=(1, 1, 1))
show()
avg_line_length: 12.657143 | max_line_length: 63 | alphanum_fraction: 0.489842
hexsha: fc63a193df46720c73df607e84a4485a4acadc4d | size: 4,043 | ext: py | lang: Python
max_stars: projects/001-geburtstag/Firecat.py @ gaylordmartin179/start-projects (094c7b3ab379bea39eabb31174e479307f9a3af0), licenses ["MIT"], stars: null
max_issues: projects/001-geburtstag/Firecat.py @ gaylordmartin179/start-projects (094c7b3ab379bea39eabb31174e479307f9a3af0), licenses ["MIT"], issues: 2 (2021-04-04T09:16:43.000Z to 2021-04-07T08:53:53.000Z)
max_forks: projects/001-geburtstag/Firecat.py @ gaylordmartin179/start-projects (094c7b3ab379bea39eabb31174e479307f9a3af0), licenses ["MIT"], forks: 2 (2021-04-04T09:19:31.000Z to 2021-05-02T15:06:50.000Z)
#
# Firecat's birthday task code
#
import os
names = []
birthdays = []
def get_cmd():
cmd = input()
if cmd:
return cmd.split(" ")
else:
print("Please enter something as a command.\n")
return
def exe_cmd(cmd):
base_cmd = cmd[0]
if base_cmd == "help":
print("help: shows this list\nadd [name] [birthday]: adds [name]'s birthday as [birthday]\nremove [name]: "
"removes [name]'s birthday\nget birthday [name]: shows [name]'s birthday\nget name [birthday]: shows "
"whose birthday is [birthday] - can return multiple names\nprint file [path]: prints all birthday data "
"into a txt file at [program location]/path\nprint console: prints all birthday data into the console\n"
"exit: stops the program\n")
return 0
elif base_cmd == "add":
if len(cmd) == 3:
names.append(cmd[1])
birthdays.append(cmd[2])
return 1
print("Your command is too long or too short. The add cmd works as follows: \"add [name] [birthday]\".\n")
return
elif base_cmd == "remove":
if len(cmd) == 2:
if cmd[1] in tuple(names):
birthdays.remove(birthdays[names.index(cmd[1])])
names.remove(cmd[1])
return 1
print("The person whose birthday you are trying to remove does not exist.\n")
return
print("Your command : \"remove [name]\".\n")
return
elif base_cmd == "get":
if len(cmd) == 3:
if cmd[1] == "birthday":
if cmd[2] in tuple(names):
print(birthdays[names.index(cmd[2])])
return 0
print("The person whose birthday you are trying to get does not exist.\n")
return
elif cmd[1] == "name":
if cmd[2] in tuple(birthdays):
print(names[birthdays.index(cmd[2])])
return 0
print("No person has that birthday.\n")
return
print("Your command doesn't have the correct length. Please alter the command if you want to get a birthday"
"(\"get birthday [name]\") or a name(\"get name [birthday]\").\n")
return
elif base_cmd == "print":
if len(cmd) == 2:
if cmd[1] == "console":
print_out = []
for i in range(len(names)):
print_out.append(f"{names[i]} - {birthdays[i]}\n")
print("".join(print_out))
return 0
elif len(cmd) == 3:
if cmd[1] == "file":
if os.path.isfile(f"{cmd[2]}.txt"):
print("This file already exists. The program will not overwrite files.")
return
else:
print_out = []
for i in range(len(names)):
print_out.append(f"{names[i]} - [{birthdays[i]}\n")
file = open(f"{cmd[2]}.txt", mode="w", encoding="utf-8")
file.write("".join(print_out))
file.close()
return 1
print("Please use the command like this: \"print console\" or \"print file [path]\".\n")
return
elif base_cmd == "exit" or base_cmd == "stop":
yes = ["YES", "yes", "ok", "OK", "confirm", "sure", "yeah"]
inp = input("Are you sure you want to end the program?\n")
if inp in tuple(yes):
print("Ending the program...\n")
            return "end"  # lets program() detect the exit request; returning 0 would never stop the loop
print("Cancelled ending the program.\n")
return
print("That is not a valid command. Type \"help\" to get a list of commands.\n")
return
def program():
cmd = get_cmd()
if cmd:
exe = exe_cmd(cmd)
if exe == "end":
return
elif exe == 1:
print("Success!\n")
program()
print("\nPlease enter a command. To get a list of commands, use \"help\".\n")
program()
avg_line_length: 36.754545 | max_line_length: 118 | alphanum_fraction: 0.509523
hexsha: 5d0e09d93b212966e3392eaa84308543f3b04d3f | size: 709 | ext: py | lang: Python
max_stars: WiSe-2122/Wiederholung/Vorlesung/kontrollstrukturen-ausnahmebehandlungen.py @ jonasrdt/Wirtschaftsinformatik2 (30d5d896808b98664c55cb6fbb3b30a7f1904d9f), licenses ["MIT"], stars: 1 (2022-03-23T09:40:39.000Z to 2022-03-23T09:40:39.000Z)
max_issues: WiSe-2122/Wiederholung/Vorlesung/kontrollstrukturen-ausnahmebehandlungen.py @ jonasrdt/Wirtschaftsinformatik2 (30d5d896808b98664c55cb6fbb3b30a7f1904d9f), licenses ["MIT"], issues: null
max_forks: WiSe-2122/Wiederholung/Vorlesung/kontrollstrukturen-ausnahmebehandlungen.py @ jonasrdt/Wirtschaftsinformatik2 (30d5d896808b98664c55cb6fbb3b30a7f1904d9f), licenses ["MIT"], forks: null
richtige_eingabe = False
while not richtige_eingabe:
    try:
        # Semantic validation of the two values
        a = int(input("Please enter a number: "))
        b = int(input("Please enter a second number: "))
        if a > b:
            print(a, "is greater than", b)
            richtige_eingabe = True
        elif a == b:
            print(a, "is exactly as large as", b)
            richtige_eingabe = True
        elif a < b:
            print(a, "is not greater than", b)
            richtige_eingabe = True
        else:
            print("No mathematical comparison could be made.")
    except:
        print("Apparently these inputs are not numbers.")
avg_line_length: 35.45 | max_line_length: 79 | alphanum_fraction: 0.572638
hexsha: 53de4ed232ee780b58dc033dbcc6a4c83d45a927 | size: 1,484 | ext: py | lang: Python
max_stars: LF7/EMail/1Stunde.py @ JohannesMuelle/workshops (af9140159e3872aff75864ced99b5163d7bba1ba), licenses ["CC0-1.0"], stars: 5 (2016-07-07T09:00:31.000Z to 2017-03-09T22:46:33.000Z)
max_issues: LF7/EMail/1Stunde.py @ JohannesMuelle/workshops (af9140159e3872aff75864ced99b5163d7bba1ba), licenses ["CC0-1.0"], issues: null
max_forks: LF7/EMail/1Stunde.py @ JohannesMuelle/workshops (af9140159e3872aff75864ced99b5163d7bba1ba), licenses ["CC0-1.0"], forks: 8 (2016-05-13T14:29:06.000Z to 2019-10-20T16:43:32.000Z)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, subprocess
import sys, traceback
import crypt
print("Copy: .my.cnf can hold the password and username of MySQL users. Warning: security risk!")
cmd1 = os.system("cp my.cnf ~/.my.cnf")
# Update and upgrade
print("Updating and upgrading the operating system")
print("Setting the server name to 'mail'")
cmd1 = os.system("echo 'mail' > /etc/hostname")
datei = open("/etc/hosts", "a")
datei.write("127.0.0.1 mail.mysystems.tld mail localhost.localdomain localhost\n")
datei.write("::1 mail.mysystems.tld mail localhost.localdomain ip6-localhost\n")
datei.close()
print("Setting the hostname for the e-mail server")
cmd1 = os.system("echo $(hostname -f) > /etc/mailname")
cmd1 = os.system("apt-get update -qq && sudo apt-get upgrade -y -qq")
print(" Setting password and user for MySQL: fbs/bs")
cmd1 = os.system("debconf-set-selections <<< 'mysql-server mysql-server/root_password password fbs'")
cmd1 = os.system("debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password fbs'")
print(" Installing the database")
cmd1 = os.system("apt install mysql-server -y")
print(" Creating the vmail database.")
cmd1 = os.system("mysql -u root -pfbs -e 'create database vmail;'")
print("Creating the vmail user.")
cmd1 = os.system("mysql -u root -pfbs -e 'GRANT ALL ON vmail.* TO 'vmail'@'localhost' IDENTIFIED BY 'fbs';'")
print("Creating tables")
cmd1 = os.system("mysql 'vmail' < 'mysql'")
avg_line_length: 49.466667 | max_line_length: 125 | alphanum_fraction: 0.724394
hexsha: 4aeea47200170052629377961f8ba8cc3b36c93f | size: 2,513 | ext: py | lang: Python
max_stars: py-td3-cinema/gui/edit.py @ HuguesGuilleus/istyPOO (f460665799be2b2f34a1ebaa9878e06bb028a410), licenses ["BSD-3-Clause"], stars: null
max_issues: py-td3-cinema/gui/edit.py @ HuguesGuilleus/istyPOO (f460665799be2b2f34a1ebaa9878e06bb028a410), licenses ["BSD-3-Clause"], issues: null
max_forks: py-td3-cinema/gui/edit.py @ HuguesGuilleus/istyPOO (f460665799be2b2f34a1ebaa9878e06bb028a410), licenses ["BSD-3-Clause"], forks: null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from gi.repository import Gtk
allValues = {}
struct = None
removeID = 0
def create(builder, s):
"""Edite une Struct dans l'interface utilisateur."""
global allValues, struct, removeID
allValues = {}
struct = s
builder.get_object("indexList").hide()
builder.get_object("newZone").hide()
builder.get_object("save").show()
builder.get_object("rm").show()
builder.get_object("new").hide()
rm = builder.get_object("rm")
rm.show()
if removeID: rm.disconnect(removeID)
removeID = rm.connect("clicked", lambda _ : s.autoremove())
builder.get_object("editZone").show()
builder.get_object("editHead").set_text(f"{s.__class__.__name__}: {s.idValue}")
editTable = builder.get_object("editTable")
editTable.foreach(lambda e : editTable.remove(e))
i = 0
for k, v in s:
i += 1
editTable.insert_row(i)
label = Gtk.Label(k)
editTable.attach(label, 0, i, 1, 1)
button = None
if type(v) == type(""):
button = Gtk.Entry()
button.set_text(v)
button.connect("changed", changeStr(k, button))
elif type(v) == type(False):
button = Gtk.Switch()
button.set_active(v)
button.connect("state-set", changeBool(k, button))
elif type(v) == type(0):
button = Gtk.SpinButton()
button.set_adjustment(Gtk.Adjustment(v, 0, 10000, 1, 10, 0))
button.connect("value-changed", changeNb(k, button))
else:
button = listStore(k, s.getDB(k))
if button == None: continue
editTable.attach(button, 1, i, 1, 1)
editTable.show_all()
def save(_):
if not struct: return
for k, v in allValues.items():
struct[k] = v
print(f"struct: {type(struct)} :: {struct}")
def changeStr(k, b):
"""Renvoie une fonction pour enregistrer dans le tempons la valeur de
l'entré textuelle"""
return lambda _ : allValues.__setitem__(k, b.get_text())
def changeBool(k, b):
"Renvoie une fonction pour enregistrer dans le tempons la valeur du switch"
return lambda _, v : allValues.__setitem__(k, v)
def changeNb(k, b):
return lambda _ : allValues.__setitem__(k, int(b.get_value()))
def listStore(key, db):
if db == None: return None
l = Gtk.ListStore(str, str)
for k, v in db.items():
l.append([str(k), v.idValue])
combo = Gtk.ComboBox.new_with_model_and_entry(l)
combo.connect("changed", lambda _ : changeListStore(key, combo))
combo.set_entry_text_column(1)
return combo
def changeListStore(key, combo):
i = combo.get_active_iter()
if i == None: return
v = combo.get_model()[i][0]
allValues.__setitem__(key, bytes(v, "utf-8"))
avg_line_length: 27.315217 | max_line_length: 80 | alphanum_fraction: 0.68842
hexsha: db68614f2cc9f1f2f998d1580306625cae561358 | size: 577 | ext: py | lang: Python
max_stars: 数据结构/NowCode/5_ReplaceSpace.py @ Blankwhiter/LearningNotes (83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0), licenses ["MIT"], stars: null
max_issues: 数据结构/NowCode/5_ReplaceSpace.py @ Blankwhiter/LearningNotes (83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0), licenses ["MIT"], issues: 3 (2020-08-14T07:50:27.000Z to 2020-08-14T08:51:06.000Z)
max_forks: 数据结构/NowCode/5_ReplaceSpace.py @ Blankwhiter/LearningNotes (83e570bf386a8e2b5aa699c3d38b83e5dcdd9cb0), licenses ["MIT"], forks: 2 (2021-03-14T05:58:45.000Z to 2021-08-29T17:25:52.000Z)
# Replace spaces
# Implement a function that replaces every space in a string with "%20".
# For example, the string "We Are Happy." becomes "We%20Are%20Happy.".
class Solution:
    # s: the source string
def replaceSpace(self, s):
return s.replace(' ', '%20')
def replaceSpace2(self, s):
strLen = len(s)
aaa = []
for i in range(0, strLen):
if s[i] == " ":
aaa.append("%")
aaa.append("2")
aaa.append("0")
else:
aaa.append(s[i])
return ''.join(aaa)
if __name__ == '__main__':
print(Solution().replaceSpace2('We Are Happy'))
avg_line_length: 27.47619 | max_line_length: 81 | alphanum_fraction: 0.506066
hexsha: 535f8bdaa8e6e41f06359cf8a97ab1ad425aa3c6 | size: 4,073 | ext: py | lang: Python
max_stars: bazzell/www/basket.py @ libracore/bazzell (f4cf4ec5b9f264f68d853e7b946fd49bd0d5e4d9), licenses ["MIT"], stars: null
max_issues: bazzell/www/basket.py @ libracore/bazzell (f4cf4ec5b9f264f68d853e7b946fd49bd0d5e4d9), licenses ["MIT"], issues: null
max_forks: bazzell/www/basket.py @ libracore/bazzell (f4cf4ec5b9f264f68d853e7b946fd49bd0d5e4d9), licenses ["MIT"], forks: 1 (2021-08-14T22:25:24.000Z to 2021-08-14T22:25:24.000Z)
# -*- coding: utf-8 -*-
# Copyright (c) 2020, libracore.com and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import json
from bazzell.www.selection import get_customer, get_stocks
from erpnext.selling.doctype.quotation.quotation import make_sales_order
no_cache = True
def get_context(context):
if frappe.session.user == 'Guest':
frappe.throw(_("You need to be logged in to access this page"), frappe.PermissionError)
#get customer
customer = get_customer(frappe.session.user)
context["items"] = get_items(customer.name)
context["currency"] = get_currency(customer.name)
return context
def get_items(customer):
quotation = frappe.db.sql("""SELECT `name` FROM `tabQuotation` WHERE `party_name` = '{customer}' AND `docstatus` = 0 LIMIT 1""".format(customer=customer), as_dict=True)
items = frappe.db.sql("""SELECT `qty`, `item_code`, `item_name`, `rate`, `uom`, `stock_uom`, `conversion_factor` FROM `tabQuotation Item` WHERE `parent` = '{quotation}'""".format(quotation=quotation[0].name), as_dict=True)
for item in items:
item["stock"] = get_stocks(item.item_code)
return items
def get_currency(customer):
quotation = frappe.db.sql("""SELECT `currency` FROM `tabQuotation` WHERE `party_name` = '{customer}' AND `docstatus` = 0 LIMIT 1""".format(customer=customer), as_dict=True)
return quotation[0].currency
@frappe.whitelist()
def order_now():
customer = get_customer(frappe.session.user)
_quotation = frappe.db.sql("""SELECT `name` FROM `tabQuotation` WHERE `party_name` = '{customer}' AND `docstatus` = 0 LIMIT 1""".format(customer=customer.name), as_dict=True)[0].name
quotation = frappe.get_doc("Quotation", _quotation).submit()
sales_order = frappe.get_doc(make_sales_order(_quotation))
sales_order.insert(ignore_permissions=True)
return sales_order
@frappe.whitelist()
def change_qtn(item_code, qty):
#get customer
customer = get_customer(frappe.session.user)
quotation = frappe.db.sql("""SELECT `name` FROM `tabQuotation` WHERE `party_name` = '{customer}' AND `docstatus` = 0 LIMIT 1""".format(customer=customer.name), as_dict=True)
items = frappe.db.sql("""SELECT `item_code`, `rate`, `name` FROM `tabQuotation Item` WHERE `parent` = '{quotation}'""".format(quotation=quotation[0].name), as_dict=True)
if int(qty) > 0:
for item in items:
if item.item_code == item_code:
new_amount = item.rate * int(qty)
frappe.db.sql("""UPDATE `tabQuotation Item` SET `qty` = '{qty}', `amount` = '{new_amount}' WHERE `name` = '{name}' AND `parent` = '{qtn}'""".format(qty=qty, name=item.name, new_amount=new_amount, qtn=quotation[0].name), as_list=True)
quotation = frappe.get_doc("Quotation", quotation[0].name).save(ignore_permissions=True)
return 'changed'
else:
for item in items:
if item.item_code == item_code:
frappe.db.sql("""DELETE FROM `tabQuotation Item` WHERE `name` = '{name}'""".format(name=item.name), as_list=True)
check_qtn_qty = frappe.db.sql("""SELECT COUNT(`name`) FROM `tabQuotation Item` WHERE `parent` = '{qtn}'""".format(qtn=quotation[0].name), as_list=True)[0][0]
if check_qtn_qty > 0:
quotation = frappe.get_doc("Quotation", quotation[0].name).save(ignore_permissions=True)
else:
quotation = frappe.get_doc("Quotation", quotation[0].name).delete()
return 'qtn deleted'
return 'reload'
@frappe.whitelist()
def get_totals():
customer = get_customer(frappe.session.user)
quotation = frappe.db.sql("""SELECT `name` FROM `tabQuotation` WHERE `party_name` = '{customer}' AND `docstatus` = 0 LIMIT 1""".format(customer=customer.name), as_dict=True)
quotation = frappe.get_doc("Quotation", quotation[0].name)
return {
'total': quotation.total,
'tax': quotation.total_taxes_and_charges,
'grand_total': quotation.grand_total
}
avg_line_length: 50.283951 | max_line_length: 249 | alphanum_fraction: 0.678861
hexsha: be529258ff6077cd6d47f0a0128fe7d14511cf42 | size: 825 | ext: py | lang: Python
max_stars: src/ingestion/transformers/monosi/anomalies.py @ monosidev/monosi (a88b689fc74010b10dbabb32f4b2bdeae865f4d5), licenses ["Apache-2.0"], stars: 156 (2021-11-19T18:50:14.000Z to 2022-03-31T19:48:59.000Z)
max_issues: src/ingestion/transformers/monosi/anomalies.py @ monosidev/monosi (a88b689fc74010b10dbabb32f4b2bdeae865f4d5), licenses ["Apache-2.0"], issues: 30 (2021-12-27T19:30:56.000Z to 2022-03-30T17:49:00.000Z)
max_forks: src/ingestion/transformers/monosi/anomalies.py @ monosidev/monosi (a88b689fc74010b10dbabb32f4b2bdeae865f4d5), licenses ["Apache-2.0"], forks: 14 (2022-01-17T23:24:34.000Z to 2022-03-29T09:27:47.000Z)
from ingestion.transformers.base import Transformer
class AnomalyTransformer(Transformer):
@classmethod
def _transform(cls, zscores):
return list(filter(lambda x: x['error'] == True, zscores))
@classmethod
def _original_schema(cls):
return {
"type": "array",
"items": {
"type": "object",
"required": ["error"]
},
"minItems": 1
}
@classmethod
def _normalized_schema(cls):
return {
"type": "array",
"items": {
"type": "object",
"properties": {
"error": {
"type": "boolean",
"const": True
},
},
"required": ["error"]
},
"minItems": 1
}
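# Illustrative example: _transform keeps only the rows flagged as anomalous, e.g.
#   AnomalyTransformer._transform([{"error": True, "zscore": 3.1},
#                                  {"error": False, "zscore": 0.2}])
#   # -> [{"error": True, "zscore": 3.1}]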
avg_line_length: 23.571429 | max_line_length: 66 | alphanum_fraction: 0.437576
hexsha: 22e073034c450e34ebbcbff8a89c350bce34306b | size: 220 | ext: py | lang: Python
max_stars: src/onegov/swissvotes/collections/__init__.py @ politbuero-kampagnen/onegov-cloud (20148bf321b71f617b64376fe7249b2b9b9c4aa9), licenses ["MIT"], stars: null
max_issues: src/onegov/swissvotes/collections/__init__.py @ politbuero-kampagnen/onegov-cloud (20148bf321b71f617b64376fe7249b2b9b9c4aa9), licenses ["MIT"], issues: null
max_forks: src/onegov/swissvotes/collections/__init__.py @ politbuero-kampagnen/onegov-cloud (20148bf321b71f617b64376fe7249b2b9b9c4aa9), licenses ["MIT"], forks: null
from onegov.swissvotes.collections.pages import TranslatablePageCollection
from onegov.swissvotes.collections.votes import SwissVoteCollection
__all__ = (
'SwissVoteCollection',
'TranslatablePageCollection',
)
avg_line_length: 24.444444 | max_line_length: 74 | alphanum_fraction: 0.822727
hexsha: 4aaa930b3aa1eaf5428764e58a328d69c64d9262 | size: 1,686 | ext: py | lang: Python
max_stars: myspiders/ruia/middleware.py @ zhouhongf/bank_hr (a42e5e18f3ec36b1ec65931415fe476c9690e0a0), licenses ["MIT"], stars: 2 (2021-11-27T06:40:47.000Z to 2022-01-06T03:12:46.000Z)
max_issues: myspiders/ruia/middleware.py @ zhouhongf/bank_hr (a42e5e18f3ec36b1ec65931415fe476c9690e0a0), licenses ["MIT"], issues: null
max_forks: myspiders/ruia/middleware.py @ zhouhongf/bank_hr (a42e5e18f3ec36b1ec65931415fe476c9690e0a0), licenses ["MIT"], forks: null
from collections import deque
from functools import wraps
class Middleware:
"""
Define a middleware to customize the crawler request or response
eg: middleware = Middleware()
    The purpose of Middleware is to do some processing before and after each
    request, in two flavors:
    do something before each request: @middleware.request
    do something after each response: @middleware.response
"""
def __init__(self):
# request middleware
self.request_middleware = deque()
# response middleware
self.response_middleware = deque()
def request(self, *args, **kwargs):
"""
Define a Decorate to be called before a request.
eg: @middleware.request
"""
middleware = args[0]
@wraps(middleware)
def register_middleware(*args, **kwargs):
self.request_middleware.append(middleware)
return middleware
return register_middleware()
def response(self, *args, **kwargs):
"""
Define a Decorate to be called after a response.
eg: @middleware.response
"""
middleware = args[0]
@wraps(middleware)
def register_middleware(*args, **kwargs):
self.response_middleware.appendleft(middleware)
return middleware
return register_middleware()
def __add__(self, other):
new_middleware = Middleware()
# asc
new_middleware.request_middleware.extend(self.request_middleware)
new_middleware.request_middleware.extend(other.request_middleware)
# desc
new_middleware.response_middleware.extend(other.response_middleware)
new_middleware.response_middleware.extend(self.response_middleware)
return new_middleware
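# Minimal usage sketch (handler names are illustrative; the hook signatures follow
# the ruia convention this module appears to be vendored from, which is an assumption here):
#   middleware = Middleware()
#
#   @middleware.request
#   async def add_header(spider_ins, request):
#       request.headers.update({"User-Agent": "my-bot"})
#
#   @middleware.response
#   async def log_status(spider_ins, request, response):
#       print(response.url, response.status)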
avg_line_length: 29.578947 | max_line_length: 76 | alphanum_fraction: 0.655991
hexsha: 4ac548c2d7298281e9294aca2405d481b5fb5b82 | size: 2,051 | ext: py | lang: Python
max_stars: project/cli/test.py @ DanielGrams/cityservice (c487c34b5ba6541dcb441fe903ab2012c2256893), licenses ["MIT"], stars: null
max_issues: project/cli/test.py @ DanielGrams/cityservice (c487c34b5ba6541dcb441fe903ab2012c2256893), licenses ["MIT"], issues: 35 (2022-01-24T22:15:59.000Z to 2022-03-31T15:01:35.000Z)
max_forks: project/cli/test.py @ DanielGrams/cityservice (c487c34b5ba6541dcb441fe903ab2012c2256893), licenses ["MIT"], forks: null
import json
import click
from flask.cli import AppGroup
from flask_migrate import stamp
from sqlalchemy import MetaData
from project import app, db
from project.init_data import create_initial_data
from tests.model_seeder import ModelSeeder
test_cli = AppGroup("test")
seeder = ModelSeeder(db)
@test_cli.command("reset")
@click.option("--seed/--no-seed", default=False)
def reset(seed):
meta = MetaData(bind=db.engine, reflect=True)
con = db.engine.connect()
trans = con.begin()
for table in meta.sorted_tables:
con.execute(f'ALTER TABLE "{table.name}" DISABLE TRIGGER ALL;')
con.execute(table.delete())
con.execute(f'ALTER TABLE "{table.name}" ENABLE TRIGGER ALL;')
trans.commit()
if seed:
create_initial_data()
click.echo("Reset done.")
@test_cli.command("drop-all")
def drop_all():
db.drop_all()
db.engine.execute("DROP TABLE IF EXISTS alembic_version;")
click.echo("Drop all done.")
@test_cli.command("create-all")
def create_all():
stamp()
db.create_all()
click.echo("Create all done.")
@test_cli.command("seed")
def seed():
create_initial_data()
click.echo("Seed done.")
@test_cli.command("create-common-scenario")
def create_common_scenario():
seeder.create_common_scenario()
click.echo("Created common scenario.")
@test_cli.command("news-item-create")
def create_news_item():
news_feed_id = seeder.create_news_feed()
news_item_id = seeder.create_news_item(news_feed_id)
result = {
"news_item_id": news_item_id,
"news_feed_id": news_feed_id,
}
click.echo(json.dumps(result))
@test_cli.command("news-feed-create")
def create_news_feed():
news_feed_id = seeder.create_news_feed()
result = {
"news_feed_id": news_feed_id,
}
click.echo(json.dumps(result))
@test_cli.command("place-create")
def create_place():
place_id = seeder.create_place()
result = {
"place_id": place_id,
}
click.echo(json.dumps(result))
app.cli.add_command(test_cli)
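# Editor's note: with this AppGroup registered, the commands above are expected
# to be invoked via the Flask CLI roughly as follows (invocation names inferred
# from the decorators above, not verified against project docs):
#   flask test reset --seed
#   flask test drop-all && flask test create-all
#   flask test news-item-create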
| 22.293478 | 71 | 0.691858 |
435ec39933a6d7197454e318f6a7670bb2e4ed7e
| 424 |
py
|
Python
|
src/onegov/org/views/qrcode.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/views/qrcode.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/views/qrcode.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from morepath.request import Response
from onegov.core.security import Public
from onegov.org import OrgApp
from onegov.qrcode import QrCode
@OrgApp.view(model=QrCode, permission=Public, request_method='GET')
def get_qr_code_from_payload(self, request):
return Response(
self.encoded_image,
content_type=self.content_type,
content_disposition=f'inline; filename=qrcode.{self.img_format}'
)
| 30.285714 | 72 | 0.766509 |
436151138c15608b115e78d6e2e8fb01df231ba8
| 2,838 |
py
|
Python
|
tapia/exercise1/ex1.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 11 |
2017-04-21T11:39:55.000Z
|
2022-02-11T20:25:18.000Z
|
tapia/exercise1/ex1.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 69 |
2017-04-26T09:30:38.000Z
|
2017-08-01T11:31:21.000Z
|
tapia/exercise1/ex1.py
|
appfs/appfs
|
8cbbfa0e40e4d4a75a498ce8dd894bb2fbc3a9e3
|
[
"MIT"
] | 53 |
2017-04-20T16:16:11.000Z
|
2017-07-19T12:53:01.000Z
|
"""@package docstring
Author: Roxana Tapia Campos
Module: Advanced practical programming skills for scientists
SS17 - TU Berlin
Exercise 1: GeoMean
"""
import sys
import math
LINES_COUNT = 0
VALID_VALUES_LOC1 = 0
LOG_SUM_LOC1 = 0
VALID_VALUES_LOC2 = 0
LOG_SUM_LOC2 = 0
def read_file(filename):
"""
    Reads a file, counts its lines and yields each valid line.
:param filename: A filename from command line.
"""
global LINES_COUNT
with open(filename) as f:
for line in f:
LINES_COUNT += 1
if is_valid(line):
yield line
def is_valid(line):
"""
Returns True if the line is valid, False otherwise.
:param line: A raw input line from the file.
:rtype: Boolean
"""
values = line.split(';')
wrong_args_number = len(values) != 3
is_empty_line = not line.strip()
is_comment = line.startswith("#")
if wrong_args_number or is_empty_line or is_comment:
print('Wrong format, skipping line.')
return False
try:
int(values[1])
float_value = float(values[2])
if math.isnan(float_value) or float_value <= 0.0:
return False
except ValueError:
return False
return True
def aggregate_data(line):
"""
Aggregates line counts and logarithm calculations for locations 1 and 2.
Checks if location is valid.
:param line: A validated input line from the file.
"""
global VALID_VALUES_LOC1, VALID_VALUES_LOC2, LOG_SUM_LOC1, LOG_SUM_LOC2
values = line.split(';')
location = int(values[1])
point = float(values[2])
result = math.log(point)
if location == 1:
VALID_VALUES_LOC1 += 1
LOG_SUM_LOC1 += result
elif location == 2:
VALID_VALUES_LOC2 += 1
LOG_SUM_LOC2 += result
else:
print('Location not supported.')
def compute_result():
"""
Computes the geometric mean for locations 1 and 2 and displays the formatted result.
"""
geo_mean_loc1 = math.exp(LOG_SUM_LOC1/VALID_VALUES_LOC1)
geo_mean_loc2 = math.exp(LOG_SUM_LOC2/VALID_VALUES_LOC2)
    # warn if either geometric mean evaluated to NaN
    if math.isnan(geo_mean_loc1) or math.isnan(geo_mean_loc2):
        print('Warning: a geometric mean evaluated to NaN')
print("File: {filename} with {lines_count} lines".format(
filename=sys.argv[-1],
lines_count=LINES_COUNT,
))
print("Valid values Loc1: {valid_values_loc1} with GeoMean: {geo_mean_loc1}".format(
valid_values_loc1=VALID_VALUES_LOC1,
geo_mean_loc1=geo_mean_loc1
))
print("Valid values Loc2: {valid_values_loc2} with GeoMean: {geo_mean_loc2}".format(
valid_values_loc2=VALID_VALUES_LOC2,
geo_mean_loc2=geo_mean_loc2
))
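def _geomean_reference(values):
    """Editor's addition: minimal reference implementation of the identity
    used by compute_result(), geomean(x_1..x_n) = exp((1/n) * sum(ln x_i)).
    For example, _geomean_reference([2.0, 8.0]) == 4.0."""
    return math.exp(sum(math.log(v) for v in values) / len(values))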
if __name__ == '__main__':
content = read_file(sys.argv[-1])
for line in content:
aggregate_data(line)
compute_result()
| 23.262295 | 89 | 0.653629 |
43a121e2caf0c3426f2fb9231a89716afa22c661
| 309 |
py
|
Python
|
handlemoursedata/main.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
handlemoursedata/main.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | 2 |
2021-03-25T22:00:07.000Z
|
2022-01-20T15:51:48.000Z
|
handlemoursedata/main.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Created on 2019-04-19
@FileName: main.py
@Description: project entry point
@author: 'Aaron.Qiu'
@version V1.0.0
'''
from com.aaron.load_mouse_data import load_excel
from com.aaron.dao.base_dao import create_db,drop_db
if __name__ == "__main__":
# create_db()
# drop_db()
load_excel()
| 20.6 | 52 | 0.68932 |
e82315a6966f473770759385f5d38a54b2f2b827
| 2,363 |
py
|
Python
|
kicker/control_human_automatic_strategy.py
|
GeorgJohn/2D_Kicker_git
|
8a451f2d573d703ceac1c2011f26e6ff953d2090
|
[
"MIT"
] | null | null | null |
kicker/control_human_automatic_strategy.py
|
GeorgJohn/2D_Kicker_git
|
8a451f2d573d703ceac1c2011f26e6ff953d2090
|
[
"MIT"
] | null | null | null |
kicker/control_human_automatic_strategy.py
|
GeorgJohn/2D_Kicker_git
|
8a451f2d573d703ceac1c2011f26e6ff953d2090
|
[
"MIT"
] | 1 |
2018-08-24T17:53:41.000Z
|
2018-08-24T17:53:41.000Z
|
import math
from kicker.CONST_BALL import Coordinate
from kicker.CONST_KICKER import COURT_HEIGHT
from kicker.CONST_GAME_FIGURES import FIGURE_FOOT_HEIGHT
class HumanStrategy:
def __init__(self, kicker):
self.kicker = kicker
def next_move(self):
        # ball heading lies within +/- 90 degrees, i.e. it is moving towards the
        # human side: track its y-position with both bars, offset by half a
        # figure foot so the foot's edge lines up with the ball
        if - math.pi / 2 < self.kicker.ball.angle < math.pi / 2:
if self.kicker.ball.pos[Coordinate.Y] < COURT_HEIGHT / 2:
new_pos_keeper = self.kicker.ball.pos[Coordinate.Y] - self.kicker.human_keeper.POSITION_ON_BAR - \
FIGURE_FOOT_HEIGHT / 2
new_pos_defender = self.kicker.ball.pos[Coordinate.Y] - \
self.kicker.human_defender.POSITION_ON_BAR_DEFENDER_LEFT + FIGURE_FOOT_HEIGHT / 2
elif self.kicker.ball.pos[Coordinate.Y] > COURT_HEIGHT / 2:
new_pos_keeper = self.kicker.ball.pos[Coordinate.Y] - self.kicker.human_keeper.POSITION_ON_BAR \
+ FIGURE_FOOT_HEIGHT / 2
new_pos_defender = self.kicker.ball.pos[Coordinate.Y] - \
self.kicker.human_defender.POSITION_ON_BAR_DEFENDER_RIGHT - FIGURE_FOOT_HEIGHT / 2
else:
new_pos_keeper = self.kicker.ball.pos[Coordinate.Y] - self.kicker.human_keeper.POSITION_ON_BAR
new_pos_defender = self.kicker.ball.pos[Coordinate.Y] - \
self.kicker.human_defender.POSITION_ON_BAR_DEFENDER_RIGHT - FIGURE_FOOT_HEIGHT / 2
            # clamp both targets to the mechanical limits of their bars
            if new_pos_keeper > self.kicker.human_keeper.MAX_POS_KEEPER:
new_pos_keeper = self.kicker.human_keeper.MAX_POS_KEEPER
elif new_pos_keeper < 0:
new_pos_keeper = 0
if new_pos_defender > self.kicker.human_defender.MAX_POS_DEFENDER:
new_pos_defender = self.kicker.human_defender.MAX_POS_DEFENDER
elif new_pos_defender < 0:
new_pos_defender = 0
        else:
            # ball is moving away from the human side: park keeper and
            # defender at the middle of their bars
            new_pos_keeper = self.kicker.human_keeper.MAX_POS_KEEPER / 2
            new_pos_defender = self.kicker.human_defender.MAX_POS_DEFENDER / 2
self.kicker.human_keeper.next_position = new_pos_keeper
self.kicker.human_defender.next_position = new_pos_defender
self.kicker.human_keeper.move_bar()
self.kicker.human_defender.move_bar()
| 50.276596 | 117 | 0.645366 |
1c7d1ee3ec47c77b70391dda7b6af41c07b50aa8
| 7,361 |
py
|
Python
|
Python-Encrypt-master/sploit.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
Python-Encrypt-master/sploit.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
Python-Encrypt-master/sploit.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
import base64, zlib, marshal, sys
W = '\x1b[1;37m'
RR = '\x1b[1;37m\x1b[31m'
O = '\x1b[33m'
B = '\x1b[34m'
print ("{}{}\n______ _ \n| ___ \\ | | \n| |_/ / _ _ ___ _ __ _ _ _ __ | |_ ___ _ __ \n| __/ | | | | / __|| '__|| | | || '_ \\ | __| / _ \\ | '__|\n| | | |_| || (__ | | | |_| || |_) || |_ | (_) || | \n\\_| \\__, | \\___||_| \\__, || .__/ \\__| \\___/ |_| \n __/ | __/ || | \n |___/ |___/ |_| - {}Coders By TheSploit\n \n{}[==) {}Menu:\n\t\n {}[{}01{}]{} endcrypt file dengan base64\n {}[{}02{}]{} endcrypt file dengan base16\n {}[{}03{}]{} endcrypt file dengan base32\n {}[{}04{}]{} endcrypt file dengan marshal\n {}[{}05{}]{} endcrypt file dengan zlib&base64\n {}[{}06{}]{} endcrypt file dengan zlib&base64&marshal\n {}[{}07{}]{} endcrypt file dengan zlib&base16&marshal\n {}[{}08{}]{} endcrypt file dengan zlib&base32&marshal\n {}[{}09{}]{} keluar\n {}[{}10{}]{} tentang\n").format(W, O, W, RR, W, B, RR, B, W, B, RR, B, W, B, RR, B, W, B, RR, B, W, B, RR, B, W, B, RR, B, W, B, RR, B, W, B, RR, B, W, B, RR, B, W, B, RR, B, W)
def main():
    choice = raw_input(RR + '[==) ' + W + 'Choose an option: ')
if choice == '1' or choice == '01':
try:
file = raw_input(RR + '[+] ' + W + 'File: ')
fileopen = open(file).read()
a = base64.b64encode(fileopen)
b = "import base64\nexec(base64.b64decode('" + a + "'))"
c = file.replace('.py', '_endcrypt.py')
d = open(c, 'w')
d.write(b)
d.close()
print RR + '[*] ' + W + 'OUTPUT:', c
main()
except:
            print RR + '[-] ' + W + 'File not found!'
main()
if choice == '2' or choice == '02':
try:
file = raw_input(RR + '[+] ' + W + 'File: ')
fileopen = open(file).read()
a = base64.b16encode(fileopen)
b = "import base64\nexec(base64.b16decode('" + a + "'))"
c = file.replace('.py', '_endcrypt.py')
d = open(c, 'w')
d.write(b)
d.close()
print RR + '[*] ' + W + 'OUTPUT:', c
main()
except:
            print RR + '[-] ' + W + 'File not found!'
main()
if choice == '3' or choice == '03':
try:
file = raw_input(RR + '[+] ' + W + 'File: ')
fileopen = open(file).read()
a = base64.b32encode(fileopen)
b = "import base64\nexec(base64.b32decode('" + a + "'))"
c = file.replace('.py', '_endcrypt.py')
d = open(c, 'w')
d.write(b)
d.close()
print RR + '[*] ' + W + 'OUTPUT:', c
main()
except:
            print RR + '[-] ' + W + 'File not found!'
main()
if choice == '4' or choice == '04':
try:
file = raw_input(RR + '[+] ' + W + 'File: ')
fileopen = open(file).read()
a = compile(fileopen, 'dg', 'exec')
m = marshal.dumps(a)
s = repr(m)
b = 'import marshal\nexec(marshal.loads(' + s + '))'
c = file.replace('.py', '_endcrypt.py')
d = open(c, 'w')
d.write(b)
d.close()
print RR + '[*] ' + W + 'OUTPUT:', c
main()
except:
            print RR + '[-] ' + W + 'File not found!'
main()
if choice == '5' or choice == '05':
try:
file = raw_input(RR + '[+] ' + W + 'File: ')
fileopen = open(file).read()
c = zlib.compress(fileopen)
d = base64.b64encode(c)
e = 'import marshal,zlib,base64\nexec(zlib.decompress(base64.b64decode("' + d + '")))'
f = file.replace('.py', '_endcrypt.py')
g = open(f, 'w')
g.write(e)
g.close()
print RR + '[*] ' + W + 'OUTPUT:', f
main()
except:
            print RR + '[-] ' + W + 'File not found!'
main()
if choice == '6' or choice == '06':
try:
file = raw_input(RR + '[+] ' + W + 'File: ')
fileopen = open(file).read()
sa = compile(fileopen, 'dg', 'exec')
sb = marshal.dumps(sa)
c = zlib.compress(sb)
d = base64.b64encode(c)
e = 'import marshal,zlib,base64\nexec(marshal.loads(zlib.decompress(base64.b64decode("' + str(d) + '"))))'
f = file.replace('.py', '_endcrypt.py')
g = open(f, 'w')
g.write(e)
g.close()
print RR + '[*] ' + W + 'OUTPUT:', f
main()
except:
            print RR + '[-] ' + W + 'File not found!'
main()
if choice == '7' or choice == '07':
try:
file = raw_input(RR + '[+] ' + W + 'File: ')
fileopen = open(file).read()
sa = compile(fileopen, 'dg', 'exec')
sb = marshal.dumps(sa)
c = zlib.compress(sb)
d = base64.b16encode(c)
e = 'import marshal,zlib,base64\nexec(marshal.loads(zlib.decompress(base64.b16decode("' + str(d) + '"))))'
f = file.replace('.py', '_endcrypt.py')
g = open(f, 'w')
g.write(e)
g.close()
print RR + '[*] ' + W + 'OUTPUT:', f
main()
except:
            print RR + '[-] ' + W + 'File not found!'
main()
if choice == '8' or choice == '08':
try:
file = raw_input(RR + '[+] ' + W + 'File: ')
fileopen = open(file).read()
sa = compile(fileopen, 'dg', 'exec')
sb = marshal.dumps(sa)
c = zlib.compress(sb)
d = base64.b32encode(c)
e = 'import marshal,zlib,base64\nexec(marshal.loads(zlib.decompress(base64.b32decode("' + str(d) + '"))))'
f = file.replace('.py', '_endcrypt.py')
g = open(f, 'w')
g.write(e)
g.close()
print RR + '[*] ' + W + 'OUTPUT:', f
main()
except:
            print RR + '[-] ' + W + 'File not found!'
main()
if choice == '9' or choice == '09':
sys.exit(RR + '[*] ' + W + 'See you and Thanks for using this tools')
else:
if choice == '10':
print '\n\tCODERS\n\t===================================\n\tCODERS BY : TheSploit\n\tCODERS : Sploit1109\n\tPASTEBIN : https://pastebin.com/u/TheSPloit\n\tGITHUB : https://github.com/TheSploit\n\t\t \n\tSUBS My YOUTUBE ACCOUNT FOR MORE DETAILS\n\t===================================\n\tYoutube : TryOne\n\tGITHUB : TheSploit\n\t===================================\n\t'
main()
else:
            print RR + '[-] ' + W + 'Sorry, wrong input ^_^'
sys.exit(RR + '[*] ' + W + 'See you and Thanks for using this tools')
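# --- Editor's sketch (added for clarity): the core round-trip behind options
# 5-8 above, shown in isolation with plain standard-library calls.
def _demo_roundtrip():
    source = "print('hello')"
    # compile -> marshal -> compress -> base64, then reverse the chain and run it
    packed = base64.b64encode(zlib.compress(marshal.dumps(compile(source, '<demo>', 'exec'))))
    code_obj = marshal.loads(zlib.decompress(base64.b64decode(packed)))
    exec(code_obj)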
if __name__ == '__main__':
main()
| 44.884146 | 1,136 | 0.416248 |
c7b8e741206c3c42c8548aaad20a97708a25b815
| 452 |
py
|
Python
|
admin-tutorial/AdminDateHierarchy/simple/models.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 5 |
2020-07-14T07:48:10.000Z
|
2021-12-20T21:20:10.000Z
|
admin-tutorial/AdminDateHierarchy/simple/models.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 7 |
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
admin-tutorial/AdminDateHierarchy/simple/models.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 1 |
2021-02-16T07:04:25.000Z
|
2021-02-16T07:04:25.000Z
|
from django.db import models
# Create your models here.
class ArticleModel(models.Model):
    title = models.CharField(verbose_name="文章标题", max_length=200)  # article title
    content = models.TextField(verbose_name="文章内容", max_length=5000)  # article content
    author = models.CharField(verbose_name="作者", max_length=50)  # author
    # creation time: auto_now_add stamps the row once on insert (auto_now
    # would overwrite the creation time on every save)
    date_joined = models.DateTimeField(verbose_name="创建时间", auto_now_add=True)
date_last_change = models.DateTimeField(verbose_name="最后一次修改时间", auto_now=True)
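# --- Editor's sketch (assumption: this model backs the AdminDateHierarchy demo
# suggested by the file path). A minimal admin registration would enable the
# date drill-down on the creation timestamp, e.g. in admin.py:
#
#   from django.contrib import admin
#
#   @admin.register(ArticleModel)
#   class ArticleAdmin(admin.ModelAdmin):
#       date_hierarchy = 'date_joined'
#       list_display = ('title', 'author', 'date_joined')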
| 34.769231 | 83 | 0.763274 |
4000322822874c488bff48d0776c518876b9a472
| 28,140 |
py
|
Python
|
Packs/Oracle_IAM/Integrations/OracleIAM/OracleIAM.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Oracle_IAM/Integrations/OracleIAM/OracleIAM.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Oracle_IAM/Integrations/OracleIAM/OracleIAM.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
import traceback
import base64
import urllib3
# Disable insecure warnings
urllib3.disable_warnings()
ERROR_CODES_TO_SKIP = [
404
]
'''CLIENT CLASS'''
def build_body_request_for_update_user(old_user_data, new_user_data):
operations = []
for key, value in new_user_data.items():
operation = {
'op': 'replace' if key in old_user_data.keys() else 'add',
'path': key,
'value': [value] if key in ('emails', 'phoneNumbers') and not isinstance(value, list) else value,
}
operations.append(operation)
data = {
'schemas': ['urn:ietf:params:scim:api:messages:2.0:PatchOp'],
'Operations': operations,
}
return data
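# Editor's example (illustrative values): a key already present in the old user
# data becomes a 'replace' operation, a new key becomes 'add', and 'emails' /
# 'phoneNumbers' values get wrapped in a list:
#
#   build_body_request_for_update_user(
#       {'displayName': 'Old Name'},
#       {'displayName': 'New Name', 'emails': {'value': 'a@b.c', 'primary': True}},
#   )
#   => {'schemas': ['urn:ietf:params:scim:api:messages:2.0:PatchOp'],
#       'Operations': [
#           {'op': 'replace', 'path': 'displayName', 'value': 'New Name'},
#           {'op': 'add', 'path': 'emails',
#            'value': [{'value': 'a@b.c', 'primary': True}]}]}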
class Client(BaseClient):
""" A client class that implements logic to authenticate with the application. """
def __init__(self, base_url, verify=True, proxy=False, ok_codes=tuple(), headers=None, client_id=None,
client_secret=None):
super().__init__(base_url, verify, proxy, ok_codes, headers)
self.base_url = base_url
self.verify = verify
self.client_id = client_id
self.client_secret = client_secret
self.headers = headers
self.headers['Authorization'] = f'Bearer {self.get_access_token()}'
def get_access_token(self):
client_id_and_secret = f'{self.client_id}:{self.client_secret}'
# Standard Base64 Encoding
encodedBytes = base64.b64encode(client_id_and_secret.encode('utf-8'))
encodedStr = str(encodedBytes, 'utf-8')
headers = {
'Authorization': f'Basic {encodedStr}',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
data = {
'grant_type': 'client_credentials',
'scope': 'urn:opc:idm:__myscopes__',
}
token = self._http_request('POST', url_suffix='/oauth2/v1/token', headers=headers, data=data)
return token.get('access_token')
def test(self):
""" Tests connectivity with the application. """
return self.get_access_token()
def get_user_by_id(self, user_id):
""" Queries the user in the application using REST API by its email, and returns an IAMUserAppData object
that holds the user_id, username, is_active and app_data attributes given in the query response.
:type user_id: ``str``
:param user_id: ID of the user
:return: An IAMUserAppData object if user exists, None otherwise.
:rtype: ``IAMUserAppData``
"""
user_app_data = self._http_request(
'GET',
url_suffix=f'/admin/v1/Users/{user_id}',
)
if user_app_data:
user_name = user_app_data.get('userName')
is_active = user_app_data.get('active')
email = get_first_primary_email_by_scim_schema(user_app_data)
return IAMUserAppData(user_id, user_name, is_active, user_app_data, email)
return None
def get_user(self, filter_name: str, filter_value: str) -> Optional['IAMUserAppData']:
""" Queries the user in the application using REST API by its email, and returns an IAMUserAppData object
that holds the user_id, username, is_active and app_data attributes given in the query response.
:type filter_name: ``str``
:param filter_name: Attribute name to filter by.
:type filter_value: ``str``
:param filter_value: The filter attribute value.
:return: An IAMUserAppData object if user exists, None otherwise.
:rtype: ``Optional[IAMUserAppData]``
"""
query_params = {'filter': f'{filter_name} eq "{filter_value}"'}
res = self._http_request(
method='GET',
url_suffix='/admin/v1/Users',
params=query_params,
)
if res and res.get('Resources'):
user_app_data = res.get('Resources')[0]
user_id = user_app_data.get('id')
return self.get_user_by_id(user_id)
return None
def create_user(self, user_data: Dict[str, Any]) -> 'IAMUserAppData':
""" Creates a user in the application using REST API.
:type user_data: ``Dict[str, Any]``
:param user_data: User data in the application format
:return: An IAMUserAppData object that contains the data of the created user in the application.
:rtype: ``IAMUserAppData``
"""
user_data['schemas'] = ['urn:ietf:params:scim:schemas:core:2.0:User']
if not isinstance(user_data.get('emails'), list):
user_data['emails'] = [user_data.get('emails')]
if not isinstance(user_data.get('phoneNumbers'), list):
user_data['phoneNumbers'] = [user_data.get('phoneNumbers')]
user_app_data = self._http_request(
method='POST',
url_suffix='/admin/v1/Users',
json_data=user_data,
)
user_id = user_app_data.get('id')
is_active = user_app_data.get('active')
username = user_app_data.get('userName')
email = get_first_primary_email_by_scim_schema(user_app_data)
return IAMUserAppData(user_id, username, is_active, user_app_data, email)
def update_user(self, user_id, new_user_data):
old_user_data = self._http_request(
'GET',
url_suffix=f'/admin/v1/Users/{user_id}',
)
user_app_data = self._http_request(
'PATCH',
url_suffix=f'/admin/v1/Users/{user_id}',
json_data=build_body_request_for_update_user(old_user_data, new_user_data),
)
is_active = user_app_data.get('active')
username = user_app_data.get('userName')
email = get_first_primary_email_by_scim_schema(user_app_data)
return IAMUserAppData(user_id, username, is_active, user_app_data, email)
def enable_user(self, user_id: str):
""" Enables a user in the application using REST API.
:type user_id: ``str``
:param user_id: ID of the user in the application
:return: An IAMUserAppData object that contains the data of the user in the application.
:rtype: ``IAMUserAppData``
"""
user_data = {
'schemas': ['urn:ietf:params:scim:api:messages:2.0:PatchOp'],
'Operations': [
{
'op': 'replace',
'path': 'active',
'value': True,
}
]
}
user_app_data = self._http_request(
'PATCH',
url_suffix=f'/admin/v1/Users/{user_id}',
json_data=user_data,
)
if user_app_data:
user_name = user_app_data.get('userName')
is_active = user_app_data.get('active')
email = get_first_primary_email_by_scim_schema(user_app_data)
return IAMUserAppData(user_id, user_name, is_active, user_app_data, email)
return None
def disable_user(self, user_id: str):
""" Disables a user in the application using REST API.
:type user_id: ``str``
:param user_id: ID of the user in the application
:return: An IAMUserAppData object that contains the data of the user in the application.
:rtype: ``IAMUserAppData``
"""
user_data = {
'schemas': ['urn:ietf:params:scim:api:messages:2.0:PatchOp'],
'Operations': [
{
'op': 'replace',
'path': 'active',
'value': False,
}
]
}
user_app_data = self._http_request(
'PATCH',
url_suffix=f'/admin/v1/Users/{user_id}',
json_data=user_data,
)
if user_app_data:
user_name = user_app_data.get('userName')
is_active = user_app_data.get('active')
email = get_first_primary_email_by_scim_schema(user_app_data)
return IAMUserAppData(user_id, user_name, is_active, user_app_data, email)
return None
def get_group_by_id(self, group_id: str):
""" Retrieves the group information by ID.
:type group_id: ``str``
:param group_id: ID of the group in the application.
:return: The group data.
:rtype: ``dict``
"""
return self._http_request(
method='GET',
url_suffix=f'admin/v1/Groups/{group_id}?attributes=id,displayName,members',
resp_type='response',
)
def get_group_by_name(self, group_name):
""" Retrieves the group information by display name.
:type group_name: ``str``
:param group_name: Display name of the group in the application.
:return: The group data.
:rtype: ``dict``
"""
query_params = {
'filter': f'displayName eq "{group_name}"'
}
return self._http_request(
method='GET',
url_suffix='admin/v1/Groups?attributes=id,displayName,members',
params=query_params,
resp_type='response',
)
def create_group(self, group_data: dict):
""" Creates an empty group with a given name.
:type group_data: ``str``
:param group_data: Display name of the group to be created.
:return: The group data.
:rtype: ``dict``
"""
return self._http_request(
method='POST',
url_suffix='admin/v1/Groups',
json_data=group_data,
resp_type='response',
)
def update_group(self, group_id: str, group_data: dict):
""" Updates a group in the application.
:type group_id: ``str``
:param group_id: ID of the group in the application.
:type group_data: ``str``
:param group_data: The data that needs to be updated.
:return: The group data.
:rtype: ``dict``
"""
return self._http_request(
method='PATCH',
url_suffix=f'admin/v1/Groups/{group_id}',
json_data=group_data,
resp_type='response',
)
def delete_group(self, group_id: str):
""" Deletes a group in the application.
:type group_id: ``str``
:param group_id: ID of the group in the application.
"""
return self._http_request(
method='DELETE',
url_suffix=f'admin/v1/Groups/{group_id}',
resp_type='response',
)
def get_app_fields(self) -> Dict[str, Any]:
""" Gets a dictionary of the user schema fields in the application and their description.
:return: The user schema fields dictionary
:rtype: ``Dict[str, str]``
"""
res = self._http_request(
method='GET',
url_suffix='admin/v1/Schemas/urn:ietf:params:scim:schemas:core:2.0:User'
)
fields = res.get('attributes', [])
return {field.get('name'): field.get('description') for field in fields}
@staticmethod
def handle_exception(user_profile: 'IAMUserProfile',
e: Union[DemistoException, Exception],
action: 'IAMActions'):
""" Handles failed responses from the application API by setting the User Profile object with the result.
The result entity should contain the following data:
1. action (``IAMActions``) The failed action Required
2. success (``bool``) The success status Optional (by default, True)
        3. skip (``bool``)                  Whether or not the command was skipped  Optional (by default, False)
        4. skip_reason (``str``)            Skip reason                             Optional (by default, None)
        5. error_code (``Union[str, int]``) HTTP error code                         Optional (by default, None)
        6. error_message (``str``)          The error description                   Optional (by default, None)
Note: This is the place to determine how to handle specific edge cases from the API, e.g.,
when a DISABLE action was made on a user which is already disabled and therefore we can't
perform another DISABLE action.
:type user_profile: ``IAMUserProfile``
:param user_profile: The user profile object
:type e: ``Union[DemistoException, Exception]``
:param e: The exception object - if type is DemistoException, holds the response json object (`res` attribute)
:type action: ``IAMActions``
:param action: An enum represents the current action (GET, UPDATE, CREATE, DISABLE or ENABLE)
"""
if isinstance(e, DemistoException) and e.res is not None:
error_code = e.res.status_code
if action == IAMActions.DISABLE_USER and error_code in ERROR_CODES_TO_SKIP:
                skip_message = 'User is already disabled or does not exist in the system.'
user_profile.set_result(action=action,
skip=True,
skip_reason=skip_message)
try:
resp = e.res.json()
error_message = get_error_details(resp)
except ValueError:
error_message = str(e)
else:
error_code = ''
error_message = str(e)
user_profile.set_result(action=action,
success=False,
error_code=error_code,
error_message=f'{error_message}\n{traceback.format_exc()}')
demisto.error(traceback.format_exc())
class OutputContext:
"""
Class to build a generic output and context.
"""
def __init__(self, success=None, active=None, id=None, username=None, email=None, errorCode=None,
errorMessage=None, details=None, displayName=None, members=None):
self.instanceName = demisto.callingContext.get('context', {}).get('IntegrationInstance')
self.brand = demisto.callingContext.get('context', {}).get('IntegrationBrand')
self.command = demisto.command().replace('-', '_').title().replace('_', '').replace('Iam', '')
self.success = success
self.active = active
self.id = id
self.username = username
self.email = email
self.errorCode = errorCode
self.errorMessage = errorMessage
self.details = details
self.displayName = displayName # Used in group
self.members = members # Used in group
self.data = {
'brand': self.brand,
'instanceName': self.instanceName,
'success': success,
'active': active,
'id': id,
'username': username,
'email': email,
'errorCode': errorCode,
'errorMessage': errorMessage,
'details': details,
'displayName': displayName,
'members': members,
}
# Remove empty values
self.data = {
k: v
for k, v in self.data.items()
if v is not None
}
'''HELPER FUNCTIONS'''
def get_error_details(res: Dict[str, Any]) -> str:
""" Parses the error details retrieved from the application and outputs the resulted string.
:type res: ``Dict[str, Any]``
:param res: The error data retrieved from the application.
:return: The parsed error details.
:rtype: ``str``
"""
details = str(res.get('detail'))
return details
'''COMMAND FUNCTIONS'''
def test_module(client: Client):
""" Tests connectivity with the client. """
res = client.test()
if isinstance(res, DemistoException):
if 'Unauthorized' in str(res):
return 'Authorization Error: Make sure "Client ID" and "Client Secret" is correctly set'
else:
return str(res)
return 'ok'
def get_group_command(client, args):
scim = safe_load_json(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not (group_id or group_name):
raise Exception('You must supply either "id" or "displayName" in the scim data')
if group_id:
try:
res = client.get_group_by_id(group_id)
res_json = res.json()
if res.status_code == 200:
generic_iam_context = OutputContext(success=True, id=group_id, displayName=res_json.get('displayName'),
members=res_json.get('members'))
except DemistoException as exc:
if exc.res.status_code == 404:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=404, errorMessage='Group Not Found', details=str(exc))
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=exc.res.status_code,
errorMessage=exc.message, details=str(exc))
else:
try:
res = client.get_group_by_name(group_name)
res_json = res.json()
if res.status_code == 200 and res_json.get('totalResults') > 0:
res_json = res_json['Resources'][0]
generic_iam_context = OutputContext(success=True, id=res_json.get('id'), displayName=group_name,
members=res_json.get('members'))
except DemistoException as exc:
if exc.res.status_code == 404:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id, errorCode=404,
errorMessage='Group Not Found', details=str(exc))
else:
generic_iam_context = OutputContext(success=False, displayName=group_name, id=group_id,
errorCode=exc.res.status_code, errorMessage=exc.message,
details=str(exc))
readable_output = tableToMarkdown('Oracle Cloud Get Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output,
)
def create_group_command(client, args):
scim = safe_load_json(args.get('scim'))
group_name = scim.get('displayName')
if not group_name:
raise Exception('You must supply "displayName" of the group in the scim data')
group_data = scim
group_data['schemas'] = ['urn:ietf:params:scim:schemas:core:2.0:Group']
try:
res = client.create_group(group_data)
res_json = res.json()
if res.status_code == 201:
generic_iam_context = OutputContext(success=True, id=res_json.get('id'), displayName=group_name)
else:
res_json = res.json()
generic_iam_context = OutputContext(success=False, displayName=group_name, errorCode=res_json.get('code'),
errorMessage=res_json.get('message'), details=res_json)
except DemistoException as e:
res_json = e.res.json()
generic_iam_context = OutputContext(success=False, displayName=group_name, errorCode=res_json.get('status'),
errorMessage=res_json.get('detail'), details=res_json)
readable_output = tableToMarkdown('Oracle Cloud Create Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output,
)
def update_group_command(client, args):
scim = safe_load_json(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not group_id:
raise Exception('You must supply "id" in the scim data')
member_ids_to_add = args.get('memberIdsToAdd')
member_ids_to_delete = args.get('memberIdsToDelete')
if member_ids_to_add is member_ids_to_delete is None:
raise Exception('You must supply either "memberIdsToAdd" or "memberIdsToDelete" in the scim data')
operations = []
member_ids_json_list = []
if member_ids_to_add:
if not isinstance(member_ids_to_add, list):
member_ids_to_add = safe_load_json(member_ids_to_add)
for member_id in member_ids_to_add:
member_ids_json_list.append(
{
'value': member_id,
'type': 'User',
}
)
if member_ids_json_list:
operation = {
'op': 'add',
'path': 'members',
'value': member_ids_json_list,
}
operations.append(operation)
if member_ids_to_delete:
if not isinstance(member_ids_to_delete, list):
member_ids_to_delete = safe_load_json(member_ids_to_delete)
for member_id in member_ids_to_delete:
operation = {
'op': 'remove',
'path': f'members[value eq "{member_id}"]',
}
operations.append(operation)
group_input = {'schemas': ['urn:ietf:params:scim:api:messages:2.0:PatchOp'], 'Operations': operations}
try:
res = client.update_group(group_id, group_input)
res_json = res.json()
if res.status_code == 200:
generic_iam_context = OutputContext(success=True, id=group_id, displayName=group_name, details=res_json)
except DemistoException as exc:
if exc.res.status_code == 404:
generic_iam_context = OutputContext(success=False, id=group_id, displayName=group_name, errorCode=404,
errorMessage='Group/User Not Found or User not a member of group',
details=str(exc))
else:
generic_iam_context = OutputContext(success=False, id=group_id, displayName=group_name,
errorCode=exc.res.status_code, errorMessage=exc.message,
details=str(exc))
readable_output = tableToMarkdown('Oracle Cloud Update Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output,
)
def delete_group_command(client, args):
scim = safe_load_json(args.get('scim'))
group_id = scim.get('id')
group_name = scim.get('displayName')
if not group_id:
raise Exception('You must supply "id" in the scim data')
    try:
        # run the request inside the try block so the DemistoException handler
        # below can actually catch HTTP errors from the delete call
        res = client.delete_group(group_id)
        if res.status_code == 204:
            generic_iam_context = OutputContext(success=True, id=group_id, displayName=group_name)
except DemistoException as exc:
if exc.res.status_code == 404:
generic_iam_context = OutputContext(success=False, id=group_id, displayName=group_name, errorCode=404,
errorMessage='Group Not Found', details=str(exc))
else:
generic_iam_context = OutputContext(success=False, id=group_id, displayName=group_name,
errorCode=exc.res.status_code, errorMessage=exc.message,
details=str(exc))
readable_output = tableToMarkdown('Oracle Cloud Delete Group:', generic_iam_context.data, removeNull=True)
return CommandResults(
raw_response=generic_iam_context.data,
outputs_prefix=generic_iam_context.command,
outputs_key_field='id',
outputs=generic_iam_context.data,
readable_output=readable_output,
)
def get_mapping_fields(client: Client) -> GetMappingFieldsResponse:
""" Creates and returns a GetMappingFieldsResponse object of the user schema in the application
:param client: (Client) The integration Client object that implements a get_app_fields() method
:return: (GetMappingFieldsResponse) An object that represents the user schema
"""
app_fields = client.get_app_fields()
incident_type_scheme = SchemeTypeMapping(type_name=IAMUserProfile.DEFAULT_INCIDENT_TYPE)
for field, description in app_fields.items():
incident_type_scheme.add_field(field, description)
return GetMappingFieldsResponse([incident_type_scheme])
def main():
user_profile = None
params = demisto.params()
base_url = params['url'].strip('/')
client_id = params.get('credentials', {}).get('identifier')
client_secret = params.get('credentials', {}).get('password')
mapper_in = params.get('mapper_in')
mapper_out = params.get('mapper_out')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
args = demisto.args()
is_create_enabled = params.get('create_user_enabled')
is_enable_enabled = params.get('enable_user_enabled')
is_disable_enabled = params.get('disable_user_enabled')
is_update_enabled = params.get('update_user_enabled')
create_if_not_exists = params.get('create_if_not_exists')
iam_command = IAMCommand(is_create_enabled, is_enable_enabled, is_disable_enabled, is_update_enabled,
create_if_not_exists, mapper_in, mapper_out,
get_user_iam_attrs=['id', 'userName', 'emails'])
headers = {
'Content-Type': 'application/scim+json',
'Accept': 'application/scim+json',
}
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers,
ok_codes=(200, 201, 204),
client_id=client_id,
client_secret=client_secret,
)
demisto.debug(f'Command being called is {command}')
'''CRUD commands'''
if command == 'iam-get-user':
user_profile = iam_command.get_user(client, args)
elif command == 'iam-create-user':
user_profile = iam_command.create_user(client, args)
elif command == 'iam-update-user':
user_profile = iam_command.update_user(client, args)
elif command == 'iam-disable-user':
user_profile = iam_command.disable_user(client, args)
if user_profile:
return_results(user_profile)
'''non-CRUD commands'''
try:
if command == 'test-module':
return_results(test_module(client))
elif command == 'iam-get-group':
return_results(get_group_command(client, args))
elif command == 'iam-create-group':
return_results(create_group_command(client, args))
elif command == 'iam-update-group':
return_results(update_group_command(client, args))
elif command == 'iam-delete-group':
return_results(delete_group_command(client, args))
elif command == 'get-mapping-fields':
return_results(get_mapping_fields(client))
except Exception as exc:
# For any other integration command exception, return an error
demisto.error(traceback.format_exc())
return_error(f'Failed to execute {command} command. Error:\n{exc}', error=exc)
from IAMApiModule import * # noqa E402
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 36.736292 | 120 | 0.603198 |
406d75c1bbdd0a3a8947d69ef9f39f580ed500b2
| 2,666 |
py
|
Python
|
Pimoroni/sgp30_simpletest.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | 1 |
2021-03-22T18:38:43.000Z
|
2021-03-22T18:38:43.000Z
|
Pimoroni/sgp30_simpletest.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | null | null | null |
Pimoroni/sgp30_simpletest.py
|
ckuehnel/MicroPython
|
c57d0df744fe5301e755bd139b6cc56d69c442fd
|
[
"MIT"
] | 1 |
2021-02-06T10:07:36.000Z
|
2021-02-06T10:07:36.000Z
|
# sgp30_simpletest.py
# https://github.com/alexmrqt/micropython-sgp30
# adaptation to the I2C bus of Raspberry Pi Pico by
# 2021-03-11 Claus Kühnel [email protected]
"""
Example for using the SGP30 with MicroPython and the Adafruit library.
Uses instructions from "SGP30 Driver Integration (for Software I²C)" to handle
self-calibration of the sensor:
- if no baseline found, wait 12h before storing baseline,
- if baseline found, store baseline every hour.
Baseline is writen in co2eq_baseline.txt and tvoc_baseline.txt.
Note that if the sensor is shut down during more than one week, then baselines
must be manually deleted.
"""
import time
from machine import Pin, I2C
import adafruit_sgp30
# Initialize I2C bus
sda=Pin(0)
scl=Pin(1)
i2c=I2C(0,sda=sda, scl=scl, freq=100000)
# Create library object on our I2C port
sgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)
print("SGP30 serial #", [hex(i) for i in sgp30.serial])
# Initialize SGP-30 internal drift compensation algorithm.
sgp30.iaq_init()
# Wait 15 seconds for the SGP30 to properly initialize
print("Waiting 15 seconds for SGP30 initialization.")
time.sleep(15)
# Retrieve previously stored baselines, if any (helps the compensation algorithm).
has_baseline = False
try:
f_co2 = open('co2eq_baseline.txt', 'r')
f_tvoc = open('tvoc_baseline.txt', 'r')
co2_baseline = int(f_co2.read())
tvoc_baseline = int(f_tvoc.read())
#Use them to calibrate the sensor
sgp30.set_iaq_baseline(co2_baseline, tvoc_baseline)
f_co2.close()
f_tvoc.close()
has_baseline = True
except:
print('Impossible to read SGP30 baselines!')
#Store the time at which last baseline has been saved
baseline_time = time.time()
while True:
co2eq, tvoc = sgp30.iaq_measure()
print('co2eq = ' + str(co2eq) + ' ppm \t tvoc = ' + str(tvoc) + ' ppb')
    # Baselines should be saved after 12 hours the first time, then every hour,
# according to the doc.
if (has_baseline and (time.time() - baseline_time >= 3600)) \
or ((not has_baseline) and (time.time() - baseline_time >= 43200)):
print('Saving baseline!')
baseline_time = time.time()
try:
f_co2 = open('co2eq_baseline.txt', 'w')
f_tvoc = open('tvoc_baseline.txt', 'w')
bl_co2, bl_tvoc = sgp30.get_iaq_baseline()
f_co2.write(str(bl_co2))
f_tvoc.write(str(bl_tvoc))
f_co2.close()
f_tvoc.close()
has_baseline = True
except:
print('Impossible to write SGP30 baselines!')
    # A measurement should be done every second, according to the doc, so the
    # on-chip baseline compensation algorithm keeps working.
time.sleep(1)
| 28.666667 | 82 | 0.685296 |
294088911bd3105d4c586280dab7103564976714
| 1,899 |
py
|
Python
|
marsyas-vamp/marsyas/scripts/large-evaluators/tempo-reference-implementation/mar_collection.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | 18 |
2020-01-22T14:58:18.000Z
|
2022-02-21T12:07:51.000Z
|
marsyas-vamp/marsyas/scripts/large-evaluators/tempo-reference-implementation/mar_collection.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | 2 |
2020-02-24T13:14:05.000Z
|
2020-09-21T13:34:53.000Z
|
marsyas-vamp/marsyas/scripts/large-evaluators/tempo-reference-implementation/mar_collection.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | 1 |
2020-01-22T14:55:36.000Z
|
2020-01-22T14:55:36.000Z
|
#!/usr/bin/env python
import os
try:
marsyas_datadir = os.environ['MARSYAS_DATADIR']
except KeyError:
marsyas_datadir = "."
class MarCollection():
def __init__(self, mf_filename=None):
self.data = []
if mf_filename is not None:
try:
self.read_mf(mf_filename)
            except OSError:
pass
def write(self, mf_filename=None):
if mf_filename is not None:
self.filename = mf_filename
out = open(self.filename, 'w')
for filename, label in self.data:
out.write("%s\t%s\n" % (filename, label))
out.close()
def read_mf(self, mf_filename):
self.filename = mf_filename
self.data = []
self.merge_mf(mf_filename)
def merge_mf(self, new_mf_filename):
lines = open(new_mf_filename).readlines()
for line in lines:
if len(line) < 2:
continue
if line[0] == '#':
continue
splitline = line.split('\t')
filename = splitline[0].rstrip().replace(
"MARSYAS_DATADIR", marsyas_datadir)
try:
label = splitline[1].rstrip()
            except IndexError:
label = ""
self.set_item(filename, label)
def get_filenames(self):
return [f for f,l in self.data ]
def get_filename_index(self, filename):
for i, pair in enumerate(self.data):
if filename == pair[0]:
return i, pair
return False, None
def set_item(self, filename, label=""):
index, pair = self.get_filename_index(filename)
if index is not False:
self.data[index] = (pair[0], label)
else:
self.data.append( (filename, label) )
def get_filenames_matching_label(self, label_match):
return [f for f,l in self.data if l == label_match]
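# --- Editor's usage sketch (hypothetical file names):
#   coll = MarCollection('train.mf')           # load, ignoring a missing file
#   coll.set_item('audio/track1.wav', 'rock')  # add or relabel an entry
#   coll.write('train_updated.mf')             # persist as tab-separated lines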
| 28.772727 | 59 | 0.54871 |
46511f7f3881a08595f0b1950e9988fb1a58cb8f
| 13,340 |
py
|
Python
|
Packs/Syslog/Integrations/Syslogv2/Syslogv2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Syslog/Integrations/Syslogv2/Syslogv2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Syslog/Integrations/Syslogv2/Syslogv2.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from dataclasses import dataclass
from tempfile import NamedTemporaryFile
from typing import Callable
import syslogmp
from gevent.server import StreamServer
from syslog_rfc5424_parser import SyslogMessage, ParseError
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
''' CONSTANTS '''
MAX_SAMPLES = 10
BUF_SIZE = 1024
MESSAGE_REGEX: Optional[str] = None
MAX_PORT: int = 65535
@dataclass
class SyslogMessageExtract:
app_name: Optional[str]
facility: str
host_name: Optional[str]
msg: str
msg_id: Optional[str]
process_id: Optional[str]
sd: dict
severity: str
timestamp: str
version: Optional[int]
occurred: Optional[str]
def parse_rfc_3164_format(log_message: bytes) -> Optional[SyslogMessageExtract]:
"""
Receives a log message which is in RFC 3164 format. Parses it into SyslogMessageExtract data class object
Args:
log_message (bytes): Syslog message.
Returns:
(Optional[SyslogMessageExtract]): Extraction data class
"""
try:
syslog_message: syslogmp.Message = syslogmp.parse(log_message)
except syslogmp.parser.MessageFormatError:
return None
return SyslogMessageExtract(
app_name=None,
facility=syslog_message.facility.name,
host_name=syslog_message.hostname,
msg=syslog_message.message.decode('utf-8'),
msg_id=None,
process_id=None,
sd={},
severity=syslog_message.severity.name,
timestamp=syslog_message.timestamp.isoformat(),
version=None,
# Because RF-3164 doesn't return localized date, can't determine the localized time it occurred.
occurred=None
)
def parse_rfc_5424_format(log_message: bytes) -> Optional[SyslogMessageExtract]:
"""
Receives a log message which is in RFC 5424 format. Parses it into SyslogMessageExtract data class object
Args:
log_message (bytes): Syslog message.
Returns:
(Optional[SyslogMessageExtract]): Extraction data class
"""
try:
syslog_message: SyslogMessage = SyslogMessage.parse(log_message.decode('utf-8'))
except ParseError:
return None
return SyslogMessageExtract(
app_name=syslog_message.appname,
facility=syslog_message.facility.name,
host_name=syslog_message.hostname,
msg=syslog_message.msg,
msg_id=syslog_message.msgid,
process_id=syslog_message.procid,
sd=syslog_message.sd,
severity=syslog_message.severity.name,
timestamp=syslog_message.timestamp,
version=syslog_message.version,
occurred=syslog_message.timestamp
)
def parse_rfc_6587_format(log_message: bytes) -> Optional[SyslogMessageExtract]:
"""
    Receives a log message which is in RFC 6587 format (transport framing around an RFC 3164/5424 payload).
    Parses it into SyslogMessageExtract data class object
Args:
log_message (bytes): Syslog message.
Returns:
(SyslogMessageExtract): Extraction data class
"""
log_message = log_message.decode('utf-8')
split_msg: List[str] = log_message.split(' ')
if not log_message or not log_message[0].isdigit() or not len(split_msg) > 1:
return None
try:
log_message = ' '.join(split_msg[1:])
encoded_msg = log_message.encode()
for format_func in format_funcs:
# if it is RFC6587 itself, continue
if format_func == parse_rfc_6587_format:
continue
extracted_message = format_func(encoded_msg)
if extracted_message:
return extracted_message
except ValueError:
return None
return None
format_funcs: List[Callable[[bytes], Optional[SyslogMessageExtract]]] = [parse_rfc_3164_format, parse_rfc_5424_format,
parse_rfc_6587_format]
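# Editor's note: parsing is attempted in the order above until one parser
# succeeds. Illustrative raw messages (example values, not from the source):
#   RFC 3164: b'<34>Oct 11 22:14:15 myhost su: auth failure'
#   RFC 5424: b'<34>1 2003-10-11T22:14:15.003Z myhost su - ID47 - auth failure'
#   RFC 6587: the same payload prefixed with its byte count (octet counting),
#             e.g. b'64 <34>1 2003-10-11T22:14:15.003Z myhost su - ID47 - ...'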
def test_module() -> str:
"""
Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
Returns:
(str): 'ok' if test passed, anything else will fail the test.
"""
return 'ok'
def fetch_samples() -> None:
"""
Retrieves samples from context.
"""
demisto.incidents(get_integration_context().get('samples'))
def create_incident_from_syslog_message(extracted_message: SyslogMessageExtract) -> dict:
"""
Creates incident from the extracted Syslog message.
Args:
extracted_message (SyslogMessageExtract): Syslog message extraction details.
Returns:
(dict): Incident.
"""
return {
'name': f'Syslog from [{extracted_message.host_name}][{extracted_message.timestamp}]',
'rawJSON': json.dumps(vars(extracted_message)),
'occurred': extracted_message.occurred,
'details': '\n'.join([f'{k}: {v}' for k, v in vars(extracted_message).items() if v])
}
def update_integration_context_samples(incident: dict, max_samples: int = MAX_SAMPLES) -> None:
"""
Updates the integration context samples with the newly created incident.
If the size of the samples has reached `MAX_SAMPLES`, will pop out the latest sample.
Args:
incident (dict): The newly created incident.
max_samples (int): Max samples size.
Returns:
(None): Modifies the integration context samples field.
"""
ctx = get_integration_context()
updated_samples_list: List[Dict] = [incident] + ctx.get('samples', [])
if len(updated_samples_list) > max_samples:
updated_samples_list.pop()
ctx['samples'] = updated_samples_list
set_integration_context(ctx)
def log_message_passes_filter(log_message: SyslogMessageExtract, message_regex: Optional[str]) -> bool:
"""
Given log message extraction and a possible message regex, checks if the message passes the filters:
1) Message regex is None, therefore no filter was asked to be made.
2) Message regex is not None: Filter the Syslog message if regex does not exist in the message,
if regex exists in the Syslog message, do not filter.
Args:
log_message (SyslogMessageExtract): The extracted details of a Syslog message.
message_regex (Optional[str]): Message regex to match if exists.
Returns:
(bool): True if the message shouldn't be filtered, false if the message should be filtered.
"""
if not message_regex:
return True
regexp = re.compile(message_regex)
return True if regexp.search(log_message.msg) else False
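# Editor's example (illustrative): with message_regex=r'error', an extracted
# message whose msg is 'disk error on /dev/sda' passes the filter, while
# 'heartbeat ok' is filtered out; with message_regex=None everything passes.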
def perform_long_running_loop(socket_data: bytes):
"""
Performs one loop of a long running execution.
- Gets data from socket.
- Parses the Syslog message data.
- If the Syslog message data passes filter, creates a new incident.
- Saves the incident in integration context for samples.
Args:
socket_data (bytes): Retrieved socket data.
Returns:
(None): Creates incident in Cortex XSOAR platform.
"""
extracted_message: Optional[SyslogMessageExtract] = None
for format_func in format_funcs:
extracted_message = format_func(socket_data)
if extracted_message:
break
if not extracted_message:
raise DemistoException(f'Could not parse the following message: {socket_data.decode("utf-8")}')
if log_message_passes_filter(extracted_message, MESSAGE_REGEX):
incident: dict = create_incident_from_syslog_message(extracted_message)
update_integration_context_samples(incident)
demisto.createIncidents([incident])
def perform_long_running_execution(sock: Any, address: tuple) -> None:
"""
The long running execution loop. Gets input, and performs a while True loop and logs any error that happens.
Stops when there is no more data to read.
Args:
sock: Socket.
        address (tuple): Address of the connected peer, used for logging.
Returns:
(None): Reads data, calls that creates incidents from inputted data.
"""
demisto.debug('Starting long running execution')
file_obj = sock.makefile(mode='rb')
try:
while True:
try:
line = file_obj.readline()
if not line:
demisto.info(f'Disconnected from {address}')
break
perform_long_running_loop(line.strip())
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
demisto.error(f'Error occurred during long running loop. Error was: {e}')
finally:
demisto.debug('Finished reading message')
finally:
file_obj.close()
def prepare_globals_and_create_server(port: int, message_regex: Optional[str], certificate: Optional[str],
private_key: Optional[str]) -> StreamServer:
"""
    Prepares the global MESSAGE_REGEX and creates the server to listen
to Syslog messages.
Args:
port (int): Port
message_regex (Optional[str]): Regex. Will create incident only if Syslog message matches this regex.
certificate (Optional[str]): Certificate. For SSL connection.
private_key (Optional[str]): Private key. For SSL connection.
Returns:
(StreamServer): Server to listen to Syslog messages.
"""
global MESSAGE_REGEX
MESSAGE_REGEX = message_regex
if certificate and private_key:
certificate_file = NamedTemporaryFile(delete=False)
certificate_path = certificate_file.name
certificate_file.write(bytes(certificate, 'utf-8'))
certificate_file.close()
private_key_file = NamedTemporaryFile(delete=False)
private_key_path = private_key_file.name
private_key_file.write(bytes(private_key, 'utf-8'))
private_key_file.close()
server = StreamServer(('0.0.0.0', port), perform_long_running_execution, keyfile=private_key_path,
certfile=certificate_path)
demisto.debug('Starting HTTPS Server')
else:
server = StreamServer(('0.0.0.0', port), perform_long_running_execution)
demisto.debug('Starting HTTP Server')
return server
def get_mapping_fields() -> Dict[str, str]:
return {
'app_name': 'Application Name',
'facility': 'Facility',
'host_name': 'Host Name',
'msg': 'Message',
'msg_id': 'Message ID',
'process_id': 'Process ID',
'sd': 'Structured Data',
'severity': 'Severity',
'timestamp': 'Timestamp',
'version': 'Syslog Version',
'occurred': 'Occurred Time'
}
''' MAIN FUNCTION '''
def main() -> None:
params = demisto.params()
command = demisto.command()
message_regex: Optional[str] = params.get('message_regex')
certificate: Optional[str] = params.get('certificate')
private_key: Optional[str] = params.get('private_key')
port: Union[Optional[str], int] = params.get('longRunningPort')
try:
port = int(params.get('longRunningPort'))
except (ValueError, TypeError):
raise DemistoException(f'Invalid listen port - {port}. Make sure your port is a number')
if port < 0 or MAX_PORT < port:
raise DemistoException(f'Given port: {port} is not valid and must be between 0-{MAX_PORT}')
demisto.debug(f'Command being called is {demisto.command()}')
try:
if command == 'test-module':
try:
prepare_globals_and_create_server(port, message_regex, certificate, private_key)
except OSError as e:
if 'Address already in use' in str(e):
raise DemistoException(f'Given port: {port} is already in use. Please either change port or '
f'make sure to close the connection in the server using that port.')
raise e
return_results('ok')
elif command == 'fetch-incidents':
# The integration fetches incidents in the long-running-execution command. Fetch incidents is called
# only when "Pull From Instance" is clicked in create new classifier section in Cortex XSOAR.
# The fetch incidents returns samples of incidents generated by the long-running-execution.
fetch_samples()
elif command == 'long-running-execution':
server: StreamServer = prepare_globals_and_create_server(port, message_regex, certificate, private_key)
server.serve_forever()
elif command == 'get-mapping-fields':
return_results(get_mapping_fields())
else:
raise NotImplementedError(f'''Command '{command}' is not implemented.''')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 37.055556 | 118 | 0.666792 |
31107869ae687d5a76f261e9b17e7d5e0bd24f53
| 34 |
py
|
Python
|
insomniac/__main__.py
|
shifenis/Insomniac
|
7c9d572b83c29049bc3075073be5549fe821a739
|
[
"MIT"
] | 533 |
2020-06-01T10:40:11.000Z
|
2022-03-29T17:05:50.000Z
|
insomniac/__main__.py
|
shifenis/Insomniac
|
7c9d572b83c29049bc3075073be5549fe821a739
|
[
"MIT"
] | 399 |
2020-06-01T22:01:55.000Z
|
2022-03-29T20:39:29.000Z
|
insomniac/__main__.py
|
shifenis/Insomniac
|
7c9d572b83c29049bc3075073be5549fe821a739
|
[
"MIT"
] | 166 |
2020-06-01T21:51:52.000Z
|
2022-03-12T14:14:44.000Z
|
import insomniac
insomniac.run()
| 8.5 | 16 | 0.794118 |
7362ca85718b460645bd5b10716c15e229e93db6
| 405 |
py
|
Python
|
Python/M01_ProgrammingBasics/L06_NestedLoops/Lab/Solutions/P05_Travelling.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L06_NestedLoops/Lab/Solutions/P05_Travelling.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | null | null | null |
Python/M01_ProgrammingBasics/L06_NestedLoops/Lab/Solutions/P05_Travelling.py
|
todorkrastev/softuni-software-engineering
|
cfc0b5eaeb82951ff4d4668332ec3a31c59a5f84
|
[
"MIT"
] | 1 |
2022-02-23T13:03:14.000Z
|
2022-02-23T13:03:14.000Z
|
destination = 0
new_savings = 0
savings = 0
while destination != "End":
destination = input()
if destination == "End":
break
min_budget = float(input())
    # accumulate savings until the trip budget is reached
    while min_budget >= new_savings:
savings = float(input())
new_savings += savings
if new_savings >= min_budget:
print(f"Going to {destination}!")
new_savings = 0
break
| 23.823529 | 45 | 0.575309 |
c33d80695edc09acc64b42f7bb98e56ef52342b3
| 1,133 |
py
|
Python
|
loesungsvorschlaege/ml_1_lineare_regression/praxis/diabetes/lin_reg_mit_scikit.py
|
severinhaller/einf-machinelearning
|
4dfc8f1da0d81c5aa800d1459f81b72d1bf6dd9b
|
[
"MIT"
] | null | null | null |
loesungsvorschlaege/ml_1_lineare_regression/praxis/diabetes/lin_reg_mit_scikit.py
|
severinhaller/einf-machinelearning
|
4dfc8f1da0d81c5aa800d1459f81b72d1bf6dd9b
|
[
"MIT"
] | null | null | null |
loesungsvorschlaege/ml_1_lineare_regression/praxis/diabetes/lin_reg_mit_scikit.py
|
severinhaller/einf-machinelearning
|
4dfc8f1da0d81c5aa800d1459f81b72d1bf6dd9b
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import sklearn.linear_model as lin
# The first 10 columns contain the features. The last column contains the label (target).
diabetes_data_set = pd.read_csv("diabetes_daten.csv", header=None)
# Matrix X: all 442 rows and the first 10 columns => X has dimension 442 x 10
matrix_X = diabetes_data_set.iloc[0:442, 0:10].values
print(f"Dimension Matrix X: {matrix_X.shape}")
# Vector y: all 442 rows and only the last column => y has dimension 442 x 1
vektor_y = diabetes_data_set.iloc[0:442, 10].values
print(f"Dimension Vektor y: {vektor_y.shape}")
# Compute the linear regression.
# Because we pass fit_intercept=True, we do not need to add a column of ones to matrix X;
# this is done automatically.
regr = lin.LinearRegression(fit_intercept=True)
regr.fit(matrix_X, vektor_y)
# coef_ contains the unknowns => the w vector
# Suppress scientific notation with e
# Example: -3.63612242e-02 = -3.63612242 * 10^-2 = -0.0363612242
np.set_printoptions(suppress=True)
print(f"(w_1, w_2, ..., w_10): {regr.coef_}")
print(f"(w_0): {regr.intercept_}")
| 39.068966 | 97 | 0.761695 |
5edc0378e1634ca2d7249cd7e7467ab9220d7b94
| 12,433 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/education/api.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/education/api.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/education/api.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import flt, cstr
from frappe.email.doctype.email_group.email_group import add_subscribers
def get_course(program):
'''Return list of courses for a particular program
:param program: Program
'''
courses = frappe.db.sql('''select course, course_name from `tabProgram Course` where parent=%s''',
(program), as_dict=1)
return courses
@frappe.whitelist()
def enroll_student(source_name):
"""Creates a Student Record and returns a Program Enrollment.
:param source_name: Student Applicant.
"""
frappe.publish_realtime('enroll_student_progress', {"progress": [1, 4]}, user=frappe.session.user)
student = get_mapped_doc("Student Applicant", source_name,
{"Student Applicant": {
"doctype": "Student",
"field_map": {
"name": "student_applicant"
}
}}, ignore_permissions=True)
student.save()
program_enrollment = frappe.new_doc("Program Enrollment")
program_enrollment.student = student.name
program_enrollment.student_name = student.title
program_enrollment.program = frappe.db.get_value("Student Applicant", source_name, "program")
frappe.publish_realtime('enroll_student_progress', {"progress": [4, 4]}, user=frappe.session.user)
return program_enrollment
@frappe.whitelist()
def check_attendance_records_exist(course_schedule=None, student_group=None, date=None):
"""Check if Attendance Records are made against the specified Course Schedule or Student Group for given date.
:param course_schedule: Course Schedule.
:param student_group: Student Group.
:param date: Date.
"""
if course_schedule:
return frappe.get_list("Student Attendance", filters={"course_schedule": course_schedule})
else:
return frappe.get_list("Student Attendance", filters={"student_group": student_group, "date": date})
@frappe.whitelist()
def mark_attendance(students_present, students_absent, course_schedule=None, student_group=None, date=None):
"""Creates Multiple Attendance Records.
:param students_present: Students Present JSON.
:param students_absent: Students Absent JSON.
:param course_schedule: Course Schedule.
:param student_group: Student Group.
:param date: Date.
"""
present = json.loads(students_present)
absent = json.loads(students_absent)
for d in present:
make_attendance_records(d["student"], d["student_name"], "Present", course_schedule, student_group, date)
for d in absent:
make_attendance_records(d["student"], d["student_name"], "Absent", course_schedule, student_group, date)
frappe.db.commit()
frappe.msgprint(_("Attendance has been marked successfully."))
def make_attendance_records(student, student_name, status, course_schedule=None, student_group=None, date=None):
"""Creates/Update Attendance Record.
:param student: Student.
:param student_name: Student Name.
:param course_schedule: Course Schedule.
:param status: Status (Present/Absent)
"""
student_attendance_list = frappe.get_list("Student Attendance", fields = ['name'], filters = {
"student": student,
"course_schedule": course_schedule,
"student_group": student_group,
"date": date
})
if student_attendance_list:
student_attendance = frappe.get_doc("Student Attendance", student_attendance_list[0])
else:
student_attendance = frappe.new_doc("Student Attendance")
student_attendance.student = student
student_attendance.student_name = student_name
student_attendance.course_schedule = course_schedule
student_attendance.student_group = student_group
student_attendance.date = date
student_attendance.status = status
student_attendance.save()
@frappe.whitelist()
def get_student_guardians(student):
"""Returns List of Guardians of a Student.
:param student: Student.
"""
guardians = frappe.get_list("Student Guardian", fields=["guardian"] ,
filters={"parent": student})
return guardians
@frappe.whitelist()
def get_student_group_students(student_group, include_inactive=0):
"""Returns List of student, student_name in Student Group.
:param student_group: Student Group.
"""
if include_inactive:
students = frappe.get_list("Student Group Student", fields=["student", "student_name"] ,
filters={"parent": student_group}, order_by= "group_roll_number")
else:
students = frappe.get_list("Student Group Student", fields=["student", "student_name"] ,
filters={"parent": student_group, "active": 1}, order_by= "group_roll_number")
return students
@frappe.whitelist()
def get_fee_structure(program, academic_term=None):
"""Returns Fee Structure.
:param program: Program.
:param academic_term: Academic Term.
"""
fee_structure = frappe.db.get_values("Fee Structure", {"program": program,
"academic_term": academic_term}, 'name', as_dict=True)
return fee_structure[0].name if fee_structure else None
@frappe.whitelist()
def get_fee_components(fee_structure):
"""Returns Fee Components.
:param fee_structure: Fee Structure.
"""
if fee_structure:
fs = frappe.get_list("Fee Component", fields=["fees_category", "amount"] , filters={"parent": fee_structure}, order_by= "idx")
return fs
@frappe.whitelist()
def get_fee_schedule(program, student_category=None):
"""Returns Fee Schedule.
:param program: Program.
:param student_category: Student Category
"""
fs = frappe.get_list("Program Fee", fields=["academic_term", "fee_structure", "due_date", "amount"] ,
filters={"parent": program, "student_category": student_category }, order_by= "idx")
return fs
@frappe.whitelist()
def collect_fees(fees, amt):
paid_amount = flt(amt) + flt(frappe.db.get_value("Fees", fees, "paid_amount"))
total_amount = flt(frappe.db.get_value("Fees", fees, "total_amount"))
frappe.db.set_value("Fees", fees, "paid_amount", paid_amount)
frappe.db.set_value("Fees", fees, "outstanding_amount", (total_amount - paid_amount))
return paid_amount
@frappe.whitelist()
def get_course_schedule_events(start, end, filters=None):
"""Returns events for Course Schedule Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Course Schedule", filters)
data = frappe.db.sql("""select name, course,
timestamp(schedule_date, from_time) as from_datetime,
timestamp(schedule_date, to_time) as to_datetime,
room, student_group, 0 as 'allDay'
from `tabCourse Schedule`
where ( schedule_date between %(start)s and %(end)s )
{conditions}""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
@frappe.whitelist()
def get_assessment_criteria(course):
"""Returns Assessmemt Criteria and their Weightage from Course Master.
:param Course: Course
"""
return frappe.get_list("Course Assessment Criteria", \
fields=["assessment_criteria", "weightage"], filters={"parent": course}, order_by= "idx")
@frappe.whitelist()
def get_assessment_students(assessment_plan, student_group):
student_list = get_student_group_students(student_group)
for i, student in enumerate(student_list):
result = get_result(student.student, assessment_plan)
if result:
student_result = {}
for d in result.details:
student_result.update({d.assessment_criteria: [cstr(d.score), d.grade]})
student_result.update({
"total_score": [cstr(result.total_score), result.grade],
"comment": result.comment
})
student.update({
"assessment_details": student_result,
"docstatus": result.docstatus,
"name": result.name
})
else:
student.update({'assessment_details': None})
return student_list
@frappe.whitelist()
def get_assessment_details(assessment_plan):
"""Returns Assessment Criteria and Maximum Score from Assessment Plan Master.
:param Assessment Plan: Assessment Plan
"""
return frappe.get_list("Assessment Plan Criteria", \
fields=["assessment_criteria", "maximum_score", "docstatus"], filters={"parent": assessment_plan}, order_by= "idx")
@frappe.whitelist()
def get_result(student, assessment_plan):
"""Returns Submitted Result of given student for specified Assessment Plan
:param Student: Student
:param Assessment Plan: Assessment Plan
"""
results = frappe.get_all("Assessment Result", filters={"student": student,
"assessment_plan": assessment_plan, "docstatus": ("!=", 2)})
if results:
return frappe.get_doc("Assessment Result", results[0])
else:
return None
@frappe.whitelist()
def get_grade(grading_scale, percentage):
"""Returns Grade based on the Grading Scale and Score.
:param Grading Scale: Grading Scale
:param Percentage: Score Percentage
"""
grading_scale_intervals = {}
if not hasattr(frappe.local, 'grading_scale'):
grading_scale = frappe.get_all("Grading Scale Interval", fields=["grade_code", "threshold"], filters={"parent": grading_scale})
frappe.local.grading_scale = grading_scale
for d in frappe.local.grading_scale:
grading_scale_intervals.update({d.threshold:d.grade_code})
intervals = sorted(grading_scale_intervals.keys(), key=float, reverse=True)
for interval in intervals:
if flt(percentage) >= interval:
grade = grading_scale_intervals.get(interval)
break
else:
grade = ""
return grade
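# A framework-free sketch of the threshold lookup above (added for
# illustration; the interval map is invented sample data, and flt comes from
# the frappe.utils import at the top of this module):
def _grade_from_thresholds(percentage, intervals):
    """intervals maps threshold -> grade code, e.g. {90: 'A', 75: 'B', 0: 'C'}."""
    for threshold in sorted(intervals, key=float, reverse=True):
        if flt(percentage) >= threshold:
            return intervals[threshold]
    return ""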
@frappe.whitelist()
def mark_assessment_result(assessment_plan, scores):
student_score = json.loads(scores)
assessment_details = []
for criteria in student_score.get("assessment_details"):
assessment_details.append({
"assessment_criteria": criteria,
"score": flt(student_score["assessment_details"][criteria])
})
assessment_result = get_assessment_result_doc(student_score["student"], assessment_plan)
assessment_result.update({
"student": student_score.get("student"),
"assessment_plan": assessment_plan,
"comment": student_score.get("comment"),
"total_score":student_score.get("total_score"),
"details": assessment_details
})
assessment_result.save()
details = {}
for d in assessment_result.details:
details.update({d.assessment_criteria: d.grade})
assessment_result_dict = {
"name": assessment_result.name,
"student": assessment_result.student,
"total_score": assessment_result.total_score,
"grade": assessment_result.grade,
"details": details
}
return assessment_result_dict
@frappe.whitelist()
def submit_assessment_results(assessment_plan, student_group):
total_result = 0
student_list = get_student_group_students(student_group)
for i, student in enumerate(student_list):
doc = get_result(student.student, assessment_plan)
if doc and doc.docstatus==0:
total_result += 1
doc.submit()
return total_result
def get_assessment_result_doc(student, assessment_plan):
assessment_result = frappe.get_all("Assessment Result", filters={"student": student,
"assessment_plan": assessment_plan, "docstatus": ("!=", 2)})
if assessment_result:
doc = frappe.get_doc("Assessment Result", assessment_result[0])
if doc.docstatus == 0:
return doc
elif doc.docstatus == 1:
frappe.msgprint(_("Result already Submitted"))
return None
else:
return frappe.new_doc("Assessment Result")
@frappe.whitelist()
def update_email_group(doctype, name):
if not frappe.db.exists("Email Group", name):
email_group = frappe.new_doc("Email Group")
email_group.title = name
email_group.save()
email_list = []
students = []
if doctype == "Student Group":
students = get_student_group_students(name)
for stud in students:
for guard in get_student_guardians(stud.student):
email = frappe.db.get_value("Guardian", guard.guardian, "email_address")
if email:
email_list.append(email)
add_subscribers(name, email_list)
@frappe.whitelist()
def get_current_enrollment(student, academic_year=None):
current_academic_year = academic_year or frappe.defaults.get_defaults().academic_year
program_enrollment_list = frappe.db.sql('''
select
name as program_enrollment, student_name, program, student_batch_name as student_batch,
student_category, academic_term, academic_year
from
`tabProgram Enrollment`
where
student = %s and academic_year = %s
order by creation''', (student, current_academic_year), as_dict=1)
if program_enrollment_list:
return program_enrollment_list[0]
else:
return None
| 32.891534 | 129 | 0.756535 |
48b2f168061329c24e1f6812589cc116688ec631
| 389 |
py
|
Python
|
production/pygsl-0.9.5/pygsl/math.py
|
juhnowski/FishingRod
|
457e7afb5cab424296dff95e1acf10ebf70d32a9
|
[
"MIT"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
production/pygsl-0.9.5/pygsl/math.py
|
juhnowski/FishingRod
|
457e7afb5cab424296dff95e1acf10ebf70d32a9
|
[
"MIT"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/math.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
"""
Simple Mathematical functions
fcmp(a, b, epsilon) -> -1, 0, 1
"""
import pygsl._gslwrap
log1p = pygsl._gslwrap.gsl_log1p
expm1 = pygsl._gslwrap.gsl_expm1
hypot = pygsl._gslwrap.gsl_hypot
acosh = pygsl._gslwrap.gsl_acosh
asinh = pygsl._gslwrap.gsl_asinh
atanh = pygsl._gslwrap.gsl_atanh
ldexp = pygsl._gslwrap.gsl_ldexp
frexp = pygsl._gslwrap.gsl_frexp
fcmp = pygsl._gslwrap.gsl_fcmp
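# Usage sketch (added; return-value semantics follow GSL's gsl_fcmp, which
# compares two doubles to within a relative accuracy epsilon):
#   fcmp(1.0, 1.0 + 1e-12, 1e-9)  # -> 0, approximately equal
#   fcmp(1.0, 2.0, 1e-9)          # -> -1, first argument is smaller
#   fcmp(2.0, 1.0, 1e-9)          # -> 1, first argument is greater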
| 24.3125 | 32 | 0.77892 |
5b11720c74506829605adbbd168ba8a1a231fa4d
| 11,119 |
py
|
Python
|
fhirclient/r4models/invoice.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
fhirclient/r4models/invoice.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
fhirclient/r4models/invoice.py
|
Healthedata1/Flask-PL
|
88a2f40ca430c4cbb9fbded7fc92fdc166ebb9f1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Invoice) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class Invoice(domainresource.DomainResource):
""" Invoice containing ChargeItems from an Account.
Invoice containing collected ChargeItems from an Account with calculated
individual and total price for Billing purpose.
"""
resource_type = "Invoice"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.account = None
""" Account that is being balanced.
Type `FHIRReference` (represented as `dict` in JSON). """
self.cancelledReason = None
""" Reason for cancellation of this Invoice.
Type `str`. """
self.date = None
""" Invoice date / posting date.
Type `FHIRDate` (represented as `str` in JSON). """
self.identifier = None
""" Business Identifier for item.
List of `Identifier` items (represented as `dict` in JSON). """
self.issuer = None
""" Issuing Organization of Invoice.
Type `FHIRReference` (represented as `dict` in JSON). """
self.lineItem = None
""" Line items of this Invoice.
List of `InvoiceLineItem` items (represented as `dict` in JSON). """
self.note = None
""" Comments made about the invoice.
List of `Annotation` items (represented as `dict` in JSON). """
self.participant = None
""" Participant in creation of this Invoice.
List of `InvoiceParticipant` items (represented as `dict` in JSON). """
self.paymentTerms = None
""" Payment details.
Type `str`. """
self.recipient = None
""" Recipient of this invoice.
Type `FHIRReference` (represented as `dict` in JSON). """
self.status = None
""" draft | issued | balanced | cancelled | entered-in-error.
Type `str`. """
self.subject = None
""" Recipient(s) of goods and services.
Type `FHIRReference` (represented as `dict` in JSON). """
self.totalGross = None
""" Gross total of this Invoice.
Type `Money` (represented as `dict` in JSON). """
self.totalNet = None
""" Net total of this Invoice.
Type `Money` (represented as `dict` in JSON). """
self.totalPriceComponent = None
""" Components of Invoice total.
List of `InvoiceLineItemPriceComponent` items (represented as `dict` in JSON). """
self.type = None
""" Type of Invoice.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(Invoice, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Invoice, self).elementProperties()
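# Note added for readability: each tuple below follows the fhirclient
# element-property convention (name, json_name, type, is_list, of_many,
# not_optional).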
js.extend([
("account", "account", fhirreference.FHIRReference, False, None, False),
("cancelledReason", "cancelledReason", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("issuer", "issuer", fhirreference.FHIRReference, False, None, False),
("lineItem", "lineItem", InvoiceLineItem, True, None, False),
("note", "note", annotation.Annotation, True, None, False),
("participant", "participant", InvoiceParticipant, True, None, False),
("paymentTerms", "paymentTerms", str, False, None, False),
("recipient", "recipient", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, True),
("subject", "subject", fhirreference.FHIRReference, False, None, False),
("totalGross", "totalGross", money.Money, False, None, False),
("totalNet", "totalNet", money.Money, False, None, False),
("totalPriceComponent", "totalPriceComponent", InvoiceLineItemPriceComponent, True, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, False),
])
return js
from . import backboneelement
class InvoiceLineItem(backboneelement.BackboneElement):
""" Line items of this Invoice.
Each line item represents one charge for goods and services rendered.
Details such as date, code and amount are found in the referenced
ChargeItem resource.
"""
resource_type = "InvoiceLineItem"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.chargeItemCodeableConcept = None
""" Reference to ChargeItem containing details of this line item or an
inline billing code.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.chargeItemReference = None
""" Reference to ChargeItem containing details of this line item or an
inline billing code.
Type `FHIRReference` (represented as `dict` in JSON). """
self.priceComponent = None
""" Components of total line item price.
List of `InvoiceLineItemPriceComponent` items (represented as `dict` in JSON). """
self.sequence = None
""" Sequence number of line item.
Type `int`. """
super(InvoiceLineItem, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(InvoiceLineItem, self).elementProperties()
js.extend([
("chargeItemCodeableConcept", "chargeItemCodeableConcept", codeableconcept.CodeableConcept, False, "chargeItem", True),
("chargeItemReference", "chargeItemReference", fhirreference.FHIRReference, False, "chargeItem", True),
("priceComponent", "priceComponent", InvoiceLineItemPriceComponent, True, None, False),
("sequence", "sequence", int, False, None, False),
])
return js
class InvoiceLineItemPriceComponent(backboneelement.BackboneElement):
""" Components of total line item price.
The price for a ChargeItem may be calculated as a base price with
surcharges/deductions that apply in certain conditions. A
ChargeItemDefinition resource that defines the prices, factors and
conditions that apply to a billing code is currently under development. The
priceComponent element can be used to offer transparency to the recipient
of the Invoice as to how the prices have been calculated.
"""
resource_type = "InvoiceLineItemPriceComponent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.amount = None
""" Monetary amount associated with this component.
Type `Money` (represented as `dict` in JSON). """
self.code = None
""" Code identifying the specific component.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.factor = None
""" Factor used for calculating this component.
Type `float`. """
self.type = None
""" base | surcharge | deduction | discount | tax | informational.
Type `str`. """
super(InvoiceLineItemPriceComponent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(InvoiceLineItemPriceComponent, self).elementProperties()
js.extend([
("amount", "amount", money.Money, False, None, False),
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("factor", "factor", float, False, None, False),
("type", "type", str, False, None, True),
])
return js
class InvoiceParticipant(backboneelement.BackboneElement):
""" Participant in creation of this Invoice.
Indicates who or what performed or participated in the charged service.
"""
resource_type = "InvoiceParticipant"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.actor = None
""" Individual who was involved.
Type `FHIRReference` (represented as `dict` in JSON). """
self.role = None
""" Type of involvement in creation of this Invoice.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(InvoiceParticipant, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(InvoiceParticipant, self).elementProperties()
js.extend([
("actor", "actor", fhirreference.FHIRReference, False, None, True),
("role", "role", codeableconcept.CodeableConcept, False, None, False),
])
return js
import sys
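# Clarifying note (added): each try/except below attempts the relative import
# first and, if it fails (e.g. during partial package initialization), falls
# back to the module object already registered in sys.modules.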
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import money
except ImportError:
money = sys.modules[__package__ + '.money']
| 40.140794 | 132 | 0.611836 |
826419ad44e28f827131ca0f6175dddaf17a403b
| 261,090 |
py
|
Python
|
Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/coremodules/demand/virtualpop.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 4 |
2020-11-13T02:35:56.000Z
|
2021-03-29T20:15:54.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/coremodules/demand/virtualpop.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 9 |
2020-12-09T02:12:39.000Z
|
2021-02-18T00:15:28.000Z
|
Co-Simulation/Sumo/sumo-1.7.0/tools/contributed/sumopy/coremodules/demand/virtualpop.py
|
uruzahe/carla
|
940c2ab23cce1eda1ef66de35f66b42d40865fb1
|
[
"MIT"
] | 1 |
2020-11-20T19:31:26.000Z
|
2020-11-20T19:31:26.000Z
|
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file virtualpop.py
# @author Joerg Schweizer
# @date
import numpy as np
from numpy import random
import agilepy.lib_base.classman as cm
import agilepy.lib_base.arrayman as am
import agilepy.lib_base.xmlman as xm
from agilepy.lib_base.misc import random_choice, get_inversemap
from agilepy.lib_base.processes import Process
# from coremodules.modules_common import *
from coremodules.network.network import SumoIdsConf, MODES
from coremodules.network import routing
from coremodules.simulation import results as res
from coremodules.demand.demandbase import *
import virtualpop_results as res
GENDERS = {'male': 0, 'female': 1, 'unknown': -1}
OCCUPATIONS = {'unknown': -1,
'worker': 1,
'student': 2,
'employee': 3,
'public employee': 4,
'selfemployed': 5,
'pensioneer': 6,
'other': 7
}
class Activities(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Edge_Descriptions
def __init__(self, ident, virtualpop, **kwargs):
self._init_objman(ident=ident, parent=virtualpop, name='Activities',
info='Activity database of persons contains type, time, duration and location of activities.',
version=0.0,
**kwargs)
self._init_attributes()
def _init_attributes(self):
# activity types now in demand
activitytypes = self.parent.get_demand().activitytypes
self.add_col(am.IdsArrayConf('ids_activitytype', activitytypes,
groupnames=['parameters'],
choices=activitytypes.names.get_indexmap(),
name='Type',
info='Type of activity performed during the stop.',
#xmltag = 'actType',
#xmlmap = get_inversemap( activitytypes.names.get_indexmap()),
))
# attention, this may cause trouble during init if
# facilities are not yet initialized
self.add_col(am.IdsArrayConf('ids_facility', self.parent.get_scenario().landuse.facilities,
groupnames=['parameters'],
name='ID fac.',
info='Facility ID where activity takes place.',
#activitytype = 'home',
))
# self.add_col(am.ArrayConf( 'descriptions', '',
# dtype = np.object,
# perm='rw',
# is_index = True,
# name = 'Description',
# info = 'Description of activity.',
# ))
# self.add_col(am.IdlistsArrayConf( 'ids_landusetypes', self.parent.get_landuse().landusetypes,
# name = 'Landuse types',
# info = "Landuse type IDs, eher this activity type can take place.",
# ))
self.add_col(am.ArrayConf('hours_begin_earliest', 0.0,
dtype=np.float32,
groupnames=['parameters'],
perm='rw',
name='Earliest hour begin',
unit='h',
info='Earliest hour when this activity can begin.',
))
self.add_col(am.ArrayConf('hours_begin_latest', 1.0,
dtype=np.float32,
groupnames=['parameters'],
perm='rw',
name='Latest begin hour',
unit='h',
info='Latest hour when this activity can begin.',
))
self.add_col(am.ArrayConf('durations_min', 6.0,
dtype=np.float32,
groupnames=['parameters'],
perm='rw',
name='Min. Duration',
unit='h',
info='Minimum activity duration for a person within a day.',
))
self.add_col(am.ArrayConf('durations_max', 8.0,
dtype=np.float32,
groupnames=['parameters'],
perm='rw',
name='Max. Duration',
unit='h',
info='Maximum activity duration for a person within a day.',
))
def get_hours_end_earliest(self, ids):
return self.hours_begin_earliest[ids]+self.durations_min[ids]
def get_hours_end_latest(self, ids):
return self.hours_begin_latest[ids]+self.durations_max[ids]
def get_durations(self, ids, pdf='unit'):
durations = np.zeros(len(ids), dtype=np.float32)
i = 0
for time_start, time_end in zip(
np.array(self.durations_min[ids]*3600, dtype=np.int32),
np.array(self.durations_max[ids]*3600, dtype=np.int32)):
durations[i] = np.random.randint(time_start, time_end, 1)
i += 1
return durations
def get_times_end(self, ids, pdf='unit'):
"""
Returns an array with activity ending time for the
given activity IDs.
The ending time is calculated by drawing random samples
from the departure interval.
The random samples are drawn according to the given probability
density function, pdf.
Input arguments:
ids: integer array with activity IDs
pdf: probability density function 'unit'|'normal'
Returned arguments:
times_end: integer array with departure times
"""
times_end = np.zeros(len(ids), dtype=np.float32)
i = 0
for time_start, time_end in zip(
np.array(self.get_hours_end_earliest(ids)*3600, dtype=np.int32),
np.array(self.get_hours_end_latest(ids)*3600, dtype=np.int32)):
times_end[i] = np.random.randint(time_start, time_end, 1)
i += 1
return times_end
def get_times_begin(self, ids, pdf='unit'):
"""
Returns an array with beginning time for the
given activity IDs.
The beginning time is calculated by drawing random samples
from the begin-time interval.
The random samples are drawn according to the given probability
density function, pdf.
Input arguments:
ids: integer array with activity IDs
pdf: probability density function 'unit'|'normal'
Returned arguments:
times_begin: integer array with begin times
"""
times_begin = np.zeros(len(ids), dtype=np.float32)
i = 0
for time_start, time_end in zip(
np.array(self.hours_begin_earliest[ids]*3600, dtype=np.int32),
np.array(self.hours_begin_latest[ids]*3600, dtype=np.int32)):
times_begin[i] = np.random.randint(time_start, time_end, 1)
i += 1
return times_begin
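# Sampling sketch (added for illustration; 'unit' corresponds to a uniform
# draw, and the hour bounds below are hypothetical):
#   t0, t1 = int(7.0 * 3600), int(9.0 * 3600)  # earliest/latest begin, in s
#   time_begin = np.random.randint(t0, t1)     # one uniform sample in [t0, t1)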
class IndividualAutos(am.ArrayObjman):
def __init__(self, ident, population, **kwargs):
# print 'individualvehicle vtype id_default',vtypes.ids_sumo.get_id_from_index('passenger1')
self._init_objman(ident=ident,
parent=population,
name='Indiv. Autos',
info='Individual auto database. These are privately owned autos.',
**kwargs)
self._init_attributes()
self._init_constants()
def _init_constants(self):
self.do_not_save_attrs(['mode', 'mode_prefix',
'_edges', '_lanes', '_individualvehicle', '_ids_vtype_sumo', '_ids_edge_sumo',
'_id_mode', '_get_laneid_allowed', '_get_sumoinfo_from_id_lane',
'_space_access', '_parking', '_time_after_unboarding',
])
def def_mode(self):
self.mode = 'passenger'
self.mode_prefix = 'iauto'
def _init_attributes(self):
vtypes = self.parent.get_demand().vtypes
self.def_mode()
ids_vtype = vtypes.select_by_mode(mode=self.mode)
self.add(cm.AttrConf('space_access', 0.5,
groupnames=['options'],
perm='rw',
name='Space access',
unit='m',
info='Space to access vehicles at parkings. This is typically less than the vehicle length.',
))
self.add(cm.AttrConf('time_after_unboarding', 5,
groupnames=['options'],
perm='rw',
name='time after unboarding',
unit='s',
info='Time the vehicle waits before disappearing after unboarding.',
))
self.add_col(am.IdsArrayConf('ids_vtype', vtypes,
id_default=ids_vtype[0],
groupnames=['state'],
name='Veh. type',
info='Vehicle type.',
#xmltag = 'type',
))
self.add_col(am.IdsArrayConf('ids_person', self.parent,
groupnames=['state'],
name='ID person',
info='ID of person who owns the vehicle.',
))
self.add_col(am.ArrayConf('times_exec', 0.0,
name='Exec time',
info='Total route execution time from simulation run of last plan.',
unit='s',
))
def get_virtualpop(self):
return self.parent
def get_ids_veh_pop(self):
"""
To be overridden by other individual vehicle types.
"""
return self.get_virtualpop().ids_iauto
def get_share(self, is_abs=False):
n_veh = len(self)
if is_abs:
return n_veh
else:
return float(n_veh)/float(len(self.get_virtualpop()))
def get_stagetable(self):
return self.parent.get_plans().get_stagetable('autorides')
def get_demand(self):
return self.parent.parent
def clear_vehicles(self):
self.get_ids_veh_pop()[self.ids_person.get_value()] = -1
self.clear()
def assign_to_persons(self, ids_person):
# print 'assign_to_persons',len(ids_person),self.mode
# self.clear_vehicles()
#ids_person_noveh = set(ids_person).difference(set(self.ids_person))
n_new = len(ids_person)
#
# this call is selecting a veh id aof the specific mode
# according to its share within this mode
ids_vtype = self.get_demand().vtypes.generate_vtypes_for_mode(n_new, mode=self.mode)
# print ' ids_vtype',ids_vtype
ids_veh = self.add_rows(n=n_new,
ids_person=ids_person,
ids_vtype=ids_vtype,
)
self.get_ids_veh_pop()[ids_person] = ids_veh
return ids_veh
def get_vtypes(self):
"""
Returns a set with all used vehicle types.
"""
# print 'Vehicles_individual.get_vtypes',self.cols.vtype
return set(self.ids_vtype.get_value())
def get_id_veh_xml(self, id_veh, id_stage):
return self.mode_prefix + '.%s.%s' % (id_veh, id_stage)
def get_id_line_xml(self, id_veh):
return self.mode_prefix + '.%s' % (id_veh)
def get_id_from_id_sumo(self, id_veh_sumo):
# print 'get_id_from_id_sumo',id_veh_sumo,id_veh_sumo.split('.'),self.mode_prefix
if len(id_veh_sumo.split('.')) == 3:
prefix, id_veh, id_stage = id_veh_sumo.split('.')
if prefix == self.mode_prefix:
return int(id_veh)
else:
return -1
return -1
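# Naming-scheme example (added note): get_id_veh_xml(12, 3) yields 'iauto.12.3'
# for this class, and get_id_from_id_sumo('iauto.12.3') recovers 12, while an
# ID with another prefix such as 'ibike.12.3' returns -1 here.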
# def append_ride(self, id_veh, id_ride):
# ids_ride = self.ids_rides[id_veh]
# if ids_ride is None:
# self.ids_rides[id_veh] = [id_ride]
# else:
# ids_ride.append(id_ride)
def prepare_write_xml(self):
"""
Prepare xml export. Must return export function.
"""
virtualpop = self.get_virtualpop()
scenario = virtualpop.get_scenario()
#plans = virtualpop.get_plans()
self._rides = self.get_stagetable()
self._edges = scenario.net.edges
self._lanes = scenario.net.lanes
#self._individualvehicle = virtualpop.get_ibikes()
self._ids_vtype_sumo = scenario.demand.vtypes.ids_sumo
self._ids_edge_sumo = self._edges.ids_sumo
self._id_mode = scenario.net.modes.get_id_mode(self.mode)
self._get_laneid_allowed = self._edges.get_laneid_allowed
self._get_sumoinfo_from_id_lane = scenario.net.lanes.get_sumoinfo_from_id_lane
self._space_access = self.space_access.get_value()
#self._time_veh_wait_after_stop = 3600
self._parking = virtualpop.get_landuse().parking
self._time_after_unboarding = self.time_after_unboarding.get_value()
return self.write_xml
def get_id_veh(self, id_stage):
return self._rides.ids_iauto[id_stage]
def write_xml(self, fd, id_stage, time_begin, indent=2):
# TODO: actually this should go in individualvehicle
#time_veh_wait_after_stop = 3600
#plans = self.get_plans()
#walkstages = plans.get_stagetable('walks')
#rides = plans.get_stagetable('autorides')
#activitystages = plans.get_stagetable('activities')
rides = self._rides
#lanes = self._lanes
parking = self._parking
#net = self.get_net()
#lanes = net.lanes
#edges = net.edges
#ind_ride = rides.get_inds(id_stage)
id_veh = self.get_id_veh(id_stage)
#individualvehicle = self._iveh
id_vtype = self.ids_vtype[id_veh]
# id_veh_ride,
# ids_vtypes_iveh[id_veh],
# ids_edges_rides_arr[ind_ride],
# ids_parking_from_rides_arr[ind_ride],
# ids_parking_to_rides_arr[ind_ride],
id_parking_from = rides.ids_parking_from[id_stage]
id_lane_from = parking.ids_lane[id_parking_from]
#laneindex_from = self._lanes.indexes[id_lane_from]
pos_from = parking.positions[id_parking_from]
id_parking_to = rides.ids_parking_to[id_stage]
id_lane_to = parking.ids_lane[id_parking_to]
#laneindex_to = self._lanes.indexes[id_lane_to]
pos_to = parking.positions[id_parking_to]
# write unique veh ID to prevent confusion with other veh declarations
fd.write(xm.start('vehicle id="%s"' % self.get_id_veh_xml(id_veh, id_stage), indent+2))
# get start time of first stage of the plan
#id_plan = rides.ids_plan[id_stage]
#stages0, id_stage0 = self.get_plans().stagelists[id_plan][0]
# this is the time when the vehicle appears in the scenario
fd.write(xm.num('depart', '%.d' % rides.times_init[id_stage]))
#fd.write(xm.num('depart', '%.d'%stages0.times_start[id_stage0]))
fd.write(xm.num('type', self._ids_vtype_sumo[id_vtype]))
fd.write(xm.num('line', self.get_id_line_xml(id_veh)))
fd.write(xm.num('departPos', pos_from))
fd.write(xm.num('departLane', self._lanes.indexes[id_lane_from]))
fd.write(xm.stop())
# write route
fd.write(xm.start('route', indent+4))
# print ' edgeindex[ids_edge]',edgeindex[ids_edge]
fd.write(xm.arr('edges', self._ids_edge_sumo[rides.ids_edges[id_stage]]))
# does not seem to have an effect, always starts at base????
#fd.write(xm.num('departPos', pos_from))
#fd.write(xm.num('departLane', laneindex_from ))
fd.write(xm.stopit())
# write depart stop
fd.write(xm.start('stop', indent+4))
#id_lane = self._lanes.ids_edge[id_lane_from]
fd.write(xm.num('lane', self._get_sumoinfo_from_id_lane(id_lane_from)))
# in 0.31 the vehicle will wait until after this duration
# so it will be removed unless it will get a timeout function
#fd.write(xm.num('duration', time_veh_wait_after_stop))
fd.write(xm.num('startPos', pos_from - parking.lengths[id_parking_from]))
fd.write(xm.num('endPos', pos_from))
fd.write(xm.num('triggered', "True"))
# crashes with parking=True in 0.30!
# however if not parked the vehicle is blocking the traffic
# while waiting; workaround: delay departure to be sure that person already arrived
fd.write(xm.num('parking', "True")) # in windows 0.30 parked vehicles do not depart!!
#fd.write(xm.num('parking', "False"))
fd.write(xm.stopit())
# write arrival stop
fd.write(xm.start('stop', indent+4))
fd.write(xm.num('lane', self._get_sumoinfo_from_id_lane(id_lane_to)))
fd.write(xm.num('duration', self._time_after_unboarding)) # for unboarding only
fd.write(xm.num('startPos', pos_to - parking.lengths[id_parking_to]))
fd.write(xm.num('endPos', pos_to))
#fd.write(xm.num('triggered', "True"))
fd.write(xm.stopit())
fd.write(xm.end('vehicle', indent+2))
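# Illustrative output of write_xml above (hypothetical IDs, edges and
# positions; the element structure follows the writes in this method):
#
# <vehicle id="iauto.12.3" depart="28800" type="passenger1" line="iauto.12" departPos="35.00" departLane="0">
#     <route edges="edge12 edge13 edge14"/>
#     <stop lane="edge12_0" startPos="30.00" endPos="35.00" triggered="True" parking="True"/>
#     <stop lane="edge14_0" duration="5" startPos="60.00" endPos="65.00"/>
# </vehicle>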
class IndividualBikes(IndividualAutos):
def __init__(self, ident, population, **kwargs):
# print 'individualvehicle vtype id_default',vtypes.ids_sumo.get_id_from_index('passenger1')
self._init_objman(ident=ident,
parent=population,
name='Indiv. Bikes',
info='Individual bike database. These are privately owned bikes.',
**kwargs)
self._init_attributes()
self._init_constants()
def _init_attributes(self):
IndividualAutos._init_attributes(self)
def _init_constants(self):
self.do_not_save_attrs(['mode', 'mode_prefix',
'_edges', '_ids_vtype_sumo', '_ids_edge_sumo',
'_id_mode', '_get_laneid_allowed', '_get_sumoinfo_from_id_lane',
'_space_access',
])
def def_mode(self):
self.mode = 'bicycle'
self.mode_prefix = 'ibike'
def get_ids_veh_pop(self):
"""
To be overridden by other individual vehicle types.
"""
return self.parent.ids_ibike
def get_stagetable(self):
return self.parent.get_plans().get_stagetable('bikerides')
def get_id_veh(self, id_stage):
return self._rides.ids_ibike[id_stage]
def prepare_write_xml(self):
"""
Prepare xml export. Must return export function.
"""
virtualpop = self.get_virtualpop()
scenario = virtualpop.get_scenario()
#plans = virtualpop.get_plans()
self._rides = self.get_stagetable()
self._edges = scenario.net.edges
#self._individualvehicle = virtualpop.get_ibikes()
self._ids_vtype_sumo = scenario.demand.vtypes.ids_sumo
self._ids_edge_sumo = self._edges.ids_sumo
self._id_mode = scenario.net.modes.get_id_mode(self.mode)
self._get_laneid_allowed = self._edges.get_laneid_allowed
self._get_sumoinfo_from_id_lane = scenario.net.lanes.get_sumoinfo_from_id_lane
self._space_access = self.space_access.get_value()
return self.write_xml
# def _limit_pos(self,pos,id_edge):
def write_xml(self, fd, id_stage, time_begin, indent=2):
rides = self._rides
id_veh = self.get_id_veh(id_stage)
# print 'write_xml',id_stage, time_begin,self.get_id_veh_xml(id_veh, id_stage)
# print ' ids_edge_from,ids_edge_to',rides.ids_edge_from[id_stage],rides.ids_edge_to[id_stage],self._get_laneid_allowed( rides.ids_edge_from[id_stage], self._id_mode),self._get_laneid_allowed( rides.ids_edge_to[id_stage], self._id_mode)
# TODO: actually this should go in individualvehicle
#time_veh_wait_after_stop = 3600
#plans = self.get_plans()
#walkstages = plans.get_stagetable('walks')
#rides = plans.get_stagetable('bikerides')
#activitystages = plans.get_stagetable('activities')
# for debug only:
#virtualpop = self.get_virtualpop()
#ids_edge_sumo = virtualpop.get_net().edges.ids_sumo
#parking = self.get_landuse().parking
#net = self.get_net()
#lanes = net.lanes
#edges = net.edges
#ind_ride = rides.get_inds(id_stage)
#individualvehicle = self.get_ibikes()
id_vtype = self.ids_vtype[id_veh]
# id_veh_ride,
# ids_vtypes_iveh[id_veh],
# ids_edges_rides_arr[ind_ride],
# ids_parking_from_rides_arr[ind_ride],
# ids_parking_to_rides_arr[ind_ride],
#id_parking_from = rides.ids_parking_from[id_stage]
#id_lane_from = parking.ids_lane[id_parking_from]
#laneindex_from = lanes.indexes[id_lane_from]
#pos_from = parking.positions[id_parking_from]
#id_parking_to = rides.ids_parking_to[id_stage]
#id_lane_to = parking.ids_lane[id_parking_to]
#laneindex_to = lanes.indexes[id_lane_to]
#pos_to = parking.positions[id_parking_to]
# write unique veh ID to prevent confusion with other veh declarations
fd.write(xm.start('vehicle id="%s"' % self.get_id_veh_xml(id_veh, id_stage), indent+2))
# get start time of first stage of the plan
#id_plan = rides.ids_plan[id_stage]
#stages0, id_stage0 = self.get_plans().stagelists[id_plan][0]
# this is the time when the vehicle appears in the scenario
#fd.write(xm.num('depart', '%.d'%rides.times_init[id_stage]))
fd.write(xm.num('depart', '%.d' % time_begin))
fd.write(xm.num('type', self._ids_vtype_sumo[id_vtype]))
fd.write(xm.num('line', self.get_id_line_xml(id_veh)))
#fd.write(xm.num('departPos', pos_from))
#fd.write(xm.num('departLane', laneindex_from ))
fd.write(xm.num('from', self._ids_edge_sumo[rides.ids_edge_from[id_stage]]))
pos_from = rides.positions_from[id_stage]
pos_to = rides.positions_to[id_stage]
fd.write(xm.num('departPos', pos_from))
fd.write(xm.num('arrivalPos', pos_to))
fd.write(xm.num('departLane', 'best'))
fd.write(xm.stop())
# write route
fd.write(xm.start('route', indent+4))
# print ' ids_edges',rides.ids_edges[id_stage]
# print ' ids_sumo',self._ids_edge_sumo[rides.ids_edges[id_stage]]
fd.write(xm.arr('edges', self._ids_edge_sumo[rides.ids_edges[id_stage]]))
# fd.write(xm.arr('edges',edges.ids_sumo[rides.ids_edges[id_stage]]))
# does not seem to have an effect, always starts at base????
#id_edge = rides.ids_edge_from[id_stage]
# print ' id_lane',id_lane,self._get_sumoinfo_from_id_lane(id_lane),'id_edge',id_edge,ids_edge_sumo[id_edge]
fd.write(xm.stopit())
# write depart stop
fd.write(xm.start('stop', indent+4))
id_lane = self._get_laneid_allowed(rides.ids_edge_from[id_stage], self._id_mode)
fd.write(xm.num('lane', self._get_sumoinfo_from_id_lane(id_lane)))
# in 0.31 the vehicle will wait until after this duration
# so it will be removed unless it will get a timeout function
#fd.write(xm.num('duration', time_veh_wait_after_stop))
if pos_from > self._space_access:
fd.write(xm.num('startPos', pos_from - self._space_access))
fd.write(xm.num('endPos', pos_from+self._space_access))
else:
fd.write(xm.num('startPos', 0.1*pos_from))
fd.write(xm.num('endPos', pos_from+self._space_access))
fd.write(xm.num('triggered', "True"))
# crashes with parking=True in 0.30!
# however if not parked the vehicle is blocking the traffic
# while waiting; workaround: delay departure to be sure that person already arrived
fd.write(xm.num('parking', 'True')) # in windows 0.30 parked vehicles do not depart!!
#fd.write(xm.num('parking', "False"))
fd.write(xm.stopit())
# write arrival stop
fd.write(xm.start('stop', indent+4))
id_lane = self._get_laneid_allowed(rides.ids_edge_to[id_stage], self._id_mode)
#id_edge = rides.ids_edge_to[id_stage]
# print ' id_lane',id_lane,self._get_sumoinfo_from_id_lane(id_lane),'id_edge',id_edge,ids_edge_sumo[id_edge]
fd.write(xm.num('lane', self._get_sumoinfo_from_id_lane(id_lane)))
fd.write(xm.num('duration', 5)) # for unboarding only
if pos_to > self._space_access:
fd.write(xm.num('startPos', pos_to - self._space_access))
fd.write(xm.num('endPos', pos_to))
else:
fd.write(xm.num('startPos', 0.1*pos_to))
fd.write(xm.num('endPos', pos_to))
#fd.write(xm.num('triggered', "True"))
fd.write(xm.stopit())
fd.write(xm.end('vehicle', indent+2))
class IndividualMotorcycles(IndividualBikes):
def __init__(self, ident, population, **kwargs):
# print 'individualvehicle vtype id_default',vtypes.ids_sumo.get_id_from_index('passenger1')
self._init_objman(ident=ident,
parent=population,
name='Indiv. Moto',
info='Individual Motorcycle/moped database. These are privately owned motorcycles.',
**kwargs)
IndividualBikes._init_attributes(self)
IndividualBikes._init_constants(self)
def def_mode(self):
self.mode = 'moped'
self.mode_prefix = 'imoto'
def get_ids_veh_pop(self):
"""
To be overridden by other individual vehicle types.
"""
return self.parent.ids_imoto
def get_stagetable(self):
return self.parent.get_plans().get_stagetable('motorides')
def get_id_veh(self, id_stage):
return self._rides.ids_imoto[id_stage]
class StrategyMixin(cm.BaseObjman):
def __init__(self, ident, parent=None,
name='Strategy mixin', info='Info on strategy.',
**kwargs):
"""
To be overridden.
"""
# attention parent is the Strategies table
self._init_objman(ident, parent, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
def _init_attributes(self, **kwargs):
# print 'StrategyMixin._init_attributes'
attrsman = self.get_attrsman()
def get_id_strategy(self):
return self.parent.names.get_id_from_index(self.get_ident())
def get_scenario(self):
return self.parent.parent.get_scenario()
def get_activities(self):
return self.parent.parent.get_activities()
def get_virtualpop(self):
return self.parent.parent
def get_plans(self):
return self.parent.parent.plans
def clip_positions(self, positions, ids_edge):
lengths = self.get_scenario().net.edges.lengths[ids_edge]
# print 'clip_positions',positions.shape,ids_edge.shape,lengths.shape
positions_clip = np.clip(positions, self.dist_node_min*np.ones(len(positions),
dtype=np.float32), lengths-self.dist_node_min)
inds = lengths < 2*self.dist_node_min
# print ' inds.shape',inds.shape,positions_clip.shape
positions_clip[inds] = 0.5*lengths[inds]
return positions_clip
def _init_attributes_strategy(self, **kwargs):
attrsman = self.get_attrsman()
self.dist_node_min = attrsman.add(cm.AttrConf('dist_node_min', kwargs.get('dist_node_min', 40.0),
groupnames=['options'],
perm='rw',
name='Min. dist to nodes',
unit='m',
info='Minimum distance between starting position and node center.',
))
# def _init_constants_strategy(self):
# #print '_init_constants_strategy'
# modes = self.get_virtualpop().get_scenario().net.modes
# self._id_mode_bike = modes.get_id_mode('bicycle')
# self._id_mode_auto = modes.get_id_mode('passenger')
# self._id_mode_moto = modes.get_id_mode('motorcycle')
# self.get_attrsman().do_not_save_attrs([
# '_id_mode_bike','_id_mode_auto','_id_mode_moto',
# ])
# print ' _id_mode_auto',self._id_mode_auto
# def are_feasible(self, ids_person):
# """
# Returns a bool vector, with True values for
# persons where this strategy can be applied.
# """
# return []
# def is_feasible(self, id_person):
# """
# Returns True if this strategy is feasible for this person.
# Overridden by specific strategy.
# """
# return False
def preevaluate(self, ids_person):
"""
Preevaluates this strategy for the person IDs in vector ids_person.
Returns a preevaluation vector with a preevaluation value
for each person ID. The values of the preevaluation vector are as follows:
-1 : Strategy cannot be applied
0 : Strategy can be applied, but the preferred mode is not used
1 : Strategy can be applied, and preferred mode is part of the strategy
2 : Strategy uses predominantly preferred mode
"""
return np.zeros(len(ids_person), dtype=np.int32)
def plan(self, ids_person, logger=None):
"""
Generates a plan for these persons according to this strategy.
Overridden by specific strategy.
"""
pass
class NoneStrategy(StrategyMixin):
def __init__(self, ident, parent=None,
name='None strategy',
info='With this strategy, no mobility plan is generated.',
**kwargs):
self._init_objman(ident, parent, name=name, info=info, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
class TransitStrategy(StrategyMixin):
def __init__(self, ident, parent=None,
name='Public Transport Strategy',
info='With this strategy, the person uses public transport as the main transport mode, walking to and from the stops.',
**kwargs):
self._init_objman(ident, parent, name=name, info=info, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
# specific init
self._init_attributes()
self._init_constants()
def _init_attributes(self):
# print 'StrategyMixin._init_attributes'
pass
def _init_constants(self):
#virtualpop = self.get_virtualpop()
#stagetables = virtualpop.get_stagetables()
#self._walkstages = stagetables.get_stagetable('walks')
#self._ridestages = stagetables.get_stagetable('rides')
#self._activitystages = stagetables.get_stagetable('activities')
#self._plans = virtualpop.get_plans()
#
# print 'AutoStrategy._init_constants'
# print dir(self)
# self.get_attrsman().do_not_save_attrs(['_activitystages','_ridestages','_walkstages','_plans'])
modes = self.get_virtualpop().get_scenario().net.modes
self._id_mode_bike = modes.get_id_mode('bicycle')
self._id_mode_auto = modes.get_id_mode('passenger')
self._id_mode_moto = modes.get_id_mode('motorcycle')
self._id_mode_bus = modes.get_id_mode('bus')
self.get_attrsman().do_not_save_attrs([
'_id_mode_bike', '_id_mode_auto', '_id_mode_moto', '_id_mode_bus'
])
def preevaluate(self, ids_person):
"""
Preevaluates this strategy for the person IDs in vector ids_person.
Returns a preevaluation vector with a preevaluation value
for each person ID. The values of the preevaluation vector are as follows:
-1 : Strategy cannot be applied
0 : Strategy can be applied, but the preferred mode is not used
1 : Strategy can be applied, and preferred mode is part of the strategy
2 : Strategy uses predominantly preferred mode
"""
n_pers = len(ids_person)
persons = self.get_virtualpop()
preeval = np.zeros(n_pers, dtype=np.int32)
# TODO: here we could exclude by age or distance facilities-stops
# put 0 for persons whose preference is not public transport
preeval[persons.ids_mode_preferred[ids_person] != self._id_mode_bus] = 0
# put 2 for persons who prefer public transport
preeval[persons.ids_mode_preferred[ids_person] == self._id_mode_bus] = 2
print ' TransitStrategy.preevaluate', len(np.flatnonzero(preeval))
return preeval
def plan(self, ids_person, logger=None):
"""
Generates a plan for these persons according to this strategy.
Overridden by specific strategy.
"""
print 'TransitStrategy.plan', len(ids_person)
#make_plans_private(self, ids_person = None, mode = 'passenger')
# routing necessary?
virtualpop = self.get_virtualpop()
plans = virtualpop.get_plans() # self._plans
demand = virtualpop.get_demand()
ptlines = demand.ptlines
walkstages = plans.get_stagetable('walks')
transitstages = plans.get_stagetable('transits')
activitystages = plans.get_stagetable('activities')
activities = virtualpop.get_activities()
activitytypes = demand.activitytypes
landuse = virtualpop.get_landuse()
facilities = landuse.facilities
parking = landuse.parking
scenario = virtualpop.get_scenario()
net = scenario.net
edges = net.edges
lanes = net.lanes
modes = net.modes
ptstops = net.ptstops
# print ' demand',demand
# print ' demand.ptlines',demand.ptlines,dir(demand.ptlines)
# print ' demand.ptlines.get_ptlinks()',demand.ptlines.get_ptlinks()
# print ' demand.virtualpop',demand.virtualpop,dir(demand.virtualpop)
# print ' demand.trips',demand.trips,dir(demand.trips)
if len(ptlines) == 0:
print 'WARNING in TransitStrategy.plan: no transit services available.'
return False
ptlinks = ptlines.get_ptlinks()
ptlinktypes = ptlinks.types.choices
type_enter = ptlinktypes['enter']
type_transit = ptlinktypes['transit']
type_board = ptlinktypes['board']
type_alight = ptlinktypes['alight']
type_transfer = ptlinktypes['transfer']
type_walk = ptlinktypes['walk']
type_exit = ptlinktypes['exit']
ptfstar = ptlinks.get_fstar()
pttimes = ptlinks.get_times()
stops_to_enter, stops_to_exit = ptlinks.get_stops_to_enter_exit()
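# Note added for clarity: ptfstar is the forward-star successor structure of
# the public-transport link graph and pttimes holds the link travel times;
# together with the enter/exit stop sets they parameterize the
# ptlinks.route() calls further below.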
ids_stoplane = ptstops.ids_lane
ids_laneedge = net.lanes.ids_edge
times_est_plan = plans.times_est
# here we can determine edge weights for different modes
# this could be centralized to avoid redundance
plans.prepare_stagetables(['walks', 'transits', 'activities'])
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(0, ids_person=ids_person)
if len(ids_person_act) == 0:
print 'WARNING in TransitStrategy.plan: no eligible persons found.'
return False
# temporary maps from ids_person to other parameters
nm = np.max(ids_person_act)+1
map_ids_plan = np.zeros(nm, dtype=np.int32)
#ids_plan_act = virtualpop.add_plans(ids_person_act, id_strategy = self.get_id_strategy())
map_ids_plan[ids_person_act] = virtualpop.add_plans(ids_person_act, id_strategy=self.get_id_strategy())
map_times = np.zeros(nm, dtype=np.int32)
map_times[ids_person_act] = activities.get_times_end(ids_act_from, pdf='unit')
# set start time to plans (important!)
plans.times_begin[map_ids_plan[ids_person_act]] = map_times[ids_person_act]
map_ids_fac_from = np.zeros(nm, dtype=np.int32)
map_ids_fac_from[ids_person_act] = activities.ids_facility[ids_act_from]
n_plans = len(ids_person_act)
print 'TransitStrategy.plan n_plans=', n_plans
# make initial activity stage
ids_edge_from = facilities.ids_roadedge_closest[map_ids_fac_from[ids_person_act]]
poss_edge_from = facilities.positions_roadedge_closest[map_ids_fac_from[ids_person_act]]
# this is the time when first activity starts
# first activity is normally not simulated
names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
durations_act_from = activities.get_durations(ids_act_from)
times_from = map_times[ids_person_act]-durations_act_from
#times_from = activities.get_times_end(ids_act_from, pdf = 'unit')
for id_plan,\
time,\
id_act_from,\
name_acttype_from,\
duration_act_from,\
id_edge_from,\
pos_edge_from \
in zip(map_ids_plan[ids_person_act],
times_from,
ids_act_from,
names_acttype_from,
durations_act_from,
ids_edge_from,
poss_edge_from):
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_from,
names_activitytype=name_acttype_from,
durations=duration_act_from,
ids_lane=edges.ids_lanes[id_edge_from][0],
positions=pos_edge_from,
)
##
ind_act = 0
# main loop while there are persons performing
# an activity at index ind_act
while len(ids_person_act) > 0:
ids_plan = map_ids_plan[ids_person_act]
times_from = map_times[ids_person_act]
names_acttype_to = activitytypes.names[activities.ids_activitytype[ids_act_to]]
durations_act_to = activities.get_durations(ids_act_to)
ids_fac_from = map_ids_fac_from[ids_person_act]
ids_fac_to = activities.ids_facility[ids_act_to]
centroids_from = facilities.centroids[ids_fac_from]
centroids_to = facilities.centroids[ids_fac_to]
# origin edge and position
ids_edge_from = facilities.ids_roadedge_closest[ids_fac_from]
poss_edge_from = facilities.positions_roadedge_closest[ids_fac_from]
# destination edge and position
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
poss_edge_to = facilities.positions_roadedge_closest[ids_fac_to]
ids_stop_from = ptstops.get_closest(centroids_from)
ids_stop_to = ptstops.get_closest(centroids_to)
ids_stopedge_from = ids_laneedge[ids_stoplane[ids_stop_from]]
ids_stopedge_to = ids_laneedge[ids_stoplane[ids_stop_to]]
# do random pos here
poss_stop_from = 0.5*(ptstops.positions_from[ids_stop_from]
+ ptstops.positions_to[ids_stop_from])
poss_stop_to = 0.5*(ptstops.positions_from[ids_stop_to]
+ ptstops.positions_to[ids_stop_to])
i = 0.0
for id_person, id_plan, time_from, id_act_from, id_act_to, name_acttype_to, duration_act_to, id_edge_from, pos_edge_from, id_edge_to, pos_edge_to, id_stop_from, id_stopedge_from, pos_stop_from, id_stop_to, id_stopedge_to, pos_stop_to\
in zip(ids_person_act, ids_plan, times_from, ids_act_from, ids_act_to, names_acttype_to, durations_act_to, ids_edge_from, poss_edge_from, ids_edge_to, poss_edge_to, ids_stop_from, ids_stopedge_from, poss_stop_from, ids_stop_to, ids_stopedge_to, poss_stop_to):
n_pers = len(ids_person_act)
if logger:
logger.progress(i/n_pers*100)
i += 1.0
print 79*'_'
print ' id_plan=%d, id_person=%d, ' % (id_plan, id_person)
id_stage_walk1, time = walkstages.append_stage(id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_stopedge_from,
position_edge_to=pos_stop_from, # -7.0,
)
# print ' id_stopedge_from',id_stopedge_from
# print ' pos_stop_from',pos_stop_from
# print
# print ' id_stopedge_to',id_stopedge_to
# print ' pos_stop_to',pos_stop_to
# print
# print ' id_stop_from',id_stop_from
# print ' id_stop_to',id_stop_to
durations, linktypes, ids_line, ids_fromstop, ids_tostop =\
ptlinks.route(id_stop_from, id_stop_to,
fstar=ptfstar, times=pttimes,
stops_to_enter=stops_to_enter,
stops_to_exit=stops_to_exit)
# print ' routing done. make plan..'
if len(linktypes) > 0:
if linktypes[-1] == type_walk: # is last stage a walk?
# remove it, because will go directly to destination
linktypes = linktypes[:-1]
ids_line = ids_line[:-1]
durations = durations[:-1]
ids_fromstop = ids_fromstop[:-1]
ids_tostop = ids_tostop[:-1]
# print ' ids_line ',ids_line
# print ' ids_fromstop',ids_fromstop
# print ' ids_tostop ',ids_tostop
if len(linktypes) > 0: # is there any public transport line to take?
                    # go through PT links and generate transits and walks to transfer
ids_stopedge_from = ids_laneedge[ids_stoplane[ids_fromstop]]
ids_stopedge_to = ids_laneedge[ids_stoplane[ids_tostop]]
poss_stop_from = 0.5*(ptstops.positions_from[ids_fromstop]
+ ptstops.positions_to[ids_fromstop])
poss_stop_to = 0.5*(ptstops.positions_from[ids_tostop]
+ ptstops.positions_to[ids_tostop])
# this is wait time buffer to be added to the successive stage
# as waiting is currently not modelled as an extra stage
duration_wait = 0.0
# create stages for PT
for linktype, id_line, duration,\
id_stopedge_from, pos_fromstop,\
id_stopedge_to, pos_tostop in\
zip(linktypes,
ids_line,
durations,
ids_stopedge_from, poss_stop_from,
ids_stopedge_to, poss_stop_to,
):
print ' stage for linktype %2d fromedge %s toedge %s' % (
linktype, edges.ids_sumo[id_stopedge_from], edges.ids_sumo[id_stopedge_to])
print ' id_stopedge_from,id_stopedge_to', id_stopedge_from, id_stopedge_to
if linktype == type_transit: # transit!
print ' add transit'
id_stage_transit, time = transitstages.append_stage(
id_plan, time,
id_line=id_line,
duration=duration+duration_wait,
id_fromedge=id_stopedge_from,
id_toedge=id_stopedge_to,
)
duration_wait = 0.0
elif linktype == type_walk: # walk to transfer
print ' add transfer'
id_stage_transfer, time = walkstages.append_stage(
id_plan, time,
id_edge_from=id_stopedge_from,
position_edge_from=pos_fromstop,
id_edge_to=id_stopedge_to,
position_edge_to=pos_tostop,
duration=duration+duration_wait,
)
duration_wait = 0.0
                        else:  # all other link types are not modelled
                            # do not do anything, just add wait time to next stage
print ' add duration', duration
duration_wait += duration
# walk from final stop to activity
# print ' Stage for linktype %2d fromedge %s toedge %s'%(linktype, edges.ids_sumo[id_stopedge_to],edges.ids_sumo[id_edge_to] )
id_stage_walk2, time = walkstages.append_stage(id_plan, time,
id_edge_from=id_stopedge_to,
position_edge_from=pos_tostop,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# there is no public transport line linking these nodes.
# Modify walk directly from home to activity
time = walkstages.modify_stage(id_stage_walk1, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
# update time for trips estimation for this plan
plans.times_est[id_plan] += time-time_from
# define current end time without last activity duration
plans.times_end[id_plan] = time
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=edges.ids_lanes[id_edge_to][0],
positions=pos_edge_to,
)
# store time for next iteration in case other activities are
# following
map_times[id_person] = time
            # select persons and activities for next step
ind_act += 1
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(ind_act, ids_person=ids_person_act)
class WalkStrategy(StrategyMixin):
def __init__(self, ident, parent=None,
name='Walk Strategy',
info='With this strategy, the person walks to all destinations.',
**kwargs):
self._init_objman(ident, parent, name=name, info=info, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
# specific init
self._init_attributes()
self._init_constants()
def _init_attributes(self):
# print 'StrategyMixin._init_attributes'
pass
def _init_constants(self):
#virtualpop = self.get_virtualpop()
#stagetables = virtualpop.get_stagetables()
#self._walkstages = stagetables.get_stagetable('walks')
#self._ridestages = stagetables.get_stagetable('rides')
#self._activitystages = stagetables.get_stagetable('activities')
#self._plans = virtualpop.get_plans()
#
# print 'AutoStrategy._init_constants'
# print dir(self)
# self.get_attrsman().do_not_save_attrs(['_activitystages','_ridestages','_walkstages','_plans'])
modes = self.get_virtualpop().get_scenario().net.modes
self._id_mode_bike = modes.get_id_mode('bicycle')
self._id_mode_auto = modes.get_id_mode('passenger')
self._id_mode_moto = modes.get_id_mode('motorcycle')
self._id_mode_bus = modes.get_id_mode('bus')
self._id_mode_ped = modes.get_id_mode('pedestrian')
self.get_attrsman().do_not_save_attrs([
'_id_mode_bike', '_id_mode_auto', '_id_mode_moto',
'_id_mode_bus', '_id_mode_ped',
])
def preevaluate(self, ids_person):
"""
        Preevaluates this strategy for the person IDs in vector ids_person.
        Returns a preevaluation vector with a preevaluation value
        for each person ID. The values of the preevaluation vector are as follows:
            -1 : Strategy cannot be applied
            0 : Strategy can be applied, but the preferred mode is not used
            1 : Strategy can be applied, and the preferred mode is part of the strategy
            2 : Strategy uses predominantly the preferred mode
"""
n_pers = len(ids_person)
persons = self.get_virtualpop()
preeval = np.zeros(n_pers, dtype=np.int32)
# TODO: here we could exclude by age or distance facilities-stops
        # put 0 for persons whose preferred mode is not walking
preeval[persons.ids_mode_preferred[ids_person] != self._id_mode_ped] = 0
        # put 2 for persons who prefer walking
preeval[persons.ids_mode_preferred[ids_person] == self._id_mode_ped] = 2
print ' WalkStrategy.preevaluate', len(np.flatnonzero(preeval))
return preeval
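
    # A minimal usage sketch (assumption: a parent object `strategies` and a
    # numpy int vector `ids_person` of person IDs are available; both names
    # are hypothetical):
    #
    #   walkstrategy = WalkStrategy('walkstrategy', parent=strategies)
    #   preeval = walkstrategy.preevaluate(ids_person)
    #   walkstrategy.plan(ids_person[preeval >= 0])  # plan only where applicable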
def plan(self, ids_person, logger=None):
"""
        Generates a plan for these persons according to this strategy.
        Overridden by specific strategy.
"""
        print 'WalkStrategy.plan', len(ids_person)
#make_plans_private(self, ids_person = None, mode = 'passenger')
# routing necessary?
virtualpop = self.get_virtualpop()
plans = virtualpop.get_plans() # self._plans
demand = virtualpop.get_demand()
#ptlines = demand.ptlines
walkstages = plans.get_stagetable('walks')
#transitstages = plans.get_stagetable('transits')
activitystages = plans.get_stagetable('activities')
activities = virtualpop.get_activities()
activitytypes = demand.activitytypes
landuse = virtualpop.get_landuse()
facilities = landuse.facilities
#parking = landuse.parking
scenario = virtualpop.get_scenario()
net = scenario.net
edges = net.edges
lanes = net.lanes
modes = net.modes
#ptstops = net.ptstops
ids_laneedge = net.lanes.ids_edge
times_est_plan = plans.times_est
# here we can determine edge weights for different modes
        # this could be centralized to avoid redundancy
plans.prepare_stagetables(['walks', 'activities'])
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(0, ids_person=ids_person)
if len(ids_person_act) == 0:
print 'WARNING in WalkStrategy.plan: no eligible persons found.'
return False
# temporary maps from ids_person to other parameters
nm = np.max(ids_person_act)+1
map_ids_plan = np.zeros(nm, dtype=np.int32)
#ids_plan_act = virtualpop.add_plans(ids_person_act, id_strategy = self.get_id_strategy())
map_ids_plan[ids_person_act] = virtualpop.add_plans(ids_person_act, id_strategy=self.get_id_strategy())
map_times = np.zeros(nm, dtype=np.int32)
map_times[ids_person_act] = activities.get_times_end(ids_act_from, pdf='unit')
# set start time to plans (important!)
plans.times_begin[map_ids_plan[ids_person_act]] = map_times[ids_person_act]
map_ids_fac_from = np.zeros(nm, dtype=np.int32)
map_ids_fac_from[ids_person_act] = activities.ids_facility[ids_act_from]
n_plans = len(ids_person_act)
        print 'WalkStrategy.plan n_plans=', n_plans
# make initial activity stage
ids_edge_from = facilities.ids_roadedge_closest[map_ids_fac_from[ids_person_act]]
poss_edge_from = facilities.positions_roadedge_closest[map_ids_fac_from[ids_person_act]]
# this is the time when first activity starts
# first activity is normally not simulated
names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
durations_act_from = activities.get_durations(ids_act_from)
times_from = map_times[ids_person_act]-durations_act_from
#times_from = activities.get_times_end(ids_act_from, pdf = 'unit')
for id_plan,\
time,\
id_act_from,\
name_acttype_from,\
duration_act_from,\
id_edge_from,\
pos_edge_from \
in zip(map_ids_plan[ids_person_act],
times_from,
ids_act_from,
names_acttype_from,
durations_act_from,
ids_edge_from,
poss_edge_from):
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_from,
names_activitytype=name_acttype_from,
durations=duration_act_from,
ids_lane=edges.ids_lanes[id_edge_from][0],
positions=pos_edge_from,
)
##
ind_act = 0
# main loop while there are persons performing
# an activity at index ind_act
while len(ids_person_act) > 0:
ids_plan = map_ids_plan[ids_person_act]
times_from = map_times[ids_person_act]
names_acttype_to = activitytypes.names[activities.ids_activitytype[ids_act_to]]
durations_act_to = activities.get_durations(ids_act_to)
ids_fac_from = map_ids_fac_from[ids_person_act]
ids_fac_to = activities.ids_facility[ids_act_to]
centroids_from = facilities.centroids[ids_fac_from]
centroids_to = facilities.centroids[ids_fac_to]
# origin edge and position
ids_edge_from = facilities.ids_roadedge_closest[ids_fac_from]
poss_edge_from = facilities.positions_roadedge_closest[ids_fac_from]
# destination edge and position
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
poss_edge_to = facilities.positions_roadedge_closest[ids_fac_to]
#ids_stop_from = ptstops.get_closest(centroids_from)
#ids_stop_to = ptstops.get_closest(centroids_to)
#ids_stopedge_from = ids_laneedge[ids_stoplane[ids_stop_from]]
#ids_stopedge_to = ids_laneedge[ids_stoplane[ids_stop_to]]
# do random pos here
# poss_stop_from = 0.5*( ptstops.positions_from[ids_stop_from]\
# +ptstops.positions_to[ids_stop_from])
# poss_stop_to = 0.5*( ptstops.positions_from[ids_stop_to]\
# +ptstops.positions_to[ids_stop_to])
i = 0.0
for id_person, id_plan, time_from, id_act_from, id_act_to, name_acttype_to, duration_act_to, id_edge_from, pos_edge_from, id_edge_to, pos_edge_to, \
in zip(ids_person_act, ids_plan, times_from, ids_act_from, ids_act_to, names_acttype_to, durations_act_to, ids_edge_from, poss_edge_from, ids_edge_to, poss_edge_to):
n_pers = len(ids_person_act)
if logger:
logger.progress(i/n_pers*100)
i += 1.0
print 79*'_'
print ' id_plan=%d, id_person=%d, ' % (id_plan, id_person)
id_stage_walk1, time = walkstages.append_stage(id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to, # -7.0,
)
# update time for trips estimation for this plan
plans.times_est[id_plan] += time-time_from
# define current end time without last activity duration
plans.times_end[id_plan] = time
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=edges.ids_lanes[id_edge_to][0],
positions=pos_edge_to,
)
# store time for next iteration in case other activities are
# following
map_times[id_person] = time
            # select persons and activities for next step
ind_act += 1
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(ind_act, ids_person=ids_person_act)
class AutoStrategy(StrategyMixin):
def __init__(self, ident, parent=None,
name='Auto strategy',
info='With this strategy, the person uses his private auto as main transport mode.',
**kwargs):
self._init_objman(ident, parent, name=name, info=info, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
# specific init
self._init_attributes()
self._init_constants()
def _init_attributes(self):
# print 'StrategyMixin._init_attributes'
pass
def _init_constants(self):
#virtualpop = self.get_virtualpop()
#stagetables = virtualpop.get_stagetables()
#self._walkstages = stagetables.get_stagetable('walks')
#self._ridestages = stagetables.get_stagetable('rides')
#self._activitystages = stagetables.get_stagetable('activities')
#self._plans = virtualpop.get_plans()
#
# print 'AutoStrategy._init_constants'
# print dir(self)
# self.get_attrsman().do_not_save_attrs(['_activitystages','_ridestages','_walkstages','_plans'])
modes = self.get_virtualpop().get_scenario().net.modes
self._id_mode_bike = modes.get_id_mode('bicycle')
self._id_mode_auto = modes.get_id_mode('passenger')
self._id_mode_moto = modes.get_id_mode('motorcycle')
self.get_attrsman().do_not_save_attrs([
'_id_mode_bike', '_id_mode_auto', '_id_mode_moto',
])
def preevaluate(self, ids_person):
"""
        Preevaluates this strategy for the person IDs in vector ids_person.
        Returns a preevaluation vector with a preevaluation value
        for each person ID. The values of the preevaluation vector are as follows:
            -1 : Strategy cannot be applied
            0 : Strategy can be applied, but the preferred mode is not used
            1 : Strategy can be applied, and the preferred mode is part of the strategy
            2 : Strategy uses predominantly the preferred mode
"""
n_pers = len(ids_person)
        print 'AutoStrategy.preevaluate', n_pers, 'persons'
persons = self.get_virtualpop()
preeval = np.zeros(n_pers, dtype=np.int32)
# put -1 for persons without car access
preeval[persons.ids_iauto[ids_person] == -1] = -1
print ' persons having no auto', len(np.flatnonzero(persons.ids_iauto[ids_person] == -1))
# put 0 for persons with car but with a different preferred mode
preeval[(persons.ids_iauto[ids_person] > -1)
& (persons.ids_mode_preferred[ids_person] != self._id_mode_auto)] = 0
print ' persons with car but with a different preferred mode', len(np.flatnonzero(
(persons.ids_iauto[ids_person] > -1) & (persons.ids_mode_preferred[ids_person] != self._id_mode_auto)))
# put 2 for persons with car access and who prefer the car
preeval[(persons.ids_iauto[ids_person] > -1)
& (persons.ids_mode_preferred[ids_person] == self._id_mode_auto)] = 2
print ' persons with car access and who prefer the car', len(np.flatnonzero(
(persons.ids_iauto[ids_person] > -1) & (persons.ids_mode_preferred[ids_person] == self._id_mode_auto)))
return preeval
# def are_feasible(self, ids_person):
# """
# Returns a bool vector, with True values for
# persons where this strategy can be applied.
# """
# persons = self.get_virtualpop()
#
# # check if person has a car
# # one may also check if there is parking available
    #    # at all destinations
# return persons.ids_iautos[ids_person] >= 0
def plan(self, ids_person, logger=None):
"""
        Generates a plan for these persons according to this strategy.
        Overridden by specific strategy.
"""
#make_plans_private(self, ids_person = None, mode = 'passenger')
# routing necessary?
virtualpop = self.get_virtualpop()
plans = virtualpop.get_plans() # self._plans
walkstages = plans.get_stagetable('walks')
ridestages = plans.get_stagetable('autorides')
activitystages = plans.get_stagetable('activities')
activities = virtualpop.get_activities()
activitytypes = virtualpop.get_demand().activitytypes
landuse = virtualpop.get_landuse()
facilities = landuse.facilities
parking = landuse.parking
scenario = virtualpop.get_scenario()
edges = scenario.net.edges
lanes = scenario.net.lanes
modes = scenario.net.modes
#times_est_plan = plans.times_est
# here we can determine edge weights for different modes
plans.prepare_stagetables(['walks', 'autorides', 'activities'])
# get initial travel times for persons.
# initial travel times depend on the initial activity
landuse.parking.clear_booking()
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(0, ids_person=ids_person)
if len(ids_person_act) == 0:
            print 'WARNING in AutoStrategy.plan: no eligible persons found.'
return False
# ok
# temporary maps from ids_person to other parameters
nm = np.max(ids_person_act)+1
map_ids_plan = np.zeros(nm, dtype=np.int32)
#ids_plan_act = virtualpop.add_plans(ids_person_act, id_strategy = self.get_id_strategy())
map_ids_plan[ids_person_act] = virtualpop.add_plans(ids_person_act, id_strategy=self.get_id_strategy())
# err
map_times = np.zeros(nm, dtype=np.int32)
map_times[ids_person_act] = activities.get_times_end(ids_act_from, pdf='unit')
# set start time to plans (important!)
plans.times_begin[map_ids_plan[ids_person_act]] = map_times[ids_person_act]
map_ids_fac_from = np.zeros(nm, dtype=np.int32)
map_ids_fac_from[ids_person_act] = activities.ids_facility[ids_act_from]
# err
map_ids_parking_from = np.zeros(nm, dtype=np.int32)
ids_parking_from, inds_vehparking = parking.get_closest_parkings(virtualpop.ids_iauto[ids_person_act],
facilities.centroids[activities.ids_facility[ids_act_from]])
if len(ids_parking_from) == 0:
return False
# err
map_ids_parking_from[ids_person_act] = ids_parking_from
n_plans = len(ids_person_act)
print 'AutoStrategy.plan n_plans=', n_plans
# print ' map_ids_parking_from[ids_person_act].shape',map_ids_parking_from[ids_person_act].shape
# set initial activity
# this is because the following steps start with travel
# and set the next activity
#names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
# for id_plan
ind_act = 0
# make initial activity stage
ids_edge_from = facilities.ids_roadedge_closest[map_ids_fac_from[ids_person_act]]
poss_edge_from = facilities.positions_roadedge_closest[map_ids_fac_from[ids_person_act]]
# this is the time when first activity starts
# first activity is normally not simulated
names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
durations_act_from = activities.get_durations(ids_act_from)
times_from = map_times[ids_person_act]-durations_act_from
#times_from = activities.get_times_end(ids_act_from, pdf = 'unit')
for id_plan,\
time,\
id_act_from,\
name_acttype_from,\
duration_act_from,\
id_edge_from,\
pos_edge_from \
in zip(map_ids_plan[ids_person_act],
times_from,
ids_act_from,
names_acttype_from,
durations_act_from,
ids_edge_from,
poss_edge_from):
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_from,
names_activitytype=name_acttype_from,
durations=duration_act_from,
ids_lane=edges.ids_lanes[id_edge_from][0],
positions=pos_edge_from,
)
# main loop while there are persons performing
# an activity at index ind_act
while len(ids_person_act) > 0:
ids_plan = map_ids_plan[ids_person_act]
ids_veh = virtualpop.ids_iauto[ids_person_act]
#inds_pers = virtualpop.get_inds(ids_person)
# self.persons.cols.mode_preferred[inds_pers]='private'
times_from = map_times[ids_person_act]
names_acttype_to = activitytypes.names[activities.ids_activitytype[ids_act_to]]
durations_act_to = activities.get_durations(ids_act_to)
ids_fac_from = map_ids_fac_from[ids_person_act]
ids_fac_to = activities.ids_facility[ids_act_to]
centroids_to = facilities.centroids[ids_fac_to]
# origin edge and position
ids_edge_from = facilities.ids_roadedge_closest[ids_fac_from]
poss_edge_from = facilities.positions_roadedge_closest[ids_fac_from]
# this method will find and occupy parking space
ids_parking_from = map_ids_parking_from[ids_person_act]
# print ' ids_veh.shape',ids_veh.shape
# print ' centroids_to.shape',centroids_to.shape
ids_parking_to, inds_vehparking = parking.get_closest_parkings(ids_veh, centroids_to)
ids_lane_parking_from = parking.ids_lane[ids_parking_from]
ids_edge_parking_from = lanes.ids_edge[ids_lane_parking_from]
poss_edge_parking_from = parking.positions[ids_parking_from]
# print ' ids_parking_to.shape',ids_parking_to.shape
# print ' np.max(parking.get_ids()), np.max(ids_parking_to)',np.max(parking.get_ids()), np.max(ids_parking_to)
ids_lane_parking_to = parking.ids_lane[ids_parking_to]
ids_edge_parking_to = lanes.ids_edge[ids_lane_parking_to]
poss_edge_parking_to = parking.positions[ids_parking_to]
# destination edge and position
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
poss_edge_to = facilities.positions_roadedge_closest[ids_fac_to]
i = 0.0
n_pers = len(ids_person_act)
for id_person, id_plan, time_from, id_act_from, id_act_to, name_acttype_to, duration_act_to, id_veh, id_edge_from, pos_edge_from, id_edge_parking_from, pos_edge_parking_from, id_parking_from, id_parking_to, id_edge_parking_to, pos_edge_parking_to, id_edge_to, pos_edge_to\
in zip(ids_person_act, ids_plan, times_from, ids_act_from, ids_act_to, names_acttype_to, durations_act_to, ids_veh, ids_edge_from, poss_edge_from, ids_edge_parking_from, poss_edge_parking_from, ids_parking_from, ids_parking_to, ids_edge_parking_to, poss_edge_parking_to, ids_edge_to, poss_edge_to):
if logger:
logger.progress(i/n_pers*100)
i += 1.0
#plans.set_row(id_plan, ids_person = id_person, ids_strategy = self.get_id_strategy())
#times_est_plan[id_plan] = time-time_start
# map_times[id_person] = self.plan_activity(\
# id_person, id_plan, time_from,
# id_act_from, id_act_to,
# name_acttype_to, duration_act_to,
# id_veh,
# id_edge_from, pos_edge_from,
# id_parking_from, id_edge_parking_from, pos_edge_parking_from,
# id_parking_to, id_edge_parking_to, pos_edge_parking_to,
# id_edge_to, pos_edge_to, edges.ids_lanes[id_edge_to][0])
# start creating stages for activity
id_stage_walk1, time = walkstages.append_stage(
id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_parking_from,
position_edge_to=pos_edge_parking_from-1.5, # wait 1.5 m before nose of parked car
)
# ride from car parking to road edge near activity
id_stage_car, time = ridestages.append_stage(
id_plan, time,
id_veh=id_veh,
# delay to be sure that person arrived!(workaround in combination with parking=False)
time_init=time+30, # time_from,
id_parking_from=id_parking_from,
id_parking_to=id_parking_to,
# TODO: here we could use id_edge_to as via edge to emulate search for parking
)
if id_stage_car >= 0:
# print ' car ride successful'
id_stage_walk2, time = walkstages.append_stage(
id_plan, time,
id_edge_from=id_edge_parking_to,
                        position_edge_from=pos_edge_parking_to-1.5,  # necessary?
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# print ' parking not connected or distance too short, modify first walk and go directly to activity'
# print ' id_stage_walk1',id_stage_walk1,type(id_stage_walk1)
# print ' id_edge_from',id_edge_from
# print ' position_edge_from',position_edge_from
# print ' id_edge_to',id_edge_to
# print ' position_edge_to',position_edge_to
time = walkstages.modify_stage(
id_stage_walk1, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
# store time estimation for this plan
# note that these are the travel times, no activity time
plans.times_est[id_plan] += time-time_from
# define current end time without last activity duration
plans.times_end[id_plan] = time
# finally add activity and respective duration
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=edges.ids_lanes[id_edge_to][0],
positions=pos_edge_to,
)
map_times[id_person] = time
# return time
##
            # select persons and activities for next step
ind_act += 1
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(ind_act, ids_person=ids_person_act)
# update timing with (random) activity duration!!
return True
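
    # Each successfully planned trip above expands into the stage chain
    # (sketch): activity -> walk (facility edge -> parking) -> autoride
    # (parking -> parking near destination) -> walk (parking -> destination
    # edge) -> activity. When the ride cannot be routed (id_stage_car < 0),
    # the first walk is modified to go directly from origin to destination.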
def plan_activity(self, id_person, id_plan, time_start,
id_act_from, id_act_to,
name_acttype_to, duration_act_to,
id_veh,
id_edge_from, pos_edge_from,
id_parking_from, id_edge_parking_from, pos_edge_parking_from,
id_parking_to, id_edge_parking_to, pos_edge_parking_to,
id_edge_to, pos_edge_to, id_lane_to):
print 79*'_'
print ' id_plan=%d, id_person=%d, ids_veh=%d' % (id_plan, id_person, id_veh)
plans = self.get_virtualpop().get_plans()
#stagetables = virtualpop.get_stagetables()
walkstages = plans.get_stagetable('walks')
ridestages = plans.get_stagetable('autorides')
activitystages = plans.get_stagetable('activities')
id_stage_walk1, time = walkstages.append_stage(
id_plan, time_start,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_parking_from,
position_edge_to=pos_edge_parking_from-1.5, # wait 1.5 m before nose of parked car
)
# ride from car parking to road edge near activity
id_stage_car, time = ridestages.append_stage(
id_plan, time,
id_veh=id_veh,
# delay to be sure that person arrived!(workaround in combination with parking=False)
time_init=time+30, # time_start,
id_parking_from=id_parking_from,
id_parking_to=id_parking_to,
# TODO: here we could use id_edge_to as via edge to emulate search for parking
)
if id_stage_car >= 0:
# print ' car ride successful'
id_stage_walk2, time = walkstages.append_stage(
id_plan, time,
id_edge_from=id_edge_parking_to,
                position_edge_from=pos_edge_parking_to-1.5,  # necessary?
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# print ' parking not connected or distance too short, modify first walk and go directly to activity'
# print ' id_stage_walk1',id_stage_walk1,type(id_stage_walk1)
# print ' id_edge_from',id_edge_from
# print ' position_edge_from',position_edge_from
# print ' id_edge_to',id_edge_to
# print ' position_edge_to',position_edge_to
time = walkstages.modify_stage(
id_stage_walk1, time_start,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
# store time estimation for this plan
# note that these are the travel times, no activity time
plans.times_est[id_plan] += time-time_start
# define current end time without last activity duration
plans.times_end[id_plan] = time
# finally add activity and respective duration
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=id_lane_to,
positions=pos_edge_to,
)
return time
class BikeStrategy(StrategyMixin):
def __init__(self, ident, parent=None,
name='Bike strategy',
info='With this strategy, the person uses his private bike as main transport mode.',
**kwargs):
self._init_objman(ident, parent, name=name, info=info, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
# specific init
self._init_attributes(**kwargs)
self._init_constants()
def _init_attributes(self, **kwargs):
# print 'StrategyMixin._init_attributes'
attrsman = self.get_attrsman()
self._init_attributes_strategy(**kwargs)
self.n_iter_bikeacces_max = attrsman.add(cm.AttrConf('n_iter_bikeacces_max', kwargs.get('n_iter_bikeacces_max', 5),
groupnames=['options'],
perm='rw',
name='Max. bike access search iterations',
info='Max. number of iterations while searching an edge with bike access.',
))
self.length_edge_min = attrsman.add(cm.AttrConf('length_edge_min', kwargs.get('length_edge_min', 20.0),
groupnames=['options'],
perm='rw',
name='Min. edge length search',
unit='m',
info='Min. edge length when searching an edge with bike access.',
))
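
        # Both options above bound the access search in get_edge_bikeaccess();
        # e.g. (sketch, using the keyword names defined above):
        #   BikeStrategy('bikestrategy', parent, n_iter_bikeacces_max=8,
        #                length_edge_min=10.0)
        # widens the breadth-first search and accepts shorter access edges.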
def _init_constants(self):
#virtualpop = self.get_virtualpop()
#stagetables = virtualpop.get_stagetables()
#self._walkstages = stagetables.get_stagetable('walks')
#self._ridestages = stagetables.get_stagetable('rides')
#self._activitystages = stagetables.get_stagetable('activities')
#self._plans = virtualpop.get_plans()
#
# print 'AutoStrategy._init_constants'
# print dir(self)
# self.get_attrsman().do_not_save_attrs(['_activitystages','_ridestages','_walkstages','_plans'])
modes = self.get_virtualpop().get_scenario().net.modes
self._id_mode_ped = modes.get_id_mode('pedestrian')
self._id_mode_bike = modes.get_id_mode('bicycle')
self._id_mode_auto = modes.get_id_mode('passenger')
self._id_mode_moto = modes.get_id_mode('motorcycle')
self._edges = self.get_virtualpop().get_scenario().net.edges
self.get_attrsman().do_not_save_attrs([
'_id_mode_bike', '_id_mode_auto', '_id_mode_moto',
'_id_mode_ped',
'_edges'])
def preevaluate(self, ids_person):
"""
        Preevaluates this strategy for the person IDs in vector ids_person.
        Returns a preevaluation vector with a preevaluation value
        for each person ID. The values of the preevaluation vector are as follows:
            -1 : Strategy cannot be applied
            0 : Strategy can be applied, but the preferred mode is not used
            1 : Strategy can be applied, and the preferred mode is part of the strategy
            2 : Strategy uses predominantly the preferred mode
"""
n_pers = len(ids_person)
print 'BikeStrategy.preevaluate', n_pers, 'persons'
persons = self.get_virtualpop()
preeval = np.zeros(n_pers, dtype=np.int32)
        # put -1 for persons without bike access
preeval[persons.ids_ibike[ids_person] == -1] = -1
print ' persons having no bike', len(np.flatnonzero(persons.ids_ibike[ids_person] == -1))
# put 0 for persons with bike but with a different preferred mode
preeval[(persons.ids_ibike[ids_person] > -1)
& (persons.ids_mode_preferred[ids_person] != self._id_mode_bike)] = 0
print ' persons with bike but with a different preferred mode', len(np.flatnonzero(
(persons.ids_ibike[ids_person] > -1) & (persons.ids_mode_preferred[ids_person] != self._id_mode_bike)))
        # put 2 for persons with bike access and who prefer the bike
        preeval[(persons.ids_ibike[ids_person] > -1)
                & (persons.ids_mode_preferred[ids_person] == self._id_mode_bike)] = 2
        print '  persons with bike access and who prefer the bike', len(np.flatnonzero(
            (persons.ids_ibike[ids_person] > -1) & (persons.ids_mode_preferred[ids_person] == self._id_mode_bike)))
return preeval
def get_edge_bikeaccess(self, id_edge, is_search_backward=False):
# print 'get_edge_bikeaccess',id_edge, is_search_backward,'id_sumo',self._edges.ids_sumo[id_edge]
id_mode = self._id_mode_bike
id_mode_ped = self._id_mode_ped
get_accesslevel = self._edges.get_accesslevel
if is_search_backward:
get_next = self._edges.get_incoming
else:
get_next = self._edges.get_outgoing
edgelengths = self._edges.lengths
#ids_tried = set()
ids_current = [id_edge]
id_bikeedge = -1
pos = 0.0
n = 0
while (id_bikeedge < 0) & (n < self.n_iter_bikeacces_max):
n += 1
ids_new = []
for id_edge_test, is_long_enough in zip(ids_current, edgelengths[ids_current] > self.length_edge_min):
# print ' check id',id_edge_test, is_long_enough,get_accesslevel(id_edge_test, id_mode)
if is_long_enough & (get_accesslevel(id_edge_test, id_mode) >= 0) & (get_accesslevel(id_edge_test, id_mode_ped) >= 0):
id_bikeedge = id_edge_test
# print ' found',id_bikeedge,self._edges.ids_sumo[id_bikeedge]
break
else:
ids_new += get_next(id_edge_test)
ids_current = ids_new
if id_bikeedge > -1:
if is_search_backward:
pos = edgelengths[id_bikeedge]-0.5*self.length_edge_min
else:
pos = 0.5*self.length_edge_min
if id_bikeedge == -1:
print 'WARNING in get_edge_bikeaccess no access for', id_edge, self._edges.ids_sumo[id_edge]
return id_bikeedge, pos
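
    # The access search above is a breadth-first expansion: iteration 1 tests
    # id_edge itself; if it is shorter than length_edge_min or lacks bicycle
    # or pedestrian access, iteration 2 tests all its outgoing edges (incoming
    # ones when searching backward), and so on, for at most
    # n_iter_bikeacces_max iterations. On success, the returned position lies
    # half of length_edge_min from the edge begin (forward search) or from the
    # edge end (backward search); on failure (-1, 0.0) is returned.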
def plan_bikeride(self, id_plan, time_from, id_veh,
id_edge_from, pos_edge_from,
id_edge_to, pos_edge_to,
dist_from_to, dist_walk_max,
walkstages, ridestages):
# start creating stages
id_stage_walk1 = -1
id_stage_bike = -1
id_edge_from_bike, pos_from_bike = self.get_edge_bikeaccess(id_edge_from)
id_edge_to_bike, pos_to_bike = self.get_edge_bikeaccess(id_edge_to, is_search_backward=True)
if (dist_from_to < dist_walk_max) | (id_edge_from_bike == -1) | (id_edge_to_bike == -1):
# print ' go by foot because distance is too short or no bike access',dist_from_to,id_edge_from_bike,id_edge_to_bike
id_stage_walk1, time = walkstages.append_stage(
id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# print ' try to take the bike',id_veh
# print ' id_edge_from_bike',edges.ids_sumo[id_edge_from_bike],pos_from_bike
# print ' id_edge_to_bike',edges.ids_sumo[id_edge_to_bike],pos_to_bike
if id_edge_from_bike != id_edge_from:
# print ' must walk from origin to bikerack',time_from
id_stage_walk1, time = walkstages.append_stage(
id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_from_bike,
position_edge_to=pos_from_bike,
)
if id_edge_to_bike != id_edge_to:
# print ' must cycle from bikerack to dest bike rack',time
id_stage_bike, time = ridestages.append_stage(
id_plan, time,
id_veh=id_veh,
# delay to be sure that person arrived!(workaround in combination with parking=False)
time_init=time-10, # time_from,
id_edge_from=id_edge_from_bike,
position_edge_from=pos_from_bike,
id_edge_to=id_edge_to_bike,
position_edge_to=pos_to_bike,
)
if id_stage_bike > -1:
# print ' must walk from dest bikerack to dest',time
id_stage_walk2, time = walkstages.append_stage(
id_plan, time,
id_edge_from=id_edge_to_bike,
position_edge_from=pos_to_bike,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# print ' cycle from bikerack to destination',time
id_stage_bike, time = ridestages.append_stage(
id_plan, time,
id_veh=id_veh,
# delay to be sure that person arrived!(workaround in combination with parking=False)
time_init=time-10, # time_from,
id_edge_from=id_edge_from_bike,
position_edge_from=pos_from_bike,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
                # print '  cycle directly from origin edge',time_from
if id_edge_to_bike != id_edge_to:
# print ' must cycle from origin to bikerack',time_from
#pos_to_bike = 0.1*edges.lengths[id_edge_to_bike]
id_stage_bike, time = ridestages.append_stage(
id_plan, time_from,
id_veh=id_veh,
# delay to be sure that person arrived!(workaround in combination with parking=False)
time_init=time_from-10, # time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to_bike,
position_edge_to=pos_to_bike,
)
if id_stage_bike > -1:
id_stage_walk2, time = walkstages.append_stage(
id_plan, time,
id_edge_from=id_edge_to_bike,
position_edge_from=pos_to_bike,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# print ' must cycle from origin to destination',time_from
id_stage_bike, time = ridestages.append_stage(
id_plan, time_from,
id_veh=id_veh,
# delay to be sure that person arrived!(workaround in combination with parking=False)
time_init=time_from-10, # time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
# here we should have created a bike ride
        # if not, for whatever reason,
# we walk from origin to destination
if id_stage_bike == -1:
# print ' walk because no ride stage has been planned',time_from
if id_stage_walk1 == -1:
# no walk stage has been planned
id_stage_walk1, time = walkstages.append_stage(
id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
elif time_from == time:
            # walking to bike has already been scheduled,
# but cycling failed. So walk the whole way
time = walkstages.modify_stage(
id_stage_walk1, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
return time
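
    # plan_bikeride covers four cases, depending on where bike access is found
    # (sketch):
    #   bike access on both origin and destination edges: single bike ride
    #   access only at the destination edge             : walk + ride
    #   access only at the origin edge                  : ride + walk
    #   access at neither edge                          : walk + ride + walk
    # If no ride stage could be scheduled at all (id_stage_bike == -1), the
    # whole trip falls back to a single walk stage.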
def plan(self, ids_person, logger=None):
"""
        Generates a plan for these persons according to this strategy.
        Overridden by specific strategy.
"""
#make_plans_private(self, ids_person = None, mode = 'passenger')
# routing necessary?
virtualpop = self.get_virtualpop()
plans = virtualpop.get_plans() # self._plans
walkstages = plans.get_stagetable('walks')
ridestages = plans.get_stagetable('bikerides')
activitystages = plans.get_stagetable('activities')
activities = virtualpop.get_activities()
activitytypes = virtualpop.get_demand().activitytypes
landuse = virtualpop.get_landuse()
facilities = landuse.facilities
#parking = landuse.parking
scenario = virtualpop.get_scenario()
edges = scenario.net.edges
lanes = scenario.net.lanes
modes = scenario.net.modes
#times_est_plan = plans.times_est
# here we can determine edge weights for different modes
plans.prepare_stagetables(['walks', 'bikerides', 'activities'])
# get initial travel times for persons.
# initial travel times depend on the initial activity
# landuse.parking.clear_booking()
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(0, ids_person=ids_person)
if len(ids_person_act) == 0:
print 'WARNING in BikeStrategy.plan: no eligible persons found.'
return False
# ok
# temporary maps from ids_person to other parameters
nm = np.max(ids_person_act)+1
map_ids_plan = np.zeros(nm, dtype=np.int32)
map_ids_plan[ids_person_act] = virtualpop.add_plans(ids_person_act, id_strategy=self.get_id_strategy())
map_times = np.zeros(nm, dtype=np.int32)
map_times[ids_person_act] = activities.get_times_end(ids_act_from, pdf='unit')
# set start time to plans (important!)
plans.times_begin[map_ids_plan[ids_person_act]] = map_times[ids_person_act]
map_ids_fac_from = np.zeros(nm, dtype=np.int32)
map_ids_fac_from[ids_person_act] = activities.ids_facility[ids_act_from]
#map_ids_parking_from = np.zeros(nm, dtype = np.int32)
# ids_parking_from, inds_vehparking = parking.get_closest_parkings( virtualpop.ids_iauto[ids_person_act],
# facilities.centroids[activities.ids_facility[ids_act_from]])
# if len(ids_parking_from)==0:
# return False
# err
#map_ids_parking_from[ids_person_act] = ids_parking_from
n_plans = len(ids_person_act)
print 'BikeStrategy.plan n_plans=', n_plans
# print ' map_ids_parking_from[ids_person_act].shape',map_ids_parking_from[ids_person_act].shape
# set initial activity
# this is because the following steps start with travel
# and set the next activity
#names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
# for id_plan
ind_act = 0
# make initial activity stage
ids_edge_from = facilities.ids_roadedge_closest[map_ids_fac_from[ids_person_act]]
poss_edge_from = facilities.positions_roadedge_closest[map_ids_fac_from[ids_person_act]]
poss_edge_from = self.clip_positions(poss_edge_from, ids_edge_from)
# this is the time when first activity starts
# first activity is normally not simulated
names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
durations_act_from = activities.get_durations(ids_act_from)
times_from = map_times[ids_person_act]-durations_act_from
#times_from = activities.get_times_end(ids_act_from, pdf = 'unit')
# do initial stage
# could be common to all strategies
for id_plan,\
time,\
id_act_from,\
name_acttype_from,\
duration_act_from,\
id_edge_from,\
pos_edge_from \
in zip(map_ids_plan[ids_person_act],
times_from,
ids_act_from,
names_acttype_from,
durations_act_from,
ids_edge_from,
poss_edge_from):
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_from,
names_activitytype=name_acttype_from,
durations=duration_act_from,
ids_lane=edges.ids_lanes[id_edge_from][0],
positions=pos_edge_from,
)
# main loop while there are persons performing
# an activity at index ind_act
while len(ids_person_act) > 0:
ids_plan = map_ids_plan[ids_person_act]
ids_veh = virtualpop.ids_ibike[ids_person_act]
dists_walk_max = virtualpop.dists_walk_max[ids_person_act]
times_from = map_times[ids_person_act]
names_acttype_to = activitytypes.names[activities.ids_activitytype[ids_act_to]]
durations_act_to = activities.get_durations(ids_act_to)
ids_fac_from = map_ids_fac_from[ids_person_act]
ids_fac_to = activities.ids_facility[ids_act_to]
# origin edge and position
ids_edge_from = facilities.ids_roadedge_closest[ids_fac_from]
poss_edge_from = facilities.positions_roadedge_closest[ids_fac_from]
poss_edge_from = self.clip_positions(poss_edge_from, ids_edge_from)
centroids_from = facilities.centroids[ids_fac_from]
# this method will find and occupy parking space
#ids_parking_from = map_ids_parking_from[ids_person_act]
# print ' ids_veh.shape',ids_veh.shape
# print ' centroids_to.shape',centroids_to.shape
#ids_parking_to, inds_vehparking = parking.get_closest_parkings(ids_veh, centroids_to)
#ids_lane_parking_from = parking.ids_lane[ids_parking_from]
#ids_edge_parking_from = lanes.ids_edge[ids_lane_parking_from]
#poss_edge_parking_from = parking.positions[ids_parking_from]
# print ' ids_parking_to.shape',ids_parking_to.shape
# print ' np.max(parking.get_ids()), np.max(ids_parking_to)',np.max(parking.get_ids()), np.max(ids_parking_to)
#ids_lane_parking_to = parking.ids_lane[ids_parking_to]
#ids_edge_parking_to = lanes.ids_edge[ids_lane_parking_to]
#poss_edge_parking_to = parking.positions[ids_parking_to]
# destination edge and position
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
poss_edge_to1 = facilities.positions_roadedge_closest[ids_fac_to]
poss_edge_to = self.clip_positions(poss_edge_to1, ids_edge_to)
centroids_to = facilities.centroids[ids_fac_to]
# debug poscorrection..OK
# for id_edge, id_edge_sumo, length, pos_to1, pos in zip(ids_edge_to, edges.ids_sumo[ids_edge_to],edges.lengths[ids_edge_to],poss_edge_to1, poss_edge_to):
# print ' ',id_edge, 'IDe%s'%id_edge_sumo, 'L=%.2fm'%length, '%.2fm'%pos_to1, '%.2fm'%pos
dists_from_to = np.sqrt(np.sum((centroids_to - centroids_from)**2, 1))
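            # air-line (Euclidean) distances between facility centroids; these
            # are compared with dists_walk_max in plan_bikeride to decide
            # whether to walk the whole trip instead of cycling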
i = 0.0
n_pers = len(ids_person_act)
for id_person, id_plan, time_from, id_act_from, id_act_to, name_acttype_to, duration_act_to, id_veh, id_edge_from, pos_edge_from, id_edge_to, pos_edge_to, dist_from_to, dist_walk_max\
in zip(ids_person_act, ids_plan, times_from, ids_act_from, ids_act_to, names_acttype_to, durations_act_to, ids_veh, ids_edge_from, poss_edge_from, ids_edge_to, poss_edge_to, dists_from_to, dists_walk_max):
if logger:
logger.progress(i/n_pers*100)
i += 1.0
print 79*'*'
print ' plan id_plan', id_plan, 'time_from', time_from, 'from', id_edge_from, pos_edge_from, 'to', id_edge_to, pos_edge_to
print ' id_edge_from', edges.ids_sumo[id_edge_from], 'id_edge_to', edges.ids_sumo[id_edge_to]
time = self.plan_bikeride(id_plan, time_from, id_veh,
id_edge_from, pos_edge_from,
id_edge_to, pos_edge_to,
dist_from_to, dist_walk_max,
walkstages, ridestages)
################
# store time estimation for this plan
# note that these are the travel times, no activity time
plans.times_est[id_plan] += time-time_from
# define current end time without last activity duration
plans.times_end[id_plan] = time
# finally add activity and respective duration
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=edges.ids_lanes[id_edge_to][0],
positions=pos_edge_to,
)
map_times[id_person] = time
# return time
##
            # select persons and activities for next step
ind_act += 1
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(ind_act, ids_person=ids_person_act)
# update timing with (random) activity duration!!
return True
class TransitStrategy(StrategyMixin):
def __init__(self, ident, parent=None,
name='Public Transport Strategy',
info='With this strategy, the person uses public transport.',
**kwargs):
self._init_objman(ident, parent, name=name, info=info, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
# specific init
self._init_attributes()
self._init_constants()
def _init_attributes(self):
# print 'StrategyMixin._init_attributes'
pass
def _init_constants(self):
#virtualpop = self.get_virtualpop()
#stagetables = virtualpop.get_stagetables()
#self._walkstages = stagetables.get_stagetable('walks')
#self._ridestages = stagetables.get_stagetable('rides')
#self._activitystages = stagetables.get_stagetable('activities')
#self._plans = virtualpop.get_plans()
#
# print 'AutoStrategy._init_constants'
# print dir(self)
# self.get_attrsman().do_not_save_attrs(['_activitystages','_ridestages','_walkstages','_plans'])
modes = self.get_virtualpop().get_scenario().net.modes
self._id_mode_bike = modes.get_id_mode('bicycle')
self._id_mode_auto = modes.get_id_mode('passenger')
self._id_mode_moto = modes.get_id_mode('motorcycle')
self._id_mode_bus = modes.get_id_mode('bus')
self.get_attrsman().do_not_save_attrs([
'_id_mode_bike', '_id_mode_auto', '_id_mode_moto', '_id_mode_bus'
])
def preevaluate(self, ids_person):
"""
        Preevaluates this strategy for the person IDs in vector ids_person.
        Returns a preevaluation vector with a preevaluation value
        for each person ID. The values of the preevaluation vector are as follows:
            -1 : Strategy cannot be applied
            0 : Strategy can be applied, but the preferred mode is not used
            1 : Strategy can be applied, and the preferred mode is part of the strategy
            2 : Strategy uses predominantly the preferred mode
"""
n_pers = len(ids_person)
persons = self.get_virtualpop()
preeval = np.zeros(n_pers, dtype=np.int32)
# TODO: here we could exclude by age or distance facilities-stops
# put 0 for persons whose preference is not public transport
preeval[persons.ids_mode_preferred[ids_person] != self._id_mode_bus] = 0
        # put 2 for persons who prefer public transport
preeval[persons.ids_mode_preferred[ids_person] == self._id_mode_bus] = 2
print ' TransitStrategy.preevaluate', len(np.flatnonzero(preeval))
return preeval
def plan_transit(self, id_plan, time_from,
id_edge_from, pos_edge_from,
id_edge_to, pos_edge_to,
id_stop_from, id_stopedge_from, pos_stop_from,
id_stop_to, id_stopedge_to, pos_stop_to,
#dist_from_to, dist_walk_max,
walkstages, transitstages,
ptlines, ptfstar, pttimes,
stops_to_enter, stops_to_exit,
ids_laneedge, ids_stoplane, ptstops):
ptlinks = ptlines.get_ptlinks()
ptlinktypes = ptlinks.types.choices
type_enter = ptlinktypes['enter']
type_transit = ptlinktypes['transit']
type_board = ptlinktypes['board']
type_alight = ptlinktypes['alight']
type_transfer = ptlinktypes['transfer']
type_walk = ptlinktypes['walk']
type_exit = ptlinktypes['exit']
if (id_edge_from == id_stopedge_from) & (abs(pos_edge_from-pos_stop_from) < 1.0):
time = time_from
id_stage_walk1 = None
else:
id_stage_walk1, time = walkstages.append_stage(id_plan, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_stopedge_from,
position_edge_to=pos_stop_from, # -7.0,
)
# print ' id_stopedge_from',id_stopedge_from
# print ' pos_stop_from',pos_stop_from
# print
# print ' id_stopedge_to',id_stopedge_to
# print ' pos_stop_to',pos_stop_to
# print
# print ' id_stop_from',id_stop_from
# print ' id_stop_to',id_stop_to
durations, linktypes, ids_line, ids_fromstop, ids_tostop =\
ptlinks.route(id_stop_from, id_stop_to,
fstar=ptfstar, times=pttimes,
stops_to_enter=stops_to_enter,
stops_to_exit=stops_to_exit)
# print ' routing done. make plan..'
if len(linktypes) > 0:
if linktypes[-1] == type_walk: # is last stage a walk?
# remove it, because will go directly to destination
linktypes = linktypes[:-1]
ids_line = ids_line[:-1]
durations = durations[:-1]
ids_fromstop = ids_fromstop[:-1]
ids_tostop = ids_tostop[:-1]
# print ' ids_line ',ids_line
# print ' ids_fromstop',ids_fromstop
# print ' ids_tostop ',ids_tostop
if len(linktypes) > 0: # is there any public transport line to take?
            # go through PT links and generate transits and walks to transfer
ids_stopedge_from = ids_laneedge[ids_stoplane[ids_fromstop]]
ids_stopedge_to = ids_laneedge[ids_stoplane[ids_tostop]]
poss_stop_from = 0.5*(ptstops.positions_from[ids_fromstop]
+ ptstops.positions_to[ids_fromstop])
poss_stop_to = 0.5*(ptstops.positions_from[ids_tostop]
+ ptstops.positions_to[ids_tostop])
# this is wait time buffer to be added to the successive stage
# as waiting is currently not modelled as an extra stage
duration_wait = 0.0
# create stages for PT
for linktype, id_line, duration,\
id_stopedge_from, pos_fromstop,\
id_stopedge_to, pos_tostop in\
zip(linktypes,
ids_line,
durations,
ids_stopedge_from, poss_stop_from,
ids_stopedge_to, poss_stop_to,
):
# print ' stage for linktype %2d fromedge %s toedge %s'%(linktype, edges.ids_sumo[id_stopedge_from],edges.ids_sumo[id_stopedge_to] )
print ' id_stopedge_from,id_stopedge_to', id_stopedge_from, id_stopedge_to
if linktype == type_transit: # transit!
print ' add transit'
id_stage_transit, time = transitstages.append_stage(
id_plan, time,
id_line=id_line,
duration=duration+duration_wait,
id_fromedge=id_stopedge_from,
id_toedge=id_stopedge_to,
)
duration_wait = 0.0
elif linktype == type_walk: # walk to transfer
print ' add transfer'
id_stage_transfer, time = walkstages.append_stage(
id_plan, time,
id_edge_from=id_stopedge_from,
position_edge_from=pos_fromstop,
id_edge_to=id_stopedge_to,
position_edge_to=pos_tostop,
duration=duration+duration_wait,
)
duration_wait = 0.0
                else:  # all other link types are not modelled
                    # do not do anything, just add wait time to next stage
print ' add duration', duration
duration_wait += duration
# walk from final stop to activity
# print ' Stage for linktype %2d fromedge %s toedge %s'%(linktype, edges.ids_sumo[id_stopedge_to],edges.ids_sumo[id_edge_to] )
# if (id_edge_to == id_stopedge_to)&(abs(pos_edge_to-pos_tostop)<1.0):
# print ' already at right edge and position'
# pass
# else:
id_stage_walk2, time = walkstages.append_stage(id_plan, time,
id_edge_from=id_stopedge_to,
position_edge_from=pos_tostop,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# there is no public transport line linking these nodes.
if id_stage_walk1 is None:
# Create first walk directly from home to activity
id_stage_walk1, time = walkstages.append_stage(id_plan,
time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
else:
# Modify first walk directly from home to activity
time = walkstages.modify_stage(id_stage_walk1, time_from,
id_edge_from=id_edge_from,
position_edge_from=pos_edge_from,
id_edge_to=id_edge_to,
position_edge_to=pos_edge_to,
)
return time
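
    # A route returned by ptlinks.route() is a sequence of typed links, e.g.
    # (sketch with hypothetical stops A, B, C, D and lines 1, 2):
    #   enter(A), board(1), transit(A->B), alight(B), walk(B->C),
    #   board(2), transit(C->D), alight(D), exit(D)
    # Only 'transit' links become transit stages and 'walk' links become
    # transfer walk stages; the durations of all other link types are
    # accumulated in duration_wait and added to the next created stage.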
def plan(self, ids_person, logger=None):
"""
        Generates a plan for these persons according to this strategy.
        Overridden by specific strategy.
"""
print 'TransitStrategy.plan', len(ids_person)
#make_plans_private(self, ids_person = None, mode = 'passenger')
# routing necessary?
virtualpop = self.get_virtualpop()
plans = virtualpop.get_plans() # self._plans
demand = virtualpop.get_demand()
ptlines = demand.ptlines
walkstages = plans.get_stagetable('walks')
transitstages = plans.get_stagetable('transits')
activitystages = plans.get_stagetable('activities')
activities = virtualpop.get_activities()
activitytypes = demand.activitytypes
landuse = virtualpop.get_landuse()
facilities = landuse.facilities
parking = landuse.parking
scenario = virtualpop.get_scenario()
net = scenario.net
edges = net.edges
lanes = net.lanes
modes = net.modes
ptstops = net.ptstops
# print ' demand',demand
# print ' demand.ptlines',demand.ptlines,dir(demand.ptlines)
# print ' demand.ptlines.get_ptlinks()',demand.ptlines.get_ptlinks()
# print ' demand.virtualpop',demand.virtualpop,dir(demand.virtualpop)
# print ' demand.trips',demand.trips,dir(demand.trips)
if len(ptlines) == 0:
            print 'WARNING in TransitStrategy.plan: no transit services available. Create public transit services by connecting stops.'
return False
ptlinks = ptlines.get_ptlinks()
if len(ptlinks) == 0:
            print 'WARNING in TransitStrategy.plan: no public transport links. Run methods: "create routes" and "build links" from public transport services.'
return False
ptlinktypes = ptlinks.types.choices
ptfstar = ptlinks.get_fstar()
pttimes = ptlinks.get_times()
stops_to_enter, stops_to_exit = ptlinks.get_stops_to_enter_exit()
ids_stoplane = ptstops.ids_lane
ids_laneedge = net.lanes.ids_edge
times_est_plan = plans.times_est
# here we can determine edge weights for different modes
        # this could be centralized to avoid redundancy
plans.prepare_stagetables(['walks', 'transits', 'activities'])
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(0, ids_person=ids_person)
if len(ids_person_act) == 0:
            print 'WARNING in TransitStrategy.plan: no eligible persons found.'
return False
# temporary maps from ids_person to other parameters
nm = np.max(ids_person_act)+1
map_ids_plan = np.zeros(nm, dtype=np.int32)
#ids_plan_act = virtualpop.add_plans(ids_person_act, id_strategy = self.get_id_strategy())
map_ids_plan[ids_person_act] = virtualpop.add_plans(ids_person_act, id_strategy=self.get_id_strategy())
map_times = np.zeros(nm, dtype=np.int32)
map_times[ids_person_act] = activities.get_times_end(ids_act_from, pdf='unit')
# set start time to plans (important!)
plans.times_begin[map_ids_plan[ids_person_act]] = map_times[ids_person_act]
map_ids_fac_from = np.zeros(nm, dtype=np.int32)
map_ids_fac_from[ids_person_act] = activities.ids_facility[ids_act_from]
n_plans = len(ids_person_act)
        print 'TransitStrategy.plan n_plans=', n_plans
# make initial activity stage
ids_edge_from = facilities.ids_roadedge_closest[map_ids_fac_from[ids_person_act]]
poss_edge_from = facilities.positions_roadedge_closest[map_ids_fac_from[ids_person_act]]
# this is the time when first activity starts
# first activity is normally not simulated
names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
durations_act_from = activities.get_durations(ids_act_from)
times_from = map_times[ids_person_act]-durations_act_from
#times_from = activities.get_times_end(ids_act_from, pdf = 'unit')
for id_plan,\
time,\
id_act_from,\
name_acttype_from,\
duration_act_from,\
id_edge_from,\
pos_edge_from \
in zip(map_ids_plan[ids_person_act],
times_from,
ids_act_from,
names_acttype_from,
durations_act_from,
ids_edge_from,
poss_edge_from):
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_from,
names_activitytype=name_acttype_from,
durations=duration_act_from,
ids_lane=edges.ids_lanes[id_edge_from][0],
positions=pos_edge_from,
)
##
ind_act = 0
# main loop while there are persons performing
# an activity at index ind_act
while len(ids_person_act) > 0:
ids_plan = map_ids_plan[ids_person_act]
times_from = map_times[ids_person_act]
names_acttype_to = activitytypes.names[activities.ids_activitytype[ids_act_to]]
durations_act_to = activities.get_durations(ids_act_to)
ids_fac_from = map_ids_fac_from[ids_person_act]
ids_fac_to = activities.ids_facility[ids_act_to]
centroids_from = facilities.centroids[ids_fac_from]
centroids_to = facilities.centroids[ids_fac_to]
# origin edge and position
ids_edge_from = facilities.ids_roadedge_closest[ids_fac_from]
poss_edge_from = facilities.positions_roadedge_closest[ids_fac_from]
# destination edge and position
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
poss_edge_to = facilities.positions_roadedge_closest[ids_fac_to]
ids_stop_from = ptstops.get_closest(centroids_from)
ids_stop_to = ptstops.get_closest(centroids_to)
ids_stopedge_from = ids_laneedge[ids_stoplane[ids_stop_from]]
ids_stopedge_to = ids_laneedge[ids_stoplane[ids_stop_to]]
# do random pos here
poss_stop_from = 0.5*(ptstops.positions_from[ids_stop_from]
+ ptstops.positions_to[ids_stop_from])
poss_stop_to = 0.5*(ptstops.positions_from[ids_stop_to]
+ ptstops.positions_to[ids_stop_to])
i = 0.0
for id_person, id_plan, time_from, id_act_from, id_act_to, name_acttype_to, duration_act_to, id_edge_from, pos_edge_from, id_edge_to, pos_edge_to, id_stop_from, id_stopedge_from, pos_stop_from, id_stop_to, id_stopedge_to, pos_stop_to\
in zip(ids_person_act, ids_plan, times_from, ids_act_from, ids_act_to, names_acttype_to, durations_act_to, ids_edge_from, poss_edge_from, ids_edge_to, poss_edge_to, ids_stop_from, ids_stopedge_from, poss_stop_from, ids_stop_to, ids_stopedge_to, poss_stop_to):
n_pers = len(ids_person_act)
if logger:
logger.progress(i/n_pers*100)
i += 1.0
print 79*'_'
print ' id_plan=%d, id_person=%d, ' % (id_plan, id_person)
time = self.plan_transit(id_plan, time_from,
id_edge_from, pos_edge_from,
id_edge_to, pos_edge_to,
id_stop_from, id_stopedge_from, pos_stop_from,
id_stop_to, id_stopedge_to, pos_stop_to,
#dist_from_to, dist_walk_max,
walkstages, transitstages,
ptlines, ptfstar, pttimes,
stops_to_enter, stops_to_exit,
ids_laneedge, ids_stoplane, ptstops)
# update time for trips estimation for this plan
plans.times_est[id_plan] += time-time_from
# define current end time without last activity duration
plans.times_end[id_plan] = time
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=edges.ids_lanes[id_edge_to][0],
positions=pos_edge_to,
)
# store time for next iteration in case other activities are
# following
map_times[id_person] = time
# select persons and activities for next step
ind_act += 1
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(ind_act, ids_person=ids_person_act)
class BikeTransitStrategy(BikeStrategy, TransitStrategy):
def __init__(self, ident, parent=None,
name='Bike+Public Transport Strategy',
info='With this strategy, the person combines bike and public transport.',
**kwargs):
self._init_objman(ident, parent, name=name, info=info, **kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
# specific init
self._init_attributes()
self._init_constants()
def _init_attributes(self):
# print 'StrategyMixin._init_attributes'
BikeStrategy._init_attributes(self)
TransitStrategy._init_attributes(self)
def _init_constants(self):
BikeStrategy._init_constants(self)
TransitStrategy._init_constants(self)
def preevaluate(self, ids_person):
"""
Preevaluation of this strategy for person IDs in vector ids_person.
Returns a preevaluation vector with a preevaluation value
for each person ID. The values of the preevaluation vector are as follows:
-1 : Strategy cannot be applied
0 : Strategy can be applied, but the preferred mode is not used
1 : Strategy can be applied, and preferred mode is part of the strategy
2 : Strategy uses predominantly preferred mode
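
Illustrative sketch (hypothetical values): if persons 1 and 2 own a bike,
person 2 prefers cycling and person 3 has no bike, then

    preeval = strategy.preevaluate(np.array([1, 2, 3]))

could return array([0, 1, -1], dtype=np.int32).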
"""
n_pers = len(ids_person)
persons = self.get_virtualpop()
preeval = np.zeros(n_pers, dtype=np.int32)
inds_prefer = (persons.ids_mode_preferred[ids_person] == self._id_mode_bus)\
| (persons.ids_mode_preferred[ids_person] == self._id_mode_bike)
inds_avail = persons.ids_ibike[ids_person] > -1
preeval[np.logical_not(inds_avail)] = -1
# TODO: here we could exclude by age or distance facilities-stops
# put 0 for persons whose preference is not public transport
preeval[inds_avail & np.logical_not(inds_prefer)] = 0
# put 1 for persons with bike access and who prefer bike or bus
preeval[inds_avail & inds_prefer] = 1
print ' BikeTransitStrategy.preevaluate', len(np.flatnonzero(preeval))
return preeval
def plan(self, ids_person, logger=None):
"""
Generates a plan for these persons according to this strategy.
Overridden by specific strategy.
"""
print 'BikeTransitStrategy.plan', len(ids_person)
#make_plans_private(self, ids_person = None, mode = 'passenger')
# routing necessary?
virtualpop = self.get_virtualpop()
plans = virtualpop.get_plans() # self._plans
demand = virtualpop.get_demand()
ptlines = demand.ptlines
walkstages = plans.get_stagetable('walks')
transitstages = plans.get_stagetable('transits')
ridestages = plans.get_stagetable('bikerides')
activitystages = plans.get_stagetable('activities')
activities = virtualpop.get_activities()
activitytypes = demand.activitytypes
landuse = virtualpop.get_landuse()
facilities = landuse.facilities
#parking = landuse.parking
scenario = virtualpop.get_scenario()
net = scenario.net
edges = net.edges
lanes = net.lanes
modes = net.modes
ptstops = net.ptstops
# print ' demand',demand
# print ' demand.ptlines',demand.ptlines,dir(demand.ptlines)
# print ' demand.ptlines.get_ptlinks()',demand.ptlines.get_ptlinks()
# print ' demand.virtualpop',demand.virtualpop,dir(demand.virtualpop)
# print ' demand.trips',demand.trips,dir(demand.trips)
if len(ptlines) == 0:
print 'WARNING in BikeTransitStrategy.plan: no transit services available.'
return False
ptlinks = ptlines.get_ptlinks()
ptlinktypes = ptlinks.types.choices
ptfstar = ptlinks.get_fstar()
pttimes = ptlinks.get_times()
stops_to_enter, stops_to_exit = ptlinks.get_stops_to_enter_exit()
ids_stoplane = ptstops.ids_lane
ids_laneedge = net.lanes.ids_edge
times_est_plan = plans.times_est
# here we can determine edge weights for different modes
# this could be centralized to avoid redundance
plans.prepare_stagetables(['walks', 'bikerides', 'transits', 'activities'])
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(0, ids_person=ids_person)
if len(ids_person_act) == 0:
print 'WARNING in BikeTransitStrategy.plan: no eligible persons found.'
return False
# temporary maps from ids_person to other parameters
nm = np.max(ids_person_act)+1
map_ids_plan = np.zeros(nm, dtype=np.int32)
#ids_plan_act = virtualpop.add_plans(ids_person_act, id_strategy = self.get_id_strategy())
map_ids_plan[ids_person_act] = virtualpop.add_plans(ids_person_act, id_strategy=self.get_id_strategy())
map_times = np.zeros(nm, dtype=np.int32)
map_times[ids_person_act] = activities.get_times_end(ids_act_from, pdf='unit')
# set start time to plans (important!)
plans.times_begin[map_ids_plan[ids_person_act]] = map_times[ids_person_act]
map_ids_fac_from = np.zeros(nm, dtype=np.int32)
map_ids_fac_from[ids_person_act] = activities.ids_facility[ids_act_from]
n_plans = len(ids_person_act)
print 'BikeTransitStrategy.plan n_plans=', n_plans
# make initial activity stage
ids_edge_from = facilities.ids_roadedge_closest[map_ids_fac_from[ids_person_act]]
poss_edge_from = facilities.positions_roadedge_closest[map_ids_fac_from[ids_person_act]]
# this is the time when first activity starts
# first activity is normally not simulated
names_acttype_from = activitytypes.names[activities.ids_activitytype[ids_act_from]]
durations_act_from = activities.get_durations(ids_act_from)
times_from = map_times[ids_person_act]-durations_act_from
#times_from = activities.get_times_end(ids_act_from, pdf = 'unit')
for id_plan,\
time,\
id_act_from,\
name_acttype_from,\
duration_act_from,\
id_edge_from,\
pos_edge_from \
in zip(map_ids_plan[ids_person_act],
times_from,
ids_act_from,
names_acttype_from,
durations_act_from,
ids_edge_from,
poss_edge_from):
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_from,
names_activitytype=name_acttype_from,
durations=duration_act_from,
ids_lane=edges.ids_lanes[id_edge_from][0],
positions=pos_edge_from,
)
##
ind_act = 0
# main loop while there are persons performing
# an activity at index ind_act
while len(ids_person_act) > 0:
ids_plan = map_ids_plan[ids_person_act]
times_from = map_times[ids_person_act]
ids_veh = virtualpop.ids_ibike[ids_person_act]
dists_walk_max = virtualpop.dists_walk_max[ids_person_act]
names_acttype_to = activitytypes.names[activities.ids_activitytype[ids_act_to]]
durations_act_to = activities.get_durations(ids_act_to)
ids_fac_from = map_ids_fac_from[ids_person_act]
ids_fac_to = activities.ids_facility[ids_act_to]
centroids_from = facilities.centroids[ids_fac_from]
centroids_to = facilities.centroids[ids_fac_to]
# origin edge and position
ids_edge_from = facilities.ids_roadedge_closest[ids_fac_from]
poss_edge_from = facilities.positions_roadedge_closest[ids_fac_from]
# destination edge and position
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
poss_edge_to = facilities.positions_roadedge_closest[ids_fac_to]
ids_stop_from = ptstops.get_closest(centroids_from)
ids_stop_to = ptstops.get_closest(centroids_to)
ids_stopedge_from = ids_laneedge[ids_stoplane[ids_stop_from]]
ids_stopedge_to = ids_laneedge[ids_stoplane[ids_stop_to]]
centroids_stop_from = ptstops.centroids[ids_stop_from]
centroids_stop_to = ptstops.centroids[ids_stop_to]
# do random pos here
poss_stop_from = 0.5*(ptstops.positions_from[ids_stop_from]
+ ptstops.positions_to[ids_stop_from])
poss_stop_to = 0.5*(ptstops.positions_from[ids_stop_to]
+ ptstops.positions_to[ids_stop_to])
dists_from_to = np.sqrt(np.sum((centroids_to - centroids_from)**2, 1))
dists_from_stop_from = np.sqrt(np.sum((centroids_stop_from - centroids_from)**2, 1))
dists_stop_to_to = np.sqrt(np.sum((centroids_to - centroids_stop_to)**2, 1))
i = 0.0
for id_person, id_plan, time_from, id_act_from, id_act_to,\
name_acttype_to, duration_act_to, id_veh,\
id_edge_from, pos_edge_from, id_edge_to, pos_edge_to,\
id_stop_from, id_stopedge_from, pos_stop_from,\
id_stop_to, id_stopedge_to, pos_stop_to,\
dist_from_to, dist_from_stop_from, dist_stop_to_to, dist_walk_max\
in zip(ids_person_act, ids_plan, times_from, ids_act_from, ids_act_to,
names_acttype_to, durations_act_to, ids_veh,
ids_edge_from, poss_edge_from, ids_edge_to, poss_edge_to,
ids_stop_from, ids_stopedge_from, poss_stop_from,
ids_stop_to, ids_stopedge_to, poss_stop_to,
dists_from_to, dists_from_stop_from, dists_stop_to_to, dists_walk_max):
n_pers = len(ids_person_act)
if logger:
logger.progress(i/n_pers*100)
i += 1.0
print 79*'_'
print ' id_plan=%d, id_person=%d, ' % (id_plan, id_person)
#dist_from_stop_from, dist_stop_to_to
time = self.plan_bikeride(id_plan, time_from, id_veh,
id_edge_from, pos_edge_from,
id_stopedge_from, pos_stop_from,
dist_from_stop_from, dist_walk_max,
walkstages, ridestages)
time = self.plan_transit(id_plan, time,
id_stopedge_from, pos_stop_from,
id_stopedge_to, pos_stop_to,
id_stop_from, id_stopedge_from, pos_stop_from,
id_stop_to, id_stopedge_to, pos_stop_to,
#dist_from_to, dist_walk_max,
walkstages, transitstages,
ptlines, ptfstar, pttimes,
stops_to_enter, stops_to_exit,
ids_laneedge, ids_stoplane, ptstops)
time = self.plan_bikeride(id_plan, time, id_veh,
id_stopedge_to, pos_stop_to,
id_edge_to, pos_edge_to,
dist_stop_to_to, dist_walk_max,
walkstages, ridestages)
# update time for trips estimation for this plan
plans.times_est[id_plan] += time-time_from
# define current end time without last activity duration
plans.times_end[id_plan] = time
id_stage_act, time = activitystages.append_stage(
id_plan, time,
ids_activity=id_act_to,
names_activitytype=name_acttype_to,
durations=duration_act_to,
ids_lane=edges.ids_lanes[id_edge_to][0],
positions=pos_edge_to,
)
# store time for next iteration in case other activities are
# following
map_times[id_person] = time
# select persons and activities for next step
ind_act += 1
ids_person_act, ids_act_from, ids_act_to\
= virtualpop.get_activities_from_pattern(ind_act, ids_person=ids_person_act)
class Strategies(am.ArrayObjman):
def __init__(self, ident, virtualpop, **kwargs):
self._init_objman(ident,
parent=virtualpop,
name='Mobility Strategies',
info="""Contains different mobility strategies.
A strategy has methods to identify whether a strategy is applicable to a person
and to make a plan for a person.
""",
version=0.2,
**kwargs)
self.add_col(am.ArrayConf('names',
default='',
dtype='object',
perm='r',
is_index=True,
name='Short name',
info='Strategy name. Must be unique, used as index.',
))
self._init_attributes()
self._init_constants()
def _init_attributes(self):
self.add_col(cm.ObjsConf('strategies',
#groupnames = ['state'],
name='Strategy',
info='Strategy object.',
))
self.add_col(am.ArrayConf('colors', np.ones(4, np.float32),
dtype=np.float32,
metatype='color',
perm='rw',
name='Color',
info="Route color. Color as RGBA tuple with values from 0.0 to 1.0",
xmltag='color',
))
if self.get_version() < 0.2:
self._set_colors_default()
self.set_version(0.2)
def format_ids(self, ids):
return ', '.join(self.names[ids])
def get_id_from_formatted(self, idstr):
return self.names.get_id_from_index(idstr)
def get_ids_from_formatted(self, idstrs):
return self.names.get_ids_from_indices_save(idstrs.split(','))
def add_default(self):
self.add_strategy('walk', WalkStrategy)
self.add_strategy('auto', AutoStrategy)
self.add_strategy('bike', BikeStrategy)
self.add_strategy('transit', TransitStrategy)
self.add_strategy('biketransit', BikeTransitStrategy)
self._set_colors_default()
def _set_colors_default(self):
colors_default = {'walk': np.array([160, 72, 0, 220], np.float32)/255,
'auto': np.array([250, 213, 0, 220], np.float32)/255,
'bike': np.array([8, 191, 73, 210], np.float32)/255,
'transit': np.array([8, 77, 191, 220], np.float32)/255,
'biketransit': np.array([8, 201, 223, 220], np.float32)/255,
}
ids = self.names.get_ids_from_indices_save(colors_default.keys())
self.colors[ids] = colors_default.values() # np.array(colors_default.values(), np.float32).reshape(-1,4)
#self.colors.indexset(colors_default.keys(), colors_default.values())
def add_strategy(self, ident, Strategyclass, color=(0.0, 0.0, 0.0, 1.0)):
# print 'add_strategy', ident
if not self.names.has_index(ident):
strategy = Strategyclass(ident, self)
self.add_row(names=ident,
strategies=strategy,
colors=color,
)
return strategy
else:
# print 'WARNING in add_strategy: strategy %s already initialized'%ident
return self.strategies[self.names.get_id_from_index(ident)]
def preevaluate(self, ids_person):
"""
Preevaluation of strategies for person IDs in vector ids_person.
Returns a vector with strategy IDs and a preevaluation matrix.
The rows of the matrix correspond to each person ID.
The columns correspond to each strategy ID.
The values of the preevaluation matrix are as follows:
-1 : Strategy cannot be applied
0 : Strategy can be applied, but the preferred mode is not used
1 : Strategy can be applied, and preferred mode is part of the strategy
2 : Strategy uses predominantly preferred mode
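
Illustrative sketch (hypothetical values): with two persons and three
strategies, the call

    ids_strat, preeval = strategies.preevaluate(ids_person)

could return preeval = [[ 1, -1, 0],
                        [ 2,  0, 1]]
where preeval[0, 2] is the preevaluation of strategy ids_strat[2]
for the first person in ids_person.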
"""
print 'preevaluate strategies'
ids_strat = self.get_ids()
n_pers = len(ids_person)
n_strat = len(ids_strat)
preeval = np.zeros((n_pers, n_strat), dtype=np.int32)
for i, strategy in zip(range(n_strat), self.strategies[ids_strat]):
print ' preevaluate strategy', strategy.ident
preeval[:, i] = strategy.preevaluate(ids_person)  # one column per strategy, one row per person
return ids_strat, preeval
class StageTypeMixin(am.ArrayObjman):
def init_stagetable(self, ident, stages, name='', info="Stage of Plan"):
self._init_objman(ident=ident, parent=stages, name=name,
info=info,
version=0.2,
)
self.add_col(am.IdsArrayConf('ids_plan', self.get_plans(),
#groupnames = ['state'],
name='ID plan',
info='ID of plan.',
xmltag='type',
))
self.add_col(am.ArrayConf('times_start', -1.0,
groupnames=['parameters'], # new
name='Start time',
unit='s',
info='Planned or estimated time when this stage starts. Value -1 means unknown.',
))
self.add_col(am.ArrayConf('durations', -1.0,
name='Duration',
groupnames=['parameters'], # new
unit='s',
info='Planned or estimated duration for this stage starts. Value -1 means unknown.',
xmltag='type',
))
def get_plans(self):
return self.parent
def get_ids_from_ids_plan(self, ids_plan):
"""
Returns only stage IDs which are part of the given plans IDs.
"""
# print 'get_ids_from_ids_plan for',type(ids_plan),ids_plan
ids = self.get_ids()
#ids_plan_part = self.ids_plan[ids]
are_selected = np.zeros(len(ids), dtype=np.bool)
for ind, id_plan_part in zip(xrange(len(ids)), self.ids_plan[ids]):
are_selected[ind] = id_plan_part in ids_plan
return ids[are_selected]
# print ' ids_plan_part',type(ids_plan_part),ids_plan_part
# print ' ids_selected',type(ids_plan_part.intersection(ids_plan)),ids_plan_part.intersection(ids_plan)
# return np.array(list(ids_plan_part.intersection(ids_plan)), dtype = np.int32)
def get_virtualpop(self):
# print 'get_virtualpop '
return self.parent.parent
def prepare_planning(self):
pass
def append_stage(self, id_plan, time_start, **kwargs):
# try to fix timing
# if time_start<0:
# time_start_prev, duration_prev = self.parent.plans.get_timing_laststage(id_plan)
# if (duration_prev>=0)&(time_start_prev>=0):
# time_start = time_start_prev+duration_prev
id_stage = self.add_row(ids_plan=id_plan, times_start=time_start, **kwargs)
# print 'STAGE.appended stage %s id_plan=%d, id_stage=%d, t=%d'%(self.get_name(),id_plan,id_stage,time_start)
# for key in kwargs.keys():
# print ' %s=%s'%(key,kwargs[key])
# print ' --id_plan, self, id_stage',id_plan, self, id_stage#,self.ids_plan.get_linktab()
self.parent.append_stage(id_plan, self, id_stage)
# print ' plan appended',id_plan, self, id_stage
return id_stage, time_start+self.durations[id_stage]
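# Illustrative sketch (hypothetical IDs): successive stages of a plan are
# chained by passing the returned end time as start time of the next stage:
#
#   id_stage, time = walkstages.append_stage(id_plan, time_start, ...)
#   id_stage, time = transitstages.append_stage(id_plan, time, ...)
#
# Each call also registers the stage in the plan via Plans.append_stage.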
def modify_stage(self, id_stage, time_start, **kwargs):
self.set_row(id_stage, **kwargs)
return time_start+self.durations[id_stage]
def get_timing(self, id_stage):
#ind = self.get_ind(id_stage)
return self.times_start[id_stage], self.durations[id_stage]
def to_xml(self, id_stage, fd, indent=0):
"""
To be overridden by specific stage class.
"""
pass
class TransitStages(StageTypeMixin):
def __init__(self, ident, stages, name='Transit rides', info='Ride on a single public transport line (no transfers).'):
self.init_stagetable(ident,
stages, name=name,
info=info,
)
self._init_attributes()
def _init_attributes(self):
#ptstops = self.parent.get_ptstops()
edges = self.get_virtualpop().get_net().edges
if hasattr(self, 'ids_fromstop'):
self.delete('ids_fromstop')
self.delete('ids_tostop')
self.add_col(am.IdsArrayConf('ids_line', self.get_virtualpop().get_ptlines(),
groupnames=['parameters'],
name='ID line',
info='ID of public transport line.',
))
self.add_col(am.IdsArrayConf('ids_fromedge', edges,
groupnames=['parameters'],
name='Edge ID from',
info='Edge ID of departure bus stop or station.',
))
self.add_col(am.IdsArrayConf('ids_toedge', edges,
groupnames=['parameters'],
name='Edge ID to',
info='Edge ID of destination bus stop or station.',
))
def _init_constants(self):
self.get_attrsman().do_not_save_attrs(['_costs', '_fstar', ])
def prepare_planning(self):
ptlinks = self.ids_line.get_linktab().ptlinks.get_value()
if len(ptlinks) == 0:
# no links built...built them
ptlinks.build()
self._costs = ptlinks.get_times()
self._fstar = ptlinks.get_fstar()
def append_stage(self, id_plan, time_start=-1.0,
id_line=-1, duration=0.0,
id_fromedge=-1, id_toedge=-1, **kwargs):
"""
Appends a transit stage to plan id_plan.
"""
# print 'TransitStages.append_stage',id_stage
id_stage, time_end = StageTypeMixin.append_stage(self,
id_plan,
time_start,
durations=duration,
ids_line=id_line,
ids_fromedge=id_fromedge,
ids_toedge=id_toedge,
)
# add this stage to the vehicle database
# ind_ride gives the index of this ride (within the same plan??)
#ind_ride = self.parent.get_iautos().append_ride(id_veh, id_stage)
return id_stage, time_end
def to_xml(self, id_stage, fd, indent=0):
# <ride from="1/3to0/3" to="0/4to1/4" lines="train0"/>
net = self.get_virtualpop().get_net()
#ids_stoplane = net.ptstops.ids_lane
#ids_laneedge = net.lanes.ids_edge
ids_sumoedge = net.edges.ids_sumo
#ind = self.get_ind(id_stage)
fd.write(xm.start('ride', indent=indent))
fd.write(xm.num('from', ids_sumoedge[self.ids_fromedge[id_stage]]))
fd.write(xm.num('to', ids_sumoedge[self.ids_toedge[id_stage]]))
fd.write(xm.num('lines', self.ids_line.get_linktab().linenames[self.ids_line[id_stage]]))
# if self.cols.pos_edge_from[ind]>0:
# fd.write(xm.num('departPos', self.cols.pos_edge_from[ind]))
# if self.cols.pos_edge_to[ind]>0:
# fd.write(xm.num('arrivalPos', self.cols.pos_edge_to[ind]))
fd.write(xm.stopit()) # ends stage
class AutorideStages(StageTypeMixin):
def __init__(self, ident, population,
name='Auto rides',
info='Rides with privately owned auto.',
version=1.0,
):
self.init_stagetable(ident, population, name=name, info=info)
# print 'Rides.__init__',self.get_name()
self._init_attributes()
def _init_attributes(self):
# TODO: this structure needs review: the private vehicle is part of a person, not a stage
# street parking at home and work could be in stage. Private garage is part of person...
# print '_init_attributes',self.parent.get_iautos(), self.ident,self.parent.get_landuse().parking
self.add_col(am.IdsArrayConf('ids_iauto', self.get_virtualpop().get_iautos(),
groupnames=['state'],
name='ID vehicle',
info='ID of private vehicle.',
))
self.add_col(am.ArrayConf('times_init', -1.0,
name='Init. time',
unit='s',
info='Initialization time, which is the time when the vehicle appears in the scenario. Value -1 means unknown.',
))
self.add_col(am.IdsArrayConf('ids_parking_from', self.get_virtualpop().get_landuse().parking,
groupnames=['state'],
name='ID dep. parking',
info='Parking ID at the departure of the ride starts.',
))
self.add_col(am.IdsArrayConf('ids_parking_to', self.get_virtualpop().get_landuse().parking,
groupnames=['state'],
name='ID arr. parking',
info='Parking ID at the arrival of the ride.',
))
self.add_col(am.IdlistsArrayConf('ids_edges', self.get_virtualpop().get_net().edges,
groupnames=['_private'],
name='Route',
info="The vehicle's route as a sequence of edge IDs.",
))
self.add(cm.AttrConf('dist_ride_min', 400.0,
dtype='object',
groupnames=['options'],
perm='rw',
name='Min ride dist.',
info='Minimum ride distance. If the distance between parkings is less, the person will walk.',
))
# if hasattr(self,'parking'):
# self.delete('parking')
def _init_constants(self):
self.get_attrsman().do_not_save_attrs(['_costs', '_fstar', ])
def prepare_planning(self):
net = self.get_virtualpop().get_net()
# ??? do this for all vehicles??
self._costs = net.edges.get_times(id_mode=net.modes.get_id_mode('passenger'),
is_check_lanes=True)
self._fstar = net.edges.get_fstar(is_ignor_connections=False)
def append_stage(self, id_plan, time_start=-1.0, id_veh=-1,
time_init=-1,
id_parking_from=-1, id_parking_to=-1, **kwargs):
"""
Appends a ride stage to plan id_plan in case the ride is feasible.
The ride is feasible if departure and arrival parkings are connected.
If feasible, the stage ID and estimated time when stage is finished
will be returned.
If not feasible -1 and start time will be returned.
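
Illustrative sketch (hypothetical IDs) of handling the return value:

    id_stage, time = autorides.append_stage(id_plan, time_start,
                                            id_veh=id_veh,
                                            id_parking_from=id_from,
                                            id_parking_to=id_to)
    if id_stage == -1:
        pass  # parkings not connected, e.g. fall back to walking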
"""
# print 'Rides.append_stage',id_stage
# check feasibility
route, dist, duration = self.get_route_between_parking(id_parking_from, id_parking_to)
if (len(route) > 0): # |(dist > self.dist_ride_min.get_value()): # is there a connection
# create stage
id_stage, time_end = StageTypeMixin.append_stage(self,
id_plan,
time_start,
durations=duration,
times_init=time_init,
ids_iauto=id_veh,
ids_parking_from=id_parking_from,
ids_parking_to=id_parking_to,
ids_edges=route,
)
# add this stage to the vehicle database
# ind_ride gives the index of this ride (within the same plan??)
#ind_ride = self.parent.get_iautos().append_ride(id_veh, id_stage)
return id_stage, time_end
else:
# not connected or too short of a distance
return -1, time_start # no stage creation took place
def get_route_between_parking(self, id_parking_from, id_parking_to):
"""
Return route and distance of ride with vehicle type vtype
between id_parking_from and id_parking_to
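
The returned distance is the route length reduced by the parking
offsets, approximately
dist = sum(lengths[route]) - pos_from - (lengths[id_edge_to] - pos_to),
and the duration is estimated from mode-specific edge travel times.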
"""
# print 'get_route_between_parking',id_parking_from, id_parking_to
scenario = self.get_virtualpop().get_scenario()
edges = scenario.net.edges
lanes = scenario.net.lanes
# print self.get_demand().getVehicles().cols.maxSpeed
#v_max = self.get_demand().getVehicles().maxSpeed.get(vtype)
parking = scenario.landuse.parking
ids_lanes = parking.ids_lane[[id_parking_from, id_parking_to]]
id_edge_from, id_edge_to = lanes.ids_edge[ids_lanes]
pos_from, pos_to = parking.positions[[id_parking_from, id_parking_to]]
# print ' id_edge_from, id_edge_to=',id_edge_from, id_edge_to
duration_approx, route = routing.get_mincostroute_edge2edge(
id_edge_from,
id_edge_to,
weights=self._costs, # mode-specific!!
fstar=self._fstar # mode-specific!!
)
# here is a big problem: starting with the successor node of edge_from
# may mean that the first edge of the route is not connected with edge_from,
# and arriving at the predecessor node of edge_to may mean that edge_to
# is not connected with the last edge of the route.
#route = [edge_from]+route+[edge_to]
dist = np.sum(edges.lengths[route])
dist = dist - pos_from - (edges.lengths[id_edge_to] - pos_to)
# if 0:
# if len(route)>0:
# print ' dist,duration',dist,duration_approx
# else:
# print ' no route found'
return route, dist, duration_approx
# def make_id_veh_ride(self, id_stage, i_ride):
# # make a unique vehicle ID for this stage
# self.inds_ride[id_stage] = i_ride
# return str(self.ids_veh[id_stage])+'.'+str(i_ride)
def get_writexmlinfo(self, ids_plan, is_route=True):
print 'AutorideStages.get_writexmlinfo', len(ids_plan)
iautos = self.get_virtualpop().get_iautos()
writefunc = iautos.prepare_write_xml()
ids = self.get_ids_from_ids_plan(ids_plan)
writefuncs = np.zeros(len(ids), dtype=np.object)
writefuncs[:] = writefunc
return self.times_init[ids], writefuncs, ids
def to_xml(self, id_stage, fd, indent=0):
#lanes = self.parent.get_scenario().net.lanes
scenario = self.get_virtualpop().get_scenario()
edges = scenario.net.edges
edgeindex = edges.ids_sumo
parking = scenario.landuse.parking
ind = self.get_ind(id_stage)
fd.write(xm.start('ride', indent=indent))
id_edge_from, pos_from = parking.get_edge_pos_parking(self.ids_parking_from.value[ind])
id_edge_to, pos_to = parking.get_edge_pos_parking(self.ids_parking_to.value[ind])
# edgeindex.get_index_from_id(self.ids_edge_to.value[ind])
fd.write(xm.num('from', edgeindex[id_edge_from]))
fd.write(xm.num('to', edgeindex[id_edge_to]))
fd.write(xm.num('lines', self.ids_iauto.get_linktab().get_id_line_xml(
self.ids_iauto[id_stage]))) # mode specific
# if 0:
# #pos = pos_from
# length = max(edges.lengths[id_edge_from]-4.0,0.0)
#
# if (pos_from>0) & (pos_from < length ):
# fd.write(xm.num('departPos', pos))
#
# elif pos_from < 0:
# fd.write(xm.num('departPos', 0.0))
#
# else:
# fd.write(xm.num('departPos', length))
#
# #pos = self.positions_to.value[ind]
# length = max(edges.lengths[id_edge_to]-4.0, 0.0)
# if (pos_to>0) & (pos_to < length ):
# fd.write(xm.num('arrivalPos', pos_to))
#
# elif pos_to < 0:
# fd.write(xm.num('arrivalPos', 0.0))
#
# else:
# fd.write(xm.num('arrivalPos', length))
fd.write(xm.stopit()) # ends stage
class BikerideStages(StageTypeMixin):
def __init__(self, ident, population,
name='Bike rides',
info='Rides with privately owned bike.',
version=1.0,
):
self.init_stagetable(ident, population, name=name, info=info)
# print 'Rides.__init__',self.get_name()
self._init_attributes()
def _init_attributes(self):
# TODO: this structure needs review: the private vehicle is part of a person, not a stage
# street parking at home and work could be in stage. Private garage is part of person...
# print '_init_attributes',self.parent.get_iautos(), self.ident,self.parent.get_landuse().parking
self.add_col(am.IdsArrayConf('ids_ibike', self.get_virtualpop().get_ibikes(),
groupnames=['state'],
name='ID bike',
info='ID of private, individual bike.',
))
self.add_col(am.ArrayConf('times_init', -1.0,
name='Init. time',
unit='s',
info='Initialization time, which is the time when the vehicle appears in the scenario. Value -1 means unknown.',
))
edges = self.get_virtualpop().get_net().edges
self.add_col(am.IdsArrayConf('ids_edge_from', edges,
groupnames=['state'],
name='ID Dep. edge',
info='Edge ID at departure of walk.',
))
self.add_col(am.IdsArrayConf('ids_edge_to', edges,
groupnames=['state'],
name='ID Arr. edge',
info='Edge ID where walk finishes.',
))
self.add_col(am.ArrayConf('positions_from', 0.0,
dtype=np.float32,
#choices = OPTIONMAP_POS_DEPARTURE,
perm='r',
name='Depart pos',
unit='m',
info="Position on edge at the moment of departure.",
#xmltag = 'departPos',
#xmlmap = get_inversemap(OPTIONMAP_POS_ARRIVAL),
))
self.add_col(am.ArrayConf('positions_to', 0.0,
dtype=np.float32,
#choices = OPTIONMAP_POS_ARRIVAL,
perm='r',
name='Arrival pos',
unit='m',
info="Position on edge at the moment of arrival.",
#xmltag = 'arrivalPos',
#xmlmap = get_inversemap(OPTIONMAP_POS_ARRIVAL),
))
self.add_col(am.IdlistsArrayConf('ids_edges', self.get_virtualpop().get_net().edges,
groupnames=['_private'],
name='Route',
info="The vehicle's route as a sequence of edge IDs.",
))
def _init_constants(self):
self.get_attrsman().do_not_save_attrs(['_costs', '_fstar', ])
def prepare_planning(self):
net = self.get_virtualpop().get_net()
print 'prepare_planning'
self._costs = net.edges.get_times(id_mode=net.modes.get_id_mode('bicycle'),
is_check_lanes=True)
ids_edge = net.edges.get_ids()
for id_edge, cost in zip(ids_edge, self._costs[ids_edge]):
print ' id_edge', id_edge, 'sumo', net.edges.ids_sumo[id_edge], cost
self._fstar = net.edges.get_fstar(is_ignor_connections=False)
def append_stage(self, id_plan, time_start=-1.0, id_veh=-1,
time_init=-1,
id_edge_from=-1, id_edge_to=-1,
position_edge_from=0.0, position_edge_to=0.0,
**kwargs):
"""
Appends a bike ride stage to plan id_plan in case the ride is feasible.
The ride is feasible if departure and arrival edges are connected.
If feasible, the stage ID and estimated time when stage is finished
will be returned.
If not feasible -1 and start time will be returned.
"""
# print 'BikeRides.append_stage',id_plan,time_start,time_init
#edges = self.get_virtualpop().get_net().edges
# check feasibility
#route, dist, duration = self.get_route_between_parking(id_parking_from, id_parking_to)
# print ' id_edge_from, id_edge_to=',id_edge_from, id_edge_to
duration_approx, route = routing.get_mincostroute_edge2edge(
id_edge_from,
id_edge_to,
weights=self._costs, # mode-specific!!
fstar=self._fstar # mode-specific!!
)
#route = [edge_from]+route+[edge_to]
#dist = np.sum(edges.lengths[route])
#dist = dist - pos_from - ( edges.lengths[id_edge_to] - pos_to)
if (len(route) > 0): # |(dist > self.dist_ride_min.get_value()): # is there a connection
# create stage
id_stage, time_end = StageTypeMixin.append_stage(self,
id_plan,
time_start,
durations=duration_approx,
times_init=time_init,
ids_ibike=id_veh,
ids_edge_from=id_edge_from,
positions_from=position_edge_from,
ids_edge_to=id_edge_to,
positions_to=position_edge_to,
ids_edges=route,
)
# print ' route = ',route
# print ' ids_edges = ',self.ids_edges[id_stage]
# add this stage to the vehicle database
# ind_ride gives the index of this ride (within the same plan??)
#ind_ride = self.parent.get_iautos().append_ride(id_veh, id_stage)
return id_stage, time_end
else:
# not connected or too short of a distance
return -1, time_start # no stage creation took place
def get_writexmlinfo(self, ids_plan, is_route=True):
print 'BikerideStages.get_writexmlinfo', len(ids_plan)
ibikes = self.get_virtualpop().get_ibikes()
bikewritefunc = ibikes.prepare_write_xml()
ids = self.get_ids_from_ids_plan(ids_plan)
bikewritefuncs = np.zeros(len(ids), dtype=np.object)
bikewritefuncs[:] = bikewritefunc
return self.times_init[ids], bikewritefuncs, ids
def to_xml(self, id_stage, fd, indent=0):
#lanes = self.parent.get_scenario().net.lanes
scenario = self.get_virtualpop().get_scenario()
edges = scenario.net.edges
edgeindex = edges.ids_sumo
#parking = scenario.landuse.parking
#ind = self.get_ind(id_stage)
fd.write(xm.start('ride', indent=indent))
#id_edge_from, pos_from = parking.get_edge_pos_parking(self.ids_parking_from.value[ind])
#id_edge_to, pos_to = parking.get_edge_pos_parking(self.ids_parking_to.value[ind])
# edgeindex.get_index_from_id(self.ids_edge_to.value[ind])
id_edge_from = self.ids_edge_from[id_stage]
fd.write(xm.num('from', edgeindex[self.ids_edge_from[id_stage]]))
fd.write(xm.num('to', edgeindex[self.ids_edge_to[id_stage]]))
fd.write(xm.num('lines', self.ids_ibike.get_linktab().get_id_line_xml(
self.ids_ibike[id_stage]))) # mode specific
# if 0:
# #pos = pos_from
# length = max(edges.lengths[id_edge_from]-4.0,0.0)
#
# if (pos_from>0) & (pos_from < length ):
# fd.write(xm.num('departPos', pos))
#
# elif pos_from < 0:
# fd.write(xm.num('departPos', 0.0))
#
# else:
# fd.write(xm.num('departPos', length))
#
# #pos = self.positions_to.value[ind]
# length = max(edges.lengths[id_edge_to]-4.0, 0.0)
# if (pos_to>0) & (pos_to < length ):
# fd.write(xm.num('arrivalPos', pos_to))
#
# elif pos_to < 0:
# fd.write(xm.num('arrivalPos', 0.0))
#
# else:
# fd.write(xm.num('arrivalPos', length))
fd.write(xm.stopit()) # ends stage
class WalkStages(StageTypeMixin):
def __init__(self, ident, stages, name='WalkStages',
info='walk from a position on a lane to another position of another lane.'):
self.init_stagetable(ident, stages, name=name, info=info)
edges = self.get_virtualpop().get_scenario().net.edges
self.add_col(am.IdsArrayConf('ids_edge_from', edges,
groupnames=['state'],
name='ID Dep. edge',
info='Edge ID at departure of walk.',
))
self.add_col(am.IdsArrayConf('ids_edge_to', edges,
groupnames=['state'],
name='ID Arr. edge',
info='Edge ID where walk finishes.',
))
self.add_col(am.ArrayConf('positions_from', 0.0,
dtype=np.float32,
#choices = OPTIONMAP_POS_DEPARTURE,
perm='r',
name='Depart pos',
unit='m',
info="Position on edge at the moment of departure.",
#xmltag = 'departPos',
#xmlmap = get_inversemap(OPTIONMAP_POS_ARRIVAL),
))
self.positions_from.set_xmltag(None)
self.add_col(am.ArrayConf('positions_to', 0.0,
dtype=np.float32,
#choices = OPTIONMAP_POS_ARRIVAL,
perm='r',
name='Arrival pos',
unit='m',
info="Position on edge at the moment of arrival.",
#xmltag = 'arrivalPos',
#xmlmap = get_inversemap(OPTIONMAP_POS_ARRIVAL),
))
self.positions_to.set_xmltag(None)
def append_stage(self, id_plan, time_start=-1.0,
id_edge_from=-1, id_edge_to=-1,
position_edge_from=0.1, position_edge_to=0.0,
**kwargs):
# print 'WalkStages.append_stage',id_stage
if 'duration' in kwargs:
duration = kwargs['duration']
else:
dist, duration = self.plan_walk(id_edge_from, id_edge_to,
position_edge_from, position_edge_to)
id_stage, time_end = StageTypeMixin.append_stage(self,
id_plan,
time_start,
durations=duration,
ids_edge_from=id_edge_from,
ids_edge_to=id_edge_to,
positions_from=position_edge_from,
positions_to=position_edge_to,
)
return id_stage, time_end
def modify_stage(self, id_stage, time_start,
id_edge_from=-1, id_edge_to=-1,
position_edge_from=0.1, position_edge_to=0.0):
dist, duration = self.plan_walk(id_edge_from, id_edge_to,
position_edge_from, position_edge_to)
time_end = StageTypeMixin.modify_stage(self, id_stage, time_start,
durations=duration,
ids_edge_from=id_edge_from,
ids_edge_to=id_edge_to,
positions_from=position_edge_from,
positions_to=position_edge_to,
)
return time_end
def plan_walk(self, id_edge_from, id_edge_to, pos_from, pos_to, id_mode=1):
"""
Routing for pedestrians.
Currently limited to estimation of line of sight distance
and walk time.
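
A minimal sketch of the estimate, as computed below:

    dist = np.sqrt(np.sum((coord_to - coord_from)**2))
    duration = dist/scenario.net.modes.speeds_max[id_mode]

For example, a 150 m line-of-sight distance at a maximum walking
speed of 1.5 m/s gives a duration of 100 s.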
"""
# print 'plan_walk',id_edge_from, id_edge_to,id_mode, pos_from, pos_to
scenario = self.get_virtualpop().get_scenario()
edges = scenario.net.edges
coord_from = edges.get_coord_from_pos(id_edge_from, pos_from)
coord_to = edges.get_coord_from_pos(id_edge_to, pos_to)
# from lanes, more precise, but less efficient and less robust
# lanes = scenario.net.lanes
#coord_from = lanes.get_coord_from_pos(edges.ids_lane[id_edge_from][0], pos_from)
#coord_to = lanes.get_coord_from_pos(edges.ids_lane[id_edge_to][0], pos_to)
# print ' coord_from',coord_from,type(coord_from)
# print ' coord_to',coord_from,type(coord_to)
# print ' delta',coord_to-coord_from
# line of sight distance
dist = np.sqrt(np.sum((coord_to-coord_from)**2))
duration_approx = dist/scenario.net.modes.speeds_max[id_mode]
# print ' dist,duration',dist,duration_approx,scenario.net.modes.speeds_max[id_mode]
return dist, duration_approx
def to_xml(self, id_stage, fd, indent=0):
#scenario = self.parent.get_scenario()
#edges = scenario.net.edges
edges = self.ids_edge_from.get_linktab()
edgeindex = edges.ids_sumo
ind = self.get_ind(id_stage)
id_edge_from = self.ids_edge_from.value[ind]
id_edge_to = self.ids_edge_to.value[ind]
fd.write(xm.start('walk', indent=indent))
fd.write(xm.num('from', edgeindex[id_edge_from]))
fd.write(xm.num('to', edgeindex[id_edge_to]))
pos = self.positions_from.value[ind]
length = max(edges.lengths[id_edge_from]-4.0, 0.0)
# deprecated
# if (pos>0) & (pos < length ):
# fd.write(xm.num('departPos', pos))
#
# elif pos < 0:
# fd.write(xm.num('departPos', 0.0))
#
# else:
# fd.write(xm.num('departPos', length))
pos = self.positions_to.value[ind]
length = max(edges.lengths[id_edge_to]-4.0, 0.0)
if (pos > 0) & (pos < length):
fd.write(xm.num('arrivalPos', pos))
elif pos < 0:
fd.write(xm.num('arrivalPos', 0.0))
else:
fd.write(xm.num('arrivalPos', length))
fd.write(xm.stopit()) # ends walk
class ActivityStages(StageTypeMixin):
def __init__(self, ident, stages, name='Activities'):
self.init_stagetable(ident, stages, name=name, info='Do some activity at a position of a lane.')
self._init_attributes()
def _init_attributes(self):
# TODO: this structure needs review: the private vehicle is part of a person, not a stage
# street parking at home and work could be in stage. Private garage is part of person...
activities = self.get_virtualpop().get_activities()
self.add_col(am.IdsArrayConf('ids_activity', activities,
groupnames=['parameters'],
name='Activity ID',
info='Scheduled activity ID. This is the activity which should be carried out in this stage.',
))
# this is redundant information but here for speed when writing xml
self.add_col(am.ArrayConf('names_activitytype', '',
groupnames=['parameters'],
dtype=np.object,
perm='r',
name='Type name',
info="Name of activity type.",
xmltag='actType',
#xmlmap = get_inversemap( activitytypes.names.get_indexmap()),
))
# self.add_col(am.IdsArrayConf( 'ids_facility', self.get_virtualpop().get_landuse().facilities,
# groupnames = ['parameters'],
# name = 'ID facility',
# info = 'Facility where activity takes place.',
# ))
# lane and position can be computed from facility
self.add_col(am.IdsArrayConf('ids_lane', self.get_virtualpop().get_net().lanes,
groupnames=['parameters'],
name='Lane ID',
info='Lane ID where activity takes place.',
#xmltag = 'lane',
))
# for update..can be removed
self.ids_lane.set_xmltag(None)
self.add_col(am.ArrayConf('positions', 0.0,
groupnames=['parameters'],
dtype=np.float32,
perm='r',
name='Lane pos',
unit='m',
info="Position on lane nearby where activity takes place.",
#xmltag = 'startPos',
#xmlmap = get_inversemap(OPTIONMAP_POS_ARRIVAL),
))
self.positions.set_xmltag(None)
self.add_col(am.ArrayConf('durations', 0.0,
groupnames=['parameters'],
dtype=np.int32,
perm='r',
name='Duration',
unit='s',
info="Duration of activity.",
xmltag='duration',
#xmlmap = get_inversemap(OPTIONMAP_POS_ARRIVAL),
))
self.durations.set_xmltag('duration')
def get_edges_positions(self, ids_stage):
"""
Returns road edge and positions of activity.
"""
return self.ids_lane.get_linktab().ids_edge[self.ids_lane[ids_stage]],\
self.positions[ids_stage]
def _init_constants(self):
#self._activities = self.get_virtualpop().get_activities()
#self._activitytypes = self.get_virtualpop().get_demand().activitytypes
# self.do_not_save_attrs(['_activities','_activitytypes'])
pass
def to_xml(self, id_stage, fd, indent=0):
# <stop lane="1/4to2/4_0" duration="20" startPos="40" actType="singing"/>
#ind = self.get_ind(id_stage)
fd.write(xm.start('stop', indent=indent))
lanes = self.get_virtualpop().get_net().lanes
id_lane = self.ids_lane[id_stage]
# get all xml configs and dump to fd
for attrconf in self.get_group('parameters'):
# this will write only if a xmltag is defined
attrconf.write_xml(fd, id_stage)
fd.write(xm.num('lane', lanes.get_id_sumo(id_lane)))
#fd.write(xm.num('duration', self.durations[id_stage]))
pos = self.positions[id_stage]
length = max(lanes.get_lengths(id_lane)-4.0, 0.0)
if (pos > 0) & (pos < length):
fd.write(xm.num('startPos', pos))
elif pos < 0:
fd.write(xm.num('startPos', 0.0))
else:
fd.write(xm.num('startPos', length))
#fd.write(xm.num('lane', self.cols.id_lane[ind]))
#fd.write(xm.num('startPos', self.cols.pos_lane[ind]))
#fd.write(xm.num('duration', self.cols.duration[ind]))
# fd.write(xm.num('actType', self._activitytypes.names[self._activities.)
fd.write(xm.stopit()) # ends activity
class Plans(am.ArrayObjman):
def __init__(self, population, **kwargs):
"""Plans database."""
self._init_objman(ident='plans',
parent=population,
name='Plans',
info='Mobility plan for virtual population.',
#xmltag = ('plans','plan',None),
version=0.1,
**kwargs)
self._init_attributes()
self._init_constants()
def _init_attributes(self):
# upgrade
if self.get_version() < 0.1:
pass
self.set_version(0.1)
persons = self.parent
#self.add(cm.ObjConf(StageTables('stagetables',self)) )
self.add_stagetable('walks', WalkStages)
self.add_stagetable('autorides', AutorideStages)
self.add_stagetable('bikerides', BikerideStages)
self.add_stagetable('transits', TransitStages)
self.add_stagetable('activities', ActivityStages)
self.add_col(am.IdsArrayConf('ids_person', persons,
groupnames=['links'],
name='Person ID',
info='Person ID to who this plan belongs to.',
))
self.add_col(am.IdsArrayConf('ids_strategy', persons.get_strategies(),
groupnames=['links'],
name='Strategy ID',
info='Strategy ID with which this plan has been generated.',
))
self.add_col(am.ArrayConf('times_begin', -np.inf,
dtype=np.float32,
name='Begin time',
info='Time when active travelling begins. This is the time in the simulation when the person appears. The first activity is not simulated.',
unit='s',
))
self.add_col(am.ArrayConf('times_end', -np.inf,
dtype=np.float32,
name='End time',
info='Time when active travelling ends. This is the time in the simulation when the person disappears. The last activity is not simulated.',
unit='s',
))
self.add_col(am.ArrayConf('times_est', 0.0,
dtype=np.float32,
name='Estim. time',
info='Estimated time duration to execute travel plan. Activity times are excluded.',
unit='s',
))
self.add_col(am.ArrayConf('times_exec', 0.0,
dtype=np.float32,
name='Exec. time',
info='Last plan execution time from simulation run.',
unit='s',
))
self.add_col(am.ArrayConf('utilities', 0.0,
dtype=np.float32,
name='utility',
info='Utility of plan.',
))
self.add_col(am.ArrayConf('probabilities', 1.0,
dtype=np.float32,
name='Probability',
info='Probability that the plan is selected out of all plans available for one person.',
))
self.add_col(am.TabIdListArrayConf('stagelists',
name='Stages',
info='Sequence of stages of this plan.',
))
def _init_constants(self):
#self._id_mode_bike = self.parent.get_scenario().net.modes.get_id_mode('bicycle')
# self.do_not_save_attrs([])
pass
def clear_plans(self):
print 'Plans.clear_plans'
for stagetable in self.get_stagetables():
# print ' stagetable',stagetable
stagetable.clear()
self.clear_rows()
# for attrconfig in self.get_attrsman().get_colconfigs():
# print ' clear attrconfig',attrconfig.attrname
# attrconfig.clear()
# no: self.clear()
def add_stagetable(self, ident, StagesClass, **kwargs):
if not hasattr(self, ident):
self.add(cm.ObjConf(StagesClass(ident, self, **kwargs),
groupnames=['stagetables']))
return getattr(self, ident).get_value()
def get_stagetable(self, ident):
return getattr(self, ident).get_value()
def get_stagetables(self):
"""Return a list of with all stage objects"""
stageobjs = []
# print 'get_stagetables',self.get_group('stagetables')
for stageobj in self.get_group('stagetables'):
stageobjs.append(stageobj)
return stageobjs
def prepare_stagetables(self, idents_stagetable):
# print 'prepare_stages',stagenames
#ids = self.names.get_ids_from_indices_save(stagenames)
# print ' ids',ids
# print ' self.stagetables[ids]',self.stagetables[ids]
for ident_stagetable in idents_stagetable:
self.get_stagetable(ident_stagetable).prepare_planning()
def get_stages(self, id_plan):
stages = self.stagelists[id_plan]
if stages is None:
return []
else:
return stages
def append_stage(self, id_plan, stage, id_stage):
# test: stage = cm.TableEntry(stagetable, id_plan)
# print 'Plans.append_stage',self,id_plan, stage, id_stage
if self.stagelists[id_plan] is None:
self.stagelists[id_plan] = [(stage, id_stage)]
else:
self.stagelists[id_plan].append((stage, id_stage))
# print ' after append stagelists[id_plan]',type(self.stagelists[id_plan]),self.stagelists[id_plan]
# def prepare_stages(self,stagenames):
# self.get_stagetables().prepare_stages(stagenames)
def get_timing_laststage(self, id_plan):
"""
Return time_start and duration of last stage of plan id_plan
"""
stages_current = self.stagelists[id_plan]
if stages_current is not None:
stage_last, id_stage_last = stages_current[-1]
return stage_last.get_timing(id_stage_last)
else:
return -1, -1
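# Illustrative sketch: the timing of the last stage can be used to derive
# the start time of a stage to be appended (hypothetical usage):
#
#   time_start_prev, duration_prev = plans.get_timing_laststage(id_plan)
#   if (duration_prev >= 0) & (time_start_prev >= 0):
#       time_start = time_start_prev + duration_prev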
class Virtualpopulation(DemandobjMixin, am.ArrayObjman):
def __init__(self, ident, demand, **kwargs):
self._init_objman(ident=ident,
parent=demand,
name='Virtual population',
info='Contains information of each individual of the virtual population.',
version=0.2, # only for new scenarios
**kwargs)
self._init_attributes()
self._init_constants()
def _init_attributes(self):
# update here
#
self.set_version(0.2)
demand = self.parent
scenario = demand.get_scenario()
# --------------------------------------------------------------------
# individual vehicles tables
self.add(cm.ObjConf(IndividualAutos('iautos', self)))
self.add(cm.ObjConf(IndividualBikes('ibikes', self)))
self.add(cm.ObjConf(IndividualMotorcycles('imotos', self)))
# --------------------------------------------------------------------
# activity table
#self.add(cm.ObjConf(ActivityTypes('activitytypes', self)) )
self.add(cm.ObjConf(Activities('activities', self)))
# --------------------------------------------------------------------
# strategies table (must be before plans)
self.add(cm.ObjConf(Strategies('strategies', self)))
# --------------------------------------------------------------------
# plans table
self.add(cm.ObjConf(Plans(self)))
self.get_strategies().add_default()
# ===================================================================
# Add person attributes
# --------------------------------------------------------------------
# socio economic parameters
self.add_col(am.ArrayConf('identifications', '',
dtype=np.object,
groupnames=['socioeconomic'],
name='Name',
info='Identification or name of person.',
))
self.add_col(am.ArrayConf('ids_gender', default=-1,
dtype=np.int32,
groupnames=['socioeconomic'],
choices=GENDERS,
name='Gender',
info='Gender of person.',
))
self.add_col(am.ArrayConf('years_birth', default=-1,
dtype=np.int32,
groupnames=['socioeconomic'],
name='Birth year',
info='Year when person has been born.',
))
self.add_col(am.ArrayConf('ids_occupation', default=OCCUPATIONS['unknown'],
dtype=np.int32,
choices=OCCUPATIONS,
groupnames=['socioeconomic'],
name='Occupation',
info='Type of occupation.',
))
# --------------------------------------------------------------------
# household parameters
self.add_col(am.ArrayConf('numbers_houehold', default=1,
dtype=np.int32,
groupnames=['household'],
name='Number in household',
info='Number of persons in household.',
))
self.add_col(am.ArrayConf('numbers_minor', default=0,
dtype=np.int32,
groupnames=['household'],
name='Number minors',
info='Number of minors in household. In the context of traffic simulations, minors are persons who need to be accompanied by adults when travelling.',
))
# --------------------------------------------------------------------
# activity parameters
# lists with activity patterns
self.add_col(am.IdlistsArrayConf('activitypatterns', self.activities.get_value(),
groupnames=['activity'],
name='Activity IDs',
info="Sequence of activity IDs to be accomplished by the person.",
))
# --------------------------------------------------------------------
# mobility parameters
# --------------------------------------------------------------------
# give a pedestrian vtype to each person
vtypes = self.get_demand().vtypes
self.add_col(am.IdsArrayConf('ids_vtype', vtypes,
id_default=vtypes.select_by_mode(mode='pedestrian')[0],
groupnames=['mobility'],
name='Ped. type',
info='The pedestrian type ID specifies the walking characteristics and visual representation of the person. In SUMO terminology, this is the vehicle type.',
#xmltag = 'type',
))
self.add_col(am.ArrayConf('traveltimebudgets', default=55*60,
dtype=np.int32,
groupnames=['mobility'],
name='time budget',
unit='s',
info='Daily time budget used for traveling.',
))
self.add_col(am.IdsArrayConf('ids_mode_preferred', scenario.net.modes,
groupnames=['mobility'],
name='ID preferred mode',
info='ID of preferred transport mode of person.',
))
self.add_col(am.IdsArrayConf('ids_iauto', self.get_iautos(),
groupnames=['mobility'],
name='ID auto',
info='ID of individual auto. Negative value means no auto available.',
))
self.add_col(am.IdsArrayConf('ids_ibike', self.get_ibikes(),
groupnames=['mobility'],
name='ID bike',
info='ID of individual bicycle. Negative value means no bike available.',
))
self.add_col(am.IdsArrayConf('ids_imoto', self.get_imotos(),
groupnames=['mobility'],
name='ID motorcycle',
info='ID of individual motorcycle. Negative value means no motorcycle available.',
))
self.add_col(am.ArrayConf('dists_walk_max', default=300.0,
dtype=np.float32,
groupnames=['mobility'],
name='Max. walk dist',
info='Maximum acceptable walking distance between origin and destination or for transfers between modes.',
))
# --------------------------------------------------------------------
# plans
self.add_col(am.IdsArrayConf('ids_plan', self.get_plans(),
groupnames=['plans'],
name='ID Plan',
info='Currently selected mobility plan ID of person. This is the plan which will be simulated.',
))
self.add_col(am.IdlistsArrayConf('lists_ids_plan', self.get_plans(),
groupnames=['plans'],
name='Plan IDs',
info='List with alternative, feasible mobility plan IDs for each person.',
))
def _init_constants(self):
modes = self.get_scenario().net.modes
self.id_mode_bike = modes.get_id_mode('bicycle')
self.id_mode_auto = modes.get_id_mode('passenger')
self.id_mode_moto = modes.get_id_mode('motorcycle')
self._edges = self.get_net().edges
self.do_not_save_attrs(['id_mode_bike', 'id_mode_auto', 'id_mode_moto',
'_edges'])
def get_demand(self):
return self.parent
def clear_population(self):
# self.clear()
self.clear_plans()
self.clear_ivehicles()
# TODO: this should disappear
self.get_landuse().parking.clear_booking()
# for attrconfig in self.get_attrsman().get_colconfigs():
# attrconfig.clear()
self.clear_rows()
def clear_plans(self):
# print 'clear_plans',self.get_stagetables()
self.ids_plan.reset()
self.lists_ids_plan.reset()
self.get_plans().clear_plans()
# TODO: this should disappear
self.get_landuse().parking.clear_booking()
def clear_ivehicles(self):
"""
Clear all individually owned vehicles.
"""
print 'clear_ivehicles'
self.get_iautos().clear_vehicles()
self.get_ibikes().clear_vehicles()
self.get_imotos().clear_vehicles()
def get_activities(self):
return self.activities.get_value()
def get_strategies(self):
return self.strategies.get_value()
def get_plans(self):
return self.plans.get_value()
def get_iautos(self):
return self.iautos.get_value()
def get_ibikes(self):
return self.ibikes.get_value()
def get_imotos(self):
return self.imotos.get_value()
def get_stagetables(self):
return self.get_plans().get_stagetables()
def get_landuse(self):
return self.parent.get_scenario().landuse
def get_scenario(self):
return self.parent.get_scenario()
def get_net(self):
return self.parent.get_scenario().net
def get_ptlines(self):
return self.get_demand().ptlines
def get_ptstops(self):
return self.get_net().ptstops
def get_id_sumo_from_id(self, id_pers):
return u'vp.%s' % id_pers
def get_id_from_id_sumo(self, id_veh_sumo):
if len(id_veh_sumo.split('.')) == 2:
prefix, id_pers = id_veh_sumo.split('.')
if prefix == 'vp':
return int(id_pers)
else:
return -1
return -1
def get_ids_from_ids_sumo(self, ids_sumo):
ids = np.zeros(len(ids_sumo), dtype=np.int32)
for i, id_sumo in enumerate(ids_sumo):
ids[i] = self.get_id_from_id_sumo(id_sumo)
return ids
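# Illustrative sketch of the SUMO ID scheme for virtual persons:
#   self.get_id_sumo_from_id(12)        -> u'vp.12'
#   self.get_id_from_id_sumo('vp.12')   -> 12
#   self.get_id_from_id_sumo('bus.3')   -> -1 (not a virtualpop ID)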
def get_time_depart_first(self):
# print 'Virtualpop.get_time_depart_first'
if len(self.get_plans()) > 0:
plans = self.get_plans()
ids = self.select_ids(self.ids_plan.get_value() >= 0)
# print ' ids',ids
return float(np.min(plans.times_begin[self.ids_plan[ids]]))
else:
return 0.0
def get_time_depart_last(self):
if len(self.get_plans()) > 0:
# todo: this can be improved by adding plan execution time
plans = self.get_plans()
ids = self.select_ids(self.ids_plan.get_value() >= 0)
return float(np.max(plans.times_end[self.ids_plan[ids]]))
else:
return 0.0
# def add_stagetable(self,ident,StageClass, **kwargs):
# print 'add_stagetable',ident,StageClass#,kwargs
# if not hasattr(self,ident):
# #print ' StageClass',StageClass(ident, self, **kwargs)
# #print ' ObjConf',cm.ObjConf(StageClass(ident, self, **kwargs), goupnames = ['stages'])
# self.add(cm.ObjConf(StageClass(ident, self, **kwargs), goupnames = ['stages']) )
# return getattr(self, ident).get_value()
# def get_stagetable(self, ident):
# return getattr(self, ident).get_value()
# def get_stagetables(self):
# """Return a list of with all stage objects"""
# stageobjs = []
# #print 'get_stagetables',self.get_group('stages')
# for stageobj in self.get_group('stages'):
# stageobjs.append(stageobj)
# return stageobjs
def make_multiple(self, n, **kwargs):
return self.add_rows(n=n, **kwargs)
def disaggregate_odflow(self, time_start, time_end, id_mode,
ids_fac, probs_fac_orig, probs_fac_dest,
tripnumber_tot,
id_activitytype_orig,
id_activitytype_dest,
hour_begin_earliest_orig,
hour_begin_earliest_dest,
hour_begin_latest_orig,
hour_begin_latest_dest,
duration_min_orig,
duration_min_dest,
duration_max_orig,
duration_max_dest,
scale=1.0,
hour_offset=8.0, # 08:00
hour_tripbudget=25.0/60, # 25min
**kwargs
):
"""
Disaggregation of demand dem from taz id_zone_orig to id_zone_dest with id_mode
during time interval time_start,time_end, and creation of persons
which are parameterized accordingly.
The facility type at origin will be landusetype_orig
and at destination landusetype_dest.
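
Activity timing is derived from the flow interval: the first activity
is assumed to end within [time_start, time_end] (shifted by hour_offset)
and the activity at destination is assumed to begin roughly one trip
budget (hour_tripbudget) later.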
"""
tripnumber = int(scale*tripnumber_tot)
print 'disaggregate_odflow', time_start, time_end, id_mode, tripnumber
print ' id_activitytype_orig,id_activitytype_dest', id_activitytype_orig, id_activitytype_dest
# print ' probs_orig',sum(probs_fac_orig),'\n',probs_fac_orig
# print ' probs_dest',sum(probs_fac_dest),'\n',probs_fac_dest
# is there a chance to find facilities to locate persons in
# origin and destination zone
#activitytypes= self.get_scenario().demand.activitytypes
#ctivitynames = self.activitytypes.names
#get_id_act = activitynames.get_id_from_index
if (np.sum(probs_fac_orig) > 0) & (np.sum(probs_fac_dest) > 0):
# if id_mode == self._id_mode_bike:
# are_bikeowner = np.ones(tripnumber, dtype=np.bool)
# else:
# # TODO: assign a default percentage of bike owners
# are_bikeowner = np.zeros(tripnumber, dtype=np.bool)
#times_start = np.random.randint(time_start,time_end,tripnumber)
ids_person = self.make_multiple(tripnumber,
ids_mode_preferred=id_mode * np.ones(tripnumber, dtype=np.int32),
#times_start = times_start,
)
unitvec_int = np.ones(tripnumber, dtype=np.int32)
unitvec_float = np.ones(tripnumber, dtype=np.float32)
# activity timing
#hours_start = hour_offset + np.array(times_start,dtype = np.float32)/3600
#tol_orig = hour_begin_latest_orig+duration_max_orig-(hour_begin_earliest_orig+duration_min_orig)
# print ' hours_start[:3]',hours_start[:3]
# print ' tol_orig',tol_orig
#hours_end_est = hours_start + hour_tripbudget
#tol_dest = hour_begin_latest_dest-hour_begin_earliest_dest
# print ' hours_end_est[:3]',hours_end_est[:3]
# print ' tol_dest',tol_dest
#self.map_id_act_to_ids_facattrconf[id_activitytype_orig][ids_person] = ids_fac[random_choice(tripnumber, probs_fac_orig)]
#self.map_id_act_to_ids_facattrconf[id_activitytype_dest][ids_person] = ids_fac[random_choice(tripnumber, probs_fac_dest)]
activities = self.get_activities()
# fix first departure hours for first activity imposed by
# OD flow data
hours_end_earliest_orig = (hour_offset+float(time_start)/3600)*unitvec_float
hours_end_latest_orig = (hour_offset+float(time_end)/3600)*unitvec_float
duration_mean_orig = 0.5*(duration_min_orig+duration_max_orig)
hours_begin_earliest_orig = hours_end_earliest_orig-duration_mean_orig
hours_begin_latest_orig = hours_end_latest_orig-duration_mean_orig
# this estimate could be *optionally* replaced by preliminary routing
hours_begin_earliest_dest = hours_end_earliest_orig+hour_tripbudget
hours_begin_latest_dest = hours_end_latest_orig+hour_tripbudget
#hours_end_earliest_dest = hours_begin_earliest_dest+duration_min_dest
#hours_end_latest_dest = hours_begin_latest_dest+duration_max_dest
ids_activity_orig = activities.add_rows(
n=tripnumber,
ids_activitytype=id_activitytype_orig * unitvec_int,
ids_facility=ids_fac[random_choice(tripnumber, probs_fac_orig)],
hours_begin_earliest=hours_begin_earliest_orig,
hours_begin_latest=hours_begin_latest_orig,
durations_min=duration_mean_orig-1.0/60.0 * unitvec_float,  # min must be less than max to prevent crash
durations_max=duration_mean_orig * unitvec_float,
)
ids_activity_dest = activities.add_rows(
n=tripnumber,
ids_activitytype=id_activitytype_dest * unitvec_int,
ids_facility=ids_fac[random_choice(tripnumber, probs_fac_dest)],
hours_begin_earliest=hours_begin_earliest_dest,
hours_begin_latest=hours_begin_latest_dest,
durations_min=duration_min_dest*unitvec_float,
durations_max=duration_max_dest*unitvec_float,
)
for id_person, id_activity_orig, id_activity_dest in zip(ids_person, ids_activity_orig, ids_activity_dest):
self.activitypatterns[id_person] = [id_activity_orig, id_activity_dest]
#activitypatterns = np.zeros((tripnumber,2), dtype = np.int32)
#activitypatterns[:,0]= ids_activity_orig
#activitypatterns[:,1]= ids_activity_dest
# try convert in this way to lists
# print ' activitypatterns',activitypatterns.tolist()
#self.activitypatterns[ids_person] = activitypatterns.tolist()
# for id_person, act_pattern in zip(ids_person,activitypatterns.tolist()):
# self.activitypatterns[id_person] = act_pattern
return ids_person
else:
print 'WARNING in disaggregate_odflow: no probabilities', np.sum(probs_fac_orig), np.sum(probs_fac_dest)
return []
def create_pop_from_odflows(self, is_use_landusetypes=False, **kwargs):
"""
        Creates a population and assigns home and activity facilities
        according to the OD matrix defined in odflows.
        The population is distributed within the zones according to
        the area of the facilities.
        If is_use_landusetypes is set, the landuse types of the facilities
        at origin and destination are also taken into account.
"""
print 'create_pop_from_odflows'
demand = self.parent
odflowtab = demand.odintervals.generate_odflows()
landuse = self.get_landuse()
activitytypes = demand.activitytypes
log = kwargs.get('logger', self.get_logger())
if is_use_landusetypes:
# TODO: not tested and works only for one landusetype per activity
#activitytypes = self.activitytypes.get_value()
#id_landusetype_orig = activitytypes.ids_landusetypes[kwargs['id_activitytype_orig']][0]
#id_landusetype_dest = activitytypes.ids_landusetypes[kwargs['id_activitytype_dest']][0]
pass
# TODO: get activitypes from activity
#probs_fac, ids_fac = self.get_landuse().facilities.get_departure_probabilities_landuse()
else:
probs_fac_area, ids_fac = landuse.facilities.get_departure_probabilities()
# self._make_map_id_act_to_ids_facattrconf()
ids_flow = odflowtab.get_ids()
n_flows = len(ids_flow)
ids_activitytype_orig = odflowtab.ids_activitytype_orig[ids_flow]
ids_activitytype_dest = odflowtab.ids_activitytype_dest[ids_flow]
i = 0.0
for id_flow,\
id_orig,\
id_dest,\
id_mode,\
time_start,\
time_end,\
tripnumber,\
id_activitytype_orig,\
id_activitytype_dest,\
hour_begin_earliest_orig,\
hour_begin_earliest_dest,\
hour_begin_latest_orig,\
hour_begin_latest_dest,\
duration_min_orig,\
duration_min_dest,\
duration_max_orig,\
duration_max_dest\
in zip(ids_flow,
odflowtab.ids_orig[ids_flow],
odflowtab.ids_dest[ids_flow],
odflowtab.ids_mode[ids_flow],
odflowtab.times_start[ids_flow],
odflowtab.times_end[ids_flow],
odflowtab.tripnumbers[ids_flow],
ids_activitytype_orig,
ids_activitytype_dest,
activitytypes.hours_begin_earliest[ids_activitytype_orig],
activitytypes.hours_begin_earliest[ids_activitytype_dest],
activitytypes.hours_begin_latest[ids_activitytype_orig],
activitytypes.hours_begin_latest[ids_activitytype_dest],
activitytypes.durations_min[ids_activitytype_orig],
activitytypes.durations_min[ids_activitytype_dest],
activitytypes.durations_max[ids_activitytype_orig],
activitytypes.durations_max[ids_activitytype_dest],
):
log.progress(i/n_flows*100)
i += 1
if is_use_landusetypes:
# TODO: not tested and works only for one landusetype per activity
# but in activity typrs several landuse types are defined
# idea: add the probabilities for landuse types of origin and dest
#probs_fac_orig = probs_fac[id_orig][id_landusetype_orig]
#probs_fac_dest = probs_fac[id_dest][id_landusetype_dest]
pass
else:
probs_fac_orig = probs_fac_area[id_orig]
probs_fac_dest = probs_fac_area[id_dest]
self.disaggregate_odflow(time_start,
time_end,
id_mode,
ids_fac,
probs_fac_orig,
probs_fac_dest,
tripnumber,
id_activitytype_orig,
id_activitytype_dest,
hour_begin_earliest_orig,
hour_begin_earliest_dest,
hour_begin_latest_orig,
hour_begin_latest_dest,
duration_min_orig,
duration_min_dest,
duration_max_orig,
duration_max_dest,
**kwargs
)
# return odflowtab
def add_plans(self, ids_person, id_strategy=-1):
print 'add_plans n, id_strategy', len(ids_person), id_strategy
n_plans = len(ids_person)
# print ' get_plans',self.get_plans()
# print ' stagetables',self.get_plans().get_stagetables().get_ident_abs()
# print ' stagetables',self.get_plans().get_stagetables().stagetables.get_value()
ids_plan = self.get_plans().add_rows(n=n_plans,
ids_person=ids_person,
ids_strategy=id_strategy*np.ones(n_plans, dtype=np.int32),
)
# print ' post stagetables',self.get_plans().get_stagetables().get_ident_abs()
# print ' post stagetables',self.get_plans().get_stagetables().stagetables.get_value()
# return ids_plan
self.ids_plan[ids_person] = 1*ids_plan
for id_person, id_plan in zip(ids_person, ids_plan):
if self.lists_ids_plan[id_person] is None:
self.lists_ids_plan[id_person] = [id_plan]
else:
self.lists_ids_plan[id_person].append(id_plan)
return ids_plan
def plan_with_strategy(self, id_strategy, evalcrit=0, logger=None):
strategy = self.get_strategies().strategies[id_strategy]
ids_person = self.get_ids()
evals = strategy.preevaluate(ids_person)
# TODO: check whether at least two activities are in
# activitypattern...could be done centrally
ids_person_preeval = ids_person[evals >= evalcrit]
print 'plan_with_strategy', strategy.ident, 'n_pers', len(ids_person_preeval)
strategy.plan(ids_person_preeval, logger=logger)
# def get_times(self, ind, ids_person = None, pdf = 'unit'):
# """
# Returns person IDs, activity IDs and initial times
# for persons with at least one acivity.
#
# ids_person: array of preselected person IDs
#
# pdf: gives the probability density function to be chosen to determin
# the departure times within the initial time intervals given by
# initial activity attributes.
# """
# ids_person, ids_activity = self.get_activity_from_pattern(0,ids_person)
# times_init = self.get_activities().get_times_init(ids_activity, pdf)
#
# return ids_person, ids_activity, times_init
def get_activities_from_pattern(self, ind, ids_person=None):
"""
        Returns person IDs and from/to activity IDs for persons who perform
        an activity at index ind followed by another activity at ind+1.
Returns arrays: ids_person, ids_activity_from, ids_activity_to
ind: index of activity in activity pattern, starting with 0
ids_person: array of preselected person IDs
"""
ids_person_activity = []
ids_activity_from = []
ids_activity_to = []
if ids_person is None:
ids_person = self.get_ids()
for id_person, activitypattern in zip(ids_person, self.activitypatterns[ids_person]):
# has person activity at index ind?
if len(activitypattern) > ind+1:
ids_person_activity.append(id_person)
ids_activity_from.append(activitypattern[ind])
ids_activity_to.append(activitypattern[ind+1])
return ids_person_activity, ids_activity_from, ids_activity_to
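    # Illustrative example (hypothetical IDs): for a person whose activity
    # pattern is [id_home, id_work], get_activities_from_pattern(0) yields
    # ([id_person], [id_home], [id_work]), i.e. the home-to-work leg.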
# activities.hours_begin_earliest[ids_person_activity],
# activities.hours_begin_latest[ids_person_activity],
# activities.durations_min[ids_person_activity],
# activities.durations_max[ids_person_activity],
def get_vtypes(self):
ids_vtypes = set()
# get individual vehicle types
ids_vtypes.update(self.get_iautos().ids_vtype.get_value())
ids_vtypes.update(self.get_imotos().ids_vtype.get_value())
ids_vtypes.update(self.get_ibikes().ids_vtype.get_value())
# add public transport
ids_vtypes.update(self.get_ptlines().ids_vtype.get_value())
# add pedestrian types
ids_vtypes.update(self.ids_vtype.get_value())
return ids_vtypes
def select_plans_preferred_mode(self, fraction=0.1, **kwargs):
"""
        Selects as current plan the one that best satisfies the preferred mode.
"""
strategies = self.get_strategies()
ids_strat = strategies.get_ids()
n_strat = len(ids_strat)
ids_pers_all = self.get_ids()
ids_pers = ids_pers_all[np.random.random(len(ids_pers_all)) > (1.0-fraction)]
n_pers = len(ids_pers)
preevals = -1*np.ones((np.max(ids_pers)+1, np.max(ids_strat)+1), dtype=np.int32)
for ind, id_strategy, strategy in zip(range(n_strat), ids_strat, strategies.strategies[ids_strat]):
preevals[ids_pers, id_strategy] = strategy.preevaluate(ids_pers)
preferred = 2
plans = self.get_plans()
self.ids_plan.reset()
for id_pers, ids_plan in zip(ids_pers, self.lists_ids_plan[ids_pers]):
if len(ids_plan) > 0:
# print ' id_pers,ids_plan',id_pers,ids_plan
# print ' ids_strat, preeval',plans.ids_strategy[ids_plan],preevals[id_pers,plans.ids_strategy[ids_plan]]
inds_sel = preevals[id_pers, plans.ids_strategy[ids_plan]] == preferred
# print ' inds_sel',inds_sel,np.flatnonzero(inds_sel),inds_sel.dtype
if len(inds_sel) > 0:
#ids_plan_sel = np.array(ids_plan)[inds_sel]
# print ' ids_plan_sel',ids_plan_sel
# at least one plan contains preferred mode
                    self.ids_plan[id_pers] = np.array(ids_plan)[inds_sel][0]  # take the first plan matching the preferred mode
# else:
# assumption: a plan for the preferred mode always exists
# # no preferred mode found try to satisfy best possible
# #ids_plan[preevals[id_pers,plans.ids_strategy[ids_plan]] == preferred]
# self.ids_plan[id_pers] = -1
return True
def select_plans_min_time_est(self, fraction=1.0, timedev=-1.0, c_probit=-1.0, **kwargs):
"""
        Select the plan with minimum estimated travel time as current plan.
"""
ids_pers_all = self.get_ids()
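        # Selection is an argmin over estimated times plus an optional random
        # term: a fixed normal deviation (timedev) or probit-style noise whose
        # deviation grows with the travel time itself (c_probit * t).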
ids_pers = ids_pers_all[np.random.random(len(ids_pers_all)) > (1.0-fraction)]
times_est = self.get_plans().times_est
# self.ids_plan.reset()
for id_pers, ids_plan_all in zip(ids_pers, self.lists_ids_plan[ids_pers]):
ids_plan = np.array(ids_plan_all, dtype=np.int32)[times_est[ids_plan_all] > 0.1]
if len(ids_plan) > 0:
# print ' id_pers,ids_plan',id_pers,ids_plan
if timedev > 0.1:
times_rand = np.random.normal(0.0, timedev, len(ids_plan))
elif c_probit > 0:
times_rand = np.zeros(len(ids_plan), dtype=np.float32)
for i, t in zip(xrange(len(ids_plan)), times_est[ids_plan]):
times_rand[i] = np.random.normal(0.0, c_probit * t, 1)
else:
times_rand = np.zeros(len(ids_plan), dtype=np.float32)
self.ids_plan[id_pers] = np.array(ids_plan)[np.argmin(times_est[ids_plan]+times_rand)]
return True
def select_plans_random(self, fraction=0.1, **kwargs):
"""
A fraction of the population changes a plan.
The new plans are chosen randomly.
"""
ids_pers_all = self.get_ids()
print 'select_plans_random', len(ids_pers_all), fraction
times_est = self.get_plans().times_est
# self.ids_plan.reset()
# ids_mode[random_choice(n,shares/np.sum(shares))]
ids_pers = ids_pers_all[np.random.random(len(ids_pers_all)) > (1.0-fraction)]
print ' ids_pers', ids_pers
for id_pers, ids_plan in zip(ids_pers, self.lists_ids_plan[ids_pers]):
if len(ids_plan) > 0:
# print ' id_pers,ids_plan',id_pers,ids_plan
self.ids_plan[id_pers] = ids_plan[np.random.randint(len(ids_plan))]
return True
def select_plans_min_time_exec(self, fraction=0.1, timedev=-1, c_probit=-1, **kwargs):
"""
        Select the plan with minimum executed travel time as current plan.
"""
ids_pers_all = self.get_ids()
# print 'select_plans_random',len(ids_pers_all),fraction
ids_pers = ids_pers_all[np.random.random(len(ids_pers_all)) > (1.0-fraction)]
times_exec = self.get_plans().times_exec
# self.ids_plan.reset()
for id_pers, ids_plan_all in zip(ids_pers, self.lists_ids_plan[ids_pers]):
# print ' ids_plan_all',ids_plan_all,type(ids_plan_all)
ids_plan = np.array(ids_plan_all, dtype=np.int32)[times_exec[ids_plan_all] > 0.1]
if len(ids_plan) > 0:
# print ' id_pers,ids_plan',id_pers,ids_plan
if timedev > 0.1:
times_rand = np.random.normal(0.0, timedev, len(ids_plan))
elif c_probit > 0:
times_rand = np.zeros(len(ids_plan), dtype=np.float32)
for i, t in zip(xrange(len(ids_plan)), times_exec[ids_plan]):
times_rand[i] = np.random.normal(0.0, c_probit * t, 1)
else:
times_rand = np.zeros(len(ids_plan), dtype=np.float32)
self.ids_plan[id_pers] = np.array(ids_plan)[np.argmin(times_exec[ids_plan]+times_rand)]
return True
def select_plans_min_time_exec_est(self, fraction=0.1, timedev=-1, c_probit=-1, **kwargs):
"""
        Select the plan with minimum executed or estimated (if no executed time exists) travel time as current plan.
"""
n_analyzed_persons = 0
ids_pers_all = self.get_ids()
ids_pers = ids_pers_all[np.random.random(len(ids_pers_all)) > (1.0-fraction)]
times_exec = self.get_plans().times_exec
times_est = self.get_plans().times_est
ids_plans = self.get_plans().get_ids()
for id_pers, ids_plan_all in zip(ids_pers, self.lists_ids_plan[ids_pers]):
ids_plan_est = np.array(ids_plan_all, dtype=np.int32)[times_est[ids_plan_all] > 0.1]
ids_plan_exec = np.array(ids_plan_all, dtype=np.int32)[times_exec[ids_plan_all] > 0.1]
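            # keep in ids_plan_est only plans without an executed time, so each
            # plan is judged by either its executed or its estimated time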
if len(ids_plan_est) > 0:
if len(ids_plan_est) == len(ids_plan_exec):
ids_plan_est = []
else:
c = np.zeros(len(ids_plan_est))
for x in range(len(ids_plan_est)):
for y in range(len(ids_plan_exec)):
if ids_plan_est[x] == ids_plan_exec[y]:
c[x] = 1
d = np.delete(ids_plan_est, np.flatnonzero(c))
ids_plan_est = d
n_analyzed_persons += 1
if timedev > 0.1:
if len(ids_plan_est) > 0:
times_rand_est = np.random.normal(0.0, timedev, len(ids_plan_est))
if len(ids_plan_exec) > 0:
times_rand_exec = np.random.normal(0.0, timedev, len(ids_plan_exec))
elif c_probit > 0:
if len(ids_plan_est) > 0:
times_rand_est = np.zeros(len(ids_plan_est), dtype=np.float32)
for i, t in zip(xrange(len(ids_plan_est)), times_est[ids_plan_est]):
times_rand_est[i] = np.random.normal(0.0, c_probit * t, 1)
if len(ids_plan_exec) > 0:
times_rand_exec = np.zeros(len(ids_plan_exec), dtype=np.float32)
for i, t in zip(xrange(len(ids_plan_exec)), times_exec[ids_plan_exec]):
times_rand_exec[i] = np.random.normal(0.0, c_probit * t, 1)
else:
if len(ids_plan_exec) > 0:
times_rand_exec = np.zeros(len(ids_plan_exec), dtype=np.float32)
if len(ids_plan_est) > 0:
times_rand_est = np.zeros(len(ids_plan_est), dtype=np.float32)
if len(ids_plan_exec) > 0 and len(ids_plan_est) > 0:
if min(times_exec[ids_plan_exec]+times_rand_exec) < min(times_est[ids_plan_est]+times_rand_est):
self.ids_plan[id_pers] = np.array(ids_plan_exec)[np.argmin(
times_exec[ids_plan_exec]+times_rand_exec)]
else:
self.ids_plan[id_pers] = np.array(
ids_plan_est)[np.argmin(times_est[ids_plan_est]+times_rand_est)]
elif len(ids_plan_exec) == 0:
self.ids_plan[id_pers] = np.array(ids_plan_est)[np.argmin(times_est[ids_plan_est]+times_rand_est)]
else:
self.ids_plan[id_pers] = np.array(ids_plan_exec)[np.argmin(
times_exec[ids_plan_exec]+times_rand_exec)]
        print 'analyzed %d persons' % n_analyzed_persons
return True
def select_plans_next(self, fraction=0.1, **kwargs):
"""
        Select the next plan in the plan list as current plan.
"""
# print 'select_plans_next'
ids_pers_all = self.get_ids()
ids_pers = ids_pers_all[np.random.random(len(ids_pers_all)) > (1.0-fraction)]
for id_pers, id_plan_current, ids_plan in zip(ids_pers, self.ids_plan[ids_pers], self.lists_ids_plan[ids_pers]):
n_plan = len(ids_plan)
if n_plan > 0:
# print ' id_pers,id_plan_current',id_pers,id_plan_current,ids_plan,id_plan_current != -1
if id_plan_current != -1:
ind = ids_plan.index(id_plan_current)
# print ' ind',ind,ind +1 < n_plan
if ind + 1 < n_plan:
ind += 1
else:
ind = 0
else:
ind = 0
# print ' ind,n_plan',ind,n_plan,'ids_plan[ind]', ids_plan[ind]
self.ids_plan[id_pers] = ids_plan[ind]
# print ' finally: ids_plan=',self.ids_plan.get_value()
return True
def prepare_sim(self, process):
return [] # [(steptime1,func1),(steptime2,func2),...]
def get_trips(self):
# returns trip object, method common to all demand objects
return self.get_iautos()
def get_writexmlinfo(self, is_route=False):
"""
        Returns three arrays: the begin times of the vehicles, the write
        functions to be called for the respective vehicles, and the vehicle IDs.
        Used to sort trips when exporting to a route or trip xml file.
"""
print 'Virtualpop.get_writexmlinfo'
plans = self.get_plans()
ids_pers = self.select_ids(self.ids_plan.get_value() >= 0)
n_pers = len(ids_pers)
ids_plans = self.ids_plan[ids_pers]
# get vehicle trip info
times_depart_bike, writefuncs_bike, ids_rides_bike = plans.get_stagetable(
'bikerides').get_writexmlinfo(ids_plans, is_route)
times_depart_auto, writefuncs_auto, ids_rides_auto = plans.get_stagetable(
'autorides').get_writexmlinfo(ids_plans, is_route)
#self.add_stagetable('walks', WalkStages)
#self.add_stagetable('autorides', AutorideStages)
#self.add_stagetable('bikerides', BikerideStages)
#self.add_stagetable('transits', TransitStages)
#self.add_stagetable('activities', ActivityStages)
#rides = plans.get_stagetable('autorides')
# do persons
times_depart_pers = plans.times_begin[ids_plans]
writefuncs_pers = np.zeros(n_pers, dtype=np.object)
writefuncs_pers[:] = self.write_person_xml
# assemble vectors
print ' times_depart_pers.shape', times_depart_pers.shape
print ' times_depart_bike.shape', times_depart_bike.shape
print ' times_depart_auto.shape', times_depart_auto.shape
times_depart = np.concatenate((times_depart_pers,
times_depart_auto,
times_depart_bike,
))
writefuncs = np.concatenate((writefuncs_pers,
writefuncs_auto,
writefuncs_bike,
))
ids = np.concatenate((ids_pers,
ids_rides_auto,
ids_rides_bike,
))
return times_depart, writefuncs, ids
def write_person_xml(self, fd, id_pers, time_begin, indent=2):
stages = self.get_plans().get_stages(self.ids_plan[id_pers])
fd.write(xm.start('person', indent=indent+2))
fd.write(xm.num('id', self.get_id_sumo_from_id(id_pers)))
# fd.write(xm.num('depart',self.times_start[id_pers]))
fd.write(xm.num('depart', time_begin))
fd.write(xm.num('type', self.parent.vtypes.ids_sumo[self.ids_vtype[id_pers]]))
activity_init, id_stage_init = stages[0]
id_edge_init, pos_init = activity_init.get_edges_positions(id_stage_init)
# self.ids_edge_depart.write_xml(fd,id_trip)
# self.positions_depart.write_xml(fd,id_trip)
fd.write(xm.num('from', self._edges.ids_sumo[id_edge_init]))
fd.write(xm.num('departPos', pos_init))
fd.write(xm.stop())
# write stages of this person.
# Attention!! first and last stage, which are activities,
        # will NOT be exported, therefore [1:-1]
for stage, id_stage in stages[1:-1]:
stage.to_xml(id_stage, fd, indent+4)
fd.write(xm.end('person', indent=indent+2))
def config_results(self, results):
results.add_resultobj(res.Personresults('virtualpersonresults', results,
self,
self.get_net().edges,
name='Virtual person results',
                                                info='Table with simulation results for each person of the virtual population. The results refer to all trips made by the person during the entire simulation period.',
), groupnames=['Trip results'])
results.add_resultobj(res.Vehicleresults('iautotripresults', results,
self.get_iautos(),
self.get_net().edges,
name='Auto trip results',
                                                 info='Table with trip results made with individual autos. The results refer to all trips made by a specific vehicle during the entire simulation period.',
), groupnames=['Trip results'])
results.add_resultobj(res.Vehicleresults('ibiketripresults', results,
self.get_ibikes(),
self.get_net().edges,
name='Bike trip results',
                                                 info='Table with trip results made with individual bikes. The results refer to all trips made by a specific vehicle during the entire simulation period.',
), groupnames=['Trip results'])
# def process_results(self, results, process = None):
# print 'process_results'
# ## copy total travel into plan execution time
# personresults = results.virtualpersonresults
# self.update_plans(personresults)
def update_results(self, personresults):
"""
Updates plans with results from previous
simulation run, and updates plan choice
"""
ids_res = personresults.get_ids()
print 'update_results', len(ids_res)
ids_person = personresults.ids_person[ids_res]
ids_plan = self.ids_plan[ids_person]
self.get_plans().times_exec[ids_plan] = personresults.times_travel_total[ids_res]
# change mobility plan based on updated travel times
pass
class PopGenerator(Process):
def __init__(self, ident='virtualpopgenerator', virtualpop=None, logger=None, **kwargs):
print 'PopFromOdfGenerator.__init__ ', ident, virtualpop
# TODO: let this be independent, link to it or child??
#
scenario = virtualpop.get_scenario()
self._init_common(ident,
parent=virtualpop,
name='Population generator',
logger=logger,
info='Create virtual population from basic statistical data.',
)
attrsman = self.set_attrsman(cm.Attrsman(self))
# make for each possible pattern a field for prob
activitytypes = self.parent.get_scenario().demand.activitytypes
self.n_person = attrsman.add(cm.AttrConf('n_person', kwargs.get('n_person', 1000),
groupnames=['options'],
perm='rw',
name='Number of person',
info='Number of adult persons.',
))
self.ids_acttype_default = activitytypes.get_ids_from_formatted('home,work')
# self.ids_acttype = attrsman.add(cm.AttrConf( 'ids_acttype',kwargs.get('id_acttype',activitytypes.get_id_from_formatted('home')),
# groupnames = ['options'],
# choices = activitytypes.names.get_indexmap(),
# perm='rw',
# name = 'Activity type',
# info = 'Initial activity type.',
# ))
self.ttb_mean = attrsman.add(cm.AttrConf('ttb_mean', kwargs.get('ttb_mean', 55*60),
groupnames=['options'],
perm='rw',
name='Avg. of 24h travel time budget',
unit='s',
info="""Average travel time budget for one day.
This time excludes time for activities.
""",
))
self.ttb_dev = attrsman.add(cm.AttrConf('ttb_dev', kwargs.get('ttb_dev', 10*60),
groupnames=['options'],
perm='rw',
name='Std. of 24h travel time budget',
unit='s',
info="""Standard deviation of travel time budget for one day.
""",
))
mode_to_id = self.parent.get_scenario().net.modes.get_id_mode
self.share_pedestrian = attrsman.add(cm.AttrConf('share_pedestrian', kwargs.get('share_pedestrian', 0.1),
groupnames=['options', 'modal split'],
perm='rw',
id_mode=mode_to_id('pedestrian'),
name='Pedestrian share',
info="""Share of pedestrians.""",
))
self.share_autouser = attrsman.add(cm.AttrConf('share_autouser', kwargs.get('share_autouser', 0.5),
groupnames=['options', 'modal split'],
perm='rw',
id_mode=mode_to_id('passenger'),
name='Auto user share',
info="""Share of auto users.""",
))
self.share_motorcycleuser = attrsman.add(cm.AttrConf('share_motorcycleuser', kwargs.get('share_motorcycleuser', 0.1),
groupnames=['options', 'modal split'],
perm='rw',
id_mode=mode_to_id('motorcycle'),
name='Motorcycle user share',
info="""Share of Motorcycle users.""",
))
self.share_bikeuser = attrsman.add(cm.AttrConf('share_bikeuser', kwargs.get('share_bikeuser', 0.1),
groupnames=['options', 'modal split'],
perm='rw',
id_mode=mode_to_id('bicycle'),
name='Bike user share',
info="""Share of bike users.""",
))
self.share_ptuser = attrsman.add(cm.AttrConf('share_ptuser', kwargs.get('share_ptuser', 0.2),
groupnames=['options', 'modal split'],
id_mode=mode_to_id('bus'),
perm='rw',
name='PT share',
info="""Share of public transport user.""",
))
#self.modeshares = attrsman.add( cm.ObjConf(ModeShares('modeshares',self,scenario.net.modes),groupnames = ['options']) )
def do(self):
print 'PopGenerator.do'
# links
virtualpop = self.parent
virtualpop.clear_population()
logger = self.get_logger()
#logger.w('Update Landuse...')
scenario = virtualpop.get_scenario()
activitytypes = scenario.demand.activitytypes
facilities = scenario.landuse.facilities
edges = scenario.net.edges
ids_fac = facilities.get_ids()
map_id_edge_to_ids_fac = {}
for id_fac, id_edge in zip(ids_fac, facilities.ids_roadedge_closest[ids_fac]):
if map_id_edge_to_ids_fac.has_key(id_edge):
map_id_edge_to_ids_fac[id_edge].append(id_fac)
else:
map_id_edge_to_ids_fac[id_edge] = [id_fac, ]
n_pers = self.n_person
unitvec_int = np.ones(n_pers, dtype=np.int32)
ids_person = virtualpop.make_multiple(n_pers)
virtualpop.traveltimebudgets[ids_person] = self.get_ttb(ids_person)
virtualpop.ids_mode_preferred[ids_person] = self.get_modes_random(n_pers)
# here we could preselect correct landuse based on
# percentage of workers, students, employees
prob_fac_to = facilities.capacities[ids_fac].astype(np.float32)
prob_fac_to /= np.sum(prob_fac_to)
# print ' np.sum(prob_fac_to)',np.sum(prob_fac_to)
ids_fac_to = ids_fac[random_choice(n_pers, prob_fac_to)]
# determine id_fac_from by backward routing from id_fac_to
ids_edge_to = facilities.ids_roadedge_closest[ids_fac_to]
# pre calculate backward star and mode dependent link travel times
bstar = edges.get_bstar()
edgetimes = {}
ids_mode = self.get_ids_mode()
# idea: do also consider gradient of house prices
for id_mode, speed_max in zip(ids_mode, scenario.net.modes.speeds_max[ids_mode]):
edgetimes[id_mode] = edges.get_times(id_mode=id_mode,
speed_max=speed_max,
is_check_lanes=True
)
# determine home facilities by backwards tracking from work facility
ids_fac_from = np.ones(n_pers, dtype=np.int32)
i = 0
for id_person, id_edge_to, id_mode, ttb\
in zip(ids_person,
ids_edge_to,
virtualpop.ids_mode_preferred[ids_person],
virtualpop.traveltimebudgets[ids_person],
):
# print ' Backsearch',id_person,'id_edge_to',id_edge_to,edges.ids_sumo[id_edge_to],'ttb[s]',0.5*ttb
ids_edge_from, costs, btree = routing.edgedijkstra_backwards(id_edge_to,
0.5*ttb, # to be specified better
weights=edgetimes[id_mode],
bstar=bstar,
)
if len(ids_edge_from) == 0:
# routing failed to deliver edges of origins
# put work and home on same edge
ids_edge_from = [id_edge_to, ]
# look at all edges of origin and pick most likely facility
ids_fac_lim = []
for id_edge_from in ids_edge_from:
#id_from_check = id_edge_from
# print ' check from',id_from_check,'back to',id_edge_to,'time =%.2fs'%costs[id_from_check]
# while id_from_check != id_edge_to:
# id_from_check = btree[id_from_check]
# #print ' id_edge = ',id_from_check
# print ' success = ',id_from_check==id_edge_to
if map_id_edge_to_ids_fac.has_key(id_edge_from):
ids_fac_lim += map_id_edge_to_ids_fac[id_edge_from]
if len(ids_fac_lim) == 0:
# no facilities at all destinations found
# go edges backawards and search there
# this will reduce travel time
                for id_edge_from in ids_edge_from:
                    # verify if id_edge_from has facilities, otherwise walk backward
                    while not map_id_edge_to_ids_fac.has_key(id_edge_from):
                        # print '  no facility, go backward'
                        id_edge_from = btree[id_edge_from]
                    ids_fac_lim += map_id_edge_to_ids_fac[id_edge_from]
ids_fac_lim = np.array(ids_fac_lim, dtype=np.int32)
prob_fac_from = facilities.capacities[ids_fac_lim].astype(np.float32)
prob_fac_from /= np.sum(prob_fac_from)
# print ' np.sum(prob_fac_to)',np.sum(prob_fac_to)
            ids_fac_from[i] = ids_fac_lim[random_choice(1, prob_fac_from)]  # pick among candidate facilities, weighted by capacity
i += 1
# idea: adjust wake-up time with employment type
activities = virtualpop.get_activities()
ids_activity_from = activities.add_rows(
n=n_pers,
ids_activitytype=self.ids_acttype_default[0] * unitvec_int,
ids_facility=ids_fac_from,
# use default
#hours_begin_earliest = None,
#hours_begin_latest = None,
#durations_min = None,
#durations_max = None,
)
ids_activity_to = activities.add_rows(
n=n_pers,
ids_activitytype=self.ids_acttype_default[1] * unitvec_int,
ids_facility=ids_fac_to,
# use default
#hours_begin_earliest = None,
#hours_begin_latest = None,
#durations_min = None,
#durations_max = None,
)
        for id_person, id_activity_from, id_activity_to in zip(ids_person, ids_activity_from, ids_activity_to):
            virtualpop.activitypatterns[id_person] = [id_activity_from, id_activity_to, ]
return True
def get_ids_mode(self):
modesplitconfigs = self.get_attrsman().get_group('modal split')
ids_mode = np.zeros(len(modesplitconfigs), dtype=np.int32)
i = 0
for modeconfig in modesplitconfigs:
ids_mode[i] = modeconfig.id_mode
i += 1
return ids_mode
def get_modes_random(self, n):
"""
Return a vector with mode IDs of length n.
"""
# print 'get_modes_random',n
modesplitconfigs = self.get_attrsman().get_group('modal split')
ids_mode = np.zeros(len(modesplitconfigs), dtype=np.int32)
shares = np.zeros(len(modesplitconfigs), dtype=np.float32)
i = 0
for modeconfig in modesplitconfigs:
ids_mode[i] = modeconfig.id_mode
shares[i] = modeconfig.get_value()
i += 1
# print ' ids_mode',ids_mode
# print ' shares',shares
return ids_mode[random_choice(n, shares/np.sum(shares))]
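        # Illustrative example (assumed shares): with pedestrian=0.1, auto=0.5,
        # motorcycle=0.1, bike=0.1 and PT=0.2, get_modes_random(1000) returns
        # roughly 500 auto mode IDs, 200 PT mode IDs, and so on.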
def get_ttb(self, ids_pers):
n_pers = len(ids_pers)
# Truncated Normal dist with scipy
# load libraries
#import scipy.stats as stats
# lower, upper, mu, and sigma are four parameters
#lower, upper = 0.5, 1
#mu, sigma = 0.6, 0.1
# instantiate an object X using the above four parameters,
#X = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
# generate 1000 sample data
#samples = X.rvs(1000)
return np.random.normal(self.ttb_mean, self.ttb_dev, n_pers).clip(0, 2*3600)
class PopFromOdfGenerator(Process):
def __init__(self, ident, virtualpop, logger=None, **kwargs):
print 'PopFromOdfGenerator.__init__'
# TODO: let this be independent, link to it or child??
self._init_common(ident,
parent=virtualpop,
name='Pop from OD-flow generator',
logger=logger,
info='Create virtual population from origin-to-destination zone flows by disaggregation.',
)
attrsman = self.set_attrsman(cm.Attrsman(self))
# make for each possible pattern a field for prob
activitytypes = self.parent.get_scenario().demand.activitytypes
self.hour_offset = attrsman.add(cm.AttrConf('hour_offset', kwargs.get('hour_offset', 8.0),
groupnames=['options'],
perm='rw',
name='Offset hours',
unit='h',
info='Hour when simulation starts. This is the hour (of the day) when simulation time shows zero seconds.',
))
self.hour_tripbudget = attrsman.add(cm.AttrConf('hour_tripbudget', kwargs.get('hour_tripbudget', 0.5),
groupnames=['options'],
perm='rw',
name='Triptime budget',
unit='h',
info="""Time budget for this trip. This time is used
to initially estimate the time in hours between
the activity at origin and the activity
at destination.
""",
))
self.scale = attrsman.add(cm.AttrConf('scale', kwargs.get('scale', 1.0),
groupnames=['options'],
perm='rw',
name='Scale',
info='Global scale factor. Scales the number of all OD trips.',
))
self.is_use_landusetypes = attrsman.add(cm.AttrConf('is_use_landusetypes', kwargs.get('is_use_landusetypes', False),
groupnames=['options'],
perm='rw',
name='use landuse types',
info="""If True, use the landuse type of
facilities when assigning the origin and destination facility.
The landuse type is selected according to the activity type.
Use this option only if landuse types have been correctly defined for
all facilities.
""",
))
self.is_update_landuse = attrsman.add(cm.AttrConf('is_update_landuse', kwargs.get('is_update_landuse', True),
groupnames=['options'],
perm='rw',
name='update Landuse',
info="""If True, update land use database (zones, facilities, parking) before generating the population. Updating means identifying edges and facilities within zones.
""",
))
def do(self):
print 'PopFromOdfGenerator.do'
# links
virtualpop = self.parent
logger = self.get_logger()
if self.is_update_landuse:
logger.w('Update Landuse...')
scenario = virtualpop.get_scenario()
scenario.landuse.zones.refresh_zoneedges()
scenario.landuse.facilities.identify_taz()
scenario.landuse.facilities.identify_closest_edge()
scenario.landuse.facilities.update()
logger.w('Create population...')
virtualpop.create_pop_from_odflows(logger=logger, **self.get_kwoptions())
#activitytypes = virtualpop.activitytypes
return True
class Planner(Process):
def __init__(self, ident='planner', virtualpop=None, strategy='all', logger=None, **kwargs):
print 'Planner.__init__'
# TODO: let this be independent, link to it or child??
self._init_common(ident,
parent=virtualpop,
name='Planner',
logger=logger,
                          info='Generates mobility plans for the population for a specific mobility strategy. Plans are only generated for persons for whom the strategy is applicable.',
)
attrsman = self.set_attrsman(cm.Attrsman(self))
# make for each possible pattern a field for prob
strategies = virtualpop.get_strategies()
strategychoices = {'all': -1}
strategychoices.update(strategies.names.get_indexmap())
self.id_strategy = attrsman.add(cm.AttrConf('id_strategy', strategychoices[strategy],
groupnames=['options'],
choices=strategychoices,
perm='rw',
name='Strategy',
                                                    info='Strategy to be used to create mobility plans. In case of all strategies, the planner generates all applicable plans.',
))
evalcrits = {'apply to all persons if feasible': 0,
'apply only if preferred mode is used': 1,
'apply only if exclusively preferred mode is used': 2,
}
self.evalcrit = attrsman.add(cm.AttrConf('evalcrit', kwargs.get('evalcrit', evalcrits['apply to all persons if feasible']),
groupnames=['options'],
choices=evalcrits,
perm='rw',
name='Application criteria',
info=""" Value that determines for which persons the plans will be generated.
Apply to all persons if feasible:0
Apply only if preferred mode is used:1
Apply only if exclusively preferred mode is used:2
""",
))
def do(self):
print 'Planner.do'
# links
virtualpop = self.parent
logger = self.get_logger()
#logger.w('Check applicability')
#strategies = virtualpop.strategies.get_value()
if self.id_strategy != -1:
virtualpop.plan_with_strategy(self.id_strategy, evalcrit=self.evalcrit, logger=logger)
else: # plan with all strategies
for id_strategy in virtualpop.get_strategies().get_ids():
virtualpop.plan_with_strategy(id_strategy, evalcrit=self.evalcrit, logger=logger)
return True
class PlanSelector(Process):
def __init__(self, ident='planselector', virtualpop=None, logger=None, **kwargs):
print 'PlanSelector.__init__'
# TODO: let this be independent, link to it or child??
self._init_common(ident,
parent=virtualpop,
name='Plan Selector',
logger=logger,
info='Selects the plan for each person which will be executed during the next simulation run according to a defined selection method.',
)
attrsman = self.set_attrsman(cm.Attrsman(self))
# make for each possible pattern a field for prob
strategies = virtualpop.get_strategies()
# strategychoices.update(strategies.names.get_indexmap())
methods = {'plan with shortest estim. time': virtualpop.select_plans_min_time_est,
'plan with shortest exec. time': virtualpop.select_plans_min_time_exec,
'plan with preferred mode': virtualpop.select_plans_preferred_mode,
'next plan in list': virtualpop.select_plans_next,
'random plan': virtualpop.select_plans_random,
'plan with shortest exec. time or est. time': virtualpop.select_plans_min_time_exec_est
}
self.method = attrsman.add(cm.AttrConf('method', methods[kwargs.get('methodname', 'plan with shortest estim. time')],
groupnames=['options'],
choices=methods,
perm='rw',
name='Selection method',
info='Selection method used to select current plans.',
))
self.fraction = attrsman.add(cm.AttrConf('fraction', kwargs.get('fraction', 1.0),
groupnames=['options'],
perm='rw',
name='Change fraction',
info="""Fraction of persons that are randomly chosen to change plans according to the defined method.
                                                 A value of 1.0 means that the plans of all persons will be changed.""",
))
self.timedev = attrsman.add(cm.AttrConf('timedev', kwargs.get('timedev', 0.0),
groupnames=['options'],
perm='rw',
name='Time deviation',
info='Time deviation of random time component of estimated or effective time. If zero, no random time is added.',
))
self.c_probit = attrsman.add(cm.AttrConf('c_probit', kwargs.get('c_probit', 0.0),
groupnames=['options'],
perm='rw',
name='Probit const',
info="""Probit constant used to determine the deviation of the normal distributed random time component.
The deviation is the product of this constant and the travel time. If zero, no random time is added.""",
))
def do(self):
        print 'PlanSelector.do'
# links
#virtualpop = self.parent
logger = self.get_logger()
#logger.w('Check applicability')
return self.method(logger=logger, **self.get_kwoptions())
class VehicleProvider(Process):
def __init__(self, ident='vehicleprovider', virtualpop=None, logger=None, **kwargs):
print 'VehicleProvider.__init__'
# TODO: let this be independent, link to it or child??
self._init_common(ident,
parent=virtualpop,
name='Vehicle Provider',
logger=logger,
                          info='Provides individual vehicles to persons according to preferred mode and given statistical data.',
)
attrsman = self.set_attrsman(cm.Attrsman(self))
# make for each possible pattern a field for prob
self.share_autoowner = attrsman.add(cm.AttrConf('share_autoowner', kwargs.get('share_autoowner', 0.8),
groupnames=['options'],
perm='rw',
                                                        name='Auto owner share',
                                                        info="""Share of auto owners. A car will be created for each auto owner.
                                                        Attention if preferred mode has already been defined: persons who have auto as preferred mode automatically get a car assigned.
""",
))
self.share_motorcycleowner = attrsman.add(cm.AttrConf('share_motorcycleowner', kwargs.get('share_motorcycleowner', 0.3),
groupnames=['options'],
perm='rw',
name='Motorcycle owner share',
                                                              info="""Share of motorcycle owners. A motorcycle will be created for each motorcycle owner.
                                                              Attention if preferred mode has already been defined: persons who have motorcycle as preferred mode automatically get a motorcycle assigned.
""",
))
self.share_bikeowner = attrsman.add(cm.AttrConf('share_bikeowner', kwargs.get('share_bikeowner', 0.5),
groupnames=['options'],
perm='rw',
name='Bike owner share',
                                                        info="""Share of bike owners. A bike will be created for each bike owner.
                                                        Attention if preferred mode has already been defined: persons who have bicycle as preferred mode automatically get a bike assigned.
""",
))
def do(self):
print 'VehicleProvider.do'
# links
virtualpop = self.parent
logger = self.get_logger()
logger.w('Provide vehicles...')
ids_person = virtualpop.get_ids()
n_person = len(ids_person)
modes = virtualpop.get_scenario().net.modes
id_mode_bike = modes.get_id_mode('bicycle')
id_mode_auto = modes.get_id_mode('passenger')
id_mode_moto = modes.get_id_mode('motorcycle')
iautos = virtualpop.get_iautos()
ibikes = virtualpop.get_ibikes()
imotos = virtualpop.get_imotos()
        logger.w('generate individual vehicles for preferred modes')
ids_prefer_auto = virtualpop.select_ids(
(virtualpop.ids_mode_preferred.get_value() == id_mode_auto) & (virtualpop.ids_iauto.get_value() == -1))
ids_iauto = iautos.assign_to_persons(ids_prefer_auto)
n_current = iautos.get_share(is_abs=True)
#n_none = int(self.share_autoowner*n_person)-(n_person-n_current)
n_need = int(self.share_autoowner*n_person)-n_current
if n_need > 0:
ids_pers_miss = np.flatnonzero(virtualpop.ids_iauto.get_value() == -1)
# print ' n_person,n_current,n_target,n_need,len(ids_pers_miss)',n_person,n_current,int(self.share_autoowner*n_person),n_need,len(ids_pers_miss)
ids_pers_assign = np.random.choice(ids_pers_miss, n_need, replace=False)
ids_iauto = iautos.assign_to_persons(ids_pers_assign)
print ' created %d autos, target share=%.2f, share = %.2f' % (
iautos.get_share(is_abs=True), iautos.get_share(), self.share_autoowner)
ids_prefer_bike = virtualpop.select_ids(
(virtualpop.ids_mode_preferred.get_value() == id_mode_bike) & (virtualpop.ids_ibike.get_value() == -1))
ids_ibikes = ibikes.assign_to_persons(ids_prefer_bike)
n_current = ibikes.get_share(is_abs=True)
n_need = int(self.share_bikeowner*n_person)-n_current
if n_need > 0:
ids_pers_miss = np.flatnonzero(virtualpop.ids_ibike.get_value() == -1)
# print ' n_person,n_current,n_target,n_need,len(ids_pers_miss)',n_person,n_current,int(self.share_autoowner*n_person),n_need,len(ids_pers_miss)
ids_pers_assign = np.random.choice(ids_pers_miss, n_need, replace=False)
ids_ibike = ibikes.assign_to_persons(ids_pers_assign)
print ' created %d bikes, target share=%.2f, share = %.2f' % (
ibikes.get_share(is_abs=True), ibikes.get_share(), self.share_bikeowner)
ids_prefer_moto = virtualpop.select_ids(
(virtualpop.ids_mode_preferred.get_value() == id_mode_moto) & (virtualpop.ids_imoto.get_value() == -1))
ids_imoto = imotos.assign_to_persons(ids_prefer_moto)
n_current = imotos.get_share(is_abs=True)
n_need = int(self.share_motorcycleowner*n_person)-n_current
if n_need > 0:
ids_pers_miss = np.flatnonzero(virtualpop.ids_imoto.get_value() == -1)
ids_pers_assign = np.random.choice(ids_pers_miss, n_need, replace=False)
ids_imoto = imotos.assign_to_persons(ids_pers_assign)
print ' created %d moto, target share=%.2f, share = %.2f' % (
imotos.get_share(is_abs=True), imotos.get_share(), self.share_motorcycleowner)
return True
# TODO: generate and assign additional vehicles
        # to satisfy prescribed ownership shares
# ---- dataset record: exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/icx/icx_linkagg.py (repo: tr3ck3r/linklight, license: MIT) ----
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: icx_linkagg
author: "Ruckus Wireless (@Commscope)"
short_description: Manage link aggregation groups on Ruckus ICX 7000 series switches
description:
- This module provides declarative management of link aggregation groups
on Ruckus ICX network devices.
notes:
- Tested against ICX 10.1.
- For information on using ICX platform, see L(the ICX OS Platform Options guide,../network/user_guide/platform_icx.html).
options:
group:
description:
- Channel-group number for the port-channel
        Link aggregation group. Range 1-255 or set to 'auto' to auto-generate a LAG ID
type: int
name:
description:
- Name of the LAG
type: str
mode:
description:
- Mode of the link aggregation group.
type: str
choices: ['dynamic', 'static']
members:
description:
- List of port members or ranges of the link aggregation group.
type: list
state:
description:
- State of the link aggregation group.
type: str
default: present
choices: ['present', 'absent']
check_running_config:
description:
- Check running configuration. This can be set as environment variable.
        Module will use environment variable value (default: True), unless it is overridden by specifying it as a module parameter.
type: bool
default: yes
aggregate:
description:
- List of link aggregation definitions.
type: list
suboptions:
group:
description:
- Channel-group number for the port-channel
            Link aggregation group. Range 1-255 or set to 'auto' to auto-generate a LAG ID
type: int
name:
description:
- Name of the LAG
type: str
mode:
description:
- Mode of the link aggregation group.
type: str
choices: ['dynamic', 'static']
members:
description:
- List of port members or ranges of the link aggregation group.
type: list
state:
description:
- State of the link aggregation group.
type: str
choices: ['present', 'absent']
check_running_config:
description:
- Check running configuration. This can be set as environment variable.
            Module will use environment variable value (default: True), unless it is overridden by specifying it as a module parameter.
type: bool
purge:
description:
- Purge links not defined in the I(aggregate) parameter.
type: bool
default: no
'''
EXAMPLES = """
- name: create static link aggregation group
icx_linkagg:
group: 10
mode: static
name: LAG1
- name: create link aggregation group with auto id
icx_linkagg:
group: auto
mode: dynamic
name: LAG2
- name: delete link aggregation group
icx_linkagg:
group: 10
state: absent
- name: Set members to LAG
icx_linkagg:
group: 200
mode: static
members:
- ethernet 1/1/1 to 1/1/6
- ethernet 1/1/10
- name: Remove links other than LAG id 100 and 3 using purge
icx_linkagg:
aggregate:
- { group: 3}
- { group: 100}
purge: true
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- lag LAG1 dynamic id 11
- ports ethernet 1/1/1 to 1/1/6
- no ports ethernet 1/1/10
- no lag LAG1 dynamic id 12
"""
import re
from copy import deepcopy
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import ConnectionError, exec_command
from ansible_collections.community.general.plugins.module_utils.network.icx.icx import run_commands, get_config, load_config
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import CustomNetworkConfig
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_spec
def range_to_members(ranges, prefix=""):
match = re.findall(r'(ethe[a-z]* [0-9]/[0-9]/[0-9]+)( to [0-9]/[0-9]/[0-9]+)?', ranges)
members = list()
for m in match:
start, end = m
if(end == ''):
start = start.replace("ethe ", "ethernet ")
members.append("%s%s" % (prefix, start))
else:
start_tmp = re.search(r'[0-9]/[0-9]/([0-9]+)', start)
end_tmp = re.search(r'[0-9]/[0-9]/([0-9]+)', end)
start = int(start_tmp.group(1))
end = int(end_tmp.group(1)) + 1
for num in range(start, end):
members.append("%sethernet 1/1/%s" % (prefix, num))
return members
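# Illustrative examples (hypothetical inputs):
#   range_to_members("ethe 1/1/1 to 1/1/3")
#       -> ['ethernet 1/1/1', 'ethernet 1/1/2', 'ethernet 1/1/3']
#   range_to_members("ethernet 1/1/10", prefix="no ")
#       -> ['no ethernet 1/1/10']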
def map_config_to_obj(module):
objs = dict()
compare = module.params['check_running_config']
config = get_config(module, None, compare=compare)
obj = None
for line in config.split('\n'):
l = line.strip()
match1 = re.search(r'lag (\S+) (\S+) id (\S+)', l, re.M)
if match1:
obj = dict()
obj['name'] = match1.group(1)
obj['mode'] = match1.group(2)
obj['group'] = match1.group(3)
obj['state'] = 'present'
obj['members'] = list()
else:
match2 = re.search(r'ports .*', l, re.M)
if match2 and obj is not None:
obj['members'].extend(range_to_members(match2.group(0)))
elif obj is not None:
objs[obj['group']] = obj
obj = None
return objs
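# Illustrative mapping (assumed running-config snippet):
#   lag LAG1 dynamic id 11
#    ports ethernet 1/1/1 to 1/1/2
#   !
# yields {'11': {'name': 'LAG1', 'mode': 'dynamic', 'group': '11',
#                'state': 'present',
#                'members': ['ethernet 1/1/1', 'ethernet 1/1/2']}}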
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
d['group'] = str(d['group'])
obj.append(d)
else:
obj.append({
'group': str(module.params['group']),
'mode': module.params['mode'],
'members': module.params['members'],
'state': module.params['state'],
'name': module.params['name']
})
return obj
def search_obj_in_list(group, lst):
for o in lst:
if o['group'] == group:
return o
return None
def is_member(member, lst):
for li in lst:
ml = range_to_members(li)
if member in ml:
return True
return False
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
purge = module.params['purge']
for w in want:
if have == {} and w['state'] == 'absent':
commands.append("%slag %s %s id %s" % ('no ' if w['state'] == 'absent' else '', w['name'], w['mode'], w['group']))
elif have.get(w['group']) is None:
commands.append("%slag %s %s id %s" % ('no ' if w['state'] == 'absent' else '', w['name'], w['mode'], w['group']))
if(w.get('members') is not None and w['state'] == 'present'):
for m in w['members']:
commands.append("ports %s" % (m))
if w['state'] == 'present':
commands.append("exit")
else:
commands.append("%slag %s %s id %s" % ('no ' if w['state'] == 'absent' else '', w['name'], w['mode'], w['group']))
if(w.get('members') is not None and w['state'] == 'present'):
for m in have[w['group']]['members']:
if not is_member(m, w['members']):
commands.append("no ports %s" % (m))
for m in w['members']:
sm = range_to_members(ranges=m)
for smm in sm:
if smm not in have[w['group']]['members']:
commands.append("ports %s" % (smm))
if w['state'] == 'present':
commands.append("exit")
if purge:
for h in have:
if search_obj_in_list(have[h]['group'], want) is None:
commands.append("no lag %s %s id %s" % (have[h]['name'], have[h]['mode'], have[h]['group']))
return commands
def main():
element_spec = dict(
group=dict(type='int'),
name=dict(type='str'),
mode=dict(choices=['dynamic', 'static']),
members=dict(type='list'),
state=dict(default='present',
choices=['present', 'absent']),
check_running_config=dict(default=True, type='bool', fallback=(env_fallback, ['ANSIBLE_CHECK_ICX_RUNNING_CONFIG']))
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['group'] = dict(required=True, type='int')
required_one_of = [['group', 'aggregate']]
required_together = [['name', 'group']]
mutually_exclusive = [['group', 'aggregate']]
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec, required_together=required_together),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
exec_command(module, 'skip')
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result["commands"] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
# ---- dataset record: project/api/recycling_street/schemas.py (repo: DanielGrams/cityservice, license: MIT) ----
from flask_security import current_user
from marshmallow import fields
from project.api import marshmallow
from project.api.recycling_event.schemas import (
RecyclingEventListRequestSchema,
RecyclingEventListResponseSchema,
)
from project.api.schemas import (
IdSchemaMixin,
PaginationRequestSchema,
PaginationResponseSchema,
SQLAlchemyBaseSchema,
)
from project.models import RecyclingStreet, RecyclingStreetsUsers
class RecyclingStreetModelSchema(SQLAlchemyBaseSchema):
class Meta:
model = RecyclingStreet
load_instance = True
class RecyclingStreetIdSchema(RecyclingStreetModelSchema, IdSchemaMixin):
pass
class RecyclingStreetBaseSchemaMixin(object):
name = marshmallow.auto_field()
class RecyclingStreetCurrentUserFavoriteMixin(object):
is_favored = fields.Method(
"get_is_favored",
metadata={
"description": "True, if recycling street is favored by current user"
},
)
    def get_is_favored(self, recycling_street):
        if not current_user or not current_user.is_authenticated:
            return False
        from project.services.user import has_user_recycling_street
        return has_user_recycling_street(current_user.id, recycling_street.id)
class RecyclingStreetCurrentUserNotificationsMixin(object):
notifications_active = fields.Method(
"get_notifications_active",
metadata={"description": "True, if notifications are active for current user"},
)
    def get_notifications_active(self, recycling_street):
        if not current_user or not current_user.is_authenticated:
            return False
        from project.services.user import get_user_recycling_street_notifications_active
        return get_user_recycling_street_notifications_active(current_user.id, recycling_street.id)
class RecyclingStreetSchema(
RecyclingStreetIdSchema,
RecyclingStreetBaseSchemaMixin,
RecyclingStreetCurrentUserFavoriteMixin,
RecyclingStreetCurrentUserNotificationsMixin,
):
pass
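# Illustrative usage (assumed caller code): serializing a RecyclingStreet row
# with RecyclingStreetSchema().dump(street) yields a dict along the lines of
# {"id": ..., "name": ..., "is_favored": ..., "notifications_active": ...}.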
class RecyclingStreetRefSchema(RecyclingStreetIdSchema):
name = marshmallow.auto_field()
class PlaceRecyclingStreetListRequestSchema(PaginationRequestSchema):
keyword = fields.Str()
class PlaceRecyclingStreetListItemSchema(
RecyclingStreetRefSchema,
RecyclingStreetCurrentUserFavoriteMixin,
RecyclingStreetCurrentUserNotificationsMixin,
):
pass
class PlaceRecyclingStreetListResponseSchema(PaginationResponseSchema):
items = fields.List(
fields.Nested(PlaceRecyclingStreetListItemSchema),
metadata={"description": "Recycling streets"},
)
class UserRecyclingStreetListRequestSchema(PaginationRequestSchema):
pass
class UserRecyclingStreetListItemSchema(
RecyclingStreetRefSchema,
RecyclingStreetCurrentUserNotificationsMixin,
):
pass
class UserRecyclingStreetListResponseSchema(PaginationResponseSchema):
items = fields.List(
fields.Nested(UserRecyclingStreetListItemSchema),
metadata={"description": "Recycling streets"},
)
class RecyclingStreetEventListRequestSchema(RecyclingEventListRequestSchema):
pass
class RecyclingStreetEventListResponseSchema(RecyclingEventListResponseSchema):
pass
class UserRecyclingStreetModelSchema(SQLAlchemyBaseSchema):
class Meta:
model = RecyclingStreetsUsers
load_instance = True
class UserRecyclingStreetBaseSchemaMixin(object):
pass
class UserRecyclingStreetWriteSchemaMixin(object):
notifications_active = marshmallow.auto_field(
required=False,
default=False,
)
class UserRecyclingStreetPatchRequestSchema(
UserRecyclingStreetModelSchema,
UserRecyclingStreetBaseSchemaMixin,
UserRecyclingStreetWriteSchemaMixin,
):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.make_patch_schema()
# ---- dataset record: official/cv/yolov4/ascend310_quant_infer/post_quant.py (repo: leelige/mindspore, license: Apache-2.0) ----
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""do post training quantization for Ascend310"""
import os
import sys
import numpy as np
from amct_mindspore.quantize_tool import create_quant_config
from amct_mindspore.quantize_tool import quantize_model
from amct_mindspore.quantize_tool import save_model
import mindspore as ms
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
def quant_yolov4(network, dataset, input_data):
"""
    Export a post-training quantization model in AIR format.
Args:
        network: the original network for inference (here YOLOV4CspDarkNet53).
dataset: the data for inference.
input_data: the data used for constructing network. The shape and format of input data should be the same as
actual data for inference.
"""
# step2: create the quant config json file
create_quant_config("./config.json", network, input_data)
# step3: do some network modification and return the modified network
calibration_network = quantize_model("./config.json", network, input_data)
calibration_network.set_train(False)
# step4: perform the evaluation of network to do activation calibration
for _, data in enumerate(dataset.create_dict_iterator(num_epochs=1)):
image = data["image"]
_ = calibration_network(image)
# step5: export the air file
save_model("results/yolov4_quant", calibration_network, input_data)
print("[INFO] the quantized AIR file has been stored at: \n {}".format("results/yolov4_quant.air"))
if __name__ == "__main__":
sys.path.append("..")
from src.yolo import YOLOV4CspDarkNet53
from src.yolo_dataset import create_yolo_dataset
from model_utils.config import config
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = YOLOV4CspDarkNet53()
param_dict = load_checkpoint(config.ckpt_file)
load_param_into_net(net, param_dict)
net.set_train(False)
config.batch_size = 1
data_path = os.path.join(config.data_dir, "val2017")
ann_file = os.path.join(config.data_dir, "annotations/instances_val2017.json")
datasets, data_size = create_yolo_dataset(data_path, ann_file, is_training=False, batch_size=config.batch_size,
max_epoch=1, device_num=1, rank=0, shuffle=False, default_config=config)
ds = datasets.take(1)
shape = [config.batch_size, 3] + config.test_img_shape
inputs = Tensor(np.zeros(shape), ms.float32)
quant_yolov4(net, ds, inputs)
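    # Illustrative invocation (paths and flag names assumed; the actual
    # options are defined by model_utils.config):
    #   python post_quant.py --data_dir=/path/to/coco --ckpt_file=yolov4.ckpt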
# ---- dataset record: busigyo/welcomepage/forms.py (repo: chieteia/akemines, license: MIT) ----
from django import forms
class TestForm(forms.Form):
    num = forms.IntegerField(label='天気コード')  # label: "weather code"
class Form(forms.Form):
    loc = forms.CharField(label='', max_length=100, required=True)
    #lat = forms.FloatField(label='緯度')  # latitude
    #lon = forms.FloatField(label='経度')  # longitude
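# Illustrative usage in a view (names assumed):
#   form = Form(request.POST)
#   if form.is_valid():
#       loc = form.cleaned_data['loc']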
# ---- dataset record: src/xrt/ipc/shared/proto.py (repo: leviathanch/monado, licenses: Unlicense, Apache-2.0, BSD-2-Clause, MIT, BSL-1.0, BSD-3-Clause) ----
#!/usr/bin/env python3
# Copyright 2020, Collabora, Ltd.
# SPDX-License-Identifier: BSL-1.0
"""Generate code from a JSON file describing the IPC protocol."""
import argparse
from ipcproto.common import (Proto, write_decl, write_invocation,
write_result_handler)
header = '''// Copyright 2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief {brief}.
* @author Jakob Bornecrantz <[email protected]>
* @ingroup ipc{suffix}
*/
'''
def generate_h(file, p):
"""Generate protocol header.
Defines command enum, utility functions, and command and reply structures.
"""
f = open(file, "w")
f.write(header.format(brief='Generated IPC protocol header', suffix=''))
f.write('''
#pragma once
struct ipc_connection;
''')
f.write('''
typedef enum ipc_command
{
\tIPC_ERR = 0,''')
for call in p.calls:
f.write("\n\t" + call.id + ",")
f.write("\n} ipc_command_t;\n")
f.write('''
struct ipc_command_msg
{
\tenum ipc_command cmd;
};
struct ipc_result_reply
{
\txrt_result_t result;
};
''')
f.write('''
static inline const char *
ipc_cmd_to_str(ipc_command_t id)
{
\tswitch (id) {
\tcase IPC_ERR: return "IPC_ERR";''')
for call in p.calls:
f.write("\n\tcase " + call.id + ": return \"" + call.id + "\";")
f.write("\n\tdefault: return \"IPC_UNKNOWN\";")
f.write("\n\t}\n}\n")
f.write("#pragma pack (push, 1)")
for call in p.calls:
# Should we emit a msg struct.
if call.needs_msg_struct:
f.write("\nstruct ipc_" + call.name + "_msg\n")
f.write("{\n")
f.write("\tenum ipc_command cmd;\n")
for arg in call.in_args:
f.write("\t" + arg.get_struct_field() + ";\n")
if call.in_handles:
f.write("\t%s %s;\n" % (call.in_handles.count_arg_type,
call.in_handles.count_arg_name))
f.write("};\n")
# Should we emit a reply struct.
if call.out_args:
f.write("\nstruct ipc_" + call.name + "_reply\n")
f.write("{\n")
f.write("\txrt_result_t result;\n")
for arg in call.out_args:
f.write("\t" + arg.get_struct_field() + ";\n")
f.write("};\n")
f.write("#pragma pack (pop)\n")
f.close()
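# For a hypothetical call named "get_info", generate_h above emits structures
# of the following shape (field lists depend on the protocol file):
#   struct ipc_get_info_msg   { enum ipc_command cmd; /* in-args */ };
#   struct ipc_get_info_reply { xrt_result_t result;  /* out-args */ };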
def generate_client_c(file, p):
"""Generate IPC client proxy source."""
f = open(file, "w")
f.write(header.format(brief='Generated IPC client code', suffix='_client'))
f.write('''
#include "client/ipc_client.h"
#include "ipc_protocol_generated.h"
\n''')
# Loop over all of the calls.
for call in p.calls:
call.write_call_decl(f)
f.write("\n{\n")
f.write("\tIPC_TRACE(ipc_c, \"Calling " + call.name + "\");\n\n")
# Message struct
if call.needs_msg_struct:
f.write("\tstruct ipc_" + call.name + "_msg _msg = {\n")
else:
f.write("\tstruct ipc_command_msg _msg = {\n")
f.write("\t .cmd = " + str(call.id) + ",\n")
for arg in call.in_args:
if arg.is_aggregate:
f.write("\t ." + arg.name + " = *" + arg.name + ",\n")
else:
f.write("\t ." + arg.name + " = " + arg.name + ",\n")
if call.in_handles:
f.write("\t ." + call.in_handles.count_arg_name +
" = " + call.in_handles.count_arg_name + ",\n")
f.write("\t};\n")
# Reply struct
if call.out_args:
f.write("\tstruct ipc_" + call.name + "_reply _reply;\n")
else:
f.write("\tstruct ipc_result_reply _reply = {0};\n")
if call.in_handles:
f.write("\tstruct ipc_result_reply _sync = {0};\n")
f.write("""
\t// Other threads must not read/write the fd while we wait for reply
\tos_mutex_lock(&ipc_c->mutex);
""")
cleanup = "os_mutex_unlock(&ipc_c->mutex);"
# Prepare initial sending
func = 'ipc_send'
args = ['&ipc_c->imc', '&_msg', 'sizeof(_msg)']
f.write("\n\t// Send our request")
write_invocation(f, 'xrt_result_t ret', func, args, indent="\t")
f.write(';')
write_result_handler(f, 'ret', cleanup, indent="\t")
if call.in_handles:
f.write("\n\t// Send our handles separately\n")
f.write("\n\t// Wait for server sync")
# Must sync with the server so it's expecting the next message.
write_invocation(
f,
'ret',
'ipc_receive',
(
'&ipc_c->imc',
'&_sync',
'sizeof(_sync)'
),
indent="\t"
)
f.write(';')
write_result_handler(f, 'ret', cleanup, indent="\t")
# Must send these in a second message
# since the server doesn't know how many to expect.
f.write("\n\t// We need this message data as filler only\n")
f.write("\tstruct ipc_command_msg _handle_msg = {\n")
f.write("\t .cmd = " + str(call.id) + ",\n")
f.write("\t};\n")
write_invocation(
f,
'ret',
'ipc_send_handles_' + call.in_handles.stem,
(
'&ipc_c->imc',
"&_handle_msg",
"sizeof(_handle_msg)",
call.in_handles.arg_name,
call.in_handles.count_arg_name
),
indent="\t"
)
f.write(';')
write_result_handler(f, 'ret', cleanup, indent="\t")
f.write("\n\t// Await the reply")
func = 'ipc_receive'
args = ['&ipc_c->imc', '&_reply', 'sizeof(_reply)']
if call.out_handles:
func += '_handles_' + call.out_handles.stem
args.extend(call.out_handles.arg_names)
write_invocation(f, 'ret', func, args, indent="\t")
f.write(';')
write_result_handler(f, 'ret', cleanup, indent="\t")
for arg in call.out_args:
f.write("\t*out_" + arg.name + " = _reply." + arg.name + ";\n")
f.write("\n\t" + cleanup)
f.write("\n\treturn _reply.result;\n}\n")
f.close()
def generate_client_h(file, p):
"""Generate IPC client header.
Contains prototypes for generated IPC proxy call functions.
"""
f = open(file, "w")
f.write(header.format(brief='Generated IPC client code', suffix='_client'))
f.write('''
#pragma once
#include "shared/ipc_protocol.h"
#include "ipc_protocol_generated.h"
#include "client/ipc_client.h"
''')
for call in p.calls:
call.write_call_decl(f)
f.write(";\n")
f.close()
def generate_server_c(file, p):
"""Generate IPC server stub/dispatch source."""
f = open(file, "w")
f.write(header.format(brief='Generated IPC server code', suffix='_server'))
f.write('''
#include "ipc_server_generated.h"
#include "shared/ipc_protocol.h"
#include "shared/ipc_utils.h"
#include "server/ipc_server.h"
#define MAX_HANDLES 16
''')
f.write('''
xrt_result_t
ipc_dispatch(volatile struct ipc_client_state *ics, ipc_command_t *ipc_command)
{
\tswitch (*ipc_command) {
''')
for call in p.calls:
f.write("\tcase " + call.id + ": {\n")
f.write("\t\tIPC_TRACE(ics->server, \"Dispatching " + call.name +
"\");\n\n")
if call.needs_msg_struct:
f.write(
"\t\tstruct ipc_{}_msg *msg =\n".format(call.name))
f.write(
"\t\t (struct ipc_{}_msg *)ipc_command;\n".format(
call.name))
if call.out_args:
f.write("\t\tstruct ipc_%s_reply reply = {0};\n" % call.name)
else:
f.write("\t\tstruct ipc_result_reply reply = {0};\n")
if call.in_handles:
f.write("\tstruct ipc_result_reply _sync = {XRT_SUCCESS};\n")
if call.out_handles:
f.write("\t\t%s %s[MAX_HANDLES] = {0};\n" % (
call.out_handles.typename, call.out_handles.arg_name))
f.write("\t\t%s %s = {0};\n" % (
call.out_handles.count_arg_type,
call.out_handles.count_arg_name))
f.write("\n")
if call.in_handles:
# We need to fetch these handles separately
f.write("\t\t%s in_%s[MAX_HANDLES] = {0};\n" % (
call.in_handles.typename, call.in_handles.arg_name))
f.write("\t\tstruct ipc_command_msg _handle_msg = {0};\n")
# Let the client know we are ready to receive the handles.
write_invocation(
f,
'xrt_result_t sync_result',
'ipc_send',
(
"(struct ipc_message_channel *)&ics->imc",
"&_sync",
"sizeof(_sync)"
),
indent="\t\t"
)
f.write(";")
write_result_handler(f, "sync_result",
indent="\t\t")
write_invocation(
f,
'xrt_result_t receive_handle_result',
'ipc_receive_handles_' + call.in_handles.stem,
(
"(struct ipc_message_channel *)&ics->imc",
"&_handle_msg",
"sizeof(_handle_msg)",
"in_" + call.in_handles.arg_name,
"msg->"+call.in_handles.count_arg_name
),
indent="\t\t"
)
f.write(";")
write_result_handler(f, "receive_handle_result",
indent="\t\t")
f.write("\t\tif (_handle_msg.cmd != %s) {\n" % str(call.id))
f.write("\t\t\treturn XRT_ERROR_IPC_FAILURE;\n")
f.write("\t\t}\n")
# Write call to ipc_handle_CALLNAME
args = ["ics"]
for arg in call.in_args:
args.append(("&msg->" + arg.name)
if arg.is_aggregate
else ("msg->" + arg.name))
args.extend("&reply." + arg.name for arg in call.out_args)
if call.out_handles:
args.extend(("MAX_HANDLES",
call.out_handles.arg_name,
"&" + call.out_handles.count_arg_name))
if call.in_handles:
args.extend(("&in_%s[0]" % call.in_handles.arg_name,
"msg->"+call.in_handles.count_arg_name))
write_invocation(f, 'reply.result', 'ipc_handle_' +
call.name, args, indent="\t\t")
f.write(";\n")
# TODO do we check reply.result and
# error out before replying if it's not success?
func = 'ipc_send'
args = ["(struct ipc_message_channel *)&ics->imc",
"&reply",
"sizeof(reply)"]
if call.out_handles:
func += '_handles_' + call.out_handles.stem
args.extend(call.out_handles.arg_names)
write_invocation(f, 'xrt_result_t ret', func, args, indent="\t\t")
f.write(";")
f.write("\n\t\treturn ret;\n")
f.write("\t}\n")
f.write('''\tdefault:
\t\tU_LOG_E("UNHANDLED IPC MESSAGE! %d", *ipc_command);
\t\treturn XRT_ERROR_IPC_FAILURE;
\t}
}
''')
f.close()
def generate_server_header(file, p):
"""Generate IPC server header.
Declares handler prototypes to implement,
as well as the prototype for the generated dispatch function.
"""
f = open(file, "w")
f.write(header.format(brief='Generated IPC server code', suffix='_server'))
f.write('''
#pragma once
#include "shared/ipc_protocol.h"
#include "ipc_protocol_generated.h"
#include "server/ipc_server.h"
''')
# This decl is constant, but we must write it here
# because it depends on a generated enum.
write_decl(
f,
"xrt_result_t",
"ipc_dispatch",
[
"volatile struct ipc_client_state *ics",
"ipc_command_t *ipc_command"
]
)
f.write(";\n")
for call in p.calls:
call.write_handler_decl(f)
f.write(";\n")
f.close()
def main():
"""Handle command line and generate a file."""
parser = argparse.ArgumentParser(description='Protocol generator.')
parser.add_argument(
'proto', help='Protocol file to use')
parser.add_argument(
'output', type=str, nargs='+',
help='Output file, uses the name to choose output type')
args = parser.parse_args()
p = Proto.load_and_parse(args.proto)
for output in args.output:
if output.endswith("ipc_protocol_generated.h"):
generate_h(output, p)
if output.endswith("ipc_client_generated.c"):
generate_client_c(output, p)
if output.endswith("ipc_client_generated.h"):
generate_client_h(output, p)
if output.endswith("ipc_server_generated.c"):
generate_server_c(output, p)
if output.endswith("ipc_server_generated.h"):
generate_server_header(output, p)
if __name__ == "__main__":
main()
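# Example invocation (hypothetical paths; the output type is selected purely
# by the file-name suffixes checked in main()):
#   python3 proto.py ipc_protocol.json \
#       ipc_protocol_generated.h ipc_client_generated.c ipc_client_generated.h \
#       ipc_server_generated.c ipc_server_generated.h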
| 30.819026 | 79 | 0.534066 |
6ffe8a183561256d1f648294460c2def7c537ba5
| 1,346 |
py
|
Python
|
VoiceControl/aws_lambda/lambda.py
|
lociii/symcon-voicecontrol
|
40be8c879e9fd2209e484323df6cb32246f67b71
|
[
"MIT"
] | null | null | null |
VoiceControl/aws_lambda/lambda.py
|
lociii/symcon-voicecontrol
|
40be8c879e9fd2209e484323df6cb32246f67b71
|
[
"MIT"
] | null | null | null |
VoiceControl/aws_lambda/lambda.py
|
lociii/symcon-voicecontrol
|
40be8c879e9fd2209e484323df6cb32246f67b71
|
[
"MIT"
] | null | null | null |
import json
import urllib2
import httplib
from uuid import uuid4
import logging
logger = logging.getLogger('lambda')
def build_error_response(reason):
return {
'header': {
'namespace': 'Alexa.ConnectedHome.Control',
'name': reason,
'payloadVersion': '2',
'messageId': str(uuid4())
},
'payload': {}
}
def lambda_handler(event, context):
request = urllib2.Request('https://oauth.ipmagic.de/forward')
request.add_header('Content-Type', 'application/json')
request.add_header('Authorization', event['payload']['accessToken'])
try:
response = urllib2.urlopen(request, json.dumps(event))
except (urllib2.HTTPError, urllib2.URLError, httplib.HTTPException), e:
logger.exception(e)
return build_error_response('TargetConnectivityUnstableError')
except Exception, e:
logger.exception(e)
return build_error_response('DriverInternalError')
if response.getcode() != 200:
logger.error('invalid response %s' % response.getcode())
return build_error_response('DriverInternalError')
try:
response = json.loads(response.read())
except ValueError:
logger.error('failed to decode response')
return build_error_response('DriverInternalError')
return response
| 29.911111 | 75 | 0.663447 |
73a3a2995b1aeb060291d8aa6f129e5ef6ccfb96
| 860 |
py
|
Python
|
Project1/lib.py
|
veronikadim99/Wissenschaftliches-Rechnen
|
3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218
|
[
"Apache-2.0"
] | null | null | null |
Project1/lib.py
|
veronikadim99/Wissenschaftliches-Rechnen
|
3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218
|
[
"Apache-2.0"
] | null | null | null |
Project1/lib.py
|
veronikadim99/Wissenschaftliches-Rechnen
|
3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218
|
[
"Apache-2.0"
] | null | null | null |
import time
import matplotlib.pyplot as plt
def timedcall(fn, *args):
"""
Run a function and measure execution time.
Arguments:
fn : function to be executed
args : arguments to function fn
Return:
dt : execution time
result : result of function
Usage example:
You want to time the function call "C = foo(A,B)".
--> "T, C = timedcall(foo, A, B)"
"""
t0 = time.time()
result = fn(*args)
t1 = time.time()
dt = t1 - t0
return dt, result
def plot_2d(x_data, y_data, labels, title, x_axis, y_axis, x_range):
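    """
    Plot one curve per label over x_data on doubly logarithmic axes.
    Arguments:
    x_data  : common x values
    y_data  : list of y-value series, one entry per label
    labels  : legend labels, one per series
    title   : plot title
    x_axis  : label of the x-axis
    y_axis  : label of the y-axis
    x_range : (min, max) limits for the x-axis
    """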
plt.figure()
for i, label in enumerate(labels):
plt.loglog(x_data, y_data[i], label=label)
plt.grid()
plt.legend(loc='upper left')
plt.title(title)
plt.xlabel(x_axis)
plt.xlim(x_range[0], x_range[1])
plt.ylabel(y_axis)
plt.show()
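# Usage sketch (illustrative values only):
#   dt, total = timedcall(sum, range(10 ** 6))
#   plot_2d([10, 100, 1000], [[0.001, 0.01, 0.1]], ['naive'],
#           'Runtime scaling', 'n', 'seconds', (10, 1000))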
| 20 | 68 | 0.60814 |
79028f363aec5299347603f6c4801f13319c7e79
| 1,165 |
py
|
Python
|
Codeforces_problems/Reverse Binary Strings/solution.py
|
KAHund/CompetitiveCode
|
6ed211a2f795569f5c2f18c2f660520d99d41ca0
|
[
"MIT"
] | 165 |
2020-10-03T08:01:11.000Z
|
2022-03-31T02:42:08.000Z
|
Codeforces_problems/Reverse Binary Strings/solution.py
|
KAHund/CompetitiveCode
|
6ed211a2f795569f5c2f18c2f660520d99d41ca0
|
[
"MIT"
] | 383 |
2020-10-03T07:39:11.000Z
|
2021-11-20T07:06:35.000Z
|
Codeforces_problems/Reverse Binary Strings/solution.py
|
KAHund/CompetitiveCode
|
6ed211a2f795569f5c2f18c2f660520d99d41ca0
|
[
"MIT"
] | 380 |
2020-10-03T08:05:04.000Z
|
2022-03-19T06:56:59.000Z
|
# We need to make the string alternating, i.e. s[i] != s[i+1]. When we reverse
# a substring s[l..r], we change at most two adjacent pairs: (s[l-1], s[l]) and
# (s[r], s[r+1]). Moreover, one of them should be a consecutive "00" pair and
# the other a consecutive "11" pair. So a lower bound on the answer is the
# maximum of the number of "00" pairs and the number of "11" pairs, and this
# bound is always reachable by pairing a "00" with a "11" or with the left or
# right border of s.
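# Worked example (illustrative): s = "10011" contains one "00" pair and one
# "11" pair, so the answer is max(1, 1) = 1; reversing the two middle
# characters "01" (positions 3-4, 1-based) yields the alternating "10101".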
for _ in range(int(input())):
n = int(input())
s = input()
z, o = 0, 0 # will store total number of pairs
zeros, ones = 0, 0 # will store no of pairs in one streak
for el in s:
if el == '1':
ones += 1
# streak of zeros are broken by one so no of pairs of zeros are added to z
z += max(zeros-1, 0)
zeros = 0
if el == '0':
zeros += 1
# streak of ones are broken by one so no of pairs of ones are added to o
o += max(ones-1, 0)
ones = 0
    # pairs are counted only when a streak is broken, so handle the final unbroken streak here
o += max(ones-1, 0)
z += max(zeros-1, 0)
print(max(o, z))
| 36.40625 | 93 | 0.593133 |
f768d46f66b6b97f24f78dfd1b6bc540af553b94
| 162 |
py
|
Python
|
Shivani/leap year.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/leap year.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/leap year.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
n = int(input("enter year\n"))
if n % 400 == 0:
    print("leap year")
elif n % 4 == 0 and n % 100 != 0:
    print("leap year")
else:
    print("not leap year")
| 20.25 | 28 | 0.58642 |
586428275f3b29e74ff970b0acd5d57b3c302f28
| 1,290 |
py
|
Python
|
app/models/user.py
|
zucc-acm-devteam/zuccacm-sso
|
9c7d2f0b9cc069962f32b555152732a98bf2e94a
|
[
"Apache-2.0"
] | null | null | null |
app/models/user.py
|
zucc-acm-devteam/zuccacm-sso
|
9c7d2f0b9cc069962f32b555152732a98bf2e94a
|
[
"Apache-2.0"
] | null | null | null |
app/models/user.py
|
zucc-acm-devteam/zuccacm-sso
|
9c7d2f0b9cc069962f32b555152732a98bf2e94a
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from flask_login import UserMixin
from sqlalchemy import Column, String, Enum
from werkzeug.security import check_password_hash, generate_password_hash
from app import login_manager
from app.libs.enumerate import UserPermission
from app.libs.error_code import AuthFailed
from app.models.base import Base
class User(UserMixin, Base):
fields = ['username', 'nickname', 'permission']
username = Column(String(100), primary_key=True, nullable=False)
nickname = Column(String(100), nullable=False)
password_ = Column('password', String(1000), nullable=False)
permission = Column(Enum(UserPermission), default=UserPermission.Normal)
@property
def id(self):
return self.username
@staticmethod
@login_manager.user_loader
def load_user(id_):
return User.get_by_id(id_)
@staticmethod
@login_manager.unauthorized_handler
def unauthorized_handler():
return AuthFailed()
@property
def password(self):
return self.password_
@password.setter
def password(self, raw):
self.password_ = generate_password_hash(raw)
def check_password(self, raw):
if not self.password_ or not raw:
return False
return check_password_hash(self.password_, raw)
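# Usage sketch (illustrative; persistence/session handling from the project's
# Base class and login manager is assumed, not shown):
#   user = User(username='alice', nickname='Alice')
#   user.password = 'secret'            # stored as a salted hash, never plain
#   assert user.check_password('secret')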
| 27.446809 | 76 | 0.721705 |
54975dd40b2c8606432908f492f3c453698f2239
| 3,790 |
py
|
Python
|
src/visuanalytics/analytics/processing/audio/parts/part.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 1 |
2020-11-27T17:26:27.000Z
|
2020-11-27T17:26:27.000Z
|
src/visuanalytics/analytics/processing/audio/parts/part.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 85 |
2021-01-02T11:38:59.000Z
|
2021-07-26T07:13:47.000Z
|
src/visuanalytics/analytics/processing/audio/parts/part.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 1 |
2021-04-19T06:50:53.000Z
|
2021-04-19T06:50:53.000Z
|
"""
Module containing the basic functions of the different text-generation types for the text-to-speech conversion.
"""
from random import randint
from visuanalytics.analytics.util.step_errors import AudioError, raise_step_error
from visuanalytics.analytics.util.step_utils import execute_type_option, execute_type_compare
from visuanalytics.analytics.util.type_utils import get_type_func, register_type_func
AUDIO_PARTS_TYPES = {}
"""Ein Dictionary bestehend aus allen Audio-Parts-Typ-Methoden. """
@raise_step_error(AudioError)
def audio_parts(values, data):
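    """Executes the matching type function for each entry in values and concatenates the generated text.
    Returns a tuple (text, flag); flag is False as soon as one part yields no text (e.g. file/silent).
    :param values: values from the JSON file
    :param data: data from the API
    """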
return_string = ""
for value in values:
return_value = get_type_func(value, AUDIO_PARTS_TYPES)(value, data)
if not return_value[1]:
return "", False
return_string = return_string + return_value[0]
return return_string, True
def register_audio_parts(func):
"""Registriert die übergebene Funktion und versieht sie mit einem `"try/except"`-Block.
Fügt eine Typ-Funktion dem Dictionary AUDIO_PARTS_TYPES hinzu.
:param func: die zu registrierende Funktion
:return: Funktion mit try/except-Block
"""
return register_type_func(AUDIO_PARTS_TYPES, AudioError, func)
@register_audio_parts
def file(values, data):
"""
    Indicates that an already existing audio file is part of the audio.
    :param values: values from the JSON file
    :param data: data from the API
"""
return "", False
@register_audio_parts
def text(values, data):
"""Gibt den Text unter pattern aus.
Gibt den Text unter pattern aus. Wenn dieser Ersetzungen erwartet, werden diese durchgeführt.
:param values: Werte aus der JSON-Datei
:param data: Daten aus der API
"""
return data.format(values["pattern"], values), True
@register_audio_parts
def compare(values, data):
"""Vergleicht zwei Werte miteinander und führt je nachdem, ob =, !=, < oder >, die danach aufgeführten `"audio_parts"`-Funktionen aus.
Wenn `value_left` gleich `value_right`, führe "transform"-Typen aus on_equal durch.
Wenn `value_left` ungleich `value_right`, führe "transform"-Typen aus on_not_equal durch.
Wenn `value_left` größer `value_right`, führe "transform"-Typen aus on_higher durch.
Wenn `value_left` kleiner `value_right`, führe "transform"-Typen aus on_lower durch.
:param values: Werte aus der JSON-Datei
:param data: Daten aus der API
"""
return audio_parts(execute_type_compare(values, data), data)
@register_audio_parts
def option(values, data):
"""Führt die aufgeführten `"audio_parts"`-Funktionen aus, je nachdem ob ein bestimmter Wert `"true"` oder `"false"` ist.
Wenn der Wert, der in `"check"` steht `"true"` ist, werden die `"audio_parts"`-Funktionen ausgeführt,
die unter `"on_true"` stehen.
Wenn der Wert, der in `"check"` steht `"false"` ist, werden die `"audio_parts"`-Funktionen ausgeführt,
die unter `"on_false"` stehen.
:param values: Werte aus der JSON-Datei
:param data: Daten aus der API
"""
return audio_parts(execute_type_option(values, data), data)
@register_audio_parts
def random_text(values, data):
"""Sucht aus mehreren Strings (Array in pattern) zufällig einen aus.
:param values: Werte aus der JSON-Datei
:param data: Daten aus der API
"""
len_pattern = len(values["pattern"])
if len_pattern == 1:
return data.format(values["pattern"][0], values), True
else:
rand = randint(0, len_pattern - 1)
return data.format(values["pattern"][rand], values), True
@register_audio_parts
def silent(values, data):
"""
    Indicates that a silent audio segment is part of the audio.
    :param values: values from the JSON file
    :param data: data from the API
"""
return "", False
| 33.839286 | 138 | 0.715303 |
b7a81d2dc6c5fb849d41361fefa7c28e9b8b16ed
| 1,122 |
py
|
Python
|
weibo/test/testReCompile.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/test/testReCompile.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
weibo/test/testReCompile.py
|
haiboz/weiboSpider
|
517cae2ef3e7bccd9e1d328a40965406707f5362
|
[
"Apache-2.0"
] | null | null | null |
#encoding:utf8
'''
Created on 2016-04-12
@author: wb-zhaohaibo
'''
import re
from bs4 import BeautifulSoup
html_cont = open("text.html","r").read()
soup = BeautifulSoup(html_cont,"html.parser",from_encoding="utf-8")
# sss = "/u/5888088457?refer_flag=1005050008_"
# pattern = re.compile(r"/u/[0-9]+\?refer_flag=[0-9]+_")
#/u/3738760754?refer_flag=1005050008_
#\/u\/3738760754?refer_flag=1005050008_\
links = soup.find_all("script")
for link in links:
text = link.get_text()
text = text.replace('\/', '\\')
# print text
# links = soup.find_all("a",href=re.compile(r"\/u\/3738760754\?refer_flag=1005050008_"))
# print links
# strinfo = re.compile('href="\/u\/')
# b = strinfo.sub('href="/u/',a)
# print "b="+b
# b = re.compile(r"/u/[0-9]+\?refer_flag=[0-9]+_")
# href="/u/2515950453?refer_flag=1005050005_" class="S_txt1">段晓阳session</a>
a = ' href="/u/2515950453?refer_flag=1005050005_" class="S_txt1">段晓阳session</a> '
# print "a="+a
patten = re.compile(r'href="(\\/u\\/[0-9]+\?refer_flag=[0-9]+_)"')
b = patten.findall(a)
print b
bb = b[0].replace("\\","")
print bb
| 28.05 | 89 | 0.637255 |
4d10985a2b796d551cb3b521b635f3fd15d7848d
| 658 |
py
|
Python
|
backend/migrations/versions/712fec4b34b0_.py
|
davidoesch/platform
|
1eb6f98568cab82e28bd5350beab2042b22d99ed
|
[
"MIT"
] | null | null | null |
backend/migrations/versions/712fec4b34b0_.py
|
davidoesch/platform
|
1eb6f98568cab82e28bd5350beab2042b22d99ed
|
[
"MIT"
] | null | null | null |
backend/migrations/versions/712fec4b34b0_.py
|
davidoesch/platform
|
1eb6f98568cab82e28bd5350beab2042b22d99ed
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 712fec4b34b0
Revises: 523e4c992f6b
Create Date: 2018-08-30 15:43:38.687542
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '712fec4b34b0'
down_revision = '523e4c992f6b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('resource', sa.Column('notes', sa.UnicodeText(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('resource', 'notes')
# ### end Alembic commands ###
| 22.689655 | 82 | 0.693009 |
4206177f4105ea34a996e3e6e54823a220493ded
| 3,031 |
py
|
Python
|
demo.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | null | null | null |
demo.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | 16 |
2020-01-28T23:04:13.000Z
|
2022-03-12T00:02:40.000Z
|
demo.py
|
ChristianKitte/Textextraktion-und-Einordnung-mit-Hilfe-neuronaler-Netze
|
53d3fc6b1c17f31146741cdebd743f4aa12a09e0
|
[
"MIT"
] | null | null | null |
""" Function of demo.py
This script allows you to easily test the functionality of the solution using
images stored in the income directory.
The image stored there is transferred to an instance of the scanner, processed
and then returned as a new image and stored in the outcome directory.
The application of the auto function of the scanner class will also be demonstrated.
"""
import cv2
import constant as const
from annotation_constants.eval_annotation_constants import EVAL_ANNOTATION_CONTANTS
from annotation_constants.neg_annotation_constants import NEG_ANNOTATION_CONTANTS
from annotation_constants.pos_annotation_constants import POS_ANNOTATION_CONTANTS
from scanner import Scanner
def auto(input, output):
"""Examines the image defined as input (path + file name) and generates an output image at
the location defined as output (path + file name).
:param input:Path to the input image
:param output:Path to the output image
"""
try:
scanner = Scanner()
scanner.auto_scann(input, output,
pos_annotation_constants=POS_ANNOTATION_CONTANTS,
neg_annotation_constants=NEG_ANNOTATION_CONTANTS,
eval_annotation_constants=EVAL_ANNOTATION_CONTANTS)
except:
print('Error in method {0} in module {1}'.format('auto', 'demo.py'))
if __name__ == '__main__':
"""Is executed when the file is executed directly. The function performs a text detection and
recognition with the image stored in the variable in_file and stores the result under the name
defined in the variable out_file.
Version 1 takes over the loading and saving. Only the paths are to be specified.
In Version2, the loading and saving processes must be performed manually. Detail screens are
output during processing
"""
# Controls the program run
version = 2
# Input file (from the income directory)
in_file = 'test.jpg'
# Output file (from the outcome directory)
out_file = '001.jpg'
try:
if version == 1:
auto(const.INPUT_DIR + '/' + in_file, const.OUTPUT_DIR + '/' + out_file)
exit(0)
elif version == 2:
scanner = Scanner()
img_in = cv2.imread(const.INPUT_DIR + '/' + in_file)[:, :, ::-1]
if img_in is not None:
img_out = scanner.scann(img=img_in, print_detail=True,
pos_annotation_constants=POS_ANNOTATION_CONTANTS,
neg_annotation_constants=NEG_ANNOTATION_CONTANTS,
eval_annotation_constants=EVAL_ANNOTATION_CONTANTS)
cv2.imwrite(const.OUTPUT_DIR + '/' + out_file, img_out)
print('Finished')
else:
print('Image not readable')
except:
print('Error in method {0} in module {1}'.format('main', 'demo.py'))
| 40.413333 | 100 | 0.646321 |
425e7db076c0e09054a0a98f3e180e19a885d306
| 2,006 |
py
|
Python
|
2019/quals/pwn-secureboot/healthcheck/healthcheck.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 2,757 |
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
2019/quals/pwn-secureboot/healthcheck/healthcheck.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 20 |
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
2019/quals/pwn-secureboot/healthcheck/healthcheck.py
|
iicarus-bit/google-ctf
|
4eb8742bca58ff071ff8f6814d41d9ec7eb1db4b
|
[
"Apache-2.0"
] | 449 |
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import subprocess
import nameko
from nameko.web.handlers import HttpRequestHandler
from nameko.timer import timer
logger = logging.getLogger('healthcheck')
http = HttpRequestHandler.decorator
state = {
'healthy': None
}
class HealthcheckService:
name = 'healthcheck'
@http('GET', '/')
def healthcheck_handler(self, request):
if state['healthy']:
return 200, 'healthy\n'
else:
return 503, 'unhealthy\n'
@timer(interval=60)
def healtcheck(self):
address = os.environ.get('ADDRESS', '127.0.0.1')
port = int(os.environ.get('PORT', '1337'))
retries = 5
while retries > 0:
health = False
try:
health = healthcheck_challenge(address, port)
except Exception as e:
logger.warning('Healthcheck exception: {}'.format(e))
if health:
break
logger.info('Retrying...')
retries -= 1
if health != state['healthy']:
if health:
logger.info('Challenge became healthy.')
else:
logger.info('Challenge became unhealthy.')
state['healthy'] = health
# Implement your healthchecking here.
# Beware, this framework uses eventlet - third party I/O libraries might not
# work. Also, this is Python3.
def healthcheck_challenge(address, port):
result = subprocess.run(['./solve.py', 'r', address, str(port)], stdout=subprocess.PIPE)
return "CTF{" in str(result.stdout)
| 26.746667 | 90 | 0.688933 |
220c2684af594a57dc51144cd540b6a6d406a4f4
| 394 |
py
|
Python
|
Chapter5_DNN/Chapter5_2_BostonRegression/bostonVisualization.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | 11 |
2020-10-12T14:06:31.000Z
|
2022-02-22T09:16:32.000Z
|
Chapter5_DNN/Chapter5_2_BostonRegression/bostonVisualization.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | null | null | null |
Chapter5_DNN/Chapter5_2_BostonRegression/bostonVisualization.py
|
thisisjako/UdemyTF
|
ee4102391ed6bd50f764955f732f5740425a9209
|
[
"MIT"
] | 8 |
2020-10-29T07:53:49.000Z
|
2022-03-17T11:01:20.000Z
|
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_boston
if __name__ == "__main__":
dataset = load_boston()
x = dataset.data
y = dataset.target
df = pd.DataFrame(x, columns=dataset.feature_names)
df["y"] = y
print(df.head(n=10))
print(df.info())
print(df.describe())
df.hist(bins=30, figsize=(15, 15))
plt.show()
| 19.7 | 55 | 0.649746 |
221076f3e4174dbf40154858347cc60f8940a7d3
| 11,647 |
py
|
Python
|
hyperts/experiment.py
|
zhangxjohn/HyperTS
|
c43c8d820d26dd362510997c1c294341279ce1e1
|
[
"Apache-2.0"
] | null | null | null |
hyperts/experiment.py
|
zhangxjohn/HyperTS
|
c43c8d820d26dd362510997c1c294341279ce1e1
|
[
"Apache-2.0"
] | null | null | null |
hyperts/experiment.py
|
zhangxjohn/HyperTS
|
c43c8d820d26dd362510997c1c294341279ce1e1
|
[
"Apache-2.0"
] | null | null | null |
import copy
import numpy as np
import pandas as pd
from hypernets.core import set_random_state
from hypernets.experiment.compete import SteppedExperiment, ExperimentStep, \
EnsembleStep, FinalTrainStep
from hypernets.utils import logging
from hypernets.tabular import get_tool_box
from hypernets.tabular.data_cleaner import DataCleaner
from hyperts.utils import data_ops as dp, consts
logger = logging.get_logger(__name__)
DEFAULT_EVAL_SIZE = 0.2
def _set_log_level(log_level):
logging.set_level(log_level)
class TSDataPreprocessStep(ExperimentStep):
def __init__(self, experiment, name, timestamp_col=None, freq=None,
covariate_cols=None, covariate_data_clean_args=None):
super().__init__(experiment, name)
timestamp_col = [timestamp_col] if isinstance(timestamp_col, str) else timestamp_col
covariate_cols = [covariate_cols] if isinstance(covariate_cols, str) else covariate_cols
self.freq = freq
self.timestamp_col = timestamp_col if timestamp_col is not None else consts.TIMESTAMP
self.covariate_cols = covariate_cols
self.covariate_data_clean_args = covariate_data_clean_args if covariate_data_clean_args is not None else {}
self.covariate_data_clean_args.update({'correct_object_dtype': False})
# fitted
self.covariate_data_cleaner = DataCleaner(**self.covariate_data_clean_args)
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
        # 1. covariate variables data cleaning process
if self.covariate_cols is not None and len(self.covariate_cols) > 0:
X_train = self.covariate_transform(X_train, training=True)
        # 2. target plus covariate processing
train_Xy = pd.concat([X_train, y_train], axis=1)
variable_cols = dp.list_diff(train_Xy.columns, self.timestamp_col)
target_variable_cols = dp.list_diff(variable_cols, self.covariate_cols)
excluded_cols = dp.list_diff(train_Xy.columns, target_variable_cols)
train_Xy = self.series_transform(train_Xy, target_variable_cols)
X_train, y_train = train_Xy[excluded_cols], train_Xy[target_variable_cols]
# 3. eval variables data process
if X_eval is None or y_eval is None:
eval_size = self.experiment.eval_size
if self.task in [consts.TASK_FORECAST, consts.TASK_UNIVARIABLE_FORECAST, consts.TASK_MULTIVARIABLE_FORECAST]:
X_train, X_eval, y_train, y_eval = \
dp.temporal_train_test_split(X_train, y_train, test_size=eval_size)
else:
if self.covariate_cols is not None and len(self.covariate_cols) > 0:
X_eval = self.covariate_transform(X_eval, training=False)
eval_Xy = pd.concat([X_eval, y_eval], axis=1)
eval_Xy = self.series_transform(eval_Xy, target_variable_cols)
X_eval, y_eval = eval_Xy[excluded_cols], eval_Xy[target_variable_cols]
# 4. compute new data shape
data_shapes = {'X_train.shape': X_train.shape,
'y_train.shape': y_train.shape,
'X_eval.shape': None if X_eval is None else X_eval.shape,
'y_eval.shape': None if y_eval is None else y_eval.shape,
'X_test.shape': None if X_test is None else X_test.shape
}
# 5. reset part parameters
self.data_shapes = data_shapes
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
if self.covariate_cols is not None and len(self.covariate_cols) > 0:
X_transform = self.covariate_transform(X, training=False)
X_transform = self.series_transform(X_transform)
else:
X_transform = self.series_transform(X)
return X_transform
def covariate_transform(self, X, training=False):
df_timestamp = X[self.timestamp_col]
if training:
df_covariate, _ = self.covariate_data_cleaner.fit_transform(X[self.covariate_cols])
else:
df_covariate = self.covariate_data_cleaner.transform(X[self.covariate_cols])
        assert df_covariate.shape[0] == X.shape[0], \
            'The row count of the cleaned covariates does not equal that of X_train.'
X = pd.concat([df_timestamp, df_covariate], axis=1)
return X
def series_transform(self, X, target_variable_cols=None):
covar_object_names, covar_float_names = [], []
if self.covariate_cols is not None and len(self.covariate_cols) > 0:
for col in self.covariate_cols:
if X[col].dtypes == consts.DATATYPE_OBJECT:
covar_object_names.append(col)
elif X[col].dtypes == consts.DATATYPE_FLOAT:
covar_float_names.append(col)
if target_variable_cols is not None:
impute_col_names = target_variable_cols + covar_float_names
else:
impute_col_names = covar_float_names
self.freq = self.freq if self.freq is not None else \
dp.infer_ts_freq(X[self.timestamp_col], ts_name=self.timestamp_col[0])
X = dp.drop_duplicated_ts_rows(X, ts_name=self.timestamp_col[0])
X = dp.smooth_missed_ts_rows(X, freq=self.freq, ts_name=self.timestamp_col[0])
if target_variable_cols is not None and len(target_variable_cols) > 0:
X[target_variable_cols] = dp.nan_to_outliers(X[target_variable_cols])
if impute_col_names is not None and len(impute_col_names) > 0:
X[impute_col_names] = dp.multi_period_loop_imputer(X[impute_col_names], freq=self.freq)
if covar_object_names is not None and len(covar_object_names) > 0:
X[covar_object_names] = X[covar_object_names].fillna(method='ffill').fillna(method='bfill')
return X
def get_params(self, deep=True):
params = super(TSDataPreprocessStep, self).get_params()
params['covariate_data_clean_args'] = self.covariate_data_cleaner.get_params()
return params
def get_fitted_params(self):
freq = self.freq if self.freq is not None else None
data_shapes = self.data_shapes if self.data_shapes is not None else {}
return {**super(TSDataPreprocessStep, self).get_fitted_params(),
**data_shapes,
'freq': freq}
class TSSpaceSearchStep(ExperimentStep):
def __init__(self, experiment, name):
super().__init__(experiment, name)
# fitted
self.dataset_id = None
self.model = None
self.history_ = None
self.best_reward_ = None
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
if X_eval is not None:
kwargs['eval_set'] = (X_eval, y_eval)
model = copy.deepcopy(self.experiment.hyper_model) # copy from original hyper_model instance
model.search(X_train, y_train, X_eval, y_eval, **kwargs)
if model.get_best_trial() is None or model.get_best_trial().reward == 0:
raise RuntimeError('Not found available trial, change experiment settings and try again pls.')
self.dataset_id = 'abc' # fixme
self.model = model
self.history_ = model.history
self.best_reward_ = model.get_best_trial().reward
logger.info(f'{self.name} best_reward: {self.best_reward_}')
return self.model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
return X
def is_transform_skipped(self):
return True
def get_fitted_params(self):
return {**super().get_fitted_params(),
'best_reward': self.best_reward_,
'history': self.history_,
}
class TSEnsembleStep(EnsembleStep):
def get_ensemble(self, estimators, X_train, y_train):
# return GreedyEnsemble(self.task, estimators, scoring=self.scorer, ensemble_size=self.ensemble_size)
tb = get_tool_box(X_train, y_train)
if self.task in ['forecast', "multivariate-forecast"]:
ensemble_task = 'regression'
else:
ensemble_task = self.task
return tb.greedy_ensemble(ensemble_task, estimators, scoring=self.scorer, ensemble_size=self.ensemble_size)
class TSExperiment(SteppedExperiment):
def __init__(self, hyper_model, X_train, y_train, X_eval=None, y_eval=None, X_test=None,
eval_size=0.2,
freq=None,
timestamp_col=None,
covariate_cols=None,
covariate_data_clean_args=None,
cv=True, num_folds=3,
task=None,
callbacks=None,
log_level=None,
random_state=None,
ensemble_size=3,
**kwargs):
"""
Parameters
----------
Return
"""
if random_state is None:
random_state = np.random.randint(0, 65535)
set_random_state(random_state)
task = hyper_model.task
# todo: check task
# todo: check scorer
steps = []
# data clean
if task in [consts.TASK_FORECAST, consts.TASK_UNIVARIABLE_FORECAST, consts.TASK_MULTIVARIABLE_FORECAST]:
steps.append(TSDataPreprocessStep(self, consts.StepName_DATA_PREPROCESSING,
freq=freq,
timestamp_col=timestamp_col,
covariate_cols=covariate_cols,
covariate_data_clean_args=covariate_data_clean_args))
# search step
steps.append(TSSpaceSearchStep(self, consts.StepName_SPACE_SEARCHING))
# ensemble step,
# steps.append(TSEnsembleStep(self, StepNames.FINAL_ENSEMBLE, scorer=scorer, ensemble_size=ensemble_size))
steps.append(FinalTrainStep(self, consts.StepName_FINAL_TRAINING, retrain_on_wholedata=False))
# ignore warnings
import warnings
warnings.filterwarnings('ignore')
if log_level is not None:
_set_log_level(log_level)
self.run_kwargs = kwargs
super(TSExperiment, self).__init__(steps,
hyper_model, X_train, y_train, X_eval=X_eval, y_eval=y_eval,
X_test=X_test, eval_size=eval_size, task=task,
id=id,
callbacks=callbacks,
random_state=random_state)
def run(self, **kwargs):
run_kwargs = {**self.run_kwargs, **kwargs}
return super().run(**run_kwargs)
def _repr_html_(self):
try:
from hypernets.hn_widget.hn_widget.widget import ExperimentSummary
from IPython.display import display
display(ExperimentSummary(self))
except:
return self.__repr__()
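# Usage sketch (hypothetical caller; building `hyper_model` from a HyperTS
# search space happens outside this module):
#   experiment = TSExperiment(hyper_model, X_train, y_train,
#                             timestamp_col='timestamp')
#   estimator = experiment.run(max_trials=10)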
| 43.137037 | 122 | 0.626599 |
227bbd6ac7fa871b720a32599c74f87a9867fcc7
| 1,074 |
py
|
Python
|
rating/contact_link.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 19 |
2018-04-20T11:03:41.000Z
|
2022-01-12T20:58:56.000Z
|
rating/contact_link.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 160 |
2018-04-05T16:12:59.000Z
|
2022-03-01T13:01:27.000Z
|
rating/contact_link.py
|
thegreenwebfoundation/green-spider
|
68f22886178bbe5b476a4591a6812ee25cb5651b
|
[
"Apache-2.0"
] | 8 |
2018-11-05T13:07:57.000Z
|
2021-06-11T11:46:43.000Z
|
"""
Checks whether the pages has a link "Kontakt"
"""
from rating.abstract_rater import AbstractRater
class Rater(AbstractRater):
rating_type = 'boolean'
default_value = False
depends_on_checks = ['hyperlinks']
max_score = 1
def __init__(self, check_results):
super().__init__(check_results)
def rate(self):
value = self.default_value
score = 0
urls = 0
urls_with_contact_link = 0
for url in self.check_results['hyperlinks']:
urls += 1
for link in self.check_results['hyperlinks'][url]['links']:
if link['text'].lower() == 'kontakt':
urls_with_contact_link += 1
# make sure we only count 1 for this url
break
if urls > 0 and urls_with_contact_link == urls:
score = self.max_score
value = True
return {
'type': self.rating_type,
'value': value,
'score': score,
'max_score': self.max_score,
}
| 24.409091 | 71 | 0.549348 |
e1c9db7b17e34a6d8d78423ca393b4dc77997b03
| 23,802 |
py
|
Python
|
oldp/apps/cases/processing/processing_steps/extract_refs.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | 3 |
2020-06-27T08:19:35.000Z
|
2020-12-27T17:46:02.000Z
|
oldp/apps/cases/processing/processing_steps/extract_refs.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | null | null | null |
oldp/apps/cases/processing/processing_steps/extract_refs.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | null | null | null |
import logging
import re
import nltk
from django.utils.text import slugify
from oldp.apps.backend.processing import ProcessingError, AmbiguousReferenceError
from oldp.apps.cases.models import Case
from oldp.apps.cases.processing.processing_steps import CaseProcessingStep
from oldp.apps.laws.models import LawBook
from oldp.apps.references.models import CaseReferenceMarker, ReferenceMarker
logger = logging.getLogger(__name__)
class ExtractRefs(CaseProcessingStep):
description = 'Extract references'
law_book_codes = None
def __init__(self, law_refs=True, case_refs=True):
super(ExtractRefs, self).__init__()
self.law_refs = law_refs
self.case_refs = case_refs
def process(self, case: Case) -> Case:
"""
Read case.content, search for references, add ref marker (e.g. [ref=1]xy[/ref]) to text, add ref data to case:
case.refs {
1: {
section: ??,
line: 1,
word: 2,
id: ecli://...,
}
2: {
line: 2,
word: 123,
id: law://de/bgb/123
}
}
Ref data should contain position information, for CPA computations ...
:param case_refs:
:param law_refs:
:param case:
:return:
"""
all_refs = []
# print(case.get_sections())
content = ReferenceMarker.remove_markers(case.content)
if self.law_refs:
content, refs = self.extract_law_refs(case, content, key=len(all_refs))
all_refs.extend(refs)
logger.debug('Extracted law refs: %i' % len(refs))
if self.case_refs:
content, refs = self.extract_case_refs(case, content, key=len(all_refs))
all_refs.extend(refs)
logger.debug('Extracted case refs: %i' % len(refs))
case.content = content
case.reference_markers = all_refs
if not case._state.adding:
case.save_reference_markers()
else:
logger.warning('Reference markers not saved (case is not saved)')
# print(case.references)
# if len(not_found_refs) > 0:
# raise ValueError('Some refs still in the content...')
return case
def test_ref_extraction(self, value):
# (.+?)\\[/ref\\]
value = re.sub(r'\[ref=([0-9]+)\](.+?)\[/ref\]', '______', value)
if re.search(r'§', value, re.IGNORECASE):
return value
else:
return None
def get_law_book_codes(self):
if self.law_book_codes is None:
# Fetch codes from db
# State.objects.values_list(
self.law_book_codes = list(LawBook.objects.values_list('code', flat=True))
logger.debug('Loaded law book codes from db: %i' % len(self.law_book_codes))
# Extend with pre-defined codes
self.law_book_codes.extend(['AsylG', 'VwGO', 'GkG', 'stbstg', 'lbo', 'ZPO', 'LVwG', 'AGVwGO SH', 'BauGB',
'BauNVO', 'ZWStS', 'SbStG', 'StPO', 'TKG'])
return self.law_book_codes
# Returns regex for law book part in reference markers
def get_law_book_ref_regex(self, optional=True, group_name=True, lower=False):
# law_book_codes = list(json.loads(open(self.law_book_codes_path).read()).keys())
law_book_codes = self.get_law_book_codes()
law_book_regex = None
for code in law_book_codes:
if lower:
code = code.lower()
if law_book_regex is None:
# if optional:
# law_book_regex = '('
# else:
# law_book_regex = '('
law_book_regex = ''
# if group_name:
# law_book_regex += '?P<book>'
law_book_regex += code
else:
law_book_regex += '|' + code
# law_book_regex += ')'
# if optional:
# law_book_regex += '?'
return law_book_regex
def get_law_ref_regex(self):
# TODO Regex builder tool? http://regexr.com/
# https://www.debuggex.com/
# ((,|und)\s*((?P<nos>[0-9]+)+)*
# regex += '(\s?([0-9]+|[a-z]{1,2}|Abs\.|Abs|Satz|S\.|Nr|Nr\.|Alt|Alt\.|f\.|ff\.|und|bis|\,|'\
#regex = r'(§|§§|Art.) (?P<sect>[0-9]+)\s?(?P<sect_az>[a-z]*)\s?(?:Abs.\s?(?:[0-9]{1,2})|Abs\s?(?:[0-9]{1,2}))?\s?(?:Satz\s[0-9]{1,2})?\s' + law_book_regex
regex = r'(§|§§|Art\.)\s'
regex += '(\s|[0-9]+|[a-z]|Abs\.|Abs|Satz|S\.|Nr|Nr\.|Alt|Alt\.|f\.|ff\.|und|bis|\,|' \
+ self.get_law_book_ref_regex(optional=False, group_name=False) + ')+'
regex += '\s(' + self.get_law_book_ref_regex(optional=False, group_name=False) + ')'
regex_abs = '((Abs.|Abs)\s?([0-9]+)((,|und|bis)\s([0-9]+))*)*'
regex_a = '(([0-9]+)\s?([a-z])?)'
regex_a += '((,|und|bis)\s*(([0-9]+)\s?([a-z])?)+)*'
# f. ff.
regex_a += '\s?((Abs.|Abs)\s?([0-9]+))*'
# regex_a += '\s?(?:(Abs.|Abs)\s?(?:[0-9]{1,2})\s?((,|und|bis)\s*(([0-9]+)\s?([a-z])?)+)*)?'
# regex_a += '\s?((Satz|S\.)\s[0-9]{1,2})?'
# regex_a += '\s?(((Nr|Nr\.)\s[0-9]+)' + '(\s?(,|und|bis)\s[0-9]+)*' + ')?'
# regex_a += '\s?(((Alt|Alt\.)\s[0-9]+)' + '(\s?(,|und|bis)\s[0-9]+)*' + ')?'
regex_a += '\s'
regex_a += '(' + self.get_law_book_ref_regex(optional=True, group_name=False) + ')'
# regex += regex_a
# regex += '(\s?(,|und)\s' + regex_a + ')*'
#
# logger.debug('Law Regex=%s' % regex)
return regex
def get_law_ref_match_single(self, ref_str):
# Single ref
regex_a = '(Art\.|§)\s'
regex_a += '((?P<sect>[0-9]+)\s?(?P<sect_az>[a-z])?)' # f. ff.
regex_a += '(\s?([0-9]+|[a-z]{1,2}|Abs\.|Abs|Satz|S\.|Nr|Nr\.|Alt|Alt\.|und|bis|,))*'
# regex_a += '\s?(?:(Abs.|Abs)\s?(?:[0-9]{1,2})\s?((,|und|bis)\s*(([0-9]+)\s?([a-z])?)+)*)?'
# regex_a += '\s?((Satz|S\.)\s[0-9]{1,2})?'
# regex_a += '\s?(((Nr|Nr\.)\s[0-9]+)' + '(\s?(,|und|bis)\s[0-9]+)*' + ')?'
# regex_a += '\s?(((Alt|Alt\.)\s[0-9]+)' + '(\s?(,|und|bis)\s[0-9]+)*' + ')?'
regex_a += '\s(?P<book>' + self.get_law_book_ref_regex(optional=False) + ')'
return re.search(regex_a, ref_str)
def get_law_ref_match_multi(self, ref_str):
pattern = r'(?P<delimiter>§§|,|und|bis)\s?'
pattern += '((?P<sect>[0-9]+)\s?'
# pattern += '(?P<sect_az>[a-z])?)'
# (?!.*?bis).*([a-z]).*)
# pattern += '(?P<sect_az>(?!.*?bis).*([a-z]).*)?)'
# (?!(moscow|outside))
# pattern += '(?P<sect_az>(([a-z])(?!(und|bis))))?)'
pattern += '((?P<sect_az>[a-z])(\s|,))?)' # Use \s|, to avoid matching "bis" and "Abs", ...
pattern += '(\s?(Abs\.|Abs)\s?([0-9]+))*'
pattern += '(\s?(S\.|Satz)\s?([0-9]+))*'
# pattern += '(?:\s(Nr\.|Nr)\s([0-9]+))'
# pattern += '(?:\s(S\.|Satz)\s([0-9]+))'
# pattern += '(?:\s(f\.|ff\.))?'
pattern += '(\s?(f\.|ff\.))*'
# pattern += '(\s(Abs.|Abs)\s?([0-9]+)((,|und|bis)\s([0-9]+))*)*'
# pattern += '\s?(?:(Abs.|Abs)\s?(?:[0-9]{1,2})\s?((,|und|bis)\s*(([0-9]+)\s?([a-z])?)+)*)?'
pattern += '\s?(?:(?P<book>' + self.get_law_book_ref_regex() + '))?'
# print('MULTI: ' + ref_str)
# print(pattern)
# logger.debug('Multi ref regex: %s' % pattern)
return re.finditer(pattern, ref_str)
def get_law_id_from_match(self, match):
# print(match.groups())
return 'ecli://de/%s/%s%s' % (
match.group('book').lower(),
int(match.group('sect')),
match.group('sect_az').lower()
)
def extract_law_refs(self, referenced_by: Case, content: str, key: int=0):
"""
§ 3d AsylG
§ 123 VwGO
§§ 3, 3b AsylG
§ 77 Abs. 1 Satz 1, 1. Halbsatz AsylG
§ 3 Abs. 1 AsylG
§ 77 Abs. 2 AsylG
§ 113 Abs. 5 Satz 1 VwGO
§ 3 Abs. 1 Nr. 1 i.V.m. § 3b AsylG
§ 3a Abs. 1 und 2 AsylG
§§ 154 Abs. 1 VwGO
§ 83 b AsylG
§ 167 VwGO iVm §§ 708 Nr. 11, 711 ZPO
§ 167 VwGO i.V.m. §§ 708 Nr. 11, 711 ZPO
§§ 167 Abs. 2 VwGO, 708 Nr. 11, 711 ZPO
§§ 52 Abs. 1; 53 Abs. 2 Nr. 1; 63 Abs. 2 GKG
§ 6 Abs. 5 Satz 1 LBO
§§ 80 a Abs. 3, 80 Abs. 5 VwGO
§ 1 Satz 2 SbStG
§ 2 ZWStS
§ 6 Abs. 2 S. 2 ZWStS
TODO all law-book jurabk
:param referenced_by:
:param key:
:link https://www.easy-coding.de/Thread/5536-RegExp-f%C3%BCr-Gesetze/
:param content:
:return:
"""
logger.debug('Extracting law references')
refs = []
results = list(re.finditer(self.get_law_ref_regex(), content))
marker_offset = 0
logger.debug('Current content value: %s' % content)
logger.debug('Law refs found: %i' % len(results))
for ref_m in results:
ref_str = str(ref_m.group(0)).strip()
law_ids = []
# Handle single and multi refs separately
if re.match(r'^(Art\.|§)\s', ref_str):
law_ids = self.handle_single_law_ref(ref_str, law_ids)
elif re.match(r'^§§\s', ref_str):
law_ids = self.handle_multiple_law_refs(ref_str, law_ids)
else:
raise ProcessingError('Unsupported ref beginning: %s' % ref_str)
ref = CaseReferenceMarker(referenced_by=referenced_by,
text=ref_str,
start=ref_m.start(),
end=ref_m.end(),
line=0) # TODO
ref.set_uuid()
ref.set_references(law_ids)
refs.append(ref)
content, marker_offset = ref.replace_content(content, marker_offset, key + len(refs))
return content, refs
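    # Illustrative expectation (not an executable test): for a sentence
    # containing "gemäß § 3 Abs. 1 AsylG", extract_law_refs yields one marker
    # whose reference dict is {'book': 'asylg', 'sect': '3', 'type': 'law'}.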
def handle_multiple_law_refs(self, ref_str, law_ids):
# Search for multiple refs
mms = self.get_law_ref_match_multi(ref_str)
ids_tmp = []
prev_sect = None
prev_book = None
logger.debug('Multi refs found in: %s' % ref_str)
# Loop over all results
for m in mms:
# If book is not set, use __placeholder__ and replace later
if m.group('book') is not None:
book = m.group('book').lower()
else:
book = '__book__'
# Section must exist
if m.group('sect') is not None:
sect = str(m.group('sect'))
else:
raise ProcessingError('Ref sect is not set')
if m.group('sect_az') is not None:
sect += m.group('sect_az').lower()
law_id = {
'book': book,
'sect': sect,
'type': 'law'
}
logger.debug('Law ID found: %s' % law_id)
# Check for section ranges
if m.group('delimiter') == 'bis':
logger.debug('Handle section range - Add ids from ' + prev_sect + ' to ' + sect)
# TODO how to handle az sects
prev_sect = re.sub('[^0-9]', '', prev_sect)
sect = re.sub('[^0-9]', '', sect)
for between_sect in range(int(prev_sect)+1, int(sect)):
# print(between_sect)
ids_tmp.append({
'book': prev_book,
'sect': between_sect,
'type': 'law'
})
else:
prev_sect = sect
prev_book = book
ids_tmp.append(law_id)
# law_ids.append('multi = ' + ref_str)
# handle __book__
logger.debug('All law ids found: %s' % ids_tmp)
ids_tmp.reverse()
book = None
for id_tmp in ids_tmp:
if id_tmp['book'] != '__book__':
book = id_tmp['book']
elif book is not None:
id_tmp['book'] = book
else:
raise ProcessingError('Cannot determine law book (Should never happen): %s' % ref_str)
law_ids.append(id_tmp)
return law_ids
def handle_single_law_ref(self, ref_str, law_ids):
logger.debug('Single ref found in: %s' % ref_str)
# Single ref
mm = self.get_law_ref_match_single(ref_str)
# Find book and section (only single result possible)
if mm is not None:
# mm.groupdict()
if mm.group('book') is not None:
# Found book
book = mm.group('book').lower()
else:
raise ProcessingError('Ref book is not set: %s ' % ref_str)
if mm.group('sect') is not None:
# Found section
sect = str(mm.group('sect'))
else:
raise ProcessingError('Ref sect is not set')
if mm.group('sect_az') is not None:
# Found section addon
sect += mm.group('sect_az').lower()
law_id = {
'book': book,
'sect': sect,
'type': 'law'
}
logger.debug('Law ID: %s' % law_id)
law_ids.append(law_id)
else:
law_ids.append({'book': 'not matched', 'sect': 'NOT MATCHED (single) %s ' % ref_str})
logger.warning('Law ID could not be matched.')
return law_ids
def clean_text_for_tokenizer(self, text):
"""
Remove elements from text that can make the tokenizer fail.
:param text:
:return:
"""
def repl(m):
return '_' * (len(m.group()))
def repl2(m):
# print(m.group(2))
return m.group(1) + ('_' * (len(m.group(2)) + 1))
# (...) and [...]
text = re.sub(r'\((.*?)\)', repl, text)
# Dates
text = re.sub(r'(([0-9]+)\.([0-9]+)\.([0-9]+)|i\.S\.d\.)', repl, text)
# Abbr.
text = re.sub(r'(\s|\(|\[)([0-9]+|[IVX]+|[a-zA-Z]|sog|ca|Urt|Abs|Nr|lfd|vgl|Rn|Rspr|std|ff|bzw|Art)\.', repl2, text)
# Schl.-Holst.
text = re.sub(r'([a-z]+)\.-([a-z]+)\.', repl, text, flags=re.IGNORECASE)
return text
def get_court_name_regex(self):
"""
Regular expression for finding court names
:return: regex
"""
# TODO Fetch from DB
# TODO generate only once
federal_courts = [
'Bundesverfassungsgericht', 'BVerfG',
'Bundesverwaltungsgericht', 'BVerwG',
'Bundesgerichtshof', 'BGH',
'Bundesarbeitsgericht', 'BAG',
'Bundesfinanzhof', 'BFH',
'Bundessozialgericht', 'BSG',
'Bundespatentgericht', 'BPatG',
'Truppendienstgericht Nord', 'TDG Nord',
'Truppendienstgericht Süd', 'TDG Süd',
'EUGH',
]
states = [
'Berlin',
'Baden-Württemberg', 'BW',
'Brandenburg', 'Brandenburgisches',
'Bremen',
'Hamburg',
'Hessen',
'Niedersachsen',
'Hamburg',
'Mecklenburg-Vorpommern',
'Nordrhein-Westfalen', 'NRW',
'Rheinland-Pfalz',
'Saarland',
'Sachsen',
'Sachsen-Anhalt',
'Schleswig-Holstein', 'Schl.-Holst.', 'SH',
'Thüringen'
]
state_courts = [
'OVG',
'VGH'
]
cities = [
'Baden-Baden',
            'Berlin-Brbg.',
'Wedding',
'Schleswig'
]
city_courts = [
'Amtsgericht', 'AG',
'Landgericht', 'LG',
'Oberlandesgericht', 'OLG',
'OVG'
]
pattern = None
for court in federal_courts:
if pattern is None:
pattern = r'('
else:
pattern += '|'
pattern += court
for court in state_courts:
for state in states:
pattern += '|' + court + ' ' + state
pattern += '|' + state + ' ' + court
for c in city_courts:
for s in cities:
pattern += '|' + c + ' ' + s
pattern += '|' + s + ' ' + c
pattern += ')'
# logger.debug('Court regex: %s' % pattern)
return pattern
def get_file_number_regex(self):
return r'([0-9]+)\s([a-zA-Z]{,3})\s([0-9]+)/([0-9]+)'
def extract_case_refs(self, referenced_by: Case, content: str, key: int=0):
"""
BVerwG, Urteil vom 20. Februar 2013, - 10 C 23.12 -
BVerwG, Urteil vom 27. April 2010 - 10 C 5.09 -
BVerfG, Beschluss vom 10.07.1989, - 2 BvR 502, 1000, 961/86 -
BVerwG, Urteil vom 20.02.2013, - 10 C 23.12 -
OVG Nordrhein-Westfalen, Urteil vom 21.2.2017, - 14 A 2316/16.A -
OVG Nordrhein-Westfalen, Urteil vom 29.10.2012 – 2 A 723/11 -
OVG NRW, Urteil vom 14.08.2013 – 1 A 1481/10, Rn. 81 –
OVG Saarland, Urteil vom 2.2.2017, - 2 A 515/16 -
OVG Rheinland-Pfalz, Urteil vom 16.12.2016, -1A 10922/16 -
Bayrischer VGH, Urteil vom 12.12.16, - 21 B 16.30364
OVG Nordrhein-Westfalen, Urteil vom 21.2.2017, - 14 A 2316/16.A -
Bayrischer VGH, Urteil vom 12.12.2016, - 21 B 16.30372 -
OVG Saarland, Urteil vom 2.2.2017, - 2 A 515/16 -
OVG Rheinland-Pfalz, Urteil vom 16.12.2016, -1A 10922/16 -
VG Minden, Urteil vom 22.12.2016, - 1 K 5137/16.A -
VG Gießen, Urteil vom 23.11.2016, - 2 K 969/16.GI.A
VG Düsseldorf, Urteil vom 24.1.2017, - 17 K 9400/16.A
VG Köln, Beschluss vom 25.03.2013 – 23 L 287/12 -
OVG Schleswig, Beschluss vom 20.07.2006 – 1 MB 13/06 -
Schleswig-Holsteinisches Verwaltungsgericht, Urteil vom 05.082014 – 11 A 7/14, Rn. 37 –
Entscheidung des Bundesverwaltungsgerichts vom 24.01.2012 (2 C 24/10)
EuGH Urteil vom 25.07.2002 – C-459/99 -
TODO all court codes + case types
- look for (Entscheidung|Bechluss|Urteil)
- +/- 50 chars
- find VG|OVG|Verwaltungsgericht|BVerwG|...
- find location
- find file number - ... - or (...)
TODO
Sentence tokenzier
- remove all "special endings" \s([0-9]+|[a-zA-Z]|sog|Abs)\.
- remove all dates
:param key:
:param content:
:return:
"""
refs = []
original = content
text = content
# print('Before = %s' % text)
# Clean up text; replacing all chars that can lead to wrong sentences
text = self.clean_text_for_tokenizer(text)
# TODO
from nltk.tokenize.punkt import PunktParameters
punkt_param = PunktParameters()
abbreviation = ['1', 'e', 'i']
punkt_param.abbrev_types = set(abbreviation)
# tokenizer = PunktSentenceTokenizer(punkt_param)
offset = 0
marker_offset = 0
for start, end in nltk.PunktSentenceTokenizer().span_tokenize(text):
length = end - start
sentence = text[start:end]
original_sentence = original[start:end]
matches = list(re.finditer(r'\((.*?)\)', original_sentence))
logger.debug('Sentence (matches: %i): %s' % (len(matches), sentence))
            logger.debug('Sentence (original): %s' % original_sentence)
for m in matches:
# pass
# print('offset = %i, len = %i' % (offset, len(sentence)))
#
# print('MANGLED: ' + sentence)
logger.debug('Full sentence // UNMANGLED: ' + original_sentence)
# focus_all = original[start+m.start(1):start+m.end(1)].split(',')
focus_all = original_sentence[m.start(1):m.end(1)].split(',')
# print(m.group(1))
logger.debug('In parenthesis = %s' % focus_all)
# Split
for focus in focus_all:
# Search for file number
fns_matches = list(re.finditer(self.get_file_number_regex(), focus))
if len(fns_matches) == 1:
fn = fns_matches[0].group(0)
pos = fns_matches[0].start(0)
logger.debug('File number found: %s' % fn)
# Find court
court_name = None
court_pos = 999999
court_matches = list(re.finditer(self.get_court_name_regex(), original_sentence))
if len(court_matches) == 1:
# Yeah everything is fine
court_name = court_matches[0].group(0)
elif len(court_matches) > 0:
# Multiple results, choose the one that is closest to file number
for cm in court_matches:
if court_name is None or abs(pos - cm.start()) < court_pos:
court_name = cm.group(0)
court_pos = abs(pos - cm.start())
else:
# no court found, guess by search query
# probably the court of the current case? test for "die kammer"
pass
# Find date
# TODO
logger.debug('Filename = %s' % fn)
logger.debug('Courtname = %s' % court_name)
ref_start = start + m.start(1) + pos
ref_end = ref_start + len(fn)
if court_name is None:
# raise )
# TODO Probably same court as current case (use case validation)
logger.error(AmbiguousReferenceError('No court name found - FN: %s' % fn))
                            # logger.debug('Sentence: %s' % original_sentence)
continue
ref_ids = [
{
'type': 'case',
'ecli': 'ecli://de/' + slugify(court_name) + '/' + slugify(fn.replace('/', '-'))
}
]
# TODO maintain order for case+law refs
ref = CaseReferenceMarker(referenced_by=referenced_by,
text=focus,
start=ref_start,
end=ref_end,
line=0) # TODO line number
ref.set_uuid()
ref.set_references(ref_ids)
refs.append(
ref
)
content, marker_offset = ref.replace_content(content, marker_offset, key + len(refs))
pass
elif len(fns_matches) > 1:
                        logger.warning('Multiple file numbers found: %s' % fns_matches)
pass
else:
logger.debug('No file number found')
return content, refs
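# Hedged usage sketch (an illustrative addition, not part of the original
# extractor): exercises only the standalone file-number pattern with plain
# `re`, outside the Case/CaseReferenceMarker machinery above.
if __name__ == '__main__':
    import re
    _pattern = r'([0-9]+)\s([a-zA-Z]{,3})\s([0-9]+)/([0-9]+)'
    _match = re.search(_pattern, 'OVG Saarland, Urteil vom 2.2.2017, - 2 A 515/16 -')
    assert _match is not None and _match.group(0) == '2 A 515/16'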
| 34.696793 | 163 | 0.479245 |
bed101cb50e67e664cfce00e4f5d3b98b401ee2d
| 1,423 |
py
|
Python
|
oneflow/python/ops/transpose_util.py
|
wanghongsheng01/framework_enflame
|
debf613e05e3f5ea8084c3e79b60d0dd9e349526
|
[
"Apache-2.0"
] | 2 |
2021-09-10T00:19:49.000Z
|
2021-11-16T11:27:20.000Z
|
oneflow/python/ops/transpose_util.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | 1 |
2021-06-16T08:37:50.000Z
|
2021-06-16T08:37:50.000Z
|
oneflow/python/ops/transpose_util.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | 1 |
2021-11-10T07:57:01.000Z
|
2021-11-10T07:57:01.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Sequence
def is_perm(perm: Sequence[int],) -> bool:
return list(range(len(perm))) == sorted(list(perm))
# get the perm when you want to transpose specified axis to the last dimension
def get_perm_when_transpose_axis_to_last_dim(num_axes: int, axis: int,) -> tuple:
axis = axis if axis >= 0 else axis + num_axes
assert 0 <= axis < num_axes, "axis out of range"
perm = [dim if dim < axis else dim + 1 for dim in range(num_axes - 1)]
perm.append(axis)
return tuple(perm)
# x == transpose(transpose(x, perm), get_inversed_perm(perm))
def get_inversed_perm(perm: Sequence[int],) -> tuple:
assert is_perm(perm)
inversed_perm = [-1] * len(perm)
for i in range(len(perm)):
inversed_perm[perm[i]] = i
return tuple(inversed_perm)
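# Hedged usage sketch (an illustrative addition, not part of the upstream
# module): checks the two helpers and the documented round-trip property.
if __name__ == "__main__":
    perm = get_perm_when_transpose_axis_to_last_dim(num_axes=4, axis=1)
    assert perm == (0, 2, 3, 1)
    inversed = get_inversed_perm(perm)
    # composing perm with its inverse restores the identity axis order
    assert tuple(perm[i] for i in inversed) == (0, 1, 2, 3)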
| 34.707317 | 81 | 0.730148 |
bedf8e929db12d88cc3efac45c3b428ee3fcffa7
| 1,088 |
py
|
Python
|
python/en/_matplotlib/gallery/lines_bars_and_markers/plotting_coherence_of_two_signals.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_matplotlib/gallery/lines_bars_and_markers/plotting_coherence_of_two_signals.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_matplotlib/gallery/lines_bars_and_markers/plotting_coherence_of_two_signals.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
lines_bars_and_markers/plotting_coherence_of_two_signals.py
Matplotlib > Gallery > Lines, bars and markers > Plotting the coherence of two signals
https://matplotlib.org/gallery/lines_bars_and_markers/cohere.html#sphx-glr-gallery-lines-bars-and-markers-cohere-py
"""
import numpy as np
import matplotlib.pyplot as plt
# Fixing random state for reproducibility
np.random.seed(19680801)
dt = 0.01 # 10ms, sampling interval
fs = 1 / dt # 100Hz, sampling frequency
t = np.arange(0, 30, dt)
nse1 = np.random.randn(len(t)) # white noise 1
nse2 = np.random.randn(len(t)) # white noise 2
# Two signals with a coherent part at 10Hz and a random part
s1 = np.sin(2 * np.pi * 10 * t) + nse1
s2 = np.sin(2 * np.pi * 10 * t) + nse2
# Plot
fig, axs = plt.subplots(2, 1)
axs[0].plot(t, s1, t, s2)
axs[0].set_xlim(0, 2)
axs[0].set_xlabel('time')
axs[0].set_ylabel('s1 and s2')
axs[0].grid(True)
cxy, f = axs[1].cohere(s1, s2, 256, 1. / dt)
axs[1].set_ylabel('coherence')
fig.tight_layout()
plt.show()
| 27.897436 | 115 | 0.672794 |
367126969a4a48f727aa1045f7e7edc7106073ed
| 7,925 |
py
|
Python
|
sdks/pysdk/pyevtsdk/base.py
|
Laighno/evt
|
90b94e831aebb62c6ad19ce59c9089e9f51cfd77
|
[
"MIT"
] | 1,411 |
2018-04-23T03:57:30.000Z
|
2022-02-13T10:34:22.000Z
|
sdks/pysdk/pyevtsdk/base.py
|
Zhang-Zexi/evt
|
e90fe4dbab4b9512d120c79f33ecc62791e088bd
|
[
"Apache-2.0"
] | 27 |
2018-06-11T10:34:42.000Z
|
2019-07-27T08:50:02.000Z
|
sdks/pysdk/pyevtsdk/base.py
|
Zhang-Zexi/evt
|
e90fe4dbab4b9512d120c79f33ecc62791e088bd
|
[
"Apache-2.0"
] | 364 |
2018-06-09T12:11:53.000Z
|
2020-12-15T03:26:48.000Z
|
import json
from pyevt import abi, address, ecc, libevt
libevt.init_lib()
# Type and Structures
class BaseType:
def __init__(self, **kwargs):
self.kwargs = kwargs
def dict(self):
return self.kwargs
def dumps(self):
return json.dumps(self.kwargs)
class User:
def __init__(self):
self.pub_key, self.priv_key = ecc.generate_new_pair()
@staticmethod
def from_string(pub, priv):
user = User()
user.pub_key = ecc.PublicKey.from_string(pub)
user.priv_key = ecc.PrivateKey.from_string(priv)
return user
class AuthorizerRef:
def __init__(self, _type, key):
self.key = key
self.type = _type
def value(self):
return '[%s] %s' % (self.type, self.key)
class Receiver(BaseType):
    def __init__(self, _type, key):
        self.key = key
        self.type = _type

    def value(self):
        # rendered the same way as AuthorizerRef: "[type] key"
        return '[%s] %s' % (self.type, self.key)
class Address:
def __init__(self, from_string=None):
if from_string == None:
self.addr = address.Address.reserved()
else:
self.addr = address.Address.from_string(from_string)
def set_public_key(self, pub_key):
self.addr = address.Address.public_key(pub_key)
return self.addr
def set_generated(self, prefix, key, nonce):
self.addr = address.Address.generated(prefix, key, nonce)
return self.addr
def get_type(self):
return self.addr.get_type()
def __str__(self):
return self.addr.to_string()
class SymbolArgsErrorException(Exception):
def __init__(self):
err = 'Symobl_Args_Error'
super().__init__(self, err)
class Symbol:
def __init__(self, sym_name, sym_id, precision=5):
if precision > 17 or precision < 0:
raise SymbolArgsErrorException
if len(sym_name) > 7 or (not sym_name.isupper()):
raise SymbolArgsErrorException
self.name = sym_name
self.id = sym_id
self.precision = precision
def value(self):
return '%d,S#%d' % (self.precision, self.id)
def new_asset(symbol):
def value(num):
fmt = '%%.%df S#%d' % (symbol.precision, symbol.id)
return fmt % (num)
return value
EvtSymbol = Symbol(sym_name='EVT', sym_id=1, precision=5)
EvtAsset = new_asset(EvtSymbol)
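# Hedged sanity check (an illustrative addition, not part of the SDK): the
# closure above renders an amount with the symbol's precision and id.
assert EvtAsset(1.2) == '1.20000 S#1'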
class AuthorizerWeight(BaseType):
def __init__(self, ref, weight):
super().__init__(ref=ref.value(), weight=weight)
class PermissionDef(BaseType):
# name: permission_name
# threshold: uint32
# authorizers: authorizer_weight[]
def __init__(self, name, threshold, authorizers=[]):
super().__init__(name=name, threshold=threshold,
authorizers=[auth.dict() for auth in authorizers])
def add_authorizer(self, auth, weight):
self.kwargs['authorizers'].append(
AuthorizerWeight(auth, weight).dict())
# Special Type: group
class Node(BaseType):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class NodeTypeException(Exception):
def __init__(self):
err = 'Group_Node_Type_Error'
super().__init__(self, err)
    class NodeArgsException(Exception):
def __init__(self):
err = 'Group_Node_Arguments_Error'
super().__init__(self, err)
    def add_child(self, node):
        if 'key' in self.kwargs:  # Leaf Node
            raise self.NodeTypeException
        if 'nodes' not in self.kwargs:  # Error in parent node
            raise self.NodeArgsException
        self.kwargs['nodes'].append(node.dict())
class RootNode(Node):
def __init__(self, threshold, nodes):
super().__init__(threshold=threshold,
nodes=[node.dict() for node in nodes])
class NonLeafNode(Node):
def __init__(self, threshold, weight, nodes):
super().__init__(threshold=threshold,
weight=weight,
nodes=[node.dict() for node in nodes])
class LeafNode(Node):
def __init__(self, key, weight):
super().__init__(key=key, weight=weight)
class Group(BaseType):
def __init__(self, name, key, root):
super().__init__(name=name, key=key, root=root)
# Abi jsons of Actions
class NewDomainAbi(BaseType):
def __init__(self, name, creator, issue, transfer, manage):
super().__init__(name=name,
creator=creator,
issue=issue.dict(),
transfer=transfer.dict(),
manage=manage.dict())
class UpdateDomainAbi(BaseType):
def __init__(self, name, issue, transfer, manage):
super().__init__(name=name,
issue=None if issue == None else issue.dict(),
transfer=None if transfer == None else transfer.dict(),
manage=None if manage == None else manage.dict())
class IssueTokenAbi(BaseType):
def __init__(self, domain, names, owner):
super().__init__(domain=domain,
names=names,
owner=owner)
class TransferAbi(BaseType):
def __init__(self, domain, name, to, memo):
super().__init__(domain=domain,
name=name,
to=to,
memo=memo)
class DestroyTokenAbi(BaseType):
def __init__(self, domain, name):
super().__init__(domain=domain,
name=name)
class NewGroupAbi(BaseType):
def __init__(self, name, group):
super().__init__(name=name,
group=group)
class UpdateGroupAbi(BaseType):
def __init__(self, name, group):
super().__init__(name=name,
group=group)
class AddMetaAbi(BaseType):
def __init__(self, key, value, creator):
super().__init__(key=key, value=value, creator=creator)
class NewFungibleAbi(BaseType):
def __init__(self, name, sym_name, sym, creator, issue, manage, total_supply):
super().__init__(name=name, sym_name=sym_name, sym=sym, creator=creator,
issue=issue.dict(), manage=manage.dict(), total_supply=total_supply)
class UpdFungibleAbi(BaseType):
def __init__(self, sym_id, issue, manage):
super().__init__(sym_id=sym_id,
issue=None if issue == None else issue.dict(),
manage=None if manage == None else manage.dict())
class IssueFungibleAbi(BaseType):
def __init__(self, address, number, memo):
super().__init__(address=address, number=number, memo=memo)
class TransferFtAbi(BaseType):
def __init__(self, _from, to, number, memo):
args = {'from': _from, 'to': to, 'number': number, 'memo': memo}
super().__init__(**args)
class EVT2PEVTAbi(BaseType):
def __init__(self, _from, to, number, memo):
args = {'from': _from, 'to': to, 'number': number, 'memo': memo}
super().__init__(**args)
class NewSuspendAbi(BaseType):
def __init__(self, name, proposer, trx):
super().__init__(name=name, proposer=proposer, trx=trx)
class AprvSuspendAbi(BaseType):
def __init__(self, name, signatures):
super().__init__(name=name, signatures=signatures)
class CancelSuspendAbi(BaseType):
def __init__(self, name):
super().__init__(name=name)
class ExecSuspendAbi(BaseType):
def __init__(self, name, executor):
super().__init__(name=name, executor=executor)
class EveripassAbi(BaseType):
def __init__(self, link):
super().__init__(link=link)
class EveripayAbi(BaseType):
def __init__(self,payee, number, link):
super().__init__(payee=payee, number=number, link=link)
class ProdvoteAbi(BaseType):
def __init__(self, producer, key, value):
super().__init__(producer=producer, key=key, value=value)
| 28.102837 | 93 | 0.614006 |
3d598b2423d1ed1482449026604af7fc10ae1f71
| 1,400 |
py
|
Python
|
task/bert_base.py
|
CcTtry/PipeSwitch
|
c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e
|
[
"Apache-2.0"
] | null | null | null |
task/bert_base.py
|
CcTtry/PipeSwitch
|
c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e
|
[
"Apache-2.0"
] | null | null | null |
task/bert_base.py
|
CcTtry/PipeSwitch
|
c6d632ee20b6dbbaea9a6fb95b9ea0ed4bbbf67e
|
[
"Apache-2.0"
] | null | null | null |
import torch
import task.common as util
from util.utils import timestamp
def import_model():
model = torch.hub.load('huggingface/pytorch-transformers:v2.5.0', 'model',
'bert-base-cased')
util.set_fullname(model, 'bert_base')
#hook for start up time
#need to output the result somewhere else
'''for name, layer in model.named_children():
layer.__name__ = name
layer.register_forward_hook(
lambda layer, _, output: timestamp('first_layer', 'forward_computed')
)
break #only the first layer'''
return model
def import_data(batch_size):
data_0 = torch.randint(5000, size=[batch_size, 251])
data_1 = torch.randint(low=0, high=2, size=[batch_size, 251])
data = torch.cat((data_0.view(-1), data_1.view(-1)))
target_0 = torch.rand(batch_size, 251, 768)
target_1 = torch.rand(batch_size, 768)
target = (target_0, target_1)
return data, target
def partition_model(model):
group_list = []
childs = list(model.children())
group_list.append([childs[0]])
for c in childs[1].children():
for sc in c.children():
for ssc in sc.children():
group_list.append([ssc])
for c in childs[2].children():
group_list.append([c])
assert len(childs) == 3
return group_list
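# Hedged shape check (an illustrative addition, not part of PipeSwitch):
# import_data packs token ids and mask ids into one flat tensor of length
# 2 * batch_size * 251, while the targets keep their original shapes.
if __name__ == '__main__':
    data, target = import_data(batch_size=2)
    assert data.shape == (2 * 2 * 251,)
    assert target[0].shape == (2, 251, 768) and target[1].shape == (2, 768)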
| 29.166667 | 82 | 0.610714 |
1823597198c3561b7a515bdedceca880ca3144ad
| 571 |
py
|
Python
|
top/clearlight/base/runoob/print/percent_oper.py
|
ClearlightY/Python_learn
|
93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232
|
[
"Apache-2.0"
] | 1 |
2020-01-16T09:23:43.000Z
|
2020-01-16T09:23:43.000Z
|
top/clearlight/base/runoob/print/percent_oper.py
|
ClearlightY/Python_learn
|
93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232
|
[
"Apache-2.0"
] | null | null | null |
top/clearlight/base/runoob/print/percent_oper.py
|
ClearlightY/Python_learn
|
93b9b7efae5a1cf05faf8ee7c5e36dcc99c7a232
|
[
"Apache-2.0"
] | null | null | null |
'''
The content after the first % is the display format: 6 is the field width,
3 is the number of decimal places, and f marks a floating-point type.
The second % is followed by the value to display; the output is right-aligned,
and since "2.300" is only 5 characters wide, one leading space is printed.
'''
print("%6.3f" % 2.3)
# 2.300
'''
x selects hexadecimal output; the display width is 10, so "+a" is padded
with 8 leading spaces.
'''
print("%+10x" % 10)
# +a
'''
%s  string (rendered with str())
%r  string (rendered with repr())
%c  single character
%b  binary integer (note: not actually supported by Python's %-formatting)
%d  decimal integer
%i  decimal integer
%o  octal integer
%x  hexadecimal integer
%e  exponential notation (lower-case e)
%E  exponential notation (upper-case E)
%f  floating point
%F  floating point, same as %f
%g  exponent (e) or float, depending on the displayed length
%G  exponent (E) or float, depending on the displayed length
%%  the literal character "%"
'''
print("%-5x" % -10)
# -a
pi = 3.1415
print("pi的值是%s" % pi)
print("pi的值是%.8f" % pi)
# pi的值是3.1415
# pi的值是3.14150000
| 14.641026 | 46 | 0.534151 |
62ca481b929979c6b203219b94afc72421886bd5
| 915 |
py
|
Python
|
rev/rev-optimizeme/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 2 |
2021-08-09T17:08:12.000Z
|
2021-08-09T17:08:17.000Z
|
rev/rev-optimizeme/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
rev/rev-optimizeme/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 1 |
2021-10-09T16:51:56.000Z
|
2021-10-09T16:51:56.000Z
|
from Crypto.Util.number import long_to_bytes
def optimizedlcg(b, p, state, rounds): # optimized LCG where a = 2
state = pow(2,rounds,p) * state + b * (pow(2,rounds,p) - 1)
return state
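# Hedged sanity check (an illustrative addition, not part of the original
# solver): for tiny parameters the closed form above must agree with stepping
# the LCG naively, since state_{n+1} = 2*state_n + b implies
# state_r = 2^r * state_0 + b * (2^r - 1) (mod p).
def _naive_lcg(b, p, state, rounds):
    for _ in range(rounds):
        state = (2 * state + b) % p
    return state
assert optimizedlcg(3, 97, 5, 10) % 97 == _naive_lcg(3, 97, 5, 10)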
p = 48569718278242303506230209619975923780743060131772701186017298107559924284242635979759825505782161830376129653325924722665978898131985179210937191151223888128070809835310745199785794930057364029265229452480206055255713317889077410854999087437697415991657128058495524746236464800864688806069056279502078609163
a = 2
b = 3
rounds = p >> 3
state = 21168651249892365486136418125726362865327546384637160303310624426991883020070096339748008613139669972987435197171828010807801735363313790068442787546235698438618519181810304746021912309785611083417111013075787209062958230479598507954803956586021189590329762702913302100337042009595481240883581225904761204891
print(long_to_bytes(optimizedlcg(b,p,state,rounds) % p).decode())
| 70.384615 | 316 | 0.89071 |
c5081669b032f058e7973d4092f171de22b88ee5
| 1,566 |
py
|
Python
|
examples/Introduction/recompile_grammar.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | 2 |
2020-12-25T19:37:42.000Z
|
2021-03-26T04:59:12.000Z
|
examples/Introduction/recompile_grammar.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | 6 |
2018-08-07T22:48:52.000Z
|
2021-10-07T18:38:20.000Z
|
examples/Introduction/recompile_grammar.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""recompile_grammar.py - recompiles any .ebnf files in the current
directory if necessary
Author: Eckhart Arnold <[email protected]>
Copyright 2017 Bavarian Academy of Sciences and Humanities
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
sys.path.extend([os.path.join('..', '..'), '..', '.'])
flag = os.path.exists('LyrikParser.py')
from DHParser.dsl import recompile_grammar
if not recompile_grammar('Lyrik.ebnf', force=True):
with open('Lyrik_ebnf_ERRORS.txt') as f:
print(f.read())
sys.exit(1)
# Not needed if DHParser was installed on the system. Just a little
# service for those who have merely checked out the git repository,
# in particular for those reading the Tutorial in the Introduction
if not flag:
with open('LyrikParser.py', 'r') as f:
script = f.read()
i = script.find('import sys') + 10
script = script[:i] + "\nsys.path.extend([os.path.join('..', '..'), '..', '.'])\n" + script [i:]
with open('LyrikParser.py', 'w') as f:
f.write(script)
| 33.319149 | 100 | 0.698595 |
c522964a4aea77fb16681473a187333b6dc6dfdd
| 6,982 |
py
|
Python
|
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema/distributed_compiled_unit.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema/distributed_compiled_unit.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline_schema/distributed_compiled_unit.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from copy import deepcopy
from logging import getLogger
from typing import Any, List, Optional, Tuple, Union
from dask import config
from distributed import Client
from watchmen_auth import PrincipalService
from watchmen_data_kernel.meta import TopicService
from watchmen_data_kernel.storage import TopicTrigger
from watchmen_data_kernel.storage_bridge import PipelineVariables
from watchmen_data_kernel.topic_schema import TopicSchema
from watchmen_model.admin import Pipeline, PipelineStage, PipelineUnit, Topic, User
from watchmen_model.common import DataModel, TopicId
from watchmen_model.pipeline_kernel import MonitorLogStage, MonitorLogUnit
from watchmen_model.pipeline_kernel.pipeline_monitor_log import construct_unit
from watchmen_pipeline_kernel.common import ask_parallel_actions_count, ask_parallel_actions_dask_temp_dir, \
ask_parallel_actions_dask_use_process, PipelineKernelException
from watchmen_pipeline_kernel.pipeline_schema_interface import CreateQueuePipeline
from watchmen_pipeline_kernel.topic import RuntimeTopicStorages
from watchmen_utilities import ArrayHelper
from .compiled_single_unit import CompiledSingleUnit
logger = getLogger(__name__)
class DistributedUnitLoop:
pipeline: Pipeline
stage: PipelineStage
unit: PipelineUnit
principalService: PrincipalService
pipelineVariables: PipelineVariables
loopVariableName: str
loopVariableValues: List[Any]
def with_unit(self, pipeline: Pipeline, stage: PipelineStage, unit: PipelineUnit) -> DistributedUnitLoop:
self.pipeline = pipeline
self.stage = stage
self.unit = unit
return self
def with_principal_service(self, principal_service: PrincipalService) -> DistributedUnitLoop:
self.principalService = principal_service
return self
def with_pipeline_variables(self, pipeline_variables: PipelineVariables) -> DistributedUnitLoop:
self.pipelineVariables = pipeline_variables
return self
def with_loop_variable_values(
self, loop_variable_name: str, loop_variable_values: List[Any]) -> DistributedUnitLoop:
self.loopVariableName = loop_variable_name
self.loopVariableValues = loop_variable_values
return self
def distribute(self, stage_monitor_log: MonitorLogStage, new_pipeline: CreateQueuePipeline) -> bool:
result = distribute_unit_loop(self)
return ArrayHelper(result.items) \
.map(lambda x: handle_loop_item_result(x, stage_monitor_log, new_pipeline, self.principalService)) \
.every(lambda x: x)
class DistributedUnitLoopItemResult(DataModel):
log: MonitorLogUnit
triggered: List[Tuple[TopicId, TopicTrigger]]
success: bool
def __getstate__(self):
return self.to_dict()
def __setstate__(self, state):
for key, value in state.items():
self.__setattr__(key, value)
    def __setattr__(self, name, value):
        if name == 'log':
            super().__setattr__(name, construct_unit(value))
        elif name == 'triggered':
            super().__setattr__(name, construct_triggers(value))
        else:
            super().__setattr__(name, value)
def construct_triggers(triggered: Optional[list] = None) -> Optional[List[Tuple[TopicId, TopicTrigger]]]:
if triggered is None:
return None
else:
return ArrayHelper(triggered).map(lambda x: construct_trigger(x)).to_list()
def construct_trigger(
trigger: Optional[Tuple[TopicId, Union[dict, TopicTrigger]]] = None
) -> Optional[Tuple[TopicId, TopicTrigger]]:
if trigger is None:
return None
else:
if isinstance(trigger[1], dict):
topic_id = trigger[0]
topic_trigger = TopicTrigger(**trigger[1])
return topic_id, topic_trigger
else:
return trigger
class DistributedUnitLoopResult(DataModel):
items: List[DistributedUnitLoopItemResult]
def get_topic_service(principal_service: PrincipalService) -> TopicService:
return TopicService(principal_service)
# noinspection DuplicatedCode
def find_topic_schema(topic_id: TopicId, principal_service: PrincipalService) -> TopicSchema:
topic_service = get_topic_service(principal_service)
topic: Optional[Topic] = topic_service.find_by_id(topic_id)
if topic is None:
raise PipelineKernelException(f'Topic[id={topic_id}] not found.')
schema = topic_service.find_schema_by_name(topic.name, principal_service.get_tenant_id())
if schema is None:
raise PipelineKernelException(f'Topic schema[id={topic_id}] not found.')
return schema
def handle_loop_item_result(
result: DistributedUnitLoopItemResult,
stage_monitor_log: MonitorLogStage, new_pipeline: CreateQueuePipeline,
principal_service: PrincipalService
) -> bool:
stage_monitor_log.units.append(result.log)
ArrayHelper(result.triggered).each(lambda x: new_pipeline(find_topic_schema(x[0], principal_service), x[1]))
return result.success
class DaskClientHolder:
initialized: bool = False
client: Optional[Client] = None
def initialize(self) -> Client:
if not self.initialized:
config.set(temporary_directory=ask_parallel_actions_dask_temp_dir())
self.client = Client(
processes=ask_parallel_actions_dask_use_process(),
threads_per_worker=1,
n_workers=ask_parallel_actions_count(),
)
self.initialized = True
return self.client
def ask_client(self) -> Client:
return self.initialize()
dask_client_holder = DaskClientHolder()
def to_dask_args(loop: DistributedUnitLoop, variableValue: Any) -> List[Any]:
cloned = loop.pipelineVariables.clone()
cloned.put(loop.loopVariableName, deepcopy(variableValue))
return [
loop.pipeline,
loop.stage,
loop.unit,
User(
userId=loop.principalService.get_user_id(),
name=loop.principalService.get_user_name(),
tenantId=loop.principalService.get_tenant_id(),
role=loop.principalService.get_user_role()
),
cloned
]
def distribute_single_unit(
pipeline: Pipeline, stage: PipelineStage, unit: PipelineUnit,
user: User, pipeline_variables: PipelineVariables
) -> DistributedUnitLoopItemResult:
principal_service = PrincipalService(user)
compiled_unit = CompiledSingleUnit(
pipeline=pipeline, stage=stage, unit=unit, principal_service=principal_service)
stage_monitor_log = MonitorLogStage(units=[])
triggered: List[Tuple[TopicId, TopicTrigger]] = []
def new_pipeline(schema: TopicSchema, trigger: TopicTrigger) -> None:
triggered.append((schema.get_topic().topicId, trigger))
success = compiled_unit.run(
variables=pipeline_variables,
new_pipeline=new_pipeline, stage_monitor_log=stage_monitor_log,
storages=RuntimeTopicStorages(principal_service), principal_service=principal_service
)
return DistributedUnitLoopItemResult(log=stage_monitor_log.units[0], triggered=triggered, success=success)
def distribute_unit_loop(loop: DistributedUnitLoop) -> DistributedUnitLoopResult:
dask_client = dask_client_holder.ask_client()
futures = ArrayHelper(loop.loopVariableValues) \
.map(lambda variableValue: to_dask_args(loop, variableValue)) \
.map(lambda x: dask_client.submit(distribute_single_unit, *x, pure=False)) \
.to_list()
results = dask_client.gather(futures)
return DistributedUnitLoopResult(items=results)
| 33.729469 | 109 | 0.804354 |
770479aa701daa02e80467347cebb4bb227afc99
| 764 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 98/98.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 98/98.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 98/98.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
#Create a program that asks the user to submit text through a GUI
from tkinter import *
window = Tk()
file = open("user_gui.txt", "a+")
def add():
file.write(user_value.get() + "\n")
entry.delete(0, END)
def save():
global file
file.close()
file = open("user_gui.txt", "a+")
def close():
    file.close()
window.destroy()
user_value = StringVar()
entry = Entry(window, textvariable=user_value)
entry.grid(row=0, column=0)
button_add = Button(window, text="Add line", command=add)
button_add.grid(row=0, column=1)
button_save = Button(window, text="Save changes", command=save)
button_save.grid(row=0, column=2)
button_close = Button(window, text="Save and Close", command=close)
button_close.grid(row=0,column=3)
window.mainloop()
| 21.222222 | 67 | 0.693717 |
774a6b50cee68a0aa0d378c50037c731e744d729
| 154 |
py
|
Python
|
Kirk Byers - Python for Network Engineers/Lesson 1/Exercise 1.py
|
Caradawg/HBS
|
467b73c52bbba7b43f37a62e4302a4ff84ff0684
|
[
"MIT"
] | null | null | null |
Kirk Byers - Python for Network Engineers/Lesson 1/Exercise 1.py
|
Caradawg/HBS
|
467b73c52bbba7b43f37a62e4302a4ff84ff0684
|
[
"MIT"
] | null | null | null |
Kirk Byers - Python for Network Engineers/Lesson 1/Exercise 1.py
|
Caradawg/HBS
|
467b73c52bbba7b43f37a62e4302a4ff84ff0684
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
ip_addr1 = '192.168.16.1'
ip_addr2 = '10.10.1.1'
ip_addr3 = '172.16.31.17'
print(ip_addr1, ip_addr2, ip_addr3)
| 22 | 38 | 0.720779 |
b27b9e614f3fb3036e8d4587d028456d7675dc31
| 1,430 |
py
|
Python
|
seq2.py
|
aertoria/MiscCode
|
a2e94d0fe0890e6620972f84adcb7976ca9f1408
|
[
"Apache-2.0"
] | null | null | null |
seq2.py
|
aertoria/MiscCode
|
a2e94d0fe0890e6620972f84adcb7976ca9f1408
|
[
"Apache-2.0"
] | null | null | null |
seq2.py
|
aertoria/MiscCode
|
a2e94d0fe0890e6620972f84adcb7976ca9f1408
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
'''
Given a string S and a string T, count the number of distinct subsequences of T in S.
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ACE" is a subsequence of "ABCDE" while "AEC" is not).
Here is an example:
S = "rabbbit", T = "rabbit"
index: 0 1 2 3 4 5 6
S:     r a b b b i t
T = "rabbit" matches in three ways, each skipping one of the three b's:
       r a b b . i t
       r a b . b i t
       r a . b b i t
Return 3.
'''
class Solution:
# @param {string} s
# @param {string} t
# @return {integer}
result=0
def numDistinct(self, s, t):
list_s=' '.join(s).split(' ')#r a b b b i t
list_t=' '.join(t).split(' ')#r a b b i t
self.rec_search(0,0,list_s,list_t)
print self.result
return self.result
def rec_search(self,index_t,index_s,list_s,list_t):
while index_s < len(list_s):
target=list_t[index_t]
if list_s[index_s] == target:
#print index_s,list_t[index_t]
if index_t+1<len(list_t):
self.rec_search(index_t+1,index_s+1,list_s,list_t)
else:
#print 'found finally a match'
self.result=self.result+1
index_s = index_s+1
return 1
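# Hedged alternative sketch (an illustrative addition, not from the original
# file): the standard O(len(s) * len(t)) dynamic programme for the same count.
def num_distinct_dp(s, t):
    # dp[j] = number of distinct subsequences of t[:j] found so far
    dp = [1] + [0] * len(t)
    for ch in s:
        # walk j backwards so each character of s is consumed at most once
        for j in range(len(t), 0, -1):
            if t[j - 1] == ch:
                dp[j] += dp[j - 1]
    return dp[len(t)]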
S = "BABCDABCA"
T = "ABC"
S = "rabbbit"
T = "rabbit"
S="aabdbaabeeadcbbdedacbbeecbabebaeeecaeabaedadcbdbcdaabebdadbbaeabdadeaabbabbecebbebcaddaacccebeaeedababedeacdeaaaeeaecbe"
T="bddabdcae"
solution = Solution()
solution.numDistinct(S,T)
| 23.833333 | 262 | 0.691608 |
a73f3a6d589e37cbfa96660cb3a174536d13635f
| 1,819 |
py
|
Python
|
ImagePyramid.py
|
saikat-d/DIP2018-Midterm
|
06c3a74ae4fefc2ec6d47d970bf86ab8062df1bf
|
[
"BSD-2-Clause"
] | null | null | null |
ImagePyramid.py
|
saikat-d/DIP2018-Midterm
|
06c3a74ae4fefc2ec6d47d970bf86ab8062df1bf
|
[
"BSD-2-Clause"
] | null | null | null |
ImagePyramid.py
|
saikat-d/DIP2018-Midterm
|
06c3a74ae4fefc2ec6d47d970bf86ab8062df1bf
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 20:07:42 2018
@author: Saikat
"""
import numpy as np
import cv2
from cv2 import ml
from matplotlib import pyplot as plt
from sklearn.metrics import roc_curve, auc
from os.path import isfile,join
DB_path = 'images\\Pyramid\\'
RES_path = 'images\\Pyramid\\res\\'
BOW_path = 'images\\SVM\\bow\\'
siftdata = []
datalabel = []
for i in range(2,5):
dbimname = str(i) + '.jpg'
#testimname = str(i) + '.jpg'
dbimgpath = join(DB_path,dbimname)
#testimgpath = join(TEST_path,testimname)
if isfile(dbimgpath) :
img2 = cv2.imread(dbimgpath,cv2.IMREAD_COLOR) # queryImage
#img1 = cv2.imread(testimgpath,cv2.IMREAD_COLOR) # trainImage
#cv2.cvtColor(img1,cv2.COLOR_BGR2RGB,img1)
#cv2.cvtColor(img2,cv2.COLOR_BGR2RGB,img2)
else :
print("Worng Path : " + dbimgpath)
imgg = img2
imgl = img2
for j in range(0,5):
imgg = cv2.GaussianBlur(imgg,(5,5),0)
imggt = np.uint8(imgg)
ret2,th2 = cv2.threshold(imggt[:,:,0],0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imwrite(join(RES_path,"gaussian\\"+str(i)+"_"+str(j)+"_t.jpg"),th2)
cv2.imwrite(join(RES_path,"gaussian\\"+str(i)+"_"+str(j)+".jpg"),imgg)
imgl = cv2.Laplacian(imgl,cv2.CV_32F,ksize=5)
cv2.imwrite(join(RES_path,"laplacian\\"+str(i)+"_"+str(j)+".jpg"),imgl)
imgl_ = np.array(imgl)
imgl_ *= 255.0/imgl_.max()
#cv2.imwrite(join(RES_path,"laplacian\\"+str(i)+"_"+str(j)+"_c.jpg"),imgl_)
imglt = np.uint8(imgl_)
ret1,th1 = cv2.threshold(imglt[:,:,1],0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imwrite(join(RES_path,"laplacian\\"+str(i)+"_"+str(j)+"_t.jpg"),th1)
| 30.830508 | 86 | 0.59978 |
a7d5d185324a1cc56c12cf84274cd09c06b3df22
| 1,153 |
py
|
Python
|
chapter6/trainninganalysis.py
|
yangzhijiang/GeektimeTensorflow
|
80479426a216d1d27fc78e53c581008ccec46cbe
|
[
"MIT"
] | 1 |
2020-02-16T13:31:42.000Z
|
2020-02-16T13:31:42.000Z
|
chapter6/trainninganalysis.py
|
yangzhijiang/GeektimeTensorflow
|
80479426a216d1d27fc78e53c581008ccec46cbe
|
[
"MIT"
] | null | null | null |
chapter6/trainninganalysis.py
|
yangzhijiang/GeektimeTensorflow
|
80479426a216d1d27fc78e53c581008ccec46cbe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2019/3/13 21:49
# @Author : LunaFire
# @Email : [email protected]
# @File : trainninganalysis.py
import glob
import pickle
import matplotlib.pyplot as plt
HISTORY_DIR = '../history/captcha/'  # path to the training-history files
def plot_training(history=None, metric='acc', title='Model Accuracy', loc='lower right'):
model_list = []
fig = plt.figure(figsize=(10, 8))
for key, val in history.items():
        # note: rstrip('.history') strips a trailing character set, not the suffix
        name = key.split('\\')[-1]
        if name.endswith('.history'):
            name = name[:-len('.history')]
        model_list.append(name)
plt.plot(val[metric])
plt.title(title)
plt.ylabel(metric)
plt.xlabel('epoch')
plt.legend(model_list, loc=loc)
plt.show()
if __name__ == '__main__':
history = {}
for filename in glob.glob(HISTORY_DIR + '*.history'):
with open(filename, 'rb') as f:
print(filename)
history[filename] = pickle.load(f)
plot_training(history)
plot_training(history, metric='loss', title='Model Loss', loc='upper right')
plot_training(history, metric='val_acc', title='Model Accuracy(val)')
plot_training(history, metric='val_loss', title='Model Loss(val)', loc='upper right')
| 28.825 | 89 | 0.640937 |
38fe6f07d73718a136466c655babdda74ad2401f
| 90,776 |
py
|
Python
|
_Dist/NeuralNetworks/DistBase.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 1,107 |
2016-09-21T02:18:36.000Z
|
2022-03-29T02:52:12.000Z
|
_Dist/NeuralNetworks/DistBase.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 18 |
2016-12-22T10:24:47.000Z
|
2022-03-11T23:18:43.000Z
|
_Dist/NeuralNetworks/DistBase.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 776 |
2016-12-21T12:08:08.000Z
|
2022-03-21T06:12:08.000Z
|
import os
import sys
root_path = os.path.abspath("../../")
if root_path not in sys.path:
sys.path.append(root_path)
import time
import math
import random
import pickle
import shutil
import logging
import itertools
import collections
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
tf.logging.set_verbosity(tf.logging.FATAL)
from copy import deepcopy
from mpl_toolkits.mplot3d import Axes3D
from Util.ProgressBar import ProgressBar
from _Dist.NeuralNetworks.NNUtil import *
from _Dist.NeuralNetworks.Base import Generator
class DataCacheMixin:
@property
def data_folder(self):
return self.data_info.get("data_folder", "_Data")
@property
def data_cache_folder(self):
folder = os.path.join(self.data_folder, "_Cache", self._name)
if not os.path.isdir(folder):
os.makedirs(folder)
return folder
@property
def data_info_folder(self):
folder = os.path.join(self.data_folder, "_DataInfo")
if not os.path.isdir(folder):
os.makedirs(folder)
return folder
@property
def data_info_file(self):
return os.path.join(self.data_info_folder, "{}.info".format(self._name))
@property
def train_data_file(self):
return os.path.join(self.data_cache_folder, "train.npy")
@property
def test_data_file(self):
return os.path.join(self.data_cache_folder, "test.npy")
class LoggingMixin:
logger = logging.getLogger("")
initialized_log_file = set()
@property
def logging_folder_name(self):
folder = os.path.join(os.getcwd(), "_Tmp", "_Logging", self.name)
if not os.path.isdir(folder):
os.makedirs(folder)
return folder
def _init_logging(self):
if self.loggers is not None:
return
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter("%(name)20s - %(levelname)8s - %(message)s")
console.setFormatter(formatter)
root_logger = logging.getLogger("")
root_logger.handlers.clear()
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(console)
self.loggers = {}
def get_logger(self, name, file):
if name in self.loggers:
return self.loggers[name]
folder = self.logging_folder_name
log_file = os.path.join(folder, file)
if log_file not in self.initialized_log_file:
with open(log_file, "w"):
pass
self.initialized_log_file.add(log_file)
log_file = logging.FileHandler(log_file, "a")
log_file.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(name)20s - %(levelname)8s - %(message)s",
"%Y-%m-%d %H:%M:%S"
)
log_file.setFormatter(formatter)
logger = logging.getLogger(name)
logger.addHandler(log_file)
self.loggers[name] = logger
return logger
def log_msg(self, msg, level=logging.DEBUG, logger=None):
logger = self.logger if logger is None else logger
print(msg) if logger is print else logger.log(level, msg)
def log_block_msg(self, title="Done", header="Result", body="", level=logging.DEBUG, logger=None):
msg = title + "\n" + "\n".join(["=" * 100, header, "-" * 100])
if body:
msg += "\n{}\n".format(body) + "-" * 100
self.log_msg(msg, level, logger)
class Base(LoggingMixin):
signature = "Base"
def __init__(self, name=None, model_param_settings=None, model_structure_settings=None):
self.log = {}
self._name = name
self._name_appendix = ""
self._settings_initialized = False
self._generator_base = Generator
self._train_generator = self._test_generator = None
self._sample_weights = self._tf_sample_weights = None
self.n_dim = self.n_class = None
self.n_random_train_subset = self.n_random_test_subset = None
if model_param_settings is None:
self.model_param_settings = {}
else:
assert_msg = "model_param_settings should be a dictionary"
assert isinstance(model_param_settings, dict), assert_msg
self.model_param_settings = model_param_settings
self.lr = None
self._loss = self._loss_name = self._metric_name = None
self._optimizer_name = self._optimizer = None
self.n_epoch = self.max_epoch = self.n_iter = self.batch_size = None
if model_structure_settings is None:
self.model_structure_settings = {}
else:
assert_msg = "model_structure_settings should be a dictionary"
assert isinstance(model_structure_settings, dict), assert_msg
self.model_structure_settings = model_structure_settings
self._model_built = False
self.py_collections = self.tf_collections = None
self._define_py_collections()
self._define_tf_collections()
self._ws, self._bs = [], []
self._is_training = None
self._loss = self._train_step = None
self._tfx = self._tfy = self._output = self._prob_output = None
self._sess = None
self._graph = tf.Graph()
self._sess_config = self.model_param_settings.pop("sess_config", None)
self.loggers = None
self._init_logging()
def __str__(self):
return self.model_saving_name
__repr__ = __str__
@property
def name(self):
return "Base" if self._name is None else self._name
@property
def metric(self):
return getattr(Metrics, self._metric_name)
@property
def model_saving_name(self):
return "{}_{}".format(self.name, self._name_appendix)
@property
def model_saving_path(self):
return os.path.join(os.getcwd(), "_Models", self.model_saving_name)
# Settings
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
self._sample_weights = sample_weights
if self._sample_weights is None:
self._tf_sample_weights = None
else:
self._tf_sample_weights = tf.placeholder(tf.float32, name="sample_weights")
self._train_generator = self._generator_base(x, y, "TrainGenerator", self._sample_weights, self.n_class)
if x_test is not None and y_test is not None:
self._test_generator = self._generator_base(x_test, y_test, "TestGenerator", n_class=self.n_class)
else:
self._test_generator = None
self.n_random_train_subset = int(len(self._train_generator) * 0.1)
if self._test_generator is None:
self.n_random_test_subset = -1
else:
self.n_random_test_subset = len(self._test_generator)
self.n_dim = self._train_generator.shape[-1]
self.n_class = self._train_generator.n_class
batch_size = self.model_param_settings.setdefault("batch_size", 128)
self.model_param_settings["batch_size"] = min(batch_size, len(self._train_generator))
n_iter = self.model_param_settings.setdefault("n_iter", -1)
if n_iter < 0:
self.model_param_settings["n_iter"] = int(len(self._train_generator) / batch_size)
def init_all_settings(self):
self.init_model_param_settings()
self.init_model_structure_settings()
def init_model_param_settings(self):
loss = self.model_param_settings.get("loss", None)
if loss is None:
self._loss_name = "correlation" if self.n_class == 1 else "cross_entropy"
else:
self._loss_name = loss
metric = self.model_param_settings.get("metric", None)
if metric is None:
if self.n_class == 1:
self._metric_name = "correlation"
elif self.n_class == 2:
self._metric_name = "auc"
else:
self._metric_name = "multi_auc"
else:
self._metric_name = metric
self.n_epoch = self.model_param_settings.get("n_epoch", 32)
self.max_epoch = self.model_param_settings.get("max_epoch", 256)
self.max_epoch = max(self.max_epoch, self.n_epoch)
self.batch_size = self.model_param_settings["batch_size"]
self.n_iter = self.model_param_settings["n_iter"]
self._optimizer_name = self.model_param_settings.get("optimizer", "Adam")
self.lr = self.model_param_settings.get("lr", 1e-3)
self._optimizer = getattr(tf.train, "{}Optimizer".format(self._optimizer_name))(self.lr)
def init_model_structure_settings(self):
pass
# Core
def _fully_connected_linear(self, net, shape, appendix):
with tf.name_scope("Linear{}".format(appendix)):
w = init_w(shape, "W{}".format(appendix))
b = init_b([shape[1]], "b{}".format(appendix))
self._ws.append(w)
self._bs.append(b)
return tf.add(tf.matmul(net, w), b, name="Linear{}_Output".format(appendix))
def _build_model(self, net=None):
pass
def _gen_batch(self, generator, n_batch, gen_random_subset=False, one_hot=False):
if gen_random_subset:
data, weights = generator.gen_random_subset(n_batch)
else:
data, weights = generator.gen_batch(n_batch)
x, y = data[..., :-1], data[..., -1]
if not one_hot:
return x, y, weights
if self.n_class == 1:
y = y.reshape([-1, 1])
else:
y = Toolbox.get_one_hot(y, self.n_class)
return x, y, weights
def _get_feed_dict(self, x, y=None, weights=None, is_training=False):
feed_dict = {self._tfx: x, self._is_training: is_training}
if y is not None:
feed_dict[self._tfy] = y
if self._tf_sample_weights is not None:
if weights is None:
weights = np.ones(len(x))
feed_dict[self._tf_sample_weights] = weights
return feed_dict
def _define_loss_and_train_step(self):
self._loss = getattr(Losses, self._loss_name)(self._tfy, self._output, False, self._tf_sample_weights)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
self._train_step = self._optimizer.minimize(self._loss)
def _initialize_session(self):
self._sess = tf.Session(graph=self._graph, config=self._sess_config)
def _initialize_variables(self):
self._sess.run(tf.global_variables_initializer())
def _snapshot(self, i_epoch, i_iter, snapshot_cursor):
x_train, y_train, sw_train = self._gen_batch(
self._train_generator, self.n_random_train_subset,
gen_random_subset=True
)
if self._test_generator is not None:
x_test, y_test, sw_test = self._gen_batch(
self._test_generator, self.n_random_test_subset,
gen_random_subset=True
)
if self.n_class == 1:
y_test = y_test.reshape([-1, 1])
else:
y_test = Toolbox.get_one_hot(y_test, self.n_class)
else:
x_test = y_test = sw_test = None
y_train_pred = self._predict(x_train)
if x_test is not None:
tensor = self._output if self.n_class == 1 else self._prob_output
y_test_pred, test_snapshot_loss = self._calculate(
x_test, y_test, sw_test,
[tensor, self._loss], is_training=False
)
y_test_pred, test_snapshot_loss = y_test_pred[0], test_snapshot_loss[0]
else:
y_test_pred = test_snapshot_loss = None
train_metric = self.metric(y_train, y_train_pred)
if y_test is not None and y_test_pred is not None:
test_metric = self.metric(y_test, y_test_pred)
if i_epoch >= 0 and i_iter >= 0 and snapshot_cursor >= 0:
self.log["test_snapshot_loss"].append(test_snapshot_loss)
self.log["test_{}".format(self._metric_name)].append(test_metric)
self.log["train_{}".format(self._metric_name)].append(train_metric)
else:
test_metric = None
msg = (
"Epoch {:6} Iter {:8} Snapshot {:6} ({}) - "
"Train : {:8.6f} Test : {}".format(
i_epoch, i_iter, snapshot_cursor, self._metric_name, train_metric,
"None" if test_metric is None else "{:8.6f}".format(test_metric)
)
)
logger = self.get_logger("_snapshot", "general.log")
self.log_msg(msg, logger=logger)
return train_metric, test_metric
def _calculate(self, x, y=None, weights=None, tensor=None, n_elem=1e7, is_training=False):
n_batch = int(n_elem / x.shape[1])
n_repeat = int(len(x) / n_batch)
if n_repeat * n_batch < len(x):
n_repeat += 1
cursors = [0]
if tensor is None:
target = self._prob_output
elif isinstance(tensor, list):
target = []
for t in tensor:
if isinstance(t, str):
t = getattr(self, t)
if isinstance(t, list):
target += t
cursors.append(len(t))
else:
target.append(t)
cursors.append(cursors[-1] + 1)
else:
target = getattr(self, tensor) if isinstance(tensor, str) else tensor
results = [self._sess.run(
target, self._get_feed_dict(
x[i * n_batch:(i + 1) * n_batch],
None if y is None else y[i * n_batch:(i + 1) * n_batch],
None if weights is None else weights[i * n_batch:(i + 1) * n_batch],
is_training=is_training
)
) for i in range(n_repeat)]
if not isinstance(target, list):
if len(results) == 1:
return results[0]
return np.vstack(results)
if n_repeat > 1:
results = [
np.vstack([result[i] for result in results]) if target[i].shape.ndims else
np.mean([result[i] for result in results]) for i in range(len(target))
]
else:
results = results[0]
if len(cursors) == 1:
return results
return [results[cursor:cursors[i + 1]] for i, cursor in enumerate(cursors[:-1])]
def _predict(self, x):
tensor = self._output if self.n_class == 1 else self._prob_output
output = self._calculate(x, tensor=tensor, is_training=False)
if self.n_class == 1:
return output.ravel()
return output
def _evaluate(self, x=None, y=None, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
if isinstance(metric, str):
metric_name, metric = metric, getattr(Metrics, metric)
else:
metric_name, metric = self._metric_name, self.metric
pred = self._predict(x) if x is not None else None
cv_pred = self._predict(x_cv) if x_cv is not None else None
test_pred = self._predict(x_test) if x_test is not None else None
train_metric = None if y is None else metric(y, pred)
cv_metric = None if y_cv is None else metric(y_cv, cv_pred)
test_metric = None if y_test is None else metric(y_test, test_pred)
self._print_metrics(metric_name, train_metric, cv_metric, test_metric)
return train_metric, cv_metric, test_metric
@staticmethod
def _print_metrics(metric_name, train_metric=None, cv_metric=None, test_metric=None, only_return=False):
msg = "{} - Train : {} CV : {} Test : {}".format(
metric_name,
"None" if train_metric is None else "{:10.8f}".format(train_metric),
"None" if cv_metric is None else "{:10.8f}".format(cv_metric),
"None" if test_metric is None else "{:10.8f}".format(test_metric)
)
return msg if only_return else print(msg)
def _define_input_and_placeholder(self):
self._is_training = tf.placeholder(tf.bool, name="is_training")
self._tfx = tf.placeholder(tf.float32, [None, self.n_dim], name="X")
self._tfy = tf.placeholder(tf.float32, [None, self.n_class], name="Y")
def _define_py_collections(self):
self.py_collections = [
"_name", "n_class",
"model_param_settings", "model_structure_settings"
]
def _define_tf_collections(self):
self.tf_collections = [
"_tfx", "_tfy", "_output", "_prob_output",
"_loss", "_train_step", "_is_training"
]
# Save & Load
def add_tf_collections(self):
for tensor in self.tf_collections:
target = getattr(self, tensor)
if target is not None:
tf.add_to_collection(tensor, target)
def clear_tf_collections(self):
for key in self.tf_collections:
tf.get_collection_ref(key).clear()
def save_collections(self, folder):
with open(os.path.join(folder, "py.core"), "wb") as file:
param_dict = {name: getattr(self, name) for name in self.py_collections}
pickle.dump(param_dict, file)
self.add_tf_collections()
def restore_collections(self, folder):
with open(os.path.join(folder, "py.core"), "rb") as file:
param_dict = pickle.load(file)
for name, value in param_dict.items():
setattr(self, name, value)
for tensor in self.tf_collections:
target = tf.get_collection(tensor)
if target is None:
continue
            assert len(target) == 1, "{} tensors found for collection '{}'".format(len(target), tensor)
setattr(self, tensor, target[0])
self.clear_tf_collections()
@staticmethod
def get_model_name(path, idx):
targets = os.listdir(path)
if idx is None:
idx = max([int(target) for target in targets if target.isnumeric()])
return os.path.join(path, "{:06}".format(idx))
def save(self, run_id=0, path=None):
if path is None:
path = self.model_saving_path
folder = os.path.join(path, "{:06}".format(run_id))
while os.path.isdir(folder):
run_id += 1
folder = os.path.join(path, "{:06}".format(run_id))
if not os.path.isdir(folder):
os.makedirs(folder)
logger = self.get_logger("save", "general.log")
self.log_msg("Saving model", logger=logger)
with self._graph.as_default():
saver = tf.train.Saver()
self.save_collections(folder)
saver.save(self._sess, os.path.join(folder, "Model"))
self.log_msg("Model saved to " + folder, logger=logger)
return self
def load(self, run_id=None, clear_devices=False, path=None):
self._model_built = True
if path is None:
path = self.model_saving_path
folder = self.get_model_name(path, run_id)
path = os.path.join(folder, "Model")
logger = self.get_logger("save", "general.log")
self.log_msg("Restoring model", logger=logger)
with self._graph.as_default():
if self._sess is None:
self._initialize_session()
saver = tf.train.import_meta_graph("{}.meta".format(path), clear_devices)
saver.restore(self._sess, tf.train.latest_checkpoint(folder))
self.restore_collections(folder)
self.init_all_settings()
self.log_msg("Model restored from " + folder, logger=logger)
return self
def save_checkpoint(self, folder):
if not os.path.exists(folder):
os.makedirs(folder)
with self._graph.as_default():
tf.train.Saver().save(self._sess, os.path.join(folder, "Model"))
def restore_checkpoint(self, folder):
with self._graph.as_default():
tf.train.Saver().restore(self._sess, os.path.join(folder, "Model"))
# API
def print_settings(self, only_return=False):
pass
def fit(self, x, y, x_test=None, y_test=None, sample_weights=None, names=("train", "test"),
timeit=True, time_limit=-1, snapshot_ratio=3, print_settings=True, verbose=1):
t = time.time()
self.init_from_data(x, y, x_test, y_test, sample_weights, names)
if not self._settings_initialized:
self.init_all_settings()
self._settings_initialized = True
if not self._model_built:
with self._graph.as_default():
self._initialize_session()
with tf.name_scope("Input"):
self._define_input_and_placeholder()
with tf.name_scope("Model"):
self._build_model()
self._prob_output = tf.nn.softmax(self._output, name="Prob_Output")
with tf.name_scope("LossAndTrainStep"):
self._define_loss_and_train_step()
with tf.name_scope("InitializeVariables"):
self._initialize_variables()
i_epoch = i_iter = j = snapshot_cursor = 0
if snapshot_ratio == 0 or x_test is None or y_test is None:
use_monitor = False
snapshot_step = self.n_iter
else:
use_monitor = True
snapshot_ratio = min(snapshot_ratio, self.n_iter)
snapshot_step = int(self.n_iter / snapshot_ratio)
logger = self.get_logger("fit", "general.log")
terminate = False
over_fitting_flag = 0
n_epoch = self.n_epoch
tmp_checkpoint_folder = os.path.join(self.model_saving_path, "tmp")
if time_limit > 0:
time_limit -= time.time() - t
if time_limit <= 0:
self.log_msg(
"Time limit exceeded before training process started",
level=logging.INFO, logger=logger
)
return self
monitor = TrainMonitor(Metrics.sign_dict[self._metric_name], snapshot_ratio)
if verbose >= 2:
prepare_tensorboard_verbose(self._sess)
if print_settings:
self.print_settings()
self.log["iter_loss"] = []
self.log["epoch_loss"] = []
self.log["test_snapshot_loss"] = []
self.log["train_{}".format(self._metric_name)] = []
self.log["test_{}".format(self._metric_name)] = []
self._snapshot(0, 0, 0)
bar = ProgressBar(max_value=n_epoch, name="Epoch")
while i_epoch < n_epoch:
i_epoch += 1
epoch_loss = 0
for j in range(self.n_iter):
i_iter += 1
x_batch, y_batch, sw_batch = self._gen_batch(self._train_generator, self.batch_size, one_hot=True)
iter_loss = self._sess.run(
[self._loss, self._train_step],
self._get_feed_dict(x_batch, y_batch, sw_batch, is_training=True)
)[0]
self.log["iter_loss"].append(iter_loss)
epoch_loss += iter_loss
if i_iter % snapshot_step == 0 and verbose >= 1:
snapshot_cursor += 1
train_metric, test_metric = self._snapshot(i_epoch, i_iter, snapshot_cursor)
if use_monitor:
check_rs = monitor.check(test_metric)
over_fitting_flag = monitor.over_fitting_flag
if check_rs["terminate"]:
n_epoch = i_epoch
self.log_msg("Early stopped at n_epoch={} due to '{}'".format(
n_epoch, check_rs["info"]
), level=logging.INFO, logger=logger)
terminate = True
break
if check_rs["save_checkpoint"]:
self.log_msg(check_rs["info"], logger=logger)
self.save_checkpoint(tmp_checkpoint_folder)
if 0 < time_limit <= time.time() - t:
self.log_msg(
"Early stopped at n_epoch={} due to 'Time limit exceeded'".format(i_epoch),
level=logging.INFO, logger=logger
)
terminate = True
break
self.log["epoch_loss"].append(epoch_loss / (j + 1))
if use_monitor:
if i_epoch == n_epoch and i_epoch < self.max_epoch and not monitor.info["terminate"]:
monitor.flat_flag = True
monitor.punish_extension()
n_epoch = min(n_epoch + monitor.extension, self.max_epoch)
self.log_msg("Extending n_epoch to {}".format(n_epoch), logger=logger)
bar.set_max(n_epoch)
if i_epoch == self.max_epoch:
terminate = True
if not monitor.info["terminate"]:
if not over_fitting_flag:
self.log_msg(
"Model seems to be under-fitting but max_epoch reached. "
"Increasing max_epoch may improve performance",
level=logging.INFO, logger=logger
)
else:
self.log_msg("max_epoch reached", level=logging.INFO, logger=logger)
elif i_epoch == n_epoch:
terminate = True
if terminate:
bar.terminate()
if os.path.exists(tmp_checkpoint_folder):
self.log_msg("Rolling back to the best checkpoint", logger=logger)
self.restore_checkpoint(tmp_checkpoint_folder)
shutil.rmtree(tmp_checkpoint_folder)
break
bar.update()
self._snapshot(-1, -1, -1)
if timeit:
self.log_msg("Time Cost: {}".format(time.time() - t), level=logging.INFO, logger=logger)
return self
def predict(self, x):
return self._predict(x)
def predict_classes(self, x):
if self.n_class == 1:
raise ValueError("Predicting classes is not permitted in regression problem")
return self._predict(x).argmax(1).astype(np.int32)
def evaluate(self, x, y, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
return self._evaluate(x, y, x_cv, y_cv, x_test, y_test, metric)
# Visualization
def draw_losses(self):
el, il = self.log["epoch_loss"], self.log["iter_loss"]
ee_base = np.arange(len(el))
ie_base = np.linspace(0, len(el) - 1, len(il))
plt.figure()
plt.plot(ie_base, il, label="Iter loss")
plt.plot(ee_base, el, linewidth=3, label="Epoch loss")
plt.legend()
plt.show()
return self
def scatter2d(self, x, y, padding=0.5, title=None):
axis, labels = np.asarray(x).T, np.asarray(y)
print("=" * 30 + "\n" + str(self))
x_min, x_max = np.min(axis[0]), np.max(axis[0])
y_min, y_max = np.min(axis[1]), np.max(axis[1])
x_padding = max(abs(x_min), abs(x_max)) * padding
y_padding = max(abs(y_min), abs(y_max)) * padding
x_min -= x_padding
x_max += x_padding
y_min -= y_padding
y_max += y_padding
if labels.ndim == 1:
plot_label_dict = {c: i for i, c in enumerate(set(labels))}
n_label = len(plot_label_dict)
labels = np.array([plot_label_dict[label] for label in labels])
else:
n_label = labels.shape[1]
labels = np.argmax(labels, axis=1)
colors = plt.cm.rainbow([i / n_label for i in range(n_label)])[labels]
if title is None:
title = self.model_saving_name
indices = [labels == i for i in range(np.max(labels) + 1)]
scatters = []
plt.figure()
plt.title(title)
for idx in indices:
scatters.append(plt.scatter(axis[0][idx], axis[1][idx], c=colors[idx]))
plt.legend(scatters, ["$c_{}$".format("{" + str(i) + "}") for i in range(len(scatters))],
ncol=math.ceil(math.sqrt(len(scatters))), fontsize=8)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
return self
def scatter3d(self, x, y, padding=0.1, title=None):
axis, labels = np.asarray(x).T, np.asarray(y)
print("=" * 30 + "\n" + str(self))
x_min, x_max = np.min(axis[0]), np.max(axis[0])
y_min, y_max = np.min(axis[1]), np.max(axis[1])
z_min, z_max = np.min(axis[2]), np.max(axis[2])
x_padding = max(abs(x_min), abs(x_max)) * padding
y_padding = max(abs(y_min), abs(y_max)) * padding
z_padding = max(abs(z_min), abs(z_max)) * padding
x_min -= x_padding
x_max += x_padding
y_min -= y_padding
y_max += y_padding
z_min -= z_padding
z_max += z_padding
def transform_arr(arr):
if arr.ndim == 1:
dic = {c: i for i, c in enumerate(set(arr))}
n_dim = len(dic)
arr = np.array([dic[label] for label in arr])
else:
n_dim = arr.shape[1]
arr = np.argmax(arr, axis=1)
return arr, n_dim
if title is None:
title = self.model_saving_name
labels, n_label = transform_arr(labels)
colors = plt.cm.rainbow([i / n_label for i in range(n_label)])[labels]
indices = [labels == i for i in range(n_label)]
scatters = []
fig = plt.figure()
plt.title(title)
ax = fig.add_subplot(111, projection='3d')
for _index in indices:
scatters.append(ax.scatter(axis[0][_index], axis[1][_index], axis[2][_index], c=colors[_index]))
ax.legend(scatters, ["$c_{}$".format("{" + str(i) + "}") for i in range(len(scatters))],
ncol=math.ceil(math.sqrt(len(scatters))), fontsize=8)
plt.show()
return self
def visualize2d(self, x, y, padding=0.1, dense=200, title=None,
scatter=True, show_org=False, draw_background=True, emphasize=None, extra=None):
axis, labels = np.asarray(x).T, np.asarray(y)
print("=" * 30 + "\n" + str(self))
nx, ny, padding = dense, dense, padding
x_min, x_max = np.min(axis[0]), np.max(axis[0])
y_min, y_max = np.min(axis[1]), np.max(axis[1])
x_padding = max(abs(x_min), abs(x_max)) * padding
y_padding = max(abs(y_min), abs(y_max)) * padding
x_min -= x_padding
x_max += x_padding
y_min -= y_padding
y_max += y_padding
def get_base(_nx, _ny):
_xf = np.linspace(x_min, x_max, _nx)
_yf = np.linspace(y_min, y_max, _ny)
n_xf, n_yf = np.meshgrid(_xf, _yf)
return _xf, _yf, np.c_[n_xf.ravel(), n_yf.ravel()]
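        # get_base flattens a dense nx-by-ny grid into an (nx * ny, 2) matrix so
        # the decision function can be evaluated over the whole plane in one call.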
xf, yf, base_matrix = get_base(nx, ny)
t = time.time()
z = self.predict_classes(base_matrix).reshape((nx, ny))
print("Decision Time: {:8.6f} s".format(time.time() - t))
print("Drawing figures...")
xy_xf, xy_yf = np.meshgrid(xf, yf, sparse=True)
if labels.ndim == 1:
plot_label_dict = {c: i for i, c in enumerate(set(labels))}
n_label = len(plot_label_dict)
labels = np.array([plot_label_dict[label] for label in labels])
else:
n_label = labels.shape[1]
labels = np.argmax(labels, axis=1)
colors = plt.cm.rainbow([i / n_label for i in range(n_label)])[labels]
if title is None:
title = self.model_saving_name
if show_org:
plt.figure()
plt.scatter(axis[0], axis[1], c=colors)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
plt.figure()
plt.title(title)
if draw_background:
plt.pcolormesh(xy_xf, xy_yf, z, cmap=plt.cm.Pastel1)
else:
            plt.contour(xf, yf, z, colors='k', levels=[0])
if scatter:
plt.scatter(axis[0], axis[1], c=colors)
if emphasize is not None:
indices = np.array([False] * len(axis[0]))
indices[np.asarray(emphasize)] = True
plt.scatter(axis[0][indices], axis[1][indices], s=80,
facecolors="None", zorder=10)
if extra is not None:
plt.scatter(*np.asarray(extra).T, s=80, zorder=25, facecolors="red")
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
print("Done.")
return self
def visualize3d(self, x, y, padding=0.1, dense=100, title=None,
show_org=False, draw_background=True, emphasize=None, extra=None):
        if False:
            # Dead-code reference: keeps the Axes3D import (which registers the
            # '3d' projection) from being flagged or stripped as unused.
            print(Axes3D.add_artist)
axis, labels = np.asarray(x).T, np.asarray(y)
print("=" * 30 + "\n" + str(self))
nx, ny, nz, padding = dense, dense, dense, padding
x_min, x_max = np.min(axis[0]), np.max(axis[0])
y_min, y_max = np.min(axis[1]), np.max(axis[1])
z_min, z_max = np.min(axis[2]), np.max(axis[2])
x_padding = max(abs(x_min), abs(x_max)) * padding
y_padding = max(abs(y_min), abs(y_max)) * padding
z_padding = max(abs(z_min), abs(z_max)) * padding
x_min -= x_padding
x_max += x_padding
y_min -= y_padding
y_max += y_padding
z_min -= z_padding
z_max += z_padding
def get_base(_nx, _ny, _nz):
_xf = np.linspace(x_min, x_max, _nx)
_yf = np.linspace(y_min, y_max, _ny)
_zf = np.linspace(z_min, z_max, _nz)
n_xf, n_yf, n_zf = np.meshgrid(_xf, _yf, _zf)
return _xf, _yf, _zf, np.c_[n_xf.ravel(), n_yf.ravel(), n_zf.ravel()]
xf, yf, zf, base_matrix = get_base(nx, ny, nz)
t = time.time()
z_xyz = self.predict_classes(base_matrix).reshape((nx, ny, nz))
p_classes = self.predict_classes(x).astype(np.int8)
_, _, _, base_matrix = get_base(10, 10, 10)
z_classes = self.predict_classes(base_matrix).astype(np.int8)
print("Decision Time: {:8.6f} s".format(time.time() - t))
print("Drawing figures...")
z_xy = np.average(z_xyz, axis=2)
z_yz = np.average(z_xyz, axis=1)
z_xz = np.average(z_xyz, axis=0)
xy_xf, xy_yf = np.meshgrid(xf, yf, sparse=True)
yz_xf, yz_yf = np.meshgrid(yf, zf, sparse=True)
xz_xf, xz_yf = np.meshgrid(xf, zf, sparse=True)
def transform_arr(arr):
if arr.ndim == 1:
dic = {c: i for i, c in enumerate(set(arr))}
n_dim = len(dic)
arr = np.array([dic[label] for label in arr])
else:
n_dim = arr.shape[1]
arr = np.argmax(arr, axis=1)
return arr, n_dim
labels, n_label = transform_arr(labels)
p_classes, _ = transform_arr(p_classes)
z_classes, _ = transform_arr(z_classes)
colors = plt.cm.rainbow([i / n_label for i in range(n_label)])
if extra is not None:
ex0, ex1, ex2 = np.asarray(extra).T
else:
ex0 = ex1 = ex2 = None
if title is None:
title = self.model_saving_name
if show_org:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(axis[0], axis[1], axis[2], c=colors[labels])
plt.show()
fig = plt.figure(figsize=(16, 4), dpi=100)
plt.title(title)
ax1 = fig.add_subplot(131, projection='3d')
ax2 = fig.add_subplot(132, projection='3d')
ax3 = fig.add_subplot(133, projection='3d')
ax1.set_title("Org")
ax2.set_title("Pred")
ax3.set_title("Boundary")
ax1.scatter(axis[0], axis[1], axis[2], c=colors[labels])
ax2.scatter(axis[0], axis[1], axis[2], c=colors[p_classes], s=15)
if extra is not None:
ax2.scatter(ex0, ex1, ex2, s=80, zorder=25, facecolors="red")
xyz_xf, xyz_yf, xyz_zf = base_matrix[..., 0], base_matrix[..., 1], base_matrix[..., 2]
ax3.scatter(xyz_xf, xyz_yf, xyz_zf, c=colors[z_classes], s=15)
plt.show()
plt.close()
fig = plt.figure(figsize=(16, 4), dpi=100)
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
def _draw(_ax, _x, _xf, _y, _yf, _z):
if draw_background:
_ax.pcolormesh(_x, _y, _z > 0, cmap=plt.cm.Pastel1)
else:
                _ax.contour(_xf, _yf, _z, colors='k', levels=[0])
def _emphasize(_ax, axis0, axis1, _c):
_ax.scatter(axis0, axis1, c=_c)
if emphasize is not None:
indices = np.array([False] * len(axis[0]))
indices[np.asarray(emphasize)] = True
_ax.scatter(axis0[indices], axis1[indices], s=80,
facecolors="None", zorder=10)
def _extra(_ax, axis0, axis1, _c, _ex0, _ex1):
_emphasize(_ax, axis0, axis1, _c)
if extra is not None:
_ax.scatter(_ex0, _ex1, s=80, zorder=25, facecolors="red")
colors = colors[labels]
ax1.set_title("xy figure")
_draw(ax1, xy_xf, xf, xy_yf, yf, z_xy)
_extra(ax1, axis[0], axis[1], colors, ex0, ex1)
ax2.set_title("yz figure")
_draw(ax2, yz_xf, yf, yz_yf, zf, z_yz)
_extra(ax2, axis[1], axis[2], colors, ex1, ex2)
ax3.set_title("xz figure")
_draw(ax3, xz_xf, xf, xz_yf, zf, z_xz)
_extra(ax3, axis[0], axis[2], colors, ex0, ex2)
plt.show()
print("Done.")
return self
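# AutoBase (below) bundles the "auto" data pipeline: it infers feature info from
# raw data, builds transform dicts for categorical columns, imputes NaNs and
# applies pre-processing before handing the arrays to the mixed-in model class.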
class AutoBase(LoggingMixin, DataCacheMixin):
# noinspection PyUnusedLocal
def __init__(self, name=None, data_info=None, pre_process_settings=None, nan_handler_settings=None,
*args, **kwargs):
if name is None:
raise ValueError("name should be provided when using AutoBase")
self._name = name
self.whether_redundant = None
self.feature_sets = self.sparsity = self.class_prior = None
self.n_features = self.all_num_idx = self.transform_dicts = None
self.py_collections = []
if data_info is None:
data_info = {}
else:
assert_msg = "data_info should be a dictionary"
assert isinstance(data_info, dict), assert_msg
self.data_info = data_info
self._data_info_initialized = False
self.numerical_idx = self.categorical_columns = None
if pre_process_settings is None:
pre_process_settings = {}
else:
assert_msg = "pre_process_settings should be a dictionary"
assert isinstance(pre_process_settings, dict), assert_msg
self.pre_process_settings = pre_process_settings
self._pre_processors = None
self.pre_process_method = self.scale_method = self.reuse_mean_and_std = None
if nan_handler_settings is None:
nan_handler_settings = {}
else:
assert_msg = "nan_handler_settings should be a dictionary"
assert isinstance(nan_handler_settings, dict), assert_msg
self.nan_handler_settings = nan_handler_settings
self._nan_handler = None
self.nan_handler_method = self.reuse_nan_handler_values = None
self.init_pre_process_settings()
self.init_nan_handler_settings()
@property
def label2num_dict(self):
return None if not self.transform_dicts[-1] else self.transform_dicts[-1]
@property
def num2label_dict(self):
label2num_dict = self.label2num_dict
if label2num_dict is None:
return
num_label_list = sorted([(i, c) for c, i in label2num_dict.items()])
return np.array([label for _, label in num_label_list])
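    # Example: with label2num_dict == {"cat": 0, "dog": 1}, num2label_dict is
    # np.array(["cat", "dog"]), so predicted class indices map straight back to
    # the original labels.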
@property
def valid_numerical_idx(self):
return np.array([
is_numerical for is_numerical in self.numerical_idx
if is_numerical is not None
])
@property
def valid_n_features(self):
return np.array([
n_feature for i, n_feature in enumerate(self.n_features)
if self.numerical_idx[i] is not None
])
def init_data_info(self):
if self._data_info_initialized:
return
self._data_info_initialized = True
self.numerical_idx = self.data_info.get("numerical_idx", None)
self.categorical_columns = self.data_info.get("categorical_columns", None)
self.feature_sets = self.data_info.get("feature_sets", None)
self.sparsity = self.data_info.get("sparsity", None)
self.class_prior = self.data_info.get("class_prior", None)
if self.feature_sets is not None and self.numerical_idx is not None:
self.n_features = [len(feature_set) for feature_set in self.feature_sets]
self._gen_categorical_columns()
self.data_info.setdefault("file_type", "txt")
self.data_info.setdefault("shuffle", True)
self.data_info.setdefault("test_rate", 0.1)
self.data_info.setdefault("stage", 3)
def init_pre_process_settings(self):
self.pre_process_method = self.pre_process_settings.setdefault("pre_process_method", "normalize")
self.scale_method = self.pre_process_settings.setdefault("scale_method", "truncate")
self.reuse_mean_and_std = self.pre_process_settings.setdefault("reuse_mean_and_std", False)
if self.pre_process_method is not None and self._pre_processors is None:
self._pre_processors = {}
def init_nan_handler_settings(self):
self.nan_handler_method = self.nan_handler_settings.setdefault("nan_handler_method", "median")
self.reuse_nan_handler_values = self.nan_handler_settings.setdefault("reuse_nan_handler_values", True)
def _auto_init_from_data(self, x, y, x_test, y_test, names):
stage = self.data_info["stage"]
shuffle = self.data_info["shuffle"]
file_type = self.data_info["file_type"]
test_rate = self.data_info["test_rate"]
args = (self.numerical_idx, file_type, names, shuffle, test_rate, stage)
if x is None or y is None:
x, y, x_test, y_test = self._load_data(None, *args)
else:
data = np.hstack([x, y.reshape([-1, 1])])
if x_test is not None and y_test is not None:
data = (data, np.hstack([x_test, y_test.reshape([-1, 1])]))
x, y, x_test, y_test = self._load_data(data, *args)
self._handle_unbalance(y)
self._handle_sparsity()
return x, y, x_test, y_test
def _handle_unbalance(self, y):
if self.n_class == 1:
return
class_ratio = self.class_prior.min() / self.class_prior.max()
logger = self.get_logger("_handle_unbalance", "general.log")
if class_ratio < 0.1:
warn_msg = "Sample weights will be used since class_ratio < 0.1 ({:8.6f})".format(class_ratio)
self.log_msg(warn_msg, logger=logger)
if self._sample_weights is None:
self.log_msg(
"Sample weights are not provided, they'll be generated automatically",
logger=logger
)
                self._sample_weights = np.ones(len(y)) / self.class_prior[y.astype(int)]
self._sample_weights /= self._sample_weights.sum()
self._sample_weights *= len(y)
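    # Worked example for _handle_unbalance: with class priors [0.9, 0.1], rare-
    # class samples get raw weight 1 / 0.1 = 10 versus 1 / 0.9 for the majority
    # class, i.e. nine times as much, before being normalized to sum to len(y).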
def _handle_sparsity(self):
if self.sparsity >= 0.75:
warn_msg = "Dropout will be disabled since data sparsity >= 0.75 ({:8.6f})".format(self.sparsity)
self.log_msg(warn_msg, logger=self.get_logger("_handle_sparsity", "general.log"))
self.dropout_keep_prob = 1.
def _gen_categorical_columns(self):
self.categorical_columns = [
(i, value) for i, value in enumerate(self.valid_n_features)
if not self.valid_numerical_idx[i] and self.valid_numerical_idx[i] is not None
]
if not self.valid_numerical_idx[-1]:
self.categorical_columns.pop()
def _transform_data(self, data, name, train_name="train",
include_label=False, refresh_redundant_info=False, stage=3):
logger = self.get_logger("_transform_data", "general.log")
self.log_msg("Transforming {0}data{2} at stage {1}".format(
"{} ".format(name), stage,
"" if name == train_name or not self.reuse_mean_and_std else
" with {} data".format(train_name),
), logger=logger)
is_ndarray = isinstance(data, np.ndarray)
if refresh_redundant_info or self.whether_redundant is None:
self.whether_redundant = np.array([
True if local_dict is None else False
for local_dict in self.transform_dicts
])
targets = [
(i, local_dict) for i, (idx, local_dict) in enumerate(
zip(self.numerical_idx, self.transform_dicts)
) if not idx and local_dict and not self.whether_redundant[i]
]
if targets and targets[-1][0] == len(self.numerical_idx) - 1 and not include_label:
targets = targets[:-1]
if stage == 1 or stage == 3:
# Transform data & Handle redundant
n_redundant = np.sum(self.whether_redundant)
if n_redundant == 0:
whether_redundant = None
else:
whether_redundant = self.whether_redundant
if not include_label:
whether_redundant = whether_redundant[:-1]
if refresh_redundant_info:
warn_msg = "{} redundant: {}{}".format(
"These {} columns are".format(n_redundant) if n_redundant > 1 else "One column is",
[i for i, redundant in enumerate(whether_redundant) if redundant],
", {} will be removed".format("it" if n_redundant == 1 else "they")
)
self.log_msg(warn_msg, logger=logger)
valid_indices = [
i for i, redundant in enumerate(self.whether_redundant)
if not redundant
]
if not include_label:
valid_indices = valid_indices[:-1]
for i, line in enumerate(data):
for j, local_dict in targets:
elem = line[j]
                    if not isinstance(elem, str) and math.isnan(elem):
                        line[j] = local_dict["nan"]
                    else:
                        line[j] = local_dict.get(elem, local_dict.get("nan", len(local_dict)))
if not is_ndarray and whether_redundant is not None:
data[i] = [line[j] for j in valid_indices]
if is_ndarray and whether_redundant is not None:
data = data[..., valid_indices].astype(np.float32)
else:
data = np.array(data, dtype=np.float32)
if stage == 2 or stage == 3:
data = np.asarray(data, dtype=np.float32)
# Handle nan
if self._nan_handler is None:
self._nan_handler = NanHandler(
method=self.nan_handler_method,
reuse_values=self.reuse_nan_handler_values
)
data = self._nan_handler.transform(data, self.valid_numerical_idx[:-1])
# Pre-process data
if self._pre_processors is not None:
pre_processor_name = train_name if self.reuse_mean_and_std else name
pre_processor = self._pre_processors.setdefault(
pre_processor_name, PreProcessor(
self.pre_process_method, self.scale_method
)
)
if not include_label:
data = pre_processor.transform(data, self.valid_numerical_idx[:-1])
else:
data[..., :-1] = pre_processor.transform(data[..., :-1], self.valid_numerical_idx[:-1])
return data
def _get_label_dict(self):
labels = self.feature_sets[-1]
sorted_labels = sorted(labels)
if not all(Toolbox.is_number(str(label)) for label in labels):
return {key: i for i, key in enumerate(sorted_labels)}
if not sorted_labels:
return {}
numerical_labels = np.array(sorted_labels, np.float32)
if numerical_labels.max() - numerical_labels.min() != self.n_class - 1:
return {key: i for i, key in enumerate(sorted_labels)}
return {}
def _get_transform_dicts(self):
self.transform_dicts = [
None if is_numerical is None else
{key: i for i, key in enumerate(sorted(feature_set))}
if not is_numerical and (not all_num or not np.allclose(
np.sort(np.array(list(feature_set), np.float32).astype(np.int32)),
np.arange(0, len(feature_set))
)) else {} for is_numerical, feature_set, all_num in zip(
self.numerical_idx[:-1], self.feature_sets[:-1], self.all_num_idx[:-1]
)
]
if self.n_class == 1:
self.transform_dicts.append({})
else:
self.transform_dicts.append(self._get_label_dict())
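    # Transform-dict convention: a populated dict maps raw categorical values to
    # contiguous integer ids, an empty dict marks a column that is numerical (or
    # already encoded as 0..n-1), and None marks a redundant column to be dropped.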
def _get_data_from_file(self, file_type, test_rate, target=None):
if file_type == "txt":
sep, include_header = " ", False
elif file_type == "csv":
sep, include_header = ",", True
else:
raise NotImplementedError("File type '{}' not recognized".format(file_type))
logger = self.get_logger("_get_data_from_file", "general.log")
if target is None:
target = os.path.join(self.data_folder, self._name)
if not os.path.isdir(target):
with open(target + ".{}".format(file_type), "r") as file:
data = Toolbox.get_data(file, sep, include_header, logger)
else:
with open(os.path.join(target, "train.{}".format(file_type)), "r") as file:
train_data = Toolbox.get_data(file, sep, include_header, logger)
test_rate = 0
test_file = os.path.join(target, "test.{}".format(file_type))
if not os.path.isfile(test_file):
data = train_data
else:
with open(test_file, "r") as file:
test_data = Toolbox.get_data(file, sep, include_header, logger)
data = (train_data, test_data)
return data, test_rate
def _load_data(self, data=None, numerical_idx=None, file_type="txt", names=("train", "test"),
shuffle=True, test_rate=0.1, stage=3):
use_cached_data = False
train_data = test_data = None
logger = self.get_logger("_load_data", "general.log")
if data is None and stage >= 2 and os.path.isfile(self.train_data_file):
self.log_msg("Restoring data", logger=logger)
use_cached_data = True
train_data = np.load(self.train_data_file)
if not os.path.isfile(self.test_data_file):
test_data = None
data = train_data
else:
test_data = np.load(self.test_data_file)
data = (train_data, test_data)
if use_cached_data:
n_train = None
else:
if data is None:
is_ndarray = False
data, test_rate = self._get_data_from_file(file_type, test_rate)
else:
is_ndarray = True
if not isinstance(data, tuple):
test_rate = 0
data = np.asarray(data, dtype=np.float32)
else:
data = tuple(
arr if isinstance(arr, list) else
np.asarray(arr, np.float32) for arr in data
)
if isinstance(data, tuple):
if shuffle:
np.random.shuffle(data[0]) if is_ndarray else random.shuffle(data[0])
n_train = len(data[0])
data = np.vstack(data) if is_ndarray else data[0] + data[1]
else:
if shuffle:
np.random.shuffle(data) if is_ndarray else random.shuffle(data)
n_train = int(len(data) * (1 - test_rate)) if test_rate > 0 else -1
if not os.path.isdir(self.data_info_folder):
os.makedirs(self.data_info_folder)
if not os.path.isfile(self.data_info_file) or stage == 1:
self.log_msg("Generating data info", logger=logger)
if numerical_idx is not None:
self.numerical_idx = numerical_idx
elif self.numerical_idx is not None:
numerical_idx = self.numerical_idx
if not self.feature_sets or not self.n_features or not self.all_num_idx:
is_regression = self.data_info.pop(
"is_regression",
numerical_idx is not None and numerical_idx[-1]
)
self.feature_sets, self.n_features, self.all_num_idx, self.numerical_idx = (
Toolbox.get_feature_info(data, numerical_idx, is_regression, logger)
)
self.n_class = 1 if self.numerical_idx[-1] else self.n_features[-1]
self._get_transform_dicts()
with open(self.data_info_file, "wb") as file:
pickle.dump([
self.n_features, self.numerical_idx, self.transform_dicts
], file)
elif stage == 3:
self.log_msg("Restoring data info", logger=logger)
with open(self.data_info_file, "rb") as file:
info = pickle.load(file)
self.n_features, self.numerical_idx, self.transform_dicts = info
self.n_class = 1 if self.numerical_idx[-1] else self.n_features[-1]
if not use_cached_data:
if n_train > 0:
train_data, test_data = data[:n_train], data[n_train:]
else:
train_data, test_data = data, None
train_name, test_name = names
train_data = self._transform_data(train_data, train_name, train_name, True, True, stage)
if test_data is not None:
test_data = self._transform_data(test_data, test_name, train_name, True, stage=stage)
self._gen_categorical_columns()
if not use_cached_data and stage == 3:
self.log_msg("Caching data...", logger=logger)
if not os.path.exists(self.data_cache_folder):
os.makedirs(self.data_cache_folder)
np.save(self.train_data_file, train_data)
if test_data is not None:
np.save(self.test_data_file, test_data)
x, y = train_data[..., :-1], train_data[..., -1]
if test_data is not None:
x_test, y_test = test_data[..., :-1], test_data[..., -1]
else:
x_test = y_test = None
self.sparsity = ((x == 0).sum() + np.isnan(x).sum()) / np.prod(x.shape)
_, class_counts = np.unique(y, return_counts=True)
self.class_prior = class_counts / class_counts.sum()
self.data_info["numerical_idx"] = self.numerical_idx
self.data_info["categorical_columns"] = self.categorical_columns
return x, y, x_test, y_test
def _pop_preprocessor(self, name):
if isinstance(self._pre_processors, dict) and name in self._pre_processors:
self._pre_processors.pop(name)
def get_transformed_data_from_file(self, file, file_type="txt", include_label=False):
x, _ = self._get_data_from_file(file_type, 0, file)
x = self._transform_data(x, "new", include_label=include_label)
self._pop_preprocessor("new")
return x
def get_labels_from_classes(self, classes):
num2label_dict = self.num2label_dict
if num2label_dict is None:
return classes
return num2label_dict[classes]
def predict_labels(self, x):
return self.get_labels_from_classes(self.predict_classes(x))
# Signatures
def fit(self, x=None, y=None, x_test=None, y_test=None, sample_weights=None, names=("train", "test"),
timeit=True, time_limit=-1, snapshot_ratio=3, print_settings=True, verbose=1):
raise ValueError
def predict_classes(self, x):
raise ValueError
def predict_from_file(self, file, file_type="txt", include_label=False):
raise ValueError
def predict_classes_from_file(self, file, file_type="txt", include_label=False):
raise ValueError
def predict_labels_from_file(self, file, file_type="txt", include_label=False):
raise ValueError
def evaluate_from_file(self, file, file_type="txt"):
raise ValueError
class AutoMeta(type):
def __new__(mcs, *args, **kwargs):
name_, bases, attr = args[:3]
auto_base, model = bases
def __init__(self, name=None, data_info=None, model_param_settings=None, model_structure_settings=None,
pre_process_settings=None, nan_handler_settings=None):
auto_base.__init__(self, name, data_info, pre_process_settings, nan_handler_settings)
if model.signature != "Advanced":
model.__init__(self, name, model_param_settings, model_structure_settings)
else:
model.__init__(self, name, data_info, model_param_settings, model_structure_settings)
def _define_py_collections(self):
model._define_py_collections(self)
self.py_collections += [
"pre_process_settings", "nan_handler_settings",
"_pre_processors", "_nan_handler", "transform_dicts",
"numerical_idx", "categorical_columns", "transform_dicts"
]
def init_data_info(self):
auto_base.init_data_info(self)
def init_from_data(self, x, y, x_test, y_test, sample_weights, names):
self.init_data_info()
x, y, x_test, y_test = self._auto_init_from_data(x, y, x_test, y_test, names)
model.init_from_data(self, x, y, x_test, y_test, sample_weights, names)
def fit(self, x=None, y=None, x_test=None, y_test=None, sample_weights=None, names=("train", "test"),
timeit=True, time_limit=-1, snapshot_ratio=3, print_settings=True, verbose=1):
return model.fit(
self, x, y, x_test, y_test, sample_weights, names,
timeit, time_limit, snapshot_ratio, print_settings, verbose
)
def predict(self, x):
rs = self._predict(self._transform_data(x, "new", include_label=False))
self._pop_preprocessor("new")
return rs
def predict_classes(self, x):
if self.n_class == 1:
raise ValueError("Predicting classes is not permitted in regression problem")
return self.predict(x).argmax(1).astype(np.int32)
def predict_target_prob(self, x, target):
prob = self.predict(x)
label2num_dict = self.label2num_dict
if label2num_dict is not None:
target = label2num_dict[target]
return prob[..., target]
def predict_from_file(self, file, file_type="txt", include_label=False):
x = self.get_transformed_data_from_file(file, file_type, include_label)
if include_label:
x = x[..., :-1]
return self._predict(x)
def predict_classes_from_file(self, file, file_type="txt", include_label=False):
if self.numerical_idx[-1]:
raise ValueError("Predicting classes is not permitted in regression problem")
x = self.get_transformed_data_from_file(file, file_type, include_label)
if include_label:
x = x[..., :-1]
return self._predict(x).argmax(1).astype(np.int32)
def predict_labels_from_file(self, file, file_type="txt", include_label=False):
classes = self.predict_classes_from_file(file, file_type, include_label)
return self.get_labels_from_classes(classes)
def evaluate(self, x, y, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
x = self._transform_data(x, "train")
cv_name = "cv" if "cv" in self._pre_processors else "tmp_cv"
test_name = "test" if "test" in self._pre_processors else "tmp_test"
if x_cv is not None:
x_cv = self._transform_data(x_cv, cv_name)
if x_test is not None:
x_test = self._transform_data(x_test, test_name)
if cv_name == "tmp_cv":
self._pop_preprocessor("tmp_cv")
if test_name == "tmp_test":
self._pop_preprocessor("tmp_test")
return self._evaluate(x, y, x_cv, y_cv, x_test, y_test, metric)
def print_settings(self):
msg = model.print_settings(self, only_return=True)
if msg is None:
msg = ""
msg += "\nNanHandler : {}".format("None" if not self._nan_handler else "") + "\n"
if self._nan_handler:
msg += "\n".join("-> {:14}: {}".format(k, v) for k, v in sorted(
self.nan_handler_settings.items()
)) + "\n"
msg += "-" * 100 + "\n\n"
msg += "PreProcessor : {}".format("None" if not self._pre_processors else "") + "\n"
if self._pre_processors:
msg += "\n".join("-> {:14}: {}".format(k, v) for k, v in sorted(
self.pre_process_settings.items()
)) + "\n"
msg += "-" * 100
self.log_msg("\n" + msg, logger=self.get_logger("print_settings", "general.log"))
for key, value in locals().items():
if str(value).find("function") >= 0:
attr[key] = value
return type(name_, bases, attr)
class DistMixin(LoggingMixin, DataCacheMixin):
@property
def k_series_time_delta(self):
return time.time() - self._k_series_t
@property
def param_search_time_delta(self):
return time.time() - self._param_search_t
@property
def data_cache_folder_name(self):
folder = os.path.join(os.getcwd(), "_Tmp", "_Cache")
if not os.path.isdir(folder):
os.makedirs(folder)
return folder
@property
def k_series_logger(self):
name = "{}_k_series".format(self.name)
if name not in self.loggers:
self.get_logger(name, "{}.log".format(name))
return self.loggers[name]
@property
def param_search_logger(self):
name = "{}_param_search".format(self.name)
if name not in self.loggers:
self.get_logger(name, "{}.log".format(name))
return self.loggers[name]
# noinspection PyAttributeOutsideInit
def reset_graph(self, i):
del self._graph
self._sess = None
self._graph = tf.Graph()
self._search_cursor = i
def reset_all_variables(self):
with self._graph.as_default():
self._sess.run(tf.global_variables_initializer())
def _handle_param_search_time_limit(self, time_limit):
if self.param_search_time_limit is None:
time_limit -= self.k_series_time_delta
else:
time_limit = min(
time_limit,
self.param_search_time_limit - self.k_series_time_delta
)
self._k_series_t = time.time()
return time_limit
def _k_series_initialization(self, k, data, test_rate):
self._k_series_t = time.time()
self.data_info.setdefault("test_rate", test_rate)
self.init_data_info()
self._k_performances = []
self._k_performances_mean = self._k_performances_std = None
kwargs = {
"numerical_idx": self.numerical_idx,
"shuffle": self.data_info["shuffle"],
"file_type": self.data_info["file_type"]
}
x_1, y_1, x_test_1, y_test_1 = self._load_data(
data, test_rate=self.data_info["test_rate"], stage=1, **kwargs)
if not self._searching_params:
train_1 = np.hstack([x_1, y_1.reshape([-1, 1])])
test_1 = np.hstack([x_test_1, y_test_1.reshape([-1, 1])])
np.save(self.train_data_file, train_1)
np.save(self.test_data_file, test_1)
self._load_data(
np.hstack([x_1, y_1.reshape([-1, 1])]),
names=("train", None), test_rate=0, stage=2, **kwargs
)
if x_test_1 is None or y_test_1 is None:
x_test_2 = y_test_2 = None
else:
x_test_2, y_test_2, *_ = self._load_data(
np.hstack([x_test_1, y_test_1.reshape([-1, 1])]),
names=("test", None), test_rate=0, stage=2, **kwargs
)
names = [("train{}".format(i), "cv{}".format(i)) for i in range(k)]
return x_1, y_1, x_test_2, y_test_2, names
def _k_series_evaluation(self, i, x_test, y_test, time_limit):
if i == -1:
if x_test is None or y_test is None:
valid_performances = [performance[:2] for performance in self._k_performances]
else:
valid_performances = self._k_performances
performances_mean = np.mean(valid_performances, axis=0)
performances_std = np.std(valid_performances, axis=0)
msg = " - Mean | {}\n".format(
self._print_metrics(self._metric_name, *performances_mean, only_return=True))
msg += " - Std | {}".format(
self._print_metrics(self._metric_name, *performances_std, only_return=True))
if self._searching_params:
level = logging.DEBUG
logger = self.param_search_logger
else:
level = logging.INFO
logger = self.k_series_logger
self.log_block_msg(
"Generating performance summary", body=msg,
level=level, logger=logger
)
return performances_mean, performances_std
train_data = self._train_generator.get_all_data(return_weights=False)
cv_data = self._test_generator.get_all_data(return_weights=False)
x, y = train_data[..., :-1], train_data[..., -1]
x_cv, y_cv = cv_data[..., :-1], cv_data[..., -1]
msg = "Performance of run {:2} | ".format(i + 1)
print(" - " + msg, end="")
self._k_performances.append(self._evaluate(x, y, x_cv, y_cv, x_test, y_test))
msg += self._print_metrics(self._metric_name, *self._k_performances[-1], only_return=True)
self.log_msg(
msg, logging.DEBUG,
self.param_search_logger if self._searching_params else self.k_series_logger
)
return self.k_series_time_delta >= time_limit > 0
def _k_series_completion(self, x_test, y_test, names, sample_weights_store):
performance_info = self._k_series_evaluation(-1, x_test, y_test, None)
self._k_performances_mean, self._k_performances_std = performance_info
self.data_info["stage"] = 3
for name in names:
self._pop_preprocessor(name)
self._sample_weights = sample_weights_store
def _k_series_process(self, k, data, cv_rate, test_rate, sample_weights,
msg, cv_method, kwargs):
x_1, y_1, x_test_2, y_test_2, names = self._k_series_initialization(k, data, test_rate)
time_limit = kwargs.pop("time_limit", -1)
logger = self.get_logger("_k_series_process", "general.log")
if 0 < time_limit <= self.k_series_time_delta:
self.log_msg("Time limit exceeded before k_series started", logger=logger)
return
time_limit = self._handle_param_search_time_limit(time_limit)
n_cv = int(cv_rate * len(x_1))
print_settings = True
if sample_weights is not None:
self._sample_weights = np.asarray(sample_weights, np.float32)
sample_weights_store = self._sample_weights
self.log_msg(msg, logger=logger)
all_idx = np.random.permutation(len(x_1))
for i in range(k):
if self._sess is not None:
self.reset_all_variables()
skip = False
while True:
rs = cv_method(x_1, y_1, n_cv, i, k, all_idx)
if rs["success"]:
x_train, y_train, x_cv, y_cv, train_idx = rs["info"]
break
if rs["info"] == "retry":
continue
x_train = y_train = x_cv = y_cv = train_idx = None
skip = True
break
if skip:
self.log_msg(
"{}th fold was skipped since labels in train set and cv set are not identical".format(i + 1),
level=logging.INFO, logger=logger
)
continue
if sample_weights is not None:
self._sample_weights = sample_weights_store[train_idx]
else:
self._sample_weights = None
kwargs["print_settings"] = print_settings
kwargs["names"] = names[i]
self.data_info["stage"] = 2
self.fit(x_train, y_train, x_cv, y_cv, timeit=False, time_limit=time_limit, **kwargs)
if self._k_series_evaluation(i, x_test_2, y_test_2, time_limit):
break
print_settings = False
self._k_series_completion(x_test_2, y_test_2, names, sample_weights_store)
return self
def _cv_sanity_check(self, rs, handler, train_idx, x_train, y_train, x_cv, y_cv):
if self.n_class == 1:
rs["info"] = (x_train, y_train, x_cv, y_cv, train_idx)
else:
y_train_unique, y_cv_unique = np.unique(y_train), np.unique(y_cv)
if len(y_train_unique) == len(y_cv_unique) and np.allclose(y_train_unique, y_cv_unique):
rs["info"] = (x_train, y_train, x_cv, y_cv, train_idx)
else:
rs["success"] = False
rs["info"] = handler
def _k_fold_method(self, x_1, y_1, *args):
_, i, k, all_idx = args
rs = {"success": True}
n_batch = int(len(x_1) / k)
cv_idx = all_idx[np.arange(i * n_batch, (i + 1) * n_batch)]
train_idx = all_idx[[
j for j in range(len(all_idx))
if j < i * n_batch or j >= (i + 1) * n_batch
]]
x_cv, y_cv = x_1[cv_idx], y_1[cv_idx]
x_train, y_train = x_1[train_idx], y_1[train_idx]
self._cv_sanity_check(rs, "skip", train_idx, x_train, y_train, x_cv, y_cv)
return rs
def _k_random_method(self, x_1, y_1, *args):
n_cv, *_ = args
rs = {"success": True}
all_idx = np.random.permutation(len(x_1))
cv_idx, train_idx = all_idx[:n_cv], all_idx[n_cv:]
x_cv, y_cv = x_1[cv_idx], y_1[cv_idx]
x_train, y_train = x_1[train_idx], y_1[train_idx]
self._cv_sanity_check(rs, "retry", train_idx, x_train, y_train, x_cv, y_cv)
return rs
def k_fold(self, k=10, data=None, test_rate=0., sample_weights=None, **kwargs):
return self._k_series_process(
k, data, -1, test_rate, sample_weights, cv_method=self._k_fold_method, kwargs=kwargs,
msg="Training k-fold with k={} and test_rate={}".format(k, test_rate)
)
def k_random(self, k=3, data=None, cv_rate=0.1, test_rate=0., sample_weights=None, **kwargs):
return self._k_series_process(
k, data, cv_rate, test_rate, sample_weights, cv_method=self._k_random_method, kwargs=kwargs,
msg="Training k-random with k={}, cv_rate={} and test_rate={}".format(k, cv_rate, test_rate)
)
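    # k_fold slices one shuffled permutation into k disjoint cv folds, whereas
    # k_random draws a fresh random cv subset of size cv_rate * n on every run
    # (retrying until the train and cv sets contain the same label set).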
def _log_param_msg(self, i, param):
msg = ""
for j, (key, setting) in enumerate(param.items()):
msg += "\n".join([key, "-" * 100]) + "\n"
msg += "\n".join([
" -> {:32} : {}".format(
name, value if not isinstance(value, dict) else "\n{}".format(
"\n".join([" -> {:28} : {}".format(
local_name, local_value
) for local_name, local_value in value.items()])
)
) for name, value in sorted(setting.items())
])
if j != len(param) - 1:
msg += "\n" + "-" * 100 + "\n"
if i >= 0:
title = "Generating parameter setting {:3}".format(i + 1)
else:
title = "Generating best parameter setting"
self.log_block_msg(
title=title, body=msg,
level=logging.DEBUG, logger=self.param_search_logger
)
@staticmethod
def _get_score(mean, std, sign):
if sign > 0:
return mean - std
return mean + std
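    # Pessimistic scoring: for metrics where larger is better (sign > 0) a run is
    # scored by mean - std, so settings that are good *and* stable beat settings
    # that are merely good on average.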
@staticmethod
def _extract_param_from_info(dtype, info):
if dtype == "choice":
return info[0][random.randint(0, len(info[0]) - 1)]
if len(info) == 2:
floor, ceiling = info
distribution = "linear"
else:
floor, ceiling, distribution = info
if ceiling <= floor:
raise ValueError("ceiling should be greater than floor")
if dtype == "int":
return random.randint(floor, ceiling)
if dtype == "float":
linear_target = floor + random.random() * (ceiling - floor)
distribution_error_msg = "distribution '{}' not supported in range_search".format(distribution)
if distribution == "linear":
return linear_target
if distribution[:3] == "log":
sign, log = int(linear_target > 0), math.log(math.fabs(linear_target))
if distribution == "log":
return sign * math.exp(log)
if distribution == "log2":
return sign * 2 ** log
if distribution == "log10":
return sign * 10 ** log
raise NotImplementedError(distribution_error_msg)
raise NotImplementedError(distribution_error_msg)
raise NotImplementedError("dtype '{}' not supported in range_search".format(dtype))
def _update_param(self, param):
self._model_built = False
self._settings_initialized = False
self.model_param_settings = deepcopy(self._settings_base["model_param_settings"])
self.model_structure_settings = deepcopy(self._settings_base["model_structure_settings"])
new_model_param_settings = param.get("model_param_settings", {})
new_model_structure_settings = param.get("model_structure_settings", {})
self.model_param_settings.update(new_model_param_settings)
self.model_structure_settings.update(new_model_structure_settings)
if not self.model_structure_settings.get("use_pruner", True):
self._pruner = None
if not self.model_structure_settings.get("use_dndf", True):
self._dndf = None
if not self.model_structure_settings.get("use_dndf_pruner", False):
self._dndf_pruner = None
if self._nan_handler is not None:
self._nan_handler.reset()
if self._pre_processors:
self._pre_processors = {}
def _select_param(self, params, search_with_test_set):
scores = []
sign = Metrics.sign_dict[self._metric_name]
assert len(self.mean_record) == len(self.std_record)
for mean, std in zip(self.mean_record, self.std_record):
if len(mean) == 2 or not search_with_test_set:
train_mean, cv_mean = mean
train_std, cv_std = std
weighted_mean = 0.05 * train_mean + 0.95 * cv_mean
weighted_std = 0.05 * train_std + 0.95 * cv_std
else:
train_mean, cv_mean, test_mean = mean
train_std, cv_std, test_std = std
weighted_mean = 0.05 * train_mean + 0.1 * cv_mean + 0.85 * test_mean
weighted_std = 0.05 * train_std + 0.1 * cv_std + 0.85 * test_std
scores.append(self._get_score(weighted_mean, weighted_std, sign))
scores = np.array(scores, np.float32)
scores[np.isnan(scores)] = -np.inf
best_idx = np.argmax(scores)
return best_idx, params[best_idx]
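    # The cv (and, when enabled, test) metrics dominate the weighted score on
    # purpose: the training metric only contributes 5%, so settings that merely
    # overfit the training set are unlikely to be selected.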
def _prepare_param_search_data(self, data, test_rate):
file_type = self.data_info.setdefault("file_type", "txt")
data_folder = self.data_info.setdefault("data_folder", "_Data")
self._file_type_store = file_type
self._data_folder_store = data_folder
if data is not None:
return data
cache_folder = self.data_cache_folder_name
target = os.path.join(data_folder, self._name)
data, test_rate = self._get_data_from_file(file_type, test_rate, target)
if isinstance(data, tuple):
train_data, test_data = data
else:
if test_rate > 0:
random.shuffle(data)
n_train = int(len(data) * (1 - test_rate))
train_data, test_data = data[:n_train], data[n_train:]
else:
train_data, test_data = data, None
cache_target = os.path.join(cache_folder, self._name)
if not os.path.isdir(cache_target):
os.makedirs(cache_target)
self.log_msg("Writing tmp data for param searching", level=logging.INFO, logger=self.param_search_logger)
with open(os.path.join(cache_target, "train.txt"), "w") as file:
file.write("\n".join([" ".join(line) for line in train_data]))
if test_data is not None:
with open(os.path.join(cache_target, "test.txt"), "w") as file:
file.write("\n".join([" ".join(line) for line in test_data]))
self.data_info["file_type"] = "txt"
self.data_info["data_folder"] = cache_folder
def _param_search_completion(self):
self._searching_params = False
self.param_search_time_limit = None
self._data_info_initialized = False
self.data_info["file_type"] = self._file_type_store
self.data_info["data_folder"] = self._data_folder_store
def get_param_by_range(self, param):
if isinstance(param, dict):
return {key: self.get_param_by_range(value) for key, value in param.items()}
dtype, *info = param
        if not isinstance(dtype, str) and isinstance(dtype, collections.abc.Iterable):
local_param_list = []
for local_dtype, local_info in zip(dtype, info):
local_param_list.append(self._extract_param_from_info(local_dtype, local_info))
return local_param_list
return self._extract_param_from_info(dtype, info)
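    # A minimal range-spec sketch (hedged: the setting names are illustrative;
    # only the ("dtype", ...) tuple format is defined by get_param_by_range):
    #
    #     grid_params = {
    #         "model_param_settings": {
    #             "batch_size": ("choice", [64, 128, 256]),
    #             "lr": ("float", 1e-4, 1e-2),
    #         }
    #     }
    #     model.range_search(8, grid_params)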
# noinspection PyAttributeOutsideInit
def param_search(self, params,
search_with_test_set=True, switch_to_best_param=True,
single_search_time_limit=None, param_search_time_limit=3600,
k=3, data=None, cv_rate=0.1, test_rate=0.1, sample_weights=None, **kwargs):
self._param_search_t = time.time()
self.param_search_time_limit = param_search_time_limit
logger = self.param_search_logger
self._searching_params = True
self._settings_base = {
"model_param_settings": deepcopy(self.model_param_settings),
"model_structure_settings": deepcopy(self.model_structure_settings)
}
self.mean_record, self.std_record = [], []
self.log_msg(
"Searching best parameter setting (time_limit: {}s per run, {}s in total)".format(
"default" if single_search_time_limit is None else single_search_time_limit,
param_search_time_limit
), logging.DEBUG, logger
)
self._prepare_param_search_data(data, test_rate)
n_param = len(params)
for i, param in enumerate(params):
self.reset_graph(i)
self._log_param_msg(i, param)
self._update_param(param)
time_left = param_search_time_limit - self.param_search_time_delta
if single_search_time_limit is None:
local_time_limit = time_left / (n_param - i)
else:
local_time_limit = single_search_time_limit
kwargs["time_limit"] = min(local_time_limit, time_left)
if self.k_random(k, data, cv_rate, test_rate, sample_weights, **kwargs) is not None:
self.save()
self.mean_record.append(self._k_performances_mean)
self.std_record.append(self._k_performances_std)
if self.param_search_time_delta >= param_search_time_limit:
self.log_msg("Search interrupted due to 'Time limit exceeded'", level=logging.INFO, logger=logger)
break
self.log_msg("Search complete", level=logging.DEBUG, logger=logger)
best_idx, best_param = self._select_param(params, search_with_test_set)
self._log_param_msg(-1, best_param)
msg = ""
for i, (mean, std) in enumerate(zip(self.mean_record, self.std_record)):
msg += " -{} Mean | ".format(">" if i == best_idx else " ")
msg += self._print_metrics(self._metric_name, *mean, only_return=True) + "\n"
msg += " -{} Std | ".format(">" if i == best_idx else " ")
msg += self._print_metrics(self._metric_name, *std, only_return=True)
if i != len(self.mean_record) - 1:
msg += "\n" + "-" * 100 + "\n"
self.log_block_msg("Generating performances", body=msg, level=logging.DEBUG, logger=logger)
if switch_to_best_param:
self.reset_graph(-1)
self._update_param(best_param)
self._param_search_completion()
return self
def random_search(self, n, grid_params, grid_order="list_first",
search_with_test_set=True, switch_to_best_params=True,
single_search_time_limit=None, param_search_time_limit=3600,
k=3, data=None, cv_rate=0.1, test_rate=0.1, sample_weights=None, **kwargs):
if grid_order == "list_first":
param_types = sorted(grid_params)
n_param_base = [
np.arange(len(grid_params[param_type]))
for param_type in param_types
]
params = [
{
param_type: grid_params[param_type][indices[i]]
for i, param_type in enumerate(param_types)
} for indices in itertools.product(*n_param_base)
]
elif grid_order == "dict_first":
param_types = sorted(grid_params)
params_names = [sorted(grid_params[param_type]) for param_type in param_types]
params_names_cumsum = np.cumsum([0] + [len(params_name) for params_name in params_names])
n_param_base = sum([
[np.arange(len(grid_params[param_type][param_name])) for param_name in params_name]
for param_type, params_name in zip(param_types, params_names)
], [])
params = [
{
param_type: {
local_params: grid_params[param_type][local_params][indices[cumsum + j]]
for j, local_params in enumerate(params_names[i])
} for i, (param_type, cumsum) in enumerate(zip(param_types, params_names_cumsum))
} for indices in itertools.product(*n_param_base)
]
else:
raise NotImplementedError("grid_sort_type '{}' not implemented".format(grid_order))
if n > 0:
params = [params[i] for i in np.random.permutation(len(params))[:n]]
return self.param_search(
params,
search_with_test_set, switch_to_best_params,
single_search_time_limit, param_search_time_limit,
k, data, cv_rate, test_rate, sample_weights, **kwargs
)
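    # grid_order="list_first" expects each settings group to be a list of
    # complete candidate dicts (the cartesian product is taken over the lists),
    # while "dict_first" expects dicts of per-parameter candidate lists.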
def grid_search(self, grid_params, grid_order="list_first",
search_with_test_set=True, switch_to_best_params=True,
single_search_time_limit=None, param_search_time_limit=3600,
k=3, data=None, cv_rate=0.1, test_rate=0.1, sample_weights=None, **kwargs):
return self.random_search(
-1, grid_params, grid_order,
search_with_test_set, switch_to_best_params,
single_search_time_limit, param_search_time_limit,
k, data, cv_rate, test_rate, sample_weights, **kwargs
)
def range_search(self, n, grid_params,
search_with_test_set=True, switch_to_best_params=True,
single_search_time_limit=None, param_search_time_limit=3600,
k=3, data=None, cv_rate=0.1, test_rate=0.1, sample_weights=None, **kwargs):
params = []
for _ in range(n):
local_params = {
param_type: {
param_name: self.get_param_by_range(param_value)
for param_name, param_value in param_values.items()
} for param_type, param_values in grid_params.items()
}
params.append(local_params)
return self.param_search(
params,
search_with_test_set, switch_to_best_params,
single_search_time_limit, param_search_time_limit,
k, data, cv_rate, test_rate, sample_weights, **kwargs
)
def empirical_search(self, search_with_test_set=True, switch_to_best_params=True,
level=3, single_search_time_limit=None, param_search_time_limit=3600,
k=3, data=None, cv_rate=0.1, test_rate=0.1, sample_weights=None, **kwargs):
grid_params = {
"model_structure_settings": [
{"use_wide_network": False, "use_pruner": False, "use_dndf_pruner": False},
{"use_wide_network": False, "use_pruner": True, "use_dndf_pruner": False},
{"use_wide_network": True, "use_pruner": True, "use_dndf_pruner": False},
]
}
if level >= 2:
grid_params["model_structure_settings"] += [
{"use_wide_network": True, "use_pruner": True, "use_dndf_pruner": True},
{"use_wide_network": True, "use_pruner": False, "use_dndf_pruner": True},
{"use_wide_network": True, "use_pruner": False, "use_dndf_pruner": False}
]
if level >= 3:
grid_params["pre_process_settings"] = [
{"reuse_mean_and_std": False}, {"reuse_mean_and_std": True}
]
if level >= 4:
grid_params["model_param_settings"] = [
{"use_batch_norm": False}, {"use_batch_norm": True}
]
if level >= 5:
grid_params["model_param_settings"] = [
{"use_batch_norm": False, "batch_size": 64},
{"use_batch_norm": False, "batch_size": 128},
{"use_batch_norm": False, "batch_size": 256},
{"use_batch_norm": True, "batch_size": 64},
{"use_batch_norm": True, "batch_size": 128},
{"use_batch_norm": True, "batch_size": 256}
]
return self.grid_search(
grid_params, "list_first",
search_with_test_set, switch_to_best_params,
single_search_time_limit, param_search_time_limit,
k, data, cv_rate, test_rate, sample_weights, **kwargs
)
# Signatures
@staticmethod
def _print_metrics(metric_name, train_metric=None, cv_metric=None, test_metric=None, only_return=False):
raise ValueError
def _gen_batch(self, generator, n_batch, gen_random_subset=False, one_hot=False):
raise ValueError
def _load_data(self, data=None, numerical_idx=None, file_type="txt", names=("train", "test"),
shuffle=True, test_rate=0.1, stage=3):
raise ValueError
def _handle_unbalance(self, y):
raise ValueError
def _handle_sparsity(self):
raise ValueError
def _get_data_from_file(self, file_type, test_rate, target=None):
raise ValueError
def _evaluate(self, x=None, y=None, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
raise ValueError
def _pop_preprocessor(self, name):
raise ValueError
def init_data_info(self):
raise ValueError
def save(self, run_id=0, path=None):
raise ValueError
def fit(self, x=None, y=None, x_test=None, y_test=None, sample_weights=None, names=("train", "test"),
timeit=True, time_limit=-1, snapshot_ratio=3, print_settings=True, verbose=1):
raise ValueError
def evaluate(self, x=None, y=None, x_cv=None, y_cv=None, x_test=None, y_test=None, metric=None):
raise ValueError
class DistMeta(type):
def __new__(mcs, *args, **kwargs):
name_, bases, attr = args[:3]
model, dist_mixin = bases
def __init__(self, name=None, data_info=None, model_param_settings=None, model_structure_settings=None,
pre_process_settings=None, nan_handler_settings=None):
self._search_cursor = None
self._param_search_t = None
self.param_search_time_limit = None
self.mean_record = self.std_record = None
self._searching_params = self._settings_base = None
dist_mixin.__init__(self)
model.__init__(
self, name, data_info, model_param_settings, model_structure_settings,
pre_process_settings, nan_handler_settings
)
attr["__init__"] = __init__
return type(name_, bases, attr)
| 42.981061 | 114 | 0.588691 |
ce33095d639f433a869a59cf8b5064b41dae7b53 | 159 | py | Python | tests/test_p_7_satz_von_pick.py | techrabbit58/uebung_informatik_vorkurs | e99312ae66ccccd6bfe45bfd3c3f43c01690659c | ["Unlicense"] | null | null | null | tests/test_p_7_satz_von_pick.py | techrabbit58/uebung_informatik_vorkurs | e99312ae66ccccd6bfe45bfd3c3f43c01690659c | ["Unlicense"] | null | null | null | tests/test_p_7_satz_von_pick.py | techrabbit58/uebung_informatik_vorkurs | e99312ae66ccccd6bfe45bfd3c3f43c01690659c | ["Unlicense"] | null | null | null |
"""
Teste die 'pick()' funktion.
"""
from tag_2.p_7_satz_von_pick import pick
def test_satz_von_pick():
assert pick(innenpunkte=37, randpunkte=42) == 57
| 17.666667 | 52 | 0.716981 |
5a7627a1fe051975fcfc3d64d8e12a10d27962c1 | 804 | py | Python | orgnummer_generator/generator.py | navikt/testnorge-ereg-mapper | 936afa2dd08baa2d7d242f61d231438b7f8ddf4d | ["MIT"] | null | null | null | orgnummer_generator/generator.py | navikt/testnorge-ereg-mapper | 936afa2dd08baa2d7d242f61d231438b7f8ddf4d | ["MIT"] | 1 | 2020-05-20T12:45:32.000Z | 2020-05-20T12:45:32.000Z | orgnummer_generator/generator.py | navikt/testnorge-ereg-mapper | 936afa2dd08baa2d7d242f61d231438b7f8ddf4d | ["MIT"] | 1 | 2019-09-07T16:06:22.000Z | 2019-09-07T16:06:22.000Z |
import random
import requests
def make_orgnummer():
    # Draw a random 8-digit base number and compute the MOD11 control digit
    # using the standard weights for Norwegian organisation numbers.
    r = random.randint(80000000, 99999999)
    r_str = str(r)
    weights = "32765432"
    total = 0
    for i, c in enumerate(r_str):
        total += int(c) * int(weights[i])
    control_digit = 11 - (total % 11)
    if control_digit == 11:  # a remainder of 0 maps to control digit 0
        control_digit = 0
    final = r_str + str(control_digit)
    req = requests.get(
        "https://modapp-q2.adeo.no/ereg/api/v1/organisasjon/"
        + final +
        "?inkluderHierarki=false&inkluderHistorikk=false")
    req2 = requests.get(
        "https://modapp-q0.adeo.no/ereg/api/v1/organisasjon/"
        + final +
        "?inkluderHierarki=false&inkluderHistorikk=false")
    # Regenerate when the control digit is invalid (a MOD11 result of 10 cannot
    # be used) or when the number already exists in either test registry.
    if control_digit == 10 or req.status_code == 200 or req2.status_code == 200:
        return make_orgnummer()
    return final
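# Worked checksum example (illustrative digits): for r_str = "80000000" the
# weighted sum is 8 * 3 = 24, 24 % 11 = 2, so the control digit is 11 - 2 = 9
# and the generated number is "800000009".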
print(make_orgnummer())
| 21.72973 | 100 | 0.621891 |
cec5837c5ce2ece44c9c18ede781d3a57534daa3 | 489 | py | Python | books/PythonAutomate/pdf_word_documents/reading_word.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | books/PythonAutomate/pdf_word_documents/reading_word.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null | books/PythonAutomate/pdf_word_documents/reading_word.py | zeroam/TIL | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | ["MIT"] | null | null | null |
"""reading_word.py
워드 문서 읽기
"""
import docx
doc = docx.Document("demo.docx")
print(len(doc.paragraphs)) # paragraph 갯수
print(doc.paragraphs[0].text) # 첫번째 paragraph 문자열
print(doc.paragraphs[1].text) # 두번째 paragraph 문자열
print(len(doc.paragraphs[1].runs)) # 두번쨰 paragraph의 run 갯수
print(doc.paragraphs[1].runs[0].text) # 두번쨰 paragraph의 첫번째 run 문자열
print(doc.paragraphs[1].runs[1].text) # 두번쨰 paragraph의 두번째 run 문자열
print(doc.paragraphs[1].runs[2].text) # 두번쨰 paragraph의 세번째 run 문자열
| 30.5625 | 67 | 0.725971 |
6502dd254712848e7e901a4bd68509a0933c8184 | 4,582 | py | Python | Server/JsonWriter.py | EiS94/Bewaesserungsanlage | 8edc0c8b5113219724b13c56fb296a003e83aad0 | ["MIT"] | null | null | null | Server/JsonWriter.py | EiS94/Bewaesserungsanlage | 8edc0c8b5113219724b13c56fb296a003e83aad0 | ["MIT"] | null | null | null | Server/JsonWriter.py | EiS94/Bewaesserungsanlage | 8edc0c8b5113219724b13c56fb296a003e83aad0 | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
import json
import random
import DHT as dht
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import RPi.GPIO as GPIO
import time
import calendar
import smbus
# set up the Pi's GPIO wiring (BOARD pin numbering)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12, GPIO.IN)
GPIO.setup(11, GPIO.IN)
GPIO.setup(22, GPIO.OUT)
# setup for the MCP3008 analog-to-digital converter
SPI_PORT = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
# save the platform for the temperature sensor
pf = dht.common.get_platform()
#setup for the light sensor
DEVICE = 0x23
POWER_DOWN = 0x00
POWER_ON = 0x01
RESET = 0x07
# Start measurement at 4lx resolution. Time typically 16ms.
CONTINUOUS_LOW_RES_MODE = 0x13
# Start measurement at 1lx resolution. Time typically 120ms
CONTINUOUS_HIGH_RES_MODE_1 = 0x10
# Start measurement at 0.5lx resolution. Time typically 120ms
CONTINUOUS_HIGH_RES_MODE_2 = 0x11
# Start measurement at 1lx resolution. Time typically 120ms
# Device is automatically set to Power Down after measurement.
ONE_TIME_HIGH_RES_MODE_1 = 0x20
# Start measurement at 0.5lx resolution. Time typically 120ms
# Device is automatically set to Power Down after measurement.
ONE_TIME_HIGH_RES_MODE_2 = 0x21
# Start measurement at 4lx resolution. Time typically 16ms.
# Device is automatically set to Power Down after measurement.
ONE_TIME_LOW_RES_MODE = 0x23
bus = smbus.SMBus(1)
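# The BH1750 light sensor returns two bytes, high byte first; dividing the
# combined 16-bit value by 1.2 converts the raw count to lux (the sensor's
# default measurement accuracy per the datasheet).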
def convertToNumber(data):
result = (data[1] + (256 * data[0])) / 1.2
return result
def readLight(addr=DEVICE):
data = bus.read_i2c_block_data(addr, ONE_TIME_HIGH_RES_MODE_1)
return convertToNumber(data)
# write the sensor data to the data.json file
def run():
data = {}
data['data'] = []
data['data'].append({
'wetness': getHumidityAir(),
'rain': getAnalogRainStatus(),
'temperatur': getTemperatur(),
'plant1': getAnalogPlantStatus(1),
'plant2': getAnalogPlantStatus(2),
'plant3': getAnalogPlantStatus(3),
'plant4': getAnalogPlantStatus(4),
'plant5': getAnalogPlantStatus(5),
'illuminance' : getIlluminance(),
'p1Value' : getAnalogPlantValue(1),
'p2Value' : getAnalogPlantValue(2),
'p3Value' : getAnalogPlantValue(3),
'p4Value' : getAnalogPlantValue(4),
'p5Value' : getAnalogPlantValue(5),
'timestamp': getTimestamp(),
'valve': getValveStatus()
})
with open('/home/pi/Bewaesserung/data.json', 'w') as outfile:
json.dump(data, outfile)
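# The threshold helpers below map raw 10-bit MCP3008 readings (0..1023; on
# these resistive sensors a higher value means drier / less rain) to
# human-readable German status strings.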
def getAnalogPlantStatus(i):
plant1Status = getAnalogSignal(i)
if (plant1Status < 500):
return "ausgezeichnet bewässert"
elif (plant1Status < 750):
return "ausreichend bewässert"
else:
return "braucht Wasser"
def getAnalogRainStatus():
rainStatus = getAnalogSignal(0)
if (rainStatus < 600):
return "starker Regen"
elif (rainStatus < 800):
return "Regen"
elif (rainStatus < 950):
return "leichter Regen"
else:
return "kein Regen"
def getAnalogBrightnessValue():
return getAnalogSignal(7)
def getAnalogRainValue():
return getAnalogSignal(0)
def getAnalogPlantValue(i):
return getAnalogSignal(i)
def getTimestamp():
return calendar.timegm(time.gmtime())
def getTemperatur():
h, t = dht.read_retry(dht.DHT22, 4, 15, 2, pf)
if (t != None):
return float("{0:.1f}".format(t))
else: return 199.9
def getHumidityAir():
h, t = dht.read_retry(dht.DHT22, 4, 15, 2, pf)
if (h != None):
return float("{0:.1f}".format(h))
else: return 199.9
def getRain():
rain = GPIO.input(12)
rain = ("%ld" % rain)
if (rain == "1"):
return("Nein")
else:
return("Ja")
def getHumidityPlant1():
wet = GPIO.input(11)
wet = ("%ld" % wet)
if (wet == "1"):
return("braucht Wasser")
else:
return("ausreichend bewässert")
def openValve():
GPIO.output(22, GPIO.LOW)
def closeValve():
GPIO.output(22, GPIO.HIGH)
def getValveStatus():
if GPIO.input(22) == GPIO.HIGH:
return "aus"
else:
return "an"
def getAnalogSignal(i):
if (i < 8 and i >= 0):
return mcp.read_adc(i)
else: return "select Channel-number between 0 and 7"
def getAllAnalogSignals():
values = [0]*8
for i in range(8):
values[i] = mcp.read_adc(i)
return values
def getIlluminance():
return format(readLight(),'.2f')
def cleanExit():
GPIO.cleanup()
def writeTemperatur():
    # Unfinished debug stub: loops forever printing a placeholder value.
    while True:
        print("bla")
| 25.597765 | 68 | 0.665212 |
651a54e75f0c07c0181a8708ac38016cb9cd9c4b | 4,729 | py | Python | src/server/handlers/metrics.py | monosidev/monosi | a88b689fc74010b10dbabb32f4b2bdeae865f4d5 | ["Apache-2.0"] | 156 | 2021-11-19T18:50:14.000Z | 2022-03-31T19:48:59.000Z | src/server/handlers/metrics.py | monosidev/monosi | a88b689fc74010b10dbabb32f4b2bdeae865f4d5 | ["Apache-2.0"] | 30 | 2021-12-27T19:30:56.000Z | 2022-03-30T17:49:00.000Z | src/server/handlers/metrics.py | monosidev/monosi | a88b689fc74010b10dbabb32f4b2bdeae865f4d5 | ["Apache-2.0"] | 14 | 2022-01-17T23:24:34.000Z | 2022-03-29T09:27:47.000Z |
import logging
from flask_restful import Resource, abort, request
from sqlalchemy import func
from server.models import Metric, ZScore
from server.middleware.db import db
from .monitors import MonitorResource
# class MetricListResource(Resource):
# def _validate(self, req):
# pass
# # return self.resource.validate(req)
# def get(self, **kwargs):
# try:
# database = kwargs['database']
# schema = kwargs['schema']
# table = kwargs['table']
# except:
# abort(404)
# obj = self._retrieve_by_kwargs(table, database, schema)
# return {self.key: obj}
class MetricListResource(Resource):
@property
def key(self):
return "metrics"
def _transform(self, objs):
return [
{
'metric': obj[0],
'column_name': obj[1],
'count': obj[2],
'error': obj[3]
}
for obj in objs
]
def _retrieve_by_kwargs(self, table_name, database, schema):
try:
obj = db.session.query(
Metric.metric,
Metric.column_name,
func.count(Metric.id),
ZScore.error,
Metric.table_name,
Metric.database,
Metric.schema,
func.max(Metric.created_at),
).join(
ZScore,
Metric.id == ZScore.metric_id,
).filter(
table_name == Metric.table_name,
database == Metric.database,
schema == Metric.schema,
).group_by(
Metric.table_name,
Metric.database,
Metric.schema,
Metric.column_name,
Metric.metric,
ZScore.error,
).all()
        except Exception:
            import traceback
            traceback.print_exc()
            abort(404)
return {'table_name': table_name, 'database': database, 'schema': schema, 'type': 'table_health', 'metrics': obj}
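    # Fetch the full time series for one column/metric pair, merged with z-score data when present.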
def _retrieve_detailed(self, table_name, database, schema, column_name, metric):
try:
objs = db.session.query(Metric, ZScore).outerjoin(ZScore, ZScore.metric_id == Metric.id).filter(
Metric.table_name==table_name,
Metric.database==database,
Metric.schema==schema,
Metric.metric == metric,
Metric.column_name == column_name
            ).order_by(Metric.time_window_end).all()
            # merge each metric dict with its z-score dict (when present)
            metrics = []
            for metric_row, zscore_row in objs:
                d = metric_row.to_dict()
                if zscore_row:
                    d.update(zscore_row.to_dict())
                metrics.append(d)
        except Exception as e:
            logging.warning(e)
            abort(404)
return metrics
# Entrypoint
def get(self, monitor_id):
monitor_resource = MonitorResource()
monitor = monitor_resource.get(obj_id=monitor_id)['monitor']
if 'column_name' in request.args and 'metric' in request.args:
return self.get_detail(monitor['table_name'], monitor['database'], monitor['schema'], request.args)
obj_list = self._retrieve_by_kwargs(monitor['table_name'], monitor['database'], monitor['schema'])
transformed_obj_list = self._transform(obj_list['metrics'])
return {self.key: transformed_obj_list}
def get_detail(self, table_name, database, schema, args):
column_name = args.get('column_name')
metric = args.get('metric')
metrics = self._retrieve_detailed(table_name, database, schema, column_name, metric)
return {'metrics': metrics}
# class MetricResource(Resource):
# @property
# def key(self):
# return "metrics"
# def _retrieve_by_monitor_and_name(self, monitor, name):
# try:
# obj = db.session.query(Metric).filter(
# Metric.table_name == ".".join([monitor['database'], monitor['schema'], monitor['table_name']]),
# Metric.metric == name,
# )
# except Exception as e:
# logging.warn(e)
# abort(404)
# return obj
# def get(self, **kwargs):
# obj_id = kwargs['obj_id']
# metric_name = kwargs['metric_name']
# monitor_resource = MonitorResource()
# monitor = monitor_resource.get(obj_id=obj_id)['monitor']
# obj_list = self._retrieve_by_monitor_and_name(monitor, metric_name)
# obj_dict_list = [obj.to_dict() for obj in obj_list]
# return {self.key: obj_dict_list}
| 32.840278 | 121 | 0.550433 |
6536b18b80a0674c73f4767a84b77dcd2adc41a6
| 6,039 |
py
|
Python
|
src/test/tests/operators/moveoperators.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/test/tests/operators/moveoperators.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/test/tests/operators/moveoperators.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: moveoperators.py
#
# Tests: plots - Pseudocolor, Mesh, FilledBoundary
# operators - Erase, Isosurface, Reflect, Slice, Transform
#
# Defect ID: '1837
#
# Programmer: Brad Whitlock
# Date: Thu Apr 17 16:45:46 PST 2003
#
# Modifications:
# Eric Brugger, Thu May 8 12:57:56 PDT 2003
# Remove a call to ToggleAutoCenterMode since it no longer exists.
#
# Kathleen Bonnell, Thu Aug 28 14:34:57 PDT 2003
# Remove compound var name from subset plots.
#
# Kathleen Bonnell, Wed Mar 17 07:33:40 PST 2004
# Set default Slice atts, as these have changed.
#
# Kathleen Bonnell, Wed May 5 08:13:22 PDT 2004
# Modified Slice atts to get same picture as defaults have changed.
#
# Brad Whitlock, Tue Jan 17 12:14:21 PDT 2006
# Added runTest4.
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
#    Added ability to switch between Silo's HDF5 and PDB data.
#
# Kathleen Biagas, Thu Jul 11 08:18:42 PDT 2013
#    Removed legacy style annotation setting.
#
# Kathleen Biagas, Mon Dec 19 15:45:38 PST 2016
# Use FilledBoundary plot for materials instead of Subset.
#
# ----------------------------------------------------------------------------
def InitAnnotation():
# Turn off all annotation except for the bounding box.
a = AnnotationAttributes()
TurnOffAllAnnotations(a)
a.axes2D.visible = 1
a.axes2D.xAxis.label.visible = 0
a.axes2D.yAxis.label.visible = 0
a.axes2D.xAxis.title.visible = 0
a.axes2D.yAxis.title.visible = 0
a.axes3D.bboxFlag = 1
SetAnnotationAttributes(a)
def InitDefaults():
# Set the default reflect operator attributes.
reflect = ReflectAttributes()
reflect.SetReflections(1, 1, 0, 0, 0, 0, 0, 0)
SetDefaultOperatorOptions(reflect)
slice = SliceAttributes()
slice.project2d = 0
slice.SetAxisType(slice.XAxis)
slice.SetFlip(1)
SetDefaultOperatorOptions(slice)
def setTheFirstView():
# Set the view
v = View3DAttributes()
v.viewNormal = (-0.695118, 0.351385, 0.627168)
v.focus = (-10, 0, 0)
v.viewUp = (0.22962, 0.935229, -0.269484)
v.viewAngle = 30
v.parallelScale = 17.3205
v.nearPlane = -70
v.farPlane = 70
v.perspective = 1
SetView3D(v)
#
# Test operator promotion, demotion, and removal.
#
def runTest1():
OpenDatabase(silo_data_path("noise.silo"))
# Set up a plot with a few operators.
AddPlot("Pseudocolor", "hardyglobal")
AddOperator("Isosurface")
AddOperator("Slice")
AddOperator("Reflect")
DrawPlots()
setTheFirstView()
# Take a picture of the initial setup.
Test("moveoperator_0")
# Move the reflect so that it is before the slice in the pipeline.
# The pipeline will be: Isosurface, Reflect, Slice
DemoteOperator(2)
DrawPlots()
Test("moveoperator_1")
# Move the reflect operator back so that the pipeline matches the
# initial configuration: Isosurface, Slice, Reflect
PromoteOperator(1)
DrawPlots()
Test("moveoperator_2")
# Remove the slice operator from the middle, resulting in:
# Isosurface, Reflect
RemoveOperator(1)
DrawPlots()
Test("moveoperator_3")
# Remove the Isosurface operator, resulting in: Reflect
RemoveOperator(0)
DrawPlots()
Test("moveoperator_4")
# Remove the Reflect operator
RemoveOperator(0)
DrawPlots()
Test("moveoperator_5")
DeleteAllPlots()
#
# Test removing an operator from more than one plot at the same time.
#
def runTest2():
all = 1
# Set up a couple plots of globe
OpenDatabase(silo_data_path("globe.silo"))
AddPlot("Pseudocolor", "u")
AddPlot("Mesh", "mesh1")
# Add a reflect operator to both plots.
AddOperator("Reflect", all)
DrawPlots()
Test("moveoperator_6")
# Remove the operator from both plots.
RemoveOperator(0, all)
DrawPlots()
Test("moveoperator_7")
DeleteAllPlots()
#
# Test setting attributes for multiple operators of the same type.
#
def runTest3():
# Set up a couple plots of globe
OpenDatabase(silo_data_path("globe.silo"))
AddPlot("Pseudocolor", "u")
pc = PseudocolorAttributes()
pc.SetOpacityType(pc.Constant)
pc.opacity = 0.2
SetPlotOptions(pc)
AddPlot("FilledBoundary", "mat1")
# The subset plot is the active plot, add a couple transform
# operators to it.
AddOperator("Transform")
AddOperator("Transform")
# Set the attributes for the *first* transform operator.
# This results in a full size globe translated up in Y.
t0 = TransformAttributes()
t0.doTranslate = 1
t0.translateY = 15
SetOperatorOptions(t0, 0)
DrawPlots()
Test("moveoperator_8")
# Set the attributes for the *second* transform operator.
# The plot has been translated, now scale it. Since it has already
# been translated, this will also translate it a little in Y.
t1 = TransformAttributes()
t1.doScale = 1
t1.scaleX = 0.5
t1.scaleY = 0.5
t1.scaleZ = 0.5
SetOperatorOptions(t1, 1)
Test("moveoperator_9")
# Demote the last operator to reverse the order of the transformations.
DemoteOperator(1)
# Make the pc plot opaque again
SetActivePlots(0)
pc.SetOpacityType(pc.FullyOpaque)
SetPlotOptions(pc)
DrawPlots()
Test("moveoperator_10")
DeleteAllPlots()
#
# Test that removing an operator using the RemoveOperator(i) method causes
# the vis window to get redrawn.
#
def runTest4():
OpenDatabase(silo_data_path("curv2d.silo"))
AddPlot("Pseudocolor", "d")
AddOperator("Isosurface")
DrawPlots()
Test("moveoperator_11")
RemoveOperator(0)
Test("moveoperator_12")
DeleteAllPlots()
#
# Set up the environment and run all of the tests.
#
def runTests():
InitAnnotation()
InitDefaults()
runTest1()
runTest2()
runTest3()
runTest4()
# Run the tests.
runTests()
Exit()
| 26.721239 | 78 | 0.656897 |
0be7b2204441a9a284657f32102fe27e90db3c14
| 234 |
py
|
Python
|
1323-maximum-69-number/1323-maximum-69-number.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2 |
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
1323-maximum-69-number/1323-maximum-69-number.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
1323-maximum-69-number/1323-maximum-69-number.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
class Solution:
def maximum69Number (self, num: int) -> int:
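        # greedy: changing the most significant '6' to '9' gives the largest gain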
        try:
            numstr = list(str(num))
            numstr[numstr.index('6')] = '9'
            return int("".join(numstr))
        except ValueError:
            return num
| 29.25 | 48 | 0.495726 |
043e1df3fcc62ca14be719c44d897ca00b52ed86
| 231 |
py
|
Python
|
Exercicios/ex09-3e.py
|
BoltzBit/LP
|
f84d36d1bdee9a20c197cebec2810234c5311fb8
|
[
"MIT"
] | null | null | null |
Exercicios/ex09-3e.py
|
BoltzBit/LP
|
f84d36d1bdee9a20c197cebec2810234c5311fb8
|
[
"MIT"
] | null | null | null |
Exercicios/ex09-3e.py
|
BoltzBit/LP
|
f84d36d1bdee9a20c197cebec2810234c5311fb8
|
[
"MIT"
] | null | null | null |
# rhombus area calculation
diagonal1 = float(input('Insira a primeira diagonal: '))
diagonal2 = float(input('Insira a segunda diagonal: '))
area = (diagonal1*diagonal2)/2
msg = 'A area do losango é {}'
print(msg.format(area))
| 21 | 56 | 0.709957 |
ac8e575d2629002261a978c87e3f8844ff7e1132
| 354 |
py
|
Python
|
PYTHON/Regex_and_Parsing/findall.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Regex_and_Parsing/findall.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
PYTHON/Regex_and_Parsing/findall.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import re
if __name__ == '__main__':
input_str = input()
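    # collect runs of two or more consecutive vowels (case-insensitive)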
ret = ([r.group(0) for r in re.finditer(r'([aeiou])+', input_str, re.I) if len(r.group(0)) > 1])
if len(ret) == 0:
print(-1)
else:
if ret[-1] == input_str[(-1 * len(ret[-1])):]:
ret.pop()
print('\n'.join(ret))
| 25.285714 | 100 | 0.516949 |
acd516c116f068c0b5e1561eb1397b219090fc81
| 182 |
py
|
Python
|
sources/stage03/enten.py
|
kantel/pythonschulung2
|
b13fb24770dd7789f3845aeb147a720dff272951
|
[
"MIT"
] | null | null | null |
sources/stage03/enten.py
|
kantel/pythonschulung2
|
b13fb24770dd7789f3845aeb147a720dff272951
|
[
"MIT"
] | null | null | null |
sources/stage03/enten.py
|
kantel/pythonschulung2
|
b13fb24770dd7789f3845aeb147a720dff272951
|
[
"MIT"
] | null | null | null |
suffix = "ack"
praefixe = "JKLMNOPQ"
for praefix in praefixe:
if praefix == "O" or praefix == "Q":
print(praefix + "u" + suffix)
else:
print(praefix + suffix)
| 26 | 40 | 0.582418 |
76b1a0c398c6cbf56adab19ef011b95aa0a8bab9
| 12,696 |
py
|
Python
|
src/onegov/election_day/formats/election/internal_proporz.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/formats/election/internal_proporz.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/formats/election/internal_proporz.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.ballot import Candidate
from onegov.ballot import CandidateResult
from onegov.ballot import ElectionResult
from onegov.ballot import List
from onegov.ballot import ListConnection
from onegov.ballot import ListResult
from onegov.ballot import PanachageResult
from onegov.election_day import _
from onegov.election_day.formats.common import EXPATS, validate_integer, \
validate_list_id
from onegov.election_day.formats.common import FileImportError
from onegov.election_day.formats.common import load_csv
from onegov.election_day.formats.common import STATI
from sqlalchemy.orm import object_session
from uuid import uuid4
from onegov.election_day.import_export.mappings import \
INTERNAL_PROPORZ_HEADERS
def parse_election(line, errors):
status = None
try:
status = line.election_status or 'unknown'
except ValueError:
errors.append(_("Invalid election values"))
if status not in STATI:
errors.append(_("Invalid status"))
return status
def parse_election_result(line, errors, entities, election_id):
try:
entity_id = validate_integer(line, 'entity_id')
counted = line.entity_counted.strip().lower() == 'true'
eligible_voters = validate_integer(line, 'entity_eligible_voters')
received_ballots = validate_integer(line, 'entity_received_ballots')
blank_ballots = validate_integer(line, 'entity_blank_ballots')
invalid_ballots = validate_integer(line, 'entity_invalid_ballots')
blank_votes = validate_integer(line, 'entity_blank_votes')
invalid_votes = validate_integer(line, 'entity_invalid_votes')
except ValueError as e:
errors.append(e.args[0])
else:
if entity_id not in entities and entity_id in EXPATS:
entity_id = 0
if entity_id and entity_id not in entities:
errors.append(_(
"${name} is unknown",
mapping={'name': entity_id}
))
else:
entity = entities.get(entity_id, {})
return dict(
id=uuid4(),
election_id=election_id,
name=entity.get('name', ''),
district=entity.get('district', ''),
counted=counted,
entity_id=entity_id,
eligible_voters=eligible_voters,
received_ballots=received_ballots,
blank_ballots=blank_ballots,
invalid_ballots=invalid_ballots,
blank_votes=blank_votes,
invalid_votes=invalid_votes,
)
def parse_list(line, errors, election_id):
try:
id = validate_list_id(line, 'list_id', treat_empty_as_default=False)
name = line.list_name
mandates = validate_integer(line, 'list_number_of_mandates')
except ValueError as e:
errors.append(e.args[0])
else:
return dict(
id=uuid4(),
election_id=election_id,
list_id=id,
number_of_mandates=mandates,
name=name,
)
def parse_list_result(line, errors):
try:
votes = validate_integer(line, 'list_votes')
except ValueError as e:
errors.append(e.args[0])
else:
return dict(
id=uuid4(),
votes=votes
)
def parse_panachage_headers(csv):
headers = {}
for header in csv.headers:
if not header.startswith('panachage_votes_from_list_'):
continue
parts = header.split('panachage_votes_from_list_')
if len(parts) > 1:
try:
source_list_id = parts[1]
headers[csv.as_valid_identifier(header)] = source_list_id
except ValueError:
pass
return headers
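# Accumulate panachage votes per target list, keyed by source list id (self-votes skipped).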
def parse_panachage_results(line, errors, panachage, panachage_headers):
try:
target = validate_list_id(
line, 'list_id', treat_empty_as_default=False)
if target not in panachage:
panachage[target] = {}
for col_name, source in panachage_headers.items():
if source == target:
continue
panachage[target][source] = validate_integer(
line, col_name, treat_none_as_default=False)
except ValueError as e:
errors.append(e.args[0])
except Exception:
errors.append(_("Invalid list results"))
def parse_candidate(line, errors, election_id):
try:
id = line.candidate_id
family_name = line.candidate_family_name
first_name = line.candidate_first_name
elected = str(line.candidate_elected or '').lower() == 'true'
party = line.candidate_party
except ValueError:
errors.append(_("Invalid candidate values"))
else:
return dict(
id=uuid4(),
election_id=election_id,
candidate_id=id,
family_name=family_name,
first_name=first_name,
elected=elected,
party=party
)
def parse_candidate_result(line, errors):
try:
votes = validate_integer(line, 'candidate_votes')
except ValueError as e:
errors.append(e.args[0])
else:
return dict(
id=uuid4(),
votes=votes,
)
def prefix_connection_id(connection_id, parent_connection_id):
"""Used to distinguish connection ids when they have the same id
as a parent_connection. """
if not len(connection_id) > len(parent_connection_id):
return parent_connection_id + connection_id
return connection_id
def parse_connection(line, errors, election_id):
subconnection_id = None
try:
connection_id = line.list_connection
parent_connection_id = line.list_connection_parent
if parent_connection_id:
subconnection_id = prefix_connection_id(
connection_id, parent_connection_id
)
connection_id = parent_connection_id
except ValueError:
errors.append(_("Invalid list connection values"))
else:
connection = dict(
id=uuid4(),
election_id=election_id,
connection_id=connection_id,
) if connection_id else None
subconnection = dict(
id=uuid4(),
election_id=election_id,
connection_id=subconnection_id,
) if subconnection_id else None
return connection, subconnection
def import_election_internal_proporz(election, principal, file, mimetype):
""" Tries to import the given file (internal format).
This is the format used by onegov.ballot.Election.export().
This function is typically called automatically every few minutes during
an election day - we use bulk inserts to speed up the import.
:return:
A list containing errors.
"""
filename = _("Results")
csv, error = load_csv(
file, mimetype, expected_headers=INTERNAL_PROPORZ_HEADERS,
filename=filename,
dialect='excel'
)
if error:
return [error]
errors = []
candidates = {}
candidate_results = []
lists = {}
list_results = {}
connections = {}
subconnections = {}
results = {}
panachage = {}
panachage_headers = parse_panachage_headers(csv)
entities = principal.entities[election.date.year]
election_id = election.id
    # This format has one candidate per entity per line
status = None
for line in csv.lines:
line_errors = []
# Parse the line
status = parse_election(line, line_errors)
result = parse_election_result(
line, line_errors, entities, election_id
)
candidate = parse_candidate(line, line_errors, election_id)
candidate_result = parse_candidate_result(line, line_errors)
list_ = parse_list(line, line_errors, election_id)
list_result = parse_list_result(line, line_errors)
connection, subconnection = parse_connection(
line, line_errors, election_id
)
parse_panachage_results(
line, line_errors, panachage, panachage_headers)
# Skip expats if not enabled
if result and result['entity_id'] == 0 and not election.expats:
continue
# Pass the errors and continue to next line
if line_errors:
errors.extend(
FileImportError(
error=err, line=line.rownumber, filename=filename
)
for err in line_errors
)
continue
# Add the data
result = results.setdefault(result['entity_id'], result)
list_ = lists.setdefault(list_['list_id'], list_)
if connection:
connection = connections.setdefault(
connection['connection_id'], connection
)
list_['connection_id'] = connection['id']
if subconnection:
subconnection = subconnections.setdefault(
subconnection['connection_id'], subconnection
)
subconnection['parent_id'] = connection['id']
list_['connection_id'] = subconnection['id']
list_results.setdefault(result['entity_id'], {})
list_result = list_results[result['entity_id']].setdefault(
list_['list_id'], list_result
)
list_result['list_id'] = list_['id']
candidate = candidates.setdefault(candidate['candidate_id'], candidate)
candidate_result['candidate_id'] = candidate['id']
candidate_result['election_result_id'] = result['id']
candidate_results.append(candidate_result)
candidate['list_id'] = list_['id']
if not errors and not results:
errors.append(FileImportError(_("No data found")))
if panachage_headers:
for list_id in panachage_headers.values():
            if list_id != '999' and list_id not in lists:
errors.append(FileImportError(
_("Panachage results id ${id} not in list_id's",
mapping={'id': list_id})))
break
# Check if all results are from the same district if regional election
    districts = {result['district'] for result in results.values()}
if election.domain == 'region' and election.distinct:
if principal.has_districts:
if len(districts) != 1:
errors.append(FileImportError(_("No clear district")))
else:
if len(results) != 1:
errors.append(FileImportError(_("No clear district")))
if errors:
return errors
# Add the missing entities
remaining = set(entities.keys())
if election.expats:
remaining.add(0)
remaining -= set(results.keys())
for entity_id in remaining:
entity = entities.get(entity_id, {})
district = entity.get('district', '')
if election.domain == 'region':
if not election.distinct:
continue
if not principal.has_districts:
continue
if district not in districts:
continue
results[entity_id] = dict(
id=uuid4(),
election_id=election_id,
name=entity.get('name', ''),
district=district,
entity_id=entity_id,
counted=False
)
# Add the results to the DB
election.clear_results()
election.status = status
result_uids = {r['entity_id']: r['id'] for r in results.values()}
list_uids = {r['list_id']: r['id'] for r in lists.values()}
session = object_session(election)
# FIXME: Sub-Sublists are also possible
session.bulk_insert_mappings(ListConnection, connections.values())
session.bulk_insert_mappings(ListConnection, subconnections.values())
session.bulk_insert_mappings(List, lists.values())
session.bulk_insert_mappings(PanachageResult, (
dict(
id=uuid4(),
source=source,
target=str(list_uids[list_id]),
votes=votes,
)
for list_id in panachage
for source, votes in panachage[list_id].items()
))
session.bulk_insert_mappings(Candidate, candidates.values())
session.bulk_insert_mappings(ElectionResult, results.values())
session.bulk_insert_mappings(ListResult, (
dict(**list_result, election_result_id=result_uids[entity_id])
for entity_id, values in list_results.items()
for list_result in values.values()
))
session.bulk_insert_mappings(CandidateResult, candidate_results)
return []
| 33.410526 | 79 | 0.624921 |
4fb9d8fc7d4533ef8f4fd9fa273b9faacccf348e
| 1,958 |
py
|
Python
|
python_gui_tkinter/KALU/GARBAGE1/ALLOUTDATED/TESTING2/TEST/nextTest.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/KALU/GARBAGE1/ALLOUTDATED/TESTING2/TEST/nextTest.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/KALU/GARBAGE1/ALLOUTDATED/TESTING2/TEST/nextTest.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from tkinter import *
from tkinter import font
info = [
("Name (TEXT):",1),
("e-mail (TEXT):",2),
("Flat no. (TEXT):",3),
("Tower no. (TEXT):",4),
("Area (NUMBER):",5),
("Parking (TEXT):",6),
("Recpt. Fess (NUMBER):",7),
("Address (TEXT):",8),
("Contact number (TEXT):",9)
]
e=["","","","","","","","","",""] # entries
class Page(Frame):
"""Page is the Frame that will be added/removed at will"""
def __init__(self, root, id):
Frame.__init__(self, root)
Label(self, text="Frame %d" % id).pack()
class insert(Frame):
"""Main application where everything is done"""
    def __init__(self, root):
        Frame.__init__(self, root)
        self.root = root
        for data, num in info:
            # assumed intent: each form field gets its own row Frame
            # (the original packed the Tk root into itself, which crashes)
            row = Frame(root)
            lab = Label(row, width=25, padx=10, pady=10, text=data,
                        font=font.Font(family='Helvetica', size=12, weight='bold'),
                        anchor='w')
            ent = Entry(row)
            e[num] = ent
            row.pack(side=TOP, fill=X, padx=5, pady=5)
            lab.pack(side=LEFT)
            ent.pack(side=RIGHT, expand=YES, fill=X)
        # pages used by next(); never initialized in the original, so create
        # five placeholder Page frames and show the first one
        self.pages = [Page(self, i) for i in range(5)]
        self.page = 0
        self.pages[self.page].pack(side=TOP)
Button(self, text='Show'#, command=CommandsGUI.show_entry_fields
).pack(side=LEFT, padx=5, pady=5)
Button(self, text='Insert to database'#, command=DBOperations.insert_into_db
).pack(side=LEFT, padx=5, pady=5)
Button(self, text='Reset'#, command=DBOperations.reset_val
).pack(side=RIGHT, padx=5, pady=5)
Button(self, text="Next", command=self.next).pack(side=BOTTOM)
def next(self):
"""changes the current page. I've only done next here, but you could
do backwards, skip pages, etc"""
self.pages[self.page].pack_forget() #remove the current page
self.page += 1
if self.page >= 5: #checking haven't gone past the end of self.page
self.page = 0
self.pages[self.page].pack(side=TOP) #add the next one
if __name__ == "__main__":
root = Tk()
app = insert(root)
app.pack()
root.mainloop()
| 33.758621 | 148 | 0.600613 |
e880c76bba866f062de0d0d63aac62a9bb9e1286
| 1,289 |
py
|
Python
|
Packs/Confluera/Scripts/IqHubLog/IqHubLog.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Confluera/Scripts/IqHubLog/IqHubLog.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Confluera/Scripts/IqHubLog/IqHubLog.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from CommonServerPython import *
from CommonServerUserPython import *
# Executes confluera-fetch-detections command/script
detections_data = demisto.executeCommand('confluera-fetch-detections', {'hours': '72'})
if detections_data[0] and detections_data[0]['Contents'] and detections_data[0]['Contents']['Detections URL']:
det_url = detections_data[0]['Contents']['Detections URL']
else:
det_url = ''
if detections_data[1] and detections_data[1]['Contents']:
det_count = len(detections_data[1]['Contents'])
else:
det_count = 0
# Executes confluera-fetch-progressions command/script
progressions_data = demisto.executeCommand('confluera-fetch-progressions', {'hours': '72'})
if progressions_data[0] and progressions_data[0]['Contents'] and progressions_data[0]['Contents']['Progressions URL']:
prog_url = progressions_data[0]['Contents']['Progressions URL']
else:
prog_url = ''
if progressions_data[1] and progressions_data[1]['Contents']:
prog_count = len(progressions_data[1]['Contents'])
else:
prog_count = 0
data = [
{
'Count': 'Detections: ' + str(det_count),
'URL': det_url
},
{
'Count': 'Progressions:' + str(prog_count),
'URL': prog_url
}
]
return_results({
'total': 2,
'data': data
})
| 28.021739 | 118 | 0.692009 |
ad1b22685fb173bc89ee5114a91f4ea875e57d50
| 280 |
py
|
Python
|
new-docs/examples/python/getting-started/application/expenses-flask/tests/conftest.py
|
saschajullmann/oso
|
85d07c6a1825acba5ec043c917bff6e0f5c7128f
|
[
"Apache-2.0"
] | null | null | null |
new-docs/examples/python/getting-started/application/expenses-flask/tests/conftest.py
|
saschajullmann/oso
|
85d07c6a1825acba5ec043c917bff6e0f5c7128f
|
[
"Apache-2.0"
] | 2 |
2021-03-24T19:24:40.000Z
|
2021-03-24T19:54:46.000Z
|
new-docs/examples/python/getting-started/application/expenses-flask/tests/conftest.py
|
saschajullmann/oso
|
85d07c6a1825acba5ec043c917bff6e0f5c7128f
|
[
"Apache-2.0"
] | 1 |
2021-03-24T19:51:45.000Z
|
2021-03-24T19:51:45.000Z
|
import pytest
from app import create_app
@pytest.fixture(scope="module")
def test_client():
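    # module-scoped Flask test client; the app context stays pushed for all tests in the module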
flask_app = create_app()
flask_app.testing = True
test_client = flask_app.test_client()
ctx = flask_app.app_context()
ctx.push()
yield test_client
ctx.pop()
| 18.666667 | 41 | 0.696429 |
7e7e5a5b174edec8a01206e1a4ec02ea683ae133
| 30 |
py
|
Python
|
Scripts/kim.py
|
kenanchristian/hacktoberfest
|
b55750bf4facb77abd532b66ed37101e2895c4d7
|
[
"MIT"
] | 8 |
2020-10-26T06:51:06.000Z
|
2021-04-02T13:01:27.000Z
|
Scripts/kim.py
|
kenanchristian/hacktoberfest
|
b55750bf4facb77abd532b66ed37101e2895c4d7
|
[
"MIT"
] | 71 |
2020-10-25T22:46:02.000Z
|
2021-10-14T06:47:39.000Z
|
Scripts/kim.py
|
kenanchristian/hacktoberfest
|
b55750bf4facb77abd532b66ed37101e2895c4d7
|
[
"MIT"
] | 77 |
2020-10-24T01:53:46.000Z
|
2021-10-01T06:25:27.000Z
|
##
print("Hello Kimbo!")
| 3.75 | 21 | 0.5 |
7d16e094d7bca22f2cd4702b2c9116984cb7e4dd
| 3,318 |
py
|
Python
|
Packs/ContentManagement/Scripts/CustomPackInstaller/CustomPackInstaller.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | 2 |
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/ContentManagement/Scripts/CustomPackInstaller/CustomPackInstaller.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | 87 |
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Packs/ContentManagement/Scripts/CustomPackInstaller/CustomPackInstaller.py
|
PAM360/content
|
928aac9c586c6e593b2a452c402a37cb5df28dac
|
[
"MIT"
] | 2 |
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
from typing import Tuple
from urllib import parse
import demistomock as demisto
from CommonServerPython import *
SCRIPT_NAME = 'CustomPackInstaller'
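# Build the upload URI; skipVerify needs server >= 6.5 and skipValidation needs >= 6.6.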
def build_url_parameters(skip_verify: str, skip_validation: str) -> str:
is_server_ge_to_6_5 = is_demisto_version_ge('6.5.0')
is_server_ge_to_6_6 = is_demisto_version_ge('6.6.0')
uri = '/contentpacks/installed/upload'
params = {}
if skip_verify == 'true' and is_server_ge_to_6_5:
params['skipVerify'] = 'true'
if skip_validation == 'true' and is_server_ge_to_6_6:
params['skipValidation'] = 'true'
params = parse.urlencode(params)
return f'{uri}?{params}' if params else uri
def install_custom_pack(pack_id: str, skip_verify: str, skip_validation: str) -> Tuple[bool, str]:
    """Installs a custom pack in the machine.
    Args:
        pack_id (str): The ID of the pack to install.
        skip_verify (str): If 'true', pack signature verification is skipped.
        skip_validation (str): If 'true', all pack validations are skipped.
Returns:
- bool. Whether the installation of the pack was successful or not.
- str. In case of failure, the error message.
Notes:
Assumptions: The zipped file is in the war-room, and the context includes the data related to it.
"""
pack_file_entry_id = ''
instance_context = demisto.context()
context_files = instance_context.get('File', [])
if not isinstance(context_files, list):
context_files = [context_files]
for file_in_context in context_files:
if file_in_context['Name'] == f'{pack_id}.zip':
pack_file_entry_id = file_in_context['EntryID']
break
uri = build_url_parameters(skip_verify=skip_verify, skip_validation=skip_validation)
if pack_file_entry_id:
status, res = execute_command(
'demisto-api-multipart',
{'uri': uri, 'entryID': pack_file_entry_id},
fail_on_error=False,
)
if not status:
error_message = f'{SCRIPT_NAME} - {res}'
demisto.debug(error_message)
return False, f'Issue occurred while installing the pack on the machine.\n{res}'
else:
error_message = 'Could not find file entry ID.'
demisto.debug(f'{SCRIPT_NAME}, "{pack_id}" - {error_message}.')
return False, error_message
return True, ''
def main():
args = demisto.args()
pack_id = args.get('pack_id')
skip_verify = args.get('skip_verify')
skip_validation = args.get('skip_validation')
try:
installation_status, error_message = install_custom_pack(pack_id, skip_verify, skip_validation)
return_results(
CommandResults(
outputs_prefix='ConfigurationSetup.CustomPacks',
outputs_key_field='packid',
outputs={
'packid': pack_id,
'installationstatus': 'Success.' if installation_status else error_message,
},
)
)
if not installation_status:
return_error(error_message)
except Exception as e:
return_error(f'{SCRIPT_NAME} - Error occurred while installing custom pack "{pack_id}".\n{e}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 31.6 | 105 | 0.648885 |
cbce52c33c14f05b9ae6d491cdb2df97a7bebb53
| 518 |
py
|
Python
|
ffeKiosk/urls.py
|
AndiBr/ffksk
|
ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99
|
[
"MIT"
] | null | null | null |
ffeKiosk/urls.py
|
AndiBr/ffksk
|
ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99
|
[
"MIT"
] | 14 |
2018-09-12T06:59:55.000Z
|
2020-02-26T07:17:48.000Z
|
ffeKiosk/urls.py
|
AndiBr/ffksk
|
ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url, include
from django.views.generic import RedirectView
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
url(r'^favicon\.ico$', RedirectView.as_view(url='/static/img/favicon.ico')),
url(r'^admin/', admin.site.urls),
url(r'', include('kiosk.urls')),
url(r'', include('profil.urls')),
url(r'^accounts/login/$', views.login, name='login'),
url(r'^accounts/logout/$', views.logout, name='logout', kwargs={'next_page': '/'}),
]
| 37 | 87 | 0.681467 |
1dbc6f6a09f562bcb58345fb6cae76b92cf531eb
| 2,648 |
py
|
Python
|
test.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | 1 |
2017-01-08T13:21:43.000Z
|
2017-01-08T13:21:43.000Z
|
test.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | 17 |
2018-12-03T14:22:14.000Z
|
2021-07-14T15:15:12.000Z
|
test.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | 2 |
2018-12-03T14:58:49.000Z
|
2019-12-01T13:24:42.000Z
|
from pprint import pprint
import django
from fractions import Fraction as frac
if __name__ == "__main__":
django.setup()
from apps.multivers.tools import Multivers
products = [
(12, frac(123348, 100), 2),
(4, frac(29000, 100), 2),
(5, frac(36470, 100), 2),
(7, frac(6496, 100), 1),
(1, frac(899, 100), 1),
(1, frac(928, 100), 1),
(1, frac(899, 100), 1),
(1, frac(1199, 100), 1),
(2, frac(3994, 100), 1),
(1, frac(1831, 100), 1),
(1, frac(1445, 100), 1),
(4, frac(724, 100), 2),
(6, frac(2370, 100), 2),
(1, -frac(30, 100), 2),
]
btw_tot = [frac(0), frac(0)]
btw_per = [frac(6, 100), frac(21, 100)]
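    # VAT is rounded per line at the unit-price level, then scaled by quantity and summed per category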
for qnt, amount, cat in products:
amount /= qnt
btw_tot[cat-1] += qnt * round(amount * btw_per[cat-1], 2)
print(btw_tot)
# multivers = Multivers(None)
# response = multivers._post("MVL48759/SupplierInvoice", data={
# "canChange": True,
# "fiscalYear": 2018,
# "invoiceDate": "01-01-2018",
# "invoiceId": "18100063",
# "journalId": "IC",
# "journalSection": "1",
# "journalTransaction": 25,
# "paymentConditionId": "14",
# "paymentReference": "0123456789012345",
# "periodNumber": 1,
# "processedBy": "Pieter Bos",
# "processedById": "38",
# "reference": "example description",
# "supplierId": "2008008",
# "supplierInvoiceLines": [{
# "accountId": "0",
# "canChange": True,
# "creditAmount": 0.0,
# "creditAmountCur": 0.0,
# "debitAmount": 7.24,
# "debitAmountCur": 7.24,
# "description": "Schoonmaakmiddelen",
# "journalSection": 0,
# "transactionDate": "01-01-2018",
# "vatCodeId": 2,
# "vatType": 0
# }],
# "vatOnInvoice": True,
# "vatScenarioId": 1,
# "vatTransactionLines": [{
# "amountTurnoverCur": 176.91,
# "canChange": True,
# "currencyId": "",
# "fiscalYear": 2018,
# "vatAmountCur": 10.61,
# "vatCodeId": 1,
# "vatScenarioId": 1,
# "vatType": 0
# }, {
# "amountTurnoverCur": 1918.82,
# "canChange": True,
# "currencyId": "",
# "fiscalYear": 2018,
# "vatAmountCur": 402.96,
# "vatCodeId": 2,
# "vatScenarioId": 1,
# "vatType": 0
# }]
# })
#
# pprint(response)
| 29.422222 | 67 | 0.4679 |
3865cea1b31a09ca887ee377b4aa61e5c6c64c08
| 1,877 |
py
|
Python
|
test/test_npu/test_softmax.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_softmax.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_softmax.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestSoftmax(TestCase):
def cpu_op_exec(self, input, dim):
m = torch.nn.Softmax(dim)
output = m(input)
output = output.numpy()
return output
def npu_op_exec(self, input, dim):
m = torch.nn.Softmax(dim)
output = m(input).to("cpu")
output = output.numpy()
return output
def test_softmax_shape_format_fp32(self, device):
shape_format = [
[[np.float32, 0, (1, 12, 5, 8)], 0],
[[np.float32, 0, (2, 31, 53)], 0],
[[np.float32, 0, (5, 20)], 0],
[[np.float32, 0, (1)], 0]
]
for item in shape_format:
cpu_input, npu_input = create_common_tensor(item[0], -2, 2)
dim = item[1]
cpu_output = self.cpu_op_exec(cpu_input, item[1])
npu_output = self.npu_op_exec(npu_input, item[1])
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestSoftmax, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
| 36.803922 | 74 | 0.649973 |
2a67cec3ec3f9d6b13f24ec35a4d8ca69882112b
| 1,065 |
py
|
Python
|
lang-python/sandbox/build.py
|
xd23fe39/technical-notes
|
bb6348705a95db24d07b1081b1aa0265dda131ce
|
[
"MIT"
] | null | null | null |
lang-python/sandbox/build.py
|
xd23fe39/technical-notes
|
bb6348705a95db24d07b1081b1aa0265dda131ce
|
[
"MIT"
] | null | null | null |
lang-python/sandbox/build.py
|
xd23fe39/technical-notes
|
bb6348705a95db24d07b1081b1aa0265dda131ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Import basic modules
import os
import sys
# Initialization
I_WORKING_DIR = os.getcwd()
I_GETENV_USER = os.getenv("USER")
I_SYS_SCRIPTNAME = sys.argv[0]
G_APPLICATION_NAME = "Generic"
G_APPLICATION_VERSION = "1.0.1"
G_APPLICATION_RELEASE = "19.12.130"
##########################################################################
# Basic Operations
def code():
pass
def build():
pass
def deploy():
pass
##########################################################################
# MAIN Procedure
if __name__ == "__main__":
print("{:s}, {:s} ({:s})\n----".format(G_APPLICATION_NAME, G_APPLICATION_VERSION, G_APPLICATION_RELEASE))
print("Current working directory: {:s}".format(I_WORKING_DIR))
print("Current user name: {:s}".format(I_GETENV_USER))
try:
oper = "" + sys.argv[1] + "()"
print("Operation: {}".format(oper))
eval(oper)
exit(0)
except IndexError:
pass
    except NameError:
        print("Operation '{}' not found.".format(sys.argv[1]))
finally:
print("Completed.")
| 20.882353 | 105 | 0.578404 |
2ab3b47b940c269bc9dda2b2719f193b140e2cb1
| 5,508 |
py
|
Python
|
test/test_npu/test_network_ops/test_index_put.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_index_put.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_index_put.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestIndexPut(TestCase):
def cpu_op_exec(self, input, indices, value):
output = input.index_put(indices, value)
output = output.numpy()
return output
def npu_op_exec(self, input, indices, value):
output = input.index_put(indices, value)
output = output.to("cpu")
output = output.numpy()
return output
def cpu_op_inp_exec(self, input, indices, value):
input.index_put_(indices, value)
output = input.numpy()
return output
def npu_op_inp_exec(self, input, indices, value):
input.index_put_(indices, value)
input = input.to("cpu")
output = input.numpy()
return output
def case_exec(self, shape):
cpu_indices = []
npu_indices = []
for item in shape:
cpu_input, npu_input = create_common_tensor(item[0], 1, 100)
for i in range(1, 3):
cpu_indices1, npu_indices1 = create_common_tensor(
item[1], 1, 5)
cpu_indices.append(cpu_indices1)
npu_indices.append(npu_indices1)
cpu_value, npu_value = create_common_tensor(item[2], 1, 100)
cpu_output = self.cpu_op_exec(cpu_input, cpu_indices, cpu_value)
npu_output = self.npu_op_exec(npu_input, npu_indices, npu_value)
self.assertEqual(cpu_output, npu_output)
def case_exec_fp16(self, shape):
cpu_indices = []
npu_indices = []
for item in shape:
cpu_input, npu_input = create_common_tensor(item[0], 1, 100)
cpu_input = cpu_input.to(torch.float32)
for i in range(1, 3):
cpu_indices1, npu_indices1 = create_common_tensor(
item[1], 1, 5)
cpu_indices.append(cpu_indices1)
npu_indices.append(npu_indices1)
cpu_value, npu_value = create_common_tensor(item[2], 1, 100)
cpu_value = cpu_value.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input, cpu_indices, cpu_value)
npu_output = self.npu_op_exec(npu_input, npu_indices, npu_value)
cpu_output = cpu_output.astype(np.float16)
self.assertEqual(cpu_output, npu_output)
def case_inp_exec(self, shape):
cpu_indices = []
npu_indices = []
for item in shape:
cpu_input, npu_input = create_common_tensor(item[0], 1, 100)
for i in range(1, 3):
cpu_indices1, npu_indices1 = create_common_tensor(
item[1], 1, 5)
cpu_indices.append(cpu_indices1)
npu_indices.append(npu_indices1)
cpu_value, npu_value = create_common_tensor(item[2], 1, 100)
cpu_output = self.cpu_op_inp_exec(
cpu_input, cpu_indices, cpu_value)
npu_output = self.npu_op_inp_exec(
npu_input, npu_indices, npu_value)
self.assertEqual(cpu_output, npu_output)
def case_inp_exec_fp16(self, shape):
cpu_indices = []
npu_indices = []
for item in shape:
cpu_input, npu_input = create_common_tensor(item[0], 1, 100)
cpu_input = cpu_input.to(torch.float32)
for i in range(1, 3):
cpu_indices1, npu_indices1 = create_common_tensor(
item[1], 1, 5)
cpu_indices.append(cpu_indices1)
npu_indices.append(npu_indices1)
cpu_value, npu_value = create_common_tensor(item[2], 1, 100)
cpu_value = cpu_value.to(torch.float32)
cpu_output = self.cpu_op_inp_exec(
cpu_input, cpu_indices, cpu_value)
npu_output = self.npu_op_inp_exec(
npu_input, npu_indices, npu_value)
cpu_output = cpu_output.astype(np.float16)
self.assertEqual(cpu_output, npu_output)
def test_index_put_shape_format_fp32(self, device):
format_list = [0]
shape_list = [(5, 6)]
shape_format = [[[np.float32, i, j], [np.int64, 0, [1, 2]], [
np.float32, 0, [1, 2]]] for i in format_list for j in shape_list]
self.case_exec(shape_format)
self.case_inp_exec(shape_format)
def test_index_put_shape_format_fp16(self, device):
format_list = [0]
shape_list = [(5, 6)]
shape_format = [[[np.float16, i, j], [np.int64, 0, [1, 2]], [
np.float16, 0, [1, 2]]] for i in format_list for j in shape_list]
self.case_exec_fp16(shape_format)
self.case_inp_exec_fp16(shape_format)
instantiate_device_type_tests(TestIndexPut, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
| 40.8 | 77 | 0.628722 |
932b92dd4689005b070ebdb3822b43d75ae061e9
| 6,410 |
py
|
Python
|
HeuristicSummary.py
|
sdimitro/zfas
|
4116072225f71415105b70e12aa461fc7466617a
|
[
"MIT"
] | 1 |
2019-12-21T02:32:07.000Z
|
2019-12-21T02:32:07.000Z
|
HeuristicSummary.py
|
sdimitro/zfas
|
4116072225f71415105b70e12aa461fc7466617a
|
[
"MIT"
] | null | null | null |
HeuristicSummary.py
|
sdimitro/zfas
|
4116072225f71415105b70e12aa461fc7466617a
|
[
"MIT"
] | null | null | null |
from Model import Pool, MetaslabGroup, LogSpaceMapGroup
import math
q = 100
class SummaryEntry(object):
#
# s - startTXG
# e - endTXG
# m - metaslabs needed to flush
# b - blocks that would be freed
#
def __init__(self, s, m, b):
self.s, self.e = s, s
self.m, self.b = m, b
def isObsolete(self):
if self.m == 0:
assert self.b == 0, "should be 0 but it is %r" % self.b
return True
assert self.m > 0
assert self.b > 0
return False
def isFull(self):
global q
if self.b < q:
return False
return True
def addToEntry(self, t, m, b):
assert (self.e + 1) == t
self.e += 1
self.m += m
self.b += b
def removeFromEntry(self, m, b):
self.m -= m
self.b -= b
def toS(self):
return "[{} - {}] M - {} B - {}".format(self.s, self.e, self.m, self.b)
class Summary(object):
def __init__(self, pool):
self.p = pool
self.tab = []
def addData(self, txg, mflushed, nblocks):
if len(self.tab) == 0 or self.tab[-1].isFull():
self.tab.append(SummaryEntry(txg, mflushed, nblocks))
else:
#
# The thing to note here is that the entry can
# be full after wards (i.e. not a hard limit).
#
self.tab[-1].addToEntry(txg, mflushed, nblocks)
def trimData(self, mflushed, blkgone):
assert blkgone >= 0
n = 0
for entry in self.tab:
if entry.m >= mflushed:
assert entry.b >= blkgone
entry.removeFromEntry(mflushed, blkgone)
if entry.isObsolete():
n += 1
break
else:
assert entry.b <= blkgone, "%r %r" % (entry.b, blkgone)
mflushed -= entry.m
blkgone -= entry.b
entry.removeFromEntry(entry.m, entry.b)
n += 1
for i in range(n):
assert self.tab[i].isObsolete(), "about to delete entry %r which is not obsolete" % i
self.tab = self.tab[n:]
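    # estimate how many metaslabs must be flushed this TXG to stay within blimit, assuming a steady incoming block rate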
def adviceFlushing(self, incoming, blimit):
print "=== DBG - PRINT FLUSH TAB - START ==="
maxflush = 1
futureTXGs = 1
budget = blimit - self.p.logs.nblocks - incoming
print "budget = {} , blks = {}, incom = {}".format(budget, self.p.logs.nblocks, incoming)
deficit = 0
if budget > 0:
futureTXGs += int(math.ceil(budget / incoming))
deficit = budget - (futureTXGs * incoming)
else:
deficit = budget
print "deficit = {} futureTXG = {}".format(deficit, futureTXGs)
assert deficit <= 0
accms = 0
accblks = deficit
for e in self.tab:
accms += e.m
accblks += e.b
print "b = {} m = {} - bsum = {} msum = {}".format(e.b, e.m, accms, accblks)
if accblks < 0:
continue
toflush = int(math.ceil(accms / futureTXGs))
print "cmax = {} c = {}".format(maxflush, toflush)
maxflush = max(maxflush, toflush)
futureTXGs += int(math.floor(accblks / incoming))
print "futureTXGs = {}".format(futureTXGs)
print "=== DBG - PRINT FLUSH TAB - END ==="
return maxflush
def printSummary(self):
print "=== DBG - PRINT SUMMARY - START ==="
for e in self.tab:
print e.toS()
print "=== DBG - PRINT SUMMARY - END ==="
def crossVerifySummary(self):
row, msum, bsum = 0, 0, 0
for log in self.p.logs.sms:
e = self.tab[row]
assert e.s <= log.txg and log.txg <= e.e
msum += len(log.metaslabs_flushed)
bsum += log.blocks
assert e.m <= msum and e.b <= bsum
if e.e == log.txg:
row += 1
assert e.m == msum and e.b == bsum
msum, bsum = 0, 0
class HeuristicSummary(object):
def __init__(self, nmetaslabs, blimit):
self.nmetaslabs = nmetaslabs
self.pool = Pool(nmetaslabs)
self.y_flushed, self.y_blocks, self.y_logs = [], [], []
self.summary = Summary(self.pool)
self.blimit = blimit
#XXX: Hack
global q
q = int(blimit / 10)
def printLogs(self):
print "=== DBG - PRINT LOGS - START ==="
for log in self.pool.logs.sms:
print "[{}] - M {} - B {}".format(log.txg, len(log.metaslabs_flushed), log.blocks)
print "=== DBG - PRINT LOGS - END ==="
def condition_satisfied(self):
if self.pool.mss.nmetaslabs() >= self.pool.logs.nblocks:
return True
return False
def addGraphEntry(self, nflushed):
self.y_flushed.append(nflushed)
self.y_blocks.append(self.pool.logs.nblocks)
self.y_logs.append(self.pool.logs.nlogs())
def initializeState(self, incoming):
all_metaslabs = self.pool.mss.ms_ordered_by_flushed
self.pool.sync_new_changes(incoming, all_metaslabs)
self.summary.addData(self.pool.syncing_txg, len(all_metaslabs), incoming)
self.summary.trimData(0, self.pool.logs.nblocks - incoming)
print "DBG - TXG: {} - Flushed: {} ".format(self.pool.syncing_txg, len(all_metaslabs))
self.summary.printSummary()
self.pool.sync_done()
self.addGraphEntry(len(all_metaslabs))
def sync_cycle(self, incoming_blocks):
if self.pool.syncing_txg == 0:
self.initializeState(incoming_blocks)
return
nflushed = self.summary.adviceFlushing(incoming_blocks, self.blimit)
self.pool.flush_n_metaslabs(nflushed)
ms_flushed_this_txg = self.pool.mss.ms_ordered_by_flushed[-nflushed:]
self.pool.sync_new_changes(incoming_blocks, ms_flushed_this_txg)
self.summary.addData(self.pool.syncing_txg, nflushed, incoming_blocks)
        self.summary.trimData(nflushed, self.y_blocks[-1] - self.pool.logs.nblocks + incoming_blocks)
print "DBG - TXG: {} - Flushed: {} ".format(self.pool.syncing_txg, nflushed)
self.summary.printSummary()
self.pool.sync_done()
self.addGraphEntry(nflushed)
def simulate(self, y_incoming):
for incoming in y_incoming:
self.sync_cycle(incoming)
| 33.736842 | 99 | 0.551794 |
fae1b20c5c3fa4cde361c36faded37bf8e7e5df6
| 1,197 |
py
|
Python
|
03_Objektorientierung/properties_demo.py
|
Hananja/DQI19-Python
|
63749c49910b5be57d09bb98a5fe728c8fdd5280
|
[
"Unlicense"
] | null | null | null |
03_Objektorientierung/properties_demo.py
|
Hananja/DQI19-Python
|
63749c49910b5be57d09bb98a5fe728c8fdd5280
|
[
"Unlicense"
] | null | null | null |
03_Objektorientierung/properties_demo.py
|
Hananja/DQI19-Python
|
63749c49910b5be57d09bb98a5fe728c8fdd5280
|
[
"Unlicense"
] | null | null | null |
# Demonstration of properties
class MyClassVanilla:
    """ Class with a plain attribute (no property) """
def __init__(self):
self.value = 0
class MyClassProperties:
""" Klasse mit Propertie, das im Setter überprüft wird.
Entscheidendes Kriterium: Eine nachträgliche Abstraktion
durch Datenkapselung (Geheimnisprinzip) ist ohne
Veränderung der Schnittstelle möglich.
"""
def __init__(self):
self.value = 0
@property
def value(self):
        return self.__value  # strongly private via the leading __ (name mangling)
        # (cf. https://www.python.org/dev/peps/pep-0008/#descriptive-naming-styles)
@value.setter
def value(self, value):
if value < 0:
raise ValueError("value parameter must not be <0")
else:
self.__value = value
# plain version without properties
oop = MyClassVanilla()
print(oop.value)
oop.value = 1
print(oop.value)
oop.value = -1
print(oop.value)
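# the vanilla class silently accepts the invalid value -1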
# with property and value validation
try:
omp = MyClassProperties()
print(omp.value)
omp.value = 1
print(omp.value)
omp.value = -1 # Bang!
print(omp.value)
except ValueError as e:
print(f"Error: {e}")
| 23.470588 | 84 | 0.651629 |
87c52b3194e192d0797d5b91a3809cb3cd6eb95f
| 132 |
py
|
Python
|
app/models/__init__.py
|
zhiyong-lv/flask-login
|
d8bf0719bae19ba8f7f44ea6d6a8ca65ba22aa63
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
zhiyong-lv/flask-login
|
d8bf0719bae19ba8f7f44ea6d6a8ca65ba22aa63
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
zhiyong-lv/flask-login
|
d8bf0719bae19ba8f7f44ea6d6a8ca65ba22aa63
|
[
"MIT"
] | null | null | null |
from .users import User
from .documents import Document
from .files import File
from .tags import Tag
from .file_tags import FileTag
| 26.4 | 31 | 0.818182 |
eaa03773285dfb7d0eacf3849611533d31441481
| 2,418 |
py
|
Python
|
twitoff/twitter.py
|
Pdugovich/TwitOff
|
9c06677ed763cf5d14fec53a10024126792681c4
|
[
"MIT"
] | null | null | null |
twitoff/twitter.py
|
Pdugovich/TwitOff
|
9c06677ed763cf5d14fec53a10024126792681c4
|
[
"MIT"
] | 1 |
2021-06-02T00:47:17.000Z
|
2021-06-02T00:47:17.000Z
|
twitoff/twitter.py
|
Pdugovich/TwitOff
|
9c06677ed763cf5d14fec53a10024126792681c4
|
[
"MIT"
] | null | null | null |
"""Retreive tweets, embeddings, and persist in the database"""
import basilica
import tweepy
from decouple import config
from .models import DB, Tweet, User
TWITTER_AUTH = tweepy.OAuthHandler(config('TWITTER_CONSUMER_KEY'),
config('TWITTER_CONSUMER_SECRET'))
TWITTER_AUTH.set_access_token(config('TWITTER_ACCESS_TOKEN'),
config('TWITTER_ACCESS_TOKEN_SECRET'))
TWITTER = tweepy.API(TWITTER_AUTH)
BASILICA = basilica.Connection(config('BASILICA_KEY'))
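# Tweepy API client and Basilica embedding connection, both configured from environment variables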
# OLD TERRIBLE CODE
# def add_user_and_tweets(twitter_handle):
# twitter_user = TWITTER.get_user(twitter_handle)
# tweets=twitter_user.timeline(
# count=200,
# exclude_replies=True,
# include_rts=False,
# tweet_mode='extended'
# )
# db_user = User(
# id=twitter_user.id,
# name=twitter_user.screen_name,
# newest_tweet_id=tweets[0].id
# )
# for tweet in tweets:
# embedding = BASILICA.embed_sentence(tweet.full_text, model='twitter')
# db_tweet = Tweet(
# id=tweet.id,
# text=tweet.full_text[:500],
# embedding=embedding
# )
# db_user.tweets.append(db_tweet)
# DB.session.add(db_tweet)
# DB.session.commit()
def add_or_update_user(username):
"""Add or update a user and their tweets, or return error"""
try:
twitter_user = TWITTER.get_user(username)
        db_user = (User.query.get(twitter_user.id) or
                   User(id=twitter_user.id, name=username))
        DB.session.add(db_user)
        tweets = twitter_user.timeline(count=200, exclude_replies=True,
                                       include_rts=False,
                                       tweet_mode='extended',
                                       since_id=db_user.newest_tweet_id)
if tweets:
db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
                # Calculate embedding on the full tweet text
embedding = BASILICA.embed_sentence(tweet.full_text, model='twitter')
db_tweet = Tweet(id=tweet.id, text=tweet.full_text[:500],
embedding=embedding)
db_user.tweets.append(db_tweet)
DB.session.add(db_tweet)
except Exception as e:
print('Error processing {}: {}'.format(username,e))
raise e
else:
DB.session.commit()
| 37.78125 | 81 | 0.602564 |
577102893949329ef02e4700e4f6d577f7dc0594
| 2,918 |
py
|
Python
|
main.py
|
Peetee06/compilerbau_ws2122_hsbochum
|
9b71e33f7385ba946ce7f4d375b7ca68b21dd97f
|
[
"MIT"
] | null | null | null |
main.py
|
Peetee06/compilerbau_ws2122_hsbochum
|
9b71e33f7385ba946ce7f4d375b7ca68b21dd97f
|
[
"MIT"
] | null | null | null |
main.py
|
Peetee06/compilerbau_ws2122_hsbochum
|
9b71e33f7385ba946ce7f4d375b7ca68b21dd97f
|
[
"MIT"
] | 2 |
2022-03-13T18:48:24.000Z
|
2022-03-20T16:45:19.000Z
|
###########
# Simple GUI for the compiler course "Compilerbau WS21/22".
# The code must be saved to a file (or loaded from one) before using Run.
# The source code is expected to be a one-liner.
# "Func" stands for the backend; here Tokenizer fills that role.
# The backend is hooked in inside run() below via Tokenizer.run.
###########
from lexer_tokenizer import Tokenizer
###########
from tkinter import *
from tkinter.filedialog import asksaveasfilename, askopenfilename
###########
# creating a Tk object
###########
compiler = Tk()
compiler.title('Compiler WS21/22')
file_path = ''
###########
# setting a global path for the saved code
###########
def set_file_path(path):
global file_path
file_path = path
###########
# possibility to open an existing file
###########
def open_file():
path = askopenfilename(filetypes=[('All Files', '*')])
with open(path, 'r') as file:
code = file.read()
editor.delete('1.0', END)
editor.insert('1.0', code)
set_file_path(path)
###########
# save file before run
###########
def save_as():
if file_path == '':
path = asksaveasfilename(filetypes=[('All Files', '*')])
else:
path = file_path
with open(path, 'w') as file:
code = editor.get('1.0', 'end-1c') # alternative: use END instead 'end-1c', but beware of the new line caused by 'END'
file.write(code)
set_file_path(path)
###########
# run the compiler using Func as source (Lexer, Parser etc.)
###########
def run():
code_output.delete('1.0', END) # delete the output for the next run
    if file_path == '':  # if the file isn't saved yet, don't allow run to execute
save_prompt = Toplevel()
text = Label(save_prompt, text='Please save your code')
text.pack()
return
f = open(file_path, "r") # open the file and read the command
command = f.read()
# command = f'{file_path}'
result, error = Tokenizer.run(file_path, command) # Func.run, interface between Frontend and Backend
if error: # catch an error if occur
code_output.insert('1.0', error.as_string())
elif result:
code_output.insert('1.0', str(result))
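# Hedged interface sketch (an assumption about the backend contract, inferred
# from the call above rather than taken from the Tokenizer source): run()
# receives the file path and its contents and returns a (result, error) pair,
# where error exposes an as_string() method, e.g.:
#
#   class Tokenizer:
#       @staticmethod
#       def run(file_name, text):
#           tokens = text.split()  # placeholder lexing
#           return tokens, None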
###########
# Frontend of the TK-object
###########
menu_bar = Menu(compiler)
# categories in the menu bar
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label='Open', command=open_file)
file_menu.add_command(label='Save', command=save_as)
# file_menu.add_command(label='Save As', command=save_as)
file_menu.add_command(label='Exit', command=exit)
menu_bar.add_cascade(label='File', menu=file_menu)
# 'Run' triggers the compiler directly, so a plain command entry is enough
menu_bar.add_command(label='Run', command=run)
compiler.config(menu=menu_bar)
# creating a Text()-object for input
editor = Text()
editor.pack()
# creating a Text()-object for output
code_output = Text(height=10)
code_output.pack()
compiler.mainloop()
| 28.607843 | 127 | 0.631254 |
57e2dbfd02cbf79ca0b5f697d8f799f329adff9e
| 238 |
py
|
Python
|
src/jmx.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
src/jmx.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
src/jmx.py
|
magistersart/ZTC_fork
|
ce72734ea575d9846b5b81f3efbfd14fa1f7e532
|
[
"PostgreSQL"
] | null | null | null |
#!/usr/bin/python
"""
jmx.* scripts item
Copyright (c) 2011 Vladimir Rusinov <[email protected]>
License: GNU GPL3
This file is part of ZTC
"""
from ztc.java.jmx import JMXCheck
j = JMXCheck()
m = j.args[0]
j.get(m, *j.args[1:])
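# Hedged usage note (the invocation shape is an assumption, not from the
# source): ZTC items of the form jmx.<metric>[<args>] are expected to map to
#   jmx.py <metric> <arg1> <arg2> ...
# with JMXCheck parsing the command-line arguments into j.args.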
| 15.866667 | 61 | 0.689076 |
5e0e5fca539da462da271668e65fa2c4eb9c11de
| 4,500 |
py
|
Python
|
gen_cs_proto_msg_ids.py
|
thanklzhang/Proto_Pre
|
e6d1998aa41b52f01b5244a59fbbbe700d6dd64c
|
[
"MIT"
] | null | null | null |
gen_cs_proto_msg_ids.py
|
thanklzhang/Proto_Pre
|
e6d1998aa41b52f01b5244a59fbbbe700d6dd64c
|
[
"MIT"
] | null | null | null |
gen_cs_proto_msg_ids.py
|
thanklzhang/Proto_Pre
|
e6d1998aa41b52f01b5244a59fbbbe700d6dd64c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import string
import traceback
import openpyxl
import sys
import time
import re
import shutil
definestr = ""
baseDir = os.getcwd()
EnumName = "ProtoIDs"
def get_proto_file_list(dir, flist):
print("\n")
print("start get proto files ...")
print("\n")
for root, dirs, files in os.walk(dir, True):
for name in files:
nm = os.path.splitext(name)
if (nm[1] == ".proto"):
flist.append(name)
print("find file : " + name)
# tmpCmd=('(copy %s %s /y)' %(os.path.join(root, name),baseDir))
# print(tmpCmd)
# os.system(tmpCmd)
def WriteToFile(f, str):
file_part = open(f, 'wb')
file_part.write(str.encode('UTF-8'))
file_part.close()
# Find all messages in the proto files and merge them into one custom message enum
def GenMsgFile(dir, inputFiles, outFile):
print("\n")
optionStr = ""
for currFile in inputFiles:
fc = open(dir + "\\" + currFile, 'rb')
print("currFile : " + currFile + "----------------------------------")
line = fc.readline()
content = []
while line:
content.append(line)
line = fc.readline()
optionStr += "\t//" + currFile.split('.')[0] + "\n"
lineNum = 0
start = -1
isFinish = False
        msgIdIndex = 0
        rightIndex = -1
        for c in content:
            realContent = c
            if(not isFinish):
                currContent = realContent.decode('utf-8')
                # find the enum name
                msgIdIndex = currContent.find(EnumName)
                if(msgIdIndex > 0):
start = lineNum
if(start >= 0):
rightIndex = currContent.find("}")
if(rightIndex >= 0):
isFinish = True
else:
currStrs = currContent.split('=')
if(len(currStrs) > 1):
                            # message entry
                            leftValue = currStrs[0].strip()
                            # filter out unused enum values
if(leftValue != "First" and leftValue != "Begin" and leftValue != "End"):
rightValue = currStrs[1].strip()
# print(leftValue + " = " + rightValue)
rightValue = rightValue.replace(";", ",")
optionStr += "\t" + leftValue + " = " + rightValue + "\n"
else:
                            # check for comment content
describeStr = currContent.split('//')
if(len(describeStr) > 1):
optionStr += currContent
# print(currContent)
lineNum = lineNum + 1
# print("\n")
optionStr += "\n"
fc.close()
allStr = "//gen by tool\npublic enum " + \
EnumName + "\n{\n" + optionStr + "}\n"
WriteToFile(outFile, allStr)
return
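# Hedged illustration (enum values are made up, not from the source): for a
# Cmd.proto containing
#   enum ProtoIDs { LoginReq = 1001; // login request }
# the generated ProtoIDs.cs would look roughly like
#   //gen by tool
#   public enum ProtoIDs
#   {
#       //Cmd
#       LoginReq = 1001,
#   }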
def Copy(source_path,target_path):
print('start to copy files ... : ' + source_path + " -> " + target_path)
if not os.path.exists(target_path):
os.makedirs(target_path)
if os.path.exists(source_path):
        # root is the path of the directory currently being walked
        # dirs is a list of the subdirectory names in that directory (non-recursive)
        # files is a list of the file names in that directory (excluding subdirectories)
for root, dirs, files in os.walk(source_path):
for file in files:
src_file = os.path.join(root, file)
shutil.copy(src_file, target_path)
print(src_file)
print('copy files finished!')
def main():
try:
inputPath = "proto"
outFile = "" + EnumName + ".cs"
        # # collect all proto files under the directory
# fileProtoList = []
# get_proto_file_list(baseDir + "\\" + inputPath, fileProtoList)
        # Convert the proto Cmd enum to C#, so business code depends on the generated custom enum rather than directly on protobuf
fileProtoList = []
fileProtoList.append("Cmd" + ".proto")
        # generate the message-enum file from the proto files
GenMsgFile(inputPath, fileProtoList, outFile)
protoIdsPath = "..\\001_GameFramework_Client\\Assets\\Script\\Common"
shutil.copy("ProtoIDs.cs", protoIdsPath)
os.system("pause")
except:
print("!!!!!!catch exception!!!!!!")
traceback.print_exc()
os.system("pause")
# os.system("pause")
main()
# print("------------------")
| 30.201342 | 101 | 0.483333 |
d83ba320f6f7d9ff9f17f7d422feff1893fca3e5
| 6,795 |
py
|
Python
|
research/nlp/dscnn/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/nlp/dscnn/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/nlp/dscnn/train.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""DSCNN train."""
import os
import datetime
import numpy as np
from mindspore import context
from mindspore import Tensor, Model
from mindspore.context import ParallelMode
from mindspore.nn.optim import Momentum
from mindspore.common import dtype as mstype
from mindspore.train.serialization import load_checkpoint
from mindspore.communication.management import init
from src.log import get_logger
from src.dataset import audio_dataset
from src.ds_cnn import DSCNN
from src.loss import CrossEntropy
from src.lr_scheduler import MultiStepLR, CosineAnnealingLR
from src.callback import ProgressMonitor, callback_func
from src.model_utils.config import config
from src.model_utils.moxing_adapter import moxing_wrapper
from src.model_utils.device_adapter import get_device_id, get_rank_id, get_device_num
def get_top5_acc(top5_arg, gt_class):
sub_count = 0
for top5, gt in zip(top5_arg, gt_class):
if gt in top5:
sub_count += 1
return sub_count
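# Worked example (illustrative values): with top5_arg = [[3, 7, 1, 9, 4]] and
# gt_class = [9], the ground-truth class 9 appears in the top-5 list, so the
# function returns 1; with gt_class = [2] it would return 0.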
def val(args, model, val_dataset):
'''Eval.'''
val_dataloader = val_dataset.create_tuple_iterator()
img_tot = 0
top1_correct = 0
top5_correct = 0
if args.amp_level == 'O0':
origin_mstype = mstype.float32
else:
origin_mstype = mstype.float16
model.predict_network.to_float(mstype.float32)
for data, gt_classes in val_dataloader:
output = model.predict(Tensor(data, mstype.float32))
output = output.asnumpy()
top1_output = np.argmax(output, (-1))
top5_output = np.argsort(output)[:, -5:]
gt_classes = gt_classes.asnumpy()
t1_correct = np.equal(top1_output, gt_classes).sum()
top1_correct += t1_correct
top5_correct += get_top5_acc(top5_output, gt_classes)
img_tot += output.shape[0]
model.predict_network.to_float(origin_mstype)
results = [[top1_correct], [top5_correct], [img_tot]]
results = np.array(results)
top1_correct = results[0, 0]
top5_correct = results[1, 0]
img_tot = results[2, 0]
acc1 = 100.0 * top1_correct / img_tot
acc5 = 100.0 * top5_correct / img_tot
if acc1 > args.best_acc:
args.best_acc = acc1
args.best_epoch = args.epoch_cnt - 1
args.logger.info('Eval: top1_cor:{}, top5_cor:{}, tot:{}, acc@1={:.2f}%, acc@5={:.2f}%' \
.format(top1_correct, top5_correct, img_tot, acc1, acc5))
def trainval(args, model, train_dataset, val_dataset, cb, rank):
callbacks = callback_func(args, cb, 'epoch{}'.format(args.epoch_cnt))
model.train(args.val_interval, train_dataset, callbacks=callbacks, dataset_sink_mode=args.dataset_sink_mode)
if rank == 0:
val(args, model, val_dataset)
def modelarts_pre_process():
pass
@moxing_wrapper(pre_process=modelarts_pre_process)
def train():
'''Train.'''
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
config.rank_save_ckpt_flag = 1
# init distributed
if config.is_distributed:
if get_device_id():
context.set_context(device_id=get_device_id())
init()
rank = get_rank_id()
device_num = get_device_num()
parallel_mode = ParallelMode.DATA_PARALLEL
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=device_num, gradients_mean=True)
else:
rank = 0
device_num = 1
context.set_context(device_id=get_device_id())
# Logger
config.outputs_dir = os.path.join(config.save_ckpt_path, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
config.logger = get_logger(config.outputs_dir)
# Dataloader: train, val
train_dataset = audio_dataset(config.train_feat_dir, 'training', config.model_setting_spectrogram_length,
config.model_setting_dct_coefficient_count, config.per_batch_size, device_num, rank)
config.steps_per_epoch = train_dataset.get_dataset_size()
val_dataset = audio_dataset(config.train_feat_dir, 'validation', config.model_setting_spectrogram_length,
config.model_setting_dct_coefficient_count, config.per_batch_size)
# show args
config.logger.save_args(config)
# Network
config.logger.important_info('start create network')
network = DSCNN(config, config.model_size_info)
# Load pretrain model
if os.path.isfile(config.pretrained):
load_checkpoint(config.pretrained, network)
config.logger.info('load model %s success', config.pretrained)
# Loss
criterion = CrossEntropy(num_classes=config.model_setting_label_count)
# LR scheduler
if config.lr_scheduler == 'multistep':
lr_scheduler = MultiStepLR(config.lr, config.lr_epochs, config.lr_gamma, config.steps_per_epoch,
config.max_epoch, warmup_epochs=config.warmup_epochs)
elif config.lr_scheduler == 'cosine_annealing':
lr_scheduler = CosineAnnealingLR(config.lr, config.T_max, config.steps_per_epoch, config.max_epoch,
warmup_epochs=config.warmup_epochs, eta_min=config.eta_min)
else:
raise NotImplementedError(config.lr_scheduler)
lr_schedule = lr_scheduler.get_lr()
# Optimizer
opt = Momentum(params=network.trainable_params(),
learning_rate=Tensor(lr_schedule),
momentum=config.momentum,
weight_decay=config.weight_decay)
model = Model(network, loss_fn=criterion, optimizer=opt, amp_level=config.amp_level, keep_batchnorm_fp32=False)
# Training
config.epoch_cnt = 0
config.best_epoch = 0
config.best_acc = 0
progress_cb = ProgressMonitor(config)
while config.epoch_cnt + config.val_interval < config.max_epoch:
trainval(config, model, train_dataset, val_dataset, progress_cb, rank)
rest_ep = config.max_epoch - config.epoch_cnt
if rest_ep > 0:
trainval(config, model, train_dataset, val_dataset, progress_cb, rank)
config.logger.info('Best epoch:{} acc:{:.2f}%'.format(config.best_epoch, config.best_acc))
if __name__ == "__main__":
train()
| 38.607955 | 120 | 0.700515 |
dc5070d0865bbc9f48d127fb5d572caf248b1ae1
| 4,260 |
py
|
Python
|
andinopy/base_devices/andino_io_oled.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
andinopy/base_devices/andino_io_oled.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
andinopy/base_devices/andino_io_oled.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
# _ _ _
# / \ _ __ __| (_)_ __ ___ _ __ _ _
# / _ \ | '_ \ / _` | | '_ \ / _ \| '_ \| | | |
# / ___ \| | | | (_| | | | | | (_) | |_) | |_| |
# /_/ \_\_| |_|\__,_|_|_| |_|\___/| .__/ \__, |
# |_| |___/
# by Jakob Groß
import sys
from typing import Dict, List, Optional, Tuple
from andinopy import base_config, save_base_config
if sys.platform == "linux":
import busio
import adafruit_ssd1306
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
class andino_io_oled:
def __init__(self):
"""
Initializes a new OLED instance
        On Windows the images are shown in a viewer window; no I2C device is opened.
"""
self.WIDTH = 128
self.HEIGHT = 64
self._rot = None
if sys.platform == "linux":
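            # busio.I2C(scl, sda): GPIO3/GPIO2 are the Raspberry Pi's default I2C pins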
self.i2c = busio.I2C(3, 2)
self.display = adafruit_ssd1306.SSD1306_I2C(self.WIDTH, self.HEIGHT, self.i2c)
self.display.fill(0)
self.display.show()
self.padding = -2
        self.config: Tuple[str, Optional[str]] = ("21", None)
self.text = [['AndinoPy', 'running']]
if sys.platform == "linux":
self.font_path = r"/usr/share/fonts/truetype/FIRACODE.TTF"
else:
self.font_path = r"C:\Windows\Fonts\Consolas\consola.ttf"
self.fonts = {"default": ImageFont.load_default()}
for i in [60, 30, 40, 21, 20, 16, 14, 8]:
self.fonts[f"font{i}"] = ImageFont.truetype(self.font_path, i)
self.modes: Dict[str, Tuple[List[int], str]] = { # top diffs,font
"10": ([0], "font60"), # 1 Line, 3 Chars mode
"11": ([20], "font30"), # 1 Line, 4 Chars mode
"20": ([0, 35], "font30"), # 2 Line, 6 Chars
"21": ([5, 40], "font21"), # 2 Line, 9 Chars
"30": ([2, 24, 46], "font20"), # 3 Line, 9 Chars
"31": ([2, 27, 52], "font16"), # 3 Line, 12 Chars
"40": ([1, 17, 34, 51], "font14"), # 4 Line, 14 Chars
"60": ([3, 13, 23, 33, 43, 53], "font8") # 6 Lines
}
    # region rotate property
@property
def rotate(self) -> bool:
if self._rot is None:
self._rot = int(base_config["oled"]["rotate"])
return self._rot
@rotate.setter
def rotate(self, value: bool):
self._rot = value
base_config["oled"]["rotate"] = str(value)
save_base_config()
# endregion
def set_mode(self, col1: str, col2: str = None):
"""
        When using two columns, beware that only half the characters are available per column.
"10": 1 Line, 3 Chars mode
"11": 1 Line, 4 Chars mode
"20": 2 Line, 6 Chars
"21": 2 Line, 9 Chars
"30": 3 Line, 9 Chars
"31": 3 Line, 12 Chars
"40": 4 Line, 14 Chars
"60": 6 Line
:param col1: see above
:param col2: see above
"""
self.config = (col1, col2)
def set_text(self, text: [[str]]):
"""
        Set the text on the display.
:param text: [["col1 row1","col1 row2"],["col2 row2"...]]
"""
self.text = text
self.display_text()
def display_text(self):
"""
Displays the text set in self.text
"""
if sys.platform == "linux":
self.display.fill(0)
self.display.show()
my_image = Image.new('1', (self.WIDTH, self.HEIGHT))
draw = ImageDraw.Draw(my_image)
for j in range(len(self.config)):
if self.config[j] is not None:
                offset = 0.5 * self.WIDTH * j
                row, font = self.modes[self.config[j]]
                font = self.fonts[font]
                for i in range(len(row)):
                    text = self.text[j][i]
                    draw.text(xy=(offset, self.padding + row[i]), font=font, text=text, fill=255)
# print(my_image.tobytes())
if self.rotate == 1:
my_image = my_image.rotate(180)
if sys.platform == "linux":
self.display.image(my_image)
self.display.show()
else:
my_image.show()
if __name__ == "__main__":
display = andino_io_oled()
display.display_text()
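    # Hedged demo of the text API (display strings are illustrative):
    # display.set_mode("21", "21")          # two columns, 2 lines x 9 chars each
    # display.set_text([["Temp 21C", "Hum  40%"], ["CO2  400", "Fan 1200"]])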
| 33.28125 | 97 | 0.504695 |