import pytest
from geonotebook.kernel import Remote
@pytest.fixture
def remote(mocker):
protocols = [{'procedure': 'no_args',
'required': [],
'optional': []},
{'procedure': 'required_only',
'required': [{"key": "a"}, {"key": "b"}],
'optional': []},
{'procedure': 'optional_only',
'required': [],
'optional': [{"key": "x"}, {"key": "y"}, {"key": "z"}]},
{'procedure': 'required_and_optional',
'required': [{"key": "a"}, {"key": "b"}],
'optional': [{"key": "x"}, {"key": "y"}, {"key": "z"}]}]
r = Remote(None, protocols)
# Mock out the UUID.uuid4 function to return a consistent ID for testing
mocker.patch('geonotebook.jsonrpc.uuid.uuid4', return_value='TEST-ID')
mocker.patch.object(r, '_send_msg')
return r
def test_remote_bad_protocol():
with pytest.raises(AssertionError):
Remote(None, ['foo', 'bar'])
def test_remote_bad_protocol_missing_procedure():
with pytest.raises(AssertionError):
Remote(None, [{'required': [],
'optional': []}])
def test_remote_bad_protocol_missing_required():
with pytest.raises(AssertionError):
Remote(None, [{'procedure': 'no_args',
'optional': []}])
def test_remote_bad_protocol_missing_optional():
with pytest.raises(AssertionError):
Remote(None, [{'procedure': 'no_args',
'required': []}])
def test_remote_init(remote):
assert hasattr(remote.no_args, '__call__')
assert hasattr(remote.required_only, '__call__')
assert hasattr(remote.required_and_optional, '__call__')
def test_remote_call_no_args(remote):
remote.no_args()
assert remote._send_msg.call_count == 1
remote._send_msg.assert_called_with({'jsonrpc': '2.0', 'params': [],
'method': 'no_args', 'id': 'TEST-ID'})
def test_remote_call_no_args_with_args(remote):
with pytest.raises(AssertionError):
remote.no_args('foo', 'bar')
def test_remote_call_required_only(remote):
remote.required_only('foo', 'bar')
assert remote._send_msg.call_count == 1
remote._send_msg.assert_called_with({
'jsonrpc': '2.0', 'params': [{'key': 'a',
'value': 'foo',
'required': True},
{'key': 'b',
'value': 'bar',
'required': True}],
'method': 'required_only', 'id': 'TEST-ID'})
def test_remote_call_required_only_with_too_few_args(remote):
with pytest.raises(AssertionError):
remote.required_only('foo')
def test_remote_call_required_only_with_too_many_args(remote):
with pytest.raises(AssertionError):
remote.required_only('foo', 'bar', 'baz')
def test_remote_call_optional_only(remote):
remote.optional_only(x='foo', y='bar', z='baz')
assert remote._send_msg.call_count == 1
remote._send_msg.assert_called_with({
'jsonrpc': '2.0',
'params': [{'key': 'x', 'value': 'foo', 'required': False},
{'key': 'y', 'value': 'bar', 'required': False},
{'key': 'z', 'value': 'baz', 'required': False}],
'method': 'optional_only', 'id': 'TEST-ID'})
remote.optional_only()
assert remote._send_msg.call_count == 2
remote._send_msg.assert_called_with({
'jsonrpc': '2.0', 'params': [],
'method': 'optional_only', 'id': 'TEST-ID'})
def test_remote_call_optional_only_missing_arguments(remote):
remote.optional_only(x='foo', z='bar')
assert remote._send_msg.call_count == 1
remote._send_msg.assert_called_with({
'jsonrpc': '2.0',
'params': [{'key': 'x', 'value': 'foo', 'required': False},
{'key': 'z', 'value': 'bar', 'required': False}],
'method': 'optional_only', 'id': 'TEST-ID'})
def test_remote_promise_resolve_success(remote):
class Nonlocal(object):
pass
def success(val):
Nonlocal.result = val
def error(val):
Nonlocal.result = val
remote.no_args().then(success, error)
remote.resolve({'id': 'TEST-ID', 'result': 'SUCCESS', 'error': None})
assert Nonlocal.result == 'SUCCESS'
def test_remote_promise_resolve_error(remote):
class Nonlocal(object):
pass
def success(val):
Nonlocal.result = val
def error(val):
Nonlocal.result = val
remote.no_args().then(success, error)
remote.resolve({'id': 'TEST-ID', 'result': None, 'error': 'ERROR'})
assert isinstance(Nonlocal.result, Exception)
assert str(Nonlocal.result) == "ERROR"
@pytest.mark.skip(reason="See: geonotebook/issues/46")
def test_remote_promise_resolve_with_bad_message(remote, mocker):
class Nonlocal(object):
pass
def success(val):
Nonlocal.result = val
def error(val):
Nonlocal.result = val
remote.no_args().then(success, error)
with pytest.raises(Exception):
remote.resolve('bad message')
remote.no_args().then(success, error)
with pytest.raises(Exception):
remote.resolve({'id': 'TEST-ID', 'bad': 'message'})
remote.no_args().then(success, error)
# warn = mockeremote.patch.object(remote.log, 'warn')
remote.resolve({'id': 'BAD-ID'})
# assert warn.called_once == 1
|
python
|
import serial
import time
import datetime
import numpy as np
import csv
from pathlib import Path
import random as random
myFile = Path('BigHouse.csv')
if not myFile.exists():
with open('BigHouse.csv','a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=';',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Day", "Seconds", "Sensor 0", "Sensor 1", "Sensor2"])
ser = serial.Serial(
port = "COM3",
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout = 3
)# open first serial port
ser.flush()
ser.flushInput()
ser.flushOutput()
time.sleep(1)
mode = "read"
ser.write("AT+CIPMUX=1\r\n".encode())
time.sleep(0.5)
ser.write("AT+CIPSERVER=1,333\r\n".encode())
time.sleep(0.5)
sentBytes = bytearray([1,10,2,0,2,0,0,54,0]) #[Header, Length, parameters...., Tail]
readGarbage = 'a'
wifi_n = 0
allConnected = False
wifiArray = []
currentWifiModule = 0
sensorID = 0
retryCounter = 0
neuralNetworkReadings = [0,0]
receivedValues = [0,0,0,0,0]
readingsCounter = 0
numberOfReadingsTakenThisSession = 0
adcSensorValues = [2,0,0,0,2,0]
while 1:
try:
if mode=="read":
character=ser.read(1).decode('utf-8') #TODO: TIMEOUT CHECK
if (len(character) == 0 and (allConnected == True)):
retryCounter = retryCounter + 1
if retryCounter == 3:
mode = "write"
retryCounter = 0
print("Retrying to sensor: ", sensorID) #//TODO implement neural net for
#//the specific sensor node
if (character == "+"):
character = ser.read(1).decode('utf-8') #TODO: TIMEOUT CHECK
if (character == "I"):
i = 8
readGarbage = ser.read(7).decode('utf-8')#TODO: TIMEOUT CHECK
if(len(readGarbage)!= 7):
print("Retrying!!!\n\n")
mode = "write"
continue
#print ("Reading garbage:", readGarbage)
messageWiFiID = int(readGarbage[3]) # used for determining which sensor this reading belongs to
messageLength = int(readGarbage[5])
readMessage = ser.read(messageLength)#.decode('utf-8')
#print ("Reading full message:", readMessage.decode('utf-8'))
sensorID = int(readMessage[0])
if (allConnected == False):#Assign initial WiFi ID's
print(sensorID)
wifiArray.append([sensorID,messageWiFiID])
print("Length is: ", len(wifiArray))
if (len(wifiArray) == 3): #change for debug purposes
allConnected = True
else: #If a module disconnects and reconnects with a different ID, assign it.
for i in range(len(wifiArray)):
if (wifiArray[i][0] == sensorID):
if(wifiArray[i][1] != messageWiFiID):
print("WiFi has changed for this module. Reassigning!")
print("Old WiFi:", wifiArray[i][1])
print("New WiFi:", messageWiFiID)
wifiArray[i][1] = messageWiFiID
for j in range(len(wifiArray)):
if (wifiArray[j][1] == messageWiFiID and i != j):
wifiArray[j][1] = 8 #//assign default case so that WiFi don't interfere
#TO DO: REASSIGN WiFi module ID if it changes for a sensor node
sensorCMD = int(readMessage[1])
sensorReading = int(readMessage[2])*256 + int(readMessage[3])
if(allConnected == True):
neuralNetReading = float(readMessage[4]) + float(readMessage[5]/100.0)
print("Neural net reading:", neuralNetReading)
#print("Sensor reading is:", sensorReading)
#print (sensorReading)
print("sensor ID:", sensorID)
adcValue = float(sensorReading)
print (adcValue)
if(adcValue != 0):
R_th = 1000.0/((1023.0/(1023-adcValue))-1.0)
T = round(1.0/((1.0/298.15)+(1.0/3800.0)*(np.log(R_th/1000.0)))-273.15, 2)
print ("Temperature sensor " +str(sensorID)+": " + str(T))
if sensorID == 0:
receivedValues[2] = T
adcSensorValues[0] = readMessage[2]
adcSensorValues[1] = readMessage[3]
neuralNetworkReadings[0] = neuralNetReading
if sensorID == 1:
receivedValues[3] = T
adcSensorValues[2] = readMessage[2]
adcSensorValues[3] = readMessage[3]
#special case
if sensorID == 2:
receivedValues[4] = T
adcSensorValues[4] = readMessage[2]
adcSensorValues[5] = readMessage[3]
neuralNetworkReadings[1] = neuralNetReading
else:
R_th = 0
T = 60
print("ADC value: "+ str(adcValue))
print("Resistance value: " + str(R_th))
print ("Temperature sensor " +str(sensorID)+": " + str(T))
if(allConnected):
mode = "write"
readingsCounter = readingsCounter + 1
print("readingsCounter: ", readingsCounter)
if readingsCounter == 3:
readingsCounter = 0
print ("The adc sensor values are:", adcSensorValues)
numberOfReadingsTakenThisSession = numberOfReadingsTakenThisSession + 1
print ("")
print ("Number of Readings taken so far:", numberOfReadingsTakenThisSession)
print ("")
receivedValues[0] = datetime.datetime.today().weekday()+1
now = datetime.datetime.now()
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
receivedValues[1] = (now - midnight).seconds
with open('BigHouse.csv','a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=';',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(receivedValues)
with open('BigHouseNeuralNetwork.csv', 'a', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter = ';',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(neuralNetworkReadings)
elif mode=="write":
print ("Sensor ID is: " + str(sensorID))
time.sleep(8) #this line of code controls how quickly the system will ask for values
if (sensorID == 0):
print ("Sending Request to Sensor 1")#, datetime.datetime.now.strftime("%Y-%m-%d %H:%M"))
sentBytes[2] = adcSensorValues[0]#1st and 3rd sensor ADC values sent
sentBytes[3] = adcSensorValues[1]
sentBytes[4] = adcSensorValues[4]
sentBytes[5] = adcSensorValues[5]
for i in range(0, len(wifiArray)):
if wifiArray[i][0] == 1:
currentWifiModule = str(wifiArray[i][1])
ser.write(("AT+CIPSEND="+currentWifiModule+",10\r\n").encode()) #send request to Wifi ID
elif (sensorID == 1):
print("Sending Request to Sensor 2")#, datetime.datetime.now.strftime("%Y-%m-%d %H:%M"))
sentBytes[2] = adcSensorValues[0]#1st and 3rd sensor ADC values sent
sentBytes[3] = adcSensorValues[1]
sentBytes[4] = adcSensorValues[2]
sentBytes[5] = adcSensorValues[3]
for i in range(0, len(wifiArray)):
if wifiArray[i][0] == 2:
currentWifiModule = str(wifiArray[i][1])
ser.write(("AT+CIPSEND="+currentWifiModule+",10\r\n").encode()) #send request to Wifi ID
elif (sensorID == 2):
print("Sending Request to Sensor 0")#, datetime.datetime.now.strftime("%Y-%m-%d %H:%M"))
sentBytes[2] = adcSensorValues[2]#2nd and 3rd sensor ADC values sent
sentBytes[3] = adcSensorValues[3]
sentBytes[4] = adcSensorValues[4]
sentBytes[5] = adcSensorValues[5]
for i in range(0, len(wifiArray)):
if wifiArray[i][0] == 0:
currentWifiModule = str(wifiArray[i][1])
ser.write(("AT+CIPSEND="+currentWifiModule+",10\r\n").encode())
#send request to Wifi ID
now = datetime.datetime.now()
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
secondsPastMidnight = int((now - midnight).seconds)
sentBytes[8] = secondsPastMidnight%256
secondsPastMidnight = secondsPastMidnight//256
sentBytes[7] = secondsPastMidnight%256
secondsPastMidnight = secondsPastMidnight//256
sentBytes[6] = secondsPastMidnight
time.sleep(0.1)
ser.write(sentBytes+"\r\n".encode())
print("Message sent!\n")
mode = "read"
except Exception:
    #print(character)
    continue
|
python
|
import curses
from itertools import islice, izip
from .pad_display_manager import PadDisplayManager
from .rate import get_rate_string
SESSIONS_HEADER = "| Idx | Type | Details | RX Rate | TX Rate | Activity Time "
SESSIONS_BORDER = "+-----+------+-------------------------------------------------+----------------+----------------+-------------------"
EMPTY_LINE = "| | | | | | "
PAD_X_SIZE = len(SESSIONS_HEADER) + 1
HEADER_LINES = 3
FOOTER_LINES = 1
EXTRA_LINES = HEADER_LINES + FOOTER_LINES
def _get_time_string(time_seconds):
return "%02d:%02d:%02d" % (int(time_seconds / 60 / 60),
int(time_seconds / 60),
int(time_seconds) % 60)
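# For illustration: _get_time_string(3661) formats one hour, one minute and one
# second as "01:01:01".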
SORT_KEYS = {'r': ('rx_bps', True),
'R': ('rx_bps', False),
't': ('tx_bps', True),
'T': ('tx_bps', False),
's': ('source_ip', True),
'S': ('source_ip', False),
'p': ('source_port', True),
'P': ('source_port', False),
'd': ('dest_ip', True),
'D': ('dest_ip', False),
'{': ('dest_port', True),
'[': ('dest_port', False),
'l': ('time', True),
'L': ('time', False),
'k': ('key', True),
'K': ('key', False),
}
SORT_KEYS_ORDINALS = [ord(key) for key in SORT_KEYS]
class SessionsPad(object):
def __init__(self, sessions_number=20, ylocation=0, xlocation=0, colors=None):
self._sessions_number = sessions_number
self._colors = colors
self._top_line = 0
self._pad = curses.newpad(self._sessions_number + EXTRA_LINES, PAD_X_SIZE)
self._pad.addstr(0, 0, SESSIONS_BORDER)
self._pad.addstr(1, 0, SESSIONS_HEADER)
self._pad.addstr(2, 0, SESSIONS_BORDER)
self._pad.addstr(HEADER_LINES + self._sessions_number, 0, SESSIONS_BORDER)
self._sort_by = 'key'
self._sort_reverse = True
self._pad_display_manager = PadDisplayManager(ylocation, xlocation)
def key(self, key):
self._pad_display_manager.key(key)
if key == curses.KEY_DOWN:
if self._top_line < self._sessions_number:
self._top_line += 1
return
if key == curses.KEY_UP:
self._top_line -= 1
if self._top_line < 0:
self._top_line = 0
return
if key in SORT_KEYS_ORDINALS:
key = chr(key)
self._sort_by, self._sort_reverse = SORT_KEYS[key]
return
def get_y_size(self):
return EXTRA_LINES + self._sessions_number
def get_x_size(self):
return PAD_X_SIZE
def _get_session_color(self, session):
if self._colors is None:
return 0
if session.is_new:
return curses.color_pair(self._colors["green"])
if session.is_dead:
return curses.color_pair(self._colors["red"])
return 0
def display(self, maxy, maxx, sessions):
if len(sessions) <= self._sessions_number:
self._top_line = 0
sessions.sort(key=lambda x: getattr(x, self._sort_by), reverse=self._sort_reverse)
start_index = self._top_line
stop_index = min(len(sessions), self._sessions_number + self._top_line)
for i, session in enumerate(islice(sessions, start_index, stop_index)):
index = start_index + i + 1
line_number = i + HEADER_LINES
session_type = session.type.ljust(4).upper()
details = ("%s:%d <-> %s:%d" % (session.source_ip, session.source_port, session.dest_ip, session.dest_port)).ljust(47)
rx_rate = get_rate_string(session.rx_bps).ljust(14)
tx_rate = get_rate_string(session.tx_bps).ljust(14)
time_string = _get_time_string(session.time)
display_line = "| %3d | %s | %s | %s | %s | %s" % (index, session_type, details, rx_rate, tx_rate, time_string)
display_line = display_line.ljust(len(EMPTY_LINE))
self._pad.addstr(line_number,
0,
display_line,
self._get_session_color(session))
sessions_printed = stop_index - start_index
for i in xrange(sessions_printed + HEADER_LINES, self._sessions_number + HEADER_LINES):
self._pad.addstr(i, 0, EMPTY_LINE)
self._pad_display_manager.refresh(self._pad,
maxy,
maxx,
self.get_y_size(),
self.get_x_size())
|
python
|
import binascii
import collections
import datetime
import hashlib
from urllib.parse import quote
from google.oauth2 import service_account
def generate_signed_url(
service_account_file,
bucket_name,
object_name,
expiration,
http_method="GET",
query_parameters=None,
headers=None,
):
escaped_object_name = quote(object_name, safe="")
canonical_uri = "/{}/{}".format(bucket_name, escaped_object_name)
datetime_now = datetime.datetime.utcnow()
request_timestamp = datetime_now.strftime("%Y%m%dT%H%M%SZ")
datestamp = datetime_now.strftime("%Y%m%d")
google_credentials = service_account.Credentials.from_service_account_file(
service_account_file
)
client_email = google_credentials.service_account_email
credential_scope = "{}/auto/storage/goog4_request".format(datestamp)
credential = "{}/{}".format(client_email, credential_scope)
if headers is None:
headers = dict()
headers["host"] = "storage.googleapis.com"
canonical_headers = ""
ordered_headers = collections.OrderedDict(sorted(headers.items()))
for k, v in ordered_headers.items():
lower_k = str(k).lower()
strip_v = str(v).lower()
canonical_headers += "{}:{}\n".format(lower_k, strip_v)
signed_headers = ""
for k, _ in ordered_headers.items():
lower_k = str(k).lower()
signed_headers += "{};".format(lower_k)
signed_headers = signed_headers[:-1] # remove trailing ';'
if query_parameters is None:
query_parameters = dict()
query_parameters["X-Goog-Algorithm"] = "GOOG4-RSA-SHA256"
query_parameters["X-Goog-Credential"] = credential
query_parameters["X-Goog-Date"] = request_timestamp
query_parameters["X-Goog-Expires"] = expiration
query_parameters["X-Goog-SignedHeaders"] = signed_headers
canonical_query_string = ""
ordered_query_parameters = collections.OrderedDict(sorted(query_parameters.items()))
for k, v in ordered_query_parameters.items():
encoded_k = quote(str(k), safe="")
encoded_v = quote(str(v), safe="")
canonical_query_string += "{}={}&".format(encoded_k, encoded_v)
canonical_query_string = canonical_query_string[:-1]  # remove trailing '&'
canonical_request = "\n".join(
[
http_method,
canonical_uri,
canonical_query_string,
canonical_headers,
signed_headers,
"UNSIGNED-PAYLOAD",
]
)
canonical_request_hash = hashlib.sha256(canonical_request.encode()).hexdigest()
string_to_sign = "\n".join(
[
"GOOG4-RSA-SHA256",
request_timestamp,
credential_scope,
canonical_request_hash,
]
)
signature = binascii.hexlify(
google_credentials.signer.sign(string_to_sign)
).decode()
host_name = "https://storage.googleapis.com"
signed_url = "{}{}?{}&X-Goog-Signature={}".format(
host_name, canonical_uri, canonical_query_string, signature
)
return signed_url
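# Example usage (a minimal sketch; the service-account path, bucket, object name
# and expiration below are placeholder values, not taken from this project):
if __name__ == "__main__":
    url = generate_signed_url(
        service_account_file="service-account.json",  # hypothetical credentials file
        bucket_name="example-bucket",                 # hypothetical bucket
        object_name="path/to/object.txt",             # hypothetical object
        expiration=3600,                              # URL validity in seconds
    )
    print(url)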
|
python
|
import unittest
from number_of_islands.solution import Solution
class TestSolution(unittest.TestCase):
def test_solution(self):
self.assertEqual(0, Solution().num_islands(
None))
self.assertEqual(0, Solution().num_islands(
[[]]))
self.assertEqual(1, Solution().num_islands(
[['1']]))
self.assertEqual(2, Solution().num_islands(
[['1', '0'],
['0', '1']]))
self.assertEqual(1, Solution().num_islands(
[['1', '1'],
['0', '1']]))
self.assertEqual(1, Solution().num_islands(
[['1', '0'],
['1', '1']]))
self.assertEqual(1, Solution().num_islands(
[['1', '1'],
['1', '1']]))
self.assertEqual(1, Solution().num_islands(
[["1", "1", "1", "1", "0"],
["1", "1", "0", "1", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "0", "0", "0"]]))
self.assertEqual(3, Solution().num_islands(
[["1", "1", "0", "0", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "1", "0", "0"],
["0", "0", "0", "1", "1"]]))
self.assertEqual(3, Solution().num_islands(
[["1", "1", "1", "1", "0"],
["1", "1", "0", "0", "0"],
["1", "1", "0", "1", "0"],
["0", "0", "0", "0", "1"]]))
self.assertEqual(1, Solution().num_islands(
[["1", "1", "1", "1", "1"],
["1", "1", "1", "1", "1"],
["1", "1", "1", "1", "1"],
["1", "1", "1", "1", "1"]]))
self.assertEqual(0, Solution().num_islands(
[["0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0"],
["0", "0", "0", "0", "0"]]))
self.assertEqual(13, Solution().num_islands(
[["1", "0", "1", "0", "1"],
["0", "1", "0", "1", "0"],
["1", "0", "1", "0", "1"],
["0", "1", "0", "1", "0"],
["1", "0", "1", "0", "1"]]))
self.assertEqual(3, Solution().num_islands(
[['1'],
['0'],
['1'],
['0'],
['1'],
['0']]))
if __name__ == '__main__':
unittest.main()
|
python
|
from subprocess import PIPE, run
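# Passing the command as a list of arguments lets run() invoke the program
# directly, without going through the shell.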
my_command = "["ipconfig", "/all"]"
result = run(my_command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
print(result.stdout, result.stderr)
input("Press Enter to finish...")
|
python
|
import connexion
#app = connexion.FlaskApp(__name__)
app = connexion.AioHttpApp(__name__)
app.add_api("swagger/openapi.yaml")
application = app.app
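# Minimal sketch for running the app directly (the port is an arbitrary example
# value, not a setting taken from this project):
if __name__ == "__main__":
    app.run(port=8080)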
|
python
|
import time
import board
import displayio
import adafruit_ssd1325
displayio.release_displays()
spi = board.SPI()
oled_cs = board.D5
oled_dc = board.D6
oled_reset = board.D9
display_bus = displayio.FourWire(
spi, command=oled_dc, chip_select=oled_cs, reset=oled_reset, baudrate=1000000
)
time.sleep(1)
display = adafruit_ssd1325.SSD1325(display_bus, width=128, height=64)
g = displayio.Group()
dimension = min(display.width, display.height)
color_count = 16
gamma_pattern = displayio.Bitmap(dimension, dimension, color_count)
gamma_palette = displayio.Palette(color_count)
t = displayio.TileGrid(gamma_pattern, pixel_shader=gamma_palette)
pixels_per_step = dimension // color_count
for i in range(dimension):
if i % pixels_per_step == 0:
continue
gamma_pattern[i, i] = i // pixels_per_step
for i in range(color_count):
component = i * 255 // (color_count - 1)
print(component)
gamma_palette[i] = component << 16 | component << 8 | component
g.append(t)
display.show(g)
time.sleep(10)
|
python
|
from yahoofinancials import YahooFinancials
import pandas as pd
import datetime as dt
def get_downloader(start_date,
end_date,
granularity='daily',):
"""returns a downloader closure for oanda
:param start_date: the first day on which dat are downloaded
:param end_date: the last day on which data are downloaded
:param granularity: the frequency of price data, 'D' for daily and 'M1' for 1-minute data
:type start_date: str in format YYYY-MM-DD
:type end_date: str in format YYYY-MM-DD
:type granularity: str
"""
def downloader(symbol):
"""downloads symbol price data using oanda REST API
:param symbol: the symbol name
:type symbol: str
"""
yf = YahooFinancials(symbol)
res = yf.get_historical_price_data(str(start_date), str(end_date), granularity)
print("Yahoo Ingest: symbol={} start_date={} end_date={}".format(symbol,str(start_date),str(end_date)))
if not res or symbol not in res or 'prices' not in res[symbol]:
raise ValueError('Fetching price data for "{}" failed.'.format(symbol))
prices=res[symbol]['prices']
df = pd.DataFrame({'open': [p['open'] for p in prices],
'close': [p['close'] for p in prices],
'low': [p['low'] for p in prices],
'high': [p['high'] for p in prices],
'volume': [p['volume'] for p in prices],}, index=[pd.Timestamp(d['formatted_date']) for d in prices])
if prices and 'dividend' in prices[0]:
df['dividend'] = [p['dividend'] for p in prices]
else:
df['dividend'] = 0
if prices and 'split' in prices[0]:
df['split'] = [p['split'] for p in prices]
else:
df['split'] = 1
df = df.fillna(method='ffill')
#ajjc
print(df.tail(1))
return df
return downloader
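# Example usage (a sketch only; the symbol and date range are illustrative
# values, not ones used elsewhere in this module):
if __name__ == '__main__':
    download = get_downloader('2020-01-01', '2020-12-31', granularity='daily')
    print(download('AAPL').tail())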
|
python
|
# Python script for generating panda_drv.h from *DRV entries in registers
# configuration file.
from __future__ import print_function
import sys
from parse_indent import parse_register_file
registers = parse_register_file(sys.argv[1])
base, fields = registers['*DRV']
base = int(base)
print('''\
/* Register definitions derived from configuration registers *DRV section.
*
* This file is automatically generated from driver/panda_drv.py.
* Do not edit this file. */
''')
for name, field in fields:
reg = int(field)
print('#define %s 0x%05x' % (name, (base << 12) | (reg << 2)))
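# For illustration (hypothetical values): a field with reg == 3 in a section with
# base == 2 would be emitted as 0x0200c, i.e. (2 << 12) | (3 << 2).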
|
python
|
import setuptools
setuptools.setup(
name="cinemasci",
version="0.1",
author="David H. Rogers",
author_email="[email protected]",
description="Tools for the Cinema scientific toolset.",
url="https://github.com/cinemascience",
packages=["cinemasci",
"cinemasci.cdb",
"cinemasci.cis",
"cinemasci.cis.read",
"cinemasci.cis.write",
"cinemasci.cview"
],
install_requires=[
"pandas",
"pillow",
"h5py"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD",
"Operating System :: OS Independent",
],
)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 16:34:40 2020
@author: skyjones
"""
import os
import re
import pandas as pd
from glob import glob
import nibabel as nib
import numpy as np
import shutil
out_csv = '/Users/manusdonahue/Documents/Sky/volume_testing/volume_comparisons_fastonly.csv'
in_folder = '/Users/manusdonahue/Documents/Sky/volume_testing'
run_fast = True
run_sienax = False
run_freesurfer = False
over_fast = False
over_sienax = False
over_freesurfer = False
######
np.random.seed(0)
subjects_dir = os.environ['SUBJECTS_DIR']
folders = np.array(glob(os.path.join(in_folder, '*/'))) # list of all possible subdirectories
#folders = folders[np.random.choice(len(folders), size=10, replace=False)]
"""
pts_of_interest = []
li = [['SCD_P004_01', 'SCD_P004_02'], ['SCD_P009_01', 'SCD_P009_02'], ['SCD_P013_01', 'SCD_P013_02'], ['SCD_P017_01', 'SCD_P017_02'], ['SCD_P022_01', 'SCD_P022_02'], ['SCD_P023_01', 'SCD_P023_02'], ['SCD_P031_01', 'SCD_P031_02'], ['SCD_P032_01', 'SCD_P032_02'], ['SCD_P035_01', 'SCD_P035_02'], ['SCD_P036_01', 'SCD_P036_02'], ['SCD_TRANSF_K007_01', 'SCD_TRANSF_K007_02'], ['SCD_TRANSF_K008_01', 'SCD_TRANSF_K008_02'], ['SCD_TRANSF_K018_01', 'SCD_TRANSF_K018_02'], ['SCD_TRANSF_P001_01', 'SCD_TRANSF_P001_02'], ['SCD_TRANSF_P002_01', 'SCD_TRANSF_P002_02'], ['SCD_TRANSF_P003_01', 'SCD_TRANSF_P003_02'], ['SCD_TRANSF_P008_01', 'SCD_TRANSF_P008_02'], ['SCD_TRANSF_P009_01', 'SCD_TRANSF_P009_02'], ['SCD_TRANSF_P013_01', 'SCD_TRANSF_P013_02'], ['SCD_TRANSF_P017_01', 'SCD_TRANSF_P017_02'], ['SCD_TRANSF_P019_01', 'SCD_TRANSF_P019_02'], ['SCD_TRANSP_P001_01', 'SCD_TRANSP_P001_02'], ['SCD_TRANSP_P003_01', 'SCD_TRANSP_P003_02'], ['SCD_TRANSP_P004_01', 'SCD_TRANSP_P004_02'], ['SCD_TRANSP_P005_01', 'SCD_TRANSP_P005_02'], ['SCD_TRANSP_P008_01', 'SCD_TRANSP_P008_02']]
for subl in li:
pts_of_interest.extend(subl)
folders = [os.path.join(in_folder, i) for i in pts_of_interest]
"""
def get_terminal(path):
"""
Takes a filepath or directory tree and returns the last file or directory
Parameters
----------
path : path
path in question.
Returns
-------
str of only the final file or directory.
"""
return os.path.basename(os.path.normpath(path))
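# For illustration: get_terminal('/spam/eggs/ham/') returns 'ham'.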
out_df = pd.DataFrame()
mrs = []
for i, f in enumerate(folders):
mr = get_terminal(f)
mrs.append(mr)
print(f'{i+1} of {len(folders)}: {mr}')
t1_path = os.path.join(f, 'processed', 'axT1.nii.gz')
raw_t1_path = os.path.join(f, 'bin', 'axT1_raw.nii.gz')
comp_folder = os.path.join(f, 'comp')
fast_folder = os.path.join(comp_folder, 'fast')
sienax_folder = os.path.join(comp_folder, 'sienax')
freesurfer_folder = os.path.join(comp_folder, 'freesurfer')
if not os.path.exists(comp_folder):
    os.mkdir(comp_folder)
all_volumes = {}
blank_sub = {'wm':None,
'gm':None,
'csf':None,
'icv':None,
'det':None,
}
# do FAST segmentation
if run_fast:
if os.path.exists(fast_folder) and over_fast:
shutil.rmtree(fast_folder)
if not os.path.exists(fast_folder):
os.mkdir(fast_folder)
fast_base = os.path.join(fast_folder, 'fast')
fast_command = f'fast -S 1 -t 1 -n 3 -o {fast_base} {t1_path}'
print(f'Running FAST:\n{fast_command}')
os.system(fast_command)
fast_pve_path = os.path.join(fast_folder, 'fast_pveseg.nii.gz')
raw = nib.load(fast_pve_path)
img = raw.get_fdata()
header = raw.header
voxel_dims = header['pixdim'][1:4]
voxel_vol = np.prod(voxel_dims)
# 1 = csf, 2 = gm, 3 = wm
# use partial volumes for calculation
seg_types = {1: 'csf', 2: 'gm', 3:'wm'}
fast_sub = blank_sub.copy()
for num, matter_type in seg_types.items():
subnum = num-1
subseg_file = os.path.join(fast_folder, f'fast_pve_{subnum}.nii.gz')
subraw = nib.load(subseg_file)
subim = subraw.get_fdata()
vol = float(subim.sum() * voxel_vol) / 1e3
fast_sub[matter_type] = vol
all_volumes['fast'] = fast_sub
if run_sienax:
if os.path.exists(sienax_folder) and over_sienax:
shutil.rmtree(sienax_folder)
if not os.path.exists(sienax_folder):
sienax_command = f'sienax {t1_path} -o {sienax_folder}'
print(f'Running SIENAX:\n{sienax_command}')
os.system(sienax_command)
sienax_report = open(os.path.join(sienax_folder, 'report.sienax'))
txt = sienax_report.read()
lines = txt.split('\n')
greys = lines[-4]
whites = lines[-3]
brains = lines[-2]
grey_vol_norm = float(greys.split(' ')[-2])
grey_vol_raw = float(greys.split(' ')[-1])
white_vol_norm = float(whites.split(' ')[-2])
white_vol_raw = float(whites.split(' ')[-1])
brain_vol_norm = float(brains.split(' ')[-2])
brain_vol_raw = float(brains.split(' ')[-1])
sienax_sub = blank_sub.copy()
sienax_sub['wm'] = white_vol_raw / 1e3
sienax_sub['gm'] = grey_vol_raw / 1e3
sienax_sub['det'] = brain_vol_norm / brain_vol_raw
all_volumes['sienax'] = sienax_sub
if run_freesurfer:
if os.path.exists(freesurfer_folder) and over_freesurfer:
shutil.rmtree(freesurfer_folder)
if not os.path.exists(freesurfer_folder):
os.mkdir(freesurfer_folder)
r1_cmd = f'/Applications/freesurfer/7.1.1/bin/recon-all -subjid {mr} -i {raw_t1_path} -autorecon1'
print(f'Running Freesurfer -autorecon1:\n{r1_cmd}')
os.system(r1_cmd)
r2_cmd = f'/Applications/freesurfer/7.1.1/bin/recon-all -subjid {mr} -autorecon2'
print(f'Running Freesurfer -autorecon2:\n{r2_cmd}')
os.system(r2_cmd)
stats_file = os.path.join(subjects_dir, mr, 'stats', 'aseg.stats')
if not os.path.exists(stats_file):
r3_cmd = f'/Applications/freesurfer/7.1.1/bin/recon-all -subjid {mr} -autorecon3'
print(f'Running Freesurfer -autorecon3:\n{r3_cmd}')
os.system(r3_cmd)
else:
print('autorecon3 already run. skipping')
stats_report = open(stats_file)
txt = stats_report.read()
lines = txt.split('\n')
wm_line = [i for i in lines if 'Total cerebral white matter volume' in i][0]
gm_line = [i for i in lines if 'Total gray matter volume' in i][0]
icv_line = [i for i in lines if 'Estimated Total Intracranial Volume' in i][0]
wm_val = float(wm_line.split(', ')[-2])
gm_val = float(gm_line.split(', ')[-2])
icv_val = float(icv_line.split(', ')[-2])
trans_mat_file = os.path.join(subjects_dir, mr, 'mri', 'transforms', 'talairach.xfm')
trans_report = open(trans_mat_file)
trans_txt = trans_report.read()
trans_lines = trans_txt.split('\n')
mat_as_text = trans_lines[-4:-1]
mat = [[float(a) for a in re.split(';| ', i) if a != ''] for i in mat_as_text]
mat.append([0, 0, 0, 1])
mat = np.array(mat)
det = np.linalg.det(mat)
freesurfer_sub = blank_sub.copy()
freesurfer_sub['wm'] = wm_val / 1e3
freesurfer_sub['gm'] = gm_val / 1e3
freesurfer_sub['icv'] = icv_val / 1e3
freesurfer_sub['det'] = det
all_volumes['freesurfer'] = freesurfer_sub
flat_vols = {'pt':mr}
for key, sub in all_volumes.items():
for subkey, val in sub.items():
flat_vols[f'{key}_{subkey}'] = val
out_df = out_df.append(flat_vols, ignore_index=True)
out_df = out_df[flat_vols.keys()]
out_df.to_csv(out_csv)
|
python
|
#!/usr/bin/env python
"""
===========
{PROG}
===========
----------------------------------------------------
Compute exponentially weighted moving average
----------------------------------------------------
:Author: [email protected]
:Date: 2013-03-15
:Copyright: TradeLink LLC 2013
:Version: 0.1
:Manual section: 1
:Manual group: data filters
SYNOPSIS
========
{PROG} [ -f x ] [ -a val ] [ -s sep ]
OPTIONS
=======
-a val alpha of the ewma (default 0.1)
-f x average the values in column x (zero-based offset or name - default 1)
-s sep use sep as the field separator (default is comma)
DESCRIPTION
===========
Data are read from stdin, the ewma is computed, appended to the end of
the values, then printed to stdout.
SEE ALSO
========
* pt
* bars
* ptwin
* take
* mpl
* avg
"""
import csv
import getopt
import os
import sys
PROG = os.path.basename(sys.argv[0])
def main(args):
opts, args = getopt.getopt(args, "a:f:s:h")
alpha = 0.1
field = 1
sep = ","
# Defaults; overridden below when -f names a column (DictReader/DictWriter)
reader = csv.reader
writer = csv.writer
for opt, arg in opts:
if opt == "-a":
alpha = float(arg)
elif opt == "-f":
try:
field = int(arg)
reader = csv.reader
writer = csv.writer
except ValueError:
# Dict key
field = arg
reader = csv.DictReader
writer = csv.DictWriter
elif opt == "-s":
sep = arg
elif opt == "-h":
usage()
raise SystemExit
val = None
rdr = reader(sys.stdin, delimiter=sep)
if isinstance(field, str):
fnames = rdr.fieldnames[:]
fnames.append("ewma")
wtr = writer(sys.stdout, delimiter=sep, fieldnames=fnames)
wtr.writeheader()
else:
wtr = writer(sys.stdout, delimiter=sep)
for row in rdr:
if row[field]:
if val is None:
val = float(row[field])
else:
val = alpha * float(row[field]) + (1-alpha) * val
if isinstance(field, str):
row["ewma"] = val
else:
row.append(val)
wtr.writerow(row)
return 0
def usage():
print(__doc__.format(**globals()).strip(), file=sys.stderr)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
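# Example invocation (illustrative only; assumes the script is installed as "ewma"):
#   cat prices.csv | ewma -f close -a 0.05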
|
python
|
import os
import sys
import time
import WareHouse
import ConfigLoader
import PythonSheep.IOSheep.PrintFormat as PrintSheep
import PythonSheep.IOSheep.InputFormat as InputSheep
from Commands.HelpDocument import HelpDocument
from Commands.ReloadConfig import ReloadConfig
from Commands.OpenConfig import OpenConfig
# Initialize classes
mainWareHouse = WareHouse.wareHouse()
mainPrintControler = PrintSheep.PrintFormat()
mainInputControler = InputSheep.InputFormat()
# Initialize command plugins
HelpDocumentPlugin = HelpDocument()
ReloadConfigPlugin = ReloadConfig()
OpenConfigPlugin = OpenConfig()
# Load the configuration file
ConfigLoader.LoadConfig(mainWareHouse)
# Save the default working directory
mainWareHouse.defaultWorkDir = os.getcwd()
# Report the language in use
if mainWareHouse.globalSittings["userLanguage"] == "En":
mainWareHouse.userUsingLanguage = "En"
print(" * $ User now use English(%s)" % mainWareHouse.globalSittings["userLanguage"])
elif mainWareHouse.globalSittings["userLanguage"] == "Ch_Sp":
mainWareHouse.userUsingLanguage = "Ch_Sp"
print(" * $ 用户现在使用的是简体中文(%s)" % mainWareHouse.globalSittings["userLanguage"])
else:
mainWareHouse.userUsingLanguage = "En"
print(" * $ User now use unkown language(%s)" % mainWareHouse.globalSittings["userLanguage"])
print(" * $ Continue use English(En|English)")
# Get the question file passed as input
if len(sys.argv) > 1:
welcomeMode = False
mainWareHouse.loadingQuestionFileName = sys.argv[1]
# Handle the case where no input was given
else:
welcomeMode = True
# Show the Welcome screen
if mainWareHouse.globalSittings["showWelcomeMenu"]:
userNowUsingLanguage = mainWareHouse.userUsingLanguage
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_Title1"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No1"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No2"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No3"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No4"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No5"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No6"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No7"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No8"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No9"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_Title2"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No10"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No11"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No12"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_No13"])
del userNowUsingLanguage
# Do not show the Welcome screen
else:
userNowUsingLanguage = mainWareHouse.userUsingLanguage
print(mainWareHouse.languagesContents[userNowUsingLanguage]["welcomeMessage"]["welcomeMessage_NotShow"])
del userNowUsingLanguage
# Handle "Press Enter to continue"
userNowUsingLanguage = mainWareHouse.userUsingLanguage
input(mainWareHouse.languagesContents[userNowUsingLanguage]["globalMessageTips"]["anyKeyContinue_TipsMessage"])
# Clear the screen
mainPrintControler.UniversalClearScreen()
del userNowUsingLanguage
# Console
userNowUsingLanguage = mainWareHouse.userUsingLanguage
# Print the console title
print(mainWareHouse.languagesContents[userNowUsingLanguage]["consoleMessage"]["consoleBootTitle"])
while True:
consoleGet = input(mainWareHouse.languagesContents[userNowUsingLanguage]["consoleMessage"]["consoleCommandTips"]).upper()
# Check whether the input is a command
if str(consoleGet).startswith("^"):
usingCommand = str(consoleGet)
# Command handling
if str(consoleGet) == "^HELP":
# Run the plugin
HelpDocumentPlugin.run(userNowUsingLanguage, mainWareHouse)
elif str(consoleGet) == "^RELOAD":
# Run the plugin
ReloadConfigPlugin.run(userNowUsingLanguage, mainWareHouse)
elif str(consoleGet) == "^SITTINGS-OPEN":
# Run the plugin
OpenConfigPlugin.run(userNowUsingLanguage, mainWareHouse)
elif str(consoleGet) == "^EXIT":
# Exit
sys.exit(0)
else:
# Handle unknown commands
print(mainWareHouse.languagesContents[userNowUsingLanguage]["commandsMessage"]["unkownCommand_TipsMessage"]
% str(consoleGet))
# Switch the working directory
mainWareHouse.defaultWorkDir = os.getcwd()
os.chdir("./QuestionData")
# Check whether the question file exists
if not os.path.exists(mainWareHouse.loadingQuestionFileName) and not welcomeMode:
userNowUsingLanguage = mainWareHouse.userUsingLanguage
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileTips"]["questionFileErrorTitle"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileTips"]["questionFileErrorMessage_No1"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileTips"]["questionFileErrorMessage_No2"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileTips"]["questionFileErrorMessage_No3"])
del userNowUsingLanguage
sys.exit(1)
else:
ConfigLoader.LoadQuestionFile(mainWareHouse, mainWareHouse.loadingQuestionFileName)
userNowUsingLanguage = mainWareHouse.userUsingLanguage
# Compute the total score
sumScore = 0.0
for i in range(len(mainWareHouse.userQuestionFile["questions"])):
sumScore += mainWareHouse.userQuestionFile["questions"][i]["score"]
# Determine the difficulty
projectDifficultyTemplate = ["Eazy", "Medium", "Hard", "Hardcore"]
projectDifficulty = ""
if len(projectDifficultyTemplate) <= mainWareHouse.userQuestionFile["projectDifficulty"]:
    projectDifficulty = "Unknown"
else:
projectDifficulty = projectDifficultyTemplate[mainWareHouse.userQuestionFile["projectDifficulty"]]
del projectDifficultyTemplate
# Print question file information
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["infoTitle"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["infoLanguage"]
% mainWareHouse.userQuestionFile["language"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["infoVersion"]
% mainWareHouse.userQuestionFile["version"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["infoProjectName"]
% mainWareHouse.userQuestionFile["projectName"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["infoTotalScore"]
% sumScore)
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["infoDifficulty"]
% projectDifficulty)
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["questionStartTitle"])
input(mainWareHouse.languagesContents[userNowUsingLanguage]["globalMessageTips"]["anyKeyContinue_TipsMessage"])
mainPrintControler.UniversalClearScreen()
# Start the quiz
userSumScore = 0.0
for i in range(len(mainWareHouse.userQuestionFile["questions"])):
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["questionCountTitle"]
% int(i + 1))
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["questionNameTitle"])
print(mainWareHouse.userQuestionFile["questions"][i]["name"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["questionExplanation"])
for j in range(len(mainWareHouse.userQuestionFile["questions"][i]["explanation"])):
print(mainWareHouse.userQuestionFile["questions"][i]["explanation"][j])
answer = input(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["questionAnswer"])
questionRight = False
for j in range(len(mainWareHouse.userQuestionFile["questions"][i]["answer"])):
if answer == mainWareHouse.userQuestionFile["questions"][i]["answer"][j]:
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["answerRight"])
userSumScore += mainWareHouse.userQuestionFile["questions"][i]["score"]
questionRight = True
else: continue
# This question was answered incorrectly
if not questionRight:
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["answerWorng"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["answerIs"]
% str(mainWareHouse.userQuestionFile["questions"][i]["answer"]))
input(mainWareHouse.languagesContents[userNowUsingLanguage]["globalMessageTips"]["anyKeyContinue_TipsMessage"])
mainPrintControler.UniversalClearScreen()
# Print the summary
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["returnTitle"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["questionFileMessage"]["returnScore"] % userSumScore)
input(mainWareHouse.languagesContents[userNowUsingLanguage]["globalMessageTips"]["anyKeyContinue_TipsMessage"])
mainPrintControler.UniversalClearScreen()
|
python
|
from movelister.core.context import Context
def reopenCurrentFile():
"""
Reopens current file. All variables made before this will be
invalid. Make sure to initialize them too.
"""
frame = Context.getFrame()
frame.loadComponentFromURL(getUrl(), "_self", 0, ())
def getUrl():
"""
Returns current opened file URL. This can be used to reopen the
same file. URL format for a file is:
"file:///path_to_project/movelister/templates/movelister_template.ods"
"""
return Context.getFrame().getController().getModel().getURL()
|
python
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import collections
import enum
import functools
import heapq
import itertools
import math
import struct
import sys
import parse_ddrescue
@functools.total_ordering
class Range:
__slots__ = ["start", "size"]
def __init__(self, start, size):
self.start = start
self.size = size
def __eq__(self, other):
if type(other) != Range:
return NotImplemented
return self.start == other.start and self.size == other.size
def __lt__(self, other):
if type(other) != Range:
return NotImplemented
if self.start < other.start:
return True
if self.start > other.start:
return False
return self.size < other.size
# Assumes ranges are added start-to-end and never overlap
class RangeList:
__slots__ = ["items"]
def __init__(self):
self.items = []
def add(self, start, size):
if len(self.items) != 0:
last = self.items[-1]
if last.start + last.size == start:
last.size += size
return
assert last.start + last.size < start, "false: {} + {} < {}".format(
last.start, last.size, start
)
self.items.append(Range(start, size))
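# For illustration: adding (0, 512) and then (512, 512) coalesces into a single
# Range(0, 1024), while a later add(2048, 512) starts a new Range.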
Superblock = collections.namedtuple(
"Superblock",
[
"block_count",
"free_blocks",
"root_block",
"journal_block",
"journal_device",
"orig_journal_size",
"journal_trans_max",
"journal_magic",
"journal_max_batch",
"journal_max_commit_age",
"journal_max_trans_age",
"blocksize",
"oid_max_size",
"oid_current_size",
"state",
"magic_string",
"hash_function",
"tree_height",
"bitmap_number",
"version",
"inode_generation",
],
)
Superblock.struct = struct.Struct("<11IHHHH12sIHHH2xI")
Superblock.unpack = lambda b: Superblock._make(Superblock.struct.unpack(b))
class Node(
collections.namedtuple("Node", ["level", "item_count", "free_space", "payload"])
):
ptr_struct = struct.Struct("<IH2x")
struct = struct.Struct("<HHHxx16x4072s")
@staticmethod
def unpack(b):
return Node._make(Node.struct.unpack(b))
@functools.lru_cache(maxsize=128)
def ptr_find(self, key):
if self.level == 1:
return None
# Comparison is broken for version 1 keys except if one of the types is
# STAT
assert key.type == ItemType.STAT
pos = 0
for i in range(self.item_count):
ikey = Key.unpack(self.payload[pos : pos + Key.struct.size])
if ikey > key:
break
pos += Key.struct.size
else:
i += 1
return Node.ptr_struct.unpack_from(
self.payload, self.item_count * Key.struct.size + i * Node.ptr_struct.size
)[0]
def ptr_find_range(self, keyStart, keyEnd):
"""keyStart is inclusive. keyEnd is exclusive."""
if self.level == 1:
return None
pos = 0
for start in range(self.item_count):
tmpkey = Key.unpack(self.payload[pos : pos + Key.struct.size])
if tmpkey > keyStart:
break
pos += Key.struct.size
else:
start += 1
end = start - 1
for end in range(start, self.item_count):
tmpkey = Key.unpack(self.payload[pos : pos + Key.struct.size])
if tmpkey >= keyEnd:
break
pos += Key.struct.size
else:
end += 1
found = []
for i in range(start, end + 1):
found.append(
Node.ptr_struct.unpack_from(
self.payload,
self.item_count * Key.struct.size + i * Node.ptr_struct.size,
)[0]
)
return found
def ptr_blocks(self):
if self.level == 1:
return ()
blocks = array.array(array_4byte_typecode)
pos = self.item_count * Key.struct.size
for _ in range(self.item_count + 1):
blocks.append(Node.ptr_struct.unpack_from(self.payload, pos)[0])
pos += Node.ptr_struct.size
return blocks
def items(self):
items = []
for pos in range(0, self.item_count * ItemHdr.struct.size, ItemHdr.struct.size):
hdr = ItemHdr.unpack(self.payload[pos : pos + ItemHdr.struct.size])
body = self.payload[hdr.location - 24 : hdr.location - 24 + hdr.length]
items.append(
Item(key=hdr.key, count=hdr.count, version=hdr.version, body=body)
)
return items
def item_find(self, key):
key = key.pack()
for pos in range(0, self.item_count * ItemHdr.struct.size, ItemHdr.struct.size):
# Key is first field of ItemHdr
if key == self.payload[pos : pos + Key.struct.size]:
hdr = ItemHdr.unpack(self.payload[pos : pos + ItemHdr.struct.size])
body = self.payload[hdr.location - 24 : hdr.location - 24 + hdr.length]
return Item(
key=hdr.key, count=hdr.count, version=hdr.version, body=body
)
return None
def item_find_range(self, keyStart, keyEnd):
items = []
for pos in range(0, self.item_count * ItemHdr.struct.size, ItemHdr.struct.size):
# Key is first field of ItemHdr
hdr = ItemHdr.unpack(self.payload[pos : pos + ItemHdr.struct.size])
if keyStart <= hdr.key and hdr.key < keyEnd:
body = self.payload[hdr.location - 24 : hdr.location - 24 + hdr.length]
items.append(
Item(key=hdr.key, count=hdr.count, version=hdr.version, body=body)
)
return items
def indirect_item_blocks(self):
if self.level != 1:
return ()
blocks = array.array(array_4byte_typecode)
for item in self.items():
if item.key.type != ItemType.INDIRECT:
continue
blocks.extend(item.indirect_blocks())
return blocks
class ItemType(enum.IntEnum):
STAT = 0
INDIRECT = 1
DIRECT = 2
DIRECTORY = 3
ANY = 15
ItemType.version1_id2type = {
0: ItemType.STAT,
0xFFFFFFFE: ItemType.INDIRECT,
0xFFFFFFFF: ItemType.DIRECT,
500: ItemType.DIRECTORY,
555: ItemType.ANY,
}
ItemType.version1_type2id = {
ItemType.STAT: 0,
ItemType.INDIRECT: 0xFFFFFFFE,
ItemType.DIRECT: 0xFFFFFFFF,
ItemType.DIRECTORY: 500,
ItemType.ANY: 555,
}
class Key(
collections.namedtuple("Key", ["dirid", "objid", "offset", "type", "version"])
):
struct = struct.Struct("<IIQ")
@staticmethod
def unpack(b, version=None):
parts = list(Key.struct.unpack(b))
if version is None:
assumed_type = parts[2] & 0xF
if assumed_type == 0 or assumed_type == 15:
version = 1
else:
version = 2
offset_type = parts[2]
if version == 1:
parts[2] = offset_type & 0xFFFFFFFF
parts.append(ItemType.version1_id2type[offset_type >> 32])
parts.append(1)
else:
parts[2] = offset_type & 0x0FFFFFFFFFFFFFFF
parts.append(ItemType(offset_type >> 60))
parts.append(2)
return Key._make(parts)
def pack(self):
if self.version == 1:
parts = (
self.dirid,
self.objid,
self.offset | (ItemType.version1_type2id[self.type] << 32),
)
else:
parts = (self.dirid, self.objid, self.offset | (self.type.value << 60))
return Key.struct.pack(*parts)
class ItemHdr(
collections.namedtuple("ItemHdr", ["key", "count", "length", "location", "version"])
):
struct = struct.Struct("<16sHHHH")
@staticmethod
def unpack(b):
parts = list(ItemHdr.struct.unpack(b))
parts[0] = Key.unpack(parts[0], version=parts[4] + 1)
return ItemHdr._make(parts)
DirectoryEntry = collections.namedtuple(
"DirectoryEntry", ["offset", "dirid", "objid", "name", "state"]
)
# Only the struct for the header; the name is separate
DirectoryEntry.struct = struct.Struct("<IIIHH")
class Stat(
collections.namedtuple(
"Stat",
[
"mode",
"filetype",
"numlinks",
"uid",
"gid",
"size",
"atime",
"mtime",
"ctime",
],
)
):
ver1_struct = struct.Struct("<HHHH6I")
ver2_struct = struct.Struct("<H2xIQ7I")
@staticmethod
def unpack(b):
if len(b) == Stat.ver1_struct.size:
parts = Stat.ver1_struct.unpack(b)
parts = list(parts[:8])
else:
parts = Stat.ver2_struct.unpack(b)
parts = [
parts[0],
parts[1],
parts[3],
parts[4],
parts[2],
parts[5],
parts[6],
parts[7],
]
parts.insert(1, FileType(parts[0] >> 12))
parts[0] = parts[0] & 0xFFF
return Stat._make(parts)
class FileType(enum.Enum):
SOCKET = 12
LINK = 10
REGULAR = 8
BLOCK = 6
DIRECTORY = 4
CHARACTER = 2
FIFO = 1
class Item(collections.namedtuple("Item", ["key", "count", "version", "body"])):
def directory_list(self):
entries = []
implicitEnd = len(self.body)
for pos in range(
0, self.count * DirectoryEntry.struct.size, DirectoryEntry.struct.size
):
entry = list(
DirectoryEntry.struct.unpack(
self.body[pos : pos + DirectoryEntry.struct.size]
)
)
location = entry[3]
locationEnd = location
while locationEnd < implicitEnd and self.body[locationEnd] != 0:
locationEnd += 1
entry[3] = self.body[location:locationEnd]
entries.append(DirectoryEntry._make(entry))
implicitEnd = location
return entries
def stat(self):
return Stat.unpack(self.body)
def indirect_blocks(self):
return array.array(array_4byte_typecode, self.body)
if array.array("I").itemsize == 4:
array_4byte_typecode = "I"
else:
assert array.array("L").itemsize == 4
array_4byte_typecode = "L"
class ReiserFs:
__slots__ = [
"f",
"rescue_map",
"sectors",
"block_size",
"sectors_per_block",
"superblock",
"incomplete",
"partition_start",
]
def __init__(self, f, rescue_map):
self.f = f
self.rescue_map = rescue_map
self.sectors = []
self.incomplete = False
self.partition_start = 0
# fake values. Initialized in init()
self.block_size = 512
self.sectors_per_block = self.block_size // 512
self.superblock = None
def init(self):
self.sectors.append(65536 // 512)
if self.rescue_map[65536] != parse_ddrescue.Status.FINISHED:
return False
self.superblock = Superblock.unpack(
self.readBlock(65536 // self.block_size)[:0x50]
)
self.block_size = self.superblock.blocksize
self.sectors_per_block = self.block_size // 512
return True
def readBlock(self, block_num):
self.f.seek(self.partition_start + block_num * self.block_size)
return self.f.read(self.block_size)
def isBlockComplete(self, block_num):
start_sector = block_num * self.block_size
for sector in range(start_sector, start_sector + self.block_size, 512):
if self.rescue_map[sector] != parse_ddrescue.Status.FINISHED:
return False
return True
@functools.lru_cache(maxsize=128)
def readNode(self, block, partial_only=False):
if not partial_only:
self.sectors.append(block * self.sectors_per_block)
if self.rescue_map[block * self.block_size] != parse_ddrescue.Status.FINISHED:
return (False, None)
node = Node.unpack(self.readBlock(block))
if node.level == 1:
node_size_left = 24 + node.item_count * ItemHdr.struct.size
node_size_right = self.block_size - node_size_left - node.free_space
else:
node_size_left = self.block_size - node.free_space
node_size_right = 0
incomplete = False
for off in set(
itertools.chain(
range(1, math.ceil(node_size_left / 512)),
range(
self.sectors_per_block - math.ceil(node_size_right / 512),
self.sectors_per_block,
),
)
):
if off == 0:
continue
self.sectors.append(block * self.sectors_per_block + off)
if (
not incomplete
and self.rescue_map[block * self.block_size + off * 512]
!= parse_ddrescue.Status.FINISHED
):
incomplete = True
return (not incomplete, node)
def find_item(self, key):
treeBlock = self.superblock.root_block
while True:
complete, node = self.readNode(treeBlock)
if not complete:
return None
if node.level == 1:
return node.item_find(key)
treeBlock = node.ptr_find(key)
def iter_items_in_range(self, keyStart, keyEnd, treeBlock=None):
"""keyStart is inclusive. keyEnd is exclusive."""
if treeBlock is None:
treeBlock = self.superblock.root_block
complete, node = self.readNode(treeBlock)
if not complete:
return
if node.level == 1:
yield from node.item_find_range(keyStart, keyEnd)
return
for treeBlock in node.ptr_find_range(keyStart, keyEnd):
yield from self.iter_items_in_range(keyStart, keyEnd, treeBlock)
def regular_block_list(self, key):
assert key.type == ItemType.STAT
item = self.find_item(key)
expectedSize = -1
if item is not None:
stat = item.stat()
expectedSize = stat.size
assert stat.filetype == FileType.REGULAR
keyStart = Key(key.dirid, key.objid, 1, ItemType.STAT, 1)
keyEnd = Key(key.dirid, key.objid + 1, 0, ItemType.STAT, 1)
size = 1
for item in self.iter_items_in_range(keyStart, keyEnd):
assert item.key.offset >= size
if item.key.offset > size:
self.incomplete = True
missing = item.key.offset - size
for _ in range(missing // self.block_size):
yield 0
if missing % self.block_size != 0:
yield bytes(missing % self.block_size)
size += missing
if item.key.type == ItemType.INDIRECT:
size += len(item.body) // 4 * self.block_size
yield from item.indirect_blocks()
elif item.key.type == ItemType.DIRECT:
size += len(item.body)
yield item.body
if size < expectedSize:
self.incomplete = True
def directory_list(self, key):
assert key.type == ItemType.STAT
item = self.find_item(key)
expectedSize = -1
if item is not None:
stat = item.stat()
expectedSize = stat.size
assert stat.filetype == FileType.DIRECTORY
# It appears that directory keys mostly use version 1
keyStart = Key(key.dirid, key.objid, 1, ItemType.DIRECTORY, 1)
keyEnd = Key(key.dirid, key.objid + 1, 0, ItemType.STAT, 1)
size = 0
for item in self.iter_items_in_range(keyStart, keyEnd):
size += len(item.body)
yield from item.directory_list()
if size != expectedSize:
self.incomplete = True
def get_name(self, key, parent):
if key.objid == 2:
return b"" # root
for entry in self.directory_list(parent):
if entry.objid == key.objid:
return entry.name
def get_full_name(self, key, parent):
parts = []
while True:
part = self.get_name(key, parent)
if part is None:
part = f"{key.dirid}_{key.objid}".encode()
parts.append(part)
if key.objid == 2:
# At the root
break
for entry in itertools.islice(self.directory_list(parent), 2):
if entry.name != b"..":
continue
key = parent
parent = Key(entry.dirid, entry.objid, 0, ItemType.STAT, 2)
break
else:
break # Assume this name part was in the dirid_objid format
parts.reverse()
return b"/".join(parts)
def file_indirect_blocks(self, key):
assert key.type == ItemType.STAT
keyStart = Key(key.dirid, key.objid, 1, ItemType.INDIRECT, 1)
keyEnd = Key(key.dirid, key.objid + 1, 0, ItemType.STAT, 1)
for item in self.iter_items_in_range(keyStart, keyEnd):
if item.key.type != ItemType.INDIRECT:
continue
yield from item.indirect_blocks()
def path_to_key(self, name):
parts = name.split(b"/")
if parts[0]:
# Unnamed file, identified by dirid_objid
id_parts = parts[0].split(b"_")
assert len(id_parts) == 2
dirKey = Key(int(id_parts[0]), int(id_parts[1]), 0, ItemType.STAT, 2)
else:
# Rooted file
dirKey = Key(1, 2, 0, ItemType.STAT, 2)
parts = parts[1:]
for part in parts:
if part == b"":
continue
for entry in self.directory_list(dirKey):
if part == entry.name:
dirKey = Key(entry.dirid, entry.objid, 0, ItemType.STAT, 2)
break
else:
return None
return dirKey
def iter_leafs(fs):
heap = []
next_pass = [(fs.superblock.root_block, -1)]
while next_pass:
heapq.heapify(next_pass)
tmp = heap
heap = next_pass
next_pass = tmp
next_pass.clear()
while heap:
block, level = heapq.heappop(heap)
complete, node = fs.readNode(block)
if not complete:
continue
if node.level > 1:
for ptr_block in node.ptr_blocks():
if ptr_block < block:
next_pass.append((ptr_block, node.level - 1))
else:
heapq.heappush(heap, (ptr_block, node.level - 1))
elif node.level == 1:
yield node
class SetList:
__slots__ = ["s"]
def __init__(self):
self.s = set()
def append(self, item):
self.s.add(item)
def find(fs, name):
if not fs.init():
print(f"Could not access superblock", file=sys.stderr)
return
for leaf in iter_leafs(fs):
for item in leaf.items():
if item.key.type != ItemType.DIRECTORY:
continue
for entry in item.directory_list():
if entry.name == name:
print(
fs.get_full_name(
Key(entry.dirid, entry.objid, 0, ItemType.STAT, 2),
Key(item.key.dirid, item.key.objid, 0, ItemType.STAT, 2),
).decode(errors="replace")
)
def ls(fs, name, recurse=False):
if not fs.init():
print(f"Could not access superblock", file=sys.stderr)
return
dirKey = fs.path_to_key(name)
if dirKey is None:
print(f"Could not find {name.decode()}", file=sys.stderr)
return
item = fs.find_item(dirKey)
if item is None:
print(f"Could not stat {name.decode()}", file=sys.stderr)
return
stat = item.stat()
if stat.filetype == FileType.REGULAR:
print(f"{name.decode()} (normal file)", file=sys.stderr)
return
if stat.filetype == FileType.LINK:
print(f"{name.decode()} (symbolic link)", file=sys.stderr)
return
if stat.filetype != FileType.DIRECTORY:
print(f"{name.decode()} (special file)", file=sys.stderr)
return
dirname = None
for entry in itertools.islice(fs.directory_list(dirKey), 2):
if entry.name != b"..":
continue
dirname = fs.get_name(
dirKey, Key(entry.dirid, entry.objid, 0, ItemType.STAT, 2)
)
if dirname is None:
if recurse:
dirname = f"{dirKey.dirid}_{dirKey.objid}".encode()
else:
dirname = b"(unknown)"
dirname = dirname.decode(errors="replace")
dirname += "/"
ls_(fs, dirKey, dirname, recurse)
def ls_(fs, dirKey, dirname, recurse):
entries = []
fs.incomplete = False
dirList = list(fs.directory_list(dirKey))
incomplete = fs.incomplete
for entry in dirList:
directory = False
name = entry.name.decode(errors="replace")
if entry.name == b".":
if recurse:
name = dirname
if incomplete:
name += " (incomplete entry list)"
else:
name = f"{name: <2}\t{entry.dirid}_{entry.objid}\t{dirname}"
print(name)
continue
if entry.name == b"..":
if recurse:
continue
name = f"{name: <2}\t{entry.dirid}_{entry.objid}"
print(name)
continue
entryKey = Key(entry.dirid, entry.objid, 0, ItemType.STAT, 2)
item = fs.find_item(entryKey)
if item is None:
name += " (incomplete stat info)"
else:
stat = item.stat()
if stat.filetype == FileType.DIRECTORY:
name += "/"
directory = True
elif stat.filetype == FileType.REGULAR:
fs.incomplete = False
blocks = list(fs.regular_block_list(Key(entry.dirid, entry.objid, 0, ItemType.STAT, 2)))
if fs.incomplete:
name += " (incomplete block list)"
else:
for block in blocks:
if type(block) == bytes:
continue
if block == 0: # assume block 0 is for sparse files
continue
if not fs.isBlockComplete(block):
name += " (incomplete data blocks)"
break
blocks = None
if directory:
entries.append((name, entryKey))
else:
entries.append((name,))
entries.sort()
for entry in entries:
if not recurse:
print(entry[0])
else:
if len(entry) == 1:
print(dirname + entry[0])
else:
ls_(fs, entry[1], dirname + entry[0], recurse)
if incomplete and not recurse:
print("(results incomplete)")
def cat(fs, name):
if not fs.init():
print(f"Could not access superblock", file=sys.stderr)
return
key = fs.path_to_key(name)
if key is None:
print(f"Could not find {name.decode()}", file=sys.stderr)
return
item = fs.find_item(key)
if item is None:
print(f"Could not stat {name.decode()}", file=sys.stderr)
return
stat = item.stat()
if stat.filetype != FileType.REGULAR:
print(f"{name.decode()} not a regular file: {stat.filetype}", file=sys.stderr)
return
expectedSize = stat.size
fs.incomplete = False
currentSize = 0
for block in fs.regular_block_list(key):
if type(block) == bytes:
toWrite = block
elif block == 0: # assume block 0 is for sparse files
toWrite = bytes(fs.block_size)
else:
toWrite = fs.readBlock(block)
if currentSize + len(toWrite) > expectedSize:
toWrite = toWrite[:expectedSize - currentSize]
sys.stdout.buffer.write(toWrite)
currentSize += len(toWrite)
assert expectedSize == currentSize
if fs.incomplete:
# TODO: give different exit code? (would also need to check isBlockComplete)
pass
def findFolder(fs, names, metadata_only=False):
if not fs.init():
rangelist = RangeList()
rangelist.add(65536, 512)
print_rangelist(fs, rangelist, 1)
return
keysRemaining = []
excludeIds = set()
for name in names:
if name.startswith(b"-"):
exclude = True
name = name[1:]
else:
exclude = False
key = fs.path_to_key(name)
if key is None:
print(f"Could not find {name.decode()}", file=sys.stderr)
return
if exclude:
excludeIds.add(key.objid)
else:
keysRemaining.append(key)
fs.sectors = SetList()
blocks = set() # blocks may be repeated due to hard links
while keysRemaining:
key = keysRemaining.pop()
item = fs.find_item(key)
if item is None:
continue
stat = item.stat()
if stat.filetype == FileType.DIRECTORY:
for entry in fs.directory_list(key):
if entry.name == b"." or entry.name == b"..":
continue
if entry.objid in excludeIds:
continue
keysRemaining.append(Key(entry.dirid, entry.objid, 0, ItemType.STAT, 2))
elif stat.filetype == FileType.REGULAR:
if metadata_only:
list(fs.file_indirect_blocks(key))
else:
blocks.update(fs.file_indirect_blocks(key))
rangelist = RangeList()
blocks = list(blocks)
blocks.sort()
for block in blocks:
rangelist.add(block * fs.sectors_per_block, fs.sectors_per_block)
ranges = rangelist.items
rangelist = RangeList()
fs.sectors = list(fs.sectors.s)
fs.sectors.sort()
rangelist = RangeList()
for sector in fs.sectors:
rangelist.add(sector, 1)
ranges += rangelist.items
rangelist = RangeList()
ranges.sort()
for _range in ranges:
rangelist.add(_range.start, _range.size)
print_rangelist(fs, rangelist, 512)
def findTree(fs, level_limit=0, partial_only=False):
if fs.init():
_findTree(fs, level_limit, partial_only)
fs.sectors.sort()
rangelist = RangeList()
for sector in fs.sectors:
rangelist.add(sector, 1)
print_rangelist(fs, rangelist, 512)
def _findTree(fs, level_limit, partial_only):
incomplete_count = 0
partial = 0
found = 1
heap = []
next_pass = [(fs.superblock.root_block, -1)]
while next_pass:
heapq.heapify(next_pass)
tmp = heap
heap = next_pass
next_pass = tmp
next_pass.clear()
while heap:
block, level = heapq.heappop(heap)
complete, node = fs.readNode(block, partial_only=partial_only)
if not complete:
incomplete_count += 1
if node is not None:
partial += 1
continue
if node.level <= level_limit:
continue
if node.level > 1:
for ptr_block in node.ptr_blocks():
found += 1
if ptr_block < block:
next_pass.append((ptr_block, node.level - 1))
else:
heapq.heappush(heap, (ptr_block, node.level - 1))
elif node.level == 1:
for item_block in node.indirect_item_blocks():
if item_block == 0:
                        # It's unclear why these exist. Maybe for sparse files?
continue
for off in range(fs.sectors_per_block):
fs.sectors.append(item_block * fs.sectors_per_block + off)
print("found:", found, file=sys.stderr)
print("incomplete:", incomplete_count, file=sys.stderr)
print("partial:", partial, file=sys.stderr)
def findBitmap(fs, metadataOnly=False):
rangelist = RangeList()
if not fs.init():
rangelist.add(65536, 512)
print_rangelist(fs, rangelist, 1)
return
if metadataOnly:
rangelist.add(65536 // fs.block_size, 1)
rangelist.add(65536 // fs.block_size + 1, 1)
for pos in range(
fs.block_size * 8, fs.superblock.block_count, fs.block_size * 8
):
rangelist.add(pos, 1)
print_rangelist(fs, rangelist, fs.block_size)
return
r = fs.readBlock(65536 // fs.block_size + 1)
if fs.rescue_map[65536 + fs.block_size] != parse_ddrescue.Status.FINISHED:
rangelist.add(65536 // fs.block_size + 1, 1)
markUsed(rangelist, 0, r)
for pos in range(fs.block_size * 8, fs.superblock.block_count, fs.block_size * 8):
r = fs.readBlock(pos)
if fs.rescue_map[pos * fs.block_size] != parse_ddrescue.Status.FINISHED:
rangelist.add(pos, 1)
markUsed(rangelist, pos, r)
print_rangelist(fs, rangelist, fs.block_size)
def markUsed(rangelist, pos, bitmap):
for i, b in enumerate(bitmap):
i *= 8
for bit in range(8):
if b & (1 << bit):
rangelist.add(pos + i + bit, 1)
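# Worked example (illustrative values): if markUsed() is handed pos=0 and the
# bitmap byte at offset i == 3 is 0b00000101, bits 0 and 2 are set, so blocks
# 3*8 + 0 = 24 and 3*8 + 2 = 26 are each added to the range list as
# single-block entries; print_rangelist() below then emits them in ddrescue
# map-file format.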
def print_rangelist(fs, rangelist, mult):
print(0, "*", 1)
print(0, fs.partition_start, "-")
end = 0
for item in rangelist.items:
if end != item.start:
print(fs.partition_start + end * mult, (item.start - end) * mult, "-")
print(fs.partition_start + item.start * mult, item.size * mult, "+")
end = item.start + item.size
# FIXME: need ending '-' to avoid ddrescuelog boolean logic strangeness
def main(argv):
if len(argv) < 4:
print(f"Usage: {argv[0]} file.bin file.map [--partition-start N] COMMAND [--metadata]", file=sys.stderr)
print(
"""
COMMANDS
bitmap Produce ddrescue map of used blocks based on the free space
bitmaps. This very quickly provides a view of used blocks and is
a good choice when the vast majority of data is readable. Note
that data blocks may be thrown away during fsck if the file
metadata that references them has been lost
This should be re-run as more bitmaps are recovered from disk to
provide more complete results
tree [LEVEL] Produce ddrescue map of used blocks based on the b-tree. This is
moderate speed and ensures recovery time is only spent on
accessible data. Specifying LEVEL will limit results to that
level and higher. Level 0 is file data, level 1 is file
metadata, and higher levels are used to discover lower levels.
Specifying level 1 initially is a good idea, and then proceeding
             to 0 after level 1+ has been recovered. If you need to
retry bad blocks, focusing on higher levels (2+) first is a good
idea as they can "unlock" a substantial amount of lower-level
data
This should be re-run as more higher-level blocks are recovered
from disk to provide more complete results
folder PATH..
Produce ddrescue map of used blocks by traversing the directory
tree, for PATH and its descendants. This allows recovering
specific data, but can be slow as it needs to be run many times
as the directory structure is recovered. Multiple paths may be
specified. If a path is prefixed with dash ('-') it will be
excluded
This should be re-run as more directories are recovered from
disk to provide more complete results. If 'tree 1' has been
fully recovered, then reruns are unnecessary.
ls [-R] PATH List the contents of directory found via PATH, denoting
incomplete files. This must either be an absolute path or a path
starting with a directory in the form used by lost+found (e.g.,
1337_1338/some/folder). This is useful for looking through the
disk without running fsck and checking the recovery status of
individual files. -R will include transitive contents
find NAME    Find files with name NAME. This is useful for finding a
directory that is not reachable from the root and would exist in
lost+found after a fsck. For example, home directories could be
found by searching for '.bashrc'
cat PATH Dump file contents to standard out. Intended to allow reading a
few files without needing to run fsck. Do not fully trust the
output; consider it a debug or quick-and-dirty tool
OPTIONS
--partition-start N
The start of the reiserfs partition in bytes. This is necessary
if the file is a full disk image; should not be necessary for
partition images. Defaults to 0
--metadata Restrict ddrescue map output to metadata, such as bitmap and
b-tree blocks
""",
file=sys.stderr,
)
sys.exit(1)
filename_bin = argv[1]
filename_map = argv[2]
rescue_map = parse_ddrescue.parseDdrescue(filename_map)
partition_start = 0
if len(argv) > 4 and argv[3] == "--partition-start":
partition_start = int(argv[4])
del argv[3:5]
rescue_map.offset = partition_start
with open(filename_bin, "rb") as f:
fs = ReiserFs(f, rescue_map)
fs.partition_start = partition_start
metadata_only = False
if len(argv) > 4 and argv[4] == "--metadata":
metadata_only = True
del argv[4]
if argv[3] == "bitmap":
findBitmap(fs, metadataOnly=metadata_only)
elif argv[3] == "tree":
level = 0
if len(argv) >= 5:
level = int(argv[4])
if metadata_only:
level = max(level, 1)
findTree(fs, level_limit=level, partial_only=False)
elif argv[3] == "folder":
if len(argv) < 5:
print("PATH required", file=sys.stderr)
sys.exit(1)
names = []
for name in argv[4:]:
names.append(name.encode())
findFolder(fs, names, metadata_only=metadata_only)
elif argv[3] == "ls":
recurse = False
if len(argv) > 4 and argv[4] == "-R":
recurse = True
del argv[4]
if len(argv) < 5:
print("PATH required", file=sys.stderr)
sys.exit(1)
ls(fs, argv[4].encode(), recurse=recurse)
elif argv[3] == "cat":
if len(argv) < 5:
print("PATH required", file=sys.stderr)
sys.exit(1)
cat(fs, argv[4].encode())
elif argv[3] == "find":
if len(argv) < 5:
print("NAME required", file=sys.stderr)
sys.exit(1)
find(fs, argv[4].encode())
if __name__ == "__main__":
main(sys.argv)
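# Hedged usage sketch: the script and image file names below are placeholders
# (use whatever this file is actually saved as); the commands mirror the help
# text printed by main() above.
#
#   python reiserfs_rescue.py disk.img disk.map --partition-start 1048576 tree 1
#   python reiserfs_rescue.py disk.img disk.map bitmap --metadata
#   python reiserfs_rescue.py disk.img disk.map ls -R /home/user
#   python reiserfs_rescue.py disk.img disk.map cat /etc/fstab > fstab.recovered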
|
python
|
import sys
import os
import redis
from Bio import SeqIO
def main(argv):
# Put stuff in JSON config file
r=redis.Redis()
    batch = 1000
itera = 0
checkID = ""
pipeline=r.pipeline()
handle = open( argv[0], "r")
for record in SeqIO.parse(handle, "fasta") :
pipeline.set( str( record.id ), str( record.seq ) )
checkID = str( record.id )
itera = itera + 1
if itera > batch :
pipeline.execute()
itera = 0
if itera > 0 :
pipeline.execute()
handle.close()
seqDoc1 = r.get( checkID )
seqDoc2 = r.get( argv[1] )
    print(seqDoc1)
    print(seqDoc2)
if __name__ == "__main__":
main(sys.argv[1:])
|
python
|
from peewee import CharField, IntegerField
from core.db.db import BaseModel
class Materials(BaseModel):
name = CharField(unique=True)
type_id = IntegerField()
class Meta:
table_name = 'materials'
|
python
|
from keras import backend
from keras.constraints import Constraint
from keras.initializers import RandomNormal
from keras.layers import Dense, Conv2D, Conv2DTranspose, BatchNormalization, Activation, Reshape, LeakyReLU, \
Dropout, Flatten
from keras.models import Sequential, load_model
from wasserstein import wasserstein_loss
KERNEL_SIZE_GENERATOR = 3
KERNEL_SIZE_CRITIC = 3
""" weight clipper """
class WeightClipper(Constraint):
# clip model weights to a given hypercube
def __init__(self, clip_value):
self.clip_value = clip_value
def __call__(self, weights):
return backend.clip(weights, -self.clip_value, self.clip_value)
def get_config(self):
return {'clip_value': self.clip_value}
""" model defintions """
def create_generator(latent_size, img_size, starting_filters=64):
"""
Creates a generator model for use in a WGAN.
:param latent_size:
:param img_size:
:param starting_filters: number of maps to start with from latent
:return:
"""
init = RandomNormal(stddev=0.02)
model = Sequential()
# block 0
model.add(Dense(starting_filters*(img_size[0] // (2 ** 5)) * (img_size[1] // (2 ** 5)),
input_shape=(latent_size,)))
# model.add(LeakyReLU())
model.add(Activation("relu"))
model.add(Reshape(((img_size[0] // (2 ** 5)), (img_size[1] // (2 ** 5)), starting_filters)))
model.add(BatchNormalization())
# block 1
model.add(Conv2DTranspose(1024, kernel_size=KERNEL_SIZE_GENERATOR, padding="same", strides=2, kernel_initializer=init))
# model.add(LeakyReLU())
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
# block 2
model.add(Conv2DTranspose(512, kernel_size=KERNEL_SIZE_GENERATOR, padding="same", strides=2, kernel_initializer=init))
# model.add(LeakyReLU())
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
# block 3
model.add(Conv2DTranspose(256, kernel_size=KERNEL_SIZE_GENERATOR, padding="same", strides=2, kernel_initializer=init))
# model.add(LeakyReLU())
model.add(Activation("relu"))
model.add(BatchNormalization())
# model.add(Dropout(0.2))
# block 4
model.add(Conv2DTranspose(128, kernel_size=KERNEL_SIZE_GENERATOR, padding="same", strides=2, kernel_initializer=init))
# model.add(LeakyReLU())
model.add(Activation("relu"))
model.add(BatchNormalization())
# block 5
model.add(Conv2DTranspose(64, kernel_size=KERNEL_SIZE_GENERATOR, padding="same", strides=2, kernel_initializer=init))
# model.add(LeakyReLU())
model.add(Activation("relu"))
model.add(BatchNormalization())
# block 6 without upsampling
model.add(Conv2D(32, kernel_size=KERNEL_SIZE_GENERATOR, padding="same", strides=1, kernel_initializer=init))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
# final image
model.add(Conv2D(3, kernel_size=KERNEL_SIZE_GENERATOR, padding="same", strides=1, kernel_initializer=init))
model.add(Activation("tanh"))
return model
def create_critic(img_size, critic_clipping=1e-2):
"""
Creates a critic model for use in a WGAN.
:param img_size:
:return:
"""
init = RandomNormal(stddev=0.02)
clipper = WeightClipper(critic_clipping)
model = Sequential()
model.add(Conv2D(64, kernel_size=KERNEL_SIZE_CRITIC, strides=2, padding="same",
kernel_constraint=clipper,
kernel_initializer=init,
input_shape=(img_size[0],img_size[1],3,)))
model.add(LeakyReLU())
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size=KERNEL_SIZE_CRITIC, strides=2, padding="same",
kernel_initializer=init,
kernel_constraint=clipper))
model.add(LeakyReLU())
model.add(BatchNormalization())
model.add(Conv2D(256, kernel_size=KERNEL_SIZE_CRITIC, strides=2, padding="same",
kernel_initializer=init,
kernel_constraint=clipper))
model.add(LeakyReLU())
model.add(BatchNormalization())
model.add(Conv2D(512, kernel_size=KERNEL_SIZE_CRITIC, strides=2, padding="same",
kernel_initializer=init,
kernel_constraint=clipper))
model.add(LeakyReLU())
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(1, kernel_initializer=init))
return model
def create_wgan(optimizer, latent_size, img_size, critic_clipping):
# build and compile generator
generator = create_generator(latent_size=latent_size, img_size=img_size)
# build and compile discriminator
critic = create_critic(img_size=img_size, critic_clipping=critic_clipping)
critic.compile(loss=wasserstein_loss, optimizer=optimizer)
    # build and compile combined model
critic.trainable = False
combined = Sequential()
combined.add(generator)
combined.add(critic)
combined.compile(loss=wasserstein_loss, optimizer=optimizer)
return combined, critic, generator
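# Hedged usage sketch (the optimizer, sizes, and clipping value are illustrative
# choices, not values mandated elsewhere in this project); WGANs are commonly
# trained with RMSprop and a small learning rate:
#
#   from keras.optimizers import RMSprop
#   combined, critic, generator = create_wgan(
#       optimizer=RMSprop(lr=5e-5),
#       latent_size=100,
#       img_size=(128, 128),
#       critic_clipping=0.01,
#   )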
def load_wgan(model_directory, latent_size, optimizer):
# load discriminator and generator, try backup file if failed
try:
critic = load_model(model_directory+"/critic.h5")
    except Exception:
critic = load_model(model_directory + "/critic_backup.h5")
try:
generator = load_model(model_directory + "/generator.h5")
    except Exception:
generator = load_model(model_directory + "/generator_backup.h5")
    # build and compile combined model
critic.trainable = False
combined = Sequential()
combined.add(generator)
combined.add(critic)
combined.compile(loss=wasserstein_loss, optimizer=optimizer)
return combined, critic, generator
|
python
|
import events
import io
import json
import os
from executors.python import run as python_run
from executors.workflow import run as workflow_run
from . import utils
from girder_worker.utils import JobStatus, StateTransitionException
from girder_worker import config, PACKAGE_DIR
# Maps task modes to their implementation
_task_map = {}
class TaskSpecValidationError(Exception):
pass
def register_executor(name, fn):
"""
Register a new executor in the girder_worker runtime. This is used to
map the 'mode' field of a task to a function that will execute the task.
:param name: The value of the mode field that maps to the given function.
:type name: str
:param fn: The implementing function.
:type fn: function
"""
_task_map[name] = fn
def unregister_executor(name):
"""
Unregister an executor from the map.
:param name: The name of the executor to unregister.
:type name: str
"""
del _task_map[name]
def _resolve_scripts(task):
if task.get('mode') != 'workflow':
if 'script_uri' in task and 'script' not in task:
task['script'] = io.fetch({
'url': task['script_uri']
})
elif 'steps' in task:
for step in task['steps']:
_resolve_scripts(step['task'])
def load(task_file):
"""
    Load a task JSON into memory, resolving any ``'script_uri'`` fields
    by replacing them with a ``'script'`` field containing the contents pointed
    to by ``'script_uri'`` (see :py:mod:`girder_worker.uri` for URI formats). A
    ``script_fetch_mode`` field may also be set.
:param task_file: The path to the JSON file to load.
:returns: The task as a dictionary.
"""
with open(task_file) as f:
task = json.load(f)
prevdir = os.getcwd()
parent = os.path.dirname(task_file)
if parent != '':
os.chdir(os.path.dirname(task_file))
_resolve_scripts(task)
os.chdir(prevdir)
return task
def set_job_status(mgr, status):
if mgr:
mgr.updateStatus(status)
def _extractId(spec):
return spec['id'] if 'id' in spec else spec['name']
def _validateInputs(task_inputs, inputs):
for name, task_input in task_inputs.iteritems():
if name not in inputs:
if 'default' in task_input:
inputs[name] = task_input['default']
else:
raise Exception('Required input \'%s\' not provided.' % name)
@utils.with_tmpdir
def run(task, inputs=None, outputs=None, fetch=True, status=None, **kwargs):
"""
Run a task with the specified I/O bindings.
:param task: Specification of the task to run.
:type task: dict
:param inputs: Specification of how input objects should be fetched
into the runtime environment of this task.
:type inputs: dict
:param outputs: Specification of what should be done with outputs
of this task.
:type outputs: dict
:param write_script: If ``True`` task scripts will be written to file before
being passed to ``exec``. This improves interactive debugging with
tools such as ``pdb`` at the cost of additional file I/O. Note that
when passed to run *all* tasks will be written to file including
validation and conversion tasks.
:param fetch: If ``True`` will perform a fetch on the input before
running the task (default ``True``).
:param status: Job status to update to during execution of this task.
:type status: girder_worker.utils.JobStatus
:returns: A dictionary of the form ``name: binding`` where ``name`` is
the name of the output and ``binding`` is an output binding of the form
``{'data': data}``. The ``'data'`` field may be absent if an output URI
was provided. Instead, those outputs will be saved to that URI and the
output binding will contain the location in the ``'uri'`` field.
"""
inputs = inputs or {}
outputs = outputs or {}
task_inputs = {_extractId(d): d for d in task.get('inputs', ())}
task_outputs = {_extractId(d): d for d in task.get('outputs', ())}
mode = task.get('mode', 'python')
if mode not in _task_map:
raise Exception('Invalid mode: %s' % mode)
job_mgr = kwargs.get('_job_manager')
info = {
'task': task,
'task_inputs': task_inputs,
'task_outputs': task_outputs,
'mode': mode,
'inputs': inputs,
'outputs': outputs,
'status': status,
'job_mgr': job_mgr,
'kwargs': kwargs
}
events.trigger('run.before', info)
try:
# If some inputs are not there, fill in with defaults
_validateInputs(task_inputs, inputs)
for name, d in inputs.iteritems():
task_input = task_inputs[name]
if task_input.get('stream'):
continue # this input will be fetched as a stream
if fetch:
if status == JobStatus.RUNNING and 'data' not in d:
set_job_status(job_mgr, JobStatus.FETCHING_INPUT)
d['data'] = io.fetch(d, **dict({'task_input': task_input}, **kwargs))
events.trigger('run.handle_input', {
'info': info,
'task_input': task_input,
'input': d,
'name': name
})
if 'script_data' not in d:
d['script_data'] = d['data']
for name, task_output in task_outputs.iteritems():
if name not in outputs:
outputs[name] = {}
# Set the appropriate job status flag
set_job_status(job_mgr, status)
# Actually run the task for the given mode
_task_map[mode](
task=task, inputs=inputs, outputs=outputs, task_inputs=task_inputs,
task_outputs=task_outputs, **kwargs)
for name, task_output in task_outputs.iteritems():
if task_output.get('stream'):
continue # this output has already been sent as a stream
output = outputs[name]
e = events.trigger('run.handle_output', {
'info': info,
'task_output': task_output,
'output': output,
'outputs': outputs,
'name': name
})
if not e.default_prevented:
data = outputs[name]['script_data']
if status == JobStatus.RUNNING:
set_job_status(job_mgr, JobStatus.PUSHING_OUTPUT)
io.push(data, outputs[name], **dict({'task_output': task_output}, **kwargs))
output.pop('script_data', None)
events.trigger('run.after', info)
return outputs
except StateTransitionException:
if job_mgr:
status = job_mgr.refreshStatus()
# If we are canceling we want to stay in that state, otherwise raise
# the exception
if status != JobStatus.CANCELING:
raise
else:
raise
finally:
events.trigger('run.finally', info)
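# Hedged sketch of a task specification and bindings accepted by run(); the
# field values are illustrative only:
#
#   task = {
#       'mode': 'python',
#       'script': 'b = a * 2',
#       'inputs': [{'id': 'a', 'type': 'number', 'format': 'number'}],
#       'outputs': [{'id': 'b', 'type': 'number', 'format': 'number'}],
#   }
#   outputs = run(task, inputs={'a': {'format': 'number', 'data': 21}})
#   # outputs['b']['data'] would then hold the pushed result (42 here)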
register_executor('python', python_run)
register_executor('workflow', workflow_run)
# Load plugins that are enabled in the config file or env var
_plugins = os.environ.get('WORKER_PLUGINS_ENABLED',
config.get('girder_worker', 'plugins_enabled'))
_plugins = [p.strip() for p in _plugins.split(',') if p.strip()]
_paths = os.environ.get(
'WORKER_PLUGIN_LOAD_PATH', config.get(
'girder_worker', 'plugin_load_path')).split(':')
_paths = [p for p in _paths if p.strip()]
_paths.append(os.path.join(PACKAGE_DIR, 'plugins'))
utils.load_plugins(_plugins, _paths, quiet=True)
|
python
|
import os
from urllib.parse import urlparse
from boxsdk import OAuth2, Client
from django.conf import settings
from swag_auth.base import BaseSwaggerDownloader, BaseAPIConnector
from swag_auth.oauth2.views import CustomOAuth2Adapter
class BoxAPIConnector(BaseAPIConnector):
def __init__(self, token):
super().__init__(token)
auth = OAuth2(
client_id=settings.SWAGAUTH_SETTINGS['box']['APP']['client_id'],
client_secret=settings.SWAGAUTH_SETTINGS['box']['APP']['secret'],
access_token=token,
)
self.client = Client(auth)
def get_file_content(self, file_id: str):
return self.client.file(file_id).content()
def get_file_information(self, file_id: str):
return self.client.file(file_id).get()
class BoxSwaggerDownloader(BaseSwaggerDownloader):
api_connector_cls = BoxAPIConnector
def get_swagger_content(self, url: str, connector: 'BoxAPIConnector'):
file_id = self.get_file_id(url)
return connector.get_file_content(file_id)
def get_extension(self, url: str) -> str:
file_id = self.get_file_id(url)
connector = self.get_api_connector()
file = connector.get_file_information(file_id)
return os.path.splitext(file.name)[1][1:]
def get_file_id(self, url: str) -> str:
return urlparse(url).path.lstrip('/').split('/')[1]
class BoxConnector(CustomOAuth2Adapter):
provider_id = 'box'
access_token_url = "https://api.box.com/oauth2/token"
authorize_url = "https://account.box.com/api/oauth2/authorize"
profile_url = "https://api.box.com/2.0/users/me"
client_id = settings.SWAGAUTH_SETTINGS[provider_id]['APP']['client_id']
secret = settings.SWAGAUTH_SETTINGS[provider_id]['APP']['secret']
scope = settings.SWAGAUTH_SETTINGS[provider_id]['SCOPE']
api_connector_class = BoxSwaggerDownloader
connector_classes = [BoxConnector]
|
python
|
from typing import List, Optional, Dict, Iterable
from fhirclient.models.codeableconcept import CodeableConcept
from fhirclient.models.encounter import Encounter
import fhirclient.models.patient as fhir_patient
from fhirclient.models.fhirreference import FHIRReference
from transmart_loader.loader_exception import LoaderException
from .fhir import Collection, Condition
from transmart_loader.transmart import DataCollection, Patient, Concept, \
Observation, TreeNode, Visit, TrialVisit, Study, ValueType, DateValue, \
CategoricalValue, ConceptNode
gender_concept = Concept(
'http://hl7.org/fhir/Patient.gender', 'Gender', 'http://hl7.org/fhir/Patient.gender', ValueType.Categorical)
birth_date_concept = Concept(
'http://hl7.org/fhir/Patient.birth_date', 'Birth date', 'http://hl7.org/fhir/Patient.birth_date', ValueType.Date)
patient_concepts = ['http://hl7.org/fhir/Patient.gender', 'http://hl7.org/fhir/Patient.birth_date']
study = Study('FHIR', 'FHIR')
trial_visit = TrialVisit(study, '')
def map_concept(codeable_concept: CodeableConcept) -> Concept:
"""
Maps a codeable concept to a TranSMART concept.
The system and code are both used for the concept code and the path.
The value type is always Categorical.
:param codeable_concept: the codeable concept
:return: a TranSMART Concept entity
"""
concept_code = '{}/{}'.format(
codeable_concept.coding[0].system,
codeable_concept.coding[0].code)
return Concept(
concept_code,
codeable_concept.text,
concept_code,
ValueType.Categorical
)
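# Illustrative example (the coding below is hypothetical): a CodeableConcept
# with coding[0].system == 'http://snomed.info/sct', coding[0].code == '44054006'
# and text == 'Diabetes mellitus type 2' maps to
#   Concept('http://snomed.info/sct/44054006', 'Diabetes mellitus type 2',
#           'http://snomed.info/sct/44054006', ValueType.Categorical)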
def get_reference(ref_obj: FHIRReference) -> Optional[str]:
"""
Returns a reference string from a FHIR Reference if it exists.
:param ref_obj: the FHIR Reference object
:return: the reference string or None
"""
if ref_obj is None:
return None
reference: str = ref_obj.reference
if reference is None:
return None
if not reference.startswith('urn:uuid:'):
raise LoaderException('Invalid reference: {}'.format(reference))
return reference[len('urn:uuid:'):]
class Mapper:
"""
FHIR to TranSMART mapping
"""
def __init__(self):
self.concepts: Dict[str, Concept] = {}
self.studies: List[Study] = [study]
self.trial_visits: List[TrialVisit] = [trial_visit]
self.visits: Dict[str, Visit] = {}
self.patient_nodes: List[TreeNode] = []
self.ontology_nodes: List[TreeNode] = []
self.patients: Dict[str, Patient] = {}
self.observations: List[Observation] = []
def add_ontology_node(self, concept: Concept) -> None:
if concept.concept_code in patient_concepts:
self.patient_nodes.append(ConceptNode(concept))
else:
self.ontology_nodes.append(ConceptNode(concept))
def add_concept(self, concept: Concept) -> None:
if concept.concept_code not in self.concepts:
self.concepts[concept.concept_code] = concept
self.add_ontology_node(concept)
def add_observation(self, observation: Observation) -> None:
self.add_concept(observation.concept)
self.observations.append(observation)
def map_patient(self, patient: fhir_patient.Patient) -> None:
""" Maps a FHIR Patient Resource to a Patient entity in TranSMART.
The gender and birthDate are mapped to a Date Observation entity.
The Patient and Observations are added to the collections of
Patients and Observations returned by the mapper.
:param patient: a FHIR Patient Resource
"""
subject = Patient(patient.id, patient.gender, [])
self.patients[patient.id] = subject
gender_observation = Observation(
subject,
gender_concept,
None,
trial_visit,
None,
None,
CategoricalValue(patient.gender))
self.add_observation(gender_observation)
birth_date_observation = Observation(
subject,
birth_date_concept,
None,
trial_visit,
None,
None,
DateValue(patient.birthDate.date))
self.add_observation(birth_date_observation)
def map_encounter(self, encounter: Encounter) -> None:
""" Maps an FHIR Encounter Resource to a Visit entity in TranSMART.
The reference to the subject is resolved to the corresponding TranSMART Patient.
The Visit is added to the collection of Visits returned by the mapper.
:param encounter: a FHIR Encounter Resource
"""
subject = self.patients[get_reference(encounter.subject)]
visit = Visit(
subject,
encounter.id,
encounter.status,
encounter.period.start.date if encounter.period else None,
encounter.period.end.date if encounter.period else None,
encounter.class_fhir.code if encounter.class_fhir else None,
encounter.hospitalization,
None,
[]
)
self.visits[encounter.id] = visit
def map_condition(self, condition: Condition) -> None:
""" Maps a FHIR Condition Resource to a categorical Observation entity
in TranSMART.
The reference to the subject is resolved to the corresponding TranSMART Patient.
The reference to the encounter is resolved to the corresponding TranSMART Visit.
The Observation is added to the collection of Observations returned by the mapper.
:param condition: a FHIR Condition Resource
"""
subject = self.patients[get_reference(condition.subject)]
visit_ref = get_reference(condition.encounter)
if visit_ref is None:
visit_ref = get_reference(condition.context)
visit = self.visits[visit_ref] if visit_ref else None
concept = map_concept(condition.code)
observation = Observation(
subject,
concept,
visit,
trial_visit,
None,
None,
CategoricalValue(concept.name)
)
self.add_observation(observation)
def map_collection(self, collection: Collection) -> None:
""" Maps a collection of FHIR Resources, in the following order:
Patients, Encounters, Conditions.
:param collection: a collection of FHIR Resources.
"""
for patient in collection.patients:
self.map_patient(patient)
for encounter in collection.encounters:
self.map_encounter(encounter)
for condition in collection.conditions:
self.map_condition(condition)
def get_ontology(self) -> Iterable[TreeNode]:
""" Returns a forest of directed acyclic graphs of ontology nodes.
:return: the root nodes
"""
patient_root = TreeNode('Patient')
for node in self.patient_nodes:
patient_root.add_child(node)
ontology_root = TreeNode('Ontology')
for node in self.ontology_nodes:
ontology_root.add_child(node)
return [patient_root, ontology_root]
@staticmethod
def map(collection: Optional[Collection], with_ontology: bool = False) -> Optional[DataCollection]:
""" Maps a collection of FHIR Resources to a collection of TranSMART
entities.
:param collection: the collection of FHIR Resources
:param with_ontology: whether to generate ontology codes and ontology tree nodes
:return: a TranSMART data collection
"""
if collection is None:
return None
mapper = Mapper()
mapper.map_collection(collection)
return DataCollection(
mapper.concepts.values() if with_ontology else [],
[],
[],
mapper.studies,
mapper.trial_visits,
mapper.visits.values(),
mapper.get_ontology() if with_ontology else [],
mapper.patients.values(),
mapper.observations)
|
python
|
from datetime import datetime
import django
import factory
from django.utils import timezone
from api.cases.enums import CaseTypeEnum
from api.compliance.enums import ComplianceVisitTypes, ComplianceRiskValues
from api.compliance.models import OpenLicenceReturns, ComplianceSiteCase, CompliancePerson, ComplianceVisitCase
from api.organisations.tests.factories import OrganisationFactory, SiteFactory
class OpenLicenceReturnsFactory(factory.django.DjangoModelFactory):
returns_data = "\na,b,c,d,e"
year = datetime.now().year
class Meta:
model = OpenLicenceReturns
class ComplianceSiteCaseFactory(factory.django.DjangoModelFactory):
case_type_id = CaseTypeEnum.COMPLIANCE_SITE.id
submitted_at = timezone.now()
organisation = factory.SubFactory(OrganisationFactory)
site = factory.SubFactory(SiteFactory, organisation=factory.SelfAttribute("..organisation"))
class Meta:
model = ComplianceSiteCase
class ComplianceVisitCaseFactory(factory.django.DjangoModelFactory):
site_case = factory.SubFactory(
ComplianceSiteCaseFactory,
organisation=factory.SelfAttribute("..organisation"),
status=factory.SelfAttribute("..status"),
)
case_type_id = CaseTypeEnum.COMPLIANCE_VISIT.id
visit_type = ComplianceVisitTypes.FIRST_CONTACT
visit_date = django.utils.timezone.now().date()
overall_risk_value = ComplianceRiskValues.VERY_LOW
licence_risk_value = 5
overview = factory.Faker("word")
inspection = factory.Faker("word")
compliance_overview = factory.Faker("word")
compliance_risk_value = ComplianceRiskValues.LOWER
individuals_overview = factory.Faker("word")
individuals_risk_value = ComplianceRiskValues.MEDIUM
products_overview = factory.Faker("word")
products_risk_value = ComplianceRiskValues.HIGHEST
class Meta:
model = ComplianceVisitCase
class PeoplePresentFactory(factory.django.DjangoModelFactory):
name = factory.Faker("name")
job_title = factory.Faker("name")
visit_case = None
class Meta:
model = CompliancePerson
|
python
|
#!/usr/bin/env python3
"""某青空文庫の実験用コード。
参考:
<https://github.com/ozt-ca/tjo.hatenablog.samples/tree/master/r_samples/public_lib/jp/aozora>
<https://tjo.hatenablog.com/entry/2019/05/31/190000>
AP: 0.945
Prec: 0.88742
Rec: 0.86312
Results:
```
[INFO ] Accuracy: 0.892 (Error: 0.108)
[INFO ] F1-macro: 0.894
[INFO ] AUC-macro: 0.990
[INFO ] AP-macro: 0.957
[INFO ] Prec-macro: 0.898
[INFO ] Rec-macro: 0.892
[INFO ] Logloss: 0.339
```
"""
import pathlib
import numpy as np
import pandas as pd
import tensorflow as tf
import pytoolkit as tk
input_shape = (512,)
batch_size = 32
num_classes = 8
models_dir = pathlib.Path(f"models/{pathlib.Path(__file__).stem}")
app = tk.cli.App(output_dir=models_dir)
logger = tk.log.get(__name__)
@app.command(logfile=False)
def check():
create_model().check()
@app.command(use_horovod=True)
def train():
train_set, val_set = load_data()
model = create_model()
model.train(train_set, val_set)
pred = model.predict(val_set, fold=0)
if tk.hvd.is_master():
evals = tk.evaluations.print_classification(val_set.labels, pred)
tk.notifications.post_evals(evals)
@app.command(use_horovod=True)
def validate():
_, val_set = load_data()
model = create_model().load()
pred = model.predict(val_set, fold=0)
tk.evaluations.print_classification(val_set.labels, pred)
def load_data():
df_train = pd.read_csv(
"https://raw.githubusercontent.com/ozt-ca/tjo.hatenablog.samples"
"/master/r_samples/public_lib/jp/aozora/aozora_8writers_train.csv",
header=None,
names=["text", "class"],
)
df_test = pd.read_csv(
"https://raw.githubusercontent.com/ozt-ca/tjo.hatenablog.samples"
"/master/r_samples/public_lib/jp/aozora/aozora_8writers_test.csv",
header=None,
names=["text", "class"],
)
class_names = list(sorted(np.unique(df_train["class"].values)))
assert len(class_names) == num_classes
class_to_id = np.vectorize({c: i for i, c in enumerate(class_names)}.__getitem__)
X_train = df_train["text"].values
y_train = class_to_id(df_train["class"].values)
X_test = df_test["text"].values
y_test = class_to_id(df_test["class"].values)
return tk.data.Dataset(X_train, y_train), tk.data.Dataset(X_test, y_test)
def create_model():
return tk.pipeline.KerasModel(
create_network_fn=create_network,
nfold=1,
train_data_loader=MyDataLoader(data_augmentation=True),
val_data_loader=MyDataLoader(),
epochs=20,
callbacks=[tk.callbacks.CosineAnnealing()],
fit_params={"workers": 8},
models_dir=models_dir,
model_name_format="model.h5",
skip_if_exists=False,
)
def create_network():
inputs = x = tf.keras.layers.Input(input_shape)
x = tf.keras.layers.Embedding(65536, 256, mask_zero=True)(x)
x1 = tf.keras.layers.GlobalAveragePooling1D()(x)
x2 = tf.keras.layers.GlobalMaxPooling1D()(tk.layers.RemoveMask()(x))
x = tf.keras.layers.concatenate([x1, x2])
x = tf.keras.layers.Dense(
num_classes,
kernel_regularizer=tf.keras.regularizers.l2(1e-4),
activation="softmax",
)(x)
model = tf.keras.models.Model(inputs=inputs, outputs=x)
tk.models.compile(model, "adam", "categorical_crossentropy", ["acc"])
return model, model
class MyDataLoader(tk.data.DataLoader):
def __init__(self, data_augmentation=False):
super().__init__(batch_size=batch_size)
self.data_augmentation = data_augmentation
def get_data(self, dataset: tk.data.Dataset, index: int):
X, y = dataset.get_data(index)
X = np.frombuffer(X.replace(" ", "").encode("utf-16-le"), dtype=np.uint16)
X = tf.keras.preprocessing.sequence.pad_sequences([X], input_shape[0])[0]
y = tf.keras.utils.to_categorical(y, num_classes)
return X, y
if __name__ == "__main__":
app.run(default="train")
|
python
|
import FWCore.ParameterSet.Config as cms
from SimG4Core.Configuration.SimG4Core_cff import *
g4SimHits.Watchers = cms.VPSet(cms.PSet(
MaterialBudgetVolume = cms.PSet(
lvNames = cms.vstring('BEAM', 'BEAM1', 'BEAM2', 'BEAM3', 'BEAM4', 'Tracker', 'ECAL', 'HCal', 'VCAL', 'MGNT', 'MUON', 'OQUA', 'CALOEC'),
lvLevels = cms.vint32(3, 3, 3, 3, 3, 3, 4, 4, 3, 4, 3, 3, 4),
useDD4Hep = cms.bool(False),
),
type = cms.string('MaterialBudgetVolume'),
))
from Configuration.ProcessModifiers.dd4hep_cff import dd4hep
dd4hep.toModify( g4SimHits.Watchers[0].MaterialBudgetVolume, useDD4Hep = True )
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, David Stygstra <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: modprobe
short_description: Load or unload kernel modules
version_added: 1.4
author:
- David Stygstra (@stygstra)
- Julien Dauphant
- Matt Jeffery
description:
- Load or unload kernel modules.
options:
name:
required: true
description:
- Name of kernel module to manage.
state:
description:
- Whether the module should be present or absent.
choices: [ absent, present ]
default: present
params:
description:
            - Module parameters.
default: ''
version_added: "1.6"
'''
EXAMPLES = '''
- name: Add the 802.1q module
modprobe:
name: 8021q
state: present
- name: Add the dummy module
modprobe:
name: dummy
state: present
params: 'numdummies=2'
'''
import shlex
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
params=dict(type='str', default=''),
),
supports_check_mode=True,
)
name = module.params['name']
params = module.params['params']
state = module.params['state']
# FIXME: Adding all parameters as result values is useless
result = dict(
changed=False,
name=name,
params=params,
state=state,
)
# Check if module is present
try:
modules = open('/proc/modules')
present = False
module_name = name.replace('-', '_') + ' '
for line in modules:
if line.startswith(module_name):
present = True
break
modules.close()
except IOError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result)
# Add/remove module as needed
if state == 'present':
if not present:
if not module.check_mode:
command = [module.get_bin_path('modprobe', True), name]
command.extend(shlex.split(params))
rc, out, err = module.run_command(command)
if rc != 0:
module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
result['changed'] = True
elif state == 'absent':
if present:
if not module.check_mode:
rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name])
if rc != 0:
module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
python
|
import datetime
import struct
import time
from sakuraio.hardware.commands import CommandMixins, CMD_ERROR_NONE
from sakuraio.hardware.exceptions import CommandError, ParityError
SAKURAIO_SLAVE_ADDR = 0x4f
def calc_parity(values):
parity = 0x00
for value in values:
parity ^= value
return parity
class SakuraIOBase(CommandMixins):
response_wait_time = 0.01
def start(self, write=True):
return
def end(self):
return
def send_byte(self, value):
raise NotImplementedError()
def recv_byte(self):
raise NotImplementedError()
def execute_command(self, cmd, request=[], as_bytes=False):
response = []
request = [cmd, len(request)] + request
request.append(calc_parity(request))
try:
# Request
self.start(True)
for value in request:
self.send_byte(value)
# wait response
time.sleep(self.response_wait_time)
# Response
self.start(False)
status = self.recv_byte()
if status != CMD_ERROR_NONE:
raise CommandError(status)
length = self.recv_byte()
response = []
for i in range(length):
response.append(self.recv_byte())
parity = self.recv_byte()
if parity != calc_parity([status, length] + response):
raise ParityError()
except:
self.end()
raise
self.end()
if as_bytes:
return struct.pack("B" * len(response), *response)
return response
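# Illustrative frame layout (the command byte 0xA0 and payload are made up):
#   request:  [0xA0, 0x02, 0x12, 0x34, 0xA0 ^ 0x02 ^ 0x12 ^ 0x34]
#   response: [status, length, *payload, parity]
# The trailing byte is the XOR parity from calc_parity(); a response whose
# parity does not match raises ParityError, and a status other than
# CMD_ERROR_NONE raises CommandError.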
|
python
|
from django.apps import AppConfig
class RidersportalAppConfig(AppConfig):
name = 'ridersportal_app'
|
python
|
#!/usr/bin/env python
import os, imp
def main(args):
if len(args) == 0:
print "Usage: python launch.py [args...]"
return
launch_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"auth_server.py")
auth_server = imp.load_source("auth_server", launch_path)
auth_server.main(args)
if __name__ == '__main__':
import sys
main(sys.argv)
|
python
|
"""Given two dictionaries for weights, find pairs of matching weights"""
import argparse
import io
import pathlib
import sys
import numpy as np
def find_matching_weights(filepath0, filepath1):
dict0 = np.load(io.BytesIO(filepath0.read_bytes()), allow_pickle=True).item()
dict1 = np.load(io.BytesIO(filepath1.read_bytes()), allow_pickle=True).item()
for key in dict0:
matched_key, d = min(((k, _distance(dict0[key], v)) for k, v in dict1.items()), key=lambda x: x[1])
if d < sys.maxsize:
print(f"{key},{matched_key},{d}")
else:
print(f"Failed to find: {key}", file=sys.stderr)
def _distance(value0, value1):
if value0.size != value1.size:
return sys.maxsize
min_max_mean = np.array([np.min(value0) - np.min(value1), np.max(value0) - np.max(value1), np.mean(value0) - np.mean(value1)])
return np.linalg.norm(min_max_mean)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('npy_filepath0', type=pathlib.Path)
parser.add_argument('npy_filepath1', type=pathlib.Path)
args = parser.parse_args()
if not args.npy_filepath0.exists():
args.error(f"Input not found: {args.npy_filepath0}")
if not args.npy_filepath1.exists():
args.error(f"Input not found: {args.npy_filepath1}")
find_matching_weights(args.npy_filepath0, args.npy_filepath1)
if __name__ == '__main__':
main()
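# Hedged usage sketch (file names are placeholders): running
#   python find_matching_weights.py model_a_weights.npy model_b_weights.npy > matches.csv
# prints one "key0,key1,distance" line per matched weight to stdout and reports
# keys it could not match on stderr.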
|
python
|
prestamo_bolivares = float(input("Loan amount in bolivares: "))
intereses_pagados = float(input("Interest paid (in bolivares): "))
tasa_interes_anual = round(((intereses_pagados * 100) / (prestamo_bolivares * 4)), 10)
print(f"The annual interest rate on the loan of {prestamo_bolivares:,} bolivares over 4 years is {tasa_interes_anual}%")
|
python
|
import os
from os.path import join
from nip.config import defaults
def get_basename():
return os.path.basename(os.getcwd().rstrip('/'))
def directory_is_empty(target_dir):
if os.path.exists(target_dir):
for _, folders, files in os.walk(target_dir):
return False if (files or folders) else True
else:
return True
def current_working_directory_is_empty():
return directory_is_empty(os.getcwd())
def get_nipfile_path(env=defaults):
return join(os.getcwd(), env.NIPFILE)
def get_python_setup_py_path(env=defaults):
return join(os.getcwd(), env.PYTHON_SETUP_PY)
def get_gitignore_path(env=defaults):
return join(os.getcwd(), env.GITIGNORE)
def get_requirements_path(env=defaults):
return join(os.getcwd(), env.REQUIREMENTS_FILE)
def get_dev_requirements_path(env=defaults):
return join(os.getcwd(), env.DEV_REQUIREMENTS_FILE)
def get_python_modules_path(env=defaults):
return join(os.getcwd(), env.PYTHON_MODULES)
def get_python_modules_bin_path(env=defaults):
return join(get_python_modules_path(env), 'bin')
def get_pip_executable_path(env=defaults):
return join(get_python_modules_bin_path(env), env.PIP)
def get_python_executable_path(env=defaults):
return join(get_python_modules_bin_path(env), env.PYTHON)
def file_write_lines(lines, to):
with open(to, 'w+') as fs:
fs.write("\n".join(lines))
|
python
|
# simple script to delete duplicated files based on scene id; the id-prefix
# length used for matching is chosen per product (LC8, MCD, VNP, VJ1) below
# Author: Arthur Elmes, 2020-02-25
import os, glob, sys
def main(wkdir, product):
print("Deleting duplicates in: " + str(wkdir) + " for product: " + str(product))
if product == "LC8":
index = 29
elif product == "VNP" or product == "VJ1":
index = 22
elif product == "MCD":
index = 21
else:
print("Please enter a valid product from list: LC8, MCD, VNP, VJ1.")
sys.exit(1)
while True:
num_files = 0
num_dupes = 0
dupe_state = 1
for root, dirs, files in os.walk(wkdir):
num_files = len(files)
for file_name in files:
# Strip off the end of the file name, since it is always different
                file_name = str(file_name[:index] + "*")
# Find any identical product ids
file_name_list = glob.glob(os.path.join(root, file_name))
# Count up how many files there are, including duplicates
num_dupes = num_dupes + len(file_name_list)
# If there are duplicates, delete the second (could be first,
# doesn't matter.
if len(file_name_list) > 1:
os.remove(file_name_list[1])
# Keep looping until the number of files and 'duplicates' is
# equal, in case of multiple duplicates
if num_files == num_dupes:
break
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
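# Hedged usage sketch (the script name and directory are placeholders):
#   python delete_duplicates.py /data/downloads/LC8 LC8
# walks the given directory and removes files whose product-id prefix repeats.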
|
python
|
"""
Utility functions
"""
import logging
import os
import sys
def get_func(func_name, module_pathname):
"""
Get function from module
:param func_name: function name
:param module_pathname: pathname to module
:return:
"""
if sys.version_info[0] >= 3:
if sys.version_info[1] >= 6:
import importlib.util
spec = importlib.util.spec_from_file_location('module_name', module_pathname)
cells_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(cells_module)
return getattr(cells_module, func_name)
elif sys.version_info[1] >= 4:
import importlib.machinery
module = importlib.machinery.SourceFileLoader('module_name', module_pathname).load_module()
return getattr(module, func_name)
fatal('Python version {} not supported'.format(sys.version))
def fatal(msg):
"""
Print message and exit
:param msg: message to print
:return:
"""
logging.fatal('{}; exiting.'.format(msg))
print()
sys.exit(1)
def check_isfile(pathname):
if not os.path.isfile(pathname):
fatal("File '{}' does not exist or not regular file".format(pathname))
def print_console(m):
with open('/dev/stdout', 'w') as f:
f.write('{}\n'.format(m))
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import datetime
import eventlet
import eventlet.wsgi
eventlet.patcher.monkey_patch(all=False, socket=True)
import json
import logging
import sys
import routes
import routes.middleware
import webob.dec
import webob.exc
from xml.dom import minidom
from xml.parsers import expat
from melange.openstack.common import exception
LOG = logging.getLogger('wsgi')
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.DEBUG):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.strip("\n"))
def run_server(application, port):
"""Run a WSGI server with the given application."""
sock = eventlet.listen(('0.0.0.0', port))
eventlet.wsgi.server(sock, application)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, threads=1000):
self.pool = eventlet.GreenPool(threads)
def start(self, application, port, host='0.0.0.0', backlog=4096):
"""Run a WSGI server with the given application."""
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
def wait(self):
"""Wait until all servers have completed running."""
try:
self.pool.waitall()
except KeyboardInterrupt:
pass
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
logger = logging.getLogger('eventlet.wsgi.server')
eventlet.wsgi.server(socket, application, custom_pool=self.pool,
log=WritableLogger(logger))
class Middleware(object):
"""
Base WSGI middleware wrapper. These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
def __init__(self, application):
self.application = application
def process_request(self, req):
"""
Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
class Debug(Middleware):
"""
Helper class that can be inserted into any WSGI application chain
to get information about the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print ("*" * 40) + " REQUEST ENVIRON"
for key, value in req.environ.items():
print key, "=", value
print
resp = req.get_response(self.application)
print ("*" * 40) + " RESPONSE HEADERS"
for (key, value) in resp.headers.iteritems():
print key, "=", value
print
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""
Iterator that prints the contents of a wrapper string iterator
when iterated.
"""
print ("*" * 40) + " BODY"
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print
class Router(object):
"""
WSGI middleware that maps incoming requests to WSGI apps.
"""
def __init__(self, mapper):
"""
Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be a wsgi.Controller, who will route
the request to the action method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, "/svrlist", controller=sc, action="list")
# Actions are all implicitly defined
mapper.resource("server", "servers", controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify
def __call__(self, req):
"""
Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify
def _dispatch(req):
"""
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
class Request(webob.Request):
"""Add some Openstack API-specific logic to the base webob.Request."""
default_request_content_types = ('application/json', 'application/xml')
default_accept_types = ('application/json', 'application/xml')
default_accept_type = 'application/json'
def best_match_content_type(self, supported_content_types=None):
"""Determine the requested response content-type.
Based on the query extension then the Accept header.
Defaults to default_accept_type if we don't find a preference
"""
supported_content_types = (supported_content_types or
self.default_accept_types)
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
ctype = 'application/{0}'.format(parts[1])
if ctype in supported_content_types:
return ctype
bm = self.accept.best_match(supported_content_types)
return bm or self.default_accept_type
def get_content_type(self, allowed_content_types=None):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
content_type = self.content_type
allowed_content_types = (allowed_content_types or
self.default_request_content_types)
if content_type not in allowed_content_types:
raise exception.InvalidContentType(content_type=content_type)
return content_type
class Resource(object):
"""
WSGI app that handles (de)serialization and controller dispatch.
Reads routing information supplied by RoutesMiddleware and calls
the requested action method upon its deserializer, controller,
and serializer. Those three objects may implement any of the basic
controller action methods (create, update, show, index, delete)
along with any that may be specified in the api router. A 'default'
method may also be implemented to be used in place of any
non-implemented actions. Deserializer methods must accept a request
argument and return a dictionary. Controller methods must accept a
request argument. Additionally, they must also accept keyword
arguments that represent the keys returned by the Deserializer. They
may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
"""
def __init__(self, controller, deserializer=None, serializer=None):
"""
:param controller: object that implement methods created by routes lib
:param deserializer: object that supports webob request deserialization
through controller-like actions
:param serializer: object that supports webob response serialization
through controller-like actions
"""
self.controller = controller
self.serializer = serializer or ResponseSerializer()
self.deserializer = deserializer or RequestDeserializer()
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
try:
action, action_args, accept = self.deserialize_request(request)
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return webob.exc.HTTPUnsupportedMediaType(explanation=msg)
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return webob.exc.HTTPBadRequest(explanation=msg)
action_result = self.execute_action(action, request, **action_args)
try:
return self.serialize_response(action, action_result, accept)
# return unserializable result (typically a webob exc)
except Exception:
return action_result
def deserialize_request(self, request):
return self.deserializer.deserialize(request)
def serialize_response(self, action, action_result, accept):
return self.serializer.serialize(action_result, accept, action)
def execute_action(self, action, request, **action_args):
return self.dispatch(self.controller, action, request, **action_args)
def dispatch(self, obj, action, *args, **kwargs):
"""Find action-specific method on self and call it."""
try:
method = getattr(obj, action)
except AttributeError:
method = getattr(obj, 'default')
return method(*args, **kwargs)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except Exception:
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
def sanitizer(obj):
if isinstance(obj, datetime.datetime):
_dtime = obj - datetime.timedelta(microseconds=obj.microsecond)
return _dtime.isoformat()
return obj
return json.dumps(data, default=sanitizer)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
        # list() is needed on Python 3, where dict.keys() returns a view.
        root_key = list(data.keys())[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toprettyxml(indent=' ', encoding='UTF-8')
    # NOTE (ameade): the has_atom flag should be removed once all of the
    # xml serializers and view builders have been updated to the current
    # spec, which requires all responses to include xmlns:atom; the flag
    # only exists to keep the current tests from breaking.
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
# TODO(bcwaldon): accomplish this without a type-check
if type(data) is list:
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
# TODO(bcwaldon): accomplish this without a type-check
elif type(data) is dict:
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
class ResponseHeadersSerializer(ActionDispatcher):
"""Default response headers serialization"""
def serialize(self, response, data, action):
self.dispatch(response, data, action=action)
def default(self, response, data):
response.status_int = 200
class ResponseSerializer(object):
"""Encode the necessary pieces into a response object"""
def __init__(self, body_serializers=None, headers_serializer=None):
self.body_serializers = {
'application/xml': XMLDictSerializer(),
'application/json': JSONDictSerializer(),
}
self.body_serializers.update(body_serializers or {})
self.headers_serializer = (headers_serializer or
ResponseHeadersSerializer())
def serialize(self, response_data, content_type, action='default'):
"""Serialize a dict into a string and wrap in a wsgi.Request object.
:param response_data: dict produced by the Controller
:param content_type: expected mimetype of serialized response body
"""
response = webob.Response()
self.serialize_headers(response, response_data, action)
self.serialize_body(response, response_data, content_type, action)
return response
def serialize_headers(self, response, data, action):
self.headers_serializer.serialize(response, data, action)
def serialize_body(self, response, data, content_type, action):
response.headers['Content-Type'] = content_type
if data is not None:
serializer = self.get_body_serializer(content_type)
response.body = serializer.serialize(data, action)
def get_body_serializer(self, content_type):
try:
return self.body_serializers[content_type]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
class RequestHeadersDeserializer(ActionDispatcher):
"""Default request headers deserializer"""
def deserialize(self, request, action):
return self.dispatch(request, action=action)
def default(self, request):
return {}
class RequestDeserializer(object):
"""Break up a Request object into more useful pieces."""
def __init__(self, body_deserializers=None, headers_deserializer=None,
supported_content_types=None):
self.supported_content_types = supported_content_types
self.body_deserializers = {
'application/xml': XMLDeserializer(),
'application/json': JSONDeserializer(),
}
self.body_deserializers.update(body_deserializers or {})
self.headers_deserializer = (headers_deserializer or
RequestHeadersDeserializer())
def deserialize(self, request):
"""Extract necessary pieces of the request.
:param request: Request object
:returns tuple of expected controller action name, dictionary of
keyword arguments to pass to the controller, the expected
content type of the response
"""
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
action_args.update(self.deserialize_headers(request, action))
action_args.update(self.deserialize_body(request, action))
accept = self.get_expected_content_type(request)
return (action, action_args, accept)
def deserialize_headers(self, request, action):
return self.headers_deserializer.deserialize(request, action)
def deserialize_body(self, request, action):
if not len(request.body) > 0:
LOG.debug(_("Empty body provided in request"))
return {}
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
raise
if content_type is None:
LOG.debug(_("No Content-Type provided in request"))
return {}
try:
deserializer = self.get_body_deserializer(content_type)
except exception.InvalidContentType:
LOG.debug(_("Unable to deserialize body as provided Content-Type"))
raise
return deserializer.deserialize(request.body, action)
def get_body_deserializer(self, content_type):
try:
return self.body_deserializers[content_type]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def get_expected_content_type(self, request):
return request.best_match_content_type(self.supported_content_types)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except Exception:
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return json.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = minidom.parseString(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name"""
for node in parent.childNodes:
if node.nodeName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name"""
for node in parent.childNodes:
if node.nodeName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node"""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
def default(self, datastring):
return {'body': self._from_xml(datastring)}
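# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how Resource ties the deserializer, controller
# and serializer together.  The controller class, URL and routing dict below
# are hypothetical, and the sketch assumes the module-level LOG and _ helpers
# defined earlier in this file are available.
if __name__ == '__main__':
    class _DemoController(object):
        def index(self, request):
            return {'demo': {'message': 'hello'}}

    _resource = Resource(_DemoController())
    _request = Request.blank('/demo.json')
    # RoutesMiddleware would normally populate this key.
    _request.environ['wsgiorg.routing_args'] = ((), {'action': 'index'})
    _action, _args, _accept = _resource.deserialize_request(_request)
    _result = _resource.execute_action(_action, _request, **_args)
    print(_accept, JSONDictSerializer().serialize(_result))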
|
python
|
import dynamixel_sdk as sdk
from typing import Union, Tuple, Any
from enum import Enum
from .connection import Connection
from .register import Instruction, AX, MX
from .util import validate_response
class MotorType(Enum):
AX = 1
MX = 2
class Motor:
def __init__(
self, conn: Connection, id: int, type: MotorType, protocol_ver: float = 1.0
):
self.id = id
self.type = type
self.conn = conn
self.protocol_version = protocol_ver
self.packet_handler = sdk.PacketHandler(self.protocol_version)
self.regtable = AX if self.type == MotorType.AX else MX
def write(self, instruction: Instruction, value: int) -> Tuple[Any, Any]:
addrs = self.regtable[instruction]
start, width = min(addrs), len(addrs)
try:
return {
1: self.packet_handler.write1ByteTxRx,
2: self.packet_handler.write2ByteTxRx,
4: self.packet_handler.write4ByteTxRx,
}[width](self.conn.port_handler, self.id, start, value)
except KeyError:
raise NotImplementedError(f"Register width not supported {width}")
def read(self, instruction: Instruction) -> int:
addrs = self.regtable[instruction]
start, width = min(addrs), len(addrs)
try:
return {
1: self.packet_handler.read1ByteTxRx,
2: self.packet_handler.read2ByteTxRx,
4: self.packet_handler.read4ByteTxRx,
}[width](self.conn.port_handler, self.id, start)
except KeyError:
raise NotImplementedError(f"Register width not supported {width}")
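# --- Hedged usage sketch (commented out; names below are assumptions) ---
# The Connection constructor arguments and the Instruction members are
# hypothetical and depend on the rest of this package:
#
#     conn = Connection("/dev/ttyUSB0")
#     motor = Motor(conn, id=1, type=MotorType.AX)
#     motor.write(Instruction.GOAL_POSITION, 512)      # byte width picked from the register table
#     print(motor.read(Instruction.PRESENT_POSITION))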
|
python
|
import sys
import os
import time
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3
import tensorflow as tf
import numpy as np
import utils.reader as reader
import models.net as net
import utils.evaluation as eva
#for douban
#import utils.douban_evaluation as eva
import bin.train_and_evaluate as train
import bin.test_and_evaluate as test
# configure
conf = {
"data_path": "./data/douban/data.pkl",
"save_path": "./output/douban/",
"word_emb_init": "./data/douban/word_embedding_debug.pkl",
"init_model": './output/douban/model.ckpt.10', #should be set for test
"rand_seed": None,
"drop_dense": None,
"drop_attention": None,
"is_mask": True,
"is_layer_norm": True,
"is_positional": False,
"stack_num": 5,
"attention_type": "dot",
"learning_rate": 1e-3,
"vocab_size": 256358, #ubuntu: 434512; jdqa: 256358; douban: 172130
"batch_size": 32, #200 for test
"emb_size": 80,
"max_turn_num": 1,
"max_turn_len": 30,
"max_to_keep": 1,
"num_scan_data": 2,
"_EOS_": 1, #1 for douban data 28270 for ubuntu
"final_n_class": 1,
}
model = net.Net(conf)
#train.train(conf, model)
#test and evaluation, init_model in conf should be set
test.test(conf, model)
|
python
|
# -*- coding: utf-8 -*-
from importlib import import_module
from wsgiref.util import FileWrapper
from django.conf import settings
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.encoding import smart_text, smart_bytes
class DjangoDownloadHttpResponse(StreamingHttpResponse):
def __init__(self, path, name=None):
"""
:type path: bviewer.core.files.path.ImagePath
"""
wrapper = FileWrapper(path.cache_open(mode='rb', temp=False))
super(DjangoDownloadHttpResponse, self).__init__(wrapper)
name = name or path.name
self['Content-Type'] = path.content_type
self['Content-Disposition'] = smart_bytes(smart_text('attachment; filename="{0}"').format(name))
self['Content-Length'] = path.cache_size
class NginxDownloadHttpResponse(HttpResponse):
def __init__(self, path, name=None):
"""
:type path: bviewer.core.files.path.ImagePath
"""
super(NginxDownloadHttpResponse, self).__init__()
name = name or path.name
url = settings.VIEWER_DOWNLOAD_RESPONSE['INTERNAL_URL'] + '/' + smart_text(path.url)
self['X-Accel-Charset'] = 'utf-8'
self['X-Accel-Redirect'] = smart_bytes(url)
self['Content-Type'] = path.content_type
self['Content-Disposition'] = smart_bytes(smart_text('attachment; filename="{0}"').format(name))
django = DjangoDownloadHttpResponse
nginx = NginxDownloadHttpResponse
def download_response(path, name=None):
"""
:type path: bviewer.core.files.path.ImagePath
"""
import_path = settings.VIEWER_DOWNLOAD_RESPONSE['BACKEND']
module_path, class_name = import_path.rsplit('.', 1)
module = import_module(module_path)
clazz = getattr(module, class_name)
return clazz(path, name=name)
|
python
|
from ..IPacket import IPacket
from ..DataStructures.GamePoint import GamePoint
class UseItem(IPacket):
"Sent by client to use an item"
def __init__(self):
self.time = 0
self.slotID = 0
self.init = GamePoint(0,0)
self.target = GamePoint(0,0)
self.projectileID = 0
def Read(self, r):
self.time = r.ReadInt32()
self.slotID = r.ReadByte()
self.init.Read(r)
self.target.Read(r)
self.projectileID = r.ReadUInt16()
def Write(self, w):
w.WriteInt32(self.time)
w.WriteByte(self.slotID)
self.init.Write(w)
self.target.Write(w)
w.WriteUInt16(self.projectileID)
def GetType(self):
return super().GetType()
def PrintString(self):
super().PrintString()
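# Wire layout implied by Read/Write above: int32 time, uint8 slotID,
# two GamePoint structures (init, target), then uint16 projectileID.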
|
python
|
import json
import sqlite3
with open('../data_retrieval/acm/acm_paper_doi.json') as data_file:
data = json.load(data_file)
connection = sqlite3.connect('scholarDB.db')
with connection:
cursor = connection.cursor()
count = 0
for row in data:
journal_category = row['journal category']
abstract = row['abstract']
volume = row['volume']
main_author = row['author']
authors = ""
for author in row['other_authors']:
authors += author
if author != row['other_authors'][-1]:
authors += ','
title = row['title']
citation_count = row['citation count']
journal_category_description = row['journal category_description']
pdf_link = row['pdf link']
download_count = row['download count']
time_added = row['time added']
doi = row['doi']
cursor.execute('insert into acm values(?,?,?,?,?,?,?,?,?,?,?,?)', [journal_category, abstract, volume, main_author, title, citation_count, journal_category_description, pdf_link, download_count, time_added, authors, doi])
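# Hedged note: the 12-column insert above assumes an existing `acm` table with
# columns in this order (column names are illustrative, not taken from the
# original schema):
#
#   CREATE TABLE IF NOT EXISTS acm (
#       journal_category TEXT, abstract TEXT, volume TEXT, main_author TEXT,
#       title TEXT, citation_count TEXT, journal_category_description TEXT,
#       pdf_link TEXT, download_count TEXT, time_added TEXT,
#       other_authors TEXT, doi TEXT
#   );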
|
python
|
import torch.nn as nn
from mmcv.cnn import ConvModule, constant_init
from mmedit.models.common import GCAModule
from mmedit.models.registry import COMPONENTS
from ..encoders.resnet_enc import BasicBlock
class BasicBlockDec(BasicBlock):
"""Basic residual block for decoder.
For decoder, we use ConvTranspose2d with kernel_size 4 and padding 1 for
conv1. And the output channel of conv1 is modified from `out_channels` to
`in_channels`.
"""
def build_conv1(self, in_channels, out_channels, kernel_size, stride,
conv_cfg, norm_cfg, act_cfg, with_spectral_norm):
"""Build conv1 of the block.
Args:
in_channels (int): The input channels of the ConvModule.
out_channels (int): The output channels of the ConvModule.
kernel_size (int): The kernel size of the ConvModule.
stride (int): The stride of the ConvModule. If stride is set to 2,
then ``conv_cfg`` will be overwritten as
``dict(type='Deconv')`` and ``kernel_size`` will be overwritten
as 4.
conv_cfg (dict): The conv config of the ConvModule.
norm_cfg (dict): The norm config of the ConvModule.
act_cfg (dict): The activation config of the ConvModule.
with_spectral_norm (bool): Whether use spectral norm.
Returns:
nn.Module: The built ConvModule.
"""
if stride == 2:
conv_cfg = dict(type='Deconv')
kernel_size = 4
padding = 1
else:
padding = kernel_size // 2
return ConvModule(
in_channels,
in_channels,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_spectral_norm=with_spectral_norm)
def build_conv2(self, in_channels, out_channels, kernel_size, conv_cfg,
norm_cfg, with_spectral_norm):
"""Build conv2 of the block.
Args:
in_channels (int): The input channels of the ConvModule.
out_channels (int): The output channels of the ConvModule.
kernel_size (int): The kernel size of the ConvModule.
conv_cfg (dict): The conv config of the ConvModule.
norm_cfg (dict): The norm config of the ConvModule.
with_spectral_norm (bool): Whether use spectral norm.
Returns:
nn.Module: The built ConvModule.
"""
return ConvModule(
in_channels,
out_channels,
kernel_size,
stride=1,
padding=kernel_size // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None,
with_spectral_norm=with_spectral_norm)
@COMPONENTS.register_module()
class ResNetDec(nn.Module):
"""ResNet decoder for image matting.
This class is adopted from https://github.com/Yaoyi-Li/GCA-Matting.
Args:
block (str): Type of residual block. Currently only `BasicBlockDec` is
implemented.
layers (list[int]): Number of layers in each block.
in_channels (int): Channel num of input features.
kernel_size (int): Kernel size of the conv layers in the decoder.
conv_cfg (dict): dictionary to construct convolution layer. If it is
None, 2d convolution will be applied. Default: None.
norm_cfg (dict): Config dict for normalization layer. "BN" by default.
        act_cfg (dict): Config dict for activation layer, "LeakyReLU" by
            default.
with_spectral_norm (bool): Whether use spectral norm after conv.
Default: False.
late_downsample (bool): Whether to adopt late downsample strategy,
Default: False.
"""
def __init__(self,
block,
layers,
in_channels,
kernel_size=3,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(
type='LeakyReLU', negative_slope=0.2, inplace=True),
with_spectral_norm=False,
late_downsample=False):
super(ResNetDec, self).__init__()
if block == 'BasicBlockDec':
block = BasicBlockDec
else:
raise NotImplementedError(f'{block} is not implemented.')
self.kernel_size = kernel_size
self.inplanes = in_channels
self.midplanes = 64 if late_downsample else 32
self.layer1 = self._make_layer(block, 256, layers[0], conv_cfg,
norm_cfg, act_cfg, with_spectral_norm)
self.layer2 = self._make_layer(block, 128, layers[1], conv_cfg,
norm_cfg, act_cfg, with_spectral_norm)
self.layer3 = self._make_layer(block, 64, layers[2], conv_cfg,
norm_cfg, act_cfg, with_spectral_norm)
self.layer4 = self._make_layer(block, self.midplanes, layers[3],
conv_cfg, norm_cfg, act_cfg,
with_spectral_norm)
self.conv1 = ConvModule(
self.midplanes,
32,
4,
stride=2,
padding=1,
conv_cfg=dict(type='Deconv'),
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_spectral_norm=with_spectral_norm)
self.conv2 = ConvModule(
32,
1,
self.kernel_size,
padding=self.kernel_size // 2,
act_cfg=None)
def init_weights(self):
"""Init weights for the module.
"""
for m in self.modules():
if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
constant_init(m.weight, 1)
constant_init(m.bias, 0)
# Zero-initialize the last BN in each residual branch, so that the
# residual branch starts with zeros, and each residual block behaves
# like an identity. This improves the model by 0.2~0.3% according to
# https://arxiv.org/abs/1706.02677
for m in self.modules():
if isinstance(m, BasicBlockDec):
constant_init(m.conv2.bn.weight, 0)
def _make_layer(self, block, planes, num_blocks, conv_cfg, norm_cfg,
act_cfg, with_spectral_norm):
upsample = nn.Sequential(
nn.UpsamplingNearest2d(scale_factor=2),
ConvModule(
self.inplanes,
planes * block.expansion,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None,
with_spectral_norm=with_spectral_norm))
layers = [
block(
self.inplanes,
planes,
kernel_size=self.kernel_size,
stride=2,
interpolation=upsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_spectral_norm=with_spectral_norm)
]
self.inplanes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(
block(
self.inplanes,
planes,
kernel_size=self.kernel_size,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
with_spectral_norm=with_spectral_norm))
return nn.Sequential(*layers)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (N, C, H, W).
Returns:
Tensor: Output tensor.
"""
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.conv1(x)
x = self.conv2(x)
return x
@COMPONENTS.register_module()
class ResShortcutDec(ResNetDec):
"""ResNet decoder for image matting with shortcut connection.
::
feat1 --------------------------- conv2 --- out
|
feat2 ---------------------- conv1
|
feat3 ----------------- layer4
|
feat4 ------------ layer3
|
feat5 ------- layer2
|
out --- layer1
Args:
block (str): Type of residual block. Currently only `BasicBlockDec` is
implemented.
layers (list[int]): Number of layers in each block.
in_channels (int): Channel number of input features.
kernel_size (int): Kernel size of the conv layers in the decoder.
conv_cfg (dict): Dictionary to construct convolution layer. If it is
None, 2d convolution will be applied. Default: None.
norm_cfg (dict): Config dict for normalization layer. "BN" by default.
        act_cfg (dict): Config dict for activation layer, "LeakyReLU" by
            default.
late_downsample (bool): Whether to adopt late downsample strategy,
Default: False.
"""
def forward(self, inputs):
"""Forward function of resnet shortcut decoder.
Args:
inputs (dict): Output dictionary of the ResNetEnc containing:
- out (Tensor): Output of the ResNetEnc.
- feat1 (Tensor): Shortcut connection from input image.
- feat2 (Tensor): Shortcut connection from conv2 of ResNetEnc.
- feat3 (Tensor): Shortcut connection from layer1 of ResNetEnc.
- feat4 (Tensor): Shortcut connection from layer2 of ResNetEnc.
- feat5 (Tensor): Shortcut connection from layer3 of ResNetEnc.
Returns:
Tensor: Output tensor.
"""
feat1 = inputs['feat1']
feat2 = inputs['feat2']
feat3 = inputs['feat3']
feat4 = inputs['feat4']
feat5 = inputs['feat5']
x = inputs['out']
x = self.layer1(x) + feat5
x = self.layer2(x) + feat4
x = self.layer3(x) + feat3
x = self.layer4(x) + feat2
x = self.conv1(x) + feat1
x = self.conv2(x)
return x
@COMPONENTS.register_module()
class ResGCADecoder(ResShortcutDec):
"""ResNet decoder with shortcut connection and gca module.
::
feat1 ---------------------------------------- conv2 --- out
|
feat2 ----------------------------------- conv1
|
feat3 ------------------------------ layer4
|
feat4, img_feat -- gca_module - layer3
|
feat5 ------- layer2
|
out --- layer1
* gca module also requires unknown tensor generated by trimap which is \
ignored in the above graph.
Args:
block (str): Type of residual block. Currently only `BasicBlockDec` is
implemented.
layers (list[int]): Number of layers in each block.
in_channels (int): Channel number of input features.
kernel_size (int): Kernel size of the conv layers in the decoder.
conv_cfg (dict): Dictionary to construct convolution layer. If it is
None, 2d convolution will be applied. Default: None.
norm_cfg (dict): Config dict for normalization layer. "BN" by default.
        act_cfg (dict): Config dict for activation layer, "LeakyReLU" by
            default.
late_downsample (bool): Whether to adopt late downsample strategy,
Default: False.
"""
def __init__(self,
block,
layers,
in_channels,
kernel_size=3,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(
type='LeakyReLU', negative_slope=0.2, inplace=True),
with_spectral_norm=False,
late_downsample=False):
super(ResGCADecoder,
self).__init__(block, layers, in_channels, kernel_size, conv_cfg,
norm_cfg, act_cfg, with_spectral_norm,
late_downsample)
self.gca = GCAModule(128, 128)
def forward(self, inputs):
"""Forward function of resnet shortcut decoder.
Args:
inputs (dict): Output dictionary of the ResGCAEncoder containing:
- out (Tensor): Output of the ResGCAEncoder.
- feat1 (Tensor): Shortcut connection from input image.
- feat2 (Tensor): Shortcut connection from conv2 of \
ResGCAEncoder.
- feat3 (Tensor): Shortcut connection from layer1 of \
ResGCAEncoder.
- feat4 (Tensor): Shortcut connection from layer2 of \
ResGCAEncoder.
- feat5 (Tensor): Shortcut connection from layer3 of \
ResGCAEncoder.
- img_feat (Tensor): Image feature extracted by guidance head.
- unknown (Tensor): Unknown tensor generated by trimap.
Returns:
Tensor: Output tensor.
"""
img_feat = inputs['img_feat']
unknown = inputs['unknown']
feat1 = inputs['feat1']
feat2 = inputs['feat2']
feat3 = inputs['feat3']
feat4 = inputs['feat4']
feat5 = inputs['feat5']
x = inputs['out']
x = self.layer1(x) + feat5
x = self.layer2(x) + feat4
x = self.gca(img_feat, x, unknown)
x = self.layer3(x) + feat3
x = self.layer4(x) + feat2
x = self.conv1(x) + feat1
x = self.conv2(x)
return x
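# --- Hedged usage sketch (commented out; channel counts and shapes are assumptions) ---
# A GCA-style decoder is normally paired with the matching encoder; as a rough
# illustration, a plain ResNetDec with two blocks per stage could be driven as:
#
#     import torch
#     decoder = ResNetDec('BasicBlockDec', [2, 2, 2, 2], in_channels=512)
#     decoder.init_weights()
#     alpha = decoder(torch.randn(1, 512, 16, 16))   # upsampled single-channel output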
|
python
|
from http import HTTPStatus
from flask import Blueprint, request
from injector import inject
from edu_loan.config.dependencies import Application
from edu_loan.domain.event_flow_service import EventFlowService, EventFlowServiceException
from edu_loan.domain.serializers import SerializerException, EventFlowSerializer
class EventFlowEndpoint:
@inject
def __init__(self, app: Application, event_flow_service: EventFlowService):
self.app = app
self.event_flow_service = event_flow_service
def register_endpoints(self):
app_bp = Blueprint('EventFlowApp', __name__)
@self.app.route('/api/v1/event-flow', methods=['POST'])
def add_event_flow():
try:
serializer = EventFlowSerializer().load(data=request.get_json())
self.event_flow_service.add_event_flow(serializer.get('event_flow'))
except (EventFlowServiceException, SerializerException) as ex:
return {'error': str(ex)}, HTTPStatus.BAD_REQUEST
return {'success': True}, HTTPStatus.CREATED
return app_bp
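# Hedged example request for the endpoint above; the exact payload schema is
# defined by EventFlowSerializer, so the field value here is illustrative only:
#
#   POST /api/v1/event-flow
#   {"event_flow": ["step-1", "step-2"]}
#
# A successful call returns {"success": true} with HTTP 201.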
|
python
|
print('Hello, World!')
print()
print(1 + 2)
print(7 * 6)
print()
print("The end", "or is it?", "keep watching to learn more about Python 3")
|
python
|
import typing
import pytest
from terraform import schemas, unknowns
@pytest.mark.parametrize(
"schema,value,expected_value",
[
pytest.param(schemas.Block(), None, None, id="empty",),
pytest.param(
schemas.Block(
attributes={
"foo": schemas.Attribute(type="string", optional=True),
"bar": schemas.Attribute(type="string", computed=True),
},
block_types={
"baz": schemas.NestedBlock(
nesting=schemas.NestingMode.SINGLE,
block=schemas.Block(
attributes={
"boz": schemas.Attribute(
type="string", optional=True, computed=True
),
"biz": schemas.Attribute(
type="string", optional=True, computed=True
),
}
),
)
},
),
None,
{"foo": None, "bar": unknowns.UNKNOWN},
id="no prior",
),
pytest.param(
schemas.Block(
attributes={"foo": schemas.Attribute(type="string")},
block_types={
"baz": schemas.NestedBlock(
nesting=schemas.NestingMode.SET,
block=schemas.Block(
attributes={
"boz": schemas.Attribute(
type="string", optional=True, computed=True
),
}
),
)
},
),
None,
None,
id="null stays null",
),
pytest.param(
schemas.Block(
attributes={"foo": schemas.Attribute(type="string", computed=True)},
block_types={
"baz": schemas.NestedBlock(
nesting=schemas.NestingMode.SET,
block=schemas.Block(
attributes={
"boz": schemas.Attribute(
type="string", optional=True, computed=True
),
}
),
)
},
),
None,
{"foo": unknowns.UNKNOWN},
id="no prior with set",
),
pytest.param(
schemas.Block(
attributes={
"foo": schemas.Attribute(type="string", optional=True),
"bar": schemas.Attribute(type="string", computed=True),
"baz": schemas.Attribute(
type="string", optional=True, computed=True
),
"boz": schemas.Attribute(
type="string", optional=True, computed=True
),
},
),
{"foo": "bonjour", "bar": "petit dejeuner", "baz": "grande dejeuner"},
{
"foo": "bonjour",
"bar": "petit dejeuner",
"baz": "grande dejeuner",
"boz": unknowns.UNKNOWN,
},
id="prior attributes",
),
pytest.param(
schemas.Block(
block_types={
"foo": schemas.NestedBlock(
nesting=schemas.NestingMode.SINGLE,
block=schemas.Block(
attributes={
"bar": schemas.Attribute(
type="string", optional=True, computed=True
),
"baz": schemas.Attribute(
type="string", optional=True, computed=True
),
}
),
)
}
),
{"foo": {"bar": "beep"}},
{"foo": {"bar": "beep", "baz": unknowns.UNKNOWN}},
id="prior nested single",
),
pytest.param(
schemas.Block(
block_types={
"foo": schemas.NestedBlock(
nesting=schemas.NestingMode.LIST,
block=schemas.Block(
attributes={
"bar": schemas.Attribute(
type="string", optional=True, computed=True
),
"baz": schemas.Attribute(
type="string", optional=True, computed=True
),
}
),
)
}
),
{"foo": [{"bar": "bap"}, {"bar": "blep"}]},
{
"foo": [
{"bar": "bap", "baz": unknowns.UNKNOWN},
{"bar": "blep", "baz": unknowns.UNKNOWN},
]
},
id="prior nested list",
),
pytest.param(
schemas.Block(
block_types={
"foo": schemas.NestedBlock(
nesting=schemas.NestingMode.MAP,
block=schemas.Block(
attributes={
"bar": schemas.Attribute(
type="string", optional=True, computed=True
),
"baz": schemas.Attribute(
type="string", optional=True, computed=True
),
}
),
)
}
),
{
"foo": {
"a": {"bar": None, "baz": "boop"},
"b": {"bar": "blep", "baz": None},
}
},
{
"foo": {
"a": {"bar": unknowns.UNKNOWN, "baz": "boop"},
"b": {"bar": "blep", "baz": unknowns.UNKNOWN},
}
},
id="prior nested map",
),
pytest.param(
schemas.Block(
block_types={
"foo": schemas.NestedBlock(
nesting=schemas.NestingMode.SET,
block=schemas.Block(
attributes={
"bar": schemas.Attribute(type="string", optional=True),
"baz": schemas.Attribute(
type="string", optional=True, computed=True
),
}
),
)
}
),
{"foo": [{"bar": "blep", "baz": None}, {"bar": "boop", "baz": None}]},
{
"foo": [
{"bar": "blep", "baz": unknowns.UNKNOWN},
{"bar": "boop", "baz": unknowns.UNKNOWN},
]
},
id="prior nested set",
),
pytest.param(
schemas.Block(
block_types={
"foo": schemas.NestedBlock(
nesting=schemas.NestingMode.SET,
block=schemas.Block(
attributes={
"bar": schemas.Attribute(type="string", optional=True),
"baz": schemas.Attribute(
type="string", optional=True, computed=True
),
}
),
)
}
),
{
"foo": [
{"bar": "boop", "baz": None},
{"bar": "boop", "baz": unknowns.UNKNOWN},
]
},
{
"foo": [
{"bar": "boop", "baz": unknowns.UNKNOWN},
{"bar": "boop", "baz": unknowns.UNKNOWN},
]
},
id="sets differing only by unknown",
),
],
)
def test_set_unknowns(
schema: schemas.Block,
value: typing.Dict[str, typing.Any],
expected_value: typing.Dict[str, typing.Any],
):
assert unknowns.set_unknowns(value=value, schema=schema) == expected_value
|
python
|
import os
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='filter_pandoc_run_py',
version='0.6.2',
description="Pandoc filter to run python code blocks",
long_description=long_description,
url='https://github.com/caiofcm/filter_pandoc_run_py',
download_url='https://github.com/caiofcm/filter_pandoc_run_py/archive/0.1.tar.gz',
author='Caio Marcellos',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
install_requires=['pandocfilters', 'matplotlib'],
keywords='pandoc filters markdown python notes',
zip_safe=False,
py_modules=["filter_pandoc_run_py.filter_pandoc_run_py"],
entry_points={
'console_scripts': [
'filter_pandoc_run_py = filter_pandoc_run_py.filter_pandoc_run_py:main',
],
},
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
setup_requires=['pytest-runner'],
tests_require=['pytest', 'coverage'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License", # to be reviewed
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Documentation',
'Topic :: Text Processing :: Filters',
]
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
)
|
python
|
'''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: Rahul Verma
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from arjuna.engine.unitee.enums import *
class Kallable:
def __init__(self, obj):
self.obj = obj
self.process()
self.args = []
def process(self):
*pkg_parts, self.module = self.obj.__module__.split(".")
self.pkg = ".".join(pkg_parts)
self.name = self.obj.__name__
self.qname = ".".join([self.pkg, self.module, self.name])
def __call__(self):
self.obj(*self.args)
class TestModule(Kallable):
def process(self):
*pkg_parts, self.module = self.obj.__name__.split(".")
self.pkg = ".".join(pkg_parts)
self.name = self.module
self.qname = self.obj.__name__
class _TestFunction(Kallable):
pass
class _DataSourceClass(Kallable):
pass
class _DataSourceFunction(Kallable):
pass
class _FixtureFunction(Kallable):
def __init__(self, func, dec_name):
super().__init__(func)
self.type = FixtureTypeEnum[dec_name.upper()]
self.func = func
class _DepModule(Kallable):
pass
class _DepFunction(Kallable):
pass
def __str(obj):
try:
return ".".join([obj.__module__, obj.__qualname__])
except:
return str(obj)
def create_test_module(obj):
return TestModule(obj)
def create_dsource_class(obj):
return _DataSourceClass(obj)
def create_dsource_func(obj):
return _DataSourceFunction(obj)
def create_fixture(dec_name, obj):
return _FixtureFunction(obj, dec_name)
def create_test_func(obj):
return _TestFunction(obj)
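# Hedged illustration of the qualified-name bookkeeping above: wrapping a
# function such as myproj.tests.module.check_login with create_test_func()
# yields pkg == 'myproj.tests', module == 'module', name == 'check_login'
# and qname == 'myproj.tests.module.check_login' (all names hypothetical).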
|
python
|
n = int(input("INSIRA O NUMERO DE TERMOS DA PA: "))
pt = int(input("INSIRA O 1° TERMO DA PA: "))
r = int(input("INSIRA A RAZÃO DA PA: "))
print("*"*40)
print("OS TERMOS DA PA SÃO")
calc = pt + ( n - 1 )*r
for i in range(pt, calc+r, r):
print(f'{i}',end='->')
soma = n * (pt + calc) // 2
print()
print(">A SOMA DOS TERMOS DA PA É:->",soma)
|
python
|
from typing import Any, Dict
import argparse
import logging
import pathlib
import random
import sys
from pprint import pprint
import pandas as pd
import torch as th
import numpy as np
try:
from icecream import install # noqa
install()
except ImportError: # Graceful fallback if IceCream isn't installed.
ic = lambda *a: None if not a else (a[0] if len(a) == 1 else a) # noqa
import wandb
import optuna
from optuna.integration import WeightsAndBiasesCallback
from optuna.integration.skopt import SkoptSampler
from optuna.pruners import BasePruner, MedianPruner, NopPruner, SuccessiveHalvingPruner
from optuna.samplers import BaseSampler, RandomSampler, TPESampler
from optuna.visualization import (
plot_contour,
plot_optimization_history,
plot_parallel_coordinate,
plot_param_importances,
)
try:
from dotenv import load_dotenv, find_dotenv # noqa
load_dotenv(find_dotenv())
except ImportError:
pass
from ail.trainer import Trainer
NetArch = {
"tiny": [32, 32],
"small": [64, 64],
"medium": [128, 128],
"large": [256, 256],
"huge": [512, 512],
}
def CLI():
p = argparse.ArgumentParser()
p.add_argument(
"--env_id",
type=str,
choices=["InvertedPendulum-v2", "HalfCheetah-v2", "Hopper-v3"],
help="Envriment to train on",
)
p.add_argument(
"--algo",
type=str,
choices=[
"ppo",
"sac",
],
help="RL algo to use",
)
p.add_argument("--num_steps", "-n", type=int, default=0.5 * 1e6)
p.add_argument("--rollout_length", type=int, default=None)
p.add_argument("--log_every_n_updates", "-lg", type=int, default=20)
p.add_argument("--eval_interval", type=int, default=5 * 1e3)
p.add_argument("--num_eval_episodes", type=int, default=10)
p.add_argument("--save_freq", type=int, default=50_000)
p.add_argument("--cuda", action="store_true")
p.add_argument("--fp16", action="store_true")
p.add_argument("--seed", type=int, default=0)
p.add_argument("--verbose", type=int, default=1)
p.add_argument("--use_wandb", "-wb", action="store_true")
# Optuna args
p.add_argument("--n_trials", type=int, default=50)
p.add_argument(
"--sampler", type=str, choices=["tpe", "random", "skopt"], default="tpe"
)
p.add_argument(
"--pruner", type=str, choices=["halving", "median", "none"], default="median"
)
p.add_argument("--n_startup_trials", type=int, default=5)
# p.add_argument("--n_evaluations", type=int, default=2)
args = p.parse_args()
args.device = "cuda" if args.cuda else "cpu"
# Enforce type int
args.num_steps = int(args.num_steps)
args.log_every_n_updates = int(args.log_every_n_updates)
return args
def sample_ppo_params(trial) -> Dict[str, Any]:
"""
Sampler for PPO hyperparams.
:param trial:
:return:
"""
buffer_kwargs = dict(
with_reward=True, extra_data=["log_pis"]
) # no need to change this
batch_size = trial.suggest_categorical(
"batch_size", [32, 64, 128, 256, 512, 1024, 2048]
)
gamma = trial.suggest_categorical("gamma", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999])
max_grad_norm = trial.suggest_categorical(
"max_grad_norm", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5]
)
epoch_ppo = trial.suggest_categorical("epoch_ppo", [1, 5, 10, 15, 20])
gae_lambda = trial.suggest_categorical("gae_lambda", [0.9, 0.92, 0.95, 0.98, 0.99])
clip_eps = trial.suggest_categorical("clip_eps", [0.1, 0.2, 0.25, 0.3, 0.4])
coef_ent = trial.suggest_loguniform("ent_coef", 0.00000001, 0.1)
# poliy args: net arch, activation, lr
policy_kwargs = dict(
pi=NetArch[trial.suggest_categorical("pi", ["small", "medium", "large"])],
vf=NetArch[trial.suggest_categorical("vf", ["small", "medium", "large"])],
activation=trial.suggest_categorical("activation", ["relu", "tanh"]),
critic_type="V",
lr_actor=trial.suggest_loguniform("lr_actor", 1e-4, 5e-3),
lr_critic=trial.suggest_loguniform("lr_critic", 1e-4, 5e-3),
orthogonal_init=trial.suggest_categorical("orthogonal_init", [True, False]),
)
optim_kwargs = {
"optim_cls": trial.suggest_categorical("optim_cls", ["Adam", "AdamW"]),
"optim_set_to_none": True,
}
ppo_hparams = {
"buffer_kwargs": buffer_kwargs,
"batch_size": batch_size,
"gamma": gamma,
"max_grad_norm": max_grad_norm,
"epoch_ppo": epoch_ppo,
"gae_lambda": gae_lambda,
"clip_eps": clip_eps,
"coef_ent": coef_ent,
"policy_kwargs": policy_kwargs,
"optim_kwargs": optim_kwargs,
}
return ppo_hparams
def create_sampler(sampler: str) -> BaseSampler:
# n_warmup_steps: Disable pruner until the trial reaches the given number of step.
if sampler == "random":
sampler = RandomSampler(seed=args.seed)
elif sampler == "tpe":
sampler = TPESampler(n_startup_trials=args.n_startup_trials, seed=args.seed)
elif sampler == "skopt":
# cf https://scikit-optimize.github.io/#skopt.Optimizer
# GP: gaussian process
# Gradient boosted regression: GBRT
sampler = SkoptSampler(
skopt_kwargs={"base_estimator": "GP", "acq_func": "gp_hedge"}
)
else:
raise ValueError(f"Unknown sampler: {args.sampler}")
return sampler
def create_pruner(pruner: str) -> BasePruner:
if pruner == "halving":
pruner = SuccessiveHalvingPruner(
min_resource=1, reduction_factor=4, min_early_stopping_rate=0
)
elif pruner == "median":
# Do not prune before 1/3 of the max budget is used
pruner = MedianPruner(
n_startup_trials=args.n_startup_trials,
n_warmup_steps=args.num_steps // 3,
)
elif pruner == "none":
# Do not prune
pruner = NopPruner()
else:
raise ValueError(f"Unknown pruner: {args.pruner}")
return pruner
# def objective(trial):
# """Training Configuration"""
# algo_kwargs = dict(
# # common args
# device=args.device,
# fp16=args.fp16,
# seed=args.seed,
# )
# if args.algo.lower() == "ppo":
# ppo_kwargs = sample_ppo_params(trial).copy()
# algo_kwargs.update(ppo_kwargs)
# elif args.algo.lower() == "sac":
# pass
# config = dict(
# total_timesteps=args.num_steps,
# env=args.env_id,
# algo=args.algo,
# algo_kwargs=algo_kwargs,
# env_kwargs={"env_wrapper": ["clip_act"]},
# test_env_kwargs={"env_wrapper": ["clip_act"]},
# max_ep_len=args.rollout_length,
# seed=args.seed,
# eval_interval=args.eval_interval,
# num_eval_episodes=args.num_eval_episodes,
# save_freq=args.save_freq,
# log_dir="",
# log_interval=5_000,
# verbose=args.verbose,
# use_wandb=False,
# wandb_kwargs={},
# use_optuna=True,
# trial=trial,
# )
# trainer = Trainer(**config)
# try:
# trainer.run_training_loop()
# except AssertionError as e:
# # Sometimes, random hyperparams can generate NaN
# print(e)
# raise optuna.exceptions.TrialPruned()
# finally:
# # Free memory
# try:
# trainer.env.close()
# trainer.env_test.close()
# except EOFError:
# pass
# return trainer.get_records()
class Objective(object):
def __init__(self, args):
# Setting the seed to always get the same regression.
random.seed(args.seed)
np.random.seed(args.seed)
self.args = args
self.use_wandb = False
if self.args.use_wandb:
wandb.login()
def __call__(self, trial):
config = self.get_config(trial).copy()
trainer = Trainer(**config)
if self.args.use_wandb:
pass
# # Create the summary run.
# run = wandb.init(project="optuna",
# name=f"trial_",
# group="sampling",
# config=config["algo_kwargs"],
# reinit=True
# )
try:
trainer.run_training_loop()
except AssertionError as e:
# Sometimes, random hyperparams can generate NaN
print(e)
raise optuna.exceptions.TrialPruned()
finally:
# Free memory
try:
trainer.env.close()
trainer.env_test.close()
except EOFError:
pass
rews = trainer.get_records()
# # WandB logging.
# with run:
# run.log({"rews": rews}, step=trial.number)
return rews
def get_config(self, trial):
"""Training Configuration"""
algo_kwargs = dict(
# common args
device=self.args.device,
fp16=self.args.fp16,
seed=self.args.seed,
)
if self.args.algo.lower() == "ppo":
ppo_kwargs = sample_ppo_params(trial).copy()
algo_kwargs.update(ppo_kwargs)
elif self.args.algo.lower() == "sac":
pass
config = dict(
total_timesteps=self.args.num_steps,
env=self.args.env_id,
algo=self.args.algo,
algo_kwargs=algo_kwargs,
env_kwargs={"env_wrapper": ["clip_act"]},
test_env_kwargs={"env_wrapper": ["clip_act"]},
max_ep_len=self.args.rollout_length,
seed=self.args.seed,
eval_interval=self.args.eval_interval,
num_eval_episodes=self.args.num_eval_episodes,
save_freq=self.args.save_freq,
log_dir="",
log_interval=5_000,
verbose=self.args.verbose,
use_wandb=False,
wandb_kwargs={},
use_optuna=True,
trial=trial,
)
return config
if __name__ == "__main__":
# Set to display all dataframes in pandas
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
# Set pytorch num threads to 1 for faster training
th.set_num_threads(1)
    # command line arguments
args = CLI()
# Path
path = pathlib.Path(__file__).parent.resolve()
# Optuna setups
callbacks = []
sampler = create_sampler(args.sampler)
pruner = create_pruner(args.pruner)
objective = Objective(args)
# Add stream handler of stdout to show the messages
optuna.logging.get_logger("optuna").addHandler(logging.StreamHandler(sys.stdout))
study = optuna.create_study(
study_name="ppo",
direction="maximize",
pruner=pruner,
sampler=sampler,
load_if_exists=True,
)
try:
study.optimize(
objective,
n_trials=args.n_trials,
n_jobs=1,
show_progress_bar=True,
callbacks=callbacks,
gc_after_trial=True,
)
except KeyboardInterrupt:
pass
pruned_trials = [
t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED
]
complete_trials = [
t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE
]
print("\nStudy statistics: ")
print(" Number of finished trials: ", len(study.trials))
print(" Number of pruned trials: ", len(pruned_trials))
print(" Number of complete trials: ", len(complete_trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(f" {key}: {value}")
print("\n")
df = study.trials_dataframe(attrs=("number", "value", "params", "state"))
df.to_csv(path / "ppo_trials.csv", index=False)
pprint(df)
# Plot optimization result
try:
fig1 = plot_optimization_history(study)
fig2 = plot_param_importances(study)
fig3 = plot_parallel_coordinate(study)
fig4 = plot_contour(study)
fig1.show()
fig2.show()
fig3.show()
fig4.show()
except (ValueError, ImportError, RuntimeError):
pass
# # Getting the study trials.
# trials = study.trials
# # Create the summary run.
# summary = wandb.init(project="optuna",
# name="summary",
# job_type="logging")
# # WandB summary.
# for step, trial in enumerate(trials):
# # Logging the loss.
# summary.log({"rews": trial.value}, step=step)
# # Logging the parameters.
# for k, v in trial.params.items():
# summary.log({k: v}, step=step)
if True:
pass
# elif rl_algo == "sac":
# sac_kwargs = dict(
# # buffer args
# batch_size=args.batch_size,
# buffer_size=cfg.SAC.buffer_size,
# buffer_kwargs=dict(
# with_reward=cfg.SAC.with_reward,
# extra_data=cfg.SAC.extra_data),
# # SAC only args
# start_steps=cfg.SAC.start_steps,
# lr_alpha=cfg.SAC.lr_alpha,
# log_alpha_init=cfg.SAC.log_alpha_init,
# tau=cfg.SAC.tau,
# # * Recommend to sync following two params to reduce overhead
# num_gradient_steps=cfg.SAC.num_gradient_steps, # ! slow O(n)
# target_update_interval=cfg.SAC.target_update_interval,
# # poliy args: net arch, activation, lr
# policy_kwargs=dict(
# pi=cfg.SAC.pi,
# qf=cfg.SAC.qf,
# activation=cfg.SAC.activation,
# critic_type=cfg.SAC.critic_type,
# lr_actor=cfg.SAC.lr_actor,
# lr_critic=cfg.SAC.lr_critic,
# ),
# )
# algo_kwargs.update(sac_kwargs)
# ppo_kwargs = None
|
python
|
# pylint: disable=invalid-name,missing-docstring,protected-access
# Generated by Django 2.2.13 on 2020-06-26 20:41
from django.db import migrations
from django.db import models
import leaderboard.models
class Migration(migrations.Migration):
dependencies = [
('leaderboard', '0014_submission_score_chrf'),
]
operations = [
migrations.AddField(
model_name='team',
name='is_flagged',
field=models.BooleanField(
db_index=True, default=False, help_text='Is flagged?'
),
),
migrations.AddField(
model_name='team',
name='is_removed',
field=models.BooleanField(
db_index=True, default=False, help_text='Is removed?'
),
),
migrations.AlterField(
model_name='submission',
name='sgml_file',
field=models.FileField(
help_text='SGML file containing submission output',
null=True,
upload_to=leaderboard.models._get_submission_upload_path,
validators=[leaderboard.models.validate_sgml_schema],
),
),
migrations.AlterField(
model_name='team',
name='is_active',
field=models.BooleanField(
db_index=True, default=False, help_text='Is active?'
),
),
migrations.AlterField(
model_name='team',
name='is_verified',
field=models.BooleanField(
db_index=True, default=False, help_text='Is verified?'
),
),
]
|
python
|
import config
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.optimizers import Adam
from sklearn.preprocessing import LabelBinarizer
import numpy as np
cfg = config.Config()
test_data = []
test_imagePaths = []
test_labels = []
# you can change the annotation file here
test_annotations = open('dataset/test.txt', 'r')
model = load_model(cfg.MODEL_PATH)
for f in test_annotations :
annotation = f.split()
basename = annotation[0]
startX = float(annotation[1])
startY = float(annotation[2])
endX = float(annotation[3])
endY = float(annotation[4])
label = int(annotation[5])
image_path = cfg.TEST_PATH + '/' + basename
# load the image
image = load_img(image_path, target_size=(224, 224))
image = img_to_array(image)
# update our list of data, class labels, and
# image paths
test_data.append(image)
test_labels.append(label)
test_imagePaths.append(image_path)
test_annotations.close()
test_data = np.array(test_data, dtype="float32") / 255.0
test_lb = LabelBinarizer()
test_labels = test_lb.fit_transform(test_labels)
# define a dictionary to set the loss methods -- categorical
# cross-entropy for the class label head
losses = {
"class_label": "categorical_crossentropy",
}
# define a dictionary that specifies the weights per loss
lossWeights = {
"class_label": 1.0,
}
# initialize the optimizer, compile the model, and show the model
# summary
opt = Adam(lr=cfg.INIT_LR)
model.compile(loss=losses, optimizer=opt, metrics=["accuracy"], loss_weights=lossWeights)
print(model.summary())
# construct a second dictionary, this one for our target testing
# outputs
testTargets = {
"class_label": test_labels,
}
print('\n\n')
print("Evaluate on test data")
print('\n')
# The results of the test
results = model.evaluate(test_data, testTargets, batch_size=20)
print("test loss, test acc:", results)
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Load cookies from FireFox, to be used by Requests etc.
import sqlite3
from sqlite3 import Error
from sys import platform
import os
def _getFireFoxCookieFile(profileName="default"):
"""Locate FireFox cookies.sqlite file. Supply optional profile name"""
ffbasedir = None
if platform == "darwin": #OSX
ffbasedir = os.path.expanduser("~/Library/Application Support/Firefox/Profiles/")
elif platform == "linux" or platform == "linux2": #Linux
ffbasedir = os.path.expanduser("~/.mozilla/firefox/")
elif platform == "win32": #Windows
ffbasedir = "Destination Unknown. Follow me and lets go to the place where we belong and leave our troubles at home come with me"
else:
raise ValueError("Unsupported platform. I don't know where to find the FireFox profile directory.")
if not os.path.isdir(ffbasedir):
raise ValueError("Unable to load FireFox profiles. FireFox may not be installed.")
    ffdirs = next(os.walk(ffbasedir))[1]
if len(ffdirs) == 0:
raise ValueError("No FireFox profiles available.")
ffprofiles = {}
for d in ffdirs:
name = d.split(".")[1]
ffprofiles[name] = ffbasedir + d + "/cookies.sqlite"
    if ffprofiles.get(profileName) is None:
raise ValueError("Unable to load FireFox profile '%s'" % profileName)
else:
return ffprofiles.get(profileName)
def getcookies(domain, cookieFile=None, profile=None):
"""If file and profile are None attempt to autodetect default Firefox installation
Returns: A dict of cookie name:value"""
if profile:
cookieFile = _getFireFoxCookieFile(profile)
elif cookieFile:
        cookieFile = os.path.expanduser(cookieFile)  # explicit cookie file supplied by the caller
else:
cookieFile = _getFireFoxCookieFile() #Default FireFox profile
    try:
        conn = sqlite3.connect(cookieFile)
    except Error as e:
        print(e)
        raise
cur = conn.cursor()
cur.execute("SELECT name, value FROM moz_cookies WHERE baseDomain=?", (domain,))
rows = cur.fetchall()
cookies = dict(rows)
return cookies
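# --- Hedged usage sketch (the domain value is an assumption) ---
# cookies = getcookies("example.com")
# requests.get("https://example.com/", cookies=cookies)   # e.g. with the requests library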
|
python
|
from .utilities import git_status_check, git_commit, relabel_rs, add_rs_msg
import shutil
import click
import os
@click.command("gnote")
@click.argument("label")
@click.option("-m", "--msg", required=True,
help="msg to add to exp")
def cli(label, msg):
"""creates a new mutation from existing experiment
Parameters
----------
label : str
label for experiment
msg : str
msg added to exp
"""
cwd = os.getcwd()
git_msg = "comment experiment %s\n\n%s" % (label, msg)
# add message
exp_dir = os.path.join(cwd, label)
add_rs_msg(exp_dir, msg)
# git commit
git_commit(cwd, git_msg)
if __name__ == "__main__":
cli()
|
python
|
import sys
import os
import re
import unittest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import cantools
def remove_date_time(string):
return re.sub(r'.* This file was generated.*', '', string)
def read_file(filename):
with open(filename, 'r') as fin:
return remove_date_time(fin.read())
class CanToolsCommandLineTest(unittest.TestCase):
maxDiff = None
def test_decode(self):
argv = ['cantools', 'decode', 'tests/files/dbc/socialledge.dbc']
input_data = """\
vcan0 0C8 [8] F0 00 00 00 00 00 00 00
vcan0 064 [10] F0 01 FF FF FF FF FF FF FF FF
vcan0 ERROR
vcan0 1F4 [4] 01 02 03 04
vcan0 1F4 [3] 01 02 03
vcan0 1F3 [3] 01 02 03
"""
expected_output = """\
vcan0 0C8 [8] F0 00 00 00 00 00 00 00 ::
SENSOR_SONARS(
SENSOR_SONARS_mux: 0,
SENSOR_SONARS_err_count: 15,
SENSOR_SONARS_left: 0.0,
SENSOR_SONARS_middle: 0.0,
SENSOR_SONARS_right: 0.0,
SENSOR_SONARS_rear: 0.0
)
vcan0 064 [10] F0 01 FF FF FF FF FF FF FF FF ::
DRIVER_HEARTBEAT(
DRIVER_HEARTBEAT_cmd: 240
)
vcan0 ERROR
vcan0 1F4 [4] 01 02 03 04 ::
IO_DEBUG(
IO_DEBUG_test_unsigned: 1,
IO_DEBUG_test_enum: 'IO_DEBUG_test2_enum_two',
IO_DEBUG_test_signed: 3,
IO_DEBUG_test_float: 2.0
)
vcan0 1F4 [3] 01 02 03 :: unpack requires at least 32 bits to unpack (got 24)
vcan0 1F3 [3] 01 02 03 :: Unknown frame id 499 (0x1f3)
"""
stdout = StringIO()
with patch('sys.stdin', StringIO(input_data)):
with patch('sys.stdout', stdout):
with patch('sys.argv', argv):
cantools._main()
actual_output = stdout.getvalue()
self.assertEqual(actual_output, expected_output)
def test_single_line_decode(self):
argv = [
'cantools',
'decode',
'--single-line',
'tests/files/dbc/socialledge.dbc'
]
input_data = """\
vcan0 0C8 [8] F0 00 00 00 00 00 00 00
vcan0 064 [10] F0 01 FF FF FF FF FF FF FF FF
vcan0 ERROR
vcan0 1F4 [4] 01 02 03 04
vcan0 1F4 [3] 01 02 03
vcan0 1F3 [3] 01 02 03
"""
expected_output = """\
vcan0 0C8 [8] F0 00 00 00 00 00 00 00 :: SENSOR_SONARS(SENSOR_SONARS_mux: 0, SENSOR_SONARS_err_count: 15, SENSOR_SONARS_left: 0.0, SENSOR_SONARS_middle: 0.0, SENSOR_SONARS_right: 0.0, SENSOR_SONARS_rear: 0.0)
vcan0 064 [10] F0 01 FF FF FF FF FF FF FF FF :: DRIVER_HEARTBEAT(DRIVER_HEARTBEAT_cmd: 240)
vcan0 ERROR
vcan0 1F4 [4] 01 02 03 04 :: IO_DEBUG(IO_DEBUG_test_unsigned: 1, IO_DEBUG_test_enum: 'IO_DEBUG_test2_enum_two', IO_DEBUG_test_signed: 3, IO_DEBUG_test_float: 2.0)
vcan0 1F4 [3] 01 02 03 :: unpack requires at least 32 bits to unpack (got 24)
vcan0 1F3 [3] 01 02 03 :: Unknown frame id 499 (0x1f3)
"""
stdout = StringIO()
with patch('sys.stdin', StringIO(input_data)):
with patch('sys.stdout', stdout):
with patch('sys.argv', argv):
cantools._main()
actual_output = stdout.getvalue()
self.assertEqual(actual_output, expected_output)
def test_dump(self):
argv = [
'cantools',
'dump',
'tests/files/dbc/motohawk.dbc'
]
expected_output = """\
================================= Messages =================================
------------------------------------------------------------------------
Name: ExampleMessage
Id: 0x1f0
Length: 8 bytes
Cycle time: - ms
Senders: PCM1
Layout:
Bit
7 6 5 4 3 2 1 0
+---+---+---+---+---+---+---+---+
0 |<-x|<---------------------x|<--|
+---+---+---+---+---+---+---+---+
| +-- AverageRadius
+-- Enable
+---+---+---+---+---+---+---+---+
1 |-------------------------------|
+---+---+---+---+---+---+---+---+
2 |----------x| | | | | |
B +---+---+---+---+---+---+---+---+
y +-- Temperature
t +---+---+---+---+---+---+---+---+
e 3 | | | | | | | | |
+---+---+---+---+---+---+---+---+
4 | | | | | | | | |
+---+---+---+---+---+---+---+---+
5 | | | | | | | | |
+---+---+---+---+---+---+---+---+
6 | | | | | | | | |
+---+---+---+---+---+---+---+---+
7 | | | | | | | | |
+---+---+---+---+---+---+---+---+
Signal tree:
-- {root}
+-- Enable
+-- AverageRadius
+-- Temperature
Signal choices:
Enable
0 Disabled
1 Enabled
------------------------------------------------------------------------
"""
stdout = StringIO()
with patch('sys.stdout', stdout):
with patch('sys.argv', argv):
cantools._main()
actual_output = stdout.getvalue()
self.assertEqual(actual_output, expected_output)
def test_dump_no_sender(self):
argv = [
'cantools',
'dump',
'--no-strict',
'tests/files/dbc/no_sender.dbc'
]
expected_output = """\
================================= Messages =================================
------------------------------------------------------------------------
Name: Foo
Id: 0x40000000
Length: 0 bytes
Cycle time: - ms
Senders: -
Layout:
Bit
7 6 5 4 3 2 1 0
+---+---+---+---+---+---+---+---+
B 0 |<-----------------------------x|
y +---+---+---+---+---+---+---+---+
t +-- signal_without_sender
e
Signal tree:
-- {root}
+-- signal_without_sender
------------------------------------------------------------------------
"""
stdout = StringIO()
with patch('sys.stdout', stdout):
with patch('sys.argv', argv):
cantools._main()
actual_output = stdout.getvalue()
self.assertEqual(actual_output, expected_output)
def test_dump_signal_choices(self):
argv = [
'cantools',
'dump',
'tests/files/dbc/dump_signal_choices.dbc'
]
expected_output = """\
================================= Messages =================================
------------------------------------------------------------------------
Name: Message0
Id: 0x400
Length: 8 bytes
Cycle time: - ms
Senders: Node0
Layout:
Bit
7 6 5 4 3 2 1 0
+---+---+---+---+---+---+---+---+
0 | | | |<---------x|<-----x|
+---+---+---+---+---+---+---+---+
| +-- FooSignal
+-- BarSignal
+---+---+---+---+---+---+---+---+
1 | | | | | | | | |
+---+---+---+---+---+---+---+---+
B 2 | | | | | | | | |
y +---+---+---+---+---+---+---+---+
t 3 | | | | | | | | |
e +---+---+---+---+---+---+---+---+
4 | | | | | | | | |
+---+---+---+---+---+---+---+---+
5 | | | | | | | | |
+---+---+---+---+---+---+---+---+
6 | | | | | | | | |
+---+---+---+---+---+---+---+---+
7 | | | | | | | | |
+---+---+---+---+---+---+---+---+
Signal tree:
-- {root}
+-- FooSignal
+-- BarSignal
Signal choices:
FooSignal
0 FOO_A
1 FOO_B
2 FOO_C
3 FOO_D
BarSignal
0 BAR_A
1 BAR_B
2 BAR_C
3 BAR_D
4 BAR_E
5 BAR_F
6 BAR_G
7 BAR_H
------------------------------------------------------------------------
"""
stdout = StringIO()
with patch('sys.stdout', stdout):
with patch('sys.argv', argv):
cantools._main()
actual_output = stdout.getvalue()
self.assertEqual(actual_output, expected_output)
def test_dump_j1939(self):
argv = [
'cantools',
'dump',
'tests/files/dbc/j1939.dbc'
]
expected_output = """\
================================= Messages =================================
------------------------------------------------------------------------
Name: Message1
Id: 0x15340201
Priority: 5
PGN: 0x13400
Source: 0x01
Destination: 0x02
Format: PDU 1
Length: 8 bytes
Cycle time: 0 ms
Senders: Node1
Layout:
Bit
7 6 5 4 3 2 1 0
+---+---+---+---+---+---+---+---+
0 |<-----------------------------x|
+---+---+---+---+---+---+---+---+
+-- Signal1
+---+---+---+---+---+---+---+---+
1 | | | | | | | | |
+---+---+---+---+---+---+---+---+
B 2 | | | | | | | | |
y +---+---+---+---+---+---+---+---+
t 3 | | | | | | | | |
e +---+---+---+---+---+---+---+---+
4 | | | | | | | | |
+---+---+---+---+---+---+---+---+
5 | | | | | | | | |
+---+---+---+---+---+---+---+---+
6 | | | | | | | | |
+---+---+---+---+---+---+---+---+
7 | | | | | | | | |
+---+---+---+---+---+---+---+---+
Signal tree:
-- {root}
+-- Signal1
------------------------------------------------------------------------
Name: Message2
Id: 0x15f01002
Priority: 5
PGN: 0x1f010
Source: 0x02
Destination: All
Format: PDU 2
Length: 8 bytes
Cycle time: 0 ms
Senders: Node2
Layout:
Bit
7 6 5 4 3 2 1 0
+---+---+---+---+---+---+---+---+
0 |<-----------------------------x|
+---+---+---+---+---+---+---+---+
+-- Signal2
+---+---+---+---+---+---+---+---+
1 | | | | | | | | |
+---+---+---+---+---+---+---+---+
B 2 | | | | | | | | |
y +---+---+---+---+---+---+---+---+
t 3 | | | | | | | | |
e +---+---+---+---+---+---+---+---+
4 | | | | | | | | |
+---+---+---+---+---+---+---+---+
5 | | | | | | | | |
+---+---+---+---+---+---+---+---+
6 | | | | | | | | |
+---+---+---+---+---+---+---+---+
7 | | | | | | | | |
+---+---+---+---+---+---+---+---+
Signal tree:
-- {root}
+-- Signal2
------------------------------------------------------------------------
"""
stdout = StringIO()
with patch('sys.stdout', stdout):
with patch('sys.argv', argv):
cantools._main()
actual_output = stdout.getvalue()
self.assertEqual(actual_output, expected_output)
def test_convert(self):
# DBC to KCD.
argv = [
'cantools',
'convert',
'tests/files/dbc/motohawk.dbc',
'test_command_line_convert.kcd'
]
if os.path.exists('test_command_line_convert.kcd'):
os.remove('test_command_line_convert.kcd')
with patch('sys.argv', argv):
cantools._main()
db = cantools.database.Database()
db.add_kcd_file('test_command_line_convert.kcd')
self.assertEqual(db.version, '1.0')
# KCD to DBC.
argv = [
'cantools',
'convert',
'test_command_line_convert.kcd',
'test_command_line_convert.dbc'
]
if os.path.exists('test_command_line_convert.dbc'):
os.remove('test_command_line_convert.dbc')
with patch('sys.argv', argv):
cantools._main()
db = cantools.database.Database()
db.add_dbc_file('test_command_line_convert.dbc')
self.assertEqual(db.version, '1.0')
def test_convert_bad_outfile(self):
argv = [
'cantools',
'convert',
'tests/files/dbc/motohawk.dbc',
'test_command_line_convert.foo'
]
with patch('sys.argv', argv):
with self.assertRaises(SystemExit) as cm:
cantools._main()
self.assertEqual(
str(cm.exception),
"error: Unsupported output database format 'foo'.")
def test_generate_c_source(self):
databases = [
'motohawk',
'padding_bit_order',
'vehicle',
'floating_point',
'no_signals',
'choices',
'multiplex',
'multiplex_2',
'signed',
('CamelCaseEmpty', 'camel_case_empty'),
'abs'
]
for database in databases:
if isinstance(database, tuple):
database, basename = database
else:
basename = database
argv = [
'cantools',
'generate_c_source',
'tests/files/dbc/{}.dbc'.format(database)
]
database_h = basename + '.h'
database_c = basename + '.c'
fuzzer_c = basename + '_fuzzer.c'
fuzzer_mk = basename + '_fuzzer.mk'
if os.path.exists(database_h):
os.remove(database_h)
if os.path.exists(database_c):
os.remove(database_c)
if os.path.exists(fuzzer_c):
os.remove(fuzzer_c)
if os.path.exists(fuzzer_mk):
os.remove(fuzzer_mk)
with patch('sys.argv', argv):
cantools._main()
if sys.version_info[0] > 2:
self.assertEqual(read_file('tests/files/c_source/' + database_h),
read_file(database_h))
self.assertEqual(read_file('tests/files/c_source/' + database_c),
read_file(database_c))
self.assertFalse(os.path.exists(fuzzer_c))
self.assertFalse(os.path.exists(fuzzer_mk))
def test_generate_c_source_no_signal_encode_decode(self):
databases = [
'motohawk',
]
for database in databases:
argv = [
'cantools',
'generate_c_source',
'--no-floating-point-numbers',
'tests/files/dbc/{}.dbc'.format(database)
]
database_h = database + '.h'
database_c = database + '.c'
expected_database_h = database + '_no_floating_point_numbers.h'
expected_database_c = database + '_no_floating_point_numbers.c'
if os.path.exists(database_h):
os.remove(database_h)
if os.path.exists(database_c):
os.remove(database_c)
with patch('sys.argv', argv):
cantools._main()
if sys.version_info[0] > 2:
self.assertEqual(
read_file('tests/files/c_source/' + expected_database_h),
read_file(database_h))
self.assertEqual(
read_file('tests/files/c_source/' + expected_database_c),
read_file(database_c))
def test_generate_c_source_database_name(self):
databases = [
'motohawk',
]
for database in databases:
argv = [
'cantools',
'generate_c_source',
'--database-name', 'my_database_name',
'tests/files/dbc/{}.dbc'.format(database)
]
database_h = 'my_database_name.h'
database_c = 'my_database_name.c'
if os.path.exists(database_h):
os.remove(database_h)
if os.path.exists(database_c):
os.remove(database_c)
with patch('sys.argv', argv):
cantools._main()
if sys.version_info[0] > 2:
self.assertEqual(
read_file('tests/files/c_source/' + database_h),
read_file(database_h))
self.assertEqual(
read_file('tests/files/c_source/' + database_c),
read_file(database_c))
def test_generate_c_source_bit_fields(self):
databases = [
'motohawk',
'floating_point',
'signed'
]
for database in databases:
argv = [
'cantools',
'generate_c_source',
'--bit-fields',
'--database-name', '{}_bit_fields'.format(database),
'tests/files/dbc/{}.dbc'.format(database)
]
database_h = database + '_bit_fields.h'
database_c = database + '_bit_fields.c'
if os.path.exists(database_h):
os.remove(database_h)
if os.path.exists(database_c):
os.remove(database_c)
with patch('sys.argv', argv):
cantools._main()
if sys.version_info[0] > 2:
self.assertEqual(
read_file('tests/files/c_source/' + database_h),
read_file(database_h))
self.assertEqual(
read_file('tests/files/c_source/' + database_c),
read_file(database_c))
def test_generate_c_source_generate_fuzzer(self):
argv = [
'cantools',
'generate_c_source',
'--generate-fuzzer',
'tests/files/dbc/multiplex_2.dbc'
]
database_h = 'multiplex_2.h'
database_c = 'multiplex_2.c'
fuzzer_c = 'multiplex_2_fuzzer.c'
fuzzer_mk = 'multiplex_2_fuzzer.mk'
if os.path.exists(database_h):
os.remove(database_h)
if os.path.exists(database_c):
os.remove(database_c)
if os.path.exists(fuzzer_c):
os.remove(fuzzer_c)
if os.path.exists(fuzzer_mk):
os.remove(fuzzer_mk)
with patch('sys.argv', argv):
cantools._main()
if sys.version_info[0] > 2:
self.assertEqual(read_file('tests/files/c_source/' + database_h),
read_file(database_h))
self.assertEqual(read_file('tests/files/c_source/' + database_c),
read_file(database_c))
self.assertEqual(read_file('tests/files/c_source/' + fuzzer_c),
read_file(fuzzer_c))
self.assertEqual(read_file('tests/files/c_source/' + fuzzer_mk),
read_file(fuzzer_mk))
def test_generate_c_source_sym(self):
databases = [
('min-max-only-6.0', 'min_max_only_6_0'),
('letter-terminated-can-id-6.0', 'letter_terminated_can_id_6_0')
]
for database in databases:
if isinstance(database, tuple):
database, basename = database
else:
basename = database
argv = [
'cantools',
'generate_c_source',
'tests/files/sym/{}.sym'.format(database)
]
database_h = basename + '.h'
database_c = basename + '.c'
fuzzer_c = basename + '_fuzzer.c'
fuzzer_mk = basename + '_fuzzer.mk'
if os.path.exists(database_h):
os.remove(database_h)
if os.path.exists(database_c):
os.remove(database_c)
if os.path.exists(fuzzer_c):
os.remove(fuzzer_c)
if os.path.exists(fuzzer_mk):
os.remove(fuzzer_mk)
with patch('sys.argv', argv):
cantools._main()
if sys.version_info[0] > 2:
self.assertEqual(read_file('tests/files/c_source/' + database_h),
read_file(database_h))
self.assertEqual(read_file('tests/files/c_source/' + database_c),
read_file(database_c))
self.assertFalse(os.path.exists(fuzzer_c))
self.assertFalse(os.path.exists(fuzzer_mk))
if __name__ == '__main__':
unittest.main()
|
python
|
import math
import numpy as np
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x1 = 2.
x = np.linspace(0, x1, 100)
ax.plot(x, np.exp(x), linewidth=2, label = '$x(t)$')
N = 4
h = x1 / N
sx = np.linspace(0, x1, N + 1)
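# Explicit Euler for x' = x with step h gives x_{n+1} = x_n + h * x_n = (1 + h) * x_n,
# so the discrete approximation below is simply x_n = (1 + h)**n.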
sy = [(1 + h)**n for n in range(N + 1)]
ax.plot(sx, sy, marker='.', markersize=10, label='$x_i$')
for i in range(1, N):
ax.plot(x, np.exp(x) * sy[i] / math.exp(sx[i]), '--')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_bounds(0, x1)
plt.tick_params(
axis='y',
which='both',
    left=True,
    right=False,
    labelleft=False)
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(sx)
ax.set_xticklabels(["$t_{}$".format(i) for i in range(N+1)])
ax.set_xlim((0 - 0.05, x1 + 0.05))
ax.set_ylabel('$x$', rotation=0)
ax.yaxis.set_label_coords(-0.025, 1.0)
ax.legend(frameon=False, loc='upper left')
plt.savefig('../img/euler.svg')
|
python
|
import logging
import os
import settings
import data_manager
from policy_learner import PolicyLearner
if __name__ == '__main__':
    stock_code = '005930'  # Samsung Electronics
model_ver = '20210526202014'
    # Logging setup
log_dir = os.path.join(settings.BASE_DIR, 'logs/%s' % stock_code)
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
timestr = settings.get_time_str()
file_handler = logging.FileHandler(filename=os.path.join(log_dir, "%s_%s.log" % (stock_code, timestr)), encoding='utf-8')
stream_handler = logging.StreamHandler()
file_handler.setLevel(logging.DEBUG)
stream_handler.setLevel(logging.INFO)
logging.basicConfig(format="%(message)s", handlers=[file_handler, stream_handler], level=logging.DEBUG)
    # Path for saving preprocessed data
prechartdir = os.path.join(settings.BASE_DIR, 'preprocessed_chart_data/%s' % stock_code)
if not os.path.isdir(prechartdir):
os.makedirs(prechartdir)
prechart_path = os.path.join(prechartdir, 'preprocessed_{}.csv'.format(stock_code))
    # Prepare stock data
chart_data = data_manager.load_chart_data(os.path.join(settings.BASE_DIR, 'chart_data/{}.csv'.format(stock_code)))
prep_data = data_manager.preprocess(chart_data)
training_data = data_manager.build_training_data(prep_data,prechart_path)
    # Filter by date range
training_data = training_data[(training_data['date'] >= '2017-01-01') & (training_data['date'] <= '2017-12-31')]
training_data = training_data.dropna()
    # Split off chart data
feature_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
chart_data = training_data[feature_chart_data]
    # Split off training data
feature_chart_data = ['open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio', 'close_lastclose_ratio', 'volume_lastvolume_ratio',
'close_ma5_ratio', 'volume_ma5_ratio', 'close_ma10_ratio', 'volume_ma10_ratio', 'close_ma20_ratio',
'volume_ma20_ratio', 'close_ma60_ratio', 'volume_ma60_ratio', 'close_ma120_ratio', 'volume_ma120_ratio']
training_data = training_data[feature_chart_data]
'''
    # Start reinforcement learning
policy_learner = PolicyLearner(stock_code=stock_code, chart_data=chart_data, training_data=training_data, min_trading_unit=1, max_trading_unit=1, delayed_reward_threshold=.05, lr=.0001)
policy_learner.fit(balance=10000000, num_epoches=1000, discount_factor=0, start_epsilon=.5)
    # Save the policy network to a file
model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % stock_code)
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
policy_learner.policy_network.save_model(model_path)
'''
    # Start non-learning investment simulation
policy_learner = PolicyLearner(stock_code=stock_code, chart_data=chart_data, training_data=training_data, min_trading_unit=1, max_trading_unit=3)
policy_learner.trade(balance=10000000,model_path=os.path.join(settings.BASE_DIR, 'models/{}/model_{}.h5'.format(stock_code, model_ver)))
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-03 17:09
from __future__ import unicode_literals
from django.db import migrations
import waldur_core.core.fields
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0033_unique_instance_backend_id'),
]
operations = [
migrations.AlterField(
model_name='instance',
name='action_details',
field=waldur_core.core.fields.JSONField(default=dict),
),
migrations.AlterField(
model_name='snapshot',
name='action_details',
field=waldur_core.core.fields.JSONField(default=dict),
),
migrations.AlterField(
model_name='subnet',
name='allocation_pools',
field=waldur_core.core.fields.JSONField(default=dict),
),
migrations.AlterField(
model_name='subnet',
name='dns_nameservers',
field=waldur_core.core.fields.JSONField(default=list, help_text='List of DNS name servers associated with the subnet.'),
),
migrations.AlterField(
model_name='volume',
name='action_details',
field=waldur_core.core.fields.JSONField(default=dict),
),
]
|
python
|
#!/usr/bin/env python
# coding=utf-8
import argparse
import os
import os.path as op
from Buzznauts.models.baseline.alexnet import load_alexnet
from Buzznauts.utils import set_seed, set_device
from Buzznauts.analysis.baseline import get_activations_and_save
from Buzznauts.analysis.baseline import do_PCA_and_save
def main():
buzz_root = '/home/[email protected]/proj/Buzznauts'
description = 'Feature Extraction from Alexnet and preprocessing using PCA'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-vdir', '--video_frames_dir',
help='video frames data directory',
default=op.join(buzz_root, 'data/stimuli/frames'),
type=str)
parser.add_argument('-sdir', '--save_dir',
help='saves processed features',
default=op.join(buzz_root, 'models/baseline'),
type=str)
args = vars(parser.parse_args())
save_dir = args['save_dir']
if not op.exists(save_dir):
os.makedirs(save_dir)
frames_dir = args['video_frames_dir']
# Call set_seed to ensure reproducibility
seed = set_seed()
    # Set computational device (cuda if a GPU is available, else cpu)
device = set_device()
    # Pretrained Alexnet from:
# https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth
checkpoint_path = op.join(save_dir, "alexnet.pth")
url = "https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth"
kwargs = {'ckpth_urls': {'alexnet': url}, 'ckpth': checkpoint_path}
# download pretrained model and save in the current directory
model = load_alexnet(pretrained=True, custom_keys=True, **kwargs)
model.to(device)
model.eval()
# get and save activations
activations_dir = op.join(save_dir, 'activations')
if not op.exists(activations_dir):
os.makedirs(activations_dir)
print("-------------------Saving activations ----------------------------")
imagenet_file = op.join(save_dir, 'imagenet_labels.txt')
    _ = get_activations_and_save(model, frames_dir, activations_dir,
                                 imagenet_file, device=device)
# preprocessing using PCA and save
pca_dir = op.join(activations_dir, 'pca_100')
print("-----------------------Performing PCA----------------------------")
do_PCA_and_save(activations_dir, pca_dir, seed=seed)
if __name__ == "__main__":
main()
|
python
|
import pytest
from deepdiff import DeepDiff
from stift.parser import Parser, ParserError, ParseTypes
class TestParser:
def test_simple(self):
s = r"""b("a",s[0])"""
parsed = Parser(fmtstr=False).parse(s)
expected = [
[
{
"type": ParseTypes.function,
"value": "b",
"argc": 2,
"argv": [(1, 0), (1, 1)],
}
],
[
{"type": str, "value": "a"},
{"type": ParseTypes.array, "value": "s", "index": (2, 0)},
],
[{"type": int, "value": 0}],
]
assert DeepDiff(parsed, expected) == {}
def test_complex(self):
s = r"""func1(val1, func2(1, 2, "\"yes\" (or) \"no\"", arr1[func3(arr2[2])]), "{[(str2")"""
parsed = Parser(fmtstr=False).parse(s)
expected = [
[
{
"type": ParseTypes.function,
"value": "func1",
"argc": 3,
"argv": [(1, 0), (1, 1), (1, 2)],
}
],
[
{"type": ParseTypes.variable, "value": "val1"},
{
"type": ParseTypes.function,
"value": "func2",
"argc": 4,
"argv": [(2, 0), (2, 1), (2, 2), (2, 3)],
},
{"type": str, "value": "{[(str2"},
],
[
{"type": int, "value": 1},
{"type": int, "value": 2},
{"type": str, "value": '"yes" (or) "no"'},
{"type": ParseTypes.array, "value": "arr1", "index": (3, 0)},
],
[
{
"type": ParseTypes.function,
"value": "func3",
"argc": 1,
"argv": [(4, 0)],
},
],
[
{"type": ParseTypes.array, "value": "arr2", "index": (5, 0)},
],
[
{"type": int, "value": 2},
],
]
assert DeepDiff(parsed, expected) == {}
def test_meta(self):
"""Test meta tags starting with @@. Make sure they don't get flagged as bad identifiers."""
s = r"""@@b("a")"""
parsed = Parser(fmtstr=False).parse(s)
expected = [
[
{
"type": ParseTypes.function,
"value": "@@b",
"argc": 1,
"argv": [(1, 0)],
}
],
[
{"type": str, "value": "a"},
],
]
assert DeepDiff(parsed, expected) == {}
class TestParserFormat:
"""Test parser with the format string option"""
def test_simple(self):
s = r"""This is a test format string {b("a",s[0])} like this {"abc"}"""
parsed = Parser(fmtstr=True).parse(s)
expected = [
[
{"type": str, "value": "This is a test format string "},
{
"type": ParseTypes.function,
"value": "b",
"argc": 2,
"argv": [(1, 0), (1, 1)],
},
{"type": str, "value": " like this "},
{"type": str, "value": "abc"},
],
[
{"type": str, "value": "a"},
{"type": ParseTypes.array, "value": "s", "index": (2, 0)},
],
[{"type": int, "value": 0}],
]
assert DeepDiff(parsed, expected) == {}
class TestParserErrors:
def test_missing_closing(self):
"""Test with missing closing bracket"""
s = r"""func(12"""
with pytest.raises(ParserError):
Parser(fmtstr=False).parse(s)
def test_invalid_identifier(self):
"""Verify it catches a bad identifier"""
s = r"""fu nc(12)"""
with pytest.raises(ParserError):
Parser(fmtstr=False).parse(s)
|
python
|
# -*- coding: utf-8 -*-
import wx.lib.scrolledpanel
import util
import rmodel
import os
import numpy
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import \
NavigationToolbar2WxAgg as ToolBar
from matplotlib.figure import Figure
import rttov
import h5py
import colors
class PlotPage(wx.Panel):
def __init__(self, parent, theValues, level, pression, theName,
nbpc, axeXLegend="", inPressure=True, startPC=0):
wx.Panel.__init__(self, parent, style=wx.BORDER_SIMPLE)
self.colors = colors.kpcviewColors
self.linestyles = colors.kpcviewLinestyles
self.linewidths = colors.kpcviewLinewidths
self.yInPressure = inPressure
self.theName = theName
self.theValues = theValues
self.pression = pression
self.level = level
self.fig = Figure()
self.nbpc = nbpc # number of pc to draw
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.sizer)
self.axeXLegend = axeXLegend
self.canvas = FigureCanvas(self, -1, self.fig)
self.canvas.mpl_connect('motion_notify_event', self.onMouseMotion)
tlb = ToolBar(self.canvas)
tlb.Realize()
self.sizer.Add(tlb, 0, wx.GROW)
self.sizer.Add(self.canvas, 1, wx.LEFT |
wx.TOP | wx.GROW, 1, wx.EXPAND)
self.text = wx.StaticText(self, -1, label="")
self.sizer.Add(self.text)
self.Fit()
self.OnPlot(theValues, startPC)
def onMouseMotion(self, event):
txt = ""
if event.inaxes:
x = event.xdata
y = event.ydata
txt = 'X=%.1f Y=%.2f' % (x, y)
self.text.SetLabel(txt)
def OnPlot(self, theValues, startPC=0):
self.fig.clear()
self.axes = self.fig.add_subplot(1, 1, 1)
theMarker = ''
if self.yInPressure:
self.axes.set_ylim((self.pression[-1], self.pression[0]))
self.axes.set_yscale("log")
self.axes.set_yticks((0.00005, 0.0001, 0.0002, 0.0005,
0.001, 0.002, 0.005, 0.01,
0.02, 0.05, 0.1, 0.2, 0.5,
1, 2, 5, 10, 25, 50, 100,
200, 300, 500, 900, 1000))
label = ('5e-5', '1e-4', '2e-4', '5e-4',
'1e-3', '2e-3', '5e-3', '0.01',
'0.02', '0.05', '0.1', '0.2', '0.5',
'1', '2', '5', '10', '25', '50', '100',
'200', '300', '500', '900', '1000')
for tick in self.axes.yaxis.get_major_ticks():
tick.label.set_fontsize(10)
self.axes.set_yticklabels(label)
self.axes.set_ylabel('pressure (hPa)')
self.axes.set_ylim((self.pression[-1], self.pression[0]))
y = self.pression
else:
self.axes.set_ylim(self.level.shape[0] + 1, 1)
            self.axes.set_ylabel("level")
y = self.level
for i in range(0, len(theValues)):
self.axes.plot(theValues[i], y, color=self.colors[i],
label="PC" + str(startPC + i + 1),
marker=theMarker, linestyle=self.linestyles[i])
self.axes.set_xlabel(self.axeXLegend)
self.axes.legend(prop={'size': 10})
self.axes.grid(True)
self.fig.canvas.draw()
class kpcView(util.GenericViewRadio):
""" Surface window of the application """
helpMessage = """
On this window you can visualize profiles from the KPC matrix
    you can choose the first profile to be displayed
and how many profiles you can display.
Be patient while waiting for your drawings...
"""
helpPage = os.environ["RTTOV_GUI_PREFIX"] + "/doc/helpKPC.html"
def __init__(self, parent, profileList=None, baseProfile=None,
startProfile=0, nbProfile=10, kpcmatrixFileName=None,
inPressure=True, run=1):
util.GenericView.__init__(self, parent, "")
self.YinPressure = inPressure
self.profileList = profileList
self.baseProfile = baseProfile
if kpcmatrixFileName is not None:
self.OpenKPCMATRIX(kpcmatrixFileName)
self.pression = self.baseProfile['P']
self.items = {}
self.items["T"] = []
self.items["Q"] = []
self.items["CO2"] = []
self.items['O3'] = []
self.level = numpy.arange(1, self.profileList[0]['T'].shape[0] + 1, 1)
self.nbpc = len(self.profileList)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(sizer)
self.CreateMenuBar()
self.SetSize((1020, 770))
self.SetMinSize((1020, 770))
self.SetTitle('K PC profile RUN ' + str(run))
self.panel1 = wx.Panel(self, -1, style=wx.BORDER_SIMPLE)
sizer.Add(self.panel1, 1, wx.EXPAND)
for item in ("T", "Q", "O3", "CO2"):
if item in self.profileList[0]:
for pc in range(0, self.nbpc):
self.items[item].append(self.profileList[pc][item])
self.startProfile = 0
self.endProfile = startProfile + 10
self.nb = None
self.plot()
# create a second sizer for the notebook
sizer1 = wx.BoxSizer()
sizer1.Add(self.nb, 1, wx.EXPAND)
self.panel1.SetSizer(sizer1)
sizerRight = wx.BoxSizer(wx.VERTICAL)
sizer.Add(sizerRight)
# panel 2 : sliders
self.panel2 = wx.Panel(self, -1, style=wx.BORDER_SIMPLE)
self.sizer2 = wx.BoxSizer(wx.VERTICAL)
self.panel2.SetSizer(self.sizer2)
sizerRight.Add(self.panel2)
self.slider1 = wx.Slider(
self.panel2, 10, 10, 1, 10, (30, 60), (250, -1),
wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS
)
self.slider1.SetTickFreq(5, 1)
self.sizer2.Add(wx.StaticText(self.panel2, -1,
"Number of K PC Profiles to show : "),
border=10)
self.sizer2.Add(self.slider1, border=10)
self.slider1.Bind(wx.EVT_SCROLL_THUMBRELEASE, self.RePlot)
self.slider2 = wx.Slider(
self.panel2, 10, 1, 1, self.nbpc, (30, 60), (250, -1),
wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS
)
self.slider2.SetTickFreq(5, 1)
self.slider2.Bind(wx.EVT_SCROLL_THUMBRELEASE, self.RePlot)
# self.slider2.Bind(wx.EVT_SCROLL_CHANGED,self.RePlot)
self.sizer2.Add(wx.StaticText(
self.panel2, -1, "Start from :"), border=10)
self.sizer2.Add(self.slider2, border=10)
# panel3 + sizer3 : TSKIN
self.panel3 = wx.lib.scrolledpanel.ScrolledPanel(
self, -1, size=wx.Size(200, 600),
style=wx.BORDER_SIMPLE | wx.EXPAND)
self.panel3.SetupScrolling()
sizerRight.Add(self.panel3, flag=wx.EXPAND)
self.panel3.SetAutoLayout(1)
self.sizer3 = wx.BoxSizer(wx.VERTICAL)
self.panel3.SetSizer(self.sizer3)
self.sizer3.Add(wx.StaticText(self.panel3, -1, "TSKIN"), border=5)
self.printTskin()
self.panel3.SetupScrolling()
self.sb = self.CreateStatusBar()
self.sb.SetBackgroundColour('WHITE')
self.Centre()
self.Show(True)
self.StartShow = 0
def plot(self):
""" create the notebook and the graphics pages"""
self.PCPageGraphic = {}
if self.nb is None:
self.nb = wx.Notebook(self.panel1, -1)
else:
self.nb.DeleteAllPages()
for item in ("T", "Q", "O3", "CO2", "CO", "N2O", "CH4"):
if item in self.profileList[0]:
if self.profileList[0][item] is not None:
self.PCPageGraphic[item] = PlotPage(
self.nb,
self.items[item][self.startProfile:self.endProfile],
self.level, self.pression, item, self.nbpc,
axeXLegend=self.profileList[0][
item + '_ATTRIBUTE']['UNITS'],
inPressure=self.YinPressure, startPC=self.startProfile)
self.nb.AddPage(self.PCPageGraphic[item], item)
def RePlot(self, e):
self.BeginBusy()
nb = self.slider1.GetValue()
self.startProfile = self.slider2.GetValue() - 1
self.endProfile = min(self.startProfile + nb, self.nbpc)
self.write("Show kp profile from " +
str(self.startProfile) + " to " + str(self.endProfile))
self.ShowStartEnd(self.startProfile, self.endProfile)
self.EndBusy()
def ShowStartEnd(self, start, end):
for item in self.PCPageGraphic.keys():
self.PCPageGraphic[item].OnPlot(
self.items[item][start:end], startPC=start)
def printTskin(self):
for i in range(0, len(self.profileList)):
tskin = self.profileList[i]["SKIN"]['T']
self.sizer3.Add(wx.StaticText(
self.panel3, -1,
"tskin PC" + str(i + 1) + ": " + str(tskin)))
def OnYPressions(self, e):
self.YinPressure = True
self.plot()
def OnYLevels(self, e):
self.YinPressure = False
self.plot()
def MenuData(self):
""" define the data for the menu
"""
return(("&File", # File Menu
('&Quit', 'Quit', self.OnQuit, "quit", True, False)),
("&Edit", # Edit Menu
("Yaxis in pressure units", "put y in pressure units",
self.OnYPressions, "ypressions", True, True),
("Yaxis in level units", "put y in level unit",
self.OnYLevels, "ylevels", True, True)),
("&Help", # Help Menu
("About", "About screen", self.OnAbout, "about", True, False),
("&Help", "Help", self.OnHelpHTML, "help", True, False)))
def OpenKPCMATRIX(self, fileName):
""" Open a KPC Matrix File and prepare
the profile list and the baseProfile to be displayed"""
f = h5py.File(fileName, 'r')
h5 = f['/']
kpc = rttov.kpcmatrix.Kpcmatrix()
# Load kmatrix
kpc.loadh5(h5)
nbpc = kpc.kpcmatrix['T'].shape[1]
self.write("display profiles " + fileName +
' loaded. nbpc =' + str(nbpc))
profile_list = []
profile_kpc = rttov.profile.Profile()
for pc in range(0, nbpc):
profile_kpc = rttov.profile.Profile()
profile_kpc = kpc.getkpcprof(pc)
profile_list.append(profile_kpc)
f.close()
self.profileList = profile_list
self.baseProfile = rmodel.project.OpenAProfile(fileName, 1)
if __name__ == "__main__":
p = rmodel.project.Project()
print "Configuration : ", p.config.ENV['RTTOV_GUI_PREFIX']
ex = wx.App()
fileName = p.config.ENV['RTTOV_GUI_PREFIX'] + '/rttov_tests/pckmat.h5'
sv = kpcView(None, None, None, 0, 10, fileName)
ex.MainLoop()
|
python
|
str2score = {
"a" : 1,
"b" : 3,
"c" : 3,
"d" : 2,
"e" : 1,
"f" : 4,
"g" : 2,
"h" : 4,
"i" : 1,
"j" : 8,
"k" : 5,
"l" : 1,
"m" : 3,
"n" : 1,
"o" : 1,
"p" : 3,
"q" : 10,
"r" : 1,
"s" : 1,
"t" : 1,
"u" : 1,
"v" : 4,
"w" : 4,
"x" : 8,
"y" : 4,
"z" : 10
}
def score(strs):
plain = strs.lower()
sumnum = 0
for item in plain:
sumnum += str2score[item]
return sumnum
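# Example, hand-checked against the table above: score("cab") == 3 + 1 + 3 == 7.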
|
python
|
from collections import defaultdict
class Graph:
def __init__(self):
self.graph = defaultdict(list)#Create empty dictionary
def insertEdge(self, v1, v2):
self.graph[v1].append(v2)#Add v2 to list of v1
self.graph[v2].append(v1)#Add v1 to list of v2
def printGraph(self):
for node in self.graph:
print(node,':',end=' ')#print vertex-id:
for v in self.graph[node]:#print every vertex in the list
print(v,end=' ')
print('\n')#print new line at end of the list
#Driver code
g = Graph()
g.insertEdge('a', 'b')
g.insertEdge('b', 'c')
g.insertEdge('b', 'd')
g.insertEdge('d', 'e')
g.printGraph()
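# Expected output of the driver code above (insertion-ordered dicts, Python 3.7+),
# with each adjacency list followed by a blank line:
# a : b
# b : a c d
# c : b
# d : b e
# e : d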
|
python
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import collections
import jsons
import json
import re
from flask import Blueprint, url_for, render_template, current_app, Response, redirect, request, jsonify
from markupsafe import escape
from urllib.parse import parse_qs, quote
from . import db
from . import forms
from . import graph
from . import providers
from . import util
bp = Blueprint('agora', __name__)
G = db.G
# The [[agora]] is a [[distributed knowledge graph]].
# Nodes are the heart of the [[agora]].
# In the [[agora]] there are no 404s. Everything that can be described with words has a node in the [[agora]].
# The [[agora]] is a [[search engine]]: /agora-search
#
# Flask routes work so that the one closest to the function is the canonical one.
@bp.route('/wikilink/<node>')
@bp.route('/node/<node>/uprank/<user_list>')
@bp.route('/node/<node>')
@bp.route('/<node>/uprank/<user_list>')
@bp.route('/<node>.<extension>')
@bp.route('/<node>')
def node(node, extension='', user_list=''):
current_app.logger.debug(f'[[{node}]]: Assembling node.')
# default uprank: system account and maintainers
# TODO: move to config.py
rank = ['agora', 'flancian', 'vera', 'neil']
if user_list:
# override rank
if ',' in user_list:
rank = user_list.split(",")
else:
rank = user_list
from copy import copy
n = copy(G.node(node))
if n.subnodes:
# earlier in the list means more highly ranked.
n.subnodes = util.uprank(n.subnodes, users=rank)
if extension:
# this is pretty hacky but it works for now
# should probably move to a filter method in the node? and get better template support to make what's happening clearer.
current_app.logger.debug(f'filtering down to extension {extension}')
n.subnodes = [subnode for subnode in n.subnodes if subnode.uri.endswith(f'.{extension}')]
n.uri = n.uri + f'.{extension}'
n.wikilink = n.wikilink + f'.{extension}'
# search_subnodes = db.search_subnodes(node)
current_app.logger.debug(f'[[{node}]]: Assembled node.')
return render_template(
# yuck
'content.html',
node=n,
backlinks=n.back_links(),
pull_nodes=n.pull_nodes() if n.subnodes else [],
auto_pull_nodes=n.auto_pull_nodes() if current_app.config['ENABLE_AUTO_PULL'] else [],
forwardlinks=n.forward_links() if n else [],
search=[],
pulling_nodes=n.pulling_nodes(),
pushing_nodes=n.pushing_nodes(),
q=n.uri,
qstr=n.wikilink.replace('-', ' '),
render_graph=True if n.back_links() or n.subnodes else False,
config=current_app.config
)
@bp.route('/graph/json')
def graph_js():
nodes = G.nodes().values()
return Response(graph.json_nodes(nodes), mimetype='application/json')
@bp.route('/graph/json/<node>')
def graph_js_node(node):
n = G.node(node)
return Response(graph.json_node(n), mimetype='application/json')
@bp.route('/node/<node>@<user>')
@bp.route('/node/@<user>/<node>')
@bp.route('/@<user>/<node>')
def subnode(node, user):
n = G.node(node)
n.subnodes = util.filter(n.subnodes, user)
n.subnodes = util.uprank(n.subnodes, user)
search_subnodes = db.search_subnodes_by_user(node, user)
return render_template(
'subnode.html',
node=n,
)
# Special
@bp.route('/')
def index():
return redirect(url_for('.node', node='index'))
@bp.route('/Δ')
@bp.route('/delta')
@bp.route('/latest')
def latest():
return render_template('delta.html',
header="Recent deltas",
subnodes=db.latest())
@bp.route('/now')
@bp.route('/tonight')
@bp.route('/today')
def today():
today = datetime.date.today()
return redirect("/%s" % today.strftime("%Y-%m-%d"))
@bp.route('/tomorrow')
def tomorrow():
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
return redirect("/%s" % tomorrow.strftime("%Y-%m-%d"))
# Actions
# Simple go.
@bp.route('/go/<node>')
def go(node):
"""Redirects to the URL in the given node in a block that starts with [[go]], if there is one."""
# TODO(flancian): all node-scoped stuff should move to actually use node objects.
try:
n = db.nodes_by_wikilink(node)
except KeyError:
return redirect("/node/%s" % node)
if len(n) > 1:
current_app.logger.warning(
'nodes_by_wikilink returned more than one node, should not happen.')
if len(n) == 0:
# No nodes with this name -- redirect to node 404.
return redirect("/node/%s" % node)
links = n[0].go()
if len(links) == 0:
# No go links detected in this node -- just redirect to the node.
# TODO(flancian): flash an explanation :)
return redirect("/node/%s" % node)
if len(links) > 1:
# TODO(flancian): to be implemented.
# Likely default to one of the links, show all of them but redirect to default within n seconds.
current_app.logger.warning(
            'Code to manage nodes with more than one go link is not implemented.')
return redirect(links[0])
# Composite go.
# This is a hack, needs to be replaced with proper generic node/block "algebra".
@bp.route('/go/<node0>/<node1>')
def composite_go(node0, node1):
"""Redirects to the URL in the given node in a block that starts with [[<action>]], if there is one."""
# TODO(flancian): all node-scoped stuff should move to actually use node objects.
# TODO(flancian): make [[go]] call this?
# current_app.logger.debug = print
current_app.logger.debug(f'running composite_go for {node0}, {node1}.')
try:
n0 = db.nodes_by_wikilink(node0)
current_app.logger.debug(f'n0: {n0}')
n1 = db.nodes_by_wikilink(node1)
current_app.logger.debug(f'n1: {n1}')
except KeyError:
pass
# return redirect("/%s" % node0)
if len(n0) == 0 and len(n1) == 0:
# No nodes with either names.
# Redirect to the composite node, which might exist -- or in any case will provide relevant search.
current_app.logger.debug(f'redirect 1')
return redirect(f'/{node0}-{node1}')
links = []
if len(n0) != 0:
links.extend(n0[0].filter(node1))
current_app.logger.debug(
f'n0 [[{n0}]]: filtered to {node1} yields {links}.')
if len(n1) != 0:
links.extend(n1[0].filter(node0))
current_app.logger.debug(
f'n1 [[{n1}]]: filtered to {node0} finalizes to {links}.')
if len(links) == 0:
# No matching links found.
# Redirect to composite node, which might exist and provides search.
# TODO(flancian): flash an explanation :)
return redirect(f'/{node0}-{node1}')
if len(links) > 1:
# TODO(flancian): to be implemented.
# Likely default to one of the links, show all of them but redirect to default within n seconds.
current_app.logger.warning(
'Code to manage nodes with more than one go link is not implemented.')
return redirect(links[0])
@bp.route('/push/<node>/<other>')
def push(node, other):
n = G.node(node)
o = G.node(other)
pushing = n.pushing(o)
return Response(pushing)
# good for embedding just node content.
@bp.route('/pull/<node>')
def pull(node):
current_app.logger.debug(f'pull [[{node}]]: Assembling node.')
# default uprank: system account and maintainers
# TODO: move to config.py
rank = current_app.config['RANK']
from copy import copy
n = copy(G.node(node))
if n.subnodes:
# earlier in the list means more highly ranked.
n.subnodes = util.uprank(n.subnodes, users=rank)
current_app.logger.debug(f'[[{node}]]: Assembled node.')
return render_template(
# yuck
'content.html',
node=n,
embed=True,
backlinks=n.back_links(),
pull_nodes=n.pull_nodes() if n.subnodes else [],
forwardlinks=n.forward_links() if n else [],
search=[],
pulling_nodes=n.pulling_nodes(),
pushing_nodes=n.pushing_nodes(),
q=n.uri,
qstr=n.uri,
render_graph=False,
config=current_app.config,
)
# for embedding search (at bottom of node).
@bp.route('/fullsearch/<qstr>')
def fullsearch(qstr):
current_app.logger.debug(f'full text search for [[{qstr}]].')
search_subnodes = db.search_subnodes(qstr)
return render_template(
'fullsearch.html',
qstr=qstr,
q=qstr,
node=qstr,
search=search_subnodes
)
# FIXME: duplicate, broken variant of pull(); it shadowed the route handler above and
# referenced an undefined 'pushing', so it is left disabled here.
# def pull(node, other):
#     n = G.node(node)
#     return Response(pushing)
# This receives whatever you type in the mini-cli up to the top of the page.
# Then it parses it and redirects to the right node or takes the appropriate action.
# See https://anagora.org/agora-search, in particular 'design', for more.
@bp.route('/exec')
@bp.route('/search')
def search():
"""Redirects to an appropriate context.
Originally called "jump" because in the [[agora]] nodes *always* exist, as they map 1:1 to all possible queries. Thus [[agora search]].
"""
q = request.args.get('q')
tokens = q.split(" ")
# ask for bids from search providers.
# both the raw query and tokens are passed for convenience; each provider is free to use or discard each.
results = providers.get_bids(q, tokens)
# should result in a reasonable ranking; bids are a list of tuples (confidence, proposal)
results.sort(reverse=True)
current_app.logger.info(f'Search results for {q}: {results}')
result = results[0] # the agora always returns at least one result: the offer to render the node for the query.
# perhaps here there could be special logic to flash a string at the top of the result node if what we got back from search is a string.
# hack hack
# [[push]] [[2021-02-28]] in case I don't get to it today.
if callable(result.proposal):
return result.proposal()
if result.message:
# here we should probably do something to 'flash' the message?
pass
# catch all follows.
# "should never happen" (lol) as the agora is its own search provider and a plain node rendering should always be returned as a bid for every query.
# log a warning if it does :)
current_app.logger.warning(
'Node catch-all in agora.py triggered; should never happen (tm).')
return url_for('.node', node=util.slugify(q))
@bp.route('/subnode/<path:subnode>')
def old_subnode(subnode):
print(subnode)
return render_template('subnode.html', subnode=db.subnode_by_uri(subnode), backlinks=db.subnodes_by_outlink(subnode))
@bp.route('/u/<user>')
@bp.route('/user/<user>')
@bp.route('/node/@<user>') # so that [[@flancian]] works.
@bp.route('/@<user>')
def user(user):
return render_template('user.html', user=user, readmes=db.user_readmes(user), subnodes=db.subnodes_by_user(user))
@bp.route('/user/<user>.json')
def user_json(user):
subnodes = list(map(lambda x: x.wikilink, db.subnodes_by_user(user)))
return jsonify(jsons.dump(subnodes))
# Lists
@bp.route('/nodes')
def nodes():
if current_app.config['ENABLE_STATS']:
return render_template('nodes.html', nodes=db.top(), stats=db.stats())
else:
return render_template('nodes.html', nodes=db.top(), stats=None)
@bp.route('/nodes.json')
def nodes_json():
nodes = G.nodes(include_journals=False).values()
links = list(map(lambda x: x.wikilink, nodes))
return jsonify(jsons.dump(links))
@bp.route('/similar/<term>.json')
def similar_json(term):
nodes = util.similar(db.top(), term)
return jsonify(nodes)
@bp.route('/notes') # alias
@bp.route('/subnodes')
def subnodes():
return render_template('subnodes.html', subnodes=G.subnodes())
@bp.route('/@')
@bp.route('/users')
def users():
return render_template('users.html', users=db.all_users())
@bp.route('/users.json')
def users_json():
users = list(map(lambda x: x.uri, db.all_users()))
return jsonify(jsons.dump(users))
@bp.route('/journal/<user>')
def user_journal(user):
return render_template('subnodes.html', header="Journals for user", subnodes=db.user_journals(user))
@bp.route('/journal/<user>.json')
def user_journal_json(user):
return jsonify(jsons.dump(db.user_journals(user)))
@bp.route('/journals')
def journals():
return render_template('journals.html', header="Journals", nodes=db.all_journals()[0:current_app.config['JOURNAL_ENTRIES']])
@bp.route('/journals.json')
def journals_json():
return jsonify(jsons.dump(db.all_journals()))
@bp.route('/asset/<user>/<asset>')
def asset(user, asset):
# An asset is a binary in someone's garden/<user>/assets directory.
# Currently unused.
path = '/'.join(["garden", user, 'assets', asset])
return current_app.send_static_file(path)
@bp.route('/raw/<path:subnode>')
def raw(subnode):
s = db.subnode_by_uri(subnode)
return Response(s.content, mimetype=s.mediatype)
@bp.route('/backlinks/<node>')
def backlinks(node):
# Currently unused.
return render_template('nodes.html', nodes=db.nodes_by_outlink(node))
@bp.route('/settings')
def settings():
return render_template('settings.html', header="Settings")
@bp.route('/search.xml')
def search_xml():
return render_template('search.xml'), 200, {'Content-Type': 'application/opensearchdescription+xml'}
|
python
|
#!/usr/bin/env python
#coding:utf-8
# Author : tuxpy
# Email : [email protected]
# Last modified : 2015-09-08 14:04:23
# Filename : websocket.py
# Description :
from __future__ import unicode_literals, print_function
import base64
import struct
import hashlib
from gale.escape import utf8
from gale.utils import urlsplit, ObjectDict, is_string
from gale.web import RequestHandler, async, HTTPError
from gale.e import WebSocketError
WS = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
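# The accept key is the base64-encoded SHA-1 digest of the client's Sec-WebSocket-Key
# concatenated with the fixed GUID above, per the RFC 6455 opening handshake.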
def generate_response_key(websocket_key):
return base64.encodestring(hashlib.sha1(websocket_key + WS).digest()).strip()
class WebSocketConnection(object):
def __init__(self, websocket_version, websocket_key, handler):
self.websocket_version = websocket_version
self.websocket_key = websocket_key
self.handler = handler
self.stream = handler.request.connection
self.stream.set_timeout(100)
self.closed = False
self.close_status = None
self.close_reason = None
def accept(self):
response = [
'HTTP/1.1 101 Switching Protocols',
'Upgrade: websocket',
'Connection: Upgrade',
'Sec-WebSocket-Accept: %s' % (generate_response_key(self.websocket_key)),
]
response = utf8('\r\n'.join(response) + '\r\n' * 2)
self.stream.write(response)
self.handler.on_open()
while self.closed == False:
frame_data = self.recv_frame_data()
            if frame_data.opcode == 0x1:  # text frame
self.handler.on_message(frame_data)
            elif frame_data.opcode == 0x8:  # close frame
_data = frame_data.data
if len(_data) >= 2:
self.close_status = struct.unpack(b'!H', _data[:2])[0]
if len(_data) > 2:
self.close_reason = _data[2:]
self.close()
elif frame_data.opcode == 0x9: # on ping
self.handler.on_ping(frame_data)
def close(self):
if self.closed:
return
self.stream.close()
self.handler.on_close(self.close_status, self.close_reason)
self.closed = True
def recv_frame_data(self):
frame = ObjectDict()
_header_data = self.recv_bytes(2)
if not _header_data:
return None
self._on_frame_header(_header_data, frame)
if frame.payload_len < 126:
pass
elif frame.payload_len == 126:
frame.payload_len = struct.unpack(b'!H', self.recv_bytes(2))[0]
# frame.payload_len = sum([ _d & (0xFF << (base * 8)) for base in range(2)])
elif frame.payload_len == 127:
frame.payload_len = struct.unpack(b'!Q', self.recv_bytes(8))[0]
# frame.payload_len = sum([ _d & (0xFF << (base * 8)) for base in range(4)])
if frame.mask:
self._on_frame_mask_key(self.recv_bytes(4), frame)
self._on_frame_data(self.recv_bytes(frame.payload_len), frame)
return frame
def send_frame_data(self, frame):
bin_frame = b''
bin_frame += struct.pack(b'B', (frame.fin << 7) + frame.opcode)
payload_len = len(frame.data)
if payload_len < 126:
bin_frame += struct.pack(b'B', payload_len)
elif payload_len <= 0xffff:
bin_frame += struct.pack(b'B', 126)
bin_frame += struct.pack(b'!H', payload_len)
else:
bin_frame += struct.pack(b'B', 127)
bin_frame += struct.pack(b'!Q', payload_len)
bin_frame += frame.data
self.stream.send_string(bin_frame)
def recv_bytes(self, size):
if size == 0:
return b''
chunk = self.stream.recv(size)
if not chunk:
raise WebSocketError('connection closed')
        has_much = size - len(chunk)  # only up to 65536 bytes arrive per recv, so check whether anything remains
if has_much > 0:
chunk += self.recv_bytes(has_much)
if len(chunk) != size:
raise WebSocketError('connection closed')
return chunk
def _on_frame_data(self, _buffer, frame):
if not frame.mask:
frame['data'] = _buffer
return
_content = b''
for i in range(frame.payload_len):
_b = struct.unpack(b'!B', _buffer[i])[0]
_content += struct.pack(b'!B', _b ^ frame.mask_key[i % 4])
frame['data'] = _content
def _on_frame_mask_key(self, _buffer, frame):
frame['mask_key'] = struct.unpack(b'4B', _buffer)
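    # Per RFC 6455, the two header bytes parsed below are laid out as:
    #   byte 0: FIN (1 bit), RSV1-3 (3 bits), opcode (4 bits)
    #   byte 1: MASK (1 bit), payload length (7 bits; 126/127 signal 16-/64-bit extended lengths)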
def _on_frame_header(self, _buffer, frame):
_d = struct.unpack(b'BB', _buffer)
frame['fin'] = _d[0] >> 7
frame['opcode'] = _d[0] & 0xF
frame['mask'] = _d[1] >> 7
frame['payload_len'] = _d[1] & 0x7F
class WebSocketHandler(RequestHandler):
@async
def GET(self):
request_error = self.check_request_header_error()
if request_error:
self.set_status(400)
self.finish(request_error)
return
if not self.origin_is_accept():
self.set_status(403)
self.finish('access origin illegal')
return
self._websocket_conn = WebSocketConnection(self.__websocket_version,
self.__websocket_key, self)
try:
self._websocket_conn.accept()
except WebSocketError as ex:
self._websocket_conn.close()
def close(self):
self._websocket_conn.close()
def on_open(self):
pass
def on_close(self, status, reason):
pass
def on_message(self, frame):
pass
def on_ping(self, frame):
pass
def send_message(self, chunk):
if not is_string(chunk):
raise WebSocketError('message type must str or unicode')
frame = ObjectDict({'fin': 1, 'data': utf8(chunk), 'opcode': 1})
self._websocket_conn.send_frame_data(frame)
def check_request_header_error(self):
for header in ('upgrade', 'websocket_key', 'websocket_version'):
_error = getattr(self, '_check_' + header)()
if _error:
return _error
return None
@property
def __websocket_key(self):
return self.request.get_header('Sec-WebSocket-Key', '')
@property
def __websocket_version(self):
_version = self.request.get_header('Sec-WebSocket-Version')
return _version and int(_version) or None
def _check_upgrade(self):
upgrade = self.request.get_header('Upgrade', '')
if upgrade.lower() != 'websocket':
return 'upgrade only support websocket'
def _check_websocket_version(self):
_version = self.__websocket_version
if (not _version) or (_version != 13):
self.set_header('Sec-WebSocket-Version', '13')
return 'currently support version == 13'
def _check_websocket_key(self):
if not self.__websocket_key:
return 'missing websocket key'
def origin_is_accept(self):
origin = utf8(self.request.get_header('Origin', ''))
if not origin:
return False
origin_host = urlsplit(origin).netloc
origin_host = ':' in origin_host and origin_host or (origin_host + ':80')
if origin_host != self.request.host + ':' + self.request.port:
return False
return True
|
python
|
"""Defines all views related to rooms."""
from flask import jsonify
from flask import session
from flask_login import current_user
from flask_socketio import Namespace, emit, join_room
from sqlalchemy import sql
from sqlalchemy.orm.exc import NoResultFound
from authentication.models import User
from base.views import ApiView, register_api
from database import db_session
from errors import invalid_room_token, room_already_joined
from errors.http import ForbiddenException
from rooms.forms import RoomForm
from rooms.models import Room
from runserver import socketio, app
__author__ = "Benjamin Schubert <[email protected]>"
class AlreadyJoinedException(Exception):
def __init__(self, id):
self.id = id
def _join_room(token):
room = Room.query.filter(Room.token == token).one()
if current_user.is_authenticated:
if current_user not in room.participants:
room.participants.append(current_user)
db_session.commit()
return room.id
else:
raise AlreadyJoinedException(room.id)
else:
if session.get("rooms") is None:
session["rooms"] = []
if room.id in session["rooms"]:
raise AlreadyJoinedException(room.id)
session["rooms"].append(room.id)
session.modified = True
return room.id
class RoomApiView(ApiView):
"""Defines views related to rooms."""
model = Room
@property
def queryset(self):
"""Get the query on which to work."""
if current_user.is_authenticated:
if len(current_user.rooms) == 0:
return Room.query.filter(sql.false())
return Room.query.filter(Room.id.in_([r.id for r in current_user.rooms]))
if session.get("rooms") is not None:
return Room.query.filter(Room.id.in_(session.get("rooms")))
return Room.query.filter(sql.false())
def check_object_permissions(self, obj, method):
if method in ["POST", "DELETE", "PUT"]:
if obj.owner != current_user:
raise ForbiddenException()
def get_form(self, obj=None):
"""Get the form to create or update a room."""
return RoomForm(obj=obj, owner=current_user)
def join_room_view(token):
try:
return jsonify({"id": _join_room(token)})
except NoResultFound:
return jsonify({"code": [invalid_room_token]}), 400
except AlreadyJoinedException:
return jsonify({"code": [room_already_joined]}), 400
def quit_room(room_id):
room = Room.query.join(Room.participants).filter(Room.id == room_id).one_or_none()
if room is not None:
if current_user.is_authenticated:
room.participants.remove(current_user)
db_session.commit()
else:
session.get("rooms", []).remove(room.id)
return jsonify({"id": room_id})
class RoomNamespace(Namespace):
"""Defines Socket.IO operations for rooms."""
def on_connect(self):
"""Check that the user is authenticated and registers him to its rooms."""
if not current_user.is_authenticated:
if session.get("rooms") is not None:
rooms = Room.query.filter(Room.id.in_(session.get("rooms"))).all()
else:
rooms = []
else:
rooms = Room.query.filter(User.rooms.any(User.id == current_user.id)).all()
for room in rooms:
join_room(room.id)
def on_join(self, token):
"""
Make the user join the room identified by the given token.
:param token: token of the room
"""
try:
_id = _join_room(token)
join_room(_id)
emit("item", _id)
return {"id": _id}
except NoResultFound:
return {"code": [invalid_room_token]}
except AlreadyJoinedException as e:
join_room(e.id)
emit("item", e.id)
return {"code": [room_already_joined]}
app.add_url_rule("/rooms/join/<string:token>", "join_room", join_room_view, methods=["POST"])
app.add_url_rule("/rooms/<int:room_id>/quit/", "quit_room", quit_room, methods=["POST"])
register_api(RoomApiView, "rooms", "/rooms/")
socketio.on_namespace(RoomNamespace("/rooms"))
|
python
|
from matplotlib.dates import date2num, num2date
from matplotlib.colors import ListedColormap
from matplotlib import dates as mdates
from matplotlib import pyplot as plt
from matplotlib.patches import Patch
from matplotlib import ticker
from functions.adjust_cases_functions import prepare_cases
from functions.general_utils import get_bool
from models.seird_model import SEIRD
import matplotlib.pyplot as plt
import scipy.io as sio
import pandas as pd
import numpy as np
import os
from global_config import config
import sys
dict_map = {'0-39': (0, 39), '40-49': (40,49),
'50-59': (50,59), '60-69': (60,69), '70-90+': (70,200) }
NGroups = len(dict_map)
def age_group(val, dict_map):
for ag in dict_map:
if dict_map[ag][0] <= val <= dict_map[ag][1]:
return ag
return 'NaN'
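# Example: age_group(45, dict_map) returns '40-49'; ages outside every range map to 'NaN'.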
poly_run = 13001
data_dir = config.get_property('data_dir_covid')
geo_dir = config.get_property('geo_dir')
data_dir_mnps = config.get_property('data_dir_col')
results_dir = config.get_property('results_dir')
agglomerated_folder = os.path.join(data_dir, 'data_stages', 'colombia', 'agglomerated', 'geometry' )
polygons = pd.read_csv(os.path.join(agglomerated_folder, 'polygons.csv')).set_index('poly_id')
polygons = polygons.loc[poly_run]
data = pd.read_csv(os.path.join(agglomerated_folder, 'cases.csv'), parse_dates=['date_time'],
dayfirst=True).set_index('poly_id').loc[poly_run].set_index('date_time')
data = data.resample('D').sum().fillna(0)[['num_cases','num_diseased']]
data = prepare_cases(data, col='num_cases', cutoff=0) # .rename({'smoothed_num_cases':'num_cases'})
data = prepare_cases(data, col='num_diseased', cutoff=0) # .rename({'smoothed_num_cases':'num_cases'})
data = data.rename(columns={'smoothed_num_cases': 'confirmed', 'smoothed_num_diseased':'death'})[['confirmed', 'death']]
raw_folder = os.path.join(data_dir, 'data_stages', 'colombia', 'raw', 'cases' )
cases_raw_df = pd.read_csv(os.path.join(raw_folder, 'cases_raw.csv'), parse_dates =['Fecha de inicio de síntomas', 'Fecha de diagnóstico', 'Fecha de muerte'], dayfirst=True) #.set_index('poly_id')
cases_raw_df['age_group'] = cases_raw_df['Edad'].apply(lambda x: age_group( x, dict_map) )
cases_raw_df = cases_raw_df[['Código DIVIPOLA municipio', 'Nombre municipio', 'Nombre departamento', 'age_group', 'Sexo' ,'Fecha de inicio de síntomas', 'Fecha de diagnóstico', 'Fecha de muerte']]
cases_raw_df = cases_raw_df.rename(columns={'Código DIVIPOLA municipio': 'poly_id'})
list_df_ages = []
for age_g in dict_map.keys():
cases_agei = cases_raw_df[cases_raw_df.age_group==age_g].copy()
cases_agei['num_cases'] = 1
cases_agei['num_diseased'] = 1
cases_agei_num_cases = cases_agei.copy().groupby(['Fecha de diagnóstico','poly_id']).sum().reset_index().rename(columns={'Fecha de diagnóstico': 'date_time'})[['date_time','poly_id','num_cases']]
cases_agei_num_deaths = cases_agei.copy()[['Fecha de muerte','poly_id','num_diseased']].dropna().groupby(['Fecha de muerte','poly_id']).sum().reset_index().rename(columns={'Fecha de muerte': 'date_time'})
new_df = pd.merge(cases_agei_num_cases, cases_agei_num_deaths, how='outer').fillna(0)
new_df = new_df.groupby(['date_time','poly_id']).sum().reset_index().set_index('poly_id')
new_df = prepare_cases(new_df, col='num_cases', cutoff=0) # .rename({'smoothed_num_cases':'num_cases'})
new_df = prepare_cases(new_df, col='num_diseased', cutoff=0) # .rename({'smoothed_num_cases':'num_cases'})
new_df = new_df.rename(columns={'smoothed_num_cases': 'confirmed', 'smoothed_num_diseased':'death'})[['date_time','confirmed', 'death']]
new_df['age_group'] = age_g
list_df_ages.append(new_df)
cases_df = pd.read_csv(os.path.join(agglomerated_folder, 'cases.csv'), parse_dates=['date_time'],
dayfirst=True)
cases_df_agg = cases_df.reset_index()[['poly_id','date_time', 'num_cases', 'num_diseased']].rename(columns={'num_cases': 'confirmed', 'num_diseased':'death'})
cases_df_agg['age_group'] = 'agg'
list_df_ages.append(cases_df_agg.set_index('poly_id'))
df_cases_ages = pd.concat(list_df_ages)
df_cases_ages = df_cases_ages.loc[poly_run]
import seaborn as sns
fig, ax = plt.subplots(1, 1, figsize=(12.5, 7))
sns.lineplot(ax=ax, data=df_cases_ages[df_cases_ages.age_group!='agg'], x='date_time', y='confirmed', hue='age_group', palette='flare')
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.grid(which='major', axis='y', c='k', alpha=.1, zorder=-2)
ax.tick_params(axis='both', labelsize=15)
ax.yaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}"))
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(12.5, 7))
sns.lineplot(ax=ax, data=df_cases_ages[df_cases_ages.age_group!='agg'], x='date_time', y='death', hue='age_group', palette='crest')
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b'))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.grid(which='major', axis='y', c='k', alpha=.1, zorder=-2)
ax.tick_params(axis='both', labelsize=15)
ax.yaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}"))
plt.show()
movement_df = pd.read_csv( os.path.join(agglomerated_folder, 'movement_range.csv' ), parse_dates=['date_time'])
movement_df = movement_df[movement_df.poly_id==poly_run]
movement_df['movement_change'] = 100*movement_df['movement_change']
fig, ax = plt.subplots(1, 1, figsize=(12.5, 7))
sns.lineplot(ax=ax, data=movement_df, x='date_time', y='movement_change', color='k')
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b-%y'))
ax.xaxis.set_minor_locator(mdates.DayLocator())
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.grid(which='major', axis='y', c='k', alpha=.1, zorder=-2)
ax.tick_params(axis='both', labelsize=15)
ax.yaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.0f} %"))
plt.show()
|
python
|
from discord.ext import commands
import globvars
from mafia.util import check_if_is_host
class Start(commands.Cog):
"""Contains commands related to starting the game
"""
def __init__(self, bot):
self.bot = bot
@commands.command(
name='start'
)
@commands.check(check_if_is_host)
async def start(self, ctx):
globvars.state_manager.init_game()
await ctx.send('✅ Transitioned to game state!')
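# Hypothetical extension hook (a sketch, assuming this cog is loaded as a
# discord.py extension; on discord.py 2.x this function would need to be async
# and await bot.add_cog):
def setup(bot):
    bot.add_cog(Start(bot))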
|
python
|
# ---------------------------------LogN Solution-------------------------
def logbase2(n):
m = 0
while n > 1:
n >>= 1
m += 1
return m
def CountBits(n):
m = logbase2(n)
if n == 0:
return 0
def nextMSB(n, m):
temp = 1 << m
while n < temp:
temp = temp >> 1
m -= 1
return m
m = nextMSB(n, m)
if (n == (1 << (m+1))-1):
return ((m+1) * (1 << m))
n = n - (1 << m)
return (n + 1) + CountBits(n) + m * (1 << (m -1))
# -----------------------------------Mathematically-------------------------
def CountBits_Mathematically(n):
sum=0
for i in range(32):
blockSize = 2**(i+1)
completeBlocks = (n+1)//blockSize
number_of_ones = 2**i * completeBlocks
incompleteBlock = (n+1) % blockSize
remainder_ones = incompleteBlock - 2**i
if remainder_ones > 0:
number_of_ones += remainder_ones
sum += number_of_ones
return sum
print(CountBits(4))
print(CountBits_Mathematically(4))
print(CountBits(15))
print(CountBits_Mathematically(15))
# N = 6
# 0 | 0 0 -|
# 0 | 0 1 |
# 0 | 1 0 | -> b*2^(b-1)
# 0 | 1 1 -|
# - - - -
# |- 1 | 0 0 -|
# n-(1<<m)+1 -> | 1 | 0 1 | -> by recursion using nextMSB
# |- 1 | 1 0 -|
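# Quick sanity check of both solutions above against a brute-force count of set
# bits from 0 through n (the helper below is only for verification, not part of
# either solution):
def count_bits_bruteforce(n):
    return sum(bin(i).count('1') for i in range(n + 1))
for n in range(64):
    assert CountBits(n) == CountBits_Mathematically(n) == count_bits_bruteforce(n)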
|
python
|
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import List, Tuple, Dict
import collections
import json
import logging
import math
import os
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from transformers import GPT2PreTrainedModel, GPT2Model, GPT2Tokenizer
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
class GPT2GenModel(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"attn.bias", r"lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.transformer.h))
self.transformer.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.transformer.first_device)
self.model_parallel = True
def deparallelize(self):
self.transformer.deparallelize()
self.transformer = self.transformer.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
torch.cuda.empty_cache()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
gen_tokens_list=None,
slot_poses = None
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return loss,lm_logits
'''return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)'''
@staticmethod
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PreTrainedModel.beam_search` or :meth:`~transformers.PreTrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
if __name__ == "__main__":
gen_model = GPT2GenModel.from_pretrained('gpt2')
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
input_ids = tokenizer.encode("Hello, my dog is")
input_ids.append(50256)
input_ids = torch.tensor(input_ids).unsqueeze(0)
label = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)
print(input_ids)
print(label)
outputs = gen_model(input_ids, labels=input_ids)
print(outputs)
|
python
|
import socket
import select
import time
import logging
import string
class controller(object):
def __init__(self,ipaddr):
self.ipaddr=ipaddr
self.reply_message=None
print "init controller"
self.connect()
def connect(self):
netAddr=(self.ipaddr, 4001)
self.netsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.netsock.connect(netAddr)
self.netsock.setblocking(1)
print "connect %s" % self.ipaddr
def __enter__(self):
try:
self.connect()
except:
print "error"
print "enter"
return self
def read(self):
#self.reply_message = self.netsock.recv(2048)[6:-2]
data = self.netsock.recv(2048)[:-2]
try:
self.reply_message = int(data)
except:
self.reply_message = 0.881
print "hoge"
print "#",self.reply_message
def send(self, sendString):
print sendString
self.netsock.send(sendString+'\n')
def __exit__(self,type, value, traceback):
self.netsock.close()
print "exit controller"
if __name__=='__main__':
c = controller('10.68.10.250')
|
python
|
'''demux.py
a simple C-band / O-band wavelength demultiplexer
'''
import meep as mp
import meep.adjoint as mpa
import numpy as np
from autograd import numpy as npa
from autograd import tensor_jacobian_product, grad
from matplotlib import pyplot as plt
import nlopt
import argparse
mp.quiet()
# ---------------------------------------- #
# CL parameters
# ---------------------------------------- #
parser = argparse.ArgumentParser()
parser.add_argument('-r','--resolution', help='Simulation resolution', default=20, type=int)
parser.add_argument('-i','--init_condition', help='Initial condition (0=uniform, 1=small circles, 2=large circles, 3=ellipses)', default=0, type=int)
parser.add_argument('-m','--maxeval', help='Maximum number of objective evaluations per beta stage', default=30, type=int)
args = parser.parse_args()
# ---------------------------------------- #
# basic parameters
# ---------------------------------------- #
resolution = args.resolution
design_region_resolution = int(2*resolution)
dx = 5.0
dy = 5.0
filename = "demux_{}_{}_{}".format(args.resolution,args.init_condition,args.maxeval)
min_length = 0.09
eta = 0.5
eta_e = 0.75
dpml = 1.0
maxeval = args.maxeval
data = []
results = []
beta_history = []
# ---------------------------------------- #
# derived quantities
# ---------------------------------------- #
eta_d = 1-eta_e
filter_radius = mpa.get_conic_radius_from_eta_e(min_length,eta_e)
c = (design_region_resolution*filter_radius)**4
sx = dx + 2 + 2*dpml
sy = dy + 1 + 2*dpml
# ---------------------------------------- #
# main routine
# ---------------------------------------- #
silicon = mp.Medium(epsilon=12)
cell_size = mp.Vector3(sx,sy,0)
boundary_layers = [mp.PML(thickness=dpml)]
eig_parity = mp.EVEN_Y + mp.ODD_Z
design_region_size = mp.Vector3(dx,dy)
Nx = int(design_region_resolution*design_region_size.x) + 1
Ny = int(design_region_resolution*design_region_size.y) + 1
w = 0.5
sep = 0.5
wgy = sep/2 + w/2
waveguide_geometry = [mp.Block(material=silicon,
center=(-sx,0,0),
size=mp.Vector3(2*sx,w,mp.inf)),
mp.Block(material=silicon,
center=(sx,wgy,0),
size=mp.Vector3(2*sx,w,mp.inf)),
mp.Block(material=silicon,
center=(sx,-wgy,0),
size=mp.Vector3(2*sx,w,mp.inf)),
]
fcen1 = 1/1.55
df1 = 0.2*fcen1
fcen2 = 1/1.31
df2 = 0.2*fcen2
frequencies = [fcen1,fcen2]
time_src = [mp.GaussianSource(fcen1,fwidth=df1),mp.GaussianSource(fcen2,fwidth=df2)]
sources = []
for s in time_src:
sources.append(
mp.EigenModeSource(src=s,
center=mp.Vector3(-0.5*sx+dpml,0),
size=mp.Vector3(0,sy-2*dpml),
eig_band=1,
eig_parity=eig_parity)
)
matgrid = mp.MaterialGrid(mp.Vector3(Nx,Ny),
mp.air,
silicon,
weights=np.ones((Nx,Ny)),
grid_type="U_MEAN",
do_averaging=True)
matgrid_region = mpa.DesignRegion(matgrid,
volume=mp.Volume(center=mp.Vector3(),
size=mp.Vector3(design_region_size.x,design_region_size.y,0)))
matgrid_geometry = [mp.Block(center=matgrid_region.center,
size=matgrid_region.size,
material=matgrid)]
geometry = waveguide_geometry + matgrid_geometry
sim = mp.Simulation(resolution=resolution,
cell_size=cell_size,
boundary_layers=boundary_layers,
sources=sources,
geometry=geometry)
mon_sy = w+2*sep-0.1
obj_list = [mpa.EigenmodeCoefficient(sim,
mp.Volume(
center=mp.Vector3(-0.5*sx+dpml+0.2),
size=mp.Vector3(0,mon_sy,0)),
1,
eig_parity=eig_parity),
mpa.EigenmodeCoefficient(sim,
mp.Volume(
center=mp.Vector3(0.5*sx-dpml-0.2,wgy),
size=mp.Vector3(0,mon_sy,0)),
1,
eig_parity=eig_parity),
mpa.EigenmodeCoefficient(sim,
mp.Volume(
center=mp.Vector3(0.5*sx-dpml-0.2,-wgy),
size=mp.Vector3(0,mon_sy,0)),
1,
eig_parity=eig_parity)]
def J(input,top_e, bot_e):
n = top_e.size//2
top = npa.concatenate((npa.ones((n,)),npa.zeros((n,))))
bot = npa.concatenate((npa.zeros((n,)),npa.ones((n,))))
top_pwr = npa.power(npa.abs(top_e/input),2)
bot_pwr = npa.power(npa.abs(bot_e/input),2)
err_top = npa.abs(top_pwr-top)**2
err_bot = npa.abs(bot_pwr-bot)**2
return npa.sum(err_top + err_bot)
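# Note on J above: `input` is the mode coefficient at the source-side monitor,
# while top_e and bot_e are the coefficients in the two output arms, each sampled
# at both design frequencies. The objective targets full transmission of the
# first frequency (the 1.55 um channel) into the top arm and of the second
# (the 1.31 um channel) into the bottom arm, and sums the squared deviations.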
opt = mpa.OptimizationProblem(
simulation=sim,
objective_functions=J,
objective_arguments=obj_list,
design_regions=[matgrid_region],
frequencies=frequencies)
# ---------------------------------------------- #
# design masks, mapping, and objective wrapper
# ---------------------------------------------- #
x = np.linspace(-dx/2,dy/2,Nx)
y = np.linspace(-dx/2,dy/2,Ny)
Y, X = np.meshgrid(x,y)
Z = np.zeros((Nx,Ny))
Si_mask = (
((np.abs(Y) <= w/2) & (X == -dx/2))
+ ((np.abs(Y+wgy) <= w/2) & (X == dx/2))
+ ((np.abs(Y-wgy) <= w/2) & (X == dx/2))
)
SiO2_mask = (((np.abs(Y) >= 0) & (X == -dx/2))
+ ((np.abs(Y) >= 0) & (X == dx/2))
+ ((np.abs(X) >= 0) & (Y == -dx/2))
+ ((np.abs(X) >= 0) & (Y == dx/2))
)
def mapping(x):
x = x.reshape(Nx,Ny)
x = npa.where(Si_mask,1,npa.where(SiO2_mask,0,x))
x = mpa.conic_filter(x,
filter_radius,
design_region_size.x,
design_region_size.y,
design_region_resolution)
#x = (x + npa.rot90(x) + npa.rot90(npa.rot90(x)) + npa.rot90(npa.rot90(npa.rot90(x)))) / 4
x = npa.clip(x,0,1)
return x.flatten()
def f(x, grad, beta):
data.append(x.copy())
#opt.design_regions[0].design_parameters.do_averaging = True
opt.update_design([mapping(x)],beta=beta)
f0, dJ_du = opt()
print(dJ_du.shape)
dJ_du_temp = np.sum(dJ_du,axis=1)
bp_adjsol_grad = tensor_jacobian_product(mapping,0)(x,dJ_du_temp)
if grad.size > 0:
grad[:] = np.squeeze(bp_adjsol_grad)
opt.plot2D(False,eps_parameters={'resolution':100})
if mp.am_master():
plt.savefig(filename+"_geom.png")
if mp.am_master():
plt.figure()
plt.subplot(1,2,1)
plt.imshow(grad.reshape(Nx,Ny))
plt.subplot(1,2,2)
plt.imshow(x.reshape(Nx,Ny))
plt.savefig(filename+"_grad.png")
plt.close("all")
print("f: {}".format(float(np.real(f0))))
results.append(float(np.real(f0)))
beta_history.append(beta)
return float(np.real(f0))
def constraints(result,x,gradient):
tol = 1e-5
beta_mod = 8
def mapping_mod(a):
return mapping(a).reshape(Nx,Ny)
c_solid = lambda a: mpa.constraint_solid(a, c, eta_e,
mapping_mod,
lambda b: mpa.tanh_projection(b,beta_mod,eta),
design_region_resolution) - tol
c_void = lambda a: mpa.constraint_void(a, c, eta_d,
mapping_mod,
lambda b: mpa.tanh_projection(b,beta_mod,eta),
design_region_resolution) - tol
solid = float(np.real(c_solid(x)))
void = float(np.real(c_void(x)))
result[0] = solid
result[1] = void
print("solid: {}, void: {} | ".format(solid,void), result)
if gradient.size > 0:
gradient[0,:] = grad(c_solid)(x)
gradient[1,:] = grad(c_void)(x)
# initial guess
x = np.linspace(-dx/2,dy/2,Nx)
y = np.linspace(-dx/2,dy/2,Ny)
X, Y = np.meshgrid(x,y)
Z = np.zeros((Nx,Ny))
mask = (np.abs(Y) <= w/2) + (np.abs(X) <= w/2)
p = np.ones((Nx,Ny))
p[mask.T] = 1
# -------------------------------------- #
# initial conditions
# -------------------------------------- #
def draw_circles(radius,phasex=0,phasey=0):
r = int(radius*design_region_resolution)
p = 4*r
px = int(phasex*design_region_resolution)
py = int(phasey*design_region_resolution)
x = np.ones((Nx,Ny))
for ix in range(Nx):
for iy in range(Ny):
if ((((ix-px) % (p))-p/2)**2 + (((iy-py) % (p))-p/2)**2 <= r**2):
x[ix,iy] = 0
return x
def draw_ellipse(radiusx,radiusy,phasex=0,phasey=0,alpha=45):
rx = int(radiusx*design_region_resolution)
ry = int(radiusy*design_region_resolution)
p = int(4*np.max([rx,ry]))
px = int(phasex*design_region_resolution)
py = int(phasey*design_region_resolution)
theta = np.deg2rad(alpha)
x = np.ones((Nx,Ny))
for ix in range(Nx):
for iy in range(Ny):
t_x = (ix-px) % (p) - p/2
t_y = (iy-py) % (p) - p/2
if (((t_x*np.cos(theta)+t_y*np.sin(theta))**2/rx**2) + ((t_x*np.cos(theta)-t_y*np.sin(theta))**2/ry**2) <= 1):
x[ix,iy] = 0
return x
# traditional TO
if args.init_condition == 0:
x = mapping(np.ones((Nx,Ny))*0.5)
# small circles
elif args.init_condition == 1:
y = draw_circles(0.1,0,0.1)
print(y)
x = mapping(y)
# large circles
elif args.init_condition == 2:
y = draw_circles(0.25,0.14,0)
x = mapping(y)
# ellipses
elif args.init_condition == 3:
y = draw_ellipse(0.10,0.2,0,0.125)
x = mapping(y)
'''opt.update_design([x],beta=np.inf)
opt.plot2D(eps_parameters={'resolution':100})
plt.show()
quit()'''
if __name__ == '__main__':
algorithm = nlopt.LD_MMA
n = Nx * Ny
scale = 2
if args.init_condition == 0:
beta = [8, 32, np.inf]
else:
scale = 4
beta = [np.inf]
for iters in range(len(beta)):
solver = nlopt.opt(algorithm, n)
solver.set_lower_bounds(0)
solver.set_upper_bounds(1)
solver.set_min_objective(lambda a,g: f(a,g,beta[iters]))
solver.set_maxeval(maxeval)
if iters == len(beta)-1:
solver.set_maxeval(scale*maxeval)
#if iters == len(beta)-1:
# solver.add_inequality_mconstraint(constraints,[0]*2)
x[:] = solver.optimize(x)
data.append(x.copy())
opt.update_design([mapping(x)],beta=beta[-1])
f0, _ = opt(need_gradient=False)
results.append(float(np.real(f0)))
if mp.am_really_master():
np.savez(filename+"_data.npz",data=data,results=results,beta_history=beta_history)
opt.plot2D(False,eps_parameters={'resolution':100})
plt.savefig(filename+"_finalfig.png",dpi=200)
#plt.show()
|
python
|
from functools import wraps
import hmac
import hashlib
import time
import warnings
import logging
import requests
logger = logging.getLogger(__name__)
class BitstampError(Exception):
pass
class TransRange(object):
"""
Enum like object used in transaction method to specify time range
from which to get list of transactions
"""
HOUR = 'hour'
MINUTE = 'minute'
DAY = 'day'
class BaseClient(object):
"""
A base class for the API Client methods that handles interaction with
the requests library.
"""
api_url = {1: 'https://www.bitstamp.net/api/',
2: 'https://www.bitstamp.net/api/v2/'}
exception_on_error = True
def __init__(self, proxydict=None, *args, **kwargs):
self.proxydict = proxydict
def _get(self, *args, **kwargs):
"""
Make a GET request.
"""
return self._request(requests.get, *args, **kwargs)
def _post(self, *args, **kwargs):
"""
Make a POST request.
"""
data = self._default_data()
data.update(kwargs.get('data') or {})
kwargs['data'] = data
return self._request(requests.post, *args, **kwargs)
def _default_data(self):
"""
Default data for a POST request.
"""
return {}
def _construct_url(self, url, base, quote):
"""
Adds the orderbook to the url if base and quote are specified.
"""
if not base and not quote:
return url
else:
url = url + base.lower() + quote.lower() + "/"
return url
def _request(self, func, url, version=1, *args, **kwargs):
"""
Make a generic request, adding in any proxy defined by the instance.
Raises a ``requests.HTTPError`` if the response status isn't 200, and
raises a :class:`BitstampError` if the response contains a json encoded
error message.
"""
return_json = kwargs.pop('return_json', False)
url = self.api_url[version] + url
logger.debug("Request URL: " + url)
        if 'data' in kwargs and 'nonce' in kwargs['data']:
            logger.debug("Request nonce: " + str(kwargs['data']['nonce']))
        if 'proxies' not in kwargs:
            kwargs['proxies'] = self.proxydict
        response = func(url, *args, **kwargs)
        logger.debug("Response Code {} and Reason {}".format(response.status_code, response.reason))
        logger.debug("Response Text {}".format(response.text))
# Check for error, raising an exception if appropriate.
response.raise_for_status()
try:
json_response = response.json()
except ValueError:
json_response = None
if isinstance(json_response, dict):
error = json_response.get('error')
if error:
raise BitstampError(error)
elif json_response.get('status') == "error":
raise BitstampError(json_response.get('reason'))
if return_json:
if json_response is None:
raise BitstampError(
"Could not decode json for: " + response.text)
return json_response
return response
class Public(BaseClient):
def ticker(self, base="btc", quote="usd"):
"""
Returns dictionary.
"""
url = self._construct_url("ticker/", base, quote)
return self._get(url, return_json=True, version=2)
def ticker_hour(self, base="btc", quote="usd"):
"""
Returns dictionary of the average ticker of the past hour.
"""
url = self._construct_url("ticker_hour/", base, quote)
return self._get(url, return_json=True, version=2)
def order_book(self, group=True, base="btc", quote="usd"):
"""
Returns dictionary with "bids" and "asks".
Each is a list of open orders and each order is represented as a list
of price and amount.
"""
params = {'group': group}
url = self._construct_url("order_book/", base, quote)
return self._get(url, params=params, return_json=True, version=2)
def transactions(self, time=TransRange.HOUR, base="btc", quote="usd"):
"""
        Returns transactions from the selected time range.
        The time parameter is one of the values of the TransRange class
        (minute, hour, or day).
"""
params = {'time': time}
url = self._construct_url("transactions/", base, quote)
return self._get(url, params=params, return_json=True, version=2)
def conversion_rate_usd_eur(self):
"""
Returns simple dictionary::
{'buy': 'buy conversion rate', 'sell': 'sell conversion rate'}
"""
return self._get("eur_usd/", return_json=True, version=1)
def trading_pairs_info(self):
"""
Returns list of dictionaries specifying details of each available trading pair::
{
'description':'Litecoin / U.S. dollar',
'name':'LTC/USD',
'url_symbol':'ltcusd',
'trading':'Enabled',
'minimum_order':'5.0 USD',
'counter_decimals':2,
'base_decimals':8
},
"""
return self._get("trading-pairs-info/", return_json=True, version=2)
class Trading(Public):
def __init__(self, username, key, secret, *args, **kwargs):
"""
Stores the username, key, and secret which is used when making POST
requests to Bitstamp.
"""
super(Trading, self).__init__(
username=username, key=key, secret=secret, *args, **kwargs)
self.username = username
self.key = key
self.secret = secret
def get_nonce(self):
"""
Get a unique nonce for the bitstamp API.
This integer must always be increasing, so use the current unix time.
Every time this variable is requested, it automatically increments to
allow for more than one API request per second.
        This function is not thread-safe, however, so if your application makes
        a high level of concurrent API requests, they should all be issued from
        a single thread.
"""
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def _default_data(self, *args, **kwargs):
"""
Generate a one-time signature and other data required to send a secure
POST request to the Bitstamp API.
"""
data = super(Trading, self)._default_data(*args, **kwargs)
data['key'] = self.key
nonce = self.get_nonce()
msg = str(nonce) + self.username + self.key
signature = hmac.new(
self.secret.encode('utf-8'), msg=msg.encode('utf-8'),
digestmod=hashlib.sha256).hexdigest().upper()
data['signature'] = signature
data['nonce'] = nonce
return data
def _expect_true(self, response):
"""
A shortcut that raises a :class:`BitstampError` if the response didn't
just contain the text 'true'.
"""
if response.text == u'true':
return True
raise BitstampError("Unexpected response")
def account_balance(self, base="btc", quote="usd"):
"""
Returns dictionary::
{u'btc_reserved': u'0',
u'fee': u'0.5000',
u'btc_available': u'2.30856098',
u'usd_reserved': u'0',
u'btc_balance': u'2.30856098',
u'usd_balance': u'114.64',
u'usd_available': u'114.64',
---If base and quote were specified:
u'fee': u'',
---If base and quote were not specified:
u'btcusd_fee': u'0.25',
u'btceur_fee': u'0.25',
u'eurusd_fee': u'0.20',
}
        There could be reasons to set base and quote to None (or False),
        because the result will then contain the fees for all currency pairs.
        For backwards compatibility this cannot be the default, however.
"""
url = self._construct_url("balance/", base, quote)
return self._post(url, return_json=True, version=2)
def user_transactions(self, offset=0, limit=100, descending=True,
base=None, quote=None):
"""
Returns descending list of transactions. Every transaction (dictionary)
contains::
{u'usd': u'-39.25',
u'datetime': u'2013-03-26 18:49:13',
u'fee': u'0.20',
u'btc': u'0.50000000',
u'type': 2,
u'id': 213642}
Instead of the keys btc and usd, it can contain other currency codes
"""
data = {
'offset': offset,
'limit': limit,
'sort': 'desc' if descending else 'asc',
}
url = self._construct_url("user_transactions/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def open_orders(self, base="btc", quote="usd"):
"""
Returns JSON list of open orders. Each order is represented as a
dictionary.
"""
url = self._construct_url("open_orders/", base, quote)
return self._post(url, return_json=True, version=2)
def all_open_orders(self):
"""
Returns JSON list of open orders of all currency pairs.
Each order is represented as a dictionary.
"""
return self._post('open_orders/all/', return_json=True, version=2)
def order_status(self, order_id):
"""
Returns dictionary.
- status: 'Finished'
or 'In Queue'
or 'Open'
- transactions: list of transactions
Each transaction is a dictionary with the following keys:
btc, usd, price, type, fee, datetime, tid
or btc, eur, ....
or eur, usd, ....
"""
data = {'id': order_id}
return self._post("order_status/", data=data, return_json=True,
version=1)
def cancel_order(self, order_id, version=1):
"""
Cancel the order specified by order_id.
Version 1 (default for backwards compatibility reasons):
Returns True if order was successfully canceled, otherwise
raise a BitstampError.
Version 2:
Returns dictionary of the canceled order, containing the keys:
id, type, price, amount
"""
data = {'id': order_id}
return self._post("cancel_order/", data=data, return_json=True,
version=version)
def cancel_all_orders(self):
"""
Cancel all open orders.
Returns True if it was successful, otherwise raises a
BitstampError.
"""
return self._post("cancel_all_orders/", return_json=True, version=1)
def buy_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None, ioc_order=False):
"""
Order to buy amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
if ioc_order is True:
data['ioc_order'] = True
url = self._construct_url("buy/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_market_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def buy_instant_order(self, amount, base="btc", quote="usd"):
"""
Order to buy amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("buy/instant/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_limit_order(self, amount, price, base="btc", quote="usd", limit_price=None, ioc_order=False):
"""
Order to sell amount of bitcoins for specified price.
"""
data = {'amount': amount, 'price': price}
if limit_price is not None:
data['limit_price'] = limit_price
if ioc_order is True:
data['ioc_order'] = True
url = self._construct_url("sell/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def sell_market_order(self, amount, base="btc", quote="usd"):
"""
Order to sell amount of bitcoins for market price.
"""
data = {'amount': amount}
url = self._construct_url("sell/market/", base, quote)
return self._post(url, data=data, return_json=True, version=2)
def check_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount included in given
bitstamp code.
"""
data = {'code': code}
return self._post("check_code/", data=data, return_json=True,
version=1)
def redeem_bitstamp_code(self, code):
"""
Returns JSON dictionary containing USD and BTC amount added to user's
account.
"""
data = {'code': code}
return self._post("redeem_code/", data=data, return_json=True,
version=1)
def withdrawal_requests(self, timedelta = 86400):
"""
Returns list of withdrawal requests.
Each request is represented as a dictionary.
By default, the last 24 hours (86400 seconds) are returned.
"""
data = {'timedelta': timedelta}
return self._post("withdrawal_requests/", return_json=True, version=1, data=data)
def bitcoin_withdrawal(self, amount, address):
"""
Send bitcoins to another bitcoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bitcoin_withdrawal/", data=data, return_json=True,
version=1)
def bitcoin_deposit_address(self):
"""
Returns bitcoin deposit address as unicode string
"""
return self._post("bitcoin_deposit_address/", return_json=True,
version=1)
def unconfirmed_bitcoin_deposits(self):
"""
Returns JSON list of unconfirmed bitcoin transactions.
Each transaction is represented as dictionary:
amount
bitcoin amount
address
deposit address used
confirmations
number of confirmations
"""
return self._post("unconfirmed_btc/", return_json=True, version=1)
def litecoin_withdrawal(self, amount, address):
"""
Send litecoins to another litecoin wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("ltc_withdrawal/", data=data, return_json=True,
version=2)
def litecoin_deposit_address(self):
"""
Returns litecoin deposit address as unicode string
"""
return self._post("ltc_address/", return_json=True,
version=2)
def ethereum_withdrawal(self, amount, address):
"""
Send ethers to another ether wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("eth_withdrawal/", data=data, return_json=True,
version=2)
def ethereum_deposit_address(self):
"""
Returns ethereum deposit address as unicode string
"""
return self._post("eth_address/", return_json=True,
version=2)
def ripple_withdrawal(self, amount, address, currency):
"""
Returns true if successful.
"""
data = {'amount': amount, 'address': address, 'currency': currency}
response = self._post("ripple_withdrawal/", data=data,
return_json=True)
return self._expect_true(response)
def ripple_deposit_address(self):
"""
Returns ripple deposit address as unicode string.
"""
return self._post("ripple_address/", version=1, return_json=True)[
"address"]
def xrp_withdrawal(self, amount, address, destination_tag=None):
"""
Sends xrps to another xrp wallet specified by address. Returns withdrawal id.
"""
data = {'amount': amount, 'address': address}
if destination_tag:
data['destination_tag'] = destination_tag
return self._post("xrp_withdrawal/", data=data, return_json=True,
version=2)["id"]
def xrp_deposit_address(self):
"""
Returns ripple deposit address and destination tag as dictionary.
Example: {u'destination_tag': 53965834, u'address': u'rDsbeamaa4FFwbQTJp9Rs84Q56vCiWCaBx'}
"""
return self._post("xrp_address/", version=2, return_json=True)
def bch_withdrawal(self, amount, address):
"""
Send bitcoin cash to another bitcoin cash wallet specified by address.
"""
data = {'amount': amount, 'address': address}
return self._post("bch_withdrawal/", data=data, return_json=True,
version=2)
def bch_deposit_address(self):
"""
Returns bitcoin cash deposit address as unicode string
"""
return self._post("bch_address/", return_json=True,
version=2)
def transfer_to_main(self, amount, currency, subaccount=None):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,}
if subaccount is not None:
data['subAccount'] = subaccount
return self._post("transfer-to-main/", data=data, return_json=True,
version=2)
def transfer_from_main(self, amount, currency, subaccount):
"""
Returns dictionary with status.
subaccount has to be the numerical id of the subaccount, not the name
"""
data = {'amount': amount,
'currency': currency,
'subAccount': subaccount,}
return self._post("transfer-from-main/", data=data, return_json=True,
version=2)
# Backwards compatibility
class BackwardsCompat(object):
"""
Version 1 used lower case class names that didn't raise an exception when
    Bitstamp returned a response indicating an error had occurred.
Instead, it returned a tuple containing ``(False, 'The error message')``.
"""
wrapped_class = None
def __init__(self, *args, **kwargs):
"""
Instantiate the wrapped class.
"""
self.wrapped = self.wrapped_class(*args, **kwargs)
class_name = self.__class__.__name__
warnings.warn(
"Use the {} class rather than the deprecated {} one".format(
class_name.title(), class_name),
DeprecationWarning, stacklevel=2)
def __getattr__(self, name):
"""
Return the wrapped attribute. If it's a callable then return the error
tuple when appropriate.
"""
attr = getattr(self.wrapped, name)
if not callable(attr):
return attr
@wraps(attr)
def wrapped_callable(*args, **kwargs):
"""
Catch ``BitstampError`` and replace with the tuple error pair.
"""
try:
return attr(*args, **kwargs)
except BitstampError as e:
return False, e.args[0]
return wrapped_callable
class public(BackwardsCompat):
"""
Deprecated version 1 client. Use :class:`Public` instead.
"""
wrapped_class = Public
class trading(BackwardsCompat):
"""
Deprecated version 1 client. Use :class:`Trading` instead.
"""
wrapped_class = Trading
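# Hypothetical usage sketch (the credentials below are placeholders, not real
# values):
#   public = Public()
#   print(public.ticker(base="btc", quote="usd"))
#   trading = Trading(username="123456", key="API_KEY", secret="API_SECRET")
#   print(trading.account_balance())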
|
python
|
"""
Adaptfilt
=========
Adaptive filtering module for Python. For more information please visit
https://github.com/Wramberg/adaptfilt or https://pypi.python.org/pypi/adaptfilt
"""
__version__ = '0.2'
__author__ = "Jesper Wramberg & Mathias Tausen"
__license__ = "MIT"
# Ensure user has numpy
try:
import numpy
except ImportError:
raise ImportError('Failed to import numpy - please make sure this is\
available before using adaptfilt')
# Import functions directly into adaptfilt namespace
from lms import lms
from nlms import nlms
from nlmsru import nlmsru
from ap import ap
from misc import mswe
def rundoctests(verbose=False):
"""
Executes doctests
"""
import doctest
import lms as testmod1
import nlms as testmod2
import ap as testmod3
import misc as testmod4
import nlmsru as testmod5
lmsres = doctest.testmod(testmod1, verbose=verbose)
nlmsres = doctest.testmod(testmod2, verbose=verbose)
apres = doctest.testmod(testmod3, verbose=verbose)
miscres = doctest.testmod(testmod4, verbose=verbose)
nlmsrures = doctest.testmod(testmod5, verbose=verbose)
print ' LMS: ', lmsres
print ' NLMS: ', nlmsres
print 'NLMSRU: ', nlmsrures
print ' AP: ', apres
print ' MISC: ', miscres
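# Minimal usage sketch (the signal construction below is illustrative and not
# taken from the package's own examples):
#   import numpy as np
#   u = np.random.randn(1000)                   # input signal
#   d = np.convolve(u, np.ones(5) / 5.0)[:1000] # desired signal
#   y, e, w = nlms(u, d, M=20, step=0.1)        # output, error, final coefficients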
|
python
|
from scipy.io import wavfile
#Preprocessing the data
# We converted all the wav files to numpy arrays so that we could preprocess on
# multiple computers while training ran on the much faster computer at the time.
def convertNumpyToWavFile(filename, rate, data):
wavfile.write(filename, rate, data);
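# Hypothetical round-trip sketch (the file names below are placeholders, not
# paths from the original pipeline):
if __name__ == "__main__":
    rate, samples = wavfile.read("example.wav")
    convertNumpyToWavFile("example_copy.wav", rate, samples)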
|
python
|
# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import sys
import numbers
from collections.abc import MutableMapping, Sequence, Mapping
import numpy as np
class DetDataView(MutableMapping):
"""Class that applies views to a DetDataManager instance."""
def __init__(self, obj, slices):
self.obj = obj
self.slices = slices
# Mapping methods
def __getitem__(self, key):
vw = [self.obj.detdata[key].view((slice(None), x)) for x in self.slices]
return vw
    def __delitem__(self, key):
        raise RuntimeError(
            "Cannot delete views of detdata, since they are created on demand"
        )
    def __setitem__(self, key, value):
vw = [self.obj.detdata[key].view((slice(None), x)) for x in self.slices]
if isinstance(value, numbers.Number) or len(value) == 1:
# This is a numerical scalar or identical array for all slices
for v in vw:
v[:] = value
else:
# One element of value for each slice
if len(value) != len(vw):
                msg = "when assigning to a view, you must provide a single value or one value per view interval"
                raise RuntimeError(msg)
            for v, val in zip(vw, value):
                v[:] = val
def __iter__(self):
return iter(self.slices)
def __len__(self):
return len(self.slices)
def __repr__(self):
val = "<DetDataView {} slices".format(len(self.slices))
val += ">"
return val
class SharedView(MutableMapping):
"""Class that applies views to a SharedDataManager instance."""
def __init__(self, obj, slices):
self.obj = obj
self.slices = slices
# Mapping methods
def __getitem__(self, key):
vw = [self.obj.shared[key][x] for x in self.slices]
return vw
def __delitem__(self, key):
raise RuntimeError(
"Cannot delete views of shared data, since they are created on demand"
)
def __setitem__(self, key, value):
raise RuntimeError(
"Cannot set views of shared data- use the set() method on the original."
)
def __iter__(self):
return iter(self.slices)
def __len__(self):
return len(self.slices)
def __repr__(self):
val = "<SharedView {} slices".format(len(self.slices))
val += ">"
return val
class View(Sequence):
"""Class representing a list of views into any of the local observation data."""
def __init__(self, obj, key):
self.obj = obj
self.key = key
if key is None:
# The whole observation
self.slices = [slice(None)]
else:
# Compute a list of slices for these intervals
self.slices = [
slice(x.first, x.last + 1, 1) for x in self.obj.intervals[key]
]
self.detdata = DetDataView(obj, self.slices)
self.shared = SharedView(obj, self.slices)
def __getitem__(self, key):
return self.slices[key]
def __contains__(self, item):
for sl in self.slices:
if sl == item:
return True
return False
def __iter__(self):
return iter(self.slices)
def __len__(self):
return len(self.slices)
def __repr__(self):
s = "["
if len(self.slices) > 1:
for it in self.slices[0:-1]:
s += str(it)
s += ", "
if len(self.slices) > 0:
s += str(self.slices[-1])
s += "]"
return s
class ViewManager(MutableMapping):
"""Internal class to manage views into observation data objects."""
def __init__(self, obj):
self.obj = obj
if not hasattr(obj, "_views"):
self.obj._views = dict()
# Mapping methods
def __getitem__(self, key):
view_name = key
if view_name is None:
# This could be anything, just has to be unique
view_name = "ALL_OBSERVATION_SAMPLES"
if view_name not in self.obj._views:
# View does not yet exist, create it.
if key is not None and key not in self.obj.intervals:
raise KeyError(
"Observation does not have interval list named '{}'".format(key)
)
self.obj._views[view_name] = View(self.obj, key)
# Register deleter callback
if key is not None:
self.obj.intervals.register_delete_callback(key, self.__delitem__)
return self.obj._views[view_name]
def __delitem__(self, key):
del self.obj._views[key]
def __setitem__(self, key, value):
raise RuntimeError("Cannot set views directly- simply access them.")
def __iter__(self):
return iter(self.obj)
def __len__(self):
return len(self.obj)
def clear(self):
self.obj._views.clear()
class ViewInterface(object):
"""Descriptor class for accessing the views in an observation.
You can get a view of the data for a particular interval list just by accessing
it with the name of the intervals object you want:
obs.view["name_of_intervals"]
Then you can use this to provide a view into either detdata or shared objects within
the observation. For example:
print(obs.view["name_of_intervals"].detdata["signal"])
obs.view["bad_pointing"].shared["boresight"][:] = np.array([0., 0., 0., 1.])
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __get__(self, obj, cls=None):
if obj is None:
return self
else:
if not hasattr(obj, "_viewmgr"):
obj._viewmgr = ViewManager(obj)
return obj._viewmgr
def __set__(self, obj, value):
raise AttributeError("Cannot reset the view interface")
def __delete__(self, obj):
raise AttributeError("Cannot delete the view interface")
|
python
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# @Time : 2021/8/2 11:52
# @Author : NoWords
# @FileName: validators.py
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
def validate_even(value):
"""
    Even number validator
:param value:
:return:
"""
if value % 2 != 0:
raise ValidationError(
_('%(value)s is not an even number'),
params={'value': value},
)
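# Hypothetical usage sketch: attaching the validator to a model field. The model
# and field names below are illustrative, not from the original project.
#
# from django.db import models
#
# class EvenNumber(models.Model):
#     value = models.IntegerField(validators=[validate_even])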
|
python
|
from jsonpickle import encode as json_encode
from requests import post as http_post, get as http_get
from Client.Logger import logger
class Shipper(object):
"""
Responsible for sending hardware_objects_lib.Computer objects
to the centralized server which handles these data.
"""
def __init__(self, host, port, timeout=5):
self.timeout = timeout
common_endpoint_format = "http://{host}:{port}/{endpoint}"
self._server_ship_address = common_endpoint_format.format(
host=host, port=port,
endpoint="api/add_measurement"
)
self._server_ping_address = common_endpoint_format.format(
host=host, port=port,
endpoint="api/ping"
)
self._ping_server()
def _ping_server(self):
try:
response = http_get(self._server_ping_address, timeout=self.timeout)
assert response.status_code == 200 and response.text == 'pong'
except Exception as ex:
logger.critical("Pinging the server failed! Shipping will probably fail!")
logger.critical("Exception msg: [%s]" % str(ex))
def ship(self, computer_obj):
try:
computer_json = json_encode(computer_obj, unpicklable=True)
payload = {
'payload': computer_json
}
response = http_post(self._server_ship_address, data=payload, timeout=self.timeout)
if response.status_code == 200:
pass
elif response.status_code == 202:
logger.warning('The server ignored the shipped measurement')
else:
logger.warning(
'Server responded with status code %i and message %s' % (response.status_code, response.text))
except Exception as ex:
logger.critical("Cannot ship to the server")
logger.critical("Exception msg: [%s]" % str(ex))
|
python
|
#!/usr/bin/env python
# @Copyright 2007 Kristjan Haule
'''
Classes to handle reading/writing of case.indmf* files.
'''
import operator, os, re
from copy import deepcopy
from scipy import *
from numpy import array, log
import wienfile
from utils import L2str, L2num
qsplit_doc = ''' Qsplit Description
------ ------------------------------------------------------------
0 average GF, non-correlated
1 |j,mj> basis, no symmetry, except time reversal (-jz=jz)
-1 |j,mj> basis, no symmetry, not even time reversal (-jz=jz)
2 real harmonics basis, no symmetry, except spin (up=dn)
-2 real harmonics basis, no symmetry, not even spin (up=dn)
3 t2g orbitals
-3 eg orbitals
4 |j,mj>, only l-1/2 and l+1/2
5 axial symmetry in real harmonics
6 hexagonal symmetry in real harmonics
7 cubic symmetry in real harmonics
8 axial symmetry in real harmonics, up different than down
9 hexagonal symmetry in real harmonics, up different than down
10 cubic symmetry in real harmonics, up different then down
11 |j,mj> basis, non-zero off diagonal elements
12 real harmonics, non-zero off diagonal elements
13 J_eff=1/2 basis for 5d ions, non-magnetic with symmetry
14 J_eff=1/2 basis for 5d ions, no symmetry
------ ------------------------------------------------------------'''
projector_doc=""" Projector Description
------ ------------------------------------------------------------
1 projection to the solution of Dirac equation (to the head)
2 projection to the Dirac solution, its energy derivative,
LO orbital, as described by P2 in PRB 81, 195107 (2010)
4 similar to projector-2, but takes fixed number of bands in
some energy range, even when chemical potential and
          MT-zero moves (follows the band with a certain index)
5 fixed projector, which is written to projectorw.dat. You can
generate projectorw.dat with the tool wavef.py
6 similar to projector 5, except projector is here momentum dependent,
i.e., similarly to Wannier orbital construction
------ ------------------------------------------------------------
"""
def expand_intlist(input):
'''Expand out any ranges in user input of integer lists.
Example: input = "1,2,4-6"
output = [1, 2, 4, 5, 6]'''
def parse1(x):
y = x.split('-')
return [int(x)] if len(y) == 1 else range(*[int(y[0]), int(y[1])+1])
return reduce(operator.add, [parse1(x) for x in input.split(',')])
def divmodulo(x,n):
"We want to take modulo and divide in fortran way, so that it is compatible with fortran code"
return ( sign(x)* (abs(x)/n) , sign(x)*mod(abs(x),n))
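# For example, divmodulo(-7, 3) gives (-2, -1), truncating toward zero as Fortran
# does, whereas Python's own divmod(-7, 3) gives (-3, 2).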
class IndmfBase:
'''Conventions used in naming data structures stored in this class:
i = index
u = unique
cp = correlated problem (either single-site or cluster)
The data structures are dictionaries:
self.atoms[iatom] = (locrot_shift, new_xyz, shift_vec)
self.locrot[iatoms] = (locrot,shift)
self.cps[icp] = [(iatom_1, L_1, qsplit_1), (iatom_2, L_2, qsplit_2), ...]
self.ucps[iucp] = [icp_1, icp_2, ...]
Derived classes are responsible for filling in self.ucps
'''
def __init__(self, case):
self.case = case
self.extn = 'indmf' # derived classes should override this
self.initvars()
self.__create_inverses()
def member_vars(self):
# list of tuples (varname, default value)
        # these are deepcopied when we copy_construct()
return [
('hybr_emin', -10.0 ), # (eV) range of hybridization to pass on to impurity problem
('hybr_emax', 10.0 ), # (eV)
('Qrenorm', 1 ), # whether or not to renormalize (what are we renormalizing?)
('projector', 5 ), # type of projection onto correlated space (0,1,2,3,4)
('matsubara', 0 ), # 0 = real axis, 1 = imaginary axis
('broadc', 0.025), # (eV) broadening for correlated (applied to what -- self-energy?)
('broadnc', 0.025), # (eV) broadening for noncorrelated
('om_npts', 200 ), # number and range of default omega mesh (if no sig.inp file given)
('om_emin', -3.0 ), # (eV)
('om_emax', 1.0 ), # (eV)
('broken_sym', 0 ), # FM, AFM or ferrimagnetic run
('atoms', {} ),
('locrot', {} ),
('cps', {} ),
('ucps', {} ),
('symclasses', {} ), # group cps forming each ucp into symmetry classes (e.g. spin-up vs. spin-down)
('Lsa', [] ),
('icpsa', [] ),
]
def initvars(self):
for attr,val in self.member_vars():
setattr(self, attr, val)
def copy_construct(self, c):
myattr = [attr for attr,val in self.member_vars()]
for attr in dir(c):
if attr in myattr:
setattr(self, attr, deepcopy(getattr(c, attr)))
def __create_inverses(self):
class Iucps:
def __getitem__(s,icp):
return [iucp for iucp,icps in self.ucps.iteritems() if icp in icps][0]
self.iucps = Iucps()
class Icps:
def __getitem__(s,iatom):
return [icp for icp,cp in self.cps.iteritems() if iatom in [iat for iat,L,qsplit in cp]]
self.icps = Icps()
def filename(self):
return self.case + '.' + self.extn
def file_exists(self):
return os.path.isfile(self.filename())
def readlines(self, filename = None):
fname = filename if filename else self.filename()
findmf = open(fname, 'r')
lines = [line.split('#')[0].strip() for line in findmf.readlines()] # strip comments
findmf.close()
return (line for line in lines if line) # strip blank lines & create generator expression
def parse_head(self, lines):
self.hybr_emin, self.hybr_emax, self.Qrenorm, self.projector = [float(x) for x in lines.next().split()]
self.matsubara, self.broadc, self.broadnc, self.om_npts, self.om_emin, self.om_emax = [float(e) for e in lines.next().split()]
self.matsubara = int(self.matsubara) # recast these to integers
self.om_npts = int(self.om_npts)
self.Qrenorm = int(self.Qrenorm)
self.projector = int(self.projector)
if self.projector>=5:
self.hybr_emin = int(self.hybr_emin)
self.hybr_emax = int(self.hybr_emax)
def parse_atomlist(self, lines):
self.Lsa=[]
self.icpsa=[]
natom = int(lines.next())
for i in range(natom):
dat=lines.next().split()
iatom, nL, locrot_shift = [int(x) for x in dat[:3]]
Rmt2=0
if len(dat)>3:
Rmt2 = float(dat[3])
(shift,locrot) = divmodulo(locrot_shift,3)
if locrot<0:
if locrot==-2:
locrot=3*nL
else:
locrot=3
(Ls, qsplits, icps) = (zeros(nL,dtype=int), zeros(nL,dtype=int), zeros(nL,dtype=int))
for il in range(nL):
(Ls[il], qsplits[il], icps[il]) = map(int, lines.next().split()[:3])
self.Lsa.append( Ls )
self.icpsa.append( icps )
new_xyz = [[float(x) for x in lines.next().split()] for loro in range(abs(locrot))]
shift_vec = [float(x) for x in lines.next().split()] if shift else None
#print 'new_xyz=', new_xyz
#print 'shift_vec=', shift_vec
self.locrot[iatom] = (locrot, shift)
self.atoms[iatom] = (locrot_shift, new_xyz, shift_vec, Rmt2)
for icp, L, qsplit in zip(icps, Ls, qsplits):
if self.cps.has_key(icp):
self.cps[icp] += [(iatom, L, qsplit)]
else:
self.cps[icp] = [(iatom, L, qsplit)]
def write_head(self, lines):
lines += [
("%f %f %d %d" % (self.hybr_emin, self.hybr_emax, self.Qrenorm, self.projector), "hybridization Emin and Emax, measured from FS, renormalize for interstitials, projection type"),
("%1d %g %g %d %f %f" % (self.matsubara, self.broadc, self.broadnc, self.om_npts, self.om_emin, self.om_emax),
"matsubara, broadening-corr, broadening-noncorr, nomega, omega_min, omega_max (in eV)")
]
def write_atomlist(self, lines):
# create flat list of correlated orbitals (tricky because may have cluster problems)
corbs = [[(icp,iatom,L,qsplit) for iatom,L,qsplit in v] for icp,v in self.cps.iteritems()]
corbs = reduce(operator.add, corbs)
# list of atom-indices of correlated atoms
icatoms = list(set(iatom for icp,iatom,L,qsplit in corbs))
icatoms.sort()
lines.append((str(len(icatoms)), "number of correlated atoms"))
for iatom in icatoms:
locrot_shift, new_xyz, shift_vec, Rmt2 = self.atoms[iatom]
orbs = [(icp,L,qsplit) for icp,iat,L,qsplit in corbs if iat==iatom]
#locrot_shift = 3+locrot if shift_vec else locrot
if Rmt2>0:
atom_header = ("%-3d %3d %3d %f" % (iatom, len(orbs), locrot_shift, Rmt2), "iatom, nL, locrot Rmt2")
else:
atom_header = ("%-3d %3d %3d" % (iatom, len(orbs), locrot_shift), "iatom, nL, locrot")
lines.append(atom_header)
for icp,L,qsplit in orbs:
orbstring = ("%3d %3d %3d" % (L, qsplit, icp), "L, qsplit, cix")
lines.append(orbstring)
locrot_labels = ["new x-axis", "new y-axis", "new z-axis"][:len(new_xyz)]
for vec,label in zip(new_xyz, locrot_labels):
lines.append( ("%11.8f %11.8f %11.8f" % tuple(vec), label) )
if shift_vec:
lines.append( ("%4s %4s %4s" % tuple(shift_vec), "real-space shift in atom position") )
def format(self, lines):
# merge comments with values
comment_column = max([len(entry) for entry,comment in lines])
format = '%-' + str(comment_column) + 's # %s\n'
return [format % line for line in lines]
def writelines(self, text, filename = None):
fname = filename if filename else self.filename()
f = open(fname, 'w')
f.writelines(text)
f.close()
return fname
class Indmf(IndmfBase):
def __init__(self, case):
IndmfBase.__init__(self, case)
self.extn = 'indmf'
def read(self):
lines = self.readlines()
self.parse_head(lines)
# read ucp = {cp1, cp2, ...} arrays
nucp = int(lines.next())
for i in range(nucp):
line = [int(x) for x in lines.next().split()]
iucp = line[0]
self.ucps[iucp] = line[1:]
self.parse_atomlist(lines)
def write(self):
lines = []
self.write_head(lines)
# write ucp = {cp1, cp2, ...} arrays
lines.append((str(len(self.ucps)), "number of nonequivalent correlated problems"))
for iucp,icps in self.ucps.iteritems():
entry = ("%3d " % (iucp,) + ' '.join([str(icp) for icp in icps]), "iucp, cix's")
lines.append(entry)
self.write_atomlist(lines)
text = self.format(lines)
self.writelines(text)
def user_continue(self, prompt = "Do you want to continue; or edit again? (c/e): "):
while True:
userin = raw_input(prompt).strip().lower()
if userin in ['c', '', 'e']:
break
else:
print 'Invalid input.'
return userin == 'c' or userin == ''
def orb_strings(self, cp, anames):
'''Given list of orbitals, creates string with atomname, iatom and L for each orbital.'''
orbstrings = []
for iatom,L,qsplit in cp:
orbstrings.append("%s%d %s" % (anames[iatom], iatom, L2str(L)))
return orbstrings
def user_input(self, inpt={}):
'''Conventions used in this function:
n = nonequivalent cp = correlated problem
c = correlated orb = orbital
i = index (into list)
The intermediate (temporary) data structures:
catoms[icatom] = iatom
corbs[icorb] = (iatom, L)
qsplits[icorb] = qsplit
Internal indices run from 0, user input indices run from 1.
'''
self.initvars() # clear old data (if any)
w = wienfile.Struct(self.case) # parse WIEN2k struct file
anames = [None] + w.flat(w.aname) # pad to index from 1; flat list of atom names
if inpt: print "(ca) ",
print "There are %d atoms in the unit cell:" % sum(w.mult)
for i,name in enumerate(anames[1:]):
print "%3d %s" % (i+1, name)
while True:
if inpt:
if inpt.has_key('ca'):
userin = inpt['ca']
else:
userin = '1'
else:
userin = raw_input("Specify correlated atoms (ex: 1-4,7,8): ")
catoms = expand_intlist(userin)
print "You have chosen the following atoms to be correlated:"
for iatom in catoms:
print "%3d %s" % (iatom, anames[iatom])
if inpt: break
if self.user_continue():
break
# currently there's no user interface to input local rotations
for iatom in catoms:
locrot_shift = 0
new_xyz = []
shift_vec = []
Rmt2=0
self.atoms[iatom] = (locrot_shift, new_xyz, shift_vec, Rmt2)
self.locrot[iatom] = (0,0)
print
while True:
if inpt: print '(ot) ',
print 'For each atom, specify correlated orbital(s) (ex: d,f):'
corbs = []
if inpt:
if inpt.has_key('ot'):
user_dat = inpt['ot'].split(',') # should be given as d,d,d for three atoms
else:
user_dat = 'd,'*(len(catoms)-1)+'d'
if len(user_dat) < len(catoms) :
                    print 'ERROR in input : There are '+str(len(catoms))+' correlated atoms, so the same number of orbital types is required. Given input=', user_dat
for orb in user_dat:
if orb not in ['s','p','d','f']:
print 'ERROR in input : Correlated orbital type '+orb+' is not allowed. Must be one of s,p,d,f'
for ii,iatom in enumerate(catoms):
prompt = "%3d %s: " % (iatom, anames[iatom])
if inpt:
userin = user_dat[ii]
else:
userin = raw_input(prompt)
for orb in userin.split(','):
entry = (iatom, L2num(orb.strip()))
corbs.append(entry)
print "You have chosen to apply correlations to the following orbitals:"
for icorb, (iatom, L) in enumerate(corbs):
print "%3d %s-%d %s" % (icorb+1, anames[iatom], iatom, L2str(L))
if inpt : break
if self.user_continue(): break
print
while True:
if inpt: print '(qs) ',
print "Specify qsplit for each correlated orbital (default = 0):"
print qsplit_doc
if inpt:
if inpt.has_key('qs'):
user_dat = inpt['qs'].split(',') # should be given as 2,2,2 for three atoms
else:
user_dat = ['0']*len(catoms)
if len(user_dat) < len(catoms) :
                    print 'ERROR in input : There are '+str(len(catoms))+' correlated atoms, so the same number of Qsplit entries is required. Given input=', user_dat
qsplits = []
for icorb, (iatom, L) in enumerate(corbs):
prompt = "%3d %s-%d %s: " % (icorb+1, anames[iatom], iatom, L2str(L))
if inpt:
userin = user_dat[icorb]
else:
userin = raw_input(prompt).strip()
qsplit = 0 if userin == '' else int(userin)
qsplits.append(qsplit)
print "You have chosen the following qsplits:"
for icorb, (iatom, L) in enumerate(corbs):
print "%3d %s-%d %s: %d" % (icorb+1, anames[iatom], iatom, L2str(L), qsplits[icorb])
if inpt : break
if self.user_continue(): break
print
while True:
if inpt: print '(p) ',
print "Specify projector type (default = 5):"
print projector_doc,
if inpt:
if inpt.has_key('p'):
userin = inpt['p']
else:
userin = '5'
self.projector = 5 if userin == '' else int(userin)
print '> ', self.projector
else:
userin = raw_input("> ").strip()
self.projector = 5 if userin == '' else int(userin)
print self.projector
if self.projector > 4:
import glob
strfile = self.case+'.struct'
enefiles = glob.glob(self.case+'.energyso')+glob.glob(self.case+'.energyso_'+'*')+glob.glob(self.case+'.energy') + glob.glob(self.case+'.energy_'+'*')
enefiles = filter(lambda fil: os.path.getsize(fil)>0, enefiles) # Remove empty files
if len(enefiles)==0:
print 'WARNING: Energy files are not present in this directory. Please generate/copy case.energy files here when using projector 5.'
print
if inpt: break
if self.user_continue(): break
print
if (len(corbs)>1): # cluster only if more than one atom correlated
if inpt: # non-interactive mode
if inpt.has_key('cl'):
userin = 'y'
else: # default is no cluster-dmft
userin = 'n'
print "(cl) Do you want to group any of these orbitals into cluster-DMFT problems? (y/n): ", userin
else: # interactive mode
userin = raw_input("Do you want to group any of these orbitals into cluster-DMFT problems? (y/n): ").strip().lower()
else:
userin = 'n'
if userin == 'y':
while True:
#if inpt: '(cl) ',
print "Enter the orbitals forming each cluster-DMFT problem, separated by spaces"
#userin = inpt['cl']
if inpt: # non-interactive mode
if inpt.has_key('cl'):
userin = inpt['cl']
else: # default = 1 2 3 4 ...
userin = ' '.join([str(icorb+1) for icorb,(iatom,L) in enumerate(corbs)])
print userin
else:
userin = raw_input("(ex: 1,2 3,4 5-8): ")
expanded = [expand_intlist(group) for group in userin.split()]
expandedflat = reduce(operator.add, expanded)
# add orbitals not in CDMFT problems
icp = 1
for icorb,(iatom,L) in enumerate(corbs):
if icorb+1 not in expandedflat:
self.cps[icp] = [(iatom, L, qsplits[icorb])]
icp += 1
# then add orbitals that are part of CDMFT problems
for group in expanded:
self.cps[icp] = []
for icorb in group:
iatom, L = corbs[icorb-1]
self.cps[icp] += [(iatom, L, qsplits[icorb-1])]
icp += 1
print "Your choices give the following correlated problems:"
for icp,cp in self.cps.iteritems():
orbstrings = self.orb_strings(cp, anames)
print "%2d (%s)" % (icp, ', '.join(orbstrings))
if inpt: break
if self.user_continue(): break
else:
for icorb,(iatom,L) in enumerate(corbs):
icp = icorb+1
self.cps[icp] = [(iatom, L, qsplits[icorb])]
print
if (len(corbs)>1):
while True:
if inpt: print '(us) ',
print "Enter the correlated problems forming each unique correlated"
if inpt: # non-interactive mode
if inpt.has_key('us'): # non-default value
userin = inpt['us']
else: # if names of two atoms are the same, default = 1,2 otherwise default = 1 2
# self.ucps = { 1: [1], 2: [2], 3: [3],... }
userin=''
atom_names = [anames[iatom] for icorb,(iatom,L) in enumerate(corbs)]
#print 'atom_names=', atom_names
# If two atoms have the same name, we choose them to be equivalent.
# This might not be the case in general, but then the user should give explicit input
userin='1'
for i in range(1,len(atom_names)):
if atom_names[i] == atom_names[i-1]:
userin += ','+str(i+1)
else:
userin += ' '+str(i+1)
print userin
else:
userin = raw_input("problem, separated by spaces (ex: 1,3 2,4 5-8): ")
for i,group in enumerate(userin.split()):
self.ucps[i+1] = expand_intlist(group)
print
print "Each set of equivalent correlated problems are listed below:"
for iucp,ucp in self.ucps.iteritems():
cpstrings = ['(%s)' % ', '.join(self.orb_strings(self.cps[icp], anames)) for icp in ucp]
print "%3d %s are equivalent." % (iucp, ' '.join(cpstrings))
if inpt: break
if self.user_continue(): break
self.ucps = {} # reset
print
else:
self.ucps = {1: [1]}
#userin = raw_input("Broken symmetry run? (y/n): ").strip().lower()
#if userin == 'y':
# self.broken_sym = True
# userin = int(raw_input("What type of broken symmetry (1 = FM, 2 = AFM, 3 = spiral, ferrimagnetic, etc.)?: ").strip())
# if userin == 1:
# # FM run
# pass
# elif userin == 2:
# # AFM
# print "Not FM, so must be AFM, spiral or ferrimagnetic run."
# while True:
# for iucp,ucp in self.ucps.iteritems():
# print "For unique correlated problem %d containing:" % iucp
# cpstrings = ['%s' % ', '.join(self.orb_strings(self.cps[icp], anames)) for icp in ucp]
# # print out enumerated list of orbitals forming ucp
# print " ", '\n '.join(cpstrings)
# userin = raw_input("Group correlated orbitals into symmetry classes, separated by spaces (ex: 1,3 2,4 5-8): ").strip().lower()
# self.symclasses[iucp] = expand_intlist(userin)
# elif userin == 3:
# # spiral or ferrimagnetic run
# pass
# else:
# # bad user input
# pass
if inpt: print '(hr) ',
print "Range (in eV) of hybridization taken into account in impurity"
if inpt: # non-interactive mode
print "problems; default %.1f, %.1f: " % (self.hybr_emin, self.hybr_emax)
if inpt.has_key('hr'): # non-default value
userin = inpt['hr']
else:
userin = str(self.hybr_emin) +','+str(self.hybr_emax)
print userin
else:
userin = raw_input("problems; default %.1f, %.1f: " % (self.hybr_emin, self.hybr_emax))
if userin.strip():
self.hybr_emin, self.hybr_emax = [float(e) for e in userin.split(',')]
else:
print self.hybr_emin, self.hybr_emax
print
if inpt: # non-interactive mode
print "(a) Perform calculation on real; or imaginary axis? (i/r): (default=i)"
if inpt.has_key('a'): # non-default value
userin = inpt['a']
else:
userin = 'i'
print userin
else:
userin = raw_input("Perform calculation on real; or imaginary axis? (i/r): (default=i)").strip().lower()
if userin=='r':
self.matsubara = 0
if not inpt: print 'r'
else:
self.matsubara = 1
if not inpt: print 'i'
print
#self.matsubara = 1 if userin == 'i' else 0
class Indmfl(IndmfBase):
'''Class for case.indmfl file.
Additional member variables/data structures:
self.siginds[icp] = sigind
self.cftrans[icp] = cftrans
self.legends[icp] = legends
EF = fermi level in eV
'''
def __init__(self, case, extn='indmfl'):
IndmfBase.__init__(self, case)
self.extn = extn #'indmfl'
# Finding the chemical potential
EF_exists = os.path.isfile('EF.dat')
scf2_exists = os.path.isfile(case+".scf2")
scf2up_exists = os.path.isfile(case+".scf2up")
scf_exists = os.path.isfile(case+".scf")
self.EF = None
if EF_exists:
# The previous DMFT chemical potential
self.EF = float( open('EF.dat','r').readline() )
if self.EF is None and (scf2_exists or scf2up_exists):
fname = case+".scf2" if scf2_exists else case+".scf2up"
fscf = open(fname, 'r')
lines = fscf.readlines()
for line in lines:
if re.match(r':FER', line) is not None:
Ry2eV = 13.60569193
self.EF = float(line[38:])*Ry2eV
break
if self.EF is None and scf_exists:
fname = case+".scf"
fscf = open(fname, 'r')
lines = fscf.readlines()
for line in lines:
if re.match(r':FER', line) is not None:
Ry2eV = 13.60569193
self.EF = float(line[38:])*Ry2eV
if self.EF is None: self.EF = 0
def member_vars(self):
myvars = [
('siginds', {} ),
('cftrans', {} ),
('legends', {} ),
]
return IndmfBase.member_vars(self) + myvars
def read(self, filename = None):
lines = self.readlines(filename)
self.parse_head(lines)
self.parse_atomlist(lines)
# read the big block of siginds and cftrans
ncp, maxdim, maxsize = [int(e) for e in lines.next().split()]
for i in range(ncp):
icp, dim, size = [int(e) for e in lines.next().split()]
self.legends[icp] = lines.next().split("'")[1::2]
self.siginds[icp] = array([[int(e) for e in lines.next().split()] for row in range(dim)])
raw_cftrans = array([[float(e) for e in lines.next().split()] for row in range(dim)])
self.cftrans[icp] = raw_cftrans[:,0::2] + raw_cftrans[:,1::2]*1j
def write_head(self, lines):
if abs(self.projector)<4:
styp="%f "
sdoc = "hybridization Emin and Emax, measured from FS, renormalize for interstitials, projection type"
else:
styp="%d "
sdoc = "hybridization band index nemin and nemax, renormalize for interstitials, projection type"
if self.only_write_stored_data:
emin,emax = self.hybr_emin,self.hybr_emax
else:
if abs(self.projector)<4: # This is the old scheme, where hybridization is cut-off by energy
emin = self.hybr_emin+self.EF
emax = self.hybr_emax+self.EF
else: # In the new scheme, we cut-off at certain band index
import findNbands
import glob
import sys
print 'Going over all case.energy files to find which bands are used to construct DMFT projector'
strfile = self.case+'.struct'
enefiles = glob.glob(self.case+'.energyso')+glob.glob(self.case+'.energyso_'+'*')
if not enefiles: # Not spin-orbit run
enefiles = glob.glob(self.case+'.energy') + glob.glob(self.case+'.energy_'+'*')
enefiles = filter(lambda fil: os.path.getsize(fil)>0, enefiles) # Remove empty files
if len(enefiles)==0:
print 'all enefiles=', enefiles
print "ERROR : The case.energy* files should be present in this directory when using projector 5 or 6. Exiting...."
sys.exit(1)
(nemin,nemax) = findNbands.findNbands(self.hybr_emin+self.EF,self.hybr_emax+self.EF,enefiles,strfile)
emin,emax = nemin,nemax
#if abs(self.projector)==5:
print 'Computing DMFT real space projector, which is written in projectorw.dat'
import wavef
Rm2=[self.atoms[iatom][3] for iatom in self.atoms.keys()]
print 'Rm2=', Rm2, 'atms=', self.atoms.keys(), 'Lsa=', self.Lsa
wavef.main(self.case, self.atoms.keys(), self.Lsa, self.icpsa, Rm2)
lines += [
( (styp+styp+"%d %d") % (emin, emax, self.Qrenorm, self.projector), sdoc),
("%1d %g %g %d %f %f" % (self.matsubara, self.broadc, self.broadnc, self.om_npts, self.om_emin, self.om_emax),
"matsubara, broadening-corr, broadening-noncorr, nomega, omega_min, omega_max (in eV)")
]
def write(self, filename = None, only_write_stored_data=False):
# generate text in two chunks, stored in text and text2
# text contains basic information about correlated problems
# text2 contains all the siginds, legends and crystal-field transformation matrices
self.only_write_stored_data = only_write_stored_data
lines = []
self.write_head(lines)
self.write_atomlist(lines)
text = self.format(lines)
maxdim = max(len(s) for s in self.siginds.values()) # dimension of largest sigind matrix
sizes = {}
for icp,sigind in self.siginds.iteritems():
sizes[icp] = len([x for x in set(sigind.flat) if x != 0])
maxsize = max(sizes.values()) # number of columns in largest
text2 = [
'#================ # Siginds and crystal-field transformations for correlated orbitals ================',
'%-3d %3d %3d # Number of independent kcix blocks, max dimension, max num-independent-components' % (len(self.cps), maxdim, maxsize)
]
for icp,sigind in self.siginds.iteritems():
legend = self.legends[icp]
cftrans = self.cftrans[icp]
text2 += [
"%-3d %3d %3d # %s" % (icp, len(sigind), sizes[icp], 'cix-num, dimension, num-independent-components'),
'#---------------- # Independent components are --------------',
"'%s' "*len(legend) % tuple(legend),
]
text2.append('#---------------- # Sigind follows --------------------------')
max_sigfig = 1 + int(log(max(sigind.flat))/log(10))
format = '%' + str(max_sigfig) + 'd'
for row in sigind:
text2.append(' '.join([format % elem for elem in row]))
# print local transform matrix (real & imag)
text2.append('#---------------- # Transformation matrix follows -----------')
for row in self.cftrans[icp]:
text2.append(' '.join(["%11.8f %11.8f " % (elem.real, elem.imag) for elem in row]))
# join with first half; add \n to each line in text2
text += [line+'\n' for line in text2]
self.writelines(text, filename)
|
python
|
""" Implements distance functions for clustering """
import math
from typing import Dict, List
import requests
from blaze.config.environment import EnvironmentConfig
from blaze.evaluator.simulator import Simulator
from blaze.logger import logger as log
from .types import DistanceFunc
def linear_distance(a: float, b: float) -> float:
""" Returns the absolute difference between a and b """
return abs(a - b)
def euclidian_distance(a: List[float], b: List[float]) -> float:
""" Returns the euclidian distance between two N-dimensional points """
return math.sqrt(sum((x - y) ** 2 for (x, y) in zip(a, b)))
def create_apted_distance_function(port: int) -> DistanceFunc:
""" Creates a distance function with a connection to the tree_diff server """
def get_apted_tree(env_config: EnvironmentConfig) -> Dict:
sim = Simulator(env_config)
tree = {}
s = [sim.root]
while s:
curr = s.pop()
tree[curr.priority] = {
"size": curr.resource.size,
"type": str(curr.resource.type),
"children": [c.priority for c in curr.children],
}
s.extend(curr.children)
tree["length"] = len(tree)
return tree
def apted_distance(a: EnvironmentConfig, b: EnvironmentConfig) -> float:
a_tree = get_apted_tree(a)
b_tree = get_apted_tree(b)
r = requests.post(f"http://localhost:{port}/getTreeDiff", json={"tree1": a_tree, "tree2": b_tree}, timeout=5)
r = r.json()
distance = r["editDistance"]
log.with_namespace("apted_distance").debug("got distance", distance=distance, a=a.request_url, b=b.request_url)
return distance
return apted_distance
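# ---------------------------------------------------------------------------
# Illustrative usage sketch: linear_distance and euclidian_distance are pure
# functions and can be exercised directly; create_apted_distance_function
# assumes a tree_diff server is reachable on the given port (4000 below is a
# made-up example), so that call is left commented out.
if __name__ == "__main__":
    assert linear_distance(1.0, 3.5) == 2.5
    assert euclidian_distance([0.0, 0.0], [3.0, 4.0]) == 5.0
    # apted = create_apted_distance_function(4000)
    # d = apted(env_config_a, env_config_b)  # EnvironmentConfig objects from blaze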
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-20 15:34
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import F
def draft_title(apps, schema_editor):
Page = apps.get_model('wagtailcore', 'Page')
Page.objects.all().update(draft_title=F('title'))
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0039_collectionviewrestriction'),
]
operations = [
migrations.AddField(
model_name='page',
name='draft_title',
field=models.CharField(default='', editable=False, max_length=255),
preserve_default=False,
),
migrations.RunPython(draft_title, migrations.RunPython.noop),
]
|
python
|
import json
import os
import subprocess as sp
from batch.client import Job
from .batch_helper import short_str_build_job
from .build_state import \
Failure, Mergeable, Unknown, NoImage, Building, Buildable, Merged, \
build_state_from_json
from .ci_logging import log
from .constants import BUILD_JOB_TYPE, VERSION, GCS_BUCKET, SHA_LENGTH, \
GCS_BUCKET_PREFIX
from .environment import PR_BUILD_SCRIPT, SELF_HOSTNAME, batch_client, CONTEXT
from .git_state import FQSHA, FQRef
from .github import latest_sha_for_ref
from .http_helper import post_repo, BadStatus
from .sentinel import Sentinel
from .shell_helper import shell
def try_new_build(source, target):
img = maybe_get_image(target, source)
if img:
attributes = {
'target': json.dumps(target.to_json()),
'source': json.dumps(source.to_json()),
'image': img,
'type': BUILD_JOB_TYPE
}
try:
job = batch_client.create_job(
img,
command=['/bin/bash',
'-c',
PR_BUILD_SCRIPT],
env={
'SOURCE_REPO_URL': source.ref.repo.url,
'SOURCE_BRANCH': source.ref.name,
'SOURCE_SHA': source.sha,
'TARGET_REPO_URL': target.ref.repo.url,
'TARGET_BRANCH': target.ref.name,
'TARGET_SHA': target.sha
},
resources={'requests': {
'cpu': '3.7',
'memory': '4G'
}},
tolerations=[{
'key': 'preemptible',
'value': 'true'
}],
service_account_name='test-svc',
callback=SELF_HOSTNAME + '/ci_build_done',
attributes=attributes,
volumes=[{
'volume': {
'name': f'hail-ci-{VERSION}-service-account-key',
'secret': {
'optional': False,
'secretName':
f'hail-ci-{VERSION}-service-account-key'
}
},
'volume_mount': {
'mountPath': '/secrets',
'name': f'hail-ci-{VERSION}-service-account-key',
'readOnly': True
}
}])
return Building(job, img, target.sha)
except Exception as e:
log.exception(f'could not start batch job due to {e}')
return Buildable(img, target.sha)
else:
return NoImage(target.sha)
def determine_buildability(source, target):
img = maybe_get_image(source, target)
if img:
return Buildable(img, target.sha)
else:
return NoImage(target.sha)
def get_image_for_target(target):
import requests
assert isinstance(target, FQRef), target
url = f'https://github.com/{target.repo.qname}/raw/{target.name}/hail-ci-build-image'
r = requests.get(url, timeout=5)
if r.status_code != 200:
raise BadStatus(f'could not get raw hail-ci-build-image for {target.short_str()}',
r.status_code)
return r.text.strip()
def maybe_get_image(source, target):
assert isinstance(source, FQSHA)
assert isinstance(target, FQSHA)
d = os.getcwd()
try:
srepo = source.ref.repo
trepo = target.ref.repo
if not os.path.isdir(trepo.qname):
os.makedirs(trepo.qname, exist_ok=True)
os.chdir(trepo.qname)
shell('git', 'clone', trepo.url, '.')
else:
os.chdir(trepo.qname)
if sp.run(['/bin/sh', '-c', f'git remote | grep -q {srepo.qname}']).returncode != 0:
shell('git', 'remote', 'add', srepo.qname, srepo.url)
shell('git', 'fetch', 'origin')
shell('git', 'fetch', srepo.qname)
shell('git', 'checkout', target.sha)
shell('git', 'config', 'user.email', '[email protected]')
shell('git', 'config', 'user.name', 'hail-ci-leader')
shell('git', 'merge', source.sha, '-m', 'foo')
# a force push that removes refs could fail us... not sure what we
# should do in that case. maybe 500'ing is OK?
with open('hail-ci-build-image', 'r') as f:
return f.read().strip()
except (sp.CalledProcessError, FileNotFoundError) as e:
log.exception(f'could not get hail-ci-build-image due to {e}')
return None
finally:
shell('git', 'reset', '--merge')
os.chdir(d)
class GitHubPR(object):
def __init__(self, state, number, title, source, target_ref, target_sha=None):
assert state in ['closed', 'open']
assert isinstance(number, str), f'{type(number)} {number}'
assert isinstance(title, str), f'{type(title)} {title}'
assert isinstance(source, FQSHA), f'{type(source)} {source}'
assert isinstance(target_ref, FQRef), f'{type(target_ref)} {target_ref}'
assert target_sha is None or isinstance(target_sha, str), f'{type(target_sha)} {target_sha}'
self.state = state
self.number = number
self.title = title
self.source = source
self.target_ref = target_ref
self.target_sha = target_sha
@staticmethod
def from_gh_json(d, target_sha=None):
assert 'state' in d, d
assert 'number' in d, d
assert 'title' in d, d
assert 'head' in d, d
assert 'base' in d, d
return GitHubPR(d['state'],
str(d['number']),
str(d['title']),
FQSHA.from_gh_json(d['head']),
FQSHA.from_gh_json(d['base']).ref,
target_sha)
def __str__(self):
return json.dumps(self.to_json())
def short_str(self):
tsha = self.target_sha
if tsha:
tsha = tsha[:SHA_LENGTH]
return (
f'[GHPR {self.number}]{self.target_ref.short_str()}:{tsha}..{self.source.short_str()};'
f'{self.state};'
)
def to_json(self):
return {
'state': self.state,
'number': self.number,
'title': self.title,
'source': self.source.to_json(),
'target_ref': self.target_ref.to_json(),
'target_sha': self.target_sha
}
def to_PR(self, start_build=False):
if self.target_sha is None:
target_sha = latest_sha_for_ref(self.target_ref)
else:
target_sha = self.target_sha
target = FQSHA(self.target_ref, target_sha)
pr = PR.fresh(self.source, target, self.number, self.title)
if start_build:
return pr.build_it()
else:
return pr
class PR(object):
def __init__(self, source, target, review, build, number, title):
assert isinstance(target, FQSHA), target
assert isinstance(source, FQSHA), source
assert number is None or isinstance(number, str)
assert title is None or isinstance(title, str)
assert review in ['pending', 'approved', 'changes_requested']
self.source = source
self.target = target
self.review = review
self.build = build
self.number = number
self.title = title
keep = Sentinel()
def copy(self,
source=keep,
target=keep,
review=keep,
build=keep,
number=keep,
title=keep):
return PR(
source=self.source if source is PR.keep else source,
target=self.target if target is PR.keep else target,
review=self.review if review is PR.keep else review,
build=self.build if build is PR.keep else build,
number=self.number if number is PR.keep else number,
title=self.title if title is PR.keep else title)
def _maybe_new_shas(self, new_source=None, new_target=None):
assert new_source is not None or new_target is not None
assert new_source is None or isinstance(new_source, FQSHA)
assert new_target is None or isinstance(new_target, FQSHA)
if new_source and self.source != new_source:
assert not self.is_merged()
if new_target and self.target != new_target:
log.info(
f'new source and target sha {new_target.short_str()} {new_source.short_str()} {self.short_str()}'
)
return self._new_target_and_source(new_target, new_source)
else:
log.info(f'new source sha {new_source.short_str()} {self.short_str()}')
return self._new_source(new_source)
else:
if new_target and self.target != new_target:
if self.is_merged():
log.info(f'ignoring new target sha for merged PR {self.short_str()}')
return self
else:
log.info(f'new target sha {new_target.short_str()} {self.short_str()}')
return self._new_target(new_target)
else:
return self
def _new_target_and_source(self, new_target, new_source):
return self.copy(
source=new_source,
target=new_target,
review='pending'
)._new_build(
# FIXME: if I already have an image, just use it
try_new_build(new_source, new_target)
)
def _new_target(self, new_target):
return self.copy(
target=new_target
)._new_build(
determine_buildability(self.source, new_target)
)
def _new_source(self, new_source):
return self.copy(
source=new_source,
review='pending'
)._new_build(
# FIXME: if I already have an image, just use it
try_new_build(new_source, self.target)
)
def _new_build(self, new_build):
if self.build != new_build:
self.notify_github(new_build)
return self.copy(build=self.build.transition(new_build))
else:
return self
def build_it(self):
# FIXME: if I already have an image, just use it
return self._new_build(try_new_build(self.source, self.target))
# FIXME: this should be a verb
def merged(self):
return self._new_build(Merged(self.target.sha))
def notify_github(self, build, status_sha=None):
log.info(f'notifying github of {build} for {self.short_str()}')
json = {
'state': build.gh_state(),
'description': str(build),
'context': CONTEXT
}
if isinstance(build, Failure) or isinstance(build, Mergeable):
json['target_url'] = \
f'https://storage.googleapis.com/{GCS_BUCKET}/{GCS_BUCKET_PREFIX}ci/{self.source.sha}/{self.target.sha}/index.html'
try:
post_repo(
self.target.ref.repo.qname,
'statuses/' + (status_sha if status_sha is not None else self.source.sha),
json=json,
status_code=201)
except BadStatus as e:
if e.status_code == 422:
log.exception(
f'Too many statuses applied to {self.source.sha}! This is a '
f'dangerous situation because I can no longer block merging '
f'of failing PRs.')
else:
raise e
@staticmethod
def fresh(source, target, number=None, title=None):
return PR(source, target, 'pending', Unknown(), number, title)
def __str__(self):
return json.dumps(self.to_json())
def short_str(self):
return (
f'[PR {self.number}]{self.target.short_str()}..{self.source.short_str()};'
f'{self.review};{self.build};'
)
@staticmethod
def from_json(d):
assert 'target' in d, d
assert 'source' in d, d
assert 'review' in d, d
assert 'build' in d, d
assert 'number' in d, d
assert 'title' in d, d
return PR(
FQSHA.from_json(d['source']),
FQSHA.from_json(d['target']),
d['review'],
build_state_from_json(d['build']),
d['number'],
d['title'],
)
def to_json(self):
return {
'target': self.target.to_json(),
'source': self.source.to_json(),
'review': self.review,
'build': self.build.to_json(),
'number': self.number,
'title': self.title
}
def is_mergeable(self):
return (isinstance(self.build, Mergeable) and
self.review == 'approved')
def is_approved(self):
return self.review == 'approved'
def is_building(self):
return isinstance(self.build, Building)
def is_pending_build(self):
return isinstance(self.build, Buildable)
def is_merged(self):
return isinstance(self.build, Merged)
def update_from_github_push(self, push):
assert isinstance(push, FQSHA)
assert self.target.ref == push.ref, f'{push} {self.short_str()}'
return self._maybe_new_shas(new_target=push)
def update_from_github_pr(self, gh_pr):
assert isinstance(gh_pr, GitHubPR)
assert self.target.ref == gh_pr.target_ref
assert self.source.ref == gh_pr.source.ref
# this will build new PRs when the server restarts
if gh_pr.target_sha:
result = self._maybe_new_shas(
new_source=gh_pr.source,
new_target=FQSHA(gh_pr.target_ref, gh_pr.target_sha))
else:
result = self._maybe_new_shas(new_source=gh_pr.source)
if self.title != gh_pr.title:
log.info(f'found new title from github {gh_pr.title} {self.short_str()}')
result = result.copy(title=gh_pr.title)
if self.number != gh_pr.number:
log.info(f'found new PR number from github {gh_pr.title} {self.short_str()}')
result = result.copy(number=gh_pr.number)
return result
def update_from_github_review_state(self, review):
if self.review != review:
log.info(f'review state changing from {self.review} to {review} {self.short_str()}')
return self.copy(review=review)
else:
return self
def update_from_github_status(self, build):
if isinstance(self.build, Unknown):
if self.target.sha == build.target_sha:
log.info(
f'recovering from unknown build state via github. {build} {self.short_str()}'
)
return self.copy(build=build)
else:
log.info('ignoring github build state for wrong target. '
f'{build} {self.short_str()}')
return self
else:
log.info(f'ignoring github build state. {build} {self.short_str()}')
return self
def refresh_from_batch_job(self, job):
state = job.cached_status()['state']
if state == 'Complete':
return self.update_from_completed_batch_job(job)
elif state == 'Cancelled':
log.error(
f'a job for me was cancelled {short_str_build_job(job)} {self.short_str()}')
return self._new_build(try_new_build(self.source, self.target))
else:
assert state == 'Created', f'{state} {job.id} {job.attributes} {self.short_str()}'
assert 'target' in job.attributes, job.attributes
assert 'image' in job.attributes, job.attributes
target = FQSHA.from_json(json.loads(job.attributes['target']))
image = job.attributes['image']
if target == self.target:
return self._new_build(Building(job, image, target.sha))
else:
log.info(f'found deploy job {job.id} for wrong target {target}, should be {self.target}')
job.cancel()
return self
def refresh_from_missing_job(self):
assert isinstance(self.build, Building)
return self.build_it()
def update_from_completed_batch_job(self, job):
assert isinstance(job, Job)
job_status = job.cached_status()
exit_code = job_status['exit_code']
job_source = FQSHA.from_json(json.loads(job.attributes['source']))
job_target = FQSHA.from_json(json.loads(job.attributes['target']))
assert job_source.ref == self.source.ref
assert job_target.ref == self.target.ref
if job_target.sha != self.target.sha:
log.info(
f'notified of job for old target {job.id}'
# too noisy: f' {job.attributes} {self.short_str()}'
)
x = self
elif job_source.sha != self.source.sha:
log.info(
f'notified of job for old source {job.id}'
# too noisy: f' {job.attributes} {self.short_str()}'
)
x = self
elif exit_code == 0:
log.info(f'job finished success {short_str_build_job(job)} {self.short_str()}')
x = self._new_build(Mergeable(self.target.sha, job))
else:
log.info(f'job finished failure {short_str_build_job(job)} {self.short_str()}')
x = self._new_build(
Failure(exit_code,
job,
job.attributes['image'],
self.target.sha))
job.cancel()
return x
|
python
|
from os.path import isfile
from time import time
import joblib
import numpy as np
import pandas as pd
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from tqdm.auto import tqdm
from src.fair_random_forest import FairRandomForestClassifier
def learn(X: pd.DataFrame, y: pd.DataFrame, s: pd.DataFrame, outer_folds: list,
inner_folds: list) -> pd.DataFrame:
"""Apply the entire machine learning procedure.
Arguments:
- X: A m*n dataframe containing features, that is used as input for
classifier
- y: A boolean vector of length n, containing the targets
- s: A boolean vector of length n, indicating whether a sample belongs to
sensitive group.
- outer_folds, inner_folds: Result of src.get_folds.
Returns a pd.DataFrame containing the performance over all folds.
"""
assert all(X.index == y.index)
assert all(X.index == s.index)
# Convert X, y, s to np.arrays for compatibility reasons.
X = np.ascontiguousarray(X.values)
y = np.ascontiguousarray(y.values.ravel()) > 1
s = np.ascontiguousarray(s.values.ravel())
params = [
(int(max_depth), int(n_bins), float(orthogonality))
for n_bins in (2,)
for max_depth in np.arange(1, 11)
for orthogonality in np.linspace(0, 1, 11)
]
# Learn on every outer fold
iterations = [
(max_depth, n_bins, ortho, fold, trainval_idx, test_idx)
for max_depth, n_bins, ortho in params
for fold, (trainval_idx, test_idx) in outer_folds
if not isfile(f'models/outer_folds/{max_depth}-{ortho:.2f}-{n_bins}-{fold}.pkl')
]
for max_depth, n_bins, ortho, fold, trainval_idx, test_idx in tqdm(iterations):
X_trainval = X[trainval_idx]
y_trainval = y[trainval_idx]
s_trainval = s[trainval_idx]
vt = VarianceThreshold()
vt.fit(X_trainval)
X_trainval = vt.transform(X_trainval)
clf = FairRandomForestClassifier(
orthogonality=ortho, max_depth=max_depth, n_bins=n_bins)
start_fit = time()
clf.fit(X_trainval, y_trainval, s_trainval)
clf.fit_time = time() - start_fit
fp = f'models/outer_folds/{max_depth}-{ortho:.2f}-{n_bins}-{fold}.pkl'
joblib.dump(clf, fp)
# Learn on every inner fold
iterations = [
(max_depth, n_bins, ortho, outer_fold, inner_fold, train_idx, val_idx)
for max_depth, n_bins, ortho in params
for (outer_fold, inner_fold), (train_idx, val_idx) in inner_folds
if not isfile(f'models/inner_folds/{max_depth}-{ortho:.2f}-{n_bins}-{outer_fold}-{inner_fold}.pkl')
]
for max_depth, n_bins, ortho, outer_fold, inner_fold, train_idx, val_idx in tqdm(iterations):
X_train = X[train_idx]
y_train = y[train_idx]
s_train = s[train_idx]
vt = VarianceThreshold()
vt.fit(X_train)
X_train = vt.transform(X_train)
clf = FairRandomForestClassifier(
orthogonality=ortho, max_depth=max_depth, n_bins=n_bins)
start_fit = time()
clf.fit(X_train, y_train, s_train)
clf.fit_time = time() - start_fit
fp = f'models/inner_folds/{max_depth}-{ortho:.2f}-{n_bins}-{outer_fold}-{inner_fold}.pkl'
joblib.dump(clf, fp)
# Predict on all outer folds
iterations = [
(max_depth, n_bins, ortho, fold, trainval_idx, test_idx)
for max_depth, n_bins, ortho in params
for fold, (trainval_idx, test_idx) in outer_folds
if not isfile(f'models/outer_folds/{max_depth}-{ortho:.2f}-{n_bins}-{fold}.npy')
]
for max_depth, n_bins, ortho, fold, trainval_idx, test_idx in tqdm(iterations):
X_trainval = X[trainval_idx]
X_test = X[test_idx]
vt = VarianceThreshold()
vt.fit(X_trainval)
X_trainval = vt.transform(X_trainval)
X_test = vt.transform(X_test)
fp = f'models/outer_folds/{max_depth}-{ortho:.2f}-{n_bins}-{fold}'
clf = joblib.load(f'{fp}.pkl')
y_score = clf.predict_proba(X_test)[:,1]
np.save(f'{fp}.npy', y_score)
# Predict on all inner folds
iterations = [
(max_depth, n_bins, ortho, outer_fold, inner_fold,
train_idx, val_idx)
for max_depth, n_bins, ortho in params
for (outer_fold, inner_fold), (train_idx, val_idx) in inner_folds
if not isfile(f'models/inner_folds/{max_depth}-{ortho:.2f}-{n_bins}-{outer_fold}-{inner_fold}.npy')
]
for max_depth, n_bins, ortho, outer_fold, inner_fold, train_idx, val_idx in tqdm(iterations):
X_train = X[train_idx]
X_val = X[val_idx]
vt = VarianceThreshold()
vt.fit(X_train)
X_train = vt.transform(X_train)
X_val = vt.transform(X_val)
fp = f'models/inner_folds/{max_depth}-{ortho:.2f}-{n_bins}-{outer_fold}-{inner_fold}'
clf = joblib.load(f'{fp}.pkl')
y_score = clf.predict_proba(X_val)[:,1]
np.save(f'{fp}.npy', y_score)
# Measure performance on every inner-fold validation set
iterations = [
(max_depth, n_bins, orthogonality, outer_fold, inner_fold,
train_idx, val_idx)
for max_depth, n_bins, orthogonality in params
for (outer_fold, inner_fold), (train_idx, val_idx) in inner_folds
]
performance_all_candidates = list()
for max_depth, n_bins, ortho, outer_fold, inner_fold, train_idx, val_idx in tqdm(iterations):
fp = f'models/inner_folds/{max_depth}-{ortho:.2f}-{n_bins}-{outer_fold}-{inner_fold}'
y_score = np.load(f'{fp}.npy')
y_val = y[val_idx]
s_val = s[val_idx]
auc_y = roc_auc_score(y_val, y_score)
auc_s = roc_auc_score(s_val, y_score)
auc_s = max(auc_s, 1-auc_s)
performance_this_run = dict(
max_depth=max_depth, n_bins=n_bins, orthogonality=ortho,
outer_fold=outer_fold, inner_fold=inner_fold, auc_y=auc_y,
auc_s=auc_s)
performance_all_candidates.append(performance_this_run)
return pd.DataFrame(performance_all_candidates)
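# ---------------------------------------------------------------------------
# Illustrative sketch of the fold structures `learn` expects from src.get_folds
# (shapes only, with made-up indices):
#   outer_folds : [(outer_fold_id, (trainval_idx, test_idx)), ...]
#   inner_folds : [((outer_fold_id, inner_fold_id), (train_idx, val_idx)), ...]
# where every index array refers to rows of the full X / y / s frames.
if __name__ == "__main__":
    demo_outer = [(0, (np.arange(0, 30), np.arange(30, 40)))]
    demo_inner = [((0, 0), (np.arange(0, 20), np.arange(20, 30)))]
    print(len(demo_outer), len(demo_inner))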
|
python
|
#!/usr/bin/env python
"""
Kyle McChesney
Ruffus pipeline for all things tophat
"""
# ruffus imports
from ruffus import *
import ruffus.cmdline as cmdline
# custom functions
from tophat_extras import TophatExtras
# system imports
import subprocess, logging, os, re, time
# :) so i never have to touch excel
import pandas as pd
# EMAIL
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
parser = cmdline.get_argparse(description='This pipeline provides a number of functionalities for working with RNAseq data')
# Program arguments
parser.add_argument("--dir", help="Fullpath to the directory where the FASTQ reads are located", required=True)
parser.add_argument("--cores", help="Number of cores to run bowtie on", default='10')
parser.add_argument("--index", help="Fullpath to the bowtie2 index in: /full/file/path/basename form", default="/data/refs/hg19/hg19")
parser.add_argument("--output", help="Fullpath to output directory", default="./")
parser.add_argument("--size", help="Fullpath to size file", default="/data/refs/hg19/hg19.sizes")
parser.add_argument("--gtf", help="Fullpath to gtf file", default="/data/refs/hg19/hg19.gtf")
parser.add_argument("--paired", help="Indicates whether the reads in --dir are paired_end. MUST FOLLOW _1 _2 convention", default=False)
# optional arguments to control turning on and off tasks
# trimming
parser.add_argument("--trim", help="Whether or not to trim the fastq reads", type=bool, default=False)
parser.add_argument("--trim-val", help="Value to trim by (from the end of read)", default=50)
# reporting / meta analysis
parser.add_argument("--bw", help="Whether or not big wig files should be generated", type=bool, default=False)
parser.add_argument("--one-codex", help="Whether or not to upload each sample to one codex for metagenomic analysis", default=False)
# reporting
parser.add_argument("--emails", help="Emails to send DE results too", default="[email protected]", nargs="+")
# parse the args
options = parser.parse_args()
# package the emails into an array if just one
if options.emails == "[email protected]":
options.emails = [options.emails]
# Kenny loggins
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log_formatter = logging.Formatter('%(asctime)s {%(levelname)s}: %(message)s')
# file log
time_stamp = str(time.time()).replace(".","")
log_file = options.log_file if options.log_file else os.path.join(options.output,"{}.{}.{}".format("tophat_pipeline",time_stamp,"log"))
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(log_formatter)
# console log
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(log_formatter)
# set it all up
log.addHandler(file_handler)
log.addHandler(stream_handler)
log.info("Starting Tophat")
# pre checking
# create extas instance
extras = TophatExtras(log)
extras.check_default_args(options.cores, options.index, options.output)
input_files = extras.make_fastq_list(options.dir)
genome = os.path.splitext(os.path.basename(options.index))[0]
@active_if(options.paired)
@collate(input_files, formatter("([^/]+)_[12].fastq$"), ["{path[0]}/{1[0]}_1.fastq", "{path[0]}/{1[0]}_2.fastq"])
def collate_files(input_files, output_files):
log.info("Collating paired fastq files: \n\t{} \n\t{}\n".format(input_files[0], input_files[1]))
@active_if(options.one_codex)
@active_if(options.paired)
@transform(collate_files, formatter("([^/]+)_[12].fastq$"), options.output+"{1[0]}.assembled.fastq", extras)
def pear_fastq_files(input_files, output_file, extras):
log.info("Starting pear run on %s and %s", input_files[0], input_files[1])
output_file = re.sub(r"\.assembled\.fastq", "", output_file)
args = ["pear", "-f", input_files[0], "-r", input_files[1], "-o", output_file]
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
extras.report_error("PEAR","Merging paired files with PEAR failed: \n{}".format(e.output))
raise SystemExit
@active_if(options.one_codex)
@active_if(options.paired)
@transform(pear_fastq_files, suffix(".fastq"), ".fastq", extras)
def upload_paired_to_one_codex(input_file, output_file, extras):
args = ["onecodex", "upload", input_file]
log.info("uploading %s to One Codex", input_file)
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
extras.report_error("One Codex","uploading PEAR-assembled files to One Codex failed \n{}".format(e.output))
raise SystemExit
@active_if(options.one_codex)
@active_if(not options.paired)
@transform(input_files, suffix(".fastq"), ".fastq", extras)
def upload_to_one_codex(input_file, output_file, extras):
args = ["onecodex", "upload", input_file]
log.info("uploading %s to One Codex", input_file)
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
extras.report_error("One Codex","uploading to One Codex failed \n{}".format(e.output))
raise SystemExit
# paired alignment
@active_if(options.paired)
@transform(collate_files, formatter("([^/]+)_[12].fastq$"), options.output+"{1[0]}-tophat-results/accepted_hits.bam", options, extras)
def tophat_align_paired(input_files, output_file, options, extras):
# we want tophat to write its results to output+filename
output = os.path.dirname(output_file)
args = ["tophat2", "-G", options.gtf,"-p", str(options.cores), "-o", output, options.index, input_files[0], input_files[1]]
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
extras.report_error("tophat_paired","tophat2 paired run failed:\n{}".format(e.output))
raise SystemExit
# print output
log.info("tophat2 output:")
log.info(output)
# pre trimming
@active_if(options.trim)
@transform(input_files, suffix(".fastq"), ".trimmed.fastq", options, extras)
def trim_fastq_files(input_file, output_file, options, extras):
# trim it
args = "seqtk trimfq -e {} {} > {}".format(options.trim_val, input_file, output_file)
p = subprocess.Popen(args, shell=True)
p.wait()
if p.returncode != 0:
log.warn("SeqTK failed trimming %s", input_file)
extras.report_error("seqTK trimming", "failed")
raise SystemExit
@active_if(options.trim)
@transform(trim_fastq_files, formatter(), options.output+"{basename[0]}-tophat-results/accepted_hits.bam", options, extras)
def tophat_align_trimmed(input_file, output_file, options, extras):
# we want tophat to write its results to output+filename
output = os.path.dirname(output_file)
log.info("Starting tophat2 run on trimmed %s", input_file)
args = ["tophat2", "-G", options.gtf,"-p", options.cores, "-o", output, options.index, input_file]
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
log.warn(e.output)
extras.report_error("tophat_trimmed","tophat2 run on trimmed reads failed: \n{}".format(e.output))
raise SystemExit
# print output
log.info("tophat2 output:")
log.info(output)
# delete the trimmed fastq file
log.info("Deleting the trimmed file")
os.unlink(input_file)
# unpaired alignment function
@active_if(not options.paired)
@active_if(not options.trim)
@transform(input_files, formatter(), options.output+"{basename[0]}-tophat-results/accepted_hits.bam", options, extras)
def tophat_align_unpaired(input_file, output_file, options, extras):
# we want tophat to write its results to output+filename
output = os.path.dirname(output_file)
log.info("Starting tophat2 run on %s", input_file)
args = ["tophat2", "-G", options.gtf,"-p", options.cores, "-o", output, options.index, input_file]
try:
output = subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
log.warn(e.output)
extras.report_error("tophat_unpaired","tophat2 unpaired run failed: \n{}".format(e.output))
raise SystemExit
# print output
log.info("tophat2 output:")
log.info(output)
# get rid of stupid accepted_hits.bam file
@transform([tophat_align_unpaired, tophat_align_paired, tophat_align_trimmed],
formatter(r".*/([^/]+)-tophat-results/accepted_hits.bam$"),
''.join([options.output, "{1[0]}", ".bam"]), extras )
def rename_accepted_hits(input_file, output_file, extras):
try:
os.rename(input_file, output_file)
except OSError:
extras.report_error("rename_accepted_hits","Renaming {} to {} failed".format(input_file, output_file),log)
# both the tophat functions can feed into here
@transform(rename_accepted_hits, suffix(".bam"),".sorted.bam", extras)
def sort_bam(input_file, output_file, extras):
log.info("Sorting %s ", input_file)
# hacky
output_file = re.sub(r"\.bam", "", output_file)
if subprocess.call(["samtools-rs", "rocksort", "-@", "8", "-m", "16G", input_file, output_file]):
log.warn("bam sorting %s failed, exiting", input_file)
extras.report_error("sort_bam","bam sorting {} failed".format(input_file))
raise SystemExit
log.info("Deleting old file %s", input_file)
os.unlink(input_file)
@active_if(options.bw)
@transform(sort_bam, suffix(".sorted.bam"), ".bed", options.output, extras)
def bam_to_bed(input_file, output_file, output, extras):
log.info("Converting %s to a bed file", input_file)
if subprocess.call("bamToBed -i {} > {}".format(input_file, output_file), shell=True):
log.warn("bam to bed conversion of %s failed, exiting", input_file)
extras.report_error("bam_to_bed","bam to bed conversion of {} failed".format(input_file))
raise SystemExit
# now we can move sorted bam to output
file_name = os.path.basename(input_file)
new_name = os.path.join(output, file_name)
os.rename(input_file, new_name)
@active_if(options.bw)
@transform(bam_to_bed, suffix(".bed"), ".bg", options.size, extras)
def bed_to_bg(input_file, output_file, size_file, extras):
log.info("Converting %s to a genome coverage file", input_file)
command = "genomeCoverageBed -bg -split -i {} -g {} > {}".format( input_file, size_file, output_file)
if subprocess.call(command, shell=True):
log.warn("bed to coverage conversion of %s failed, exiting", input_file)
extras.report_error("bed_to_bg","bed to bg conversion of {} failed".format(input_file))
raise SystemExit
log.info("Deleting old file %s", input_file)
os.unlink(input_file)
@active_if(options.bw)
@transform(bed_to_bg, suffix(".bg"), ".bw", genome, options.output)
def bg_to_bw(input_file, output_file, genome, output):
log.info("Creating bigwig file from bg: %s", input_file)
command = "bedGraphToBigWig {} {} {}".format( input_file, size_file, output_filet)
if subprocess.call(command, shell=True):
log.warn("bg to bw conversion of %s failed, exiting", input_file)
extras.report_error("bg_to_bw","bg to bw conversion of {} failed".format(input_file))
raise SystemExit
log.info("Deleting old file %s", input_file)
os.unlink(input_file)
# run the pipeline
cmdline.run(options)
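# ---------------------------------------------------------------------------
# Illustrative invocation (assumes this script is saved as tophat_pipeline.py,
# the default hg19 reference files exist, and the FASTQ reads live in /data/reads):
#
#   python tophat_pipeline.py --dir /data/reads --cores 10 \
#       --output /data/results/ --bw True --emails [email protected]
#
# ruffus' cmdline module adds its own flags on top of these (e.g. --verbose,
# --log_file), which is where options.log_file above comes from.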
|
python
|
__title__ = "playground"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "[email protected]"
from enum import Enum
"""
This file is used to define state constants for the systems operation.
"""
class State(Enum):
"""
Bot application states
"""
EXIT = -1
STARTING = 0
READY = 1
STOPPED = 2
RUNNING = 3
RELOAD_CONF = 4
class RunMode(Enum):
"""
Bot running mode (backtest, dry-run, ...)
can be "live", "dry-run", "backtest"
"""
LIVE = "live"
PAPER = "paper"
FORWARDTEST = "forwardtest"
BACKTEST = "backtest"
PLOT = "plot"
OTHER = "other"
class TradingMode(Enum):
"""
Specification of different trading modes (spot, margin)
"""
SPOT = 1
MARGIN = 2
FUTURES = 3
OPTIONS = 4
class QuoteCurrency(Enum):
"""
Specification of supported quote currencies
"""
USD = "USD"
EUR = "EUR"
class Timeframe(Enum):
"""
Specification of different timeframes
"""
MINUTE = "1 m"
THREE_MINUTES = "3 m"
FIVE_MINUTES = "5 m"
FIFTEEN_MINUTES = "15 m"
THIRTY_MINUTES = "30 m"
HOURLY = "1 h"
TWO_HOURS = "2 h"
FOUR_HOURS = "4 h"
SIX_HOURS = "6 h"
TWELVE_HOURS = "12 h"
DAILY = "1 D"
THREE_DAYS = "3 D"
WEEKLY = "1 W"
BIWEEKLY = "2 W"
MONTHLY = "1 M"
TRIMESTRAL = "3 M"
SEMESTRAL = "6 M"
YEARLY = "1 Y"
TRADING_MODES = [
RunMode.LIVE,
RunMode.PAPER,
]
SIMULATION_MODES = [
RunMode.BACKTEST,
RunMode.FORWARDTEST,
]
LOW_TIMEFRAMES = [
Timeframe.MINUTE,
Timeframe.THREE_MINUTES,
Timeframe.FIVE_MINUTES,
Timeframe.FIFTEEN_MINUTES,
Timeframe.THIRTY_MINUTES,
]
MEDIUM_TIMEFRAMES = [
Timeframe.HOURLY,
Timeframe.TWO_HOURS,
Timeframe.FOUR_HOURS,
Timeframe.SIX_HOURS,
Timeframe.TWELVE_HOURS,
]
MACRO_TIMEFRAMES = [
Timeframe.DAILY,
Timeframe.THREE_DAYS,
Timeframe.WEEKLY,
Timeframe.BIWEEKLY,
Timeframe.MONTHLY,
Timeframe.TRIMESTRAL,
Timeframe.SEMESTRAL,
Timeframe.YEARLY,
]
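# ---------------------------------------------------------------------------
# Illustrative usage sketch: the groupings above lend themselves to simple
# membership tests when dispatching behaviour by run mode or timeframe.
if __name__ == "__main__":
    mode = RunMode.PAPER
    print(mode in TRADING_MODES)                  # True
    print(mode in SIMULATION_MODES)               # False
    print(Timeframe.HOURLY in MEDIUM_TIMEFRAMES)  # True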
|
python
|
import os
import sys
import pytest
from pipsi import Repo, find_scripts
@pytest.fixture
def repo(home, bin):
return Repo(str(home), str(bin))
@pytest.mark.parametrize('package, glob', [
('grin', 'grin*'),
('pipsi', 'pipsi*'),
])
def test_simple_install(repo, home, bin, package, glob):
assert not home.listdir()
assert not bin.listdir()
repo.install(package)
assert home.join(package).check()
assert bin.listdir(glob)
assert repo.upgrade(package)
@pytest.mark.xfail(
sys.version_info[0] != 3,
reason='attic is python3 only', run=False)
@pytest.mark.xfail(
'TRAVIS' in os.environ,
reason='attic won\'t build on travis', run=False)
def test_simple_install_attic(repo, home, bin):
test_simple_install(repo, home, bin, 'attic', 'attic*')
def test_list_everything(repo, home, bin):
assert not home.listdir()
assert not bin.listdir()
assert repo.list_everything() == []
def test_find_scripts():
print('executable ' + sys.executable)
env = os.path.dirname(
os.path.dirname(sys.executable))
print('env %r' % env)
print('listdir %r' % os.listdir(env))
scripts = list(find_scripts(env, 'pipsi'))
print('scripts %r' % scripts)
assert scripts
|
python
|
from unittest import TestCase
from moneyed import Money
from pytest import raises
from billing.total import Total, TotalSerializer, TotalIncludingZeroSerializer
class TotalTest(TestCase):
def test_unique_currency(self):
with raises(ValueError):
Total([Money(0, 'USD'), Money(0, 'USD')])
def test_init_args(self):
t = Total(100, 'USD', 200, 'EUR', 300, 'GBP')
assert t['USD'].amount == 100
assert t['EUR'].amount == 200
assert t['GBP'].amount == 300
def test_add(self):
t1 = Total(100, 'USD', 100, 'EUR')
t2 = Total(80, 'USD', 150, 'GBP')
t = t1 + t2
assert t['USD'].amount == 180
assert t['EUR'].amount == 100
assert t['GBP'].amount == 150
def test_sub(self):
t1 = Total(100, 'USD', 100, 'EUR')
t2 = Total(80, 'USD', 150, 'GBP')
t = t1 - t2
assert t['USD'].amount == 20
assert t['EUR'].amount == 100
assert t['GBP'].amount == -150
def test_sub_rev(self):
t1 = Total(100, 'USD', 100, 'EUR')
t2 = Total(80, 'USD', 150, 'GBP')
t = t2 - t1
assert t['USD'].amount == -20
assert t['EUR'].amount == -100
assert t['GBP'].amount == 150
def test_neg(self):
t1 = Total(100, 'USD', 100, 'EUR')
t = -t1
assert t['USD'].amount == -100
assert t['EUR'].amount == -100
assert t['GBP'].amount == 0
def test_pos(self):
t1 = Total(100, 'USD', 100, 'EUR')
t = +t1
assert t['USD'].amount == 100
assert t['EUR'].amount == 100
assert t['GBP'].amount == 0
def test_abs(self):
total_neg = Total(-10, 'USD', 20, 'GBP')
t = abs(total_neg)
assert t['USD'].amount == 10
assert t['GBP'].amount == 20
assert t['EUR'].amount == 0
def test_bool(self):
assert not bool(Total())
assert not bool(Total(0, 'USD'))
assert bool(Total(100, 'USD'))
assert bool(Total(0, 'USD', 100, 'EUR'))
assert not bool(Total(0, 'USD', 0, 'EUR'))
def test_eq(self):
assert Total() == Total()
assert Total(0, 'USD') == Total()
t1 = Total(100, 'USD', 100, 'EUR')
t2 = Total(80, 'USD', 150, 'GBP')
assert t1 == +t1
assert not (t1 == t2)
assert Total(100, 'USD') == Total(100, 'USD')
assert Total(100, 'USD', 0, 'EUR') == Total(100, 'USD')
assert not (Total(100, 'USD', 10, 'EUR') == Total(100, 'USD'))
def test_eq_zero(self):
assert Total() == 0
assert Total(0, 'USD') == 0
assert Total(0, 'USD', 0, 'CHF') == 0
assert not (Total(100, 'USD', 100, 'EUR') == 0)
def test_neq(self):
assert not (Total() != Total())
assert not (Total(0, 'USD') != Total())
t1 = Total(100, 'USD', 100, 'EUR')
t2 = Total(80, 'USD', 150, 'GBP')
assert not (t1 != +t1)
assert t1 != t2
assert not (Total([Money(100, 'USD')]) != Total([Money(100, 'USD')]))
assert not (Total([Money(100, 'USD'), Money(0, 'EUR')]) != Total([Money(100, 'USD')]))
assert Total([Money(100, 'USD'), Money(10, 'EUR')]) != Total([Money(100, 'USD')])
def test_currencies(self):
t1 = Total(100, 'USD', 100, 'EUR')
assert t1.currencies() == ['USD', 'EUR']
t2 = Total(80, 'USD', 150, 'GBP')
assert t2.currencies() == ['USD', 'GBP']
def test_monies(self):
t1 = Total(100, 'USD', 0, 'EUR')
assert t1.monies() == [Money(100, 'USD'), Money(0, 'EUR')]
assert t1.nonzero_monies() == [Money(100, 'USD')]
class TotalSerializerTest(TestCase):
def test_serialize(self):
t = Total(100, 'USD', -90, 'EUR')
assert TotalSerializer(t).data == [
{'amount': '100.00', 'amount_currency': 'USD'},
{'amount': '-90.00', 'amount_currency': 'EUR'}
]
def test_zero_value(self):
t = Total(0, 'EUR')
assert TotalSerializer(t).data == []
assert TotalIncludingZeroSerializer(t).data == [
{'amount': '0.00', 'amount_currency': 'EUR'}
]
def test_zero_and_nonzero_values(self):
t = Total(100, 'USD', 0, 'EUR')
assert TotalSerializer(t).data == [
{'amount': '100.00', 'amount_currency': 'USD'}
]
assert TotalIncludingZeroSerializer(t).data == [
{'amount': '100.00', 'amount_currency': 'USD'},
{'amount': '0.00', 'amount_currency': 'EUR'}
]
|
python
|
import json, os, parse, re, requests, util
import urllib.parse
def parseinfo(text):
items = parse.parse(text)
infos = [i for i in items if i[0] == 'template' and i[1].startswith('Infobox')]
if len(infos) == 0:
return None, None
_, name, data = infos[0]
name = re.sub('^Infobox ', '', name)
return name, data
def parsesubcats(text):
j = json.loads(text)
if 'error' in j:
raise Exception(j['error']['info'])
cm = j['query']['categorymembers']
return [(p['title'], p['pageid']) for p in cm]
def geturl(url):
headers = { 'User-Agent': 'SuburbBot/0.1 (+https://github.com/lukesampson/suburbs)' }
return requests.get(url, headers=headers).text
def apiurl(vars):
return 'http://en.wikipedia.org/w/api.php?' + urllib.parse.urlencode(vars)
def subcats(name):
vars = { 'cmtitle': 'Category:' + name.replace(' ', '_'), 'action': 'query', 'list': 'categorymembers', 'cmlimit': 500, 'cmtype': 'subcat', 'format': 'json'}
url = apiurl(vars)
try:
return parsesubcats(geturl(url))
except Exception as err:
raise Exception("error loading {}: {}".format(url, err))
def catpages(pageid):
vars = { 'cmpageid': pageid, 'action': 'query', 'list': 'categorymembers', 'cmlimit': 500, 'cmtype': 'page', 'format': 'json'}
url = apiurl(vars)
return parsesubcats(geturl(url))
def pagetext_from_json(jsontext, pageid):
j = json.loads(jsontext)
return j['query']['pages'][str(pageid)]['revisions'][0]['*']
def cached_pagetext(pageid):
return util.readtext('cache', str(pageid) + '.txt')
def cache_pagetext(pageid, text):
util.writetext(text, 'cache', str(pageid) + '.txt')
def pagetext(pageid):
cached = cached_pagetext(pageid)
if cached is not None:
return cached
vars = { 'pageids': pageid, 'action': 'query', 'prop':'revisions', 'rvprop': 'content', 'format': 'json'}
url = apiurl(vars)
text = pagetext_from_json(geturl(url), pageid)
cache_pagetext(pageid, text)
return text
# strips tags, and anything inside the tags too
def striptags(text):
if not text: return text
return re.sub(r'(?s)<(\w+).*?((?:</\1>)|$)', '', text)
def htmltext(html):
if not html: return html
text = re.sub(r'<[^>]*>', '', html) # tags
text = re.sub(r'(?s)<!--.*?-->', '', text) # comments
return text.strip()
def linksub(match):
if match.group(2):
piped = match.group(2)[1:]
if piped: return piped
return match.group(1)
def striplinks(text):
if not text: return text
return re.sub(r'\[\[(.*?)(\|.*?)?\]\]', linksub, text)
def parsewikitext(text, pagetitle):
url = apiurl({ 'action': 'parse', 'text': text, 'title': pagetitle, 'prop': 'text', 'format': 'json'})
res = geturl(url)
j = json.loads(res)
return j['parse']['text']['*']
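# ---------------------------------------------------------------------------
# Illustrative usage sketch: running it performs live Wikipedia API calls and
# caches page text locally (see cached_pagetext/cache_pagetext); the category
# name is a made-up example.
if __name__ == "__main__":
    cats = subcats('Suburbs of Sydney')    # [(title, pageid), ...]
    print(cats[:3])
    if cats:
        pages = catpages(cats[0][1])       # pages in the first sub-category
        if pages:
            text = pagetext(pages[0][1])   # fetched once, then read from cache
            print(parseinfo(text)[0])      # infobox name, or None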
|
python
|
import awkward
import numpy
import numba
import hgg.selections.selection_utils as utils
import hgg.selections.object_selections as object_selections
import hgg.selections.lepton_selections as lepton_selections
import hgg.selections.tau_selections as tau_selections
import hgg.selections.photon_selections as photon_selections
import hgg.selections.jet_selections as jet_selections
def ggTauTau_inclusive_preselection(events, photons, electrons, muons, taus, jets, options, debug):
"""
Performs inclusive ggTauTau preselection, requiring >=1 (leptons + tau_h).
Assumes diphoton preselection has already been applied.
Also calculates relevant event-level variables.
"""
cut_diagnostics = utils.CutDiagnostics(events = events, debug = debug, cut_set = "[analysis_selections.py : ggTauTau_inclusive_preselection]")
# Get number of electrons, muons, taus
selected_electrons = electrons[lepton_selections.select_electrons(events, photons, electrons, options, debug)]
selected_muons = muons[lepton_selections.select_muons(events, photons, muons, options, debug)]
selected_taus = taus[tau_selections.select_taus(events, photons, selected_muons, selected_electrons, taus, options, debug)]
n_electrons = awkward.num(selected_electrons)
n_muons = awkward.num(selected_muons)
n_taus = awkward.num(selected_taus)
# Require >= 1 lep/tau
n_leptons_and_taus = n_electrons + n_muons + n_taus
lep_tau_cut = n_leptons_and_taus >= options["n_leptons_and_taus"]
# Require OS leptons/taus for events with 2 leptons/taus
sum_charge = awkward.sum(selected_electrons.charge, axis=1) + awkward.sum(selected_muons.charge, axis=1) + awkward.sum(selected_taus.charge, axis=1)
charge_cut = sum_charge == 0
two_leptons = n_leptons_and_taus == 2
not_two_leptons = n_leptons_and_taus != 2
os_cut = (two_leptons & charge_cut) | not_two_leptons # only require 2 OS leptons if there are ==2 leptons in the event
# Select jets (don't cut on jet quantities for selection, but they will be useful for BDT training)
selected_jets = jets[jet_selections.select_jets(events, photons, selected_electrons, selected_muons, selected_taus, jets, options, debug)]
all_cuts = lep_tau_cut & os_cut
cut_diagnostics.add_cuts([lep_tau_cut, os_cut, all_cuts], ["N_leptons + N_taus >= 1", "OS dileptons", "all"])
# Keep only selected events
selected_events = events[all_cuts]
selected_photons = photons[all_cuts]
selected_electrons = selected_electrons[all_cuts]
selected_muons = selected_muons[all_cuts]
selected_taus = selected_taus[all_cuts]
selected_jets = selected_jets[all_cuts]
# Calculate event-level variables
selected_events = lepton_selections.set_electrons(selected_events, selected_electrons, debug)
selected_events = lepton_selections.set_muons(selected_events, selected_muons, debug)
selected_events = tau_selections.set_taus(selected_events, selected_taus, debug)
selected_events = jet_selections.set_jets(selected_events, selected_jets, options, debug)
# TODO: add calculation HH->ggTauTau specific variables (e.g. H->TauTau kinematics) here
return selected_events
def tth_leptonic_preselection(events, photons, electrons, muons, jets, options, debug):
"""
Performs tth leptonic preselection, requiring >= 1 lepton and >= 1 jet
Assumes diphoton preselection has already been applied.
Also calculates relevant event-level variables.
"""
cut_diagnostics = utils.CutDiagnostics(events = events, debug = debug, cut_set = "[analysis_selections.py : tth_leptonic_preselection]")
# Get number of electrons, muons
selected_electrons = electrons[lepton_selections.select_electrons(events, photons, electrons, options, debug)]
selected_muons = muons[lepton_selections.select_muons(events, photons, muons, options, debug)]
n_electrons = awkward.num(selected_electrons)
n_muons = awkward.num(selected_muons)
n_leptons = n_electrons + n_muons
# Get number of jets
selected_jets = jets[jet_selections.select_jets(events, photons, selected_electrons, selected_muons, None, jets, options, debug)]
n_jets = awkward.num(selected_jets)
lep_cut = n_leptons >= 1
jet_cut = n_jets >= 1
all_cuts = lep_cut & jet_cut
cut_diagnostics.add_cuts([lep_cut, jet_cut, all_cuts], ["N_leptons >= 1", "N_jets >= 1", "all"])
# Keep only selected events
selected_events = events[all_cuts]
selected_photons = photons[all_cuts]
selected_electrons = selected_electrons[all_cuts]
selected_muons = selected_muons[all_cuts]
selected_jets = selected_jets[all_cuts]
# Calculate event-level variables
selected_events = lepton_selections.set_electrons(selected_events, selected_electrons, debug)
selected_events = lepton_selections.set_muons(selected_events, selected_muons, debug)
selected_events = jet_selections.set_jets(selected_events, selected_jets, options, debug)
return selected_events
def tth_hadronic_preselection(events, photons, electrons, muons, jets, options, debug):
cut_diagnostics = utils.CutDiagnostics(events = events, debug = debug, cut_set = "[analysis_selections.py : tth_hadronic_preselection]")
# Get number of electrons, muons
selected_electrons = electrons[lepton_selections.select_electrons(events, photons, electrons, options, debug)]
selected_muons = muons[lepton_selections.select_muons(events, photons, muons, options, debug)]
n_electrons = awkward.num(selected_electrons)
n_muons = awkward.num(selected_muons)
n_leptons = n_electrons + n_muons
# Get number of jets
selected_jets = jets[jet_selections.select_jets(events, photons, selected_electrons, selected_muons, None, jets, options, debug)]
n_jets = awkward.num(selected_jets)
# Get number of b-jets
selected_bjets = selected_jets[jet_selections.select_bjets(selected_jets, options, debug)]
n_bjets = awkward.num(selected_bjets)
lep_cut = n_leptons == 0
jet_cut = n_jets >= 3
bjet_cut = n_bjets >= 1
all_cuts = lep_cut & jet_cut & bjet_cut
cut_diagnostics.add_cuts([lep_cut, jet_cut, bjet_cut, all_cuts], ["N_leptons == 0", "N_jets >= 3", "N_bjets >= 1", "all"])
# Keep only selected events
selected_events = events[all_cuts]
selected_photons = photons[all_cuts]
selected_electrons = selected_electrons[all_cuts]
selected_muons = selected_muons[all_cuts]
selected_jets = selected_jets[all_cuts]
# Calculate event-level variables
selected_events = lepton_selections.set_electrons(selected_events, selected_electrons, debug)
selected_events = lepton_selections.set_muons(selected_events, selected_muons, debug)
selected_events = jet_selections.set_jets(selected_events, selected_jets, options, debug)
return selected_events
def tth_inclusive_preselection(events, photons, electrons, muons, jets, options, debug):
    cut_diagnostics = utils.CutDiagnostics(events = events, debug = debug, cut_set = "[analysis_selections.py : tth_inclusive_preselection]")
# Get number of electrons, muons
selected_electrons = electrons[lepton_selections.select_electrons(events, photons, electrons, options, debug)]
selected_muons = muons[lepton_selections.select_muons(events, photons, muons, options, debug)]
n_electrons = awkward.num(selected_electrons)
n_muons = awkward.num(selected_muons)
n_leptons = n_electrons + n_muons
# Get number of jets
selected_jets = jets[jet_selections.select_jets(events, photons, selected_electrons, selected_muons, None, jets, options, debug)]
n_jets = awkward.num(selected_jets)
# Get number of b-jets
selected_bjets = selected_jets[jet_selections.select_bjets(selected_jets, options, debug)]
n_bjets = awkward.num(selected_bjets)
lep_cut_leptonic = n_leptons >= 1
jet_cut_leptonic = n_jets >= 1
all_cuts_leptonic = lep_cut_leptonic & jet_cut_leptonic
lep_cut_hadronic = n_leptons == 0
jet_cut_hadronic = n_jets >= 3
bjet_cut_hadronic = n_bjets >= 1
all_cuts_hadronic = lep_cut_hadronic & jet_cut_hadronic & bjet_cut_hadronic
all_cuts = all_cuts_leptonic | all_cuts_hadronic
cut_diagnostics.add_cuts([lep_cut_leptonic, jet_cut_leptonic, all_cuts_leptonic, lep_cut_hadronic, jet_cut_hadronic, bjet_cut_hadronic, all_cuts_hadronic, all_cuts], ["N_leptons >= 1", "N_jets >= 1", "leptonic_presel", "N_leptons == 0", "N_jets >= 3", "N_bjets >= 1", "hadronic_presel", "all"])
# Keep only selected events
selected_events = events[all_cuts]
selected_photons = photons[all_cuts]
selected_electrons = selected_electrons[all_cuts]
selected_muons = selected_muons[all_cuts]
selected_jets = selected_jets[all_cuts]
# Calculate event-level variables
selected_events = lepton_selections.set_electrons(selected_events, selected_electrons, debug)
selected_events = lepton_selections.set_muons(selected_events, selected_muons, debug)
selected_events = jet_selections.set_jets(selected_events, selected_jets, options, debug)
return selected_events
def ggbb_preselection(events, photons, electrons, muons, jets, fatjets, options, debug):
cut_diagnostics = utils.CutDiagnostics(events = events, debug = debug, cut_set = "[analysis_selections.py : ggbb_preselection]")
# Get number of electrons, muons
selected_electrons = electrons[lepton_selections.select_electrons(events, photons, electrons, options, debug)]
selected_muons = muons[lepton_selections.select_muons(events, photons, muons, options, debug)]
n_electrons = awkward.num(selected_electrons)
n_muons = awkward.num(selected_muons)
n_leptons = n_electrons + n_muons
# Get number of jets
selected_jets = jets[jet_selections.select_jets(events, photons, selected_electrons, selected_muons, None, jets, options, debug)]
n_jets = awkward.num(selected_jets)
# Get number of b-jets
selected_bjets = selected_jets[jet_selections.select_bjets(selected_jets, options, debug)]
n_bjets = awkward.num(selected_bjets)
# Get fat jets
selected_fatjets = fatjets[jet_selections.select_fatjets(events, photons, fatjets, options, debug)]
n_fatjets = awkward.num(selected_fatjets)
lep_cut = n_leptons == 0
if options["boosted"]:
jet_cut = n_bjets < 2
fatjet_cut = n_fatjets == 1
all_cuts = lep_cut & jet_cut & fatjet_cut
cut_diagnostics.add_cuts([lep_cut, jet_cut, fatjet_cut, all_cuts], ["N_leptons == 0", "N_b-jets < 2", "N_fatjets == 1", "all"])
else:
jet_cut = n_bjets == 2
all_cuts = lep_cut & jet_cut
cut_diagnostics.add_cuts([lep_cut, jet_cut, all_cuts], ["N_leptons == 0", "N_b-jets == 2", "all"])
# Keep only selected events
selected_events = events[all_cuts]
selected_photons = photons[all_cuts]
selected_jets = selected_jets[all_cuts]
selected_fatjets = selected_fatjets[all_cuts]
# Calculate event-level variables
selected_events = jet_selections.set_jets(selected_events, selected_jets, options, debug)
selected_events = jet_selections.set_fatjets(selected_events, selected_fatjets, options, debug)
return selected_events
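if __name__ == "__main__":
    # Minimal sketch (not part of the analysis above) illustrating the cut-mask
    # pattern used throughout this module: per-event object counts become
    # boolean masks that are combined with "&"/"|" and then used to index the
    # jagged collections. The toy charges below are made up for illustration.
    import awkward

    charges = awkward.Array([[1, -1], [1], [], [1, -1, 1]])
    n_leptons = awkward.num(charges)             # number of objects per event
    sum_charge = awkward.sum(charges, axis=1)    # net charge per event
    mask = (n_leptons == 2) & (sum_charge == 0)  # opposite-sign dilepton events
    print(awkward.to_list(charges[mask]))        # [[1, -1]]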
|
python
|
'''
Created on 14 Sep 2011
@author: samgeen
'''
import EventHandler as Events
from OpenGL.GLUT import *
# TODO: Can we have circular imports? How does that work? Works fine in C++!
# We really need this in case we have more than one camera.
# Plus firing everything through the event handler is tiring
#from Camera import Camera
# TODO: CONVERT THIS INTO A CAMERA HANDLER OBJECT
# TODO: FOLD MOUSE INPUTS INTO KEYHANDLER AND RENAME THAT
# TODO: ALLOW MOUSE INPUT "ZONES" ON THE SCREEN THAT FIRE DIFFERENT EVENTS DEPENDING
# ON THE POSITION OF THE CURSOR
class MouseHandler(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
# Mouse state information (hacky?)
self.__x = 0
self.__y = 0
self.__leftPressed = False
self.__rightPressed = False
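        # Note: mouse deltas are divided by __rotateSpeed below, so a larger
        # value rotates the camera more slowly; __zoomSpeed is presumably a
        # per-step multiplicative zoom factor used by the camera code.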
self.__rotateSpeed = 100.0
self.__zoomSpeed = 1.01
def RegisterEvents(self):
'''
Register this object to receive mouse movement commands
'''
Events.RegisterEvent("MOUSEMOVE", self.ReceiveMouseMove)
Events.RegisterEvent("MOUSEPRESS", self.ReceiveMousePress)
def ReceiveMouseMove(self, data):
'''
Receive mouse movement information
'''
# Is the left button pressed? If so, rotate the camera
if self.__leftPressed:
ax = (self.__x - data.scrx) / self.__rotateSpeed
ay = (self.__y - data.scry) / self.__rotateSpeed
angle = [ax,ay]
Events.FireEvent("CAMERA_ROTATE", angle)
self.__x = data.scrx
self.__y = data.scry
def ReceiveMousePress(self, data):
'''
Receive mouse button press information
'''
if data.button == GLUT_LEFT_BUTTON:
self.__leftPressed = data.state
if self.__leftPressed:
self.__x = data.scrx
self.__y = data.scry
def RotateSpeed(self, newSpeed):
self.__rotateSpeed = newSpeed
|
python
|
import os
import socket
import time
from colorama import Fore, Back, Style
def argument(arg):
switcher = {
1: "-A -sC -sV -vvv -oN Output/nmap", #Os scanning,Version detection,scripts,traceroute
2: "-O -V -oN Output/nmap", #OS Detection ,Version scanning
3: "-F --open -Pn -oN Output/nmap", #Fast Mode scan for open ports
4: "nm.command_line()", #Custom Payload or argument
5: "-p1-65535 --open -Pn -oN Output/nmap", #Scan for Open Tcp Ports
6: "-p1-65535 --open -sU -Pn -oN Output/nmap" #Scan for Open Udp Ports
}
return switcher.get(arg, "Invalid argument")
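# Optional sketch (not wired into the flow below): building an argument list
# and calling subprocess.run avoids the shell-injection risk of concatenating
# user input into os.system() strings. It only suits commands without shell
# redirection or pipes, and assumes the tool (e.g. nmap) is on the PATH, just
# as the os.system() calls below do.
def run_tool(command_string):
    import shlex
    import subprocess
    subprocess.run(shlex.split(command_string), check=False)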
#Website information
#Recreate the Output folder so results from a previous run do not linger
os.system("clear")
os.system("rm -rf Output && mkdir Output")
website=input("Enter The Website to scan: ")
ip_of_website = socket.gethostbyname(website)
print("Ip of "+website+" is:"+ip_of_website)
time.sleep(0.7)
#Choice to start nmap or not
print("\n Want to start nmap scanning \n1.Yes \n2.No ")
nmap_on_off=int(input("Choice:"))
if(nmap_on_off == 1):
#Starting nmap
print("\nFiring up Nmap\n")
print("1.Os scanning,Version detection,scripts,traceroute \n2.OS Detection ,Version scanning \n3.Fast Mode scan for open ports \n4.Custom Payload or argument \n5.Scan for Open Tcp Ports \n6.Scan for Open Udp Ports ")
choice=int(input("\n Enter The Choice:"))
if(choice == 4):
nmap_command=input("Enter Nmap Command u want \n")
os.system(nmap_command+" -oN Output/nmap "+ip_of_website)
else:
arg=argument(choice)
print("Command="" nmap "+arg+" <ip of website> \n")
os.system("nmap "+arg+" "+ip_of_website)
#print(arg)
else:
print("Skipping Nmap Scan")
#Finding Certificate of a website
#Finding subdomains and finding alive host
print("\n**************Finding all sudomain*****************")
os.system("assetfinder --subs-only "+website+" >> Output/all_host.txt")
print("\nDone and save output to all_host.txt")
print("\n************Finding alive sudomain*****************")
os.system("cat all_host.txt | httprobe >> Output/alive_host.txt")
print("\nDone and save output to alive_host.txt")
#Finding hidden Directory
print("\nWant to start checking hidden dir,files \n1.Yes\n2.No")
dir_start=int(input("Enter choice:"))
if (dir_start == 1):
print("Finding hidden Directory")
os.system("ffuf -w small.txt -u http://"+website+"/FUZZ -mc all -fs 42 -c -fc 404 -o Output/hidden_directories")
os.system("cat hidden_directories|jq >> Output/Hidden_Directory")
os.system("rm Output/hidden_directories")
else:
print("Skipping Directory search")
#Checking for wordpress site
print("\n***********Scanning website is Wordpress site or not***********")
os.system("wpscan --stealthy --url "+website)
os.system("wpscan -f json --url+ "+website+" >> Output/wpscan")
#Firing up the Sqlmap
print("\nStarting Sqlmap \n")
print(Fore.RED + 'WARNING:Only use sqlmap for attacking with mutual consent with the site owner or company')
print(Style.RESET_ALL)
print("\n1.Start \n2.Stop")
sql_start_stop=int(input("Want to continue with SqlMap:"))
if(sql_start_stop == 1):
sql_level=int(input("Level of tests to perform (1-5) \nEnter the level of Sqlmap:"))
sql_site=input("Enter the endpoint where u want to test sql injection:")
os.system("sqlmap -u "+sql_site+" --dbs --level="+str(sql_level))
elif(sql_start_stop == 2):
    print("Stopping Sqlmap and continuing the script")
else:
print("Skipping Sqlmap")
exit()
|
python
|
class Solution:
def average(self, salary) -> float:
salary.sort()
average = 0
for i in range(1, len(salary)-1):
average += salary[i]
return float(average/(len(salary)-2))
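if __name__ == "__main__":
    # Quick sanity check with made-up salaries: drop the single minimum and
    # maximum, then average the rest -> (2000 + 3000) / 2 = 2500.0
    print(Solution().average([4000, 3000, 1000, 2000]))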
|
python
|
# Simple and powerful tagging for Python objects.
#
# Author: Peter Odding <[email protected]>
# Last Change: March 11, 2018
# URL: https://github.com/xolox/python-gentag
"""Simple and powerful tagging for Python objects."""
# Standard library modules.
import collections.abc
import re
# External dependencies.
from humanfriendly import format, pluralize
from natsort import natsort
from property_manager import (
PropertyManager,
clear_property,
lazy_property,
mutable_property,
required_property,
set_property,
)
from six import string_types
from verboselogs import VerboseLogger
# Modules included in our package.
from gentag.exceptions import EmptyTagError, TagExpressionError
# Public identifiers that require documentation.
__all__ = (
'DEFAULT_TAG_NAME',
'ObjectFactory',
'Scope',
'Tag',
'TagFactory',
'__version__',
'generate_id',
'logger',
)
DEFAULT_TAG_NAME = 'all'
"""The identifier of the default tag that matches all tagged objects (a string)."""
# Semi-standard module versioning.
__version__ = '2.0'
# Initialize a logger for this module.
logger = VerboseLogger(__name__)
class Scope(PropertyManager):
"""
To use :mod:`gentag` everything starts with a :class:`Scope` object.
A :class:`Scope` object groups together related :class:`Tag` objects
and provides methods to define new tags and evaluate tag expressions.
"""
@property
def objects(self):
"""A mapping of tag names to :class:`set` objects (an :class:`ObjectFactory` instance)."""
return ObjectFactory(tags=self.tags)
@lazy_property
def tags(self):
"""A mapping of tag names to :class:`Tag` objects (an :class:`TagFactory` instance)."""
return TagFactory(scope=self)
def add_object(self, value, *tags):
"""
Add an object to the scope.
:param value: The object to add (any hashable value).
:param tags: The names of tags to associate the object with.
"""
logger.debug("Tagging object %r with %s: %s",
value, pluralize(len(tags), "tag"),
", ".join(map(str, tags)))
for name in tags:
self.tags[name].objects.add(value)
def define(self, name, value):
"""
Define the value of a tag.
:param name: The name of the tag (a string).
:param value: A string containing an expression or an iterable
of values associated to the given tag.
:returns: The :class:`Tag` object.
:raises: :exc:`~exceptions.ValueError` for unsupported `value` types.
"""
if isinstance(value, string_types):
logger.debug("Setting expression of tag '%s' to: %s", name, value)
tag = self.tags[name]
tag.expression = value
return tag
        elif isinstance(value, collections.abc.Iterable):
logger.debug("Setting objects of tag '%s' to: %s", name, value)
tag = self.tags[name]
tag.objects = value
return tag
else:
msg = "Unsupported value for tag '%s'! (%r)"
raise ValueError(format(msg, name, value))
def evaluate(self, expression):
"""
Get the objects matching the given expression.
:param expression: The tag expression to evaluate (a string).
:returns: A sorted :class:`list` with matching objects.
:raises: :exc:`.TagExpressionError` when the given expression
cannot be evaluated due to a syntax error.
This method is a wrapper for :func:`evaluate_raw()` that calls
:func:`sorted()` on the matching objects before returning them.
"""
return self.sorted(self.evaluate_raw(expression))
def evaluate_raw(self, expression):
"""
Get the objects matching the given expression.
:param expression: The tag expression to evaluate (a string).
:returns: A :class:`set` with matching objects.
:raises: :exc:`.TagExpressionError` when the given expression
cannot be evaluated due to a syntax error.
This method uses :func:`eval()` to evaluate the expression given by the
caller, however it overrides ``__builtins__`` to avoid leaking any
built-ins into the :func:`eval()` call.
"""
try:
logger.debug("Evaluating expression '%s' ..", expression)
objects = eval(expression, dict(__builtins__={}), self.objects)
logger.debug("The expression matched %s.", pluralize(len(objects), "object"))
return objects
except SyntaxError as e:
msg = "Failed to evaluate tag expression due to syntax error! (%s)"
raise TagExpressionError(format(msg, e))
def get_all_objects(self):
"""
Get all objects defined in the scope.
:returns: A :class:`set` of user defined objects.
This method iterates over the defined tags and collects all tagged
objects. Because the evaluation of tags with an :attr:`~Tag.expression`
won't change the result of :func:`get_all_objects()` such tags are
skipped for performance reasons.
"""
objects = set()
logger.debug("Collecting all tagged objects ..")
for tag in self.tags:
if tag.identifier != DEFAULT_TAG_NAME and not tag.expression:
objects.update(tag.objects)
logger.debug("Collected %s.", pluralize(len(objects), "object"))
return objects
def parse(self, value):
"""
Parse a string expression into a :class:`Tag` object.
:param value: The tag expression to parse (a string).
:returns: A :class:`Tag` object.
:raises: :exc:`~exceptions.ValueError` for unsupported `value` types.
During normal use you won't need the :func:`parse()` method, in fact
it's not currently being used anywhere in :mod:`gentag`. This method
was originally created with the idea of having :func:`define()` parse
string expressions up front to validate their syntax, however this
approach has since been abandoned. The :func:`parse()` method now
remains because it may be useful to callers for unforeseen use cases.
"""
if isinstance(value, string_types):
# We override __builtins__ to avoid leaking any built-ins into eval().
return eval(value, dict(__builtins__={}), self.tags)
else:
msg = "Unsupported value type! (%r)"
raise ValueError(format(msg, value))
def sorted(self, objects):
"""
Sort the given objects in a human friendly way.
:param objects: The objects to sort (an iterable).
:returns: The sorted objects (a list).
If all of the objects are strings they are sorted using natural
order sorting, otherwise the :func:`sorted()` function is used.
"""
if all(isinstance(o, string_types) for o in objects):
return natsort(objects)
else:
return sorted(objects)
class Tag(PropertyManager):
"""
A :class:`Tag` represents a set of :attr:`objects` with a common :attr:`name`.
There are three kinds of tags:
**Simple tags:**
When you set :attr:`objects` the tag becomes a 'simple tag' that
associates the name of the tag to the given objects.
**Composite tags:**
When you set :attr:`expression` the tag becomes a 'composite tag' that
associates the name of the tag to an expression that selects a subset of
tagged objects.
**The special default tag:**
When :attr:`identifier` is set to :data:`DEFAULT_TAG_NAME` the value of
:attr:`objects` is a :class:`set` that contains all tagged objects.
"""
@mutable_property
def expression(self):
"""A Python expression to select matching objects (a string or :data:`None`)."""
@expression.setter
def expression(self, value):
"""Set `expression` and clear `objects`."""
set_property(self, 'expression', value)
clear_property(self, 'id_or_expr')
clear_property(self, 'objects')
@lazy_property
def identifier(self):
"""An identifier based on :attr:`name` (a string or :data:`None`)."""
if self.name:
return generate_id(self.name, normalized=False)
@lazy_property
def id_or_expr(self):
"""
The :attr:`identifier` (if set) or :attr:`expression` (a string).
The value of :attr:`id_or_expr` is used by :func:`compose()` to
generate :attr:`expression` values for composite :class:`Tag`
objects.
"""
if self.identifier:
return self.identifier
else:
expr = self.expression
if not (expr.isalnum() or (expr.startswith('(') and expr.endswith(')'))):
# Add parentheses to ensure the right evaluation order.
expr = '(%s)' % expr
return expr
@mutable_property
def name(self):
"""
A user defined name for the tag (a string or :data:`None`).
Tags created using :func:`~Scope.define()` always have :attr:`name` set
but tags composed using Python expression syntax are created without a
:attr:`name`.
"""
@name.setter
def name(self, value):
"""Set `name` and `identifier`."""
set_property(self, 'name', value)
clear_property(self, 'id_or_expr')
clear_property(self, 'identifier')
@mutable_property
def objects(self):
"""
The values associated to the tag (a :class:`set`).
If :attr:`objects` isn't set it defaults to a computed value:
- If :attr:`identifier` is :data:`DEFAULT_TAG_NAME` then
:func:`~Scope.get_all_objects()` is used
to get the associated values.
- If :attr:`expression` is set it will be evaluated and the matching
objects will be returned.
- Otherwise a new, empty :class:`set` is created, bound to the
:class:`Tag` and returned.
"""
if self.identifier == DEFAULT_TAG_NAME:
return self.scope.get_all_objects()
elif self.expression:
return self.scope.evaluate_raw(self.expression)
else:
value = set()
set_property(self, 'objects', value)
return value
@objects.setter
def objects(self, value):
"""Set `objects` and clear `expression`."""
set_property(self, 'objects', set(value))
clear_property(self, 'id_or_expr')
clear_property(self, 'expression')
@required_property(repr=False)
def scope(self):
"""The :class:`Scope` in which the tag has been defined."""
def compose(self, operator, other):
"""
Create a composite tag.
:param operator: The operator used to compose the tags (a string).
:param other: The other :class:`Tag` object.
:returns: A new :class:`Tag` object or :data:`NotImplemented`
(if `other` isn't a :class:`Tag` object).
The :func:`compose()` method is a helper for :func:`__and__()`,
:func:`__or__()`, :func:`__sub__()` and :func:`__xor__()` that
generates an :attr:`expression` based on the :attr:`id_or_expr`
values of the two :class:`Tag` objects.
"""
if isinstance(other, Tag):
expression = '%s %s %s' % (self.id_or_expr, operator, other.id_or_expr)
return Tag(expression=expression, scope=self.scope)
else:
return NotImplemented
def __and__(self, other):
"""Use :func:`compose()` to create a :class:`Tag` that gives the intersection of two :class:`Tag` objects."""
return self.compose('&', other)
def __iter__(self):
"""Iterate over the matching objects."""
return iter(self.scope.sorted(self.objects))
def __or__(self, other):
"""Use :func:`compose()` to create a :class:`Tag` that gives the union of two tags."""
return self.compose('|', other)
def __sub__(self, other):
"""Use :func:`compose()` to create a :class:`Tag` that gives the difference of two tags."""
return self.compose('-', other)
def __xor__(self, other):
"""Use :func:`compose()` to create a :class:`Tag` that gives the symmetric difference of two tags."""
return self.compose('^', other)
class TagFactory(PropertyManager):
"""
A mapping of tag names to :class:`Tag` objects.
The names of tags are normalized using :func:`generate_id()`.
"""
@lazy_property
def map(self):
"""A dictionary with tags created by this :class:`TagFactory`."""
return {}
@required_property(repr=False)
def scope(self):
"""The :class:`Scope` that's using this :class:`TagFactory`."""
def __getitem__(self, name):
"""
Get or create a tag.
:param name: The name of the tag (a string).
:returns: A :class:`Tag` object.
"""
key = generate_id(name, normalized=True)
value = self.map.get(key)
if value is None:
logger.debug("Creating tag on first use: %s", name)
value = Tag(name=name, scope=self.scope)
self.map[key] = value
return value
def __iter__(self):
"""Iterate over the defined :class:`Tag` objects."""
return iter(self.map.values())
class ObjectFactory(PropertyManager):
"""
A mapping of tag names to :class:`set` objects.
This class is used by :func:`~Scope.evaluate()` during expression
parsing to resolve tag names to the associated :attr:`~Tag.objects`.
"""
@required_property
def tags(self):
"""The :class:`TagFactory` from which objects are retrieved."""
def __getitem__(self, name):
"""
Get the objects associated to the given tag.
:param name: The name of the tag (a string).
:returns: A :class:`set` of objects associated to the tag.
:raises: :exc:`.EmptyTagError` when no associated objects are available.
"""
objects = self.tags[name].objects
if not objects:
msg = "The tag '%s' doesn't match anything!"
raise EmptyTagError(format(msg, name))
return objects
def generate_id(value, normalized):
"""
Generate a Python identifier from a user provided string.
:param value: The user provided string.
:param normalized: :data:`True` to normalize the identifier to its
canonical form without underscores, :data:`False`
to preserve some readability.
:returns: The generated identifier (a string).
:raises: :exc:`~exceptions.ValueError` when nothing remains of `value`
after normalization.
If you just want a Python identifier from a user
defined string you can use `normalized=False`:
>>> generate_id('Any user-defined string', normalized=False)
'any_user_defined_string'
However if you want to use the identifier for comparison or as
    a key in a dictionary then it's better to use `normalized=True`:
>>> generate_id('Any user-defined string', normalized=True)
'anyuserdefinedstring'
The following example shows that values that would otherwise start with a
digit are prefixed with an underscore, because Python identifiers cannot
start with a digit:
>>> generate_id('42', normalized=True)
'_42'
"""
value = str(value).lower()
# Replace characters not allowed in identifiers with an underscore.
value = re.sub('[^a-z0-9]+', '' if normalized else '_', value)
# Strip leading and/or trailing underscores.
value = value.strip('_')
# Make sure something remains from the user provided string.
if not value:
msg = "Nothing remains of the given string after normalization! (%r)"
raise ValueError(msg % value)
# Add a leading underscore when the first character is a digit.
if value[0].isdigit():
value = '_' + value
return value
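if __name__ == '__main__':
    # Minimal usage sketch based on the classes above; the object and tag
    # names are made up. Objects are tagged, a composite tag is defined from
    # an expression, and expressions are evaluated back into sets of objects.
    scope = Scope()
    scope.add_object('server-1', 'rack_a', 'linux')
    scope.add_object('server-2', 'rack_b', 'linux')
    scope.add_object('server-3', 'rack_a', 'macos')
    scope.define('rack_a_linux', 'rack_a & linux')
    print(sorted(scope.evaluate_raw('rack_a_linux')))    # ['server-1']
    print(sorted(scope.evaluate_raw('linux - rack_a')))  # ['server-2']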
|
python
|
class MediaAluno:
    def __init__(self):
        # Keep the grades on the instance; a class-level `notas = []` would be
        # shared by every MediaAluno object.
        self.notas = []
    def add_notas(self, nota):
        self.notas.append(nota)
def calcula_notas(self):
soma = 0
for x in self.notas:
soma += x
media = (soma / len(self.notas))
return media
# def soma(self):
# return sum(self.notas)
if __name__ == "__main__":
aluno1 = MediaAluno()
x = 8
while x > 0:
try:
            nota = int(input(f'Enter grade {x}: '))
if 0 <= nota <= 10:
aluno1.add_notas(nota)
x -= 1
else:
                print('The grade must be between 0 and 10!')
except Exception as e:
            print(f'An error occurred: {e}')
print(aluno1.calcula_notas())
|
python
|
#!/bin/env python
###################################################################
# Genie - XR - XE - Cli - Yang
###################################################################
import time
import logging
# Needed for aetest script
from ats import aetest
from ats.utils.objects import R
from ats.datastructures.logic import Not
from genie.abstract import Lookup
from genie.libs import conf, ops
# Import Genie infra
from genie.conf import Genie
from genie.libs.conf import interface
from genie.libs.conf.ospf.ospf import Ospf
from genie.libs.conf.vrf.vrf import Vrf
from genie.libs.conf.address_family import AddressFamily,\
AddressFamilySubAttributes
log = logging.getLogger()
###################################################################
### COMMON SETUP SECTION ###
###################################################################
class common_setup(aetest.CommonSetup):
'''Connect, Configure, and Verify the configuration was
applied correctly
'''
@aetest.subsection
def genie_init(self, testscript, testbed, uut_alias, helper_alias,
steps, context):
""" Initialize the environment """
with steps.start('Initializing the environment for'
' Genie Configurable Objects'):
            # Context to mention which Command type to use. (Cli/Yang)
            # It has to be set before the Genie conversion, as the right
# Interface has to be instantiated.
# The default value is 'cli', so only do it in the 'yang' case
if context == 'yang':
# Set all device to yang
# Or, only specific device could be set to use 'yang', and the
# rest use 'cli'
for dev in testbed.devices.values():
dev.context ='yang'
# Initialize Genie Testbed
# It also sets the variable 'Genie.testbed', to hold the testbed
Genie.init(testbed=testbed)
# Test script parameters are added so
# these can be passed on to the subsections.
uut = Genie.testbed.find_devices(aliases=[uut_alias])[0]
helper = Genie.testbed.find_devices(aliases=[helper_alias])[0]
testscript.parameters['uut'] = uut
testscript.parameters['helper'] = helper
# Overwrite the pyATS testbed for Genie Testbed
testscript.parameters['testbed'] = Genie.testbed
# area_id from the datafile
testscript.parameters['area_id'] = self.area_id
# Mark testcase with looping information
aetest.loop.mark(ping, device=[uut, helper])
@aetest.subsection
def connect(self, testbed, testscript, steps, context, pool_num):
'''Connect to the devices, using either Cli and/or Yang'''
# Connect to all the devices
with steps.start('Connect to all devices'):
for dev in testbed.devices.values():
# If the context of this device is Yang, then connect via Yang
if dev.context == 'yang':
time.sleep(5)
dev.connect(alias = 'nc', via = 'netconf')
# As the alias name can be anything, We need to tell the
# infrastructure what is the alias of the yang connection
dev.mapping['yang'] = 'nc'
time.sleep(5)
# Cli supports a pool device mechanism. This will allow Ops
# to learn the features faster.
if pool_num > 1:
dev.start_pool(alias='vty', via='cli', size=pool_num)
else:
dev.connect(via='a', alias='cli')
# As the alias name can be anything, We need to tell the
# infrastructure what is the alias of the cli connection
# Use connection a for cli communication to the device
dev.mapping['cli'] = 'a'
# Abstraction
# Right now abstraction is done via OS and Context.
dev.lib = Lookup(dev.os, dev.context, packages={'conf':conf, 'ops':ops})
@aetest.subsection
def configure_basic_ospf(self, testbed, testscript, uut, helper, steps):
'''Configure Ospf'''
# To configure Ospf, we are doing the following:
#
# We want to configure it on two devices interconnected, so we are
# asking to find us a link which reach the 'uut' and the 'helper'
# device.
#
# Then we create a 'Ospf' Object with a name (to represent the Ospf
# name) and add this Ospf to the link. We configure the Ospf object
# with area_id
#
# Lastly, we want to configure an ip address on the interface of this
# link, and do a 'no shut'. This is done with Genie ipv4 and ipv6
# generator
#
# And we configure each interface and the Ospf.
with steps.start('Find a link between uut and helper device and '
'configure ospf process id {pro} on '
'it'.format(pro=self.ospf_1)):
# Using the find api
# Find a link that has an interface attached to device uut
            # and helper device. These interfaces must be of type ethernet.
link = testbed.find_links(R(interfaces__device__name=uut.name),
R(interfaces__device__name=helper.name),
R(interfaces__type='ethernet'),
count=1)[0]
# Take this link and configure Ospf on it
with steps.start('Configure ospf process id {pro} on '
'the link'.format(pro=self.ospf_1)):
# Create Ospf instance
ospf = Ospf()
ospf.instance = self.ospf_1
ospf.feature_ospf = True
# Adding ospf feature to the link
link.add_feature(ospf)
# Adding area to the vrf attribute
ospf.device_attr[uut].vrf_attr[None].area_attr[self.area_id]
ospf.device_attr[helper].vrf_attr[None].area_attr[self.area_id]
# Initiating ipv4 and ipv6 addresses
ipv4s = []
ipv6s = []
with steps.start('Building the interface configuration'):
# Generate ipv4 and ipv6 addresses, depending on the length of the
# interfaces
ipv4rng = iter(testbed.ipv4_cache.reserve(count=len(link.interfaces)))
ipv6rng = iter(testbed.ipv6_cache.reserve(count=len(link.interfaces)))
for intf in link.interfaces:
# Configuring the interface with its attribute
intf.shutdown = False
intf.layer = interface.Layer.L3
                # Assigning Ipv4 and ipv6 addresses to the interface
intf.ipv4 = next(ipv4rng)
ipv4s.append(intf.ipv4.ip)
intf.ipv6 = next(ipv6rng)
ipv6s.append(intf.ipv6.ip)
# Adding interface to the area attribute
ospf.device_attr[intf.device].vrf_attr[None]\
.area_attr[self.area_id].interface_attr[intf].if_admin_control = True
# Build interface config and apply it to the device
intf.build_config()
with steps.start('Building the ospf configuration'):
# Configure Ospf on all 'uut' and 'helper'
ospf.build_config()
# Using parameters to pass around the link and the ospf object.
testscript.parameters['link_1'] = link
testscript.parameters['ospf_1'] = ospf
def interface_up(self, ospf, ospf_name, vrf_name, area, dev_intf,
intf1_loop=None):
'''Method for verifying if the interface is up
Return none if the states are the one is expected
Raise an exception if the states are not the one expected
'''
try:
interfaces = ospf.info['vrf'][vrf_name]['address_family']['ipv4']['instance'][ospf_name]['areas'][area]['interfaces']
except KeyError:
return
for intf in interfaces.values():
assert intf['enable'] == True
@aetest.subsection
def verify_basic_ospf(self, uut, helper, steps, link_1):
'''Verify if the basic configuration of Ospf was done correctly'''
# Verify for both device
for dev in [uut, helper]:
# Find the interface of this device, and the neighbor one
dev_intf = link_1.find_interfaces(device=dev)[0]
# TODO - Bug here
neighbor_intf = link_1.find_interfaces(device=Not(str(dev)))[0]
# Create an instance of Ospf Ops object for a particular device
ospf = dev.lib.ops.ospf.ospf.Ospf(device=dev)
# Ops objects have a builtin polling mechanism to learn a feature.
# It will try 20 times to learn the feature, with sleep of 10 in
# between and to verify if it was learnt correctly, will call the
# verify function. If this function does not raise an exception,
# then will assume it was learnt correctly.
try:
ospf.learn_poll(verify=self.interface_up, sleep=10, attempt=10,
ospf_name=self.ospf_1, vrf_name='default',
area=self.area_id, dev_intf=dev_intf)
except StopIteration as e:
self.failed(str(e))
log.info("Configuration was applied correctly")
@aetest.subsection
def configure_ospf(self, testbed, testscript, uut, helper, steps):
'''Configure a bigger Ospf configuration'''
# To configure Ospf, we are doing the following:
#
# We want to configure it on two devices interconnected, so we are
# asking to find us a link which reach the 'uut' and the 'helper'
# device. We are taking the second ones, as the first one was used
# for the previous subsection.
#
# Then we create a 'Ospf' Object with a name (to represent the Ospf
# name) and add this Ospf to the link, and add configuration to it.
#
# Then we create a 'Vrf' object, which is added to the vrf.
#
# Lastly, we want to configure an ip address on the interface of this
# link, and do a 'no shut'. This is done with Genie ipv4 and ipv6
# generator. It is also done for the loopback interfaces.
#
# And we configure each interface, the vrf and the Ospf.
with steps.start('Find a link between uut and helper device and '
'configure ospf with '
'pid {pro} on it'.format(pro=self.ospf_2)):
link = testbed.find_links(R(interfaces__device__name=uut.name),
R(interfaces__device__name=helper.name),
R(interfaces__type='ethernet'))
# We are taking the second link
link = link[1]
# passing this link to testscript parameters
# so we can be used in other subsections
testscript.parameters['link_2'] = link
with steps.start('Find a loopback link between uut and helper device '
'and configure ospf with pid 200 on it'):
loopback_link = testbed.find_links\
(R(interfaces__device__name=uut.name),
R(interfaces__device__name=helper.name),
R(interfaces__type = 'loopback'),count=1)[0]
# Take this link and configure Ospf on it
with steps.start('Configure ospf with pid {pro} on the '
'link'.format(pro=self.ospf_2)):
# Create a ospf object
ospf = Ospf(ospf_name=self.ospf_2)
ospf.instance = self.ospf_2
# Adding ospf to the link and loopback_link
link.add_feature(ospf)
loopback_link.add_feature(ospf)
# Add attributes to the ospf object
ospf.log_adjacency_changes = self.log_adjacency_changes
ospf.nsf = self.nsf
ospf.nsr = self.nsr
ospf.auto_cost_ref_bw = self.auto_cost_ref_bw
with steps.start("Configure vrf '{vrf}' under "
"ospf process id {pro} on "
"the link".format(pro=self.ospf_2,
vrf=self.vrf_name)):
# Create vrf Object
vrf = Vrf(name=self.vrf_name)
# Add the address_family attributes to vrf
vrf.device_attr[uut].address_family_attr['ipv4 unicast']
vrf.device_attr[helper].address_family_attr['ipv4 unicast']
vrf.device_attr[uut].address_family_attr['ipv6 unicast']
vrf.device_attr[helper].address_family_attr['ipv6 unicast']
# Adding area to the vrf attribute
ospf.device_attr[uut].vrf_attr[vrf].area_attr[self.area_id]
ospf.device_attr[helper].vrf_attr[vrf].area_attr[self.area_id]
for dev in testbed.devices.values():
dev.add_feature(vrf)
vrf.device_attr[dev].build_config()
with steps.start('Building the interface configuration with Ospf '
'process id {pro}'.format(pro=self.ospf_2)):
# Initiating ipv4 and ipv6 addresses
ipv4s = []
ipv6s = []
# Generate ipv4 and ipv6 addresses, depending on the length of the
# interfaces
ipv4rng = iter(testbed.ipv4_cache.reserve(count=len(link.interfaces)))
ipv6rng = iter(testbed.ipv6_cache.reserve(count=len(link.interfaces)))
for intf in link.interfaces:
# Adding vrf to the interface
intf.vrf = vrf
# Configuring interface with its attributes
intf.shutdown = False
intf.layer = interface.Layer.L3
# Assigning Ipv4 and ipv6 addresses to the interface
intf.ipv4 = next(ipv4rng)
ipv4s.append(intf.ipv4.ip)
intf.ipv6 = next(ipv6rng)
ipv6s.append(intf.ipv6.ip)
# Adding interface to the area attribute
ospf.device_attr[intf.device].vrf_attr[vrf]\
.area_attr[self.area_id].interface_attr[intf].if_admin_control = True
# Build interface config and apply it to the device
intf.build_config()
with steps.start('Building the loopback interface configuration with '
'Ospf process id {pro}'.format(pro=self.ospf_2)):
for intf in loopback_link.interfaces:
# Add vrf to the loopback interface object
intf.vrf = vrf
intf.layer = interface.Layer.L3
# Assigning ipv4 and ipv6 addresses to the loopback interface
intf.ipv4 = testbed.ipv4_cache.reserve(prefixlen=32)[0]
ipv4s.append(intf.ipv4.ip)
intf.ipv6 = testbed.ipv6_cache.reserve(prefixlen=128)[0]
ipv6s.append(intf.ipv6.ip)
# Adding the Loopback interface to the ospf object
ospf.device_attr[intf.device].vrf_attr[vrf]\
.area_attr[self.area_id].interface_attr[intf].if_admin_control = True
# Assigning the Loopback address as the OSPF router-id
if intf.device.name == uut.name :
ospf.device_attr[uut].vrf_attr[vrf]\
.router_id = intf.ipv4.ip.exploded
elif intf.device.name == helper.name:
ospf.device_attr[helper].vrf_attr[vrf]\
.router_id = intf.ipv4.ip.exploded
# Building loopback interface configuration
intf.build_config()
with steps.start('Building the ospf configuration with '
'Ospf process id {pro}'.format(pro=self.ospf_2)):
# Building OSPF configuration
ospf.build_config()
# adding vrf, ospf and loopback to parameters
testscript.parameters['vrf'] = vrf
testscript.parameters['ospf'] = ospf
testscript.parameters['loopback_link_2'] = loopback_link
# Adding information for the ping test
aetest.loop.mark(ping.test, destination=ipv4s,
vrf=[vrf.name]*(len(link.interfaces) +
len(loopback_link.interfaces)))
@aetest.subsection
def verify_ospf(self, uut, helper, steps, link_2, loopback_link_2):
'''Verify if the Ospf configuration was done correctly'''
# Verify for both device
for dev in [uut, helper]:
# Find the interface of this device
dev_intf = link_2.find_interfaces(device=dev)[0]
neighbor_intf = link_2.find_interfaces(device=Not(str(dev)))[0]
# Same for loopback
intf1_loop = loopback_link_2.find_interfaces(device=dev)[0]
intf2_loop = loopback_link_2.find_interfaces(device=Not(str(dev)))[0]
ospf = dev.lib.ops.ospf.ospf.Ospf(device=dev)
try:
ospf.learn_poll(verify=self.interface_up, sleep=10, attempt=10,
ospf_name=self.ospf_2, vrf_name=self.vrf_name,
area=self.area_id, dev_intf=dev_intf,
intf1_loop=intf1_loop)
except StopIteration as e:
self.failed(str(e))
time.sleep(60)
log.info("Configuration was applied correctly")
##################################################################
## TESTCASES SECTION ###
##################################################################
class ping(aetest.Testcase):
"""This is user Testcases section"""
@aetest.test
def test(self, device, destination, vrf, steps):
with steps.start('Ipv4 Ping Test from {dev} to {ip}'.
format(dev=device.name,ip=destination)):
"""Ping an ip"""
log.info("{dev} pings {ip}".format(dev=device.name, ip=destination))
device.ping(destination, vrf=vrf, interval=1000, count=30)
class Modify(aetest.Testcase):
"""Modify configuration"""
@aetest.test
def test(self, ospf, uut, vrf, steps):
"""Modify ospf attribtue"""
# Modify auto cost reference bandwidth to 10
with steps.start('Modify auto-cost reference bandwidth'
' attribute on '
'ospf process-id {pro} vrf {vrf}'
.format(vrf=vrf.name, pro=self.ospf_2)):
# Modify the value auto_cost_ref_bw
ospf.device_attr[uut].auto_cost_ref_bw = self.auto_cost_ref_bw
ospf.build_config(attributes={'device_attr':{uut:{'vrf_attr':{'*':{'auto_cost_ref_bw':None}}}}})
#####################################################################
#### COMMON CLEANUP SECTION ###
#####################################################################
class common_cleanup(aetest.CommonCleanup):
""" Common Cleanup for Sample Test """
@aetest.subsection
def clean_everything(self, steps):
testbed = Genie.testbed
""" Common Cleanup Subsection """
with steps.start('Unconfig testbed'):
if testbed:
log.info("Aetest Common Cleanup ")
# Unconfigure the whole testbed
# (ie) unconfigure the link, devices and the feature
testbed.build_unconfig()
|
python
|
import http
import logging
import sys
import time
from collections import abc
from copy import copy
from os import getpid
import click
TRACE_LOG_LEVEL = 5
class ColourizedFormatter(logging.Formatter):
"""
A custom log formatter class that:
* Outputs the LOG_LEVEL with an appropriate color.
    * If a log call includes an `extra={"color_message": ...}` it will be used
      for formatting the output, instead of the plain text message.
"""
level_name_colors = {
TRACE_LOG_LEVEL: lambda level_name: click.style(str(level_name), fg="blue"),
logging.DEBUG: lambda level_name: click.style(str(level_name), fg="cyan"),
logging.INFO: lambda level_name: click.style(str(level_name), fg="green"),
logging.WARNING: lambda level_name: click.style(str(level_name), fg="yellow"),
logging.ERROR: lambda level_name: click.style(str(level_name), fg="red"),
logging.CRITICAL: lambda level_name: click.style(
str(level_name), fg="bright_red"
),
}
def __init__(self, fmt=None, datefmt=None, style="%", use_colors=None):
if use_colors in (True, False):
self.use_colors = use_colors
else:
self.use_colors = sys.stdout.isatty()
super().__init__(fmt=fmt, datefmt=datefmt, style=style)
def color_level_name(self, level_name, level_no):
def default(level_name):
return str(level_name)
func = self.level_name_colors.get(level_no, default)
return func(level_name)
def should_use_colors(self):
return True
def formatMessage(self, record):
recordcopy = copy(record)
levelname = recordcopy.levelname
seperator = " " * (8 - len(recordcopy.levelname))
if self.use_colors:
levelname = self.color_level_name(levelname, recordcopy.levelno)
if "color_message" in recordcopy.__dict__:
recordcopy.msg = recordcopy.__dict__["color_message"]
recordcopy.__dict__["message"] = recordcopy.getMessage()
recordcopy.__dict__["levelprefix"] = levelname + ":" + seperator
return super().formatMessage(recordcopy)
class DefaultFormatter(ColourizedFormatter):
def should_use_colors(self):
return sys.stderr.isatty()
class AccessFormatter(ColourizedFormatter):
status_code_colours = {
1: lambda code: click.style(str(code), fg="bright_white"),
2: lambda code: click.style(str(code), fg="green"),
3: lambda code: click.style(str(code), fg="yellow"),
4: lambda code: click.style(str(code), fg="red"),
5: lambda code: click.style(str(code), fg="bright_red"),
}
def get_status_code(self, status_code: int):
try:
status_phrase = http.HTTPStatus(status_code).phrase
except ValueError:
status_phrase = ""
status_and_phrase = "%s %s" % (status_code, status_phrase)
if self.use_colors:
def default(code):
return status_and_phrase
func = self.status_code_colours.get(status_code // 100, default)
return func(status_and_phrase)
return status_and_phrase
def formatMessage(self, record):
recordcopy = copy(record)
(
client_addr,
method,
full_path,
http_version,
status_code,
) = recordcopy.args
status_code = self.get_status_code(status_code)
request_line = "%s %s HTTP/%s" % (method, full_path, http_version)
if self.use_colors:
request_line = click.style(request_line, bold=True)
recordcopy.__dict__.update(
{
"client_addr": client_addr,
"request_line": request_line,
"status_code": status_code,
}
)
return super().formatMessage(recordcopy)
class GunicornSafeAtoms(abc.Mapping):
"""Implement atoms necessary for gunicorn log.
This class does a few things:
- provide all atoms necessary for gunicorn log formatter
- collect response body size for reporting from ASGI messages
- provide mapping interface that returns '-' for missing atoms
- escapes double quotes found in atom strings
"""
def __init__(self, scope):
self.scope = scope
self.status_code = None
self.response_headers = {}
self.response_length = 0
self._request_headers = None
@property
def request_headers(self):
if self._request_headers is None:
self._request_headers = {
k.decode("ascii"): v.decode("ascii") for k, v in self.scope["headers"]
}
return self._request_headers
@property
def duration(self):
d = self.scope["response_end_time"] - self.scope["request_start_time"]
return d
def on_asgi_message(self, message):
if message["type"] == "http.response.start":
self.status_code = message["status"]
self.response_headers = {
k.decode("ascii"): v.decode("ascii") for k, v in message["headers"]
}
elif message["type"] == "http.response.body":
self.response_length += len(message.get("body", ""))
def _request_header(self, key):
return self.request_headers.get(key.lower())
def _response_header(self, key):
return self.response_headers.get(key.lower())
def _wsgi_environ_variable(self, key):
# FIXME: provide fallbacks to access WSGI environ (at least the
# required variables).
return None
def __getitem__(self, key):
if key in self.HANDLERS:
retval = self.HANDLERS[key](self)
elif key.startswith("{"):
if key.endswith("}i"):
retval = self._request_header(key[1:-2])
elif key.endswith("}o"):
retval = self._response_header(key[1:-2])
elif key.endswith("}e"):
retval = self._wsgi_environ_variable(key[1:-2])
else:
retval = None
else:
retval = None
if retval is None:
return "-"
if isinstance(retval, str):
return retval.replace('"', '\\"')
return retval
HANDLERS = {}
def _register_handler(key, handlers=HANDLERS):
def decorator(fn):
handlers[key] = fn
return fn
return decorator
@_register_handler("h")
def _remote_address(self, *args, **kwargs):
return self.scope["client"][0]
@_register_handler("l")
def _dash(self, *args, **kwargs):
return "-"
@_register_handler("u")
def _user_name(self, *args, **kwargs):
pass
@_register_handler("t")
def date_of_the_request(self, *args, **kwargs):
"""Date and time in Apache Common Log Format"""
return time.strftime("[%d/%b/%Y:%H:%M:%S %z]")
@_register_handler("r")
def status_line(self, *args, **kwargs):
full_raw_path = self.scope["raw_path"] + self.scope["query_string"]
full_path = full_raw_path.decode("ascii")
return "{method} {full_path} HTTP/{http_version}".format(
full_path=full_path, **self.scope
)
@_register_handler("m")
def request_method(self, *args, **kwargs):
return self.scope["method"]
@_register_handler("U")
def url_path(self, *args, **kwargs):
return self.scope["raw_path"].decode("ascii")
@_register_handler("q")
def query_string(self, *args, **kwargs):
return self.scope["query_string"].decode("ascii")
@_register_handler("H")
def protocol(self, *args, **kwargs):
return "HTTP/%s" % self.scope["http_version"]
@_register_handler("s")
def status(self, *args, **kwargs):
return self.status_code or "-"
@_register_handler("B")
def response_length(self, *args, **kwargs):
return self.response_length
@_register_handler("b")
def response_length_or_dash(self, *args, **kwargs):
return self.response_length or "-"
@_register_handler("f")
def referer(self, *args, **kwargs):
return self.request_headers.get("referer")
@_register_handler("a")
def user_agent(self, *args, **kwargs):
return self.request_headers.get("user-agent")
@_register_handler("T")
def request_time_seconds(self, *args, **kwargs):
return int(self.duration)
@_register_handler("D")
def request_time_microseconds(self, *args, **kwargs):
return int(self.duration * 1_000_000)
@_register_handler("L")
def request_time_decimal_seconds(self, *args, **kwargs):
return "%.6f" % self.duration
@_register_handler("p")
def process_id(self, *args, **kwargs):
return "<%s>" % getpid()
def __iter__(self):
# FIXME: add WSGI environ
yield from self.HANDLERS
for k, _ in self.scope["headers"]:
yield "{%s}i" % k.lower()
for k in self.response_headers:
yield "{%s}o" % k.lower()
def __len__(self):
# FIXME: add WSGI environ
return (
len(self.HANDLERS)
+ len(self.scope["headers"] or ())
+ len(self.response_headers)
)
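if __name__ == "__main__":
    # Minimal sketch showing how the atoms mapping can drive a gunicorn-style
    # access log format string. The ASGI scope fields below are assumptions
    # made for illustration, not captured from a real request.
    scope = {
        "client": ("127.0.0.1", 54321),
        "method": "GET",
        "raw_path": b"/ping",
        "query_string": b"",
        "http_version": "1.1",
        "headers": [(b"user-agent", b"demo-client")],
    }
    atoms = GunicornSafeAtoms(scope)
    atoms.on_asgi_message({"type": "http.response.start", "status": 200, "headers": []})
    atoms.on_asgi_message({"type": "http.response.body", "body": b"pong"})
    print('%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s' % atoms)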
|
python
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import common
import time
from common import TestDriver
from common import IntegrationTest
from decorators import NotAndroid
from decorators import ChromeVersionEqualOrAfterM
class Smoke(IntegrationTest):
# Ensure Chrome does not use DataSaver in Incognito mode.
# Clank does not honor the --incognito flag.
@NotAndroid
def testCheckPageWithIncognito(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.AddChromeArg('--incognito')
t.LoadURL('http://check.googlezip.net/test.html')
responses = t.GetHTTPResponses()
self.assertNotEqual(0, len(responses))
for response in responses:
self.assertNotHasChromeProxyViaHeader(response)
# Ensure Chrome does not use DataSaver when holdback is enabled.
def testCheckPageWithHoldback(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.AddChromeArg('--force-fieldtrials=DataCompressionProxyHoldback/'
'Enabled')
t.LoadURL('http://check.googlezip.net/test.html')
responses = t.GetHTTPResponses()
self.assertNotEqual(0, len(responses))
num_chrome_proxy_request_headers = 0
for response in responses:
self.assertNotHasChromeProxyViaHeader(response)
if ('chrome-proxy' in response.request_headers):
num_chrome_proxy_request_headers += 1
# DataSaver histograms must still be logged.
t.SleepUntilHistogramHasEntry('PageLoad.Clients.DataReductionProxy.'
'ParseTiming.NavigationToParseStart')
self.assertEqual(num_chrome_proxy_request_headers, 0)
# Ensure that Chrome did not attempt to use DataSaver and got a bypass.
histogram = t.GetHistogram('DataReductionProxy.BypassedBytes.'
'Status502HttpBadGateway', 5)
self.assertEqual(histogram, {})
histogram = t.GetHistogram('DataReductionProxy.BlockTypePrimary', 5)
self.assertEqual(histogram, {})
histogram = t.GetHistogram('DataReductionProxy.BypassTypePrimary', 5)
self.assertEqual(histogram, {})
# Ensure Chrome uses DataSaver in normal mode.
def testCheckPageWithNormalMode(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.LoadURL('http://check.googlezip.net/test.html')
responses = t.GetHTTPResponses()
self.assertNotEqual(0, len(responses))
num_chrome_proxy_request_headers = 0
for response in responses:
self.assertHasChromeProxyViaHeader(response)
if ('chrome-proxy' in response.request_headers):
num_chrome_proxy_request_headers += 1
t.SleepUntilHistogramHasEntry('PageLoad.Clients.DataReductionProxy.'
'ParseTiming.NavigationToParseStart')
self.assertGreater(num_chrome_proxy_request_headers, 0)
# Ensure pageload metric pingback with DataSaver.
def testPingback(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.AddChromeArg('--enable-data-reduction-proxy-force-pingback')
t.LoadURL('http://check.googlezip.net/test.html')
t.LoadURL('http://check.googlezip.net/test.html')
t.SleepUntilHistogramHasEntry("DataReductionProxy.Pingback.Succeeded")
# Verify one pingback attempt that was successful.
attempted = t.GetHistogram('DataReductionProxy.Pingback.Attempted')
self.assertEqual(1, attempted['count'])
succeeded = t.GetHistogram('DataReductionProxy.Pingback.Succeeded')
self.assertEqual(1, succeeded['count'])
# Ensure client config is fetched at the start of the Chrome session, and the
# session ID is correctly set in the chrome-proxy request header.
def testClientConfig(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.SleepUntilHistogramHasEntry(
'DataReductionProxy.ConfigService.FetchResponseCode')
t.LoadURL('http://check.googlezip.net/test.html')
responses = t.GetHTTPResponses()
self.assertEqual(2, len(responses))
for response in responses:
chrome_proxy_header = response.request_headers['chrome-proxy']
self.assertIn('s=', chrome_proxy_header)
self.assertNotIn('ps=', chrome_proxy_header)
self.assertNotIn('sid=', chrome_proxy_header)
# Verify that the proxy server honored the session ID.
self.assertHasChromeProxyViaHeader(response)
self.assertEqual(200, response.status)
# Verify unique page IDs are sent in the Chrome-Proxy header.
@ChromeVersionEqualOrAfterM(59)
def testPageID(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
page_identifiers = []
page_loads = 5
for i in range (0, page_loads):
t.LoadURL('http://check.googlezip.net/test.html')
responses = t.GetHTTPResponses()
self.assertEqual(2, len(responses))
pid_in_page_count = 0
page_id = ''
for response in responses:
self.assertHasChromeProxyViaHeader(response)
self.assertEqual(200, response.status)
chrome_proxy_header = response.request_headers['chrome-proxy']
chrome_proxy_directives = chrome_proxy_header.split(',')
for directive in chrome_proxy_directives:
if 'pid=' in directive:
pid_in_page_count = pid_in_page_count+1
page_id = directive.split('=')[1]
self.assertNotEqual('', page_id)
self.assertNotIn(page_id, page_identifiers)
page_identifiers.append(page_id)
self.assertEqual(1, pid_in_page_count)
# Ensure that block causes resources to load from the origin directly.
def testCheckBlockIsWorking(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.LoadURL('http://check.googlezip.net/block')
responses = t.GetHTTPResponses()
self.assertNotEqual(0, len(responses))
for response in responses:
self.assertNotHasChromeProxyViaHeader(response)
# Ensure image, css, and javascript resources are compressed.
def testCheckImageCssJavascriptIsCompressed(self):
with TestDriver() as t:
t.AddChromeArg('--enable-spdy-proxy-auth')
t.LoadURL('http://check.googlezip.net/static')
# http://check.googlezip.net/static is a test page that has
# image/css/javascript resources.
responses = t.GetHTTPResponses()
self.assertNotEqual(0, len(responses))
for response in responses:
self.assertHasChromeProxyViaHeader(response)
if __name__ == '__main__':
IntegrationTest.RunAllTests()
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-19
def check(array: list, middle):
    # Count every occurrence of the candidate value across the whole array;
    # scanning only the contiguous run around `middle` (and counting the
    # middle element twice, as the original loops did) can report a false
    # majority for values that are not actually in the majority.
    candidate = array[middle]
    count = sum(1 for value in array if value == candidate)
    return count > (len(array) >> 1)
def solve(array: list):
if not array:
return None
middle = (len(array) - 1) >> 1
def partition(array, left, right):
first = left
key = array[left]
while left != right:
while key <= array[right] and left < right:
right -= 1
while array[left] <= key and left < right:
left += 1
if left < right:
array[left], array[right] = array[right], array[left]
        # Move the pivot value into its final sorted position
array[first] = array[left]
array[left] = key
return left
left = 0
right = len(array) - 1
index = partition(array, left, right)
while index != middle:
if index > middle:
right = index - 1
index = partition(array, left, right)
else:
left = index + 1
index = partition(array, left, right)
if check(array, middle):
return array[middle]
return None
if __name__ == '__main__':
print(solve([1, 2, 7, 2, 8, 2, 2, 5, 2]))
print(solve([1, 2, 7, 2, 8, 7, 2, 5, 2]))
print(solve([3]))
|
python
|
from unittest import TestCase
from django.http.request import MediaType
from simple_django_api.request import Request as HttpRequest
class MediaTypeTests(TestCase):
def test_empty(self):
for empty_media_type in (None, ''):
with self.subTest(media_type=empty_media_type):
media_type = MediaType(empty_media_type)
self.assertIs(media_type.is_all_types, False)
self.assertEqual(str(media_type), '')
self.assertEqual(repr(media_type), '<MediaType: >')
def test_str(self):
self.assertEqual(str(MediaType('*/*; q=0.8')), '*/*; q=0.8')
self.assertEqual(str(MediaType('application/xml')), 'application/xml')
def test_repr(self):
self.assertEqual(repr(MediaType('*/*; q=0.8')), '<MediaType: */*; q=0.8>')
self.assertEqual(
repr(MediaType('application/xml')),
'<MediaType: application/xml>',
)
def test_is_all_types(self):
self.assertIs(MediaType('*/*').is_all_types, True)
self.assertIs(MediaType('*/*; q=0.8').is_all_types, True)
self.assertIs(MediaType('text/*').is_all_types, False)
self.assertIs(MediaType('application/xml').is_all_types, False)
def test_match(self):
tests = [
('*/*; q=0.8', '*/*'),
('*/*', 'application/json'),
(' */* ', 'application/json'),
('application/*', 'application/json'),
('application/xml', 'application/xml'),
(' application/xml ', 'application/xml'),
('application/xml', ' application/xml '),
]
for accepted_type, mime_type in tests:
with self.subTest(accepted_type, mime_type=mime_type):
self.assertIs(MediaType(accepted_type).match(mime_type), True)
def test_no_match(self):
tests = [
(None, '*/*'),
('', '*/*'),
('; q=0.8', '*/*'),
('application/xml', 'application/html'),
('application/xml', '*/*'),
]
for accepted_type, mime_type in tests:
with self.subTest(accepted_type, mime_type=mime_type):
self.assertIs(MediaType(accepted_type).match(mime_type), False)
class AcceptHeaderTests(TestCase):
def test_no_headers(self):
"""Absence of Accept header defaults to '*/*'."""
request = HttpRequest()
self.assertEqual(
[str(accepted_type) for accepted_type in request.accepted_types],
['*/*'],
)
def test_accept_headers(self):
request = HttpRequest()
request.META['HTTP_ACCEPT'] = (
'text/html, application/xhtml+xml,application/xml ;q=0.9,*/*;q=0.8'
)
self.assertEqual(
[str(accepted_type) for accepted_type in request.accepted_types],
[
'text/html',
'application/xhtml+xml',
'application/xml; q=0.9',
'*/*; q=0.8',
],
)
def test_request_accepts_any(self):
request = HttpRequest()
request.META['HTTP_ACCEPT'] = '*/*'
self.assertIs(request.accepts('application/json'), True)
def test_request_accepts_none(self):
request = HttpRequest()
request.META['HTTP_ACCEPT'] = ''
self.assertIs(request.accepts('application/json'), False)
self.assertEqual(request.accepted_types, [])
def test_request_accepts_some(self):
request = HttpRequest()
request.META['HTTP_ACCEPT'] = 'text/html,application/xhtml+xml,application/xml;q=0.9'
self.assertIs(request.accepts('text/html'), True)
self.assertIs(request.accepts('application/xhtml+xml'), True)
self.assertIs(request.accepts('application/xml'), True)
self.assertIs(request.accepts('application/json'), False)
|
python
|
from .main import run
|
python
|
from os.path import abspath
from os.path import dirname
with open('{}/version.txt'.format(dirname(__file__))) as version_file:
__version__ = version_file.read().strip()
|
python
|
import time, numpy, pytest
from rpxdock import Timer
def test_timer():
with Timer() as timer:
time.sleep(0.02)
timer.checkpoint('foo')
time.sleep(0.06)
timer.checkpoint('bar')
time.sleep(0.04)
timer.checkpoint('baz')
times = timer.report_dict()
assert numpy.allclose(times['foo'], 0.02, atol=0.05)
assert numpy.allclose(times['bar'], 0.06, atol=0.05)
assert numpy.allclose(times['baz'], 0.04, atol=0.05)
times = timer.report_dict(order='longest')
assert list(times.keys()) == ['total', 'bar', 'baz', 'foo']
times = timer.report_dict(order='callorder')
assert list(times.keys()) == ['foo', 'bar', 'baz', 'total']
with pytest.raises(ValueError):
timer.report_dict(order='oarenstoiaen')
def test_summary():
with Timer() as timer:
time.sleep(0.01)
timer.checkpoint('foo')
time.sleep(0.03)
timer.checkpoint('foo')
time.sleep(0.02)
timer.checkpoint('foo')
times = timer.report_dict(summary=sum)
assert numpy.allclose(times['foo'], 0.06, atol=0.02)
times = timer.report_dict(summary=numpy.mean)
assert numpy.allclose(times['foo'], 0.02, atol=0.01)
times = timer.report_dict(summary='mean')
assert numpy.allclose(times['foo'], 0.02, atol=0.01)
times = timer.report_dict(summary='min')
assert numpy.allclose(times['foo'], 0.01, atol=0.01)
with pytest.raises(ValueError):
timer.report(summary='foo')
with pytest.raises(ValueError):
timer.report(summary=1)
if __name__ == '__main__':
test_timer()
test_summary()
|
python
|