| code (stringlengths 22 to 1.05M) | apis (sequencelengths 1 to 3.31k) | extract_api (stringlengths 75 to 3.25M) |
---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 <EMAIL>
# <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.web
import logging
import time
import sys
import os
import json as JSON # alias the module so it does not clash with local variables inside methods
from comm import *
from global_const import *
from base_handler import *
from tornado.escape import json_encode, json_decode
from tornado.httpclient import *
from tornado.httputil import url_concat
from tornado_swagger import swagger
from image_verify import generate_verify_image
@swagger.model()
class ImageResp:
def __init__(self, errCode, errMsg, code, imgUrl):
self.errCode = errCode
self.errMsg = errMsg
self.code = code
self.imgUrl = imgUrl
# /api/image-verify
class ApiImageVerifyXHR(tornado.web.RequestHandler):
@swagger.operation(nickname='post')
def post(self):
"""
        @description: generate an image verification (captcha) code
@rtype: L{ImageResp}
@raise 400: Invalid Input
@raise 500: Internal Server Error
"""
logging.info("POST %r", self.request.uri)
_id = generate_uuid_str()
timestamp = current_timestamp()
_datehour = timestamp_to_datehour(timestamp)
path = cur_file_dir()
logging.debug("got path %r", path)
if not os.path.exists(path + "/static/image-verify/" + _datehour):
os.makedirs(path + "/static/image-verify/" + _datehour)
# To save it
filepath = path + "/static/image-verify/" + _datehour + "/" + _id + '.gif'
mstream, _code = generate_verify_image(save_img=True, filepath=filepath)
img_url = self.request.protocol + "://" + self.request.host
img_url = img_url + '/static/image-verify/' + _datehour + "/" + _id + '.gif'
logging.info("Success[200]: generate image-verify code=[%r] img_url=[%r]", _code, img_url)
self.set_status(200) # Success
self.write(JSON.dumps({"errCode":200,"errMsg":"Success","code":_code,"imageUrl":img_url}))
self.finish()
| [
"tornado_swagger.swagger.operation",
"logging.debug",
"os.makedirs",
"tornado_swagger.swagger.model",
"os.path.exists",
"json.dumps",
"logging.info",
"image_verify.generate_verify_image"
] | [((1029, 1044), 'tornado_swagger.swagger.model', 'swagger.model', ([], {}), '()\n', (1042, 1044), False, 'from tornado_swagger import swagger\n'), ((1311, 1345), 'tornado_swagger.swagger.operation', 'swagger.operation', ([], {'nickname': '"""post"""'}), "(nickname='post')\n", (1328, 1345), False, 'from tornado_swagger import swagger\n'), ((1550, 1591), 'logging.info', 'logging.info', (['"""POST %r"""', 'self.request.uri'], {}), "('POST %r', self.request.uri)\n", (1562, 1591), False, 'import logging\n'), ((1758, 1792), 'logging.debug', 'logging.debug', (['"""got path %r"""', 'path'], {}), "('got path %r', path)\n", (1771, 1792), False, 'import logging\n'), ((2066, 2121), 'image_verify.generate_verify_image', 'generate_verify_image', ([], {'save_img': '(True)', 'filepath': 'filepath'}), '(save_img=True, filepath=filepath)\n', (2087, 2121), False, 'from image_verify import generate_verify_image\n'), ((2284, 2378), 'logging.info', 'logging.info', (['"""Success[200]: generate image-verify code=[%r] img_url=[%r]"""', '_code', 'img_url'], {}), "('Success[200]: generate image-verify code=[%r] img_url=[%r]',\n _code, img_url)\n", (2296, 2378), False, 'import logging\n'), ((1808, 1866), 'os.path.exists', 'os.path.exists', (["(path + '/static/image-verify/' + _datehour)"], {}), "(path + '/static/image-verify/' + _datehour)\n", (1822, 1866), False, 'import os\n'), ((1880, 1935), 'os.makedirs', 'os.makedirs', (["(path + '/static/image-verify/' + _datehour)"], {}), "(path + '/static/image-verify/' + _datehour)\n", (1891, 1935), False, 'import os\n'), ((2433, 2522), 'json.dumps', 'JSON.dumps', (["{'errCode': 200, 'errMsg': 'Success', 'code': _code, 'imageUrl': img_url}"], {}), "({'errCode': 200, 'errMsg': 'Success', 'code': _code, 'imageUrl':\n img_url})\n", (2443, 2522), True, 'import json as JSON\n')] |
import pickle
import pandas as pd
import os
from os import path
from scripts.process_raw import keep_positive_ratings, count_filter
from scripts.config import params
def process_raw(input_dir, output_dir, movie_users_threshold, user_movies_threshold):
ds = pd.read_csv(path.join(input_dir, 'ratings.csv'))
print('Overall records:', ds.shape[0])
print('Overall users:', len(ds['userId'].unique()))
print('Overall movies:', len(ds['movieId'].unique()))
ds = keep_positive_ratings(ds, 'userId', 'movieId', 'rating')
ds = count_filter(ds, movie_users_threshold, 'movieId', 'userId')
ds = count_filter(ds, user_movies_threshold, 'userId', 'movieId')
print('Left records:', ds.shape[0])
print('Left users:', len(ds['userId'].unique()))
print('Left movies:', len(ds['movieId'].unique()))
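    # map raw userId / movieId values to contiguous zero-based integer indices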
u2i = {user: ind for ind, user in enumerate(ds['userId'].unique())}
x2i = {movie: ind for ind, movie in enumerate(ds['movieId'].unique())}
processed = pd.DataFrame({'user': ds['userId'].apply(lambda x: u2i[x]),
'item': ds['movieId'].apply(lambda x: x2i[x])})
if not path.exists(output_dir):
os.makedirs(output_dir)
processed.to_csv(path.join(output_dir, 'ds.csv'), index=False)
with open(path.join(output_dir, 'u2i.pickle'), 'wb') as handle:
pickle.dump(u2i, handle)
with open(path.join(output_dir, 'x2i.pickle'), 'wb') as handle:
pickle.dump(x2i, handle)
if __name__ == '__main__':
common_params = params['ml']['common']
proc_params = params['ml']['process_raw']
process_raw(common_params['raw_dir'],
common_params['proc_dir'],
int(proc_params['movie_users_threshold']),
int(proc_params['user_movies_threshold']))
| [
"pickle.dump",
"os.makedirs",
"scripts.process_raw.count_filter",
"os.path.exists",
"scripts.process_raw.keep_positive_ratings",
"os.path.join"
] | [((479, 535), 'scripts.process_raw.keep_positive_ratings', 'keep_positive_ratings', (['ds', '"""userId"""', '"""movieId"""', '"""rating"""'], {}), "(ds, 'userId', 'movieId', 'rating')\n", (500, 535), False, 'from scripts.process_raw import keep_positive_ratings, count_filter\n'), ((545, 605), 'scripts.process_raw.count_filter', 'count_filter', (['ds', 'movie_users_threshold', '"""movieId"""', '"""userId"""'], {}), "(ds, movie_users_threshold, 'movieId', 'userId')\n", (557, 605), False, 'from scripts.process_raw import keep_positive_ratings, count_filter\n'), ((615, 675), 'scripts.process_raw.count_filter', 'count_filter', (['ds', 'user_movies_threshold', '"""userId"""', '"""movieId"""'], {}), "(ds, user_movies_threshold, 'userId', 'movieId')\n", (627, 675), False, 'from scripts.process_raw import keep_positive_ratings, count_filter\n'), ((275, 310), 'os.path.join', 'path.join', (['input_dir', '"""ratings.csv"""'], {}), "(input_dir, 'ratings.csv')\n", (284, 310), False, 'from os import path\n'), ((1140, 1163), 'os.path.exists', 'path.exists', (['output_dir'], {}), '(output_dir)\n', (1151, 1163), False, 'from os import path\n'), ((1173, 1196), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1184, 1196), False, 'import os\n'), ((1218, 1249), 'os.path.join', 'path.join', (['output_dir', '"""ds.csv"""'], {}), "(output_dir, 'ds.csv')\n", (1227, 1249), False, 'from os import path\n'), ((1340, 1364), 'pickle.dump', 'pickle.dump', (['u2i', 'handle'], {}), '(u2i, handle)\n', (1351, 1364), False, 'import pickle\n'), ((1442, 1466), 'pickle.dump', 'pickle.dump', (['x2i', 'handle'], {}), '(x2i, handle)\n', (1453, 1466), False, 'import pickle\n'), ((1278, 1313), 'os.path.join', 'path.join', (['output_dir', '"""u2i.pickle"""'], {}), "(output_dir, 'u2i.pickle')\n", (1287, 1313), False, 'from os import path\n'), ((1380, 1415), 'os.path.join', 'path.join', (['output_dir', '"""x2i.pickle"""'], {}), "(output_dir, 'x2i.pickle')\n", (1389, 1415), False, 'from os import path\n')] |
import os
import sys
import numpy as np
import scipy.io as sio
from skimage import io
import time
import math
import skimage
import src.faceutil
from src.faceutil import mesh
from src.faceutil.morphable_model import MorphabelModel
from src.util.matlabutil import NormDirection
from math import sin, cos, asin, acos, atan, atan2
from PIL import Image
import matplotlib.pyplot as plt
# global data
bfm = MorphabelModel('data/Out/BFM.mat')
def get_transform_matrix(s, angles, t, height):
"""
:param s: scale
:param angles: [3] rad
:param t: [3]
:return: 4x4 transmatrix
"""
x, y, z = angles[0], angles[1], angles[2]
Rx = np.array([[1, 0, 0],
[0, cos(x), sin(x)],
[0, -sin(x), cos(x)]])
Ry = np.array([[cos(y), 0, -sin(y)],
[0, 1, 0],
[sin(y), 0, cos(y)]])
Rz = np.array([[cos(z), sin(z), 0],
[-sin(z), cos(z), 0],
[0, 0, 1]])
# rotate
R = Rx.dot(Ry).dot(Rz)
R = R.astype(np.float32)
T = np.zeros((4, 4))
T[0:3, 0:3] = R
T[3, 3] = 1.
# scale
S = np.diagflat([s, s, s, 1.])
T = S.dot(T)
# offset move
M = np.diagflat([1., 1., 1., 1.])
M[0:3, 3] = t.astype(np.float32)
T = M.dot(T)
# revert height
# x[:,1]=height-x[:,1]
H = np.diagflat([1., 1., 1., 1.])
H[1, 1] = -1.0
H[1, 3] = height
T = H.dot(T)
return T.astype(np.float32)
| [
"numpy.diagflat",
"numpy.zeros",
"src.faceutil.morphable_model.MorphabelModel",
"math.sin",
"math.cos"
] | [((404, 438), 'src.faceutil.morphable_model.MorphabelModel', 'MorphabelModel', (['"""data/Out/BFM.mat"""'], {}), "('data/Out/BFM.mat')\n", (418, 438), False, 'from src.faceutil.morphable_model import MorphabelModel\n'), ((1060, 1076), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (1068, 1076), True, 'import numpy as np\n'), ((1134, 1161), 'numpy.diagflat', 'np.diagflat', (['[s, s, s, 1.0]'], {}), '([s, s, s, 1.0])\n', (1145, 1161), True, 'import numpy as np\n'), ((1204, 1237), 'numpy.diagflat', 'np.diagflat', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (1215, 1237), True, 'import numpy as np\n'), ((1343, 1376), 'numpy.diagflat', 'np.diagflat', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (1354, 1376), True, 'import numpy as np\n'), ((700, 706), 'math.cos', 'cos', (['x'], {}), '(x)\n', (703, 706), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((708, 714), 'math.sin', 'sin', (['x'], {}), '(x)\n', (711, 714), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((749, 755), 'math.cos', 'cos', (['x'], {}), '(x)\n', (752, 755), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((779, 785), 'math.cos', 'cos', (['y'], {}), '(y)\n', (782, 785), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((850, 856), 'math.sin', 'sin', (['y'], {}), '(y)\n', (853, 856), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((861, 867), 'math.cos', 'cos', (['y'], {}), '(y)\n', (864, 867), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((891, 897), 'math.cos', 'cos', (['z'], {}), '(z)\n', (894, 897), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((899, 905), 'math.sin', 'sin', (['z'], {}), '(z)\n', (902, 905), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((940, 946), 'math.cos', 'cos', (['z'], {}), '(z)\n', (943, 946), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((741, 747), 'math.sin', 'sin', (['x'], {}), '(x)\n', (744, 747), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((791, 797), 'math.sin', 'sin', (['y'], {}), '(y)\n', (794, 797), False, 'from math import sin, cos, asin, acos, atan, atan2\n'), ((932, 938), 'math.sin', 'sin', (['z'], {}), '(z)\n', (935, 938), False, 'from math import sin, cos, asin, acos, atan, atan2\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PaymentSuccessPagePlanInfo import PaymentSuccessPagePlanInfo
class AlipayOpenMiniPlanOperateBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniPlanOperateBatchqueryResponse, self).__init__()
self._page_data = None
self._page_num = None
self._page_size = None
self._total_number = None
@property
def page_data(self):
return self._page_data
@page_data.setter
def page_data(self, value):
if isinstance(value, list):
self._page_data = list()
for i in value:
if isinstance(i, PaymentSuccessPagePlanInfo):
self._page_data.append(i)
else:
self._page_data.append(PaymentSuccessPagePlanInfo.from_alipay_dict(i))
@property
def page_num(self):
return self._page_num
@page_num.setter
def page_num(self, value):
self._page_num = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total_number(self):
return self._total_number
@total_number.setter
def total_number(self, value):
self._total_number = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniPlanOperateBatchqueryResponse, self).parse_response_content(response_content)
if 'page_data' in response:
self.page_data = response['page_data']
if 'page_num' in response:
self.page_num = response['page_num']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total_number' in response:
self.total_number = response['total_number']
| [
"alipay.aop.api.domain.PaymentSuccessPagePlanInfo.PaymentSuccessPagePlanInfo.from_alipay_dict"
] | [((909, 955), 'alipay.aop.api.domain.PaymentSuccessPagePlanInfo.PaymentSuccessPagePlanInfo.from_alipay_dict', 'PaymentSuccessPagePlanInfo.from_alipay_dict', (['i'], {}), '(i)\n', (952, 955), False, 'from alipay.aop.api.domain.PaymentSuccessPagePlanInfo import PaymentSuccessPagePlanInfo\n')] |
# All content Copyright (C) 2018 Genomics plc
from wecall.genomics.chromosome import standardise_chromosome
import pysam
class TabixWrapper(object):
def __init__(self, tabix_filename):
self.__tabix_file = pysam.Tabixfile(tabix_filename, 'r')
self.__contig_mapping = {standardise_chromosome(
contig): contig for contig in self.__tabix_file.contigs}
@property
def header(self):
return (line for line in self.__tabix_file.header)
@property
def contigs(self):
return self.__tabix_file.contigs
def fetch_generator(self, chrom_interval):
# Tabix will throw a ValueError if the chromosome specified is not
# present in the index for this file.
try:
if chrom_interval.chrom is None:
return self.__tabix_file.fetch()
else:
return self.__tabix_file.fetch(
self.__contig_mapping.get(
chrom_interval.chrom,
chrom_interval.chrom),
chrom_interval.interval.start,
chrom_interval.interval.end)
except ValueError:
raise StopIteration
def fetch_region(self, region):
try:
return self.__tabix_file.fetch(region=region)
except ValueError:
raise StopIteration
def close(self):
self.__tabix_file.close()
def __enter__(self):
return self
def __exit__(self, ex_type, value, traceback):
self.close()
| [
"pysam.Tabixfile",
"wecall.genomics.chromosome.standardise_chromosome"
] | [((220, 256), 'pysam.Tabixfile', 'pysam.Tabixfile', (['tabix_filename', '"""r"""'], {}), "(tabix_filename, 'r')\n", (235, 256), False, 'import pysam\n'), ((290, 320), 'wecall.genomics.chromosome.standardise_chromosome', 'standardise_chromosome', (['contig'], {}), '(contig)\n', (312, 320), False, 'from wecall.genomics.chromosome import standardise_chromosome\n')] |
from collections import namedtuple, defaultdict
import time
import logging
from datetime import datetime, timedelta
from yapsy.PluginManager import PluginManager
from api.exceptions import TerminateApplication
from api.sensor import Sensor
from api.motor import Motor
PluginDetails = namedtuple('PluginInfo', ['name', 'key', 'instance', 'wants_last_chance', 'path'])
ALLOWED_UNHANDLED_EXCEPTIONS_PER_PLUGIN = 10
MINIMAL_LOOP_DURATION = timedelta(seconds=0.2)
class CoreApplication:
def __init__(self, sensors, motors):
self._motors = motors
self._sensors = sensors
self._disabled_plugins = set()
self._runtime_stats = {
'start_time': datetime.now(),
'loop_counter': 0,
'errors': defaultdict(list),
'average_loop_duration': timedelta(seconds=0),
'last_loop_duration': timedelta(seconds=0)
}
self._termination = None
self._total_loops_duration = timedelta()
def _process_sensors(self, state):
for plugin in self._sensors:
if plugin.key in self._disabled_plugins:
continue
try:
state[plugin.key] = plugin.instance.get_state()
except TerminateApplication as exception:
self._termination = (plugin.key, type(plugin.instance), exception.reason)
except KeyboardInterrupt:
self._termination = (None, None, "User interruption")
except Exception as exception:
logging.debug('"%s" threw exception.', plugin.key, exc_info=exception)
self._runtime_stats['errors'][plugin.key].append(exception)
state['errors'].append((plugin.key, exception))
def _process_motors(self, state):
for plugin in self._motors:
if plugin.key in self._disabled_plugins:
continue
try:
plugin.instance.on_trigger(state)
except TerminateApplication as exception:
self._termination = (plugin.key, type(plugin.instance), exception.reason)
except KeyboardInterrupt:
self._termination = (None, None, "User interruption")
except Exception as exception:
logging.debug('"%s" threw exception.', plugin.key, exc_info=exception)
self._runtime_stats['errors'][plugin.key].append(exception)
state['errors'].append((plugin.key, exception))
def _disable_failing_plugins(self):
for key in self._runtime_stats['errors']:
if key in self._disabled_plugins:
continue
if len(self._runtime_stats['errors'][key]) > ALLOWED_UNHANDLED_EXCEPTIONS_PER_PLUGIN:
logging.warning('Disabling plugin due to repeating failures: %s', key)
self._disabled_plugins.add(key)
def _update_runtime_statistics(self, loop_duration):
self._total_loops_duration += loop_duration
self._runtime_stats['loop_counter'] += 1
self._runtime_stats['average_loop_duration'] = self._total_loops_duration / self._runtime_stats['loop_counter']
self._runtime_stats['last_loop_duration'] = loop_duration
def _build_loop_state(self):
return {
'errors': [],
'now': datetime.now(),
'runtime': self._runtime_stats,
'disabled_plugins': self._disabled_plugins,
'termination': self._termination
}
def start_main_loop(self):
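        # main loop: collect sensor state, trigger motors, disable plugins that keep failing, repeat until termination is requested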
while self._termination is None:
try:
loop_start = datetime.now()
state = self._build_loop_state()
self._process_sensors(state)
self._process_motors(state)
self._disable_failing_plugins()
if len(self._disabled_plugins) == len(self._sensors) + len(self._motors):
logging.warning('All plugins have been disabled. Terminating application..')
break
if state['errors']:
logging.warning('Current loop was interrupted by following exceptions: %s', repr(state['errors']))
loop_stop = datetime.now()
loop_duration = loop_stop - loop_start
self._update_runtime_statistics(loop_duration)
if loop_duration < MINIMAL_LOOP_DURATION:
time.sleep((MINIMAL_LOOP_DURATION - loop_duration).total_seconds())
except KeyboardInterrupt:
self._termination = (None, None, "User interruption")
logging.info("Initiating shutdown procedure...")
terminal_state = self._build_loop_state()
for plugin in self._motors:
if plugin.key in self._disabled_plugins or not plugin.wants_last_chance:
continue
try:
logging.debug('Executing last chance motor: %s', plugin.key)
plugin.instance.on_trigger(terminal_state)
except Exception as exception:
self._runtime_stats['errors'][plugin.key].append(exception)
logging.info("Shutdown complete.")
logging.info(repr(self._runtime_stats))
def collect_all_plugins():
plugin_manager = PluginManager()
plugin_manager.setPluginPlaces(['plugins/motors', 'plugins/sensors'])
plugin_manager.collectPlugins()
for plugin in plugin_manager.getAllPlugins():
name = plugin.name
key = plugin.details.get('Core', 'key')
wants_last_chance = plugin.details.get('Core', 'last chance', fallback='').lower() == "true"
instance = plugin.plugin_object
path = plugin.path
yield PluginDetails(name, key, instance, wants_last_chance, path)
def load_plugins(all_plugins):
used_plugin_keys = set()
motor_plugins = []
sensor_plugins = []
for plugin in all_plugins:
logging.debug('Processing plugin %s (%s) <%s>...', plugin.key, plugin.name, type(plugin.instance))
if plugin.key in used_plugin_keys:
logging.warning('Attempt to load already loaded plugin. Duplicate: name="%s", key="%s", path "%s"',
plugin.name, plugin.key, plugin.path)
continue
if isinstance(plugin.instance, Motor):
logging.debug("\tFound motor plugin.")
motor_plugins.append(plugin)
if isinstance(plugin.instance, Sensor):
logging.debug("\tFound sensor plugin with key: %s", plugin.key)
sensor_plugins.append(plugin)
used_plugin_keys.add(plugin.key)
return sensor_plugins, motor_plugins
def main():
all_plugins = collect_all_plugins()
sensors, motors = load_plugins(all_plugins)
app = CoreApplication(sensors=sensors, motors=motors)
app.start_main_loop()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='[%(asctime)s][%(relativeCreated)d][%(levelname)s][%(module)s] %(message)s')
try:
main()
except Exception as e:
logging.error('Unexpected error occurred. If you believe issue is related to some bug in application, ' +
'please open issue with exception details at https://github.com/sceeter89/command-center/issues',
exc_info=e)
| [
"logging.error",
"logging.debug",
"logging.basicConfig",
"logging.warning",
"collections.defaultdict",
"logging.info",
"datetime.timedelta",
"collections.namedtuple",
"yapsy.PluginManager.PluginManager",
"datetime.datetime.now"
] | [((287, 373), 'collections.namedtuple', 'namedtuple', (['"""PluginInfo"""', "['name', 'key', 'instance', 'wants_last_chance', 'path']"], {}), "('PluginInfo', ['name', 'key', 'instance', 'wants_last_chance',\n 'path'])\n", (297, 373), False, 'from collections import namedtuple, defaultdict\n'), ((439, 461), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0.2)'}), '(seconds=0.2)\n', (448, 461), False, 'from datetime import datetime, timedelta\n'), ((5270, 5285), 'yapsy.PluginManager.PluginManager', 'PluginManager', ([], {}), '()\n', (5283, 5285), False, 'from yapsy.PluginManager import PluginManager\n'), ((6864, 6998), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""[%(asctime)s][%(relativeCreated)d][%(levelname)s][%(module)s] %(message)s"""'}), "(level=logging.DEBUG, format=\n '[%(asctime)s][%(relativeCreated)d][%(levelname)s][%(module)s] %(message)s'\n )\n", (6883, 6998), False, 'import logging\n'), ((969, 980), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (978, 980), False, 'from datetime import datetime, timedelta\n'), ((4610, 4658), 'logging.info', 'logging.info', (['"""Initiating shutdown procedure..."""'], {}), "('Initiating shutdown procedure...')\n", (4622, 4658), False, 'import logging\n'), ((5137, 5171), 'logging.info', 'logging.info', (['"""Shutdown complete."""'], {}), "('Shutdown complete.')\n", (5149, 5171), False, 'import logging\n'), ((687, 701), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (699, 701), False, 'from datetime import datetime, timedelta\n'), ((756, 773), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (767, 773), False, 'from collections import namedtuple, defaultdict\n'), ((812, 832), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (821, 832), False, 'from datetime import datetime, timedelta\n'), ((868, 888), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (877, 888), False, 'from datetime import datetime, timedelta\n'), ((3320, 3334), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3332, 3334), False, 'from datetime import datetime, timedelta\n'), ((6068, 6215), 'logging.warning', 'logging.warning', (['"""Attempt to load already loaded plugin. Duplicate: name="%s", key="%s", path "%s\\""""', 'plugin.name', 'plugin.key', 'plugin.path'], {}), '(\n \'Attempt to load already loaded plugin. Duplicate: name="%s", key="%s", path "%s"\'\n , plugin.name, plugin.key, plugin.path)\n', (6083, 6215), False, 'import logging\n'), ((6315, 6353), 'logging.debug', 'logging.debug', (['"""\tFound motor plugin."""'], {}), "('\\tFound motor plugin.')\n", (6328, 6353), False, 'import logging\n'), ((6455, 6518), 'logging.debug', 'logging.debug', (['"""\tFound sensor plugin with key: %s"""', 'plugin.key'], {}), "('\\tFound sensor plugin with key: %s', plugin.key)\n", (6468, 6518), False, 'import logging\n'), ((7072, 7306), 'logging.error', 'logging.error', (["('Unexpected error occurred. If you believe issue is related to some bug in application, '\n +\n 'please open issue with exception details at https://github.com/sceeter89/command-center/issues'\n )"], {'exc_info': 'e'}), "(\n 'Unexpected error occurred. 
If you believe issue is related to some bug in application, '\n +\n 'please open issue with exception details at https://github.com/sceeter89/command-center/issues'\n , exc_info=e)\n", (7085, 7306), False, 'import logging\n'), ((2760, 2830), 'logging.warning', 'logging.warning', (['"""Disabling plugin due to repeating failures: %s"""', 'key'], {}), "('Disabling plugin due to repeating failures: %s', key)\n", (2775, 2830), False, 'import logging\n'), ((3610, 3624), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3622, 3624), False, 'from datetime import datetime, timedelta\n'), ((4211, 4225), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4223, 4225), False, 'from datetime import datetime, timedelta\n'), ((4889, 4949), 'logging.debug', 'logging.debug', (['"""Executing last chance motor: %s"""', 'plugin.key'], {}), "('Executing last chance motor: %s', plugin.key)\n", (4902, 4949), False, 'import logging\n'), ((1529, 1599), 'logging.debug', 'logging.debug', (['""""%s" threw exception."""', 'plugin.key'], {'exc_info': 'exception'}), '(\'"%s" threw exception.\', plugin.key, exc_info=exception)\n', (1542, 1599), False, 'import logging\n'), ((2272, 2342), 'logging.debug', 'logging.debug', (['""""%s" threw exception."""', 'plugin.key'], {'exc_info': 'exception'}), '(\'"%s" threw exception.\', plugin.key, exc_info=exception)\n', (2285, 2342), False, 'import logging\n'), ((3923, 3999), 'logging.warning', 'logging.warning', (['"""All plugins have been disabled. Terminating application.."""'], {}), "('All plugins have been disabled. Terminating application..')\n", (3938, 3999), False, 'import logging\n')] |
from torch import nn
from torchvision import models
from torchvision.transforms import transforms
import util
class VGGFeatureExtractor(nn.Module):
def __init__(self):
super().__init__()
self._vgg = models.vgg16(pretrained=True).features
self._vgg.eval()
for parameter in self._vgg.parameters():
parameter.requires_grad = False
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
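        # indices of layers in the pretrained VGG16 features module whose intermediate outputs are collected as feature maps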
self.keep_idc = [1, 3, 6, 8, 11, 13, 15, 22, 29]
def __call__(self, xs):
assert xs.dim() == 4
xs = util.denormalize(xs)
xs = xs / 255.0
xs = self.normalize(xs)
feats = [xs]
for i, layer in enumerate(self._vgg):
xs = layer(xs)
if i in self.keep_idc:
feats.append(xs)
return feats
| [
"util.denormalize",
"torchvision.models.vgg16",
"torchvision.transforms.transforms.Normalize"
] | [((406, 481), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (426, 481), False, 'from torchvision.transforms import transforms\n'), ((611, 631), 'util.denormalize', 'util.denormalize', (['xs'], {}), '(xs)\n', (627, 631), False, 'import util\n'), ((223, 252), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (235, 252), False, 'from torchvision import models\n')] |
import os
import logging
from logging import handlers
from werkzeug.exceptions import InternalServerError
basedir = os.path.abspath(os.path.dirname(__file__))
def handle_error(error):
Log.logger().error(error)
return error
class Log:
LOG_PATH = os.path.join(basedir, 'logs')
LOG_NAME = os.path.join(LOG_PATH, 'log.txt')
LOG_LEVEL = 'INFO'
current_app = None
@staticmethod
def init_app(app):
Log.current_app = app
if not os.path.exists(Log.LOG_PATH):
os.makedirs(Log.LOG_PATH)
        # rotate the log file by time, renaming old files with a date suffix
file_handler = logging.handlers.TimedRotatingFileHandler(Log.LOG_NAME, when='D', interval=1, backupCount=0, encoding='utf-8')
file_handler.suffix = '%Y-%m-%d.log'
        # set this handler's own level: records below it are not processed by this handler (a logger can have multiple handlers)
        # file_handler is used to write records to the log file
file_handler.setLevel(Log.LOG_LEVEL)
fmt = '%(asctime)s-%(levelname)s-%(filename)s-%(funcName)s-%(lineno)s: %(message)s'
formatter = logging.Formatter(fmt)
file_handler.setFormatter(formatter)
        # set the logger's level: only records at or above this level are passed on to the handlers
app.logger.setLevel('DEBUG')
app.logger.addHandler(file_handler)
        # in DEBUG mode execution never reaches handle_error
app.register_error_handler(InternalServerError, handle_error)
@staticmethod
def logger():
        return Log.current_app.logger
| [
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"logging.Formatter",
"logging.handlers.TimedRotatingFileHandler",
"os.path.join"
] | [((133, 158), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (148, 158), False, 'import os\n'), ((260, 289), 'os.path.join', 'os.path.join', (['basedir', '"""logs"""'], {}), "(basedir, 'logs')\n", (272, 289), False, 'import os\n'), ((305, 338), 'os.path.join', 'os.path.join', (['LOG_PATH', '"""log.txt"""'], {}), "(LOG_PATH, 'log.txt')\n", (317, 338), False, 'import os\n'), ((586, 701), 'logging.handlers.TimedRotatingFileHandler', 'logging.handlers.TimedRotatingFileHandler', (['Log.LOG_NAME'], {'when': '"""D"""', 'interval': '(1)', 'backupCount': '(0)', 'encoding': '"""utf-8"""'}), "(Log.LOG_NAME, when='D', interval=\n 1, backupCount=0, encoding='utf-8')\n", (627, 701), False, 'import logging\n'), ((996, 1018), 'logging.Formatter', 'logging.Formatter', (['fmt'], {}), '(fmt)\n', (1013, 1018), False, 'import logging\n'), ((473, 501), 'os.path.exists', 'os.path.exists', (['Log.LOG_PATH'], {}), '(Log.LOG_PATH)\n', (487, 501), False, 'import os\n'), ((515, 540), 'os.makedirs', 'os.makedirs', (['Log.LOG_PATH'], {}), '(Log.LOG_PATH)\n', (526, 540), False, 'import os\n')] |
from bs4 import BeautifulSoup
import requests, smtplib, time
from flask import Flask, render_template, request, url_for
from threading import Thread
app = Flask(__name__)
@app.route('/')
def progstart():
return render_template("site.html")
@app.route('/start_task')
def start_task():
def do_work(stockInput, targetprice, email):
targetprice = float(targetprice)
while True:
URL = "https://finance.yahoo.com/quote/" + stockInput.upper() + "?p=" + stockInput.upper() + "&.tsrc=fin-srch"
htmlFound = requests.get(URL).text
retrieved = BeautifulSoup(htmlFound, 'html')
price = retrieved.find("span", class_ = "Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)").text
oldprice = float(price.replace(",", ""))
newtargetprice = price.replace(",", "")
print("The price is: " + price)
newprice = float(price.replace(",", ""))
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login("email", "password")
head = stockInput + " price update!"
if oldprice < targetprice:
if newprice >= targetprice:
body = stockInput.upper() + " rose to " + str(newprice) + "!"
message = f"Subject: {head}\n\n{body}"
server.sendmail("<EMAIL>", email, message)
if oldprice > targetprice:
if newprice <= targetprice:
body = stockInput.upper() + " fell to " + str(newprice) + "!"
message = f"Subject: {head}\n\n{body}"
server.sendmail("<EMAIL>", email, message)
if oldprice == targetprice:
body = stockInput.upper() + " has reached $" + str(newprice) + "!"
message = f"Subject: {head}\n\n{body}"
server.sendmail("<EMAIL>", email, message)
time.sleep(30)
kwargs = {
'stockInput':request.args.get('ticker'),
'targetprice':request.args.get('target'),
'email':request.args.get('email')
}
print(request.args)
thread = Thread(target=do_work, kwargs=kwargs)
thread.start()
return render_template("site.html")
if __name__ == "__main__":
app.run(debug=True)
| [
"threading.Thread",
"flask.request.args.get",
"smtplib.SMTP",
"flask.Flask",
"time.sleep",
"flask.render_template",
"requests.get",
"bs4.BeautifulSoup"
] | [((156, 171), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (161, 171), False, 'from flask import Flask, render_template, request, url_for\n'), ((217, 245), 'flask.render_template', 'render_template', (['"""site.html"""'], {}), "('site.html')\n", (232, 245), False, 'from flask import Flask, render_template, request, url_for\n'), ((2379, 2416), 'threading.Thread', 'Thread', ([], {'target': 'do_work', 'kwargs': 'kwargs'}), '(target=do_work, kwargs=kwargs)\n', (2385, 2416), False, 'from threading import Thread\n'), ((2447, 2475), 'flask.render_template', 'render_template', (['"""site.html"""'], {}), "('site.html')\n", (2462, 2475), False, 'from flask import Flask, render_template, request, url_for\n'), ((2200, 2226), 'flask.request.args.get', 'request.args.get', (['"""ticker"""'], {}), "('ticker')\n", (2216, 2226), False, 'from flask import Flask, render_template, request, url_for\n'), ((2254, 2280), 'flask.request.args.get', 'request.args.get', (['"""target"""'], {}), "('target')\n", (2270, 2280), False, 'from flask import Flask, render_template, request, url_for\n'), ((2302, 2327), 'flask.request.args.get', 'request.args.get', (['"""email"""'], {}), "('email')\n", (2318, 2327), False, 'from flask import Flask, render_template, request, url_for\n'), ((610, 642), 'bs4.BeautifulSoup', 'BeautifulSoup', (['htmlFound', '"""html"""'], {}), "(htmlFound, 'html')\n", (623, 642), False, 'from bs4 import BeautifulSoup\n'), ((974, 1009), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (986, 1009), False, 'import requests, smtplib, time\n'), ((2144, 2158), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (2154, 2158), False, 'import requests, smtplib, time\n'), ((562, 579), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (574, 579), False, 'import requests, smtplib, time\n')] |
import json
from pathlib import Path
from typing import List
import cli_ui as ui
import deserialize
from conans.client.conan_api import Conan
from .conanbuilder.configreader import ConfigReader
from .conanbuilder.package import Package
from .conanbuilder.runner import Runner
from .conanbuilder.signature import Signature
class MumocoAPI:
def __init__(self, config_file_path: str, root: str):
self.config: ConfigReader = config_reader_from_file(config_file_path)
self.runner: Runner = get_runner(self.config, root)
def sources(self, source_folder: str = "") -> None:
self.runner.get_all_sources(source_folder)
def add_remotes(self, username: str, password: str) -> None:
self.runner.add_all_remotes(self.config.remotes, username, password)
def remove(self, source_folder: str = "") -> None:
self.runner.remove_all_sources(source_folder)
def create(self) -> None:
self.runner.export_all()
self.runner.create_all(self.config.configurations)
def upload(self, remote_name: str) -> None:
self.runner.upload_all_packages(remote_name)
def find_all_conanfiles_to_be_processed(root_path: str) -> List[str]:
conan_files = []
for path in Path(root_path).rglob("conanfile.py"):
path_string = str(path.absolute())
if "test_package" not in path_string:
conan_files.append(path_string)
return conan_files
def find_all_packages_to_processed(conan_factory: Conan, root_path: str, signature: Signature) -> List[Package]:
conan_files = find_all_conanfiles_to_be_processed(root_path)
conan_packages = []
for file in conan_files:
conan_packages.append(Package(conan_factory, signature, file))
return conan_packages
def get_runner(config_reader: ConfigReader, root: str) -> Runner:
conan_factory, _, _ = Conan.factory()
packages = find_all_packages_to_processed(conan_factory, root, config_reader.signature)
return Runner(conan_factory, packages)
def config_reader_from_file(file: str) -> ConfigReader:
try:
with open(file, encoding="utf-8") as json_file:
return config_reader_from_string(json.load(json_file))
except IOError:
ui.fatal("Config file not accessible or readable")
return ConfigReader()
def config_reader_from_string(load: str) -> ConfigReader:
reader: ConfigReader = deserialize.deserialize(ConfigReader, load)
return reader
| [
"deserialize.deserialize",
"json.load",
"cli_ui.fatal",
"pathlib.Path",
"conans.client.conan_api.Conan.factory"
] | [((1854, 1869), 'conans.client.conan_api.Conan.factory', 'Conan.factory', ([], {}), '()\n', (1867, 1869), False, 'from conans.client.conan_api import Conan\n'), ((2387, 2430), 'deserialize.deserialize', 'deserialize.deserialize', (['ConfigReader', 'load'], {}), '(ConfigReader, load)\n', (2410, 2430), False, 'import deserialize\n'), ((1234, 1249), 'pathlib.Path', 'Path', (['root_path'], {}), '(root_path)\n', (1238, 1249), False, 'from pathlib import Path\n'), ((2223, 2273), 'cli_ui.fatal', 'ui.fatal', (['"""Config file not accessible or readable"""'], {}), "('Config file not accessible or readable')\n", (2231, 2273), True, 'import cli_ui as ui\n'), ((2173, 2193), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2182, 2193), False, 'import json\n')] |
# Generated by Django 2.0.3 on 2018-04-05 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kyokigo', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='kyokigo_input',
name='ownurl',
),
migrations.AlterField(
model_name='kyokigo_input',
name='text',
field=models.CharField(max_length=100, verbose_name='テキスト'),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((224, 289), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""kyokigo_input"""', 'name': '"""ownurl"""'}), "(model_name='kyokigo_input', name='ownurl')\n", (246, 289), False, 'from django.db import migrations, models\n'), ((440, 493), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""テキスト"""'}), "(max_length=100, verbose_name='テキスト')\n", (456, 493), False, 'from django.db import migrations, models\n')] |
from db.sql.migration_of_db.tweet_migration_big.psql_tweet_mig_queries import psql_connector_twitter_mig
from db.sql.migration_of_db.tweet_migration_big.big_queries_sql import big_connector_twitter_mig
import data_collection.altdata_service.twitter.object_function.tweet_cleaner as cleaner
def migration_tweet_tables():
print('start tweet migration')
#tweet_mig(table_name='tweet_clone')
print('start tweet_castag migration')
tweet_mig(table_name='tweet_cashtag_clone')
print('job done')
def tweet_mig(table_name):
psql_conn = psql_connector_twitter_mig()
big_conn = big_connector_twitter_mig()
tweet_df = psql_conn.get_twitter(table_name)
t = 0
while not tweet_df.empty:
bi_table = table_name.replace('_clone', '')
tweet_df = psql_conn.get_twitter(table_name)
if tweet_df.empty:
break
tweet_df = cleaner.clean_df_for_db(tweet_df)
big_conn.insert_into_tweet(df=tweet_df, table_name=bi_table)
psql_conn.delete_imported_tweets(df=tweet_df, table_name=table_name)
t += len(tweet_df)
print('we have processed ' + str(t) + ' rows')
if __name__ == "__main__":
migration_tweet_tables()
| [
"db.sql.migration_of_db.tweet_migration_big.psql_tweet_mig_queries.psql_connector_twitter_mig",
"db.sql.migration_of_db.tweet_migration_big.big_queries_sql.big_connector_twitter_mig",
"data_collection.altdata_service.twitter.object_function.tweet_cleaner.clean_df_for_db"
] | [((569, 597), 'db.sql.migration_of_db.tweet_migration_big.psql_tweet_mig_queries.psql_connector_twitter_mig', 'psql_connector_twitter_mig', ([], {}), '()\n', (595, 597), False, 'from db.sql.migration_of_db.tweet_migration_big.psql_tweet_mig_queries import psql_connector_twitter_mig\n'), ((614, 641), 'db.sql.migration_of_db.tweet_migration_big.big_queries_sql.big_connector_twitter_mig', 'big_connector_twitter_mig', ([], {}), '()\n', (639, 641), False, 'from db.sql.migration_of_db.tweet_migration_big.big_queries_sql import big_connector_twitter_mig\n'), ((922, 955), 'data_collection.altdata_service.twitter.object_function.tweet_cleaner.clean_df_for_db', 'cleaner.clean_df_for_db', (['tweet_df'], {}), '(tweet_df)\n', (945, 955), True, 'import data_collection.altdata_service.twitter.object_function.tweet_cleaner as cleaner\n')] |
# (c) 2016 <NAME>
" 1D PNP, modelling reservoirs and membrane far away from pore "
import nanopores as nano
import solvers
geop = nano.Params(
R = 35.,
H = 70.,
)
physp = nano.Params(
bulkcon = 1000.,
bV = -1.,
)
geo, pnp = solvers.solve1D(geop, physp)
solvers.visualize1D(geo, pnp)
nano.showplots()
| [
"solvers.visualize1D",
"nanopores.showplots",
"solvers.solve1D",
"nanopores.Params"
] | [((132, 159), 'nanopores.Params', 'nano.Params', ([], {'R': '(35.0)', 'H': '(70.0)'}), '(R=35.0, H=70.0)\n', (143, 159), True, 'import nanopores as nano\n'), ((181, 217), 'nanopores.Params', 'nano.Params', ([], {'bulkcon': '(1000.0)', 'bV': '(-1.0)'}), '(bulkcon=1000.0, bV=-1.0)\n', (192, 217), True, 'import nanopores as nano\n'), ((243, 271), 'solvers.solve1D', 'solvers.solve1D', (['geop', 'physp'], {}), '(geop, physp)\n', (258, 271), False, 'import solvers\n'), ((272, 301), 'solvers.visualize1D', 'solvers.visualize1D', (['geo', 'pnp'], {}), '(geo, pnp)\n', (291, 301), False, 'import solvers\n'), ((302, 318), 'nanopores.showplots', 'nano.showplots', ([], {}), '()\n', (316, 318), True, 'import nanopores as nano\n')] |
# Generated by Django 3.1.3 on 2020-11-25 11:09
from django.db import migrations
from django.utils.text import slugify
def popular_slug(apps, schema_editor):
Modulo = apps.get_model('modulos', 'Modulo')
for modulo in Modulo.objects.all():
modulo.slug = slugify(modulo.titulo)
modulo.save()
class Migration(migrations.Migration):
dependencies = [
('modulos', '0003_modulo_slug'),
]
operations = [
migrations.RunPython(popular_slug)
]
| [
"django.db.migrations.RunPython",
"django.utils.text.slugify"
] | [((272, 294), 'django.utils.text.slugify', 'slugify', (['modulo.titulo'], {}), '(modulo.titulo)\n', (279, 294), False, 'from django.utils.text import slugify\n'), ((454, 488), 'django.db.migrations.RunPython', 'migrations.RunPython', (['popular_slug'], {}), '(popular_slug)\n', (474, 488), False, 'from django.db import migrations\n')] |
#!/usr/bin/env python
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: BSD-2
import argparse
import configparser
import io
import sys
import tau_clients
import vt
from tau_clients import decoders
from tau_clients import exceptions
from tau_clients import nsx_defender
def download_from_vt(client: vt.Client, file_hash: str) -> bytes:
"""
Download file from VT.
:param vt.Client client: the VT client
:param str file_hash: the file hash
:rtype: bytes
:return: the downloaded data
:raises ValueError: in case of any error
"""
try:
buffer = io.BytesIO()
client.download_file(file_hash, buffer)
buffer.seek(0, 0)
return buffer.read()
except (IOError, vt.APIError) as e:
raise ValueError(str(e)) from e
def main():
"""Submit all samples or hashes by downloading from VT first."""
parser = argparse.ArgumentParser()
parser.add_argument(
"-c",
"--config-file",
dest="config_file",
default="./data/tau_clients.ini",
type=tau_clients.is_valid_config_file,
help="read config from here",
)
parser.add_argument(
"-b",
"--bypass-cache",
dest="bypass_cache",
action="store_true",
default=False,
help="whether to bypass the cache",
)
decoders.InputTypeDecoder.add_arguments_to_parser(
parser=parser,
choices=[
decoders.InputType.DIRECTORY,
decoders.InputType.FILE_HASH,
decoders.InputType.FILE,
],
)
args = parser.parse_args()
conf = configparser.ConfigParser()
conf.read(args.config_file)
# Load the analysis client
analysis_client = nsx_defender.AnalysisClient.from_conf(conf, "analysis")
# Decode input type
file_inputs, input_type = decoders.InputTypeDecoder().decode(
arguments=args.input_bits,
input_type=decoders.InputType(args.input_type),
inspect_content=False,
)
# Parse the input
vt_client = None
file_paths = []
file_hashes = []
if input_type is decoders.InputType.FILE_HASH:
try:
vt_client = vt.Client(apikey=conf.get("vt", "apikey"))
except configparser.Error:
print("VT credentials not found. Hash submissions are disabled")
return 1
file_hashes.extend(file_inputs)
elif input_type is decoders.InputType.FILE:
for file_input in file_inputs:
file_paths.extend(tau_clients.get_file_paths(file_input))
else:
raise ValueError("Unknown input type")
print(f"Decoded input into {len(file_hashes)} file hashes and {len(file_paths)} samples")
# Submit
submission_start_ts = analysis_client.get_api_utc_timestamp()
submissions = []
task_to_source = {}
for file_path in file_paths:
with open(file_path, "rb") as f:
try:
ret = analysis_client.submit_file(f.read(), bypass_cache=args.bypass_cache)
submissions.append(ret)
task_to_source[ret["task_uuid"]] = file_path
except exceptions.ApiError as ae:
print(f"Error '{str(ae)}' when submitting file {file_path}")
for file_hash in file_hashes:
try:
file_data = download_from_vt(vt_client, file_hash)
ret = analysis_client.submit_file(file_data, bypass_cache=args.bypass_cache)
submissions.append(ret)
task_to_source[ret["task_uuid"]] = file_hash
except ValueError as ve:
print(f"Error '{str(ve)}' when downloading file {file_hash}")
except exceptions.ApiError as ae:
print(f"Error '{str(ae)}' when submitting file {file_hash}")
if vt_client:
vt_client.close()
print(f"All files have been submitted ({len(submissions)} submissions)")
# Wait for completion
try:
for submission in analysis_client.yield_completed_tasks(
submissions=submissions,
start_timestamp=submission_start_ts,
):
task_uuid = submission.get("task_uuid")
if not task_uuid:
print(f"File '{task_to_source[task_uuid]}' was not submitted correctly")
else:
task_link = tau_clients.get_task_link(task_uuid, prefer_load_balancer=True)
print(f"File '{task_to_source[task_uuid]}' finished analysis: {task_link}")
except KeyboardInterrupt:
print("Waiting for results interrupted by user")
print("Done")
return 0
if __name__ == "__main__":
sys.exit(main())
| [
"tau_clients.nsx_defender.AnalysisClient.from_conf",
"io.BytesIO",
"tau_clients.decoders.InputTypeDecoder",
"tau_clients.get_file_paths",
"tau_clients.decoders.InputTypeDecoder.add_arguments_to_parser",
"argparse.ArgumentParser",
"tau_clients.get_task_link",
"configparser.ConfigParser",
"tau_clients.decoders.InputType"
] | [((885, 910), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (908, 910), False, 'import argparse\n'), ((1336, 1505), 'tau_clients.decoders.InputTypeDecoder.add_arguments_to_parser', 'decoders.InputTypeDecoder.add_arguments_to_parser', ([], {'parser': 'parser', 'choices': '[decoders.InputType.DIRECTORY, decoders.InputType.FILE_HASH, decoders.\n InputType.FILE]'}), '(parser=parser, choices=[\n decoders.InputType.DIRECTORY, decoders.InputType.FILE_HASH, decoders.\n InputType.FILE])\n', (1385, 1505), False, 'from tau_clients import decoders\n'), ((1608, 1635), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1633, 1635), False, 'import configparser\n'), ((1722, 1777), 'tau_clients.nsx_defender.AnalysisClient.from_conf', 'nsx_defender.AnalysisClient.from_conf', (['conf', '"""analysis"""'], {}), "(conf, 'analysis')\n", (1759, 1777), False, 'from tau_clients import nsx_defender\n'), ((593, 605), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (603, 605), False, 'import io\n'), ((1833, 1860), 'tau_clients.decoders.InputTypeDecoder', 'decoders.InputTypeDecoder', ([], {}), '()\n', (1858, 1860), False, 'from tau_clients import decoders\n'), ((1923, 1958), 'tau_clients.decoders.InputType', 'decoders.InputType', (['args.input_type'], {}), '(args.input_type)\n', (1941, 1958), False, 'from tau_clients import decoders\n'), ((4276, 4339), 'tau_clients.get_task_link', 'tau_clients.get_task_link', (['task_uuid'], {'prefer_load_balancer': '(True)'}), '(task_uuid, prefer_load_balancer=True)\n', (4301, 4339), False, 'import tau_clients\n'), ((2503, 2541), 'tau_clients.get_file_paths', 'tau_clients.get_file_paths', (['file_input'], {}), '(file_input)\n', (2529, 2541), False, 'import tau_clients\n')] |
import RobinhoodFunctions as rf
email, password = rf.getCredentials()
rf.loginToRH(email, password)
allPositions = []
allPositions = rf.getAllOptions(allPositions)
frequentTickers = rf.getFrequentTickers(allPositions)
rf.r.options.write_spinner()
rf.r.options.spinning_cursor()
optionNames, entryPrices, calls, puts = rf.getOptionTrades(allPositions)
writer, excelPath= rf.writeOptionInfo(frequentTickers, optionNames, entryPrices, calls, puts)
rf.closeAndSave(writer)
print("Options successfully exported to:", excelPath)
| [
"RobinhoodFunctions.r.options.spinning_cursor",
"RobinhoodFunctions.loginToRH",
"RobinhoodFunctions.getOptionTrades",
"RobinhoodFunctions.r.options.write_spinner",
"RobinhoodFunctions.getFrequentTickers",
"RobinhoodFunctions.closeAndSave",
"RobinhoodFunctions.getAllOptions",
"RobinhoodFunctions.getCredentials",
"RobinhoodFunctions.writeOptionInfo"
] | [((51, 70), 'RobinhoodFunctions.getCredentials', 'rf.getCredentials', ([], {}), '()\n', (68, 70), True, 'import RobinhoodFunctions as rf\n'), ((71, 100), 'RobinhoodFunctions.loginToRH', 'rf.loginToRH', (['email', 'password'], {}), '(email, password)\n', (83, 100), True, 'import RobinhoodFunctions as rf\n'), ((134, 164), 'RobinhoodFunctions.getAllOptions', 'rf.getAllOptions', (['allPositions'], {}), '(allPositions)\n', (150, 164), True, 'import RobinhoodFunctions as rf\n'), ((183, 218), 'RobinhoodFunctions.getFrequentTickers', 'rf.getFrequentTickers', (['allPositions'], {}), '(allPositions)\n', (204, 218), True, 'import RobinhoodFunctions as rf\n'), ((219, 247), 'RobinhoodFunctions.r.options.write_spinner', 'rf.r.options.write_spinner', ([], {}), '()\n', (245, 247), True, 'import RobinhoodFunctions as rf\n'), ((248, 278), 'RobinhoodFunctions.r.options.spinning_cursor', 'rf.r.options.spinning_cursor', ([], {}), '()\n', (276, 278), True, 'import RobinhoodFunctions as rf\n'), ((319, 351), 'RobinhoodFunctions.getOptionTrades', 'rf.getOptionTrades', (['allPositions'], {}), '(allPositions)\n', (337, 351), True, 'import RobinhoodFunctions as rf\n'), ((371, 445), 'RobinhoodFunctions.writeOptionInfo', 'rf.writeOptionInfo', (['frequentTickers', 'optionNames', 'entryPrices', 'calls', 'puts'], {}), '(frequentTickers, optionNames, entryPrices, calls, puts)\n', (389, 445), True, 'import RobinhoodFunctions as rf\n'), ((448, 471), 'RobinhoodFunctions.closeAndSave', 'rf.closeAndSave', (['writer'], {}), '(writer)\n', (463, 471), True, 'import RobinhoodFunctions as rf\n')] |
from django.db import models
from django_grainy.decorators import grainy_model
from django_grainy.models import Permission, PermissionManager
from django_grainy.handlers import GrainyMixin
# Create your models here.
"""
These are the models used during the django_grainy
unit tests. There is no need to ever install the "django_grainy_test"
app in your project
"""
class ModelBase(GrainyMixin, models.Model):
class Meta:
abstract = True
@grainy_model()
class ModelA(ModelBase):
name = models.CharField(max_length=255)
@grainy_model(namespace="something.arbitrary")
class ModelB(ModelA):
pass
@grainy_model(
namespace=ModelB.Grainy.namespace(),
namespace_instance="{namespace}.{instance.b.id}.c.{instance.id}",
)
class ModelC(ModelA):
b = models.ForeignKey(ModelB, related_name="c", on_delete=models.CASCADE)
@grainy_model(
namespace="dynamic.{value}", namespace_instance="{namespace}.{other_value}"
)
class ModelD(ModelA):
pass
@grainy_model(namespace="x")
class ModelX(ModelA):
pass
@grainy_model(namespace="custom", parent="x")
class ModelY(ModelA):
x = models.ForeignKey(ModelX, related_name="y", on_delete=models.CASCADE)
@grainy_model(namespace="z", parent="y")
class ModelZ(ModelA):
y = models.ForeignKey(ModelY, related_name="z", on_delete=models.CASCADE)
class APIKey(models.Model):
key = models.CharField(max_length=255)
class APIKeyPermission(Permission):
api_key = models.ForeignKey(
APIKey, related_name="grainy_permissions", on_delete=models.CASCADE
)
objects = PermissionManager()
| [
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django_grainy.models.PermissionManager",
"django_grainy.decorators.grainy_model"
] | [((458, 472), 'django_grainy.decorators.grainy_model', 'grainy_model', ([], {}), '()\n', (470, 472), False, 'from django_grainy.decorators import grainy_model\n'), ((545, 590), 'django_grainy.decorators.grainy_model', 'grainy_model', ([], {'namespace': '"""something.arbitrary"""'}), "(namespace='something.arbitrary')\n", (557, 590), False, 'from django_grainy.decorators import grainy_model\n'), ((855, 949), 'django_grainy.decorators.grainy_model', 'grainy_model', ([], {'namespace': '"""dynamic.{value}"""', 'namespace_instance': '"""{namespace}.{other_value}"""'}), "(namespace='dynamic.{value}', namespace_instance=\n '{namespace}.{other_value}')\n", (867, 949), False, 'from django_grainy.decorators import grainy_model\n'), ((985, 1012), 'django_grainy.decorators.grainy_model', 'grainy_model', ([], {'namespace': '"""x"""'}), "(namespace='x')\n", (997, 1012), False, 'from django_grainy.decorators import grainy_model\n'), ((1047, 1091), 'django_grainy.decorators.grainy_model', 'grainy_model', ([], {'namespace': '"""custom"""', 'parent': '"""x"""'}), "(namespace='custom', parent='x')\n", (1059, 1091), False, 'from django_grainy.decorators import grainy_model\n'), ((1195, 1234), 'django_grainy.decorators.grainy_model', 'grainy_model', ([], {'namespace': '"""z"""', 'parent': '"""y"""'}), "(namespace='z', parent='y')\n", (1207, 1234), False, 'from django_grainy.decorators import grainy_model\n'), ((509, 541), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (525, 541), False, 'from django.db import models\n'), ((782, 851), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ModelB'], {'related_name': '"""c"""', 'on_delete': 'models.CASCADE'}), "(ModelB, related_name='c', on_delete=models.CASCADE)\n", (799, 851), False, 'from django.db import models\n'), ((1122, 1191), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ModelX'], {'related_name': '"""y"""', 'on_delete': 'models.CASCADE'}), "(ModelX, related_name='y', on_delete=models.CASCADE)\n", (1139, 1191), False, 'from django.db import models\n'), ((1265, 1334), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ModelY'], {'related_name': '"""z"""', 'on_delete': 'models.CASCADE'}), "(ModelY, related_name='z', on_delete=models.CASCADE)\n", (1282, 1334), False, 'from django.db import models\n'), ((1375, 1407), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1391, 1407), False, 'from django.db import models\n'), ((1460, 1551), 'django.db.models.ForeignKey', 'models.ForeignKey', (['APIKey'], {'related_name': '"""grainy_permissions"""', 'on_delete': 'models.CASCADE'}), "(APIKey, related_name='grainy_permissions', on_delete=\n models.CASCADE)\n", (1477, 1551), False, 'from django.db import models\n'), ((1575, 1594), 'django_grainy.models.PermissionManager', 'PermissionManager', ([], {}), '()\n', (1592, 1594), False, 'from django_grainy.models import Permission, PermissionManager\n')] |
from telegram.ext import Updater, CallbackContext, CommandHandler, MessageHandler, Filters, Handler
from telegram.ext.dispatcher import run_async, DispatcherHandlerStop, Dispatcher
from telegram import Update, User, Message, ParseMode
from telegram.error import BadRequest
import requests_html
import requests
import json
import logging
#Enter API-KEY here
updater = Updater("API-KEY", use_context=True)
dispatcher = updater.dispatcher
logging.basicConfig(filename="shipping.log", format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
#Tracking Function for E-Kart Logistics
@run_async
def ekart(update: Update, context: CallbackContext):
if update.message!=None:
trackingID = (update.message.text).split()[1]
data = []
session = requests_html.HTMLSession()
response = session.get("https://ekartlogistics.com/track/"+str(trackingID)+"/")
for selector in response.html.xpath('//div[@id="no-more-tables"][1]/table/tbody'):
data.append(selector.text)
context.bot.send_message(chat_id=update.effective_chat.id, text="*Shipping Status: *\n\n`Latest Status: "+data[0]+"`\n\n*Tracking Info:*\n\n`"+data[1]+"`", reply_to_message_id=update.message.message_id, parse_mode=ParseMode.MARKDOWN)
#Tracking Function for Pitney Bowes
@run_async
def pitneyb(update: Update, context: CallbackContext):
if update.message!=None:
trackingID = (update.message.text).split()[1]
response = requests.get("https://parceltracking.pb.com/ptsapi/track-packages/"+trackingID)
jsonData = json.loads(response.text)
try:
currentStatusData = [
'Status: '+jsonData['currentStatus']['packageStatus'],
'Last Updated: '+jsonData['currentStatus']['eventDate']+' '+jsonData['currentStatus']['eventTime'],
'Description: '+jsonData['currentStatus']['eventDescription'],
'Location: '+jsonData['currentStatus']['eventLocation']['city']+", "+jsonData['currentStatus']['eventLocation']['countyOrRegion']+' - '+jsonData['currentStatus']['eventLocation']['postalOrZipCode']
]
except KeyError:
currentStatusData = [
'Status: '+jsonData['currentStatus']['packageStatus'],
'Last Updated: '+jsonData['currentStatus']['eventDate']+' '+jsonData['currentStatus']['eventTime'],
'Description: '+jsonData['currentStatus']['eventDescription'],
'Location: '+jsonData['currentStatus']['eventLocation']['city']+", "+jsonData['currentStatus']['eventLocation']['countyOrRegion']+' - '
]
currentStatusData = "\n".join(currentStatusData)
history = []
for x in jsonData['scanHistory']['scanDetails']:
try:
history.append([
'Status: '+x['packageStatus'],
'Last Updated: '+x['eventDate']+' '+x['eventTime'],
'Description: '+x['eventDescription'],
'Location: '+x['eventLocation']['city']+", "+x['eventLocation']['countyOrRegion']+' - '+x['eventLocation']['postalOrZipCode']
])
except KeyError:
history.append([
'Status: '+x['packageStatus'],
'Last Updated: '+x['eventDate']+' '+x['eventTime'],
'Description: '+x['eventDescription'],
])
historyData = []
for i in range(len(history)):
historyData.append("\n".join(history[i]))
historyData = "\n\n".join(historyData)
context.bot.send_message(chat_id=update.effective_chat.id, text="*Shipping Status: *\n\n`Latest Status:\n"+currentStatusData+"`\n\n*Tracking Info:*\n\n`"+historyData+"`", reply_to_message_id=update.message.message_id, parse_mode=ParseMode.MARKDOWN)
#Tracking Function for Canada Post
@run_async
def canadapost(update: Update, context: CallbackContext):
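    # Usage: /canadapost <trackingID> - queries the Canada Post JSON tracking endpoint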
if update.message!=None:
trackingID = (update.message.text).split()[1]
response = requests.get("https://www.canadapost.ca/trackweb/rs/track/json/package/"+trackingID+"/detail")
jsonData = json.loads(response.text)
status = jsonData['status']
history = []
for x in jsonData['events']:
history.append([
'Date: '+ x['datetime']['date'] + x['datetime']['time'] + x['datetime']['zoneOffset'],
'Location: '+ x['locationAddr']['city'] + ", " + x['locationAddr']['regionCd'] + " (" + x['locationAddr']['countryCd'] + ")",
'Description: '+ x['descEn']
])
currentStatusData = history[0]
currentStatusData = "\n".join(currentStatusData)
del history[0]
historyData = []
for i in range(len(history)):
historyData.append("\n".join(history[i]))
historyData = "\n\n".join(historyData)
context.bot.send_message(chat_id=update.effective_chat.id, text="*Shipping Status: *\n\n`Latest Status:\n"+currentStatusData+"`\n\n*Tracking Info:*\n\n`"+historyData+"`", reply_to_message_id=update.message.message_id, parse_mode=ParseMode.MARKDOWN)
#Bot Start Message /start
@run_async
def start(update: Update, context: CallbackContext):
context.bot.sendChatAction(update.effective_chat.id, "typing")
cmd_msg = context.bot.send_message(chat_id=update.effective_chat.id, text="Hey there! I'm Shipping Info Bot!\nI can provide you latest tracking info on your package.\n\nUse the following commands to access your package tracking info.")
def main():
start_handler = CommandHandler("start", start)
dispatcher.add_handler(start_handler)
#Command handler for E-Kart Logistics
ekart_handler = CommandHandler("ekart", ekart)
dispatcher.add_handler(ekart_handler)
#Command handler for Pitney Bowes
pitneyb_handler = CommandHandler("pitneyb", pitneyb)
dispatcher.add_handler(pitneyb_handler)
#Command handler for Canada Post
canadapost_handler = CommandHandler("canadapost", canadapost)
dispatcher.add_handler(canadapost_handler)
updater.start_polling()
updater.idle()
if __name__ == "__main__":
print(" _____ _ _ _ _____ __ ______ _ \n")
print("/ ___| | (_) (_) |_ _| / _| | ___ \ | | \n")
print("\ `--.| |__ _ _ __ _ __ _ _ __ __ _ | | _ __ | |_ ___ | |_/ / ___ | |_ \n")
print(" `--. \ '_ \| | '_ \| '_ \| | '_ \ / _` || || '_ \| _/ _ \| ___ \/ _ \| __|\n")
print("/\__/ / | | | | |_) | |_) | | | | | (_| || || | | | || (_) | |_/ / (_) | |_ \n")
print("\____/|_| |_|_| .__/| .__/|_|_| |_|\__, \___/_| |_|_| \___/\____/ \___/ \__|\n")
print(" | | | | __/ | \n")
print(" |_| |_| |___/ ")
main()
| [
"json.loads",
"logging.basicConfig",
"telegram.ext.Updater",
"requests.get",
"telegram.ext.CommandHandler",
"requests_html.HTMLSession"
] | [((368, 404), 'telegram.ext.Updater', 'Updater', (['"""API-KEY"""'], {'use_context': '(True)'}), "('API-KEY', use_context=True)\n", (375, 404), False, 'from telegram.ext import Updater, CallbackContext, CommandHandler, MessageHandler, Filters, Handler\n'), ((438, 570), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""shipping.log"""', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(filename='shipping.log', format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (457, 570), False, 'import logging\n'), ((5622, 5652), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (5636, 5652), False, 'from telegram.ext import Updater, CallbackContext, CommandHandler, MessageHandler, Filters, Handler\n'), ((5758, 5788), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""ekart"""', 'ekart'], {}), "('ekart', ekart)\n", (5772, 5788), False, 'from telegram.ext import Updater, CallbackContext, CommandHandler, MessageHandler, Filters, Handler\n'), ((5896, 5930), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""pitneyb"""', 'pitneyb'], {}), "('pitneyb', pitneyb)\n", (5910, 5930), False, 'from telegram.ext import Updater, CallbackContext, CommandHandler, MessageHandler, Filters, Handler\n'), ((6038, 6078), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""canadapost"""', 'canadapost'], {}), "('canadapost', canadapost)\n", (6052, 6078), False, 'from telegram.ext import Updater, CallbackContext, CommandHandler, MessageHandler, Filters, Handler\n'), ((790, 817), 'requests_html.HTMLSession', 'requests_html.HTMLSession', ([], {}), '()\n', (815, 817), False, 'import requests_html\n'), ((1486, 1571), 'requests.get', 'requests.get', (["('https://parceltracking.pb.com/ptsapi/track-packages/' + trackingID)"], {}), "('https://parceltracking.pb.com/ptsapi/track-packages/' +\n trackingID)\n", (1498, 1571), False, 'import requests\n'), ((1585, 1610), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1595, 1610), False, 'import json\n'), ((4058, 4160), 'requests.get', 'requests.get', (["('https://www.canadapost.ca/trackweb/rs/track/json/package/' + trackingID +\n '/detail')"], {}), "('https://www.canadapost.ca/trackweb/rs/track/json/package/' +\n trackingID + '/detail')\n", (4070, 4160), False, 'import requests\n'), ((4172, 4197), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (4182, 4197), False, 'import json\n')] |
""" Some exercises about statistics """
from matplotlib import pyplot as plt
from typing import List
from statistics.central_tendencies import *
from statistics.variance import variance, standard_deviation
from statistics.correlation import covariance, correlation
def main():
num_friends = [500, 50, 25, 30, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
19, 28, 37, 46, 55, 64, 73, 82, 91, 10,
19, 28, 37, 33, 55, 64, 73, 82, 91, 10]
daily_minutes = [1, 6, 10, 20, 4, 9, 12, 8, 9, 20,
5, 6, 10, 20, 4, 9, 12, 8, 9, 20,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
central_tendencies(num_friends)
dispersion(num_friends)
correlations(num_friends, daily_minutes)
correlation_outliers(num_friends, daily_minutes)
plot_graphs()
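# Scatter plots of friends vs. minutes, once with and once without the single extreme point (500 friends)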
def correlation_outliers(num_friends: List[float], daily_minutes: List[float]):
outlier = num_friends.index(500)
num_friends_good = [x for i, x in enumerate(num_friends) if i != outlier]
daily_minutes_good = [x for i, x in enumerate(daily_minutes) if i != outlier]
# plotting
plt.figure()
plt.scatter(num_friends, daily_minutes)
plt.title("Correlation without outlier")
plt.xlabel("# of friends")
plt.ylabel("minutes")
plt.figure()
plt.scatter(num_friends_good, daily_minutes_good)
plt.title("Correlation with outlier")
plt.xlabel("# of friends")
plt.ylabel("minutes")
def correlations(num_friends: List[float], daily_minutes: List[float]):
    cov = covariance(num_friends, daily_minutes)
    print(f'covariance: {cov}')
    corr = correlation(num_friends, daily_minutes)
print(f'correlation: {corr}')
def dispersion(num_friends: List[float]):
print(data_range(num_friends))
varian = variance(num_friends)
print(f'variance: {varian}')
standard_devi = standard_deviation(num_friends)
print(f'standard deviation: {standard_devi}')
def central_tendencies(num_friends: List[float]):
assert median([1, 10, 2, 9, 5]) == 5
vector_a = [1, 9, 2, 10]
assert median(vector_a) == (2 + 9) / 2
print(median(vector_a))
print(4//2) # 2
print(9//2) # 4
result_q1 = quantile(num_friends, 0.10)
print(f'quatile 10%: {result_q1}')
result_q2 = quantile(num_friends, 0.25)
print(f'quatile 25%: {result_q2}')
result_q3 = quantile(num_friends, 0.50)
print(f'quatile 50%: {result_q3}')
result_q4 = quantile(num_friends, 0.75)
print(f'quatile 75%: {result_q4}')
result_q5 = quantile(num_friends, 0.90)
print(f'quatile 90%: {result_q5}')
moda = set(mode(num_friends))
print(f'moda: {moda}')
def plot_graphs():
plt.show()
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"statistics.variance.variance",
"matplotlib.pyplot.scatter",
"statistics.variance.standard_deviation",
"statistics.correlation.covariance",
"matplotlib.pyplot.figure",
"statistics.correlation.correlation",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((1176, 1188), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1186, 1188), True, 'from matplotlib import pyplot as plt\n'), ((1193, 1232), 'matplotlib.pyplot.scatter', 'plt.scatter', (['num_friends', 'daily_minutes'], {}), '(num_friends, daily_minutes)\n', (1204, 1232), True, 'from matplotlib import pyplot as plt\n'), ((1237, 1277), 'matplotlib.pyplot.title', 'plt.title', (['"""Correlation without outlier"""'], {}), "('Correlation without outlier')\n", (1246, 1277), True, 'from matplotlib import pyplot as plt\n'), ((1282, 1308), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# of friends"""'], {}), "('# of friends')\n", (1292, 1308), True, 'from matplotlib import pyplot as plt\n'), ((1313, 1334), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""minutes"""'], {}), "('minutes')\n", (1323, 1334), True, 'from matplotlib import pyplot as plt\n'), ((1340, 1352), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1350, 1352), True, 'from matplotlib import pyplot as plt\n'), ((1357, 1406), 'matplotlib.pyplot.scatter', 'plt.scatter', (['num_friends_good', 'daily_minutes_good'], {}), '(num_friends_good, daily_minutes_good)\n', (1368, 1406), True, 'from matplotlib import pyplot as plt\n'), ((1411, 1448), 'matplotlib.pyplot.title', 'plt.title', (['"""Correlation with outlier"""'], {}), "('Correlation with outlier')\n", (1420, 1448), True, 'from matplotlib import pyplot as plt\n'), ((1453, 1479), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# of friends"""'], {}), "('# of friends')\n", (1463, 1479), True, 'from matplotlib import pyplot as plt\n'), ((1484, 1505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""minutes"""'], {}), "('minutes')\n", (1494, 1505), True, 'from matplotlib import pyplot as plt\n'), ((1589, 1626), 'statistics.correlation.covariance', 'covariance', (['num_frieds', 'daily_minutes'], {}), '(num_frieds, daily_minutes)\n', (1599, 1626), False, 'from statistics.correlation import covariance, correlation\n'), ((1671, 1709), 'statistics.correlation.correlation', 'correlation', (['num_frieds', 'daily_minutes'], {}), '(num_frieds, daily_minutes)\n', (1682, 1709), False, 'from statistics.correlation import covariance, correlation\n'), ((1837, 1858), 'statistics.variance.variance', 'variance', (['num_friends'], {}), '(num_friends)\n', (1845, 1858), False, 'from statistics.variance import variance, standard_deviation\n'), ((1913, 1944), 'statistics.variance.standard_deviation', 'standard_deviation', (['num_friends'], {}), '(num_friends)\n', (1931, 1944), False, 'from statistics.variance import variance, standard_deviation\n'), ((2739, 2749), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2747, 2749), True, 'from matplotlib import pyplot as plt\n')] |
from typing import Type, Union, Optional
from pathlib import Path
from wunderkafka.types import TopicName, KeySchemaDescription, ValueSchemaDescription
from wunderkafka.serdes.abc import AbstractDescriptionStore
from wunderkafka.compat.types import AvroModel
from wunderkafka.compat.constants import PY36
from wunderkafka.serdes.avromodel import derive
class SchemaTextRepo(AbstractDescriptionStore):
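    # Stores schema text that is passed in directly as strings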
    def add(self, topic: TopicName, value: str, key: Optional[str]) -> None:
self._values[topic] = ValueSchemaDescription(text=value)
if key is not None:
self._keys[topic] = KeySchemaDescription(text=key)
def _load_from_file(filename: Path) -> str:
with open(filename) as fl:
return fl.read()
# ToDo (tribunsky.kir): refactor it, maybe add hooks to parent class.
# Barbara, forgive us. Looks like AbstractDescriptionStore should be generic.
class SchemaFSRepo(AbstractDescriptionStore):
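    # Reads schema text from files at the given paths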
    def add(self, topic: TopicName, value: Union[str, Path], key: Optional[Union[str, Path]]) -> None:
self._values[topic] = ValueSchemaDescription(text=_load_from_file(Path(value)))
if key is not None:
self._keys[topic] = KeySchemaDescription(text=_load_from_file(Path(key)))
class AvroModelRepo(AbstractDescriptionStore):
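    # Derives schema text from AvroModel classes via derive()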
def __init__(self) -> None:
super().__init__()
if PY36:
AvroModel()
# ToDo (tribunsky.kir): change Type[AvroModel] to more general alias + check derivation from python built-ins
def add(self, topic: TopicName, value: Type[AvroModel], key: Optional[Type[AvroModel]]) -> None:
self._values[topic] = ValueSchemaDescription(text=derive(value, topic))
if key is not None:
self._keys[topic] = KeySchemaDescription(text=derive(key, topic, is_key=True))
| [
"wunderkafka.types.ValueSchemaDescription",
"wunderkafka.types.KeySchemaDescription",
"wunderkafka.serdes.avromodel.derive",
"wunderkafka.compat.types.AvroModel",
"pathlib.Path"
] | [((502, 536), 'wunderkafka.types.ValueSchemaDescription', 'ValueSchemaDescription', ([], {'text': 'value'}), '(text=value)\n', (524, 536), False, 'from wunderkafka.types import TopicName, KeySchemaDescription, ValueSchemaDescription\n'), ((597, 627), 'wunderkafka.types.KeySchemaDescription', 'KeySchemaDescription', ([], {'text': 'key'}), '(text=key)\n', (617, 627), False, 'from wunderkafka.types import TopicName, KeySchemaDescription, ValueSchemaDescription\n'), ((1382, 1393), 'wunderkafka.compat.types.AvroModel', 'AvroModel', ([], {}), '()\n', (1391, 1393), False, 'from wunderkafka.compat.types import AvroModel\n'), ((1668, 1688), 'wunderkafka.serdes.avromodel.derive', 'derive', (['value', 'topic'], {}), '(value, topic)\n', (1674, 1688), False, 'from wunderkafka.serdes.avromodel import derive\n'), ((1116, 1127), 'pathlib.Path', 'Path', (['value'], {}), '(value)\n', (1120, 1127), False, 'from pathlib import Path\n'), ((1776, 1807), 'wunderkafka.serdes.avromodel.derive', 'derive', (['key', 'topic'], {'is_key': '(True)'}), '(key, topic, is_key=True)\n', (1782, 1807), False, 'from wunderkafka.serdes.avromodel import derive\n'), ((1232, 1241), 'pathlib.Path', 'Path', (['key'], {}), '(key)\n', (1236, 1241), False, 'from pathlib import Path\n')] |
##
# train eeg data of mind commands
# (beta)
#
##
import json
import os
import sys
import time
import pickle
import numpy as np
from mindFunctions import filterDownsampleData
import codecs, json
from scipy.signal import butter, lfilter
from sklearn import svm, preprocessing, metrics
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit
from pathlib import Path
# enable/disable debug Mode
debug = False
# the 5 commands from player
commands = ['volup', 'playpause', 'next', 'prev', 'voldown']
cmdCount = len(commands) # nr of commands
def main():
# read training data from files
    # default path with stored training data
# filepath-example = 'your project path'/data/mind/training-playpause.json'
cwd = os.getcwd()
traindataFolder = cwd + '/data/mind/'
# default path if python script runs standalone
if (os.path.basename(cwd) == "pyscripts"):
traindataFolder = cwd + '/../../data/mind/'
traindata = []
for cmd in range(cmdCount):
filepath = Path(traindataFolder + 'training-' + commands[cmd] + '.json')
# read file of trainingCmd
with open(filepath) as f:
data = json.load(f)
traindata.append(np.array(data, dtype='f'))
# read in baseline from file
baseline = []
blpath = Path(traindataFolder + 'training-baseline.json')
# read file of baseline
with open(blpath) as blf:
bl = json.load(blf)
baseline = np.array(bl, dtype='f')
## read in test data
with open(traindataFolder + 'test-baseline.json') as f:
baselineTest = json.load(f)
with open(traindataFolder + 'test-volts.json') as f:
voltsTest = json.load(f)
# create a numpy array
voltsTest = np.array(voltsTest, dtype='f')
baselineTest = np.array(baselineTest, dtype='f')
if debug:
print("\n------ Training Data ------")
print("traindata length should be 5 (cmds): " + str(len(traindata)))
print("traindata[0] length should be 1500 (samples): " + str(len(traindata[0])))
print("traindata[0][0] length should be 8 (channels): " + str(len(traindata[0][0])))
# 1. Filter and Downsample Trainingdata and Baseline
[filterdTraindata, baselineDataBP] = filterDownsampleData(traindata, baseline, commands, debug)
if debug:
print("\n------ Filtered Training Data ------")
print("filterdTraindata length should be 5 (cmds): " + str(len(filterdTraindata)))
print("filterdTraindata[0] length is now 8 (channels): " + str(len(filterdTraindata[0])))
print("filterdTraindata[0][0] length is now 250 (samples): " + str(len(filterdTraindata[0][0])))
# # save filterd Data
# filterdTraindata = np.array(filterdTraindata)
# baselineDataBP = np.array(baselineDataBP)
# outfile = '../../data/mind/model/filterdTraingdata.txt'
# json.dump(filterdTraindata.tolist(), codecs.open(outfile, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True,
# indent=4) ### this saves the array in .json format
# outfile = '../../data/mind/model/baselineDataBP.txt'
# json.dump(baselineDataBP.tolist(), codecs.open(outfile, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True,
# indent=4) ### this saves the array in .json format
## 2. Extract Features for Trainingdata (only commands)
[X, y] = extractFeature(filterdTraindata)
if debug:
print("Anz. Features: " + str(len(X)))
print("y: " + str(y))
## 3. Train Model with features
# gamma: defines how far the influence of a single training example reaches, with low values meaning ‘far’ and high values meaning ‘close’.
# C: trades off misclassification of training examples against simplicity of the decision surface.
# A low C makes the decision surface smooth, while a high C aims at classifying all training examples correctly by giving the model freedom to select more samples as support vectors.
# Find optimal gamma and C parameters: http://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html
# TODO: Set correct SVM params
[C, gamma] = findTrainClassifier(X, y)
clf = svm.SVC(kernel='rbf', gamma=gamma, C=C)
clf.fit(X, y)
## save model
with open('../../data/mind/model/svm_model-mind.txt', 'wb') as outfile:
pickle.dump(clf, outfile)
    ## Check if training data gets 100% accuracy
if debug:
[accuracy, _, _] = modelAccuracy(y, clf.predict(X))
if (accuracy == 1.0):
print("Correct classification with traingdata")
else:
print("Wrong classification with traingdata. check SVM algorithm")
print("\n------ Test Data ------")
## 4. Filter and Downsample Testdata
    [filterdTestdata, _] = filterDownsampleData(voltsTest, baselineTest, commands, debug)
## 5. Extract Features from Testdata
targetCmd = 1 # Playpause===1
    [X_test, y_test] = extractFeatureTest(filterdTestdata, targetCmd)
print("Anz. Features X_Test: " + str(len(X_test)))
print("y_Test: " + str(y_test))
## 6. Check Model Accuracy
print("\n------ Model Accuracy ------")
y_pred = clf.predict(X_test) # Predict the response for test dataset
if debug: print("predicted y " + str(y_pred))
[accuracy, precision, recall] = modelAccuracy(y_test, y_pred)
print("Accuracy: " + str(accuracy))
print("Precision: " + str(precision))
print("Recall: " + str(recall))
# send success back to node
# TODO: implement real success boolean return
print('true')
def extractFeature(dataFilterd):
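    # Build (X, y): slice each command's flattened (channels x samples) data into three feature vectors, labelled with the command index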
## Create X and Y data for SVM training
X = []
y = []
# TODO: Extract Features
## Reshape Data
reshapedData = []
dataFilterdNp = np.array(dataFilterd)
trainCmd, nx, ny = dataFilterdNp.shape
reshapedData = dataFilterdNp.reshape((trainCmd, nx * ny))
if (debug):
print("\n-- Reshaped Data ---")
print("len(reshapedData) aka 5 cmds: " + str(len(reshapedData)))
print("len(reshapedData[0]) channels*samples aka 8*250=2000 : " + str(len(reshapedData[0])))
for cmd in range(cmdCount):
X.append(reshapedData[cmd][0:2000])
X.append(reshapedData[cmd][2000:4000])
X.append(reshapedData[cmd][4000:6000])
y.append(cmd)
y.append(cmd)
y.append(cmd)
# Feature Standardization
X = preprocessing.scale(X)
return X, y
def extractFeatureTest(dataDownSample, cmd):
## Create X and Y data for SVM test
X = []
y = []
print(len(X))
X.append(dataDownSample)
y.append(cmd)
if debug:
print("\n-- X and Y Data ---")
print("y : " + str(y))
## Feature Standardization
X = preprocessing.scale(X)
return X, y
def modelAccuracy(y_test, y_pred):
# Model Accuracy: how often is the classifier correct
accuracy = metrics.accuracy_score(y_test, y_pred)
# Model Precision: what percentage of positive tuples are labeled as such?
precision = metrics.precision_score(y_test, y_pred)
# Model Recall: what percentage of positive tuples are labelled as such?
recall = metrics.recall_score(y_test, y_pred)
return [accuracy, precision, recall]
def findTrainClassifier(X, y):
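    # Grid-search C and gamma over log-spaced ranges using stratified shuffle-split cross-validation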
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
if debug:
print("The best parameters are %s with a score of %0.2f" % (grid.best_params_, grid.best_score_))
return grid.best_params_['C'], grid.best_params_['gamma']
# start process
if __name__ == '__main__':
main()
| [
"pickle.dump",
"json.load",
"sklearn.preprocessing.scale",
"os.getcwd",
"numpy.logspace",
"sklearn.metrics.accuracy_score",
"os.path.basename",
"sklearn.metrics.recall_score",
"sklearn.model_selection.StratifiedShuffleSplit",
"pathlib.Path",
"numpy.array",
"sklearn.metrics.precision_score",
"sklearn.svm.SVC",
"mindFunctions.filterDownsampleData"
] | [((745, 756), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (754, 756), False, 'import os\n'), ((1302, 1350), 'pathlib.Path', 'Path', (["(traindataFolder + 'training-baseline.json')"], {}), "(traindataFolder + 'training-baseline.json')\n", (1306, 1350), False, 'from pathlib import Path\n'), ((1452, 1475), 'numpy.array', 'np.array', (['bl'], {'dtype': '"""f"""'}), "(bl, dtype='f')\n", (1460, 1475), True, 'import numpy as np\n'), ((1732, 1762), 'numpy.array', 'np.array', (['voltsTest'], {'dtype': '"""f"""'}), "(voltsTest, dtype='f')\n", (1740, 1762), True, 'import numpy as np\n'), ((1782, 1815), 'numpy.array', 'np.array', (['baselineTest'], {'dtype': '"""f"""'}), "(baselineTest, dtype='f')\n", (1790, 1815), True, 'import numpy as np\n'), ((2236, 2294), 'mindFunctions.filterDownsampleData', 'filterDownsampleData', (['traindata', 'baseline', 'commands', 'debug'], {}), '(traindata, baseline, commands, debug)\n', (2256, 2294), False, 'from mindFunctions import filterDownsampleData\n'), ((4184, 4223), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'C': 'C'}), "(kernel='rbf', gamma=gamma, C=C)\n", (4191, 4223), False, 'from sklearn import svm, preprocessing, metrics\n'), ((5826, 5847), 'numpy.array', 'np.array', (['dataFilterd'], {}), '(dataFilterd)\n', (5834, 5847), True, 'import numpy as np\n'), ((6462, 6484), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (6481, 6484), False, 'from sklearn import svm, preprocessing, metrics\n'), ((6800, 6822), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (6819, 6822), False, 'from sklearn import svm, preprocessing, metrics\n'), ((6950, 6988), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6972, 6988), False, 'from sklearn import svm, preprocessing, metrics\n'), ((7085, 7124), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7108, 7124), False, 'from sklearn import svm, preprocessing, metrics\n'), ((7216, 7252), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7236, 7252), False, 'from sklearn import svm, preprocessing, metrics\n'), ((7342, 7365), 'numpy.logspace', 'np.logspace', (['(-2)', '(10)', '(13)'], {}), '(-2, 10, 13)\n', (7353, 7365), True, 'import numpy as np\n'), ((7384, 7406), 'numpy.logspace', 'np.logspace', (['(-9)', '(3)', '(13)'], {}), '(-9, 3, 13)\n', (7395, 7406), True, 'import numpy as np\n'), ((7468, 7534), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(5)', 'test_size': '(0.2)', 'random_state': '(42)'}), '(n_splits=5, test_size=0.2, random_state=42)\n', (7490, 7534), False, 'from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit\n'), ((860, 881), 'os.path.basename', 'os.path.basename', (['cwd'], {}), '(cwd)\n', (876, 881), False, 'import os\n'), ((1022, 1083), 'pathlib.Path', 'Path', (["(traindataFolder + 'training-' + commands[cmd] + '.json')"], {}), "(traindataFolder + 'training-' + commands[cmd] + '.json')\n", (1026, 1083), False, 'from pathlib import Path\n'), ((1422, 1436), 'json.load', 'json.load', (['blf'], {}), '(blf)\n', (1431, 1436), False, 'import codecs, json\n'), ((1585, 1597), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1594, 1597), False, 'import codecs, json\n'), ((1675, 1687), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1684, 1687), False, 'import codecs, json\n'), ((4345, 
4370), 'pickle.dump', 'pickle.dump', (['clf', 'outfile'], {}), '(clf, outfile)\n', (4356, 4370), False, 'import pickle\n'), ((4794, 4856), 'mindFunctions.filterDownsampleData', 'filterDownsampleData', (['voltsTest', 'baselineTest', 'commands', 'debug'], {}), '(voltsTest, baselineTest, commands, debug)\n', (4814, 4856), False, 'from mindFunctions import filterDownsampleData\n'), ((7559, 7568), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (7566, 7568), False, 'from sklearn import svm, preprocessing, metrics\n'), ((1172, 1184), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1181, 1184), False, 'import codecs, json\n'), ((1210, 1235), 'numpy.array', 'np.array', (['data'], {'dtype': '"""f"""'}), "(data, dtype='f')\n", (1218, 1235), True, 'import numpy as np\n')] |
# Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from eventlet import greenthread
RouteRule = collections.namedtuple('RouteRule', 'dest_cidr, next_hop')
def retry(fn, args=None, kwargs=None, exceptions=None, limit=1, delay=0):
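    # Call fn up to `limit` times, retrying only on the given exception types and sleeping `delay` seconds between attempts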
args = args or []
kwargs = kwargs or {}
while limit > 0:
try:
return fn(*args, **kwargs)
except Exception as e:
if not exceptions or not isinstance(e, exceptions):
raise
if delay:
greenthread.sleep(delay)
limit -= 1
raise
| [
"eventlet.greenthread.sleep",
"collections.namedtuple"
] | [((717, 775), 'collections.namedtuple', 'collections.namedtuple', (['"""RouteRule"""', '"""dest_cidr, next_hop"""'], {}), "('RouteRule', 'dest_cidr, next_hop')\n", (739, 775), False, 'import collections\n'), ((1121, 1145), 'eventlet.greenthread.sleep', 'greenthread.sleep', (['delay'], {}), '(delay)\n', (1138, 1145), False, 'from eventlet import greenthread\n')] |
import sys
import time
import serial
import serial.tools.list_ports as stl
def list_modi_serialports():
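    # Return device paths of connected MODI boards: matching COM ports, plus WinUSB paths on Windows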
info_list = []
def __is_modi_port(port):
return (port.vid == 0x2FDE and port.pid == 0x0003)
modi_ports = [port for port in stl.comports() if __is_modi_port(port)]
for modi_port in modi_ports:
info_list.append(modi_port.device)
if sys.platform.startswith("win"):
from modi2_firmware_updater.util.modi_winusb.modi_winusb import list_modi_winusb_paths
path_list = list_modi_winusb_paths()
for index, value in enumerate(path_list):
info_list.append(value)
return info_list
class ModiSerialPort():
SERIAL_MODE_COMPORT = 1
SERIAL_MODI_WINUSB = 2
def __init__(self, port = None, baudrate = 921600, timeout = 0.2, write_timeout = None):
self.type = self.SERIAL_MODE_COMPORT
self._port = port
self._baudrate = baudrate
self._timeout = timeout
self._write_timeout = write_timeout
self.serial_port = None
        self.is_open = False
if self._port is not None:
self.open(self._port)
def open(self, port):
self._port = port
if sys.platform.startswith("win"):
from modi2_firmware_updater.util.modi_winusb.modi_winusb import ModiWinUsbComPort, list_modi_winusb_paths
if port in list_modi_winusb_paths():
self.type = self.SERIAL_MODI_WINUSB
winusb = ModiWinUsbComPort(path = self._port, baudrate=self._baudrate, timeout=self._timeout)
self.serial_port = winusb
else:
ser = serial.Serial(port = self._port, baudrate=self._baudrate, timeout=self._timeout, write_timeout=self._write_timeout, exclusive=True)
self.serial_port = ser
else:
ser = serial.Serial(port = self._port, baudrate=self._baudrate, timeout=self._timeout, write_timeout=self._write_timeout, exclusive=True)
self.serial_port = ser
self.is_open = True
def close(self):
if self.is_open:
self.serial_port.close()
def write(self, data):
if not self.is_open:
raise Exception("serialport is not opened")
if type(data) is str:
data = data.encode("utf8")
self.serial_port.write(data)
def read(self, size=1):
if not self.is_open:
raise Exception("serialport is not opened")
if size == None and self.type == self.SERIAL_MODE_COMPORT:
size = 1
return self.serial_port.read(size)
def read_until(self, expected=b"\x0A", size=None):
if not self.is_open:
raise Exception("serialport is not opened")
lenterm = len(expected)
line = bytearray()
modi_timeout = self.Timeout(self._timeout)
while True:
c = self.read(1)
if c:
line += c
if line[-lenterm:] == expected:
break
if size is not None and len(line) >= size:
break
else:
break
if modi_timeout.expired():
break
return bytes(line)
def read_all(self):
if not self.is_open:
raise Exception("serialport is not opened")
return self.serial_port.read_all()
def flush(self):
if not self.is_open:
raise Exception("serialport is not opened")
self.serial_port.flush()
def flushInput(self):
if not self.is_open:
raise Exception("serialport is not opened")
self.serial_port.flushInput()
def flushOutput(self):
if not self.is_open:
raise Exception("serialport is not opened")
self.serial_port.flushOutput()
def setDTR(self, state):
if not self.is_open:
raise Exception("serialport is not opened")
self.serial_port.setDTR(state)
def setRTS(self, state):
if not self.is_open:
raise Exception("serialport is not opened")
self.serial_port.setRTS(state)
def inWaiting(self):
if not self.is_open:
raise Exception("serialport is not opened")
waiting = None
if self.type == self.SERIAL_MODE_COMPORT:
waiting = self.serial_port.inWaiting()
return waiting
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
self.serial_port.port = value
@property
def baudrate(self):
return self._baudrate
@baudrate.setter
def baudrate(self, value):
self._baudrate = value
self.serial_port.baudrate = value
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
self.serial_port.timeout = value
@property
def write_timeout(self):
return self._write_timeout
@write_timeout.setter
def write_timeout(self, value):
self._write_timeout = value
self.serial_port.write_timeout = value
@property
def dtr(self):
if self.type == self.SERIAL_MODE_COMPORT:
return self.serial_port.dtr
else:
return False
class Timeout(object):
"""\
Abstraction for timeout operations. Using time.monotonic() if available
or time.time() in all other cases.
The class can also be initialized with 0 or None, in order to support
non-blocking and fully blocking I/O operations. The attributes
is_non_blocking and is_infinite are set accordingly.
"""
if hasattr(time, 'monotonic'):
# Timeout implementation with time.monotonic(). This function is only
# supported by Python 3.3 and above. It returns a time in seconds
# (float) just as time.time(), but is not affected by system clock
# adjustments.
TIME = time.monotonic
else:
# Timeout implementation with time.time(). This is compatible with all
# Python versions but has issues if the clock is adjusted while the
# timeout is running.
TIME = time.time
def __init__(self, duration):
"""Initialize a timeout with given duration"""
self.is_infinite = (duration is None)
self.is_non_blocking = (duration == 0)
self.duration = duration
if duration is not None:
self.target_time = self.TIME() + duration
else:
self.target_time = None
def expired(self):
"""Return a boolean, telling if the timeout has expired"""
return self.target_time is not None and self.time_left() <= 0
def time_left(self):
"""Return how many seconds are left until the timeout expires"""
if self.is_non_blocking:
return 0
elif self.is_infinite:
return None
else:
delta = self.target_time - self.TIME()
if delta > self.duration:
# clock jumped, recalculate
self.target_time = self.TIME() + self.duration
return self.duration
else:
return max(0, delta)
def restart(self, duration):
"""\
Restart a timeout, only supported if a timeout was already set up
before.
"""
self.duration = duration
self.target_time = self.TIME() + duration
# main
if __name__ == "__main__":
stop = False
def handle_received(serialport):
global stop
while not stop:
init = time.time()
recv = serialport.read_until(b"}")
dt = time.time() - init
if recv == None:
print("disconnected")
stop = True
break
print(f"dt: {int(dt * 1000.0)}ms - {recv}")
time.sleep(0.001)
serialport.close()
import threading
info_list = list_modi_serialports()
if not info_list:
raise Exception("No MODI+ is connected")
serialport = ModiSerialPort(info_list[0])
threading.Thread(target=handle_received, daemon=True, args=(serialport, )).start()
print("To exit the program, enter 'exit'.")
while not stop:
input_data = input()
if input_data == "exit":
stop = True
break
serialport.close() | [
"serial.Serial",
"sys.platform.startswith",
"threading.Thread",
"modi2_firmware_updater.util.modi_winusb.modi_winusb.ModiWinUsbComPort",
"serial.tools.list_ports.comports",
"time.time",
"time.sleep",
"modi2_firmware_updater.util.modi_winusb.modi_winusb.list_modi_winusb_paths"
] | [((391, 421), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (414, 421), False, 'import sys\n'), ((540, 564), 'modi2_firmware_updater.util.modi_winusb.modi_winusb.list_modi_winusb_paths', 'list_modi_winusb_paths', ([], {}), '()\n', (562, 564), False, 'from modi2_firmware_updater.util.modi_winusb.modi_winusb import ModiWinUsbComPort, list_modi_winusb_paths\n'), ((1252, 1282), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (1275, 1282), False, 'import sys\n'), ((263, 277), 'serial.tools.list_ports.comports', 'stl.comports', ([], {}), '()\n', (275, 277), True, 'import serial.tools.list_ports as stl\n'), ((1908, 2042), 'serial.Serial', 'serial.Serial', ([], {'port': 'self._port', 'baudrate': 'self._baudrate', 'timeout': 'self._timeout', 'write_timeout': 'self._write_timeout', 'exclusive': '(True)'}), '(port=self._port, baudrate=self._baudrate, timeout=self.\n _timeout, write_timeout=self._write_timeout, exclusive=True)\n', (1921, 2042), False, 'import serial\n'), ((8089, 8100), 'time.time', 'time.time', ([], {}), '()\n', (8098, 8100), False, 'import time\n'), ((8379, 8396), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (8389, 8396), False, 'import time\n'), ((8621, 8694), 'threading.Thread', 'threading.Thread', ([], {'target': 'handle_received', 'daemon': '(True)', 'args': '(serialport,)'}), '(target=handle_received, daemon=True, args=(serialport,))\n', (8637, 8694), False, 'import threading\n'), ((1427, 1451), 'modi2_firmware_updater.util.modi_winusb.modi_winusb.list_modi_winusb_paths', 'list_modi_winusb_paths', ([], {}), '()\n', (1449, 1451), False, 'from modi2_firmware_updater.util.modi_winusb.modi_winusb import ModiWinUsbComPort, list_modi_winusb_paths\n'), ((1532, 1619), 'modi2_firmware_updater.util.modi_winusb.modi_winusb.ModiWinUsbComPort', 'ModiWinUsbComPort', ([], {'path': 'self._port', 'baudrate': 'self._baudrate', 'timeout': 'self._timeout'}), '(path=self._port, baudrate=self._baudrate, timeout=self.\n _timeout)\n', (1549, 1619), False, 'from modi2_firmware_updater.util.modi_winusb.modi_winusb import ModiWinUsbComPort, list_modi_winusb_paths\n'), ((1702, 1836), 'serial.Serial', 'serial.Serial', ([], {'port': 'self._port', 'baudrate': 'self._baudrate', 'timeout': 'self._timeout', 'write_timeout': 'self._write_timeout', 'exclusive': '(True)'}), '(port=self._port, baudrate=self._baudrate, timeout=self.\n _timeout, write_timeout=self._write_timeout, exclusive=True)\n', (1715, 1836), False, 'import serial\n'), ((8167, 8178), 'time.time', 'time.time', ([], {}), '()\n', (8176, 8178), False, 'import time\n')] |
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
from gui.utils import MessageBox
import numpy as np
class MplCanvas(FigureCanvasQTAgg):
"""
A canvas for matplotlib plots. Contains all plot functionality for Plot Mode
"""
def __init__(self, components, plotting_preferences):
self.fig = Figure(dpi=100)
self.empty = True
self.components = components
self.isenthalps = None
self.isentropes = None
self.isotherms = None
self.isobars = None
super(MplCanvas, self).__init__(figure=self.fig)
self.plotting_preferences = plotting_preferences
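        # Note: self.axes is not created in this class; it is expected to be attached externally before the plot_* methods are called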
def toggle_isenthalps(self, is_checked):
"""
Hides / shows isenthalp lines in the plot if a plot exists
:param is_checked: Status of isenthalp button (bool)
"""
if not self.empty and self.isenthalps:
if is_checked:
for line in self.isenthalps:
line.set_linestyle("solid")
else:
for line in self.isenthalps:
line.set_linestyle("None")
self.draw()
def toggle_isentropes(self, is_checked):
"""
Hides / shows isentrope lines in the plot if a plot exists
:param is_checked: Status of isentrope button (bool)
"""
if not self.empty and self.isentropes:
if is_checked:
for line in self.isentropes:
line.set_linestyle("solid")
else:
for line in self.isentropes:
line.set_linestyle("None")
self.draw()
else:
return
def toggle_isotherms(self, is_checked):
"""
Hides / shows isotherm lines in the plot if a plot exists
:param is_checked: Status of isotherm button (bool)
"""
if not self.empty and self.isotherms:
if is_checked:
for line in self.isotherms:
line.set_linestyle("solid")
else:
for line in self.isotherms:
line.set_linestyle("None")
self.draw()
else:
return
def toggle_isobars(self, is_checked):
"""
Hides / shows isobar lines in the plot if a plot exists
:param is_checked: Status of isobar button (bool)
"""
if not self.empty and self.isobars:
if is_checked:
for line in self.isobars:
line.set_linestyle("solid")
else:
for line in self.isobars:
line.set_linestyle("None")
self.draw()
else:
return
def plot_envelope(self, tp, prim_vars, fractions):
"""
Plots a phase envelope
:param tp: Thermopack instance
:param prim_vars: Primary variables for the plot (e.g. PT, PH, ..)
:param fractions: List of molar fractions for the components
"""
tpv_settings = self.plotting_preferences["Phase envelope"]["TPV"]
isopleth_settings = self.plotting_preferences["Phase envelope"]["Isopleths"]
critical_settings = self.plotting_preferences["Phase envelope"]["Critical"]
plot_settings = self.plotting_preferences["Phase envelope"]["Plotting"]
p_initial = tpv_settings["Initial pressure"]
t_min = tpv_settings["Minimum temperature"]
p_max = tpv_settings["Maximum pressure"]
step_size = tpv_settings["Step size"]
# Calculate T, P, V
T, P, V = tp.get_envelope_twophase(initial_pressure=p_initial, z=fractions, maximum_pressure=p_max,
minimum_temperature=t_min, step_size=step_size, calc_v=True)
H = np.array([tp.enthalpy_tv(T[i], V[i], fractions) for i in range(len(T))])
S = np.array([tp.entropy_tv(T[i], V[i], fractions) for i in range(len(T))])
global H_list
global T_list
global S_list
global P_list
n_isopleths = isopleth_settings["Number of isopleths"]
H_list = np.linspace(np.min(H), np.max(H), n_isopleths)
S_list = np.linspace(np.min(S), np.max(S), n_isopleths)
T_list = np.linspace(np.min(T) * 0.60, np.max(T) * 1.40, n_isopleths)
P_list = np.linspace(np.min(P) * 0.60, np.max(P) * 1.40, n_isopleths)
temp = critical_settings["Temperature"]
v = critical_settings["Volume"]
tol = critical_settings["Error tolerance"]
# Calculate critical variables
try:
T_c, V_c, P_c = tp.critical(n=fractions, temp=temp, v=v, tol=tol)
H_c = tp.enthalpy_tv(T_c, V_c, fractions)
S_c = tp.entropy_tv(T_c, V_c, fractions)
except Exception as e:
msg = MessageBox("Error", str(e))
msg.exec_()
T_c, V_c, P_c, H_c, S_c = None, None, None, None, None
# Set global variables, so that they are accessible in all phase envelope plot functions
global isopleth_1_color
global isopleth_2_color
global P_min
global P_max
global T_min
global T_max
global nmax
isopleth_1_color = plot_settings["Colors"][2]
isopleth_2_color = plot_settings["Colors"][3]
P_min = isopleth_settings["Minimum pressure"]
P_max = isopleth_settings["Maximum pressure"]
T_min = isopleth_settings["Minimum temperature"]
T_max = isopleth_settings["Maximum temperature"]
nmax = isopleth_settings["N max"]
# Plot depending on which primary variables are chosen
if prim_vars == "PT":
x, y, crit_x, crit_y = self.plot_envelope_PT(tp, T, P, T_c, P_c, fractions)
elif prim_vars == "PH":
x, y, crit_x, crit_y = self.plot_envelope_PH(tp, P, H, P_c, H_c, fractions)
elif prim_vars == "PS":
x, y, crit_x, crit_y = self.plot_envelope_PS(tp, P, S, P_c, S_c, fractions)
elif prim_vars == "TH":
x, y, crit_x, crit_y = self.plot_envelope_TH(tp, T, H, T_c, H_c, fractions)
elif prim_vars == "TS":
x, y, crit_x, crit_y = self.plot_envelope_TS(tp, T, S, T_c, S_c, fractions)
else:
return
# Plotting
line_color = plot_settings["Colors"][0]
point_color = plot_settings["Colors"][1]
grid_on = plot_settings["Grid on"]
xlabel = plot_settings["x label"]
ylabel = plot_settings["y label"]
title = plot_settings["Title"]
self.axes.plot(x, y, color=line_color, label="Phase envelope")
self.axes.scatter([crit_x], [crit_y], color=point_color, label="Critical point")
self.axes.set_title(title)
self.axes.grid(grid_on)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
# Sort entries in the legend
legend = True
if legend:
if n_isopleths > 0:
handles, labels = self.axes.get_legend_handles_labels()
self.axes.legend([handles[3], handles[2], handles[0], handles[1]],
[labels[3], labels[2], labels[0], labels[1]],
loc="best")
else:
self.axes.legend()
self.draw()
def plot_envelope_PT(self, tp, T, P, T_c, P_c, fractions):
"""
Return plot data for a PT phase envelope
:param tp: Thermopack instance
:param T: Temperature values
:param P: Pressure values
:param T_c: Critical temperature
:param P_c: Critical pressure
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(0)
self.parent().parent().parent().PT_H_btn.setChecked(True)
self.parent().parent().parent().PT_S_btn.setChecked(True)
x = T
y = P
crit_x = T_c
crit_y = P_c
# Isenthalps, isentropes
enthalpies = H_list
entropies = S_list
self.isenthalps = []
self.isentropes = []
for i in range(len(enthalpies)):
t_vals, p_vals, v_vals, s_vals = tp.get_isenthalp(enthalpies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
h_line, = self.axes.plot(t_vals, p_vals, color=isopleth_1_color, label="Isenthalp")
else:
h_line, = self.axes.plot(t_vals, p_vals, color=isopleth_1_color)
self.isenthalps.append(h_line)
t_vals, p_vals, v_vals, h_vals = tp.get_isentrope(entropies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
s_line, = self.axes.plot(t_vals, p_vals, color=isopleth_2_color, label="Isentrope")
else:
s_line, = self.axes.plot(t_vals, p_vals, color=isopleth_2_color)
self.isentropes.append(s_line)
self.isotherms = None
self.isobars = None
return x, y, crit_x, crit_y
def plot_envelope_PH(self, tp, P, H, P_c, H_c, fractions):
"""
Return plot data for a PH phase envelope
:param tp: Thermopack instance
:param P: Pressure values
:param H: Enthalpy values
:param P_c: Critical pressure
:param H_c: Critical enthalpy
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(1)
self.parent().parent().parent().PH_T_btn.setChecked(True)
self.parent().parent().parent().PH_S_btn.setChecked(True)
x = H
y = P
crit_x = H_c
crit_y = P_c
# isotherms, isentropes
temperatures = T_list
entropies = S_list
self.isotherms = []
self.isentropes = []
for i in range(len(temperatures)):
p_vals, v_vals, s_vals, h_vals = tp.get_isotherm(temperatures[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, nmax=nmax)
if i == 0:
t_line, = self.axes.plot(h_vals, p_vals, color=isopleth_1_color, label="Isotherm")
else:
t_line, = self.axes.plot(h_vals, p_vals, color=isopleth_1_color)
self.isotherms.append(t_line)
t_vals, p_vals, v_vals, h_vals = tp.get_isentrope(entropies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
s_line, = self.axes.plot(h_vals, p_vals, color=isopleth_2_color, label="Isentrope")
else:
s_line, = self.axes.plot(h_vals, p_vals, color=isopleth_2_color)
self.isentropes.append(s_line)
self.isenthalps = None
self.isobars = None
return x, y, crit_x, crit_y
def plot_envelope_PS(self, tp, P, S, P_c, S_c, fractions):
"""
Return plot data for a PS phase envelope
:param tp: Thermopack instance
:param P: Pressure values
:param S: Entropy values
:param P_c: Critical pressure
:param S_c: Critical entropy
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(2)
self.parent().parent().parent().PS_T_btn.setChecked(True)
self.parent().parent().parent().PS_H_btn.setChecked(True)
x = S
y = P
crit_x = S_c
crit_y = P_c
# isotherms, isenthalps
temperatures = T_list
enthalpies = H_list
self.isotherms = []
self.isenthalps = []
for i in range(len(temperatures)):
p_vals, v_vals, s_vals, h_vals = tp.get_isotherm(temperatures[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, nmax=nmax)
if i == 0:
t_line, = self.axes.plot(s_vals, p_vals, color=isopleth_1_color, label="Isotherm")
else:
t_line, = self.axes.plot(s_vals, p_vals, color=isopleth_1_color)
self.isotherms.append(t_line)
t_vals, p_vals, v_vals, s_vals = tp.get_isenthalp(enthalpies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
h_line, = self.axes.plot(s_vals, p_vals, color=isopleth_2_color, label="Isenthalp")
else:
h_line, = self.axes.plot(s_vals, p_vals, color=isopleth_2_color)
self.isenthalps.append(h_line)
self.isentropes = None
self.isobars = None
return x, y, crit_x, crit_y
def plot_envelope_TH(self, tp, T, H, T_c, H_c, fractions):
"""
        Return plot data for a TH phase envelope
:param tp: Thermopack instance
:param T: Temperature values
:param H: Enthalpy values
:param T_c: Critical temperature
:param H_c: Critical enthalpy
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(3)
self.parent().parent().parent().TH_S_btn.setChecked(True)
self.parent().parent().parent().TH_P_btn.setChecked(True)
x = H
y = T
crit_x = H_c
crit_y = T_c
# isobars, isentropes
pressures = P_list
entropies = S_list
self.isobars = []
self.isentropes = []
for i in range(len(pressures)):
t_vals, v_vals, s_vals, h_vals = tp.get_isobar(pressures[i], fractions, minimum_temperature=200.0,
maximum_temperature=500.0, nmax=100)
if i == 0:
p_line, = self.axes.plot(h_vals, t_vals, color=isopleth_1_color, label="Isobar")
else:
p_line, = self.axes.plot(h_vals, t_vals, color=isopleth_1_color)
self.isobars.append(p_line)
t_vals, p_vals, v_vals, h_vals = tp.get_isentrope(entropies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
s_line, = self.axes.plot(h_vals, t_vals, color=isopleth_2_color, label="Isentrope")
else:
s_line, = self.axes.plot(h_vals, t_vals, color=isopleth_2_color)
self.isentropes.append(s_line)
self.isenthalps = None
self.isotherms = None
return x, y, crit_x, crit_y
def plot_envelope_TS(self, tp, T, S, T_c, S_c, fractions):
"""
        Return plot data for a TS phase envelope
:param tp: Thermopack instance
:param T: Temperature values
:param S: Entropy values
:param T_c: Critical temperature
:param S_c: Critical entropy
:param fractions: List of molar fractions
:return: x: x values for plot,
y: y values for plot,
crit_x: x value for critical point,
crit_y: y value for critical point,
"""
# Display correct buttons
self.parent().parent().parent().isopleth_btn_stack.setCurrentIndex(4)
self.parent().parent().parent().TS_P_btn.setChecked(True)
self.parent().parent().parent().TS_H_btn.setChecked(True)
x = S
y = T
crit_x = S_c
crit_y = T_c
# Isenthalps, isobars
pressures = P_list
enthalpies = H_list
self.isenthalps = []
self.isobars = []
for i in range(len(pressures)):
t_vals, v_vals, s_vals, h_vals = tp.get_isobar(pressures[i], fractions, minimum_temperature=T_min,
maximum_temperature=T_max)
if i == 0:
p_line, = self.axes.plot(s_vals, t_vals, color="#ffd2d2", label="Isobar")
else:
p_line, = self.axes.plot(s_vals, t_vals, color="#ffd2d2")
self.isobars.append(p_line)
t_vals, p_vals, v_vals, s_vals = tp.get_isenthalp(enthalpies[i], fractions, minimum_pressure=P_min,
maximum_pressure=P_max, minimum_temperature=T_min,
maximum_temperature=T_max, nmax=nmax)
if i == 0:
h_line, = self.axes.plot(s_vals, p_vals, color="#d5d3ff", label="Isenthalp")
else:
h_line, = self.axes.plot(s_vals, p_vals, color="#d5d3ff")
self.isenthalps.append(h_line)
self.isentropes = None
self.isotherms = None
return x, y, crit_x, crit_y
def plot_binary_pxy(self, tp):
"""
Plots a binary pxy plot
:param tp: Thermopack instance
"""
calc_settings = self.plotting_preferences["Binary pxy"]["Calc"]
plot_settings = self.plotting_preferences["Binary pxy"]["Plotting"]
T = calc_settings["Temperature"]
p_max = calc_settings["Maximum pressure"]
p_min = calc_settings["Minimum pressure"]
dz_max = calc_settings["Maximum dz"]
dlns_max = calc_settings["Maximum dlns"]
LLE, L1VE, L2VE = tp.get_binary_pxy(temp=T, maximum_pressure=p_max, minimum_pressure=p_min,
maximum_dz=dz_max, maximum_dlns=dlns_max)
line_color = plot_settings["Colors"][0]
if LLE[0] is not None:
self.axes.plot(LLE[0], LLE[2], color=line_color)
self.axes.plot(LLE[1], LLE[2], color=line_color)
if L1VE[0] is not None:
self.axes.plot(L1VE[0], L1VE[2], color=line_color)
self.axes.plot(L1VE[1], L1VE[2], color=line_color)
if L2VE[0] is not None:
self.axes.plot(L2VE[0], L2VE[2], color=line_color)
self.axes.plot(L2VE[1], L2VE[2], color=line_color)
grid_on = plot_settings["Grid on"]
title = plot_settings["Title"]
xlabel = plot_settings["x label"]
ylabel = plot_settings["y label"]
self.axes.grid(grid_on)
self.axes.set_title(title)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.draw()
def plot_pressure_density(self, tp, fractions):
"""
Plots a pressure density plot
:param tp: Thermopack instance
:param fractions: List of molar fractions
"""
calc_settings = self.plotting_preferences["Pressure density"]["Calc"]
tpv_settings = self.plotting_preferences["Pressure density"]["TPV"]
crit_settings = self.plotting_preferences["Pressure density"]["Critical"]
plot_settings = self.plotting_preferences["Pressure density"]["Plotting"]
p_initial = tpv_settings["Initial pressure"]
t_min = tpv_settings["Minimum temperature"]
p_max = tpv_settings["Maximum pressure"]
step_size = tpv_settings["Step size"]
# Calculate T, P, V
T_ph_env, P_ph_env, V_ph_env = tp.get_envelope_twophase(initial_pressure=p_initial, z=fractions,
maximum_pressure=p_max,
minimum_temperature=t_min, step_size=step_size,
calc_v=True)
crit_t_guess = crit_settings["Temperature"]
crit_v_guess = crit_settings["Volume"]
crit_tol = crit_settings["Error tolerance"]
# Calculate critical T, V, P
T_c, V_c, P_c = tp.critical(n=fractions, temp=crit_t_guess, v=crit_v_guess, tol=crit_tol)
T_list = calc_settings["Temperatures"]
V_start = V_c * calc_settings["Volume range start"]
V_end = V_c * calc_settings["Volume range end"]
V_num_points = calc_settings["Num points"]
V_list = np.linspace(V_start, V_end, V_num_points)
P_lists = []
for T in T_list:
P_list = []
for V in V_list:
P, = tp.pressure_tv(temp=T, volume=V, n=fractions)
P_list.append(P)
P_lists.append(P_list)
rho_list = 1 / V_list
title = plot_settings["Title"]
grid_on = plot_settings["Grid on"]
xlabel = plot_settings["x label"]
ylabel = plot_settings["y label"]
self.axes.plot([1 / v for v in V_ph_env], P_ph_env, label="Phase envelope")
self.axes.scatter([1 / V_c], [P_c], label="Critical point")
for i in range(len(P_lists)):
self.axes.plot(rho_list, P_lists[i], label=str(T_list[i]) + " K")
self.axes.set_title(title)
self.axes.grid(grid_on)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.axes.legend(loc="best")
self.draw()
def plot_global_binary(self, tp):
"""
Plots a binary pxy plot
:param tp: Thermopack instance
"""
calc_settings = self.plotting_preferences["Global binary"]["Calc"]
plot_settings = self.plotting_preferences["Global binary"]["Plotting"]
min_press = calc_settings["Minimum pressure"]
min_temp = calc_settings["Minimum temperature"]
azeotropes = calc_settings["Azeotropes"]
KSTYPE, VLE, LLVE, CRIT, AZ = tp.global_binary_plot(minimum_pressure=min_press, minimum_temperature=min_temp,
include_azeotropes=azeotropes)
colors = plot_settings["Colors"]
linestyles = ["-", "--", ":", "-."]
label = "VLE"
for i in range(len(VLE)):
self.axes.plot(VLE[i][:, 0], VLE[i][:, 1], linestyle=linestyles[0], color=colors[0], label=label)
label = None
label = "LLVE"
for i in range(len(LLVE)):
self.axes.plot(LLVE[i][:, 0], LLVE[i][:, 1], linestyle=linestyles[1], color=colors[1], label=label)
label = None
label = "CRIT"
for i in range(len(CRIT)):
self.axes.plot(CRIT[i][:, 0], CRIT[i][:, 1], linestyle=linestyles[2], color=colors[2], label=label)
label = None
label = "AZ"
for i in range(len(AZ)):
self.axes.plot(AZ[i][:, 0], AZ[i][:, 1], linestyle=linestyles[3], color=colors[3], label=label)
label = None
ks_strings = {
1: "I",
2: "II",
3: "III",
4: "IV",
5: "V"
}
title = plot_settings["Title"]
xlabel = plot_settings["x label"]
ylabel = plot_settings["y label"]
grid_on = plot_settings["Grid on"]
if title == "<NAME> and Scott type: ":
title += ks_strings[KSTYPE]
self.axes.set_title(title)
legend = self.axes.legend(loc="best", numpoints=1)
legend.get_frame().set_linewidth(0.0)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
self.axes.grid(grid_on)
self.draw()
| [
"matplotlib.figure.Figure",
"numpy.max",
"numpy.min",
"numpy.linspace"
] | [((369, 384), 'matplotlib.figure.Figure', 'Figure', ([], {'dpi': '(100)'}), '(dpi=100)\n', (375, 384), False, 'from matplotlib.figure import Figure\n'), ((21817, 21858), 'numpy.linspace', 'np.linspace', (['V_start', 'V_end', 'V_num_points'], {}), '(V_start, V_end, V_num_points)\n', (21828, 21858), True, 'import numpy as np\n'), ((4167, 4176), 'numpy.min', 'np.min', (['H'], {}), '(H)\n', (4173, 4176), True, 'import numpy as np\n'), ((4178, 4187), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (4184, 4187), True, 'import numpy as np\n'), ((4231, 4240), 'numpy.min', 'np.min', (['S'], {}), '(S)\n', (4237, 4240), True, 'import numpy as np\n'), ((4242, 4251), 'numpy.max', 'np.max', (['S'], {}), '(S)\n', (4248, 4251), True, 'import numpy as np\n'), ((4295, 4304), 'numpy.min', 'np.min', (['T'], {}), '(T)\n', (4301, 4304), True, 'import numpy as np\n'), ((4313, 4322), 'numpy.max', 'np.max', (['T'], {}), '(T)\n', (4319, 4322), True, 'import numpy as np\n'), ((4373, 4382), 'numpy.min', 'np.min', (['P'], {}), '(P)\n', (4379, 4382), True, 'import numpy as np\n'), ((4391, 4400), 'numpy.max', 'np.max', (['P'], {}), '(P)\n', (4397, 4400), True, 'import numpy as np\n')] |
import turtle
s = turtle.Screen()
t = turtle.Turtle()
s.title("Christmas Tree")
s.setup(width=800, height=600)
# Title on the window
pen = turtle.Turtle()
pen.speed(0)
pen.color("black")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Christmas Tree", align="center",font=("Arial", 24, "normal"))
# Starting position
t.up()
t.rt(90)
t.fd(100)
t.lt(90)
t.down()
# Stump
t.color("brown")
t.begin_fill()
t.fd(40)
t.lt(90)
t.fd(60)
t.lt(90)
t.fd(40)
t.lt(90)
t.fd(60)
t.end_fill()
t.up()
# First triangle
t.lt(180)
t.fd(60)
t.lt(90)
t.fd(20)
t.down()
t.color("green")
t.begin_fill()
t.rt(180)
t.fd(80)
t.lt(120)
t.fd(80)
t.lt(120)
t.fd(80)
t.end_fill()
t.up()
# Second Triangle
t.lt(180)
t.fd(80)
t.lt(120)
t.lt(90)
t.fd(20)
t.rt(90)
t.down()
t.begin_fill()
t.fd(35)
t.rt(120)
t.fd(70)
t.rt(120)
t.fd(70)
t.rt(120)
t.fd(35)
t.end_fill()
t.up()
# Third Triangle
t.fd(35)
t.rt(120)
t.fd(70)
t.lt(120)
t.lt(90)
t.fd(20)
t.rt(90)
t.down()
t.begin_fill()
t.fd(30)
t.rt(120)
t.fd(60)
t.rt(120)
t.fd(60)
t.rt(120)
t.fd(30)
t.end_fill()
t.up()
# Star
t.fd(30)
t.rt(120)
t.fd(60)
t.lt(120)
t.rt(180)
t.lt(90)
t.fd(15)
t.rt(90)
t.back(20)
t.color("yellow")
t.down()
t.begin_fill()
for i in range(5):
    t.forward(40)
    t.right(144)
t.end_fill()
t.hideturtle()
while True:
    s.update()
| [
"turtle.Screen",
"turtle.Turtle"
] | [((19, 34), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (32, 34), False, 'import turtle\n'), ((39, 54), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (52, 54), False, 'import turtle\n'), ((142, 157), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (155, 157), False, 'import turtle\n')] |
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from pxr import Usd, UsdGeom, Vt
import unittest
class TestUsdGeomMesh(unittest.TestCase):
    def test_ValidateTopology(self):
        """Tests helpers for validating mesh topology."""
        # sum(vertexCounts) != len(vertexIndices)
        faceVertexIndices = Vt.IntArray([0,1,2])
        faceVertexCounts = Vt.IntArray([2,2])
        valid,why = UsdGeom.Mesh.ValidateTopology(faceVertexIndices,
                                                  faceVertexCounts,
                                                  numPoints=3)
        self.assertFalse(valid)
        # Make sure we have a reason.
        self.assertTrue(why)
        # Negative vertex indices.
        faceVertexIndices = Vt.IntArray([0,-1,1])
        faceVertexCounts = Vt.IntArray([3])
        valid,why = UsdGeom.Mesh.ValidateTopology(faceVertexIndices,
                                                  faceVertexCounts,
                                                  numPoints=3)
        self.assertFalse(valid)
        # Make sure we have a reason.
        self.assertTrue(why)
        # Out of range vertex indices.
        faceVertexIndices = Vt.IntArray([1,2,3])
        faceVertexCounts = Vt.IntArray([3])
        valid,why = UsdGeom.Mesh.ValidateTopology(faceVertexIndices,
                                                  faceVertexCounts,
                                                  numPoints=3)
        self.assertFalse(valid)
        # Make sure we have a reason.
        self.assertTrue(why)
        # Valid topology.
        faceVertexIndices = Vt.IntArray([0,1,2,3,4,5])
        faceVertexCounts = Vt.IntArray([3,3])
        valid,why = UsdGeom.Mesh.ValidateTopology(faceVertexIndices,
                                                  faceVertexCounts,
                                                  numPoints=6)
        self.assertTrue(valid)
        # Should not have set a reason.
        self.assertFalse(why)
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"pxr.UsdGeom.Mesh.ValidateTopology",
"pxr.Vt.IntArray"
] | [((3048, 3063), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3061, 3063), False, 'import unittest\n'), ((1353, 1375), 'pxr.Vt.IntArray', 'Vt.IntArray', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1364, 1375), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((1401, 1420), 'pxr.Vt.IntArray', 'Vt.IntArray', (['[2, 2]'], {}), '([2, 2])\n', (1412, 1420), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((1441, 1520), 'pxr.UsdGeom.Mesh.ValidateTopology', 'UsdGeom.Mesh.ValidateTopology', (['faceVertexIndices', 'faceVertexCounts'], {'numPoints': '(3)'}), '(faceVertexIndices, faceVertexCounts, numPoints=3)\n', (1470, 1520), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((1785, 1808), 'pxr.Vt.IntArray', 'Vt.IntArray', (['[0, -1, 1]'], {}), '([0, -1, 1])\n', (1796, 1808), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((1834, 1850), 'pxr.Vt.IntArray', 'Vt.IntArray', (['[3]'], {}), '([3])\n', (1845, 1850), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((1872, 1951), 'pxr.UsdGeom.Mesh.ValidateTopology', 'UsdGeom.Mesh.ValidateTopology', (['faceVertexIndices', 'faceVertexCounts'], {'numPoints': '(3)'}), '(faceVertexIndices, faceVertexCounts, numPoints=3)\n', (1901, 1951), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((2219, 2241), 'pxr.Vt.IntArray', 'Vt.IntArray', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2230, 2241), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((2267, 2283), 'pxr.Vt.IntArray', 'Vt.IntArray', (['[3]'], {}), '([3])\n', (2278, 2283), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((2305, 2384), 'pxr.UsdGeom.Mesh.ValidateTopology', 'UsdGeom.Mesh.ValidateTopology', (['faceVertexIndices', 'faceVertexCounts'], {'numPoints': '(3)'}), '(faceVertexIndices, faceVertexCounts, numPoints=3)\n', (2334, 2384), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((2640, 2671), 'pxr.Vt.IntArray', 'Vt.IntArray', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (2651, 2671), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((2694, 2713), 'pxr.Vt.IntArray', 'Vt.IntArray', (['[3, 3]'], {}), '([3, 3])\n', (2705, 2713), False, 'from pxr import Usd, UsdGeom, Vt\n'), ((2734, 2813), 'pxr.UsdGeom.Mesh.ValidateTopology', 'UsdGeom.Mesh.ValidateTopology', (['faceVertexIndices', 'faceVertexCounts'], {'numPoints': '(6)'}), '(faceVertexIndices, faceVertexCounts, numPoints=6)\n', (2763, 2813), False, 'from pxr import Usd, UsdGeom, Vt\n')] |
from parse import parse
class Actions:
    def __init__(self):
        self.actions = {}
        self.unused = set()
        self.used = set()
    # TODO: Refactor: there should be an Action class, and it should be returned by this function.
    def add_action(self, action_name):
        action_name = action_name.lower()
        if self.actions.get(action_name) is not None:
            raise DuplicatedActionException("Action {} already exists".format(action_name))
        self.actions[action_name] = []
        self.unused.add(action_name)
    def add_event(self, action_name, event):
        action_name = action_name.lower()
        events = self.actions.get(action_name)
        if events is None:
            possible = ','.join(list(self.actions))
            raise UndefinedActionException("Undefined action {}. Possible values: {}".format(action_name, possible))
        events.append(event)
    def get_action(self, action_name):
        action_name = action_name.lower()
        return self.actions.get(action_name)
    def get_steps_to_execute(self, action_name):
        events, parameters = self.__match_action(action_name)
        if events is None:
            possible = ','.join(list(self.actions))
            raise UndefinedActionException("Undefined action {}. Possible values: {}".format(action_name, possible))
        assert events is not None
        steps_to_execute = ''
        for event in events:
            step_event = self.__replace_parameters(event, parameters)
            steps_to_execute += step_event + '\n'
        return steps_to_execute
    def get_unused_actions(self):
        unused_actions = list(self.unused)
        return unused_actions
    def was_used(self, action_name):
        return action_name in self.used
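    # Match the given action name against the registered action patterns (via parse) and extract its named parameters.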
    def __match_action(self, action_name):
        for action_type in self.actions.keys():
            r = parse(action_type, action_name)
            if r:
                self.unused.discard(action_type)
                self.used.add(action_type)
                return self.actions[action_type], r.named
        return None, None
    def __replace_parameters(self, step, parameters):
        for parameter, value in parameters.items():
            token_to_find = "{" + parameter + "}"
            step = step.replace(token_to_find, value)
        return step
class DuplicatedActionException(Exception):
    pass
class UndefinedActionException(Exception):
    pass
| [
"parse.parse"
] | [((1878, 1909), 'parse.parse', 'parse', (['action_type', 'action_name'], {}), '(action_type, action_name)\n', (1883, 1909), False, 'from parse import parse\n')] |
import argparse
import json
from time import time
import os
import shutil
import numpy as np
import torch
from datasets.oxford import get_dataloaders
from datasets.boreas import get_dataloaders_boreas
from datasets.radiate import get_dataloaders_radiate
from networks.under_the_radar import UnderTheRadar
from networks.hero import HERO
from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError
from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, \
draw_weights, draw_keypoints, draw_src_tgt_matches
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = True
torch.backends.cudnn.deterministic = True
def build_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True)
    parser.add_argument('--checkpoint', type=str, required=True)
    parser.add_argument('-no-vis', '--no-visualization', action='store_true')
    parser.add_argument('-out-fld', '--out-folder', type=str, required=True)
    return parser
def makedirs_for_visualization(out_folder):
    os.makedirs(os.path.join(out_folder, 'radar'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'mask'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'masked_radar_vis'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'detector_scores'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'weights'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'keypoints'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'keypoints_only_masked'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'keypoints_all'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'keypoints_on_detector_scores'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'keypoints_on_detector_scores_only_masked'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'keypoints_on_detector_scores_all'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'src_tgt_matches'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'src_tgt_matches_only_masked'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'src_tgt_matches_all'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'src_tgt_matches_on_detector_scores'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'src_tgt_matches_on_detector_scores_only_masked'), exist_ok=True)
    os.makedirs(os.path.join(out_folder, 'src_tgt_matches_on_detector_scores_all'), exist_ok=True)
def visualize(batchi, batch, out, config, out_folder):
    radar_img = draw_radar(batch, i=1)
    radar_img.save(os.path.join(out_folder, 'radar/radar_{}.png'.format(batchi+1)))
    mask_img = draw_mask(batch, i=1)
    mask_img.save(os.path.join(out_folder, 'mask/mask_{}.png'.format(batchi+1)))
    masked_radar_img = draw_masked_radar(batch, i=1)
    masked_radar_img.save(os.path.join(out_folder, 'masked_radar_vis/masked_radar_vis_{}.png'.format(batchi+1)))
    detector_scores_img = draw_detector_scores(out, i=1)
    detector_scores_img.save(os.path.join(out_folder, 'detector_scores/detector_scores_{}.png'.format(batchi+1)))
    weights_img = draw_weights(out, i=1)
    weights_img.save(os.path.join(out_folder, 'weights/weights_{}.png'.format(batchi+1)))
    keypoints_img = draw_keypoints(batch, out, config, i=1, draw_uncertainty_scale=20)
    keypoints_img.save(os.path.join(out_folder, 'keypoints/keypoints_{}.png'.format(batchi+1)))
    keypoints_only_masked_img = draw_keypoints(batch, out, config, i=1, filtering='mask')
    keypoints_only_masked_img.save(os.path.join(out_folder, 'keypoints_only_masked/keypoints_only_masked_{}.png'.format(batchi+1)))
    keypoints_all_img = draw_keypoints(batch, out, config, i=1, filtering='none')
    keypoints_all_img.save(os.path.join(out_folder, 'keypoints_all/keypoints_all_{}.png'.format(batchi+1)))
    keypoints_on_detector_scores_img = draw_keypoints(batch, out, config, i=1, draw_on='detector_scores', draw_uncertainty_scale=20)
    keypoints_on_detector_scores_img.save(os.path.join(out_folder,
        'keypoints_on_detector_scores/keypoints_on_detector_scores_{}.png'.format(batchi+1)))
    keypoints_on_detector_scores_only_masked_img = draw_keypoints(batch, out, config, i=1, draw_on='detector_scores', filtering='mask')
    keypoints_on_detector_scores_only_masked_img.save(os.path.join(out_folder,
        'keypoints_on_detector_scores_only_masked/keypoints_on_detector_scores_only_masked_{}.png'.format(batchi+1)))
    keypoints_on_detector_scores_all_img = draw_keypoints(batch, out, config, i=1, draw_on='detector_scores', filtering='none')
    keypoints_on_detector_scores_all_img.save(os.path.join(out_folder,
        'keypoints_on_detector_scores_all/keypoints_on_detector_scores_all_{}.png'.format(batchi+1)))
    src_tgt_matches_img = draw_src_tgt_matches(batch, out, config, draw_uncertainty_scale=20)
    src_tgt_matches_img.save(os.path.join(out_folder,
        'src_tgt_matches/src_tgt_matches_{}.png'.format(batchi)))
    src_tgt_matches_only_masked_img = draw_src_tgt_matches(batch, out, config, filtering='mask')
    src_tgt_matches_only_masked_img.save(os.path.join(out_folder,
        'src_tgt_matches_only_masked/src_tgt_matches_only_masked_{}.png'.format(batchi)))
    src_tgt_matches_all_img = draw_src_tgt_matches(batch, out, config, filtering='none')
    src_tgt_matches_all_img.save(os.path.join(out_folder,
        'src_tgt_matches_all/src_tgt_matches_all_{}.png'.format(batchi)))
    src_tgt_matches_on_detector_scores_img = draw_src_tgt_matches(batch, out, config, draw_on='detector_scores', draw_uncertainty_scale=20)
    src_tgt_matches_on_detector_scores_img.save(os.path.join(out_folder,
        'src_tgt_matches_on_detector_scores/src_tgt_matches_on_detector_scores_{}.png'.format(batchi)))
    src_tgt_matches_on_detector_scores_only_masked_img = draw_src_tgt_matches(batch, out, config, draw_on='detector_scores', filtering='mask')
    src_tgt_matches_on_detector_scores_only_masked_img.save(os.path.join(out_folder,
        'src_tgt_matches_on_detector_scores_only_masked/src_tgt_matches_on_detector_scores_only_masked_{}.png'.format(batchi)))
    src_tgt_matches_on_detector_scores_all_img = draw_src_tgt_matches(batch, out, config, draw_on='detector_scores', filtering='none')
    src_tgt_matches_on_detector_scores_all_img.save(os.path.join(out_folder,
        'src_tgt_matches_on_detector_scores_all/src_tgt_matches_on_detector_scores_all_{}.png'.format(batchi)))
def print_used_time(model):
print("Time used:")
print(" All: {} s".format(np.mean(model.time_used['all'])))
print(" Feature map extraction: {} s".format(np.mean(model.time_used['feature_map_extraction'])))
print(" Keypoint extraction: {} s".format(np.mean(model.time_used['keypoint_extraction'])))
print(" Keypoint matching: {} s".format(np.mean(model.time_used['keypoint_matching'])))
print(" Optimization: {} s".format(np.mean(model.time_used['optimization'])))
if __name__ == '__main__':
    torch.set_num_threads(8)
    parser = build_parser()
    args = parser.parse_args()
    out_folder = args.out_folder
    with_visualization = not args.no_visualization
    os.makedirs(out_folder, exist_ok=True)
    with open(args.config) as f:
        config = json.load(f)
    config_copy = os.path.join(out_folder, os.path.basename(args.config))
    if args.config != config_copy:
        shutil.copy(args.config, config_copy)
    if config['model'] == 'UnderTheRadar':
        model = UnderTheRadar(config).to(config['gpuid'])
    elif config['model'] == 'HERO':
        model = HERO(config).to(config['gpuid'])
        model.solver.sliding_flag = False
    checkpoint = torch.load(args.checkpoint, map_location=torch.device(config['gpuid']))
    failed = False
    try:
        model.load_state_dict(checkpoint['model_state_dict'], strict=False)
    except Exception as e:
        print(e)
        failed = True
    if failed:
        model.load_state_dict(checkpoint, strict=False)
    model.eval()
    model.no_throw = True
    seq_name_all = list()
    time_used_all = list()
    T_gt_all = list()
    T_pred_all = list()
    t_err_all = list()
    r_err_all = list()
    seq_nums = config['test_split']
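    # Evaluate each test sequence independently, accumulating timing and KITTI-style odometry errors.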
    for seq_num in seq_nums:
        config['test_split'] = [seq_num]
        if config['dataset'] == 'oxford':
            _, _, test_loader = get_dataloaders(config)
        elif config['dataset'] == 'boreas':
            _, _, test_loader = get_dataloaders_boreas(config)
        elif config['dataset'] == 'radiate':
            _, _, test_loader = get_dataloaders_radiate(config)
        seq_len = test_loader.dataset.seq_lens[0]
        seq_name = test_loader.dataset.sequences[0]
        time_used = list()
        T_gt = list()
        T_pred = list()
        print('Evaluating sequence {} (len {}): {}'.format(seq_num, seq_len, seq_name))
        if with_visualization:
            out_vis_folder = os.path.join(out_folder, seq_name)
            makedirs_for_visualization(out_vis_folder)
        model.solver.solver_cpp.resetTraj()
        for batchi, batch in enumerate(test_loader):
            ts = time()
            with torch.no_grad():
                out = model(batch)
            if out['exception'] is not None:
                fail_folder = os.path.join(out_folder, 'failed_{}'.format(batchi))
                os.makedirs(fail_folder, exist_ok=True)
                makedirs_for_visualization(fail_folder)
                visualize(batchi, batch, out, config, fail_folder)
                print_used_time(model)
                raise out['exception']
            if with_visualization and batchi % config['vis_rate'] == 0:
                visualize(batchi, batch, out, config, out_vis_folder)
            if config['model'] == 'UnderTheRadar':
                if 'T_21' in batch:
                    T_gt.append(batch['T_21'][0].numpy().squeeze())
                R_pred = out['R'][0].detach().cpu().numpy().squeeze()
                t_pred = out['t'][0].detach().cpu().numpy().squeeze()
                T_pred.append(get_transform2(R_pred, t_pred))
            elif config['model'] == 'HERO':
                if batchi == len(test_loader) - 1:
                    for w in range(config['window_size'] - 1):
                        if 'T_21' in batch:
                            T_gt.append(batch['T_21'][w].numpy().squeeze())
                        T_pred.append(get_T_ba(out, a=w, b=w+1))
                else:
                    w = 0
                    if 'T_21' in batch:
                        T_gt.append(batch['T_21'][w].numpy().squeeze())
                    T_pred.append(get_T_ba(out, a=w, b=w+1))
            time_used.append(time() - ts)
            if (batchi + 1) % config['print_rate'] == 0:
                print('Eval Batch {} / {}: {:.2}s'.format(batchi, len(test_loader), np.mean(time_used[-config['print_rate']:])))
        time_used_all.extend(time_used)
        if len(T_gt) > 0:
            seq_name_all.append(seq_name)
            T_gt_all.extend(T_gt)
            T_pred_all.extend(T_pred)
            t_err, r_err = computeKittiMetrics(T_gt, T_pred, [len(T_gt)])
            print('SEQ: {} : {}'.format(seq_num, seq_name))
            print('KITTI t_err: {} %'.format(t_err))
            print('KITTI r_err: {} deg/m'.format(r_err))
            t_err_all.append(t_err)
            r_err_all.append(r_err)
        fname = os.path.join(out_folder, seq_name + '.png')
        if len(T_gt) > 0:
            plot_sequences(T_gt, T_pred, [len(T_pred)], returnTensor=False, savePDF=True, fnames=[fname])
        else:
            plot_sequences(T_pred, T_pred, [len(T_pred)], returnTensor=False, savePDF=True, fnames=[fname])
    print('time_used: {}'.format(sum(time_used_all) / len(time_used_all)))
    if len(T_gt_all) > 0:
        results = computeMedianError(T_gt_all, T_pred_all)
        print('dt: {} sigma_dt: {} dr: {} sigma_dr: {}'.format(results[0], results[1], results[2], results[3]))
        t_err_mean = np.mean(t_err_all)
        r_err_mean = np.mean(r_err_all)
        print('Average KITTI metrics over all test sequences:')
        print('KITTI t_err: {} %'.format(t_err_mean))
        print('KITTI r_err: {} deg/m'.format(r_err_mean))
        with open(os.path.join(out_folder, 'metrics.txt'), 'w') as f:
            f.write('sequence name: translation error (%) rotation error (deg/m)\n')
            for seq_name, t_err, r_err in zip(seq_name_all, t_err_all, r_err_all):
                line = '{}: {} {}\n'.format(seq_name, t_err, r_err)
                f.write(line)
            f.write("\n")
            f.write("mean: {} {}\n".format(t_err_mean, r_err_mean))
    print_used_time(model)
| [
"argparse.ArgumentParser",
"utils.vis.draw_masked_radar",
"utils.vis.draw_src_tgt_matches",
"utils.vis.draw_radar",
"torch.set_num_threads",
"numpy.mean",
"datasets.boreas.get_dataloaders_boreas",
"torch.device",
"datasets.radiate.get_dataloaders_radiate",
"torch.no_grad",
"os.path.join",
"shutil.copy",
"utils.vis.draw_mask",
"networks.under_the_radar.UnderTheRadar",
"utils.utils.computeMedianError",
"datasets.oxford.get_dataloaders",
"os.path.basename",
"utils.utils.get_transform2",
"utils.utils.get_T_ba",
"utils.vis.draw_weights",
"json.load",
"os.makedirs",
"utils.vis.draw_detector_scores",
"time.time",
"networks.hero.HERO",
"utils.vis.draw_keypoints"
] | [((739, 764), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (762, 764), False, 'import argparse\n'), ((2589, 2611), 'utils.vis.draw_radar', 'draw_radar', (['batch'], {'i': '(1)'}), '(batch, i=1)\n', (2599, 2611), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((2712, 2733), 'utils.vis.draw_mask', 'draw_mask', (['batch'], {'i': '(1)'}), '(batch, i=1)\n', (2721, 2733), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((2839, 2868), 'utils.vis.draw_masked_radar', 'draw_masked_radar', (['batch'], {'i': '(1)'}), '(batch, i=1)\n', (2856, 2868), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3009, 3039), 'utils.vis.draw_detector_scores', 'draw_detector_scores', (['out'], {'i': '(1)'}), '(out, i=1)\n', (3029, 3039), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3173, 3195), 'utils.vis.draw_weights', 'draw_weights', (['out'], {'i': '(1)'}), '(out, i=1)\n', (3185, 3195), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3307, 3373), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'draw_uncertainty_scale': '(20)'}), '(batch, out, config, i=1, draw_uncertainty_scale=20)\n', (3321, 3373), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3503, 3560), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'filtering': '"""mask"""'}), "(batch, out, config, i=1, filtering='mask')\n", (3517, 3560), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3718, 3775), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'filtering': '"""none"""'}), "(batch, out, config, i=1, filtering='none')\n", (3732, 3775), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((3924, 4021), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'draw_on': '"""detector_scores"""', 'draw_uncertainty_scale': '(20)'}), "(batch, out, config, i=1, draw_on='detector_scores',\n draw_uncertainty_scale=20)\n", (3938, 4021), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((4231, 4319), 'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'draw_on': '"""detector_scores"""', 'filtering': '"""mask"""'}), "(batch, out, config, i=1, draw_on='detector_scores',\n filtering='mask')\n", (4245, 4319), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((4557, 4645), 
'utils.vis.draw_keypoints', 'draw_keypoints', (['batch', 'out', 'config'], {'i': '(1)', 'draw_on': '"""detector_scores"""', 'filtering': '"""none"""'}), "(batch, out, config, i=1, draw_on='detector_scores',\n filtering='none')\n", (4571, 4645), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((4842, 4909), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'draw_uncertainty_scale': '(20)'}), '(batch, out, config, draw_uncertainty_scale=20)\n', (4862, 4909), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((5069, 5127), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'filtering': '"""mask"""'}), "(batch, out, config, filtering='mask')\n", (5089, 5127), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((5315, 5373), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'filtering': '"""none"""'}), "(batch, out, config, filtering='none')\n", (5335, 5373), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((5552, 5650), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'draw_on': '"""detector_scores"""', 'draw_uncertainty_scale': '(20)'}), "(batch, out, config, draw_on='detector_scores',\n draw_uncertainty_scale=20)\n", (5572, 5650), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((5882, 5971), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'draw_on': '"""detector_scores"""', 'filtering': '"""mask"""'}), "(batch, out, config, draw_on='detector_scores',\n filtering='mask')\n", (5902, 5971), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((6231, 6320), 'utils.vis.draw_src_tgt_matches', 'draw_src_tgt_matches', (['batch', 'out', 'config'], {'draw_on': '"""detector_scores"""', 'filtering': '"""none"""'}), "(batch, out, config, draw_on='detector_scores',\n filtering='none')\n", (6251, 6320), False, 'from utils.vis import plot_sequences, draw_radar, draw_mask, draw_masked_radar, draw_detector_scores, draw_weights, draw_keypoints, draw_src_tgt_matches\n'), ((7034, 7058), 'torch.set_num_threads', 'torch.set_num_threads', (['(8)'], {}), '(8)\n', (7055, 7058), False, 'import torch\n'), ((7207, 7245), 'os.makedirs', 'os.makedirs', (['out_folder'], {'exist_ok': '(True)'}), '(out_folder, exist_ok=True)\n', (7218, 7245), False, 'import os\n'), ((1126, 1159), 'os.path.join', 'os.path.join', (['out_folder', '"""radar"""'], {}), "(out_folder, 'radar')\n", (1138, 1159), False, 'import os\n'), ((1192, 1224), 'os.path.join', 'os.path.join', (['out_folder', '"""mask"""'], {}), "(out_folder, 'mask')\n", (1204, 1224), False, 'import os\n'), ((1257, 1301), 'os.path.join', 'os.path.join', (['out_folder', '"""masked_radar_vis"""'], {}), "(out_folder, 'masked_radar_vis')\n", (1269, 1301), False, 'import os\n'), ((1334, 1377), 'os.path.join', 
'os.path.join', (['out_folder', '"""detector_scores"""'], {}), "(out_folder, 'detector_scores')\n", (1346, 1377), False, 'import os\n'), ((1410, 1445), 'os.path.join', 'os.path.join', (['out_folder', '"""weights"""'], {}), "(out_folder, 'weights')\n", (1422, 1445), False, 'import os\n'), ((1478, 1515), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints"""'], {}), "(out_folder, 'keypoints')\n", (1490, 1515), False, 'import os\n'), ((1548, 1597), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_only_masked"""'], {}), "(out_folder, 'keypoints_only_masked')\n", (1560, 1597), False, 'import os\n'), ((1630, 1671), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_all"""'], {}), "(out_folder, 'keypoints_all')\n", (1642, 1671), False, 'import os\n'), ((1704, 1760), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_on_detector_scores"""'], {}), "(out_folder, 'keypoints_on_detector_scores')\n", (1716, 1760), False, 'import os\n'), ((1793, 1861), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_on_detector_scores_only_masked"""'], {}), "(out_folder, 'keypoints_on_detector_scores_only_masked')\n", (1805, 1861), False, 'import os\n'), ((1894, 1954), 'os.path.join', 'os.path.join', (['out_folder', '"""keypoints_on_detector_scores_all"""'], {}), "(out_folder, 'keypoints_on_detector_scores_all')\n", (1906, 1954), False, 'import os\n'), ((1987, 2030), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches"""'], {}), "(out_folder, 'src_tgt_matches')\n", (1999, 2030), False, 'import os\n'), ((2063, 2118), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_only_masked"""'], {}), "(out_folder, 'src_tgt_matches_only_masked')\n", (2075, 2118), False, 'import os\n'), ((2151, 2198), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_all"""'], {}), "(out_folder, 'src_tgt_matches_all')\n", (2163, 2198), False, 'import os\n'), ((2231, 2293), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_on_detector_scores"""'], {}), "(out_folder, 'src_tgt_matches_on_detector_scores')\n", (2243, 2293), False, 'import os\n'), ((2326, 2400), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_on_detector_scores_only_masked"""'], {}), "(out_folder, 'src_tgt_matches_on_detector_scores_only_masked')\n", (2338, 2400), False, 'import os\n'), ((2433, 2499), 'os.path.join', 'os.path.join', (['out_folder', '"""src_tgt_matches_on_detector_scores_all"""'], {}), "(out_folder, 'src_tgt_matches_on_detector_scores_all')\n", (2445, 2499), False, 'import os\n'), ((7297, 7309), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7306, 7309), False, 'import json\n'), ((7353, 7382), 'os.path.basename', 'os.path.basename', (['args.config'], {}), '(args.config)\n', (7369, 7382), False, 'import os\n'), ((7427, 7464), 'shutil.copy', 'shutil.copy', (['args.config', 'config_copy'], {}), '(args.config, config_copy)\n', (7438, 7464), False, 'import shutil\n'), ((11434, 11477), 'os.path.join', 'os.path.join', (['out_folder', "(seq_name + '.png')"], {}), "(out_folder, seq_name + '.png')\n", (11446, 11477), False, 'import os\n'), ((11853, 11893), 'utils.utils.computeMedianError', 'computeMedianError', (['T_gt_all', 'T_pred_all'], {}), '(T_gt_all, T_pred_all)\n', (11871, 11893), False, 'from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError\n'), ((12028, 12046), 'numpy.mean', 'np.mean', (['t_err_all'], {}), '(t_err_all)\n', (12035, 12046), True, 'import numpy as np\n'), ((12068, 12086), 
'numpy.mean', 'np.mean', (['r_err_all'], {}), '(r_err_all)\n', (12075, 12086), True, 'import numpy as np\n'), ((6591, 6622), 'numpy.mean', 'np.mean', (["model.time_used['all']"], {}), "(model.time_used['all'])\n", (6598, 6622), True, 'import numpy as np\n'), ((6675, 6725), 'numpy.mean', 'np.mean', (["model.time_used['feature_map_extraction']"], {}), "(model.time_used['feature_map_extraction'])\n", (6682, 6725), True, 'import numpy as np\n'), ((6775, 6822), 'numpy.mean', 'np.mean', (["model.time_used['keypoint_extraction']"], {}), "(model.time_used['keypoint_extraction'])\n", (6782, 6822), True, 'import numpy as np\n'), ((6870, 6915), 'numpy.mean', 'np.mean', (["model.time_used['keypoint_matching']"], {}), "(model.time_used['keypoint_matching'])\n", (6877, 6915), True, 'import numpy as np\n'), ((6958, 6998), 'numpy.mean', 'np.mean', (["model.time_used['optimization']"], {}), "(model.time_used['optimization'])\n", (6965, 6998), True, 'import numpy as np\n'), ((7753, 7782), 'torch.device', 'torch.device', (["config['gpuid']"], {}), "(config['gpuid'])\n", (7765, 7782), False, 'import torch\n'), ((8394, 8417), 'datasets.oxford.get_dataloaders', 'get_dataloaders', (['config'], {}), '(config)\n', (8409, 8417), False, 'from datasets.oxford import get_dataloaders\n'), ((8959, 8993), 'os.path.join', 'os.path.join', (['out_folder', 'seq_name'], {}), '(out_folder, seq_name)\n', (8971, 8993), False, 'import os\n'), ((9164, 9170), 'time.time', 'time', ([], {}), '()\n', (9168, 9170), False, 'from time import time\n'), ((7525, 7546), 'networks.under_the_radar.UnderTheRadar', 'UnderTheRadar', (['config'], {}), '(config)\n', (7538, 7546), False, 'from networks.under_the_radar import UnderTheRadar\n'), ((8494, 8524), 'datasets.boreas.get_dataloaders_boreas', 'get_dataloaders_boreas', (['config'], {}), '(config)\n', (8516, 8524), False, 'from datasets.boreas import get_dataloaders_boreas\n'), ((9189, 9204), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9202, 9204), False, 'import torch\n'), ((9386, 9425), 'os.makedirs', 'os.makedirs', (['fail_folder'], {'exist_ok': '(True)'}), '(fail_folder, exist_ok=True)\n', (9397, 9425), False, 'import os\n'), ((12282, 12321), 'os.path.join', 'os.path.join', (['out_folder', '"""metrics.txt"""'], {}), "(out_folder, 'metrics.txt')\n", (12294, 12321), False, 'import os\n'), ((7619, 7631), 'networks.hero.HERO', 'HERO', (['config'], {}), '(config)\n', (7623, 7631), False, 'from networks.hero import HERO\n'), ((8602, 8633), 'datasets.radiate.get_dataloaders_radiate', 'get_dataloaders_radiate', (['config'], {}), '(config)\n', (8625, 8633), False, 'from datasets.radiate import get_dataloaders_radiate\n'), ((10096, 10126), 'utils.utils.get_transform2', 'get_transform2', (['R_pred', 't_pred'], {}), '(R_pred, t_pred)\n', (10110, 10126), False, 'from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError\n'), ((10722, 10728), 'time.time', 'time', ([], {}), '()\n', (10726, 10728), False, 'from time import time\n'), ((10876, 10918), 'numpy.mean', 'np.mean', (["time_used[-config['print_rate']:]"], {}), "(time_used[-config['print_rate']:])\n", (10883, 10918), True, 'import numpy as np\n'), ((10665, 10692), 'utils.utils.get_T_ba', 'get_T_ba', (['out'], {'a': 'w', 'b': '(w + 1)'}), '(out, a=w, b=w + 1)\n', (10673, 10692), False, 'from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError\n'), ((10444, 10471), 'utils.utils.get_T_ba', 'get_T_ba', (['out'], {'a': 'w', 'b': '(w + 1)'}), '(out, a=w, b=w + 1)\n', (10452, 10471), 
False, 'from utils.utils import get_transform2, get_T_ba, computeKittiMetrics, computeMedianError\n')] |
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Vehicle)
admin.site.register(VehicleLogging)
admin.site.register(RegisteredUserLogging)
admin.site.register(VisitorUserLogging) | [
"django.contrib.admin.site.register"
] | [((84, 112), 'django.contrib.admin.site.register', 'admin.site.register', (['Vehicle'], {}), '(Vehicle)\n', (103, 112), False, 'from django.contrib import admin\n'), ((113, 148), 'django.contrib.admin.site.register', 'admin.site.register', (['VehicleLogging'], {}), '(VehicleLogging)\n', (132, 148), False, 'from django.contrib import admin\n'), ((149, 191), 'django.contrib.admin.site.register', 'admin.site.register', (['RegisteredUserLogging'], {}), '(RegisteredUserLogging)\n', (168, 191), False, 'from django.contrib import admin\n'), ((192, 231), 'django.contrib.admin.site.register', 'admin.site.register', (['VisitorUserLogging'], {}), '(VisitorUserLogging)\n', (211, 231), False, 'from django.contrib import admin\n')] |
#!/usr/bin/env python
from distutils.core import setup
setup(name='aifin',
version='1.0.1',
description='Python Distribution Utilities',
author='<NAME>',
author_email='<EMAIL>',
url='aitroopers.com',
packages=['aifin'],
install_requires=[
'pandas','scipy'
]
)
| [
"distutils.core.setup"
] | [((57, 274), 'distutils.core.setup', 'setup', ([], {'name': '"""aifin"""', 'version': '"""1.0.1"""', 'description': '"""Python Distribution Utilities"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""aitroopers.com"""', 'packages': "['aifin']", 'install_requires': "['pandas', 'scipy']"}), "(name='aifin', version='1.0.1', description=\n 'Python Distribution Utilities', author='<NAME>', author_email=\n '<EMAIL>', url='aitroopers.com', packages=['aifin'], install_requires=[\n 'pandas', 'scipy'])\n", (62, 274), False, 'from distutils.core import setup\n')] |
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Smoke test for Cloud Endpoints support in auth component.
It launches app via dev_appserver and queries a bunch of cloud endpoints
methods.
"""
import unittest
import os
import test_env
test_env.setup_test_env()
from support import local_app
# /components/tests/.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# /components/tests/endpoints_app/.
TEST_APP_DIR = os.path.join(THIS_DIR, 'endpoints_app')
class CloudEndpointsSmokeTest(unittest.TestCase):
  def setUp(self):
    super(CloudEndpointsSmokeTest, self).setUp()
    self.app = local_app.LocalApplication(TEST_APP_DIR, 9700)
    self.app.start()
    self.app.ensure_serving()
  def tearDown(self):
    try:
      self.app.stop()
      if self.has_failed():
        self.app.dump_log()
    finally:
      super(CloudEndpointsSmokeTest, self).tearDown()
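  # True when any check in the current test has failed; used to decide whether to dump the app log.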
  def has_failed(self):
    # pylint: disable=E1101
    return not self._resultForDoCleanups.wasSuccessful()
  def test_smoke(self):
    self.check_who_anonymous()
    self.check_who_authenticated()
    self.check_host_token()
    self.check_forbidden()
  def check_who_anonymous(self):
    response = self.app.client.json_request('/_ah/api/testing_service/v1/who')
    self.assertEqual(200, response.http_code)
    self.assertEqual('anonymous:anonymous', response.body.get('identity'))
    self.assertIn(response.body.get('ip'), ('127.0.0.1', '0:0:0:0:0:0:0:1'))
  def check_who_authenticated(self):
    # TODO(vadimsh): Testing this requires interacting with real OAuth2 service
    # to get OAuth2 token. It's doable, but the service account secrets had to
    # be hardcoded into the source code. I'm not sure it's a good idea.
    pass
  def check_forbidden(self):
    response = self.app.client.json_request(
        '/_ah/api/testing_service/v1/forbidden')
    self.assertEqual(403, response.http_code)
    expected = {
      u'error': {
        u'code': 403,
        u'errors': [
          {
            u'domain': u'global',
            u'message': u'Forbidden',
            u'reason': u'forbidden',
          }
        ],
        u'message': u'Forbidden',
      },
    }
    self.assertEqual(expected, response.body)
  def check_host_token(self):
    # Create token first.
    response = self.app.client.json_request(
        '/_ah/api/testing_service/v1/create_host_token', {'host': 'host-name'})
    self.assertEqual(200, response.http_code)
    token = response.body.get('host_token')
    self.assertTrue(token)
    # Verify it is usable.
    response = self.app.client.json_request(
        '/_ah/api/testing_service/v1/who', headers={'X-Host-Token-V1': token})
    self.assertEqual(200, response.http_code)
    self.assertEqual('host-name', response.body.get('host'))
if __name__ == '__main__':
  unittest.main()
| [
"unittest.main",
"os.path.abspath",
"support.local_app.LocalApplication",
"os.path.join",
"test_env.setup_test_env"
] | [((381, 406), 'test_env.setup_test_env', 'test_env.setup_test_env', ([], {}), '()\n', (404, 406), False, 'import test_env\n'), ((567, 606), 'os.path.join', 'os.path.join', (['THIS_DIR', '"""endpoints_app"""'], {}), "(THIS_DIR, 'endpoints_app')\n", (579, 606), False, 'import os\n'), ((489, 514), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (504, 514), False, 'import os\n'), ((2938, 2953), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2951, 2953), False, 'import unittest\n'), ((742, 788), 'support.local_app.LocalApplication', 'local_app.LocalApplication', (['TEST_APP_DIR', '(9700)'], {}), '(TEST_APP_DIR, 9700)\n', (768, 788), False, 'from support import local_app\n')] |
# Generated by Django 2.0.5 on 2018-05-23 01:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hrs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='dept',
name='excellent',
field=models.BooleanField(default=0, verbose_name='是否优秀'),
),
migrations.AlterField(
model_name='dept',
name='location',
field=models.CharField(max_length=10, verbose_name='部门所在地'),
),
migrations.AlterField(
model_name='dept',
name='name',
field=models.CharField(max_length=20, verbose_name='部门名称'),
),
migrations.AlterField(
model_name='dept',
name='no',
field=models.IntegerField(primary_key=True, serialize=False, verbose_name='部门编号'),
),
migrations.AlterField(
model_name='emp',
name='comm',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True),
),
migrations.AlterField(
model_name='emp',
name='mgr',
field=models.IntegerField(blank=True, null=True),
),
]
| [
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.db.models.DecimalField"
] | [((320, 371), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(0)', 'verbose_name': '"""是否优秀"""'}), "(default=0, verbose_name='是否优秀')\n", (339, 371), False, 'from django.db import migrations, models\n'), ((493, 546), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'verbose_name': '"""部门所在地"""'}), "(max_length=10, verbose_name='部门所在地')\n", (509, 546), False, 'from django.db import migrations, models\n'), ((664, 716), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'verbose_name': '"""部门名称"""'}), "(max_length=20, verbose_name='部门名称')\n", (680, 716), False, 'from django.db import migrations, models\n'), ((832, 907), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""部门编号"""'}), "(primary_key=True, serialize=False, verbose_name='部门编号')\n", (851, 907), False, 'from django.db import migrations, models\n'), ((1024, 1098), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'max_digits': '(7)', 'null': '(True)'}), '(blank=True, decimal_places=2, max_digits=7, null=True)\n', (1043, 1098), False, 'from django.db import migrations, models\n'), ((1214, 1256), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1233, 1256), False, 'from django.db import migrations, models\n')] |
import logging
import numpy as np
import scipy.sparse
from typing import Union
from .external import closedform_glm_mean, closedform_glm_scale
logger = logging.getLogger("batchglm")
def closedform_norm_glm_mean(
        x: Union[np.ndarray, scipy.sparse.csr_matrix],
        design_loc: np.ndarray,
        constraints_loc,
        size_factors=None,
        link_fn=lambda x: x,
        inv_link_fn=lambda x: x
):
    r"""
    Calculates a closed-form solution for the `mean` parameters of normal GLMs.
    :param x: The sample data
    :param design_loc: design matrix for location
    :param constraints_loc: tensor (all parameters x dependent parameters)
        Tensor that encodes how the complete parameter set which includes dependent
        parameters arises from independent parameters: all = <constraints, indep>.
        This form of constraints is used in vector generalized linear models (VGLMs).
    :param size_factors: size factors for X
    :return: tuple: (groupwise_means, mean, rmsd)
    """
    return closedform_glm_mean(
        x=x,
        dmat=design_loc,
        constraints=constraints_loc,
        size_factors=size_factors,
        link_fn=link_fn,
        inv_link_fn=inv_link_fn
    )
def closedform_norm_glm_logsd(
        x: Union[np.ndarray, scipy.sparse.csr_matrix],
        design_scale: np.ndarray,
        constraints=None,
        size_factors=None,
        groupwise_means=None,
        link_fn=np.log
):
    r"""
    Calculates a closed-form solution for the log-scale parameters of normal GLMs.
    :param x: The sample data
    :param design_scale: design matrix for scale
    :param constraints: some design constraints
    :param size_factors: size factors for X
    :param groupwise_means: optional, in case if already computed this can be specified to spare double-calculation
    :return: tuple (groupwise_scales, logsd, rmsd)
    """
    def compute_scales_fun(variance, mean):
        groupwise_scales = np.sqrt(variance)
        return groupwise_scales
    return closedform_glm_scale(
        x=x,
        design_scale=design_scale,
        constraints=constraints,
        size_factors=size_factors,
        groupwise_means=groupwise_means,
        link_fn=link_fn,
        compute_scales_fun=compute_scales_fun
    )
| [
"logging.getLogger",
"numpy.sqrt"
] | [((154, 183), 'logging.getLogger', 'logging.getLogger', (['"""batchglm"""'], {}), "('batchglm')\n", (171, 183), False, 'import logging\n'), ((1961, 1978), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (1968, 1978), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# This file is part of dxdiff.
#
# dxdiff is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dxdiff is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diamond. If not, see <http://www.gnu.org/licenses/>.
from lxml import etree
class EditScript:
  def __init__(self):
    self.script = []
  def __str__(self):
    return etree.tostring(self.to_xml(), pretty_print = True)
  def __len__(self):
    return len(self.script)
  def __getitem__(self, key):
    return self.script[key]
  def __iter__(self):
    return self.script.__iter__()
  def update(self, path, value, userdata = None):
    self.script.append({ "type": "update",
                         "location": path,
                         "value": value,
                         "userdata": userdata })
  def insert(self, path, index, tag, value = None, userdata = None):
    self.script.append({ "type": "insert",
                         "location": path,
                         "index": index,
                         "value": tag + (" " + value if value is not None else ""),
                         "userdata": userdata})
  def delete(self, path, userdata = None):
    self.script.append({ "type": "delete",
                         "location": path,
                         "userdata": userdata})
  def move(self, path, destination, index, userdata = None):
    self.script.append({ "type": "move",
                         "location": path,
                         "index": index,
                         "value": destination,
                         "userdata": userdata })
  def to_xml(self):
    tree = etree.Element("xmldiff")
    for edit in self.script:
      node = etree.Element(edit["type"], location = edit["location"])
      if "index" in edit:
        node.attrib["index"] = edit["index"]
      if edit["userdata"] is not None:
        node.attrib["userdata"] = edit["userdata"]
      if "value" in edit:
        node.text = edit["value"]
      tree.append(node)
    return etree.ElementTree(tree)
  def write(self, path):
    self.to_xml().write(path, pretty_print = True, xml_declaration = True, encoding = "utf-8")
| [
"lxml.etree.ElementTree",
"lxml.etree.Element"
] | [((2094, 2118), 'lxml.etree.Element', 'etree.Element', (['"""xmldiff"""'], {}), "('xmldiff')\n", (2107, 2118), False, 'from lxml import etree\n'), ((2477, 2500), 'lxml.etree.ElementTree', 'etree.ElementTree', (['tree'], {}), '(tree)\n', (2494, 2500), False, 'from lxml import etree\n'), ((2162, 2216), 'lxml.etree.Element', 'etree.Element', (["edit['type']"], {'location': "edit['location']"}), "(edit['type'], location=edit['location'])\n", (2175, 2216), False, 'from lxml import etree\n')] |
import logging
from time import time
from flask import Flask, request
PLAIN_HEADER = {'Content-Type': 'text/plain; charset=utf-8'}
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(threadName)s %(message)s')
log = logging.getLogger('chatserver')
app = Flask(__name__)
messages = []
@app.route('/post/<who>/<message>')
def post_message(who, message):
    messages.append((time(), request.remote_addr, who, message))
    print(messages)
    return "Message saved.\n" + str(messages), 200, PLAIN_HEADER
app.run(host='localhost', debug=True, threaded=True)
| [
"flask.Flask",
"logging.getLogger",
"logging.basicConfig",
"time.time"
] | [((132, 240), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s %(threadName)s %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(levelname)s %(threadName)s %(message)s')\n", (151, 240), False, 'import logging\n'), ((242, 273), 'logging.getLogger', 'logging.getLogger', (['"""chatserver"""'], {}), "('chatserver')\n", (259, 273), False, 'import logging\n'), ((280, 295), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (285, 295), False, 'from flask import Flask, request\n'), ((401, 407), 'time.time', 'time', ([], {}), '()\n', (405, 407), False, 'from time import time\n')] |
import ply.yacc as yacc
from CoachLex import tokens
# environmental variables
enviro_vars = {}
def p_statement_assign(p):
    'statement : VARINT VAR expression'
    enviro_vars[p[2]] = p[3]
def p_statement_expr(p):
    'statement : expression'
def p_statement_output(p):
    'statement : OUTPUT expression'
    print("Coach says " + str(p[2]) + "!")
def p_statement_if(p):
    '''statement : IFA VAR IFB statement'''
    if p[4]: p[6]
def p_statement_file_in(p):
    'statement : FILEIN VAR'
    file_str = ""
    f = open(p[2] + "." + 'osxc', "r")
    for line in f:
        file_str = ''
        file_str += line.rstrip('\n')
        yaccer.parse(file_str)
#Basic Math
def p_expression_basicop(p):
    '''expression : expression ADD expression
                  | expression SUBA SUBB expression
                  | expression MULT expression
                  | expression DIV expression'''
    if p[1] == "add": p[0] = p[2] + p[0]
    elif p[1] == 'finished' and p[2] == 'of': p[0] = p[0] - p[3]
    elif p[2] == 'by': p[0] = p[1] * p[3]
    elif p[2] == 'split': p[0] = p[1] / p[3]
def p_expression_number(p):
    'expression : NUMBER'
    p[0] = p[1]
def p_expression_var(p):
    'expression : VAR'
    try:
        p[0] = enviro_vars[p[1]]
    except LookupError:
        print("undefined var, resorting to 0")
        p[0] = 0
def p_comparison_binop(p):
    '''comparison : expression GREATLESSTHANA EQUALTOA EQUALTOB expression
                  | expression GREATLESSTHANA GREATERTHAN GREATLESSTHANB expression
                  | expression GREATLESSTHANA LESSTHAN GREATLESSTHANB expression'''
    if p[4] == 'same': p[0] = p[1] == p[5]
    elif p[3] == 'faster': p[0] = p[1] > p[5]
    elif p[3] == 'slower': p[0] = p[1] < p[5]
def p_error(p):
    print(f"Syntax error at {p.value!r}")
#set up yacc
yaccer = yacc.yacc()
while True:
    try:
        s = input('> ')
    except EOFError:
        break
    yaccer.parse(s) | [
"ply.yacc.yacc"
] | [((1849, 1860), 'ply.yacc.yacc', 'yacc.yacc', ([], {}), '()\n', (1858, 1860), True, 'import ply.yacc as yacc\n')] |
from django.contrib import admin, messages
from django.db import transaction
from django.db.models import Prefetch
from recipe.models import Ingredient, Recipe, RecipeIngredient, RecipeInstance, \
RecipeInstanceImage, Tag
admin.site.register(Tag)
@admin.register(Ingredient)
class IngredientAdmin(admin.ModelAdmin):
list_display = (
'name',
)
search_fields = (
'name',
)
@transaction.atomic
def merge_ingredients(self, request, queryset):
if len(queryset) < 2:
self.message_user(
request, 'At least two ingredients need to be selected!',
messages.WARNING)
return
main = queryset.first()
others = queryset[1:]
len_others = len(others)
RecipeIngredient.objects.filter(ingredient__in=others).update(ingredient=main)
Ingredient.objects.filter(pk__in=[i.pk for i in others]).delete()
self.message_user(
request, '{} ingredients were merged into {}'.format(len_others, main),
messages.SUCCESS)
merge_ingredients.short_description = 'Merge selected ingredients'
actions = (
merge_ingredients,
)
class RecipeIngredientInline(admin.TabularInline):
model = RecipeIngredient
autocomplete_fields = (
'ingredient',
)
class RecipeInstanceImageInline(admin.TabularInline):
model = RecipeInstanceImage
@admin.register(RecipeInstance)
class RecipeInstanceAdmin(admin.ModelAdmin):
list_display = (
'day',
'recipe',
)
inlines = (
RecipeInstanceImageInline,
)
@admin.register(Recipe)
class RecipeAdmin(admin.ModelAdmin):
list_display = (
'name',
'tag_str',
'view_count',
)
list_filter = (
'tags',
)
search_fields = (
'name',
)
inlines = (
RecipeIngredientInline,
)
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.prefetch_related(Prefetch('tags', Tag.objects.order_by('name')))
| [
"recipe.models.Tag.objects.order_by",
"recipe.models.RecipeIngredient.objects.filter",
"django.contrib.admin.site.register",
"django.contrib.admin.register",
"recipe.models.Ingredient.objects.filter"
] | [((228, 252), 'django.contrib.admin.site.register', 'admin.site.register', (['Tag'], {}), '(Tag)\n', (247, 252), False, 'from django.contrib import admin, messages\n'), ((256, 282), 'django.contrib.admin.register', 'admin.register', (['Ingredient'], {}), '(Ingredient)\n', (270, 282), False, 'from django.contrib import admin, messages\n'), ((1424, 1454), 'django.contrib.admin.register', 'admin.register', (['RecipeInstance'], {}), '(RecipeInstance)\n', (1438, 1454), False, 'from django.contrib import admin, messages\n'), ((1620, 1642), 'django.contrib.admin.register', 'admin.register', (['Recipe'], {}), '(Recipe)\n', (1634, 1642), False, 'from django.contrib import admin, messages\n'), ((779, 833), 'recipe.models.RecipeIngredient.objects.filter', 'RecipeIngredient.objects.filter', ([], {'ingredient__in': 'others'}), '(ingredient__in=others)\n', (810, 833), False, 'from recipe.models import Ingredient, Recipe, RecipeIngredient, RecipeInstance, RecipeInstanceImage, Tag\n'), ((866, 922), 'recipe.models.Ingredient.objects.filter', 'Ingredient.objects.filter', ([], {'pk__in': '[i.pk for i in others]'}), '(pk__in=[i.pk for i in others])\n', (891, 922), False, 'from recipe.models import Ingredient, Recipe, RecipeIngredient, RecipeInstance, RecipeInstanceImage, Tag\n'), ((2038, 2066), 'recipe.models.Tag.objects.order_by', 'Tag.objects.order_by', (['"""name"""'], {}), "('name')\n", (2058, 2066), False, 'from recipe.models import Ingredient, Recipe, RecipeIngredient, RecipeInstance, RecipeInstanceImage, Tag\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Import a folder (~/people) full of person JSON from Legiscan to the database.
"""
from django.core.management.base import BaseCommand
from general.models import Person
from ls_importer.models import LSIDPerson
import json
import os
from tqdm import tqdm
class Command(BaseCommand):
"""
Import a folder (~/people) full of person JSON from Legiscan to the database.
"""
help = 'Import a folder full of person JSON from Legiscan to the database.'
def handle(self, *args, **options):
"""
Make it happen.
"""
def json_to_person(json_path):
json_data = open(json_path)
person_json = json.load(json_data)
pj_unfold = person_json['person']
person_ls_id = pj_unfold['people_id']
# person_ls_role_id = pj_unfold['role_id']
# person_role = pj_unfold['role']
# person_ls_party_id = pj_unfold['party_id']
# person_name = pj_unfold['name']
person_first_name = pj_unfold['first_name']
person_middle_name = pj_unfold['middle_name']
person_last_name = pj_unfold['last_name']
person_suffix = pj_unfold['suffix']
person_nickname = pj_unfold['nickname']
# This try/catch structure exists
# to prevent edge cases where
# a person may be stored twice in Legiscan
# under slightly different names.
try:
LSIDPerson.objects.get(
lsid=person_ls_id
)
except(LSIDPerson.DoesNotExist):
person_object, person_created = Person.objects.get_or_create(
first_name=person_first_name,
middle_name=person_middle_name,
last_name=person_last_name,
suffix=person_suffix,
defaults={
'nickname': person_nickname,
}
)
link_object, link_created = LSIDPerson.objects.get_or_create(
lsid=person_ls_id,
person=person_object,
)
target_directory = os.path.join(os.path.expanduser("~"), 'people')
for file in tqdm(os.listdir(target_directory)):
if file.endswith(".json"):
json_to_person(os.path.join(target_directory, file))
| [
"ls_importer.models.LSIDPerson.objects.get_or_create",
"json.load",
"os.path.join",
"ls_importer.models.LSIDPerson.objects.get",
"general.models.Person.objects.get_or_create",
"os.path.expanduser",
"os.listdir"
] | [((707, 727), 'json.load', 'json.load', (['json_data'], {}), '(json_data)\n', (716, 727), False, 'import json\n'), ((2258, 2281), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2276, 2281), False, 'import os\n'), ((2318, 2346), 'os.listdir', 'os.listdir', (['target_directory'], {}), '(target_directory)\n', (2328, 2346), False, 'import os\n'), ((1520, 1561), 'ls_importer.models.LSIDPerson.objects.get', 'LSIDPerson.objects.get', ([], {'lsid': 'person_ls_id'}), '(lsid=person_ls_id)\n', (1542, 1561), False, 'from ls_importer.models import LSIDPerson\n'), ((1693, 1882), 'general.models.Person.objects.get_or_create', 'Person.objects.get_or_create', ([], {'first_name': 'person_first_name', 'middle_name': 'person_middle_name', 'last_name': 'person_last_name', 'suffix': 'person_suffix', 'defaults': "{'nickname': person_nickname}"}), "(first_name=person_first_name, middle_name=\n person_middle_name, last_name=person_last_name, suffix=person_suffix,\n defaults={'nickname': person_nickname})\n", (1721, 1882), False, 'from general.models import Person\n'), ((2084, 2157), 'ls_importer.models.LSIDPerson.objects.get_or_create', 'LSIDPerson.objects.get_or_create', ([], {'lsid': 'person_ls_id', 'person': 'person_object'}), '(lsid=person_ls_id, person=person_object)\n', (2116, 2157), False, 'from ls_importer.models import LSIDPerson\n'), ((2419, 2455), 'os.path.join', 'os.path.join', (['target_directory', 'file'], {}), '(target_directory, file)\n', (2431, 2455), False, 'import os\n')] |
# Copyright (C) 2013 Sony Mobile Communications AB.
# All rights, including trade secret rights, reserved.
from ave.profile import Profile
from ave.handset.profile import HandsetProfile
from ave.workspace import WorkspaceProfile
from ave.base_workspace import BaseWorkspaceProfile
from ave.relay.profile import RelayProfile
try: # prefer profile from full installation, if available
from ave.positioning.profile import TestDriveProfile
except: # use stub if positioning support is not installed
from positioning import TestDriveProfile
try: # prefer profile from full installation, if available
from ave.powermeter.profile import PowermeterProfile
except: # use stub if powermeter support is not installed
from powermeter import PowermeterProfile
try: # prefer profile from full installation, if available
from ave.beryllium.profile import BerylliumProfile
except: # use stub if beryllium support is not installed
from beryllium import BerylliumProfile
try: # prefer profile from full installation, if available
from ave.wlan.profile import WlanProfile
except: # use stub if wlan support is not installed
from wlan import WlanProfile
class BrokerProfile(Profile):
def __init__(self, values):
try: del values['authkeys']
except: pass
try: del values['remote']['authkey']
except: pass
Profile.__init__(self, values)
self['type'] = 'broker'
def __hash__(self):
return hash(id(self))
def profile_factory(profile):
return factory(profile)
def factory(profile):
if 'type' not in profile:
raise Exception('profile "type" attribute is missing')
if profile['type'] == 'workspace':
return WorkspaceProfile(profile)
if profile['type'] == 'handset':
return HandsetProfile(profile)
if profile['type'] == 'relay':
return RelayProfile(profile)
if profile['type'] == 'beryllium':
return BerylliumProfile(profile)
if profile['type'] == 'broker':
return BrokerProfile(profile)
if profile['type'] == 'testdrive':
return TestDriveProfile(profile)
if profile['type'] == 'wlan':
return WlanProfile(profile)
if profile['type'] == 'powermeter':
return PowermeterProfile(profile)
raise Exception('type %s not supported in profiles' % profile['type'])
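# Illustrative sketch (not part of the original module): the factory maps a
# plain profile dictionary onto its typed class via the 'type' key. The
# dictionary below is a hypothetical minimal example; real profiles normally
# carry additional type-specific fields.
def _factory_example():
    p = factory({'type': 'broker'})
    return isinstance(p, BrokerProfile)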
| [
"ave.workspace.WorkspaceProfile",
"ave.relay.profile.RelayProfile",
"positioning.TestDriveProfile",
"ave.handset.profile.HandsetProfile",
"ave.profile.Profile.__init__",
"beryllium.BerylliumProfile",
"powermeter.PowermeterProfile",
"wlan.WlanProfile"
] | [((1415, 1445), 'ave.profile.Profile.__init__', 'Profile.__init__', (['self', 'values'], {}), '(self, values)\n', (1431, 1445), False, 'from ave.profile import Profile\n'), ((1762, 1787), 'ave.workspace.WorkspaceProfile', 'WorkspaceProfile', (['profile'], {}), '(profile)\n', (1778, 1787), False, 'from ave.workspace import WorkspaceProfile\n'), ((1840, 1863), 'ave.handset.profile.HandsetProfile', 'HandsetProfile', (['profile'], {}), '(profile)\n', (1854, 1863), False, 'from ave.handset.profile import HandsetProfile\n'), ((1914, 1935), 'ave.relay.profile.RelayProfile', 'RelayProfile', (['profile'], {}), '(profile)\n', (1926, 1935), False, 'from ave.relay.profile import RelayProfile\n'), ((1990, 2015), 'beryllium.BerylliumProfile', 'BerylliumProfile', (['profile'], {}), '(profile)\n', (2006, 2015), False, 'from beryllium import BerylliumProfile\n'), ((2144, 2169), 'positioning.TestDriveProfile', 'TestDriveProfile', (['profile'], {}), '(profile)\n', (2160, 2169), False, 'from positioning import TestDriveProfile\n'), ((2219, 2239), 'wlan.WlanProfile', 'WlanProfile', (['profile'], {}), '(profile)\n', (2230, 2239), False, 'from wlan import WlanProfile\n'), ((2295, 2321), 'powermeter.PowermeterProfile', 'PowermeterProfile', (['profile'], {}), '(profile)\n', (2312, 2321), False, 'from powermeter import PowermeterProfile\n')] |
import time
from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image
from rlkit.core import logger
import cv2
import numpy as np
import os.path as osp
from rlkit.samplers.data_collector.scalor_env import WrappedEnvPathCollector as SCALORWrappedEnvPathCollector
from rlkit.torch.scalor.scalor import SCALOR
from rlkit.util.video import dump_video
from rlkit.util.io import load_local_or_remote_file
import rlkit.torch.pytorch_util as ptu
import gym
import multiworld
def generate_scalor_dataset(variant):
env_kwargs = variant.get('env_kwargs', None)
env_id = variant.get('env_id', None)
N = variant.get('N', 100)
rollout_length = variant.get('rollout_length', 100)
test_p = variant.get('test_p', 0.9)
use_cached = variant.get('use_cached', True)
imsize = variant.get('imsize', 64)
num_channels = variant.get('num_channels', 3)
show = variant.get('show', False)
init_camera = variant.get('init_camera', None)
dataset_path = variant.get('dataset_path', None)
oracle_dataset_using_set_to_goal = variant.get(
'oracle_dataset_using_set_to_goal', False)
random_rollout_data = variant.get('random_rollout_data', False)
random_and_oracle_policy_data = variant.get('random_and_oracle_policy_data',
False)
random_and_oracle_policy_data_split = variant.get(
'random_and_oracle_policy_data_split', 0)
policy_file = variant.get('policy_file', None)
n_random_steps = 1
scalor_dataset_specific_env_kwargs = variant.get(
'scalor_dataset_specific_env_kwargs', None)
save_file_prefix = variant.get('save_file_prefix', None)
tag = variant.get('tag', '')
if env_kwargs is None:
env_kwargs = {}
if save_file_prefix is None:
save_file_prefix = env_id
filename = "./data/tmp/{}_N{}_rollout_length{}_imsize{}_{}{}.npz".format(
save_file_prefix,
str(N),
str(rollout_length),
init_camera.__name__ if init_camera else '',
imsize,
tag,
)
import os
if not osp.exists('./data/tmp/'):
os.makedirs('./data/tmp/')
info = {}
import os
if not os.path.exists("./data/tmp/"):
os.makedirs("./data/tmp/")
if use_cached and osp.isfile(filename):
dataset = np.load(filename)
print("loaded data from saved file", filename)
else:
now = time.time()
multiworld.register_all_envs()
env = gym.make(env_id)
if not isinstance(env, ImageEnv):
env = ImageEnv(
env,
imsize,
init_camera=init_camera,
transpose=True,
normalize=True,
non_presampled_goal_img_is_garbage=True,
)
env.reset()
act_dim = env.action_space.low.size
info['env'] = env
imgs = np.zeros((N, rollout_length, imsize * imsize * num_channels),
dtype=np.uint8)
actions = np.zeros((N, rollout_length, act_dim))
for i in range(N):
env.reset()
for j in range(rollout_length):
action = env.action_space.sample()
obs = env.step(action)[0]
img = obs['image_observation']
imgs[i, j, :] = unormalize_image(img)
actions[i,j, :] = action
if show:
img = img.reshape(3, imsize, imsize).transpose()
img = img[::-1, :, ::-1]
cv2.imshow('img', img)
cv2.waitKey(1)
print("done making training data", filename, time.time() - now)
dataset = {"imgs": imgs, "actions": actions}
print(imgs.shape)
# np.savez(filename, **dataset)
return dataset, info
def scalor_training(variant):
scalor_params = variant.get("scalor_params", dict())
scalor_params["logdir"] = logger.get_snapshot_dir()
scalor = SCALOR(**scalor_params)
data, info = generate_scalor_dataset(variant['generate_scalor_dataset_kwargs'])
imgs, actions = data["imgs"], data["actions"]
imgs = normalize_image(imgs)
scalor.train(imgs=imgs, actions=actions) | [
"numpy.load",
"multiworld.register_all_envs",
"multiworld.core.image_env.unormalize_image",
"os.makedirs",
"rlkit.torch.scalor.scalor.SCALOR",
"gym.make",
"cv2.waitKey",
"os.path.exists",
"numpy.zeros",
"multiworld.core.image_env.normalize_image",
"time.time",
"multiworld.core.image_env.ImageEnv",
"os.path.isfile",
"rlkit.core.logger.get_snapshot_dir",
"cv2.imshow"
] | [((3941, 3966), 'rlkit.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (3964, 3966), False, 'from rlkit.core import logger\n'), ((3980, 4003), 'rlkit.torch.scalor.scalor.SCALOR', 'SCALOR', ([], {}), '(**scalor_params)\n', (3986, 4003), False, 'from rlkit.torch.scalor.scalor import SCALOR\n'), ((4149, 4170), 'multiworld.core.image_env.normalize_image', 'normalize_image', (['imgs'], {}), '(imgs)\n', (4164, 4170), False, 'from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image\n'), ((2095, 2120), 'os.path.exists', 'osp.exists', (['"""./data/tmp/"""'], {}), "('./data/tmp/')\n", (2105, 2120), True, 'import os.path as osp\n'), ((2130, 2156), 'os.makedirs', 'os.makedirs', (['"""./data/tmp/"""'], {}), "('./data/tmp/')\n", (2141, 2156), False, 'import os\n'), ((2196, 2225), 'os.path.exists', 'os.path.exists', (['"""./data/tmp/"""'], {}), "('./data/tmp/')\n", (2210, 2225), False, 'import os\n'), ((2235, 2261), 'os.makedirs', 'os.makedirs', (['"""./data/tmp/"""'], {}), "('./data/tmp/')\n", (2246, 2261), False, 'import os\n'), ((2284, 2304), 'os.path.isfile', 'osp.isfile', (['filename'], {}), '(filename)\n', (2294, 2304), True, 'import os.path as osp\n'), ((2324, 2341), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (2331, 2341), True, 'import numpy as np\n'), ((2421, 2432), 'time.time', 'time.time', ([], {}), '()\n', (2430, 2432), False, 'import time\n'), ((2441, 2471), 'multiworld.register_all_envs', 'multiworld.register_all_envs', ([], {}), '()\n', (2469, 2471), False, 'import multiworld\n'), ((2486, 2502), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (2494, 2502), False, 'import gym\n'), ((2908, 2985), 'numpy.zeros', 'np.zeros', (['(N, rollout_length, imsize * imsize * num_channels)'], {'dtype': 'np.uint8'}), '((N, rollout_length, imsize * imsize * num_channels), dtype=np.uint8)\n', (2916, 2985), True, 'import numpy as np\n'), ((3020, 3058), 'numpy.zeros', 'np.zeros', (['(N, rollout_length, act_dim)'], {}), '((N, rollout_length, act_dim))\n', (3028, 3058), True, 'import numpy as np\n'), ((2563, 2687), 'multiworld.core.image_env.ImageEnv', 'ImageEnv', (['env', 'imsize'], {'init_camera': 'init_camera', 'transpose': '(True)', 'normalize': '(True)', 'non_presampled_goal_img_is_garbage': '(True)'}), '(env, imsize, init_camera=init_camera, transpose=True, normalize=\n True, non_presampled_goal_img_is_garbage=True)\n', (2571, 2687), False, 'from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image\n'), ((3326, 3347), 'multiworld.core.image_env.unormalize_image', 'unormalize_image', (['img'], {}), '(img)\n', (3342, 3347), False, 'from multiworld.core.image_env import ImageEnv, unormalize_image, normalize_image\n'), ((3659, 3670), 'time.time', 'time.time', ([], {}), '()\n', (3668, 3670), False, 'import time\n'), ((3548, 3570), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (3558, 3570), False, 'import cv2\n'), ((3591, 3605), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3602, 3605), False, 'import cv2\n')] |
import subprocess
def is_branch_merged(branch):
"""
Checks if given branch is merged into current branch.
:param branch: Name of branch
:return: True/False
"""
proc = subprocess.Popen(["git", "branch", "--merged"], stdout=subprocess.PIPE)
    result = proc.stdout.read().decode()
    # strip the "* " current-branch marker and indentation from each line
    branches = [line.strip("* ") for line in result.splitlines() if line.strip()]
    return branch in branches
def get_file_contents_from_branch(filename, branch_name):
"""
Gets the contents of a file from a specific branch.
:param filename: Name of the file
:param branch_name: Name of the branch
:return: Contents of the file
"""
proc = subprocess.Popen(
["git", "show", "%s:%s" % (branch_name, filename)], stdout=subprocess.PIPE
)
return proc.stdout.read().decode()
def get_current_branch_name():
"""
Gets the name of the current git branch in the working directory.
:return: Name of the branch
"""
proc = subprocess.Popen(["git", "rev-parse", "--abbrev-ref", "HEAD"], stdout=subprocess.PIPE)
return proc.stdout.read().decode()
def get_changed_files(branch1, branch2):
"""
Gets a list of changed files between two branches.
:param branch1: name of first branch
:param branch2: name of second branch
:return: A list of changed files
"""
proc = subprocess.Popen(
["git", "diff", "--name-only", branch1, branch2], stdout=subprocess.PIPE
)
return proc.stdout.read().decode()
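# Illustrative usage sketch (not part of the original module). It assumes the
# script is run inside a git working tree and that a branch named "master"
# exists; adjust the branch names for your repository.
if __name__ == "__main__":
    current = get_current_branch_name().strip()
    print("Current branch:", current)
    print("'master' merged into it:", is_branch_merged("master"))
    print("Files changed vs 'master':")
    print(get_changed_files("master", current))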
| [
"subprocess.Popen"
] | [((193, 264), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'branch', '--merged']"], {'stdout': 'subprocess.PIPE'}), "(['git', 'branch', '--merged'], stdout=subprocess.PIPE)\n", (209, 264), False, 'import subprocess\n'), ((612, 709), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'show', '%s:%s' % (branch_name, filename)]"], {'stdout': 'subprocess.PIPE'}), "(['git', 'show', '%s:%s' % (branch_name, filename)], stdout\n =subprocess.PIPE)\n", (628, 709), False, 'import subprocess\n'), ((920, 1011), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'rev-parse', '--abbrev-ref', 'HEAD']"], {'stdout': 'subprocess.PIPE'}), "(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=\n subprocess.PIPE)\n", (936, 1011), False, 'import subprocess\n'), ((1291, 1386), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'diff', '--name-only', branch1, branch2]"], {'stdout': 'subprocess.PIPE'}), "(['git', 'diff', '--name-only', branch1, branch2], stdout=\n subprocess.PIPE)\n", (1307, 1386), False, 'import subprocess\n')] |
"""
Frames, ticks, titles, and labels
=================================
Setting the style of the map frames, ticks, etc. is handled by the ``frame`` argument
accepted by all plotting methods of :class:`pygmt.Figure`.
"""
import pygmt
########################################################################################
# Plot frame
# ----------
#
# By default, PyGMT does not add a frame to your plot. For example, we can plot the
# coastlines of the world with a Mercator projection:
fig = pygmt.Figure()
fig.coast(shorelines="1/0.5p", region=[-180, 180, -60, 60], projection="M25c")
fig.show()
########################################################################################
# To add the default GMT frame to the plot, use ``frame="f"`` in
# :meth:`pygmt.Figure.basemap` or any other plotting module:
fig = pygmt.Figure()
fig.coast(shorelines="1/0.5p", region=[-180, 180, -60, 60], projection="M25c")
fig.basemap(frame="f")
fig.show()
########################################################################################
# Ticks and grid lines
# --------------------
#
# The automatic frame (``frame=True`` or ``frame="a"``) sets the default GMT style frame
# and automatically determines tick labels from the plot region.
fig = pygmt.Figure()
fig.coast(shorelines="1/0.5p", region=[-180, 180, -60, 60], projection="M25c")
fig.basemap(frame="a")
fig.show()
########################################################################################
# Add automatic grid lines to the plot by adding a ``g`` to ``frame``:
fig = pygmt.Figure()
fig.coast(shorelines="1/0.5p", region=[-180, 180, -60, 60], projection="M25c")
fig.basemap(frame="ag")
fig.show()
########################################################################################
# Title
# -----
#
# The figure title can be set by passing **+t**\ *title* to the ``frame`` parameter of
# :meth:`pygmt.Figure.basemap`. Passing multiple arguments to ``frame`` can be done by
# using a list, as shown in the example below.
fig = pygmt.Figure()
# region="IS" specifies Iceland using the ISO country code
fig.coast(shorelines="1/0.5p", region="IS", projection="M25c")
fig.basemap(frame=["a", "+tIceland"])
fig.show()
########################################################################################
# To use a title with multiple words, the title must be placed inside another set of
# quotation marks. To prevent the quotation marks from appearing in the figure title,
# the frame argument can be passed in single quotation marks and the title can be
# passed in double quotation marks.
fig = pygmt.Figure()
# region="TT" specifies Trinidad and Tobago
fig.coast(shorelines="1/0.5p", region="TT", projection="M25c")
fig.basemap(frame=["a", '+t"Trinidad and Tobago"'])
fig.show()
########################################################################################
# Axis labels
# -----------
#
# Axis labels can be set by passing **x+l**\ *label* (or starting with y if
# labeling the y-axis) to the ``frame`` parameter of :meth:`pygmt.Figure.basemap`.
# Axis labels will be displayed on all primary axes, which by default are all sides of
# the figure. To designate only some of the axes as primary, an argument that
# capitalizes only the primary axes can be passed, which is ``"WSne"`` in the example
# below. The letters correspond with west (left), south (bottom), north (top), and
# east (right) sides of a figure.
#
# The example below uses a Cartesian projection, as GMT does not allow axis labels to
# be set for geographic maps.
fig = pygmt.Figure()
fig.basemap(
region=[0, 10, 0, 20],
projection="X10c/8c",
frame=["WSne", "x+lx-axis", "y+ly-axis"],
)
fig.show()
| [
"pygmt.Figure"
] | [((492, 506), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (504, 506), False, 'import pygmt\n'), ((820, 834), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (832, 834), False, 'import pygmt\n'), ((1247, 1261), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (1259, 1261), False, 'import pygmt\n'), ((1543, 1557), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (1555, 1557), False, 'import pygmt\n'), ((2007, 2021), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (2019, 2021), False, 'import pygmt\n'), ((2579, 2593), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (2591, 2593), False, 'import pygmt\n'), ((3539, 3553), 'pygmt.Figure', 'pygmt.Figure', ([], {}), '()\n', (3551, 3553), False, 'import pygmt\n')] |
# Generated by Django 2.0 on 2017-12-29 14:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0005_auto_20171229_2354'),
]
operations = [
migrations.AlterField(
model_name='user',
name='authority',
field=models.IntegerField(choices=[(0, 'Master'), (1, 'Editor'), (2, 'Watcher')]),
),
]
| [
"django.db.models.IntegerField"
] | [((331, 406), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'Master'), (1, 'Editor'), (2, 'Watcher')]"}), "(choices=[(0, 'Master'), (1, 'Editor'), (2, 'Watcher')])\n", (350, 406), False, 'from django.db import migrations, models\n')] |
import perceptron as pc
import numpy as np
def mnist_load(file, samples):
raw_data = np.array(np.genfromtxt(file, delimiter=',', max_rows=samples))
labels = raw_data[:,0]
data = np.delete(raw_data, 0, 1)/255.0
return (data, labels)
def main():
print("loading data...")
samples = 10000
batch_size = 20
train = mnist_load("mnist_train.csv", samples)
validate = mnist_load("mnist_test.csv", samples)
restart_params = (.0001, 0.01, 0.01, 2*samples/batch_size) #lower bound, upper bound, decay rate, cycle length.
structure = [784, 256, 128, 10, 10]
activation_functions = ("elu", "elu", "elu", "softmax")
network = pc.network(structure, activation_functions, train, validate)
network.train(dropout=[.5, .2, 0], beta=0.9, lr_func="warm restarts", lr_params=restart_params, batch_size=batch_size, epochs=10, cost_func="cross entropy")
main()
| [
"numpy.delete",
"numpy.genfromtxt",
"perceptron.network"
] | [((666, 726), 'perceptron.network', 'pc.network', (['structure', 'activation_functions', 'train', 'validate'], {}), '(structure, activation_functions, train, validate)\n', (676, 726), True, 'import perceptron as pc\n'), ((99, 151), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'max_rows': 'samples'}), "(file, delimiter=',', max_rows=samples)\n", (112, 151), True, 'import numpy as np\n'), ((191, 216), 'numpy.delete', 'np.delete', (['raw_data', '(0)', '(1)'], {}), '(raw_data, 0, 1)\n', (200, 216), True, 'import numpy as np\n')] |
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from gym import spaces, Env
class NXColoringEnv(Env):
def __init__(self, generator=nx.barabasi_albert_graph, **kwargs):
'''
        generator — networkx graph generator,
kwargs — generator named arguments
'''
self.G = generator(**kwargs)
self.pos = nx.spring_layout(self.G, iterations=1000) #determine by n and m (?)
self.edges = np.array(self.G.edges())
self.n = len(self.G.nodes())
self.m = len(self.edges)
self.action_space = spaces.Box(low=0, high=self.n-1, shape=(self.n,2), dtype=np.uint32)
self.used_colors = []
self.current_state = np.full(self.n, self.n, dtype=np.uint32)
self.done = False
self.total_reward = 0
def get_graph(self):
return self.G.copy()
def step(self, action):
def is_action_available(action):
node, color = action
adjacent_nodes = np.unique(self.edges[np.sum(np.isin(self.edges, node), axis=1, dtype=bool)])
return ~np.any(self.current_state[adjacent_nodes]==color)
reward = 0
if is_action_available(action):
node, color = action
self.current_state[node] = color
if color not in self.used_colors:
reward = -1
self.total_reward -= 1
self.used_colors.append(color)
if self.n not in np.unique(self.current_state):
self.done = True
info = {}
return self.current_state, reward, self.done, info
def reset(self):
self.used_colors = []
self.current_state = np.full(self.n, self.n, dtype=np.uint32)
self.done = False
self.total_reward = 0
def render(self, mode='human', close=False):
nx.draw(self.G, self.pos, node_color=self.current_state, cmap=plt.cm.tab20) | [
"numpy.full",
"numpy.isin",
"numpy.any",
"networkx.spring_layout",
"networkx.draw",
"gym.spaces.Box",
"numpy.unique"
] | [((342, 383), 'networkx.spring_layout', 'nx.spring_layout', (['self.G'], {'iterations': '(1000)'}), '(self.G, iterations=1000)\n', (358, 383), True, 'import networkx as nx\n'), ((539, 609), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(self.n - 1)', 'shape': '(self.n, 2)', 'dtype': 'np.uint32'}), '(low=0, high=self.n - 1, shape=(self.n, 2), dtype=np.uint32)\n', (549, 609), False, 'from gym import spaces, Env\n'), ((658, 698), 'numpy.full', 'np.full', (['self.n', 'self.n'], {'dtype': 'np.uint32'}), '(self.n, self.n, dtype=np.uint32)\n', (665, 698), True, 'import numpy as np\n'), ((1528, 1568), 'numpy.full', 'np.full', (['self.n', 'self.n'], {'dtype': 'np.uint32'}), '(self.n, self.n, dtype=np.uint32)\n', (1535, 1568), True, 'import numpy as np\n'), ((1669, 1744), 'networkx.draw', 'nx.draw', (['self.G', 'self.pos'], {'node_color': 'self.current_state', 'cmap': 'plt.cm.tab20'}), '(self.G, self.pos, node_color=self.current_state, cmap=plt.cm.tab20)\n', (1676, 1744), True, 'import networkx as nx\n'), ((1328, 1357), 'numpy.unique', 'np.unique', (['self.current_state'], {}), '(self.current_state)\n', (1337, 1357), True, 'import numpy as np\n'), ((1002, 1053), 'numpy.any', 'np.any', (['(self.current_state[adjacent_nodes] == color)'], {}), '(self.current_state[adjacent_nodes] == color)\n', (1008, 1053), True, 'import numpy as np\n'), ((939, 964), 'numpy.isin', 'np.isin', (['self.edges', 'node'], {}), '(self.edges, node)\n', (946, 964), True, 'import numpy as np\n')] |
import os
from backend.corpora.common.utils.secret_config import SecretConfig
class WmgConfig(SecretConfig):
def __init__(self, *args, **kwargs):
super().__init__("backend", secret_name="wmg_config", **kwargs)
# TODO: promote this impl to parent class, if new behavior works universally
def __getattr__(self, name):
# Environment variables intentionally override config file.
if not self.config_is_loaded():
self.load()
if (value := self.value_from_env(name)) is not None:
return value
if (value := self.value_from_config(name)) is not None:
return value
if (value := self.value_from_defaults(name)) is not None:
return value
self.raise_error(name)
def get_defaults_template(self):
deployment_stage = os.getenv("DEPLOYMENT_STAGE", "test")
defaults_template = {"bucket": f"wmg-{deployment_stage}", "data_path_prefix": "", "tiledb_config_overrides": {}}
return defaults_template
| [
"os.getenv"
] | [((834, 871), 'os.getenv', 'os.getenv', (['"""DEPLOYMENT_STAGE"""', '"""test"""'], {}), "('DEPLOYMENT_STAGE', 'test')\n", (843, 871), False, 'import os\n')] |
"""Test my quick sort algorithm tests."""
from quick_sort import quick_sort, _quicksort
import pytest
from random import randint
@pytest.fixture(scope='function')
def list_ten():
"""Make a list of 10 vals."""
return [x for x in range(10)]
@pytest.fixture(scope='function')
def rand_ten():
"""Make a random list of length 10."""
return [randint(0, 1000) for _ in range(10)]
@pytest.fixture(scope='function')
def rand_neg():
"""Make a random list of neg value list len 100."""
return [randint(-1000, 0) for _ in range(100)]
def test_sort_list_with_neg_values(rand_neg):
"""Test if sorting method sorts negative values."""
key = sorted(rand_neg)
result = quick_sort(rand_neg)
assert key == result
def test_sort_nums_in_list_random_case(rand_ten):
"""Test quick sort function."""
result = quick_sort(rand_ten)
key = sorted(rand_ten)
assert result == key
def test_sort_nums_in_tuple_random_case(rand_ten):
"""Test quick sort function."""
rand = tuple(rand_ten)
result = quick_sort(rand)
key = sorted(rand)
assert result == key
def test_sort_nums_in_list_worst_case(list_ten):
"""Test quick sort function."""
reverse = list(reversed(list_ten))
result = quick_sort(reverse)
assert result == list_ten
def test_sort_nums_in_tuple_worst_case(list_ten):
"""Test quick sort function."""
reverse = tuple(reversed(list_ten))
result = quick_sort(reverse)
assert result == list_ten
def test_sort_method_raises_error():
"""Test if error gets raised for invalid type."""
with pytest.raises(ValueError):
quick_sort('12345')
def test_sort_method_raises_error_val():
"""Test if error gets raised for invalid type."""
with pytest.raises(ValueError):
quick_sort([1, 2, '3'])
def test_sort_method_raise_error_dic():
"""Test if error gets raised for invalid type."""
with pytest.raises(ValueError):
quick_sort({1, 2, 3})
def test_sort_method_raise_error_fun():
"""Test if error gets raised for invalid type."""
with pytest.raises(ValueError):
quick_sort([1, 2, 3, 'p'])
def test_sort_nums_in_list_random_case_helper(rand_ten):
"""Test quick sort function."""
result = quick_sort(rand_ten)
key = sorted(rand_ten)
assert result == key
def test_sort_nums_in_list_worst_case_helper(list_ten):
"""Test _quick sort function."""
reverse = list(reversed(list_ten))
result = _quicksort(reverse)
assert result == list_ten
| [
"quick_sort.quick_sort",
"random.randint",
"pytest.fixture",
"pytest.raises",
"quick_sort._quicksort"
] | [((132, 164), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (146, 164), False, 'import pytest\n'), ((252, 284), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (266, 284), False, 'import pytest\n'), ((396, 428), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (410, 428), False, 'import pytest\n'), ((696, 716), 'quick_sort.quick_sort', 'quick_sort', (['rand_neg'], {}), '(rand_neg)\n', (706, 716), False, 'from quick_sort import quick_sort, _quicksort\n'), ((843, 863), 'quick_sort.quick_sort', 'quick_sort', (['rand_ten'], {}), '(rand_ten)\n', (853, 863), False, 'from quick_sort import quick_sort, _quicksort\n'), ((1045, 1061), 'quick_sort.quick_sort', 'quick_sort', (['rand'], {}), '(rand)\n', (1055, 1061), False, 'from quick_sort import quick_sort, _quicksort\n'), ((1249, 1268), 'quick_sort.quick_sort', 'quick_sort', (['reverse'], {}), '(reverse)\n', (1259, 1268), False, 'from quick_sort import quick_sort, _quicksort\n'), ((1440, 1459), 'quick_sort.quick_sort', 'quick_sort', (['reverse'], {}), '(reverse)\n', (1450, 1459), False, 'from quick_sort import quick_sort, _quicksort\n'), ((2249, 2269), 'quick_sort.quick_sort', 'quick_sort', (['rand_ten'], {}), '(rand_ten)\n', (2259, 2269), False, 'from quick_sort import quick_sort, _quicksort\n'), ((2469, 2488), 'quick_sort._quicksort', '_quicksort', (['reverse'], {}), '(reverse)\n', (2479, 2488), False, 'from quick_sort import quick_sort, _quicksort\n'), ((356, 372), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (363, 372), False, 'from random import randint\n'), ((513, 530), 'random.randint', 'randint', (['(-1000)', '(0)'], {}), '(-1000, 0)\n', (520, 530), False, 'from random import randint\n'), ((1592, 1617), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1605, 1617), False, 'import pytest\n'), ((1627, 1646), 'quick_sort.quick_sort', 'quick_sort', (['"""12345"""'], {}), "('12345')\n", (1637, 1646), False, 'from quick_sort import quick_sort, _quicksort\n'), ((1753, 1778), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1766, 1778), False, 'import pytest\n'), ((1788, 1811), 'quick_sort.quick_sort', 'quick_sort', (["[1, 2, '3']"], {}), "([1, 2, '3'])\n", (1798, 1811), False, 'from quick_sort import quick_sort, _quicksort\n'), ((1917, 1942), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1930, 1942), False, 'import pytest\n'), ((1952, 1973), 'quick_sort.quick_sort', 'quick_sort', (['{1, 2, 3}'], {}), '({1, 2, 3})\n', (1962, 1973), False, 'from quick_sort import quick_sort, _quicksort\n'), ((2079, 2104), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2092, 2104), False, 'import pytest\n'), ((2114, 2140), 'quick_sort.quick_sort', 'quick_sort', (["[1, 2, 3, 'p']"], {}), "([1, 2, 3, 'p'])\n", (2124, 2140), False, 'from quick_sort import quick_sort, _quicksort\n')] |
import numpy as np
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 1: Multi-Armed Bandit Problem (15 points)
In this problem, you will implement the epsilon-greedy method for Multi-armed bandit problem.
A list of all variables being used in this problem is provided at the end of this file.
'''
#--------------------------
def Terms_and_Conditions():
'''
By submitting this homework or changing this function, you agree with the following terms:
(1) Not sharing your code/solution with any student before and after the homework due. For example, sending your code segment to another student, putting your solution online or lending your laptop (if your laptop contains your solution or your Dropbox automatically copied your solution from your desktop computer and your laptop) to another student to work on this homework will violate this term.
(2) Not using anyone's code in this homework and building your own solution. For example, using some code segments from another student or online resources due to any reason (like too busy recently) will violate this term. Changing other's code as your solution (such as changing the variable names) will also violate this term.
(3) When discussing with any other students about this homework, only discuss high-level ideas or use pseudo-code. Don't discuss about the solution at the code level. For example, two students discuss about the solution of a function (which needs 5 lines of code to solve) and they then work on the solution "independently", however the code of the two solutions are exactly the same, or only with minor differences (variable names are different). In this case, the two students violate this term.
All violations of (1),(2) or (3) will be handled in accordance with the WPI Academic Honesty Policy. For more details, please visit: https://www.wpi.edu/about/policies/academic-integrity/dishonesty
Note: we may use the Stanford Moss system to check your code for code similarity. https://theory.stanford.edu/~aiken/moss/
Historical Data: in one year, we ended up finding 25% of the students in that class violating this term in their homework submissions and we handled ALL of these violations according to the WPI Academic Honesty Policy.
'''
#*******************************************
# CHANGE HERE: if you have read and agree with the term above, change "False" to "True".
Read_and_Agree = True
#*******************************************
return Read_and_Agree
#----------------------------------------------------
'''
Given the player's memory about the previous results in the game and the action chosen and reward received at the current time step, update the player's memory.
---- Inputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
* r: the reward received at the current time step, a float scalar.
    * Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt[i] represents the sum of total rewards collected on the i-th action.
    * Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
---- Hints: --------
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def update_memory(a, r, Rt, Ct):
#########################################
## INSERT YOUR CODE HERE (3 points)
Rt[a] = Rt[a] + r
Ct[a] = Ct[a] + 1
#########################################
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_update_memory
--- OR ----
python3 -m nose -v test1.py:test_update_memory
--- OR ----
python -m nose -v test1.py:test_update_memory
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Explore-only) Given a multi-armed bandit game, choose an action at the current time step using explore-only strategy. Randomly pick an action with uniform distribution: equal probability for all actions.
---- Inputs: --------
* c: the number of possible actions in a multi-armed bandit problem, an integer scalar.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action_explore(c):
#########################################
## INSERT YOUR CODE HERE (3 points)
a = np.random.randint(0, c)
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action_explore
--- OR ----
python3 -m nose -v test1.py:test_choose_action_explore
--- OR ----
python -m nose -v test1.py:test_choose_action_explore
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Exploit-only) Given a multi-armed bandit game and the player's memory about the previous results, choose an action at the current time step using exploit-only strategy: choose the action with the highest average reward.
---- Inputs: --------
    * Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt[i] represents the sum of total rewards collected on the i-th action.
    * Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* If the count in Ct[i] for the i-th action is 0, we can assume the average reward for the i-th action is 0. For example, if the count Ct for 3 actions are [0,1,1], we can assume the average reward for the first action is 0.
    * You could use the argmax() function in numpy to return the index of the largest value in a vector.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action_exploit(Rt, Ct):
#########################################
## INSERT YOUR CODE HERE (3 points)
a = np.argmax([0 if Ct[i] == 0 else Rt[i] / Ct[i] for i in range(Rt.size)])
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action_exploit
--- OR ----
python3 -m nose -v test1.py:test_choose_action_exploit
--- OR ----
python -m nose -v test1.py:test_choose_action_exploit
---------------------------------------------------
'''
#----------------------------------------------------
'''
Given a multi-armed bandit game and the player's memory about the previous results, choose an action at the current step of the game using epsilon-greedy method: with a small probability (epsilon) to follow explore-only method (randomly choose an action) and with a large probability (1-epsilon) to follow exploit-only method (choose the action with the highest average reward).
---- Inputs: --------
    * Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt[i] represents the sum of total rewards collected on the i-th action.
    * Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
* e: (epsilon) the probability of the player to follow the exploration-only strategy. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the exploitation-only strategy.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* You could use the random.rand() function in numpy to sample a number randomly using uniform distribution between 0 and 1.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action(Rt, Ct, e=0.05):
#########################################
## INSERT YOUR CODE HERE (6 points)
a = choose_action_explore(Ct.size) if np.random.random() < e else choose_action_exploit(Rt, Ct)
#########################################
return a
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action
--- OR ----
python3 -m nose -v test1.py:test_choose_action
--- OR ----
python -m nose -v test1.py:test_choose_action
---------------------------------------------------
'''
#--------------------------------------------
'''
TEST problem 1:
Now you can test the correctness of all the above functions by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py
--- OR ----
python3 -m nose -v test1.py
--- OR ----
python -m nose -v test1.py
---------------------------------------------------
If your code passed all the tests, you will see the following message in the terminal:
----------- Problem 1 (15 points in total)--------------------- ... ok
* (3 points) update_memory ... ok
* (3 points) choose_action_explore ... ok
* (3 points) choose_action_exploit ... ok
* (6 points) choose_action ... ok
----------------------------------------------------------------------
Ran 4 tests in 0.586s
OK
'''
#--------------------------------------------
#--------------------------------------------
'''
List of All Variables
* c: the number of possible actions in a multi-armed bandit problem, an integer scalar.
* e: (epsilon) the probability of the player to follow the exploration-only strategy. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the exploitation-only strategy.
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
* r: the reward received at the current time step, a float scalar.
'''
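#----------------------------------------------------
# Illustrative sketch (not part of the graded assignment): how the functions
# above fit together in a simulated 3-armed bandit game. The Gaussian reward
# means below are arbitrary example values, not data from the homework.
def demo_epsilon_greedy(n_steps=1000, e=0.05):
    true_means = np.array([0.2, 0.5, 0.8])  # hypothetical expected reward per arm
    c = len(true_means)
    Rt = np.zeros(c)                # total reward collected for each action
    Ct = np.zeros(c, dtype=int)     # how many times each action was tried
    for _ in range(n_steps):
        a = choose_action(Rt, Ct, e)              # epsilon-greedy choice
        r = np.random.normal(true_means[a], 0.1)  # sample a noisy reward
        update_memory(a, r, Rt, Ct)              # update the player's memory
    return Rt, Ct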
#-------------------------------------------- | [
"numpy.random.randint",
"numpy.random.random"
] | [((5139, 5162), 'numpy.random.randint', 'np.random.randint', (['(0)', 'c'], {}), '(0, c)\n', (5156, 5162), True, 'import numpy as np\n'), ((9556, 9574), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (9572, 9574), True, 'import numpy as np\n')] |
"""Photometer
These functions handle data files from spectrophotometers for easy and direct import
The functions are:
* uprtek_import_spectrum - Imports the spectrum from a UPRtek spectrophotometer
* uprtek_import_r_vals - Imports the R values generated by a UPRtek spectrophotometer
* uprtek_file_import - Imports the UPRtek file and extracts the selected data
"""
import csv
import itertools
"""Imports a UPRtek data file and outputs a dictionary with the intensities for each wavelength
Note: UPRtek names these files as .xls, but they are actually formatted as tab-delimited text files
Note2: This has only been tested with the UPRtek CV600 and MK350N. Others may have a different file format
Parameters
----------
filename : String
The filename to import
Returns
-------
dict
A dictionary with the wavelengths and intensities, e.g.:
{380: 0.048, 381: 0.051, ...}
"""
def uprtek_import_spectrum(filename: str):
return uprtek_file_import(filename, 'spd')
"""Imports a UPRtek data file and outputs a dictionary with the R-Values
Note: UPRtek names these files as .xls, but they are actually formatted as tab-delimited text files
Note2: This has only been tested with the UPRtek CV600 and MK350N. Others may have a different file format
Parameters
----------
filename : String
The filename to import
Returns
-------
dict
A dictionary with the R-Values, e.g.:
{'R1': 98.887482, 'R2': 99.234245, ...}
"""
def uprtek_import_r_vals(filename: str):
return uprtek_file_import(filename, 'r_vals')
"""Imports a UPRtek data file and outputs a dictionary with the selected data
Note: UPRtek names these files as .xls, but they are actually formatted as tab-delimited text files
Note2: This has only been tested with the UPRtek CV600 and MK350N. Others may have a different file format
Parameters
----------
filename : String
The filename to import
returntype: dict
The type of data to return. Currently, either 'spd' or 'r_vals'
Returns
-------
dict
A dictionary with the selected data
"""
def uprtek_file_import(filename: str, returntype: dict):
with open(filename, mode='r', encoding='us-ascii') as csvFile:
reader = csv.reader(csvFile, delimiter='\t')
# Get UPRtek model from the first line, then set rows for reading data
model = next(reader)[1]
if model == 'CV600':
spd_start = 40
r_start = 18
r_end = 33
elif model == 'MK350NPLUS':
spd_start = 46
r_start = 26
r_end = 41
else:
print('UPRtek model not available. Using the MK350N format, which could result in errors!')
spd_start = 46
r_start = 26
r_end = 41
# Extract the data and return
if returntype == 'spd':
spd = {}
for row in itertools.islice(reader, spd_start, None):
spd[int(row[0][0:3])] = float(row[1])
return spd
elif returntype == 'r_vals':
r_vals = {}
for row in itertools.islice(reader, r_start, r_end):
r_vals[row[0]] = float(row[1])
return r_vals
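# Illustrative usage sketch (not part of the original module). The filename is
# a hypothetical example; pass the tab-delimited .xls file exported by the
# UPRtek instrument.
if __name__ == '__main__':
    spd = uprtek_import_spectrum('measurement.xls')
    r_vals = uprtek_import_r_vals('measurement.xls')
    print(len(spd), 'wavelength samples, R1 =', r_vals.get('R1'))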
| [
"csv.reader",
"itertools.islice"
] | [((2273, 2308), 'csv.reader', 'csv.reader', (['csvFile'], {'delimiter': '"""\t"""'}), "(csvFile, delimiter='\\t')\n", (2283, 2308), False, 'import csv\n'), ((2957, 2998), 'itertools.islice', 'itertools.islice', (['reader', 'spd_start', 'None'], {}), '(reader, spd_start, None)\n', (2973, 2998), False, 'import itertools\n'), ((3163, 3203), 'itertools.islice', 'itertools.islice', (['reader', 'r_start', 'r_end'], {}), '(reader, r_start, r_end)\n', (3179, 3203), False, 'import itertools\n')] |
import getopt
import os
import sys
# Help message to show
def help_message():
print("usage: minij [OPTIONS] [FILE]\n")
print("OPTIONS:")
print(" -h, --help Show help for the command")
print(" -o, --output Specify the output file")
# Try to get all the values passed to the program
def parse_flags(args_list):
shorts = "ho:"
longs = ["help", "output="]
try:
opts, vals = getopt.getopt(args_list, shorts, longs)
except getopt.error as e:
print("ERROR: %s" % e)
print("Try doing minij -h or minij --help to get more information")
sys.exit(1)
# Default values
args = {
"input": None,
"output": None,
}
for opt, val in opts:
# Print help message
if opt in ("-h", "--help"):
args["help"] = True
help_message()
sys.exit(0)
# Get specific output file
elif opt in ("-o", "--output"):
if os.path.isdir(val):
print("ERROR: The output file is a directory")
sys.exit(1)
args["output"] = val
# Get the input file
if len(vals) > 1:
print("ERROR: only one file is allowed")
sys.exit(1)
elif len(vals) < 1:
print("ERROR: no file provided")
sys.exit(1)
args["input"] = vals[0]
# Set the output if not specified
if args["output"] is None:
filename = os.path.splitext(os.path.basename(args["input"]))[0]
output = filename
count = 0
while os.path.isfile(output + ".table"):
count += 1
output = filename + "(" + str(count) + ")"
args["output"] = output + ".table"
return args
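# Illustrative sketch (not part of the original file): typical entry point,
# assuming the module is executed directly from the command line.
if __name__ == "__main__":
    parsed = parse_flags(sys.argv[1:])
    print("Input file: %s" % parsed["input"])
    print("Output file: %s" % parsed["output"])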
| [
"getopt.getopt",
"os.path.basename",
"os.path.isdir",
"os.path.isfile",
"sys.exit"
] | [((421, 460), 'getopt.getopt', 'getopt.getopt', (['args_list', 'shorts', 'longs'], {}), '(args_list, shorts, longs)\n', (434, 460), False, 'import getopt\n'), ((1222, 1233), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1230, 1233), False, 'import sys\n'), ((1550, 1583), 'os.path.isfile', 'os.path.isfile', (["(output + '.table')"], {}), "(output + '.table')\n", (1564, 1583), False, 'import os\n'), ((606, 617), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (614, 617), False, 'import sys\n'), ((869, 880), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (877, 880), False, 'import sys\n'), ((1308, 1319), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1316, 1319), False, 'import sys\n'), ((972, 990), 'os.path.isdir', 'os.path.isdir', (['val'], {}), '(val)\n', (985, 990), False, 'import os\n'), ((1455, 1486), 'os.path.basename', 'os.path.basename', (["args['input']"], {}), "(args['input'])\n", (1471, 1486), False, 'import os\n'), ((1071, 1082), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1079, 1082), False, 'import sys\n')] |
import unittest
import pyperclip
from module_contact import Contact
class TestContact(unittest.TestCase):
def setUp(self):
self.new_contact = Contact("James","Muriuki","0712345678","<EMAIL>")
def test_init(self):
self.assertEqual(self.new_contact.first_name,"James")
self.assertEqual(self.new_contact.last_name,"Muriuki")
self.assertEqual(self.new_contact.phone_number,"0712345678")
self.assertEqual(self.new_contact.email,"<EMAIL>")
def test_save_contact(self):
self.new_contact.save_contact()
self.assertEqual(len(Contact.contact_list), 1)
def test_save_multiple_contact(self):
self.new_contact.save_contact()
test_contact=Contact("Test","user","0712345678","<EMAIL>")
test_contact.save_contact()
self.assertEqual(len(Contact.contact_list),2)
def tearDown(self):
Contact.contact_list=[]
def test_delete_contact(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0712345678","<EMAIL>")
test_contact.save_contact()
self.new_contact.delete_contact()
self.assertEqual(len(Contact.contact_list),1)
def test_find_contact_by_number(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0711223344","<EMAIL>")
test_contact.save_contact()
found_contact = Contact.find_by_number("0711223344")
self.assertEqual(found_contact.email,test_contact.email)
def test_contact_exists(self):
self.new_contact.save_contact()
test_contact = Contact("Test","user","0711223344","<EMAIL>")
test_contact.save_contact()
contact_exists = Contact.contact_exist("0711223344")
self.assertTrue(contact_exists)
def test_display_all_contacts(self):
self.assertEqual(Contact.display_contacts(),Contact.contact_list)
def test_copy_email(self):
self.new_contact.save_contact()
Contact.copy_email("0712345678")
self.assertEqual(self.new_contact.email,pyperclip.paste())
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"module_contact.Contact.copy_email",
"module_contact.Contact.display_contacts",
"pyperclip.paste",
"module_contact.Contact.find_by_number",
"module_contact.Contact",
"module_contact.Contact.contact_exist"
] | [((2113, 2128), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2126, 2128), False, 'import unittest\n'), ((154, 206), 'module_contact.Contact', 'Contact', (['"""James"""', '"""Muriuki"""', '"""0712345678"""', '"""<EMAIL>"""'], {}), "('James', 'Muriuki', '0712345678', '<EMAIL>')\n", (161, 206), False, 'from module_contact import Contact\n'), ((714, 762), 'module_contact.Contact', 'Contact', (['"""Test"""', '"""user"""', '"""0712345678"""', '"""<EMAIL>"""'], {}), "('Test', 'user', '0712345678', '<EMAIL>')\n", (721, 762), False, 'from module_contact import Contact\n'), ((1004, 1052), 'module_contact.Contact', 'Contact', (['"""Test"""', '"""user"""', '"""0712345678"""', '"""<EMAIL>"""'], {}), "('Test', 'user', '0712345678', '<EMAIL>')\n", (1011, 1052), False, 'from module_contact import Contact\n'), ((1293, 1341), 'module_contact.Contact', 'Contact', (['"""Test"""', '"""user"""', '"""0711223344"""', '"""<EMAIL>"""'], {}), "('Test', 'user', '0711223344', '<EMAIL>')\n", (1300, 1341), False, 'from module_contact import Contact\n'), ((1399, 1435), 'module_contact.Contact.find_by_number', 'Contact.find_by_number', (['"""0711223344"""'], {}), "('0711223344')\n", (1421, 1435), False, 'from module_contact import Contact\n'), ((1599, 1647), 'module_contact.Contact', 'Contact', (['"""Test"""', '"""user"""', '"""0711223344"""', '"""<EMAIL>"""'], {}), "('Test', 'user', '0711223344', '<EMAIL>')\n", (1606, 1647), False, 'from module_contact import Contact\n'), ((1707, 1742), 'module_contact.Contact.contact_exist', 'Contact.contact_exist', (['"""0711223344"""'], {}), "('0711223344')\n", (1728, 1742), False, 'from module_contact import Contact\n'), ((1977, 2009), 'module_contact.Contact.copy_email', 'Contact.copy_email', (['"""0712345678"""'], {}), "('0712345678')\n", (1995, 2009), False, 'from module_contact import Contact\n'), ((1849, 1875), 'module_contact.Contact.display_contacts', 'Contact.display_contacts', ([], {}), '()\n', (1873, 1875), False, 'from module_contact import Contact\n'), ((2058, 2075), 'pyperclip.paste', 'pyperclip.paste', ([], {}), '()\n', (2073, 2075), False, 'import pyperclip\n')] |
import unittest
import struct
from igvjs import app
class TestIGV(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
app.config['ALLOWED_EMAILS'] = 'test_emails.txt'
app.config['USES_OAUTH'] = True
app.config['PUBLIC_DIR'] = None
self.app = app.test_client()
def test_page_loads(self):
response = self.app.get('/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'<title>IGV - Integrative Genomics Viewer</title>', response.data)
def test_get_data_not_auth(self):
response = self.app.get('static/data/public/gstt1_sample.bam')
self.assertNotEqual(response, None)
self.assertEqual(response.status_code, 401)
def test_get_data_auth_disabled(self):
app.config['USES_OAUTH'] = False
response = self.app.get('static/data/public/gstt1_sample.bam')
self.assertEqual(response.status_code, 200)
def test_get_data_from_private_dir(self):
app.config['PUBLIC_DIR'] = '/static/js'
response = self.app.get('static/data/public/gstt1_sample.bam')
self.assertEqual(response.status_code, 401)
self.assertIn(b'Unauthorized', response.data)
def test_get_data_range_header(self):
start = 25
size = 100
response = self.app.get('../test/BufferedReaderTest.bin',
headers={"Range": "bytes={}-{}".format(start, start+size)})
for i in range(size):
expected_value = -128 + start + i
value = int(struct.unpack('b', response.data[i])[0])
self.assertEqual(value, expected_value)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"igvjs.app.test_client",
"struct.unpack"
] | [((1661, 1676), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1674, 1676), False, 'import unittest\n'), ((301, 318), 'igvjs.app.test_client', 'app.test_client', ([], {}), '()\n', (316, 318), False, 'from igvjs import app\n'), ((1535, 1571), 'struct.unpack', 'struct.unpack', (['"""b"""', 'response.data[i]'], {}), "('b', response.data[i])\n", (1548, 1571), False, 'import struct\n')] |
from urllib.request import urlopen
from io import BytesIO
import time
import tkinter as tk
from PIL import Image, ImageTk
import json
from rmq import RMQiface
urls = [
'https://cdn.revjet.com/s3/csp/1578955925683/shine.png',
'https://cdn.revjet.com/s3/csp/1578955925683/logo.svg',
'https://tpc.googlesyndication.com/daca_images/simgad/13865403217536204307',
'https://tpc.googlesyndication.com/daca_images/simgad/1948022358329940732?sqp=4sqPyQSWAUKTAQgAEhQNzczMPhUAAABAHQAAAAAlAAAAABgAIgoNAACAPxUAAIA_Kk8IWhABHQAAtEIgASgBMAY4A0CAwtcvSABQAFgAYFpwAngAgAEAiAEAkAEAnQEAAIA_oAEAqAEAsAGAreIEuAH___________8BxQEtsp0-MhoIvwMQ6gEYASABLQAAAD8wvwM46gFFAACAPw&rs=AOga4qmwNN2g28c_J8ehXFAoY4bOr7naGQ',
'https://tpc.googlesyndication.com/simgad/12366423408132574325',
'https://tpc.googlesyndication.com/simgad/3767484695346986263'
]
class HiddenRoot(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
#hackish way, essentially makes root window
#as small as possible but still "focused"
#enabling us to use the binding on <esc>
self.wm_geometry("0x0+0+0")
self.window = MySlideShow(self)
self.window.cycle()
class MySlideShow(tk.Toplevel):
def __init__(self, *args, **kwargs):
tk.Toplevel.__init__(self, *args, **kwargs)
with open('reader_config.json', 'r') as f:
config = json.load(f)
host = config['host']
usr = config['user']
pwd = config['password']
queue = config['filtered_images_queue_name']
self.mq = RMQiface(host, queue, usr, pwd)
self.img_error = Image.open('error.png')
self.img_none = Image.open('none.png')
#remove window decorations
# self.overrideredirect(True)
#save reference to photo so that garbage collection
#does not clear image variable in show_image()
self.persistent_image = None
self.imageList = []
self.pixNum = 0
#used to display as background image
self.label = tk.Label(self)
self.label.pack(side="top", fill="both", expand=True)
def cycle(self):
while True:
self.nexti()
time.sleep(0.01)
def nexti(self):
# import random
# url = random.choice(urls)
url = self.mq.read()
if url:
try:
img = Image.open(BytesIO(urlopen(url).read()))
self.showImage(img)
print(f'INFO:\tshowing {url}')
except Exception:
print(f'ERROR:\tnot a valid image: {url}')
else:
print('INFO:\tQueue is empty')
time.sleep(1.0)
def showImage(self, image):
img_w, img_h = image.size
scr_w, scr_h = self.winfo_screenwidth(), self.winfo_screenheight()
width, height = min(scr_w, img_w), min(scr_h, img_h)
image.thumbnail((width, height), Image.ANTIALIAS)
#set window size after scaling the original image up/down to fit screen
#removes the border on the image
scaled_w, scaled_h = image.size
self.wm_geometry("{}x{}+{}+{}".format(scaled_w,scaled_h,0,0))
# create new image
self.persistent_image = ImageTk.PhotoImage(image)
self.label.configure(image=self.persistent_image)
self.update()
slideShow = HiddenRoot()
# slideShow.window.attributes('-fullscreen', True)
# slideShow.window.attributes('-topmost', True)
slideShow.bind_all("<Escape>", lambda e: slideShow.destroy())
# slideShow.bind_all("<Return>", lambda e: slideShow.window.nexti()) # exit on esc
slideShow.update()
slideShow.mainloop()
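# Added note (not part of the original script): MySlideShow.__init__ expects a
# reader_config.json file next to the script with the keys read above, e.g.
# (hypothetical values):
# {
#     "host": "rabbitmq.example.com",
#     "user": "guest",
#     "password": "guest",
#     "filtered_images_queue_name": "filtered_images"
# }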
| [
"tkinter.Tk.__init__",
"PIL.ImageTk.PhotoImage",
"json.load",
"urllib.request.urlopen",
"tkinter.Toplevel.__init__",
"PIL.Image.open",
"time.sleep",
"rmq.RMQiface",
"tkinter.Label"
] | [((903, 923), 'tkinter.Tk.__init__', 'tk.Tk.__init__', (['self'], {}), '(self)\n', (917, 923), True, 'import tkinter as tk\n'), ((1263, 1306), 'tkinter.Toplevel.__init__', 'tk.Toplevel.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1283, 1306), True, 'import tkinter as tk\n'), ((1557, 1588), 'rmq.RMQiface', 'RMQiface', (['host', 'queue', 'usr', 'pwd'], {}), '(host, queue, usr, pwd)\n', (1565, 1588), False, 'from rmq import RMQiface\n'), ((1614, 1637), 'PIL.Image.open', 'Image.open', (['"""error.png"""'], {}), "('error.png')\n", (1624, 1637), False, 'from PIL import Image, ImageTk\n'), ((1662, 1684), 'PIL.Image.open', 'Image.open', (['"""none.png"""'], {}), "('none.png')\n", (1672, 1684), False, 'from PIL import Image, ImageTk\n'), ((2031, 2045), 'tkinter.Label', 'tk.Label', (['self'], {}), '(self)\n', (2039, 2045), True, 'import tkinter as tk\n'), ((3224, 3249), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['image'], {}), '(image)\n', (3242, 3249), False, 'from PIL import Image, ImageTk\n'), ((1380, 1392), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1389, 1392), False, 'import json\n'), ((2188, 2204), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2198, 2204), False, 'import time\n'), ((2653, 2668), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (2663, 2668), False, 'import time\n'), ((2390, 2402), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (2397, 2402), False, 'from urllib.request import urlopen\n')] |
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.html import escape
from django.db import transaction
from sysrev.api.PubMed import _get_authors, _get_date, url_from_id, read_papers_from_ids
from sysrev.api import PubMed
class Review(models.Model):
participants = models.ManyToManyField(User)
title = models.CharField(max_length=128, unique=False)
slug = models.SlugField()
description = models.TextField(default="")
date_created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
completed = models.BooleanField(default=False)
date_completed = models.DateTimeField(default=None, null=True)
query = models.TextField(default="")
def perform_query(self):
# TODO: discard existing papers if there are any
ids_from_query = PubMed.get_ids_from_query(self.query)
if self.paper_pool_counts()["abstract"] == 0:
Paper.create_papers_from_pubmed_ids(ids_from_query, self)
else:
papers = Paper.objects.filter(review=self)
existing_ids = []
for paper in papers:
existing_ids += [paper.pubmed_id]
existing_abstract_ids = []
for paper in papers.filter(pool="A"):
existing_abstract_ids += [paper.pubmed_id]
with transaction.atomic():
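                # Drop abstract-pool papers that no longer match the updated query.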
Paper.objects\
.filter(pubmed_id__in=existing_abstract_ids)\
.exclude(pubmed_id__in=ids_from_query)\
.delete()
ids_to_add = list(set(ids_from_query).difference(existing_ids))
if ids_to_add:
Paper.create_papers_from_pubmed_ids(ids_to_add, self)
def paper_pool_percentages(self):
# TODO: Typically, paper_pool_counts() gets called then this gets called.
# Seems a bit wasteful, as it ends up running multiple times and querying counts repeatedly
counts = self.paper_pool_counts()
total = float(counts["total"])
        if total != 0:
progress = ((counts["final"] + counts["rejected"]) / total) * 100.0
# minimum display percentage
min_percent = 5.0
for key in counts:
if key == "total":
continue
old = counts[key] = float(counts[key])
result = (counts[key] / total) * 100.0
if result != 0.0 and result < min_percent:
counts[key] = new = (min_percent * total) / 100.0
total += new - old
abstract = (counts["abstract"] / total) * 100.0
document = (counts["document"] / total) * 100.0
final = (counts["final"] / total) * 100.0
rejected = (counts["rejected"] / total) * 100.0
return {"abstract": abstract,
"document": document,
"final": final,
"rejected": rejected,
"progress": progress}
else:
return
def paper_pool_counts(self):
relevant_papers = Paper.objects.filter(review=self)
abstract_count = relevant_papers.filter(pool="A").count()
document_count = relevant_papers.filter(pool="D").count()
final_count = relevant_papers.filter(pool="F").count()
rejected_count = relevant_papers.filter(pool="R").count()
return {"abstract": abstract_count,
"document": document_count,
"final": final_count,
"rejected": rejected_count,
"remaining": abstract_count + document_count,
"total": abstract_count + document_count + final_count + rejected_count}
def invite(self, invitees):
for invitee in invitees:
user = None
if invitee.find("@") == -1:
user = User.objects.get(username=invitee)
else:
user = User.objects.get(email=invitee)
self.participants.add(user)
def clean(self):
if (not self.participants) or self.participants.count() < 1:
raise ValidationError('Need at least one participant')
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(Review, self).save()
def get_absolute_url(self):
return reverse('review_detail', args=[str(self.pk)])[:-1] + "-" + self.slug
def __unicode__(self):
return str(self.pk) + ": " + self.title
class Paper(models.Model):
ABSTRACT_POOL = 'A'
DOCUMENT_POOL = 'D'
FINAL_POOL = 'F'
REJECTED = 'R'
POOLS = (
(ABSTRACT_POOL, 'Abstract pool'),
(DOCUMENT_POOL, 'Document pool'),
(FINAL_POOL, 'Final pool'),
(REJECTED, 'Rejected')
)
review = models.ForeignKey(Review)
title = models.CharField(max_length=128)
authors = models.CharField(max_length=128)
abstract = models.TextField(default="")
publish_date = models.DateField(null=True)
url = models.URLField(default="")
pubmed_id = models.CharField(max_length=16)
notes = models.TextField(default="")
pool = models.CharField(max_length=1, choices=POOLS, default=ABSTRACT_POOL)
@staticmethod
def create_paper_from_data(data, review, pool):
"""Creates Paper model from given data, review and pool"""
medlineCitation = data[u'MedlineCitation']
article = medlineCitation[u'Article']
title = article[u'ArticleTitle'].lstrip("[").rstrip("].")
pubmed_id = medlineCitation[u'PMID']
paper = Paper.objects.get_or_create(review=review, title=title, pubmed_id=pubmed_id)[0]
paper.review = review
paper.authors = _get_authors(article)
abstractText = ""
try:
for stringElement in article[u'Abstract'][u'AbstractText']:
try:
abstractText += "<h4>" + escape(stringElement.attributes[u'Label']) + "</h4>"
except AttributeError:
pass
abstractText += escape(stringElement) + "\n\n"
except KeyError:
pass
paper.abstract = abstractText
paper.publish_date = _get_date(medlineCitation)
paper.url = url_from_id(pubmed_id)
paper.notes = ""
paper.pool = pool
paper.save()
return paper
@staticmethod
def create_papers_from_pubmed_ids(ids, review, pool='A'):
"""Creates papers from all of the given ids, in the given review and pool"""
papers = read_papers_from_ids(ids)
# Commit all papers in single transaction
# Improves performance, as django won't automatically commit after every save call when creating lots of papers
with transaction.atomic():
return map(lambda data: Paper.create_paper_from_data(data, review, pool), papers)
def get_absolute_url(self):
return self.review.get_absolute_url() + "/" + str(self.pk)
def __unicode__(self):
return str(self.review) + " - " + self.title
| [
"django.db.models.TextField",
"django.core.exceptions.ValidationError",
"django.db.models.ForeignKey",
"django.db.models.DateField",
"django.db.transaction.atomic",
"django.contrib.auth.models.User.objects.get",
"django.db.models.BooleanField",
"sysrev.api.PubMed.get_ids_from_query",
"sysrev.api.PubMed._get_authors",
"django.db.models.ManyToManyField",
"sysrev.api.PubMed._get_date",
"sysrev.api.PubMed.url_from_id",
"django.template.defaultfilters.slugify",
"django.db.models.DateTimeField",
"sysrev.api.PubMed.read_papers_from_ids",
"django.db.models.URLField",
"django.db.models.CharField",
"django.db.models.SlugField",
"django.utils.html.escape"
] | [((488, 516), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {}), '(User)\n', (510, 516), False, 'from django.db import models\n'), ((542, 588), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'unique': '(False)'}), '(max_length=128, unique=False)\n', (558, 588), False, 'from django.db import models\n'), ((614, 632), 'django.db.models.SlugField', 'models.SlugField', ([], {}), '()\n', (630, 632), False, 'from django.db import models\n'), ((658, 686), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (674, 686), False, 'from django.db import models\n'), ((712, 751), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (732, 751), False, 'from django.db import models\n'), ((777, 812), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (797, 812), False, 'from django.db import models\n'), ((838, 872), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (857, 872), False, 'from django.db import models\n'), ((898, 943), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)'}), '(default=None, null=True)\n', (918, 943), False, 'from django.db import models\n'), ((969, 997), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (985, 997), False, 'from django.db import models\n'), ((5109, 5134), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Review'], {}), '(Review)\n', (5126, 5134), False, 'from django.db import models\n'), ((5154, 5186), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (5170, 5186), False, 'from django.db import models\n'), ((5206, 5238), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (5222, 5238), False, 'from django.db import models\n'), ((5258, 5286), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (5274, 5286), False, 'from django.db import models\n'), ((5306, 5333), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)'}), '(null=True)\n', (5322, 5333), False, 'from django.db import models\n'), ((5353, 5380), 'django.db.models.URLField', 'models.URLField', ([], {'default': '""""""'}), "(default='')\n", (5368, 5380), False, 'from django.db import models\n'), ((5400, 5431), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (5416, 5431), False, 'from django.db import models\n'), ((5451, 5479), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (5467, 5479), False, 'from django.db import models\n'), ((5499, 5567), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'POOLS', 'default': 'ABSTRACT_POOL'}), '(max_length=1, choices=POOLS, default=ABSTRACT_POOL)\n', (5515, 5567), False, 'from django.db import models\n'), ((1111, 1148), 'sysrev.api.PubMed.get_ids_from_query', 'PubMed.get_ids_from_query', (['self.query'], {}), '(self.query)\n', (1136, 1148), False, 'from sysrev.api import PubMed\n'), ((4529, 4548), 'django.template.defaultfilters.slugify', 'slugify', (['self.title'], {}), '(self.title)\n', (4536, 4548), False, 'from django.template.defaultfilters import slugify\n'), ((6064, 6085), 
'sysrev.api.PubMed._get_authors', '_get_authors', (['article'], {}), '(article)\n', (6076, 6085), False, 'from sysrev.api.PubMed import _get_authors, _get_date, url_from_id, read_papers_from_ids\n'), ((6555, 6581), 'sysrev.api.PubMed._get_date', '_get_date', (['medlineCitation'], {}), '(medlineCitation)\n', (6564, 6581), False, 'from sysrev.api.PubMed import _get_authors, _get_date, url_from_id, read_papers_from_ids\n'), ((6602, 6624), 'sysrev.api.PubMed.url_from_id', 'url_from_id', (['pubmed_id'], {}), '(pubmed_id)\n', (6613, 6624), False, 'from sysrev.api.PubMed import _get_authors, _get_date, url_from_id, read_papers_from_ids\n'), ((6901, 6926), 'sysrev.api.PubMed.read_papers_from_ids', 'read_papers_from_ids', (['ids'], {}), '(ids)\n', (6921, 6926), False, 'from sysrev.api.PubMed import _get_authors, _get_date, url_from_id, read_papers_from_ids\n'), ((4422, 4470), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Need at least one participant"""'], {}), "('Need at least one participant')\n", (4437, 4470), False, 'from django.core.exceptions import ValidationError\n'), ((7112, 7132), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (7130, 7132), False, 'from django.db import transaction\n'), ((1621, 1641), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1639, 1641), False, 'from django.db import transaction\n'), ((4165, 4199), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': 'invitee'}), '(username=invitee)\n', (4181, 4199), False, 'from django.contrib.auth.models import User\n'), ((4241, 4272), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'email': 'invitee'}), '(email=invitee)\n', (4257, 4272), False, 'from django.contrib.auth.models import User\n'), ((6414, 6435), 'django.utils.html.escape', 'escape', (['stringElement'], {}), '(stringElement)\n', (6420, 6435), False, 'from django.utils.html import escape\n'), ((6264, 6306), 'django.utils.html.escape', 'escape', (["stringElement.attributes[u'Label']"], {}), "(stringElement.attributes[u'Label'])\n", (6270, 6306), False, 'from django.utils.html import escape\n')] |
import sys
import bpy
if __name__ == "__main__":
args = sys.argv[sys.argv.index('--'):]
print(args)
bpy.ops.import_scene.gltf(filepath=args[1])
obj = bpy.context.active_object
mod = obj.modifiers.new("CorrectiveSmooth", 'CORRECTIVE_SMOOTH')
mod.factor = 0.1
mod.scale = 1.5
bpy.ops.object.modifier_apply(modifier="CorrectiveSmooth")
bpy.ops.export_scene.gltf(
filepath=args[2],
export_normals=False,
export_colors=False,
use_selection=True
)
| [
"bpy.ops.export_scene.gltf",
"sys.argv.index",
"bpy.ops.import_scene.gltf",
"bpy.ops.object.modifier_apply"
] | [((114, 157), 'bpy.ops.import_scene.gltf', 'bpy.ops.import_scene.gltf', ([], {'filepath': 'args[1]'}), '(filepath=args[1])\n', (139, 157), False, 'import bpy\n'), ((309, 367), 'bpy.ops.object.modifier_apply', 'bpy.ops.object.modifier_apply', ([], {'modifier': '"""CorrectiveSmooth"""'}), "(modifier='CorrectiveSmooth')\n", (338, 367), False, 'import bpy\n'), ((372, 482), 'bpy.ops.export_scene.gltf', 'bpy.ops.export_scene.gltf', ([], {'filepath': 'args[2]', 'export_normals': '(False)', 'export_colors': '(False)', 'use_selection': '(True)'}), '(filepath=args[2], export_normals=False,\n export_colors=False, use_selection=True)\n', (397, 482), False, 'import bpy\n'), ((71, 91), 'sys.argv.index', 'sys.argv.index', (['"""--"""'], {}), "('--')\n", (85, 91), False, 'import sys\n')] |
import os
import sys
import xml.etree.ElementTree as ET
from pyproj import Proj,Transformer
def createSCANeRMDL(fileName,type):
    # Create the MDL file
templateFileName = "template/template_"+type+".mdl"
templateFile = open(templateFileName, "r")
template= templateFile.read()
content = template.replace('template_vehicle_name', os.path.basename(fileName))
f = open(fileName + ".mdl", "w")
f.write(content)
templateFile.close()
f.close()
def createSCANePlayer(fileName):
    # Create the Player file
templateFileName = "template/template.vhplayer"
templateFile = open(templateFileName, "r")
template= templateFile.read()
content = template.replace('template_vehicle_name', os.path.basename(fileName))
f = open(fileName + ".vhplayer", "w")
f.write(content)
templateFile.close()
f.close()
def createGPX(fileName, root, startNode):
    # Create the GPX file
f = open(fileName + ".gpx", "w")
f.write( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
f.write( "<gpx version=\"1.1\" creator=\"GpxTraceNet6.2\"\n")
f.write( "xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://www.topografix.com/GPX/1/1\"\n")
f.write( "xsi:schemaLocation=\"http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd\">\n")
f.write( "<trk>\n")
f.write( "<name> RDE </name>\n")
f.write( "<trkseg>\n")
for geo in root.findall(startNode):
#print geo.attrib
strLine = "<trkpt lat=\""+geo.get('lat')+"\" lon=\"" + geo.get('long') + "\"></trkpt>"
f.write( strLine )
f.write( "\n" )
f.write( "</trkseg>\n")
f.write( "</trk>\n")
f.write( "</gpx>\n")
f.close()
def writeCSVLine(f, strLine, col):
for icol in col[:-1]:
strLine += str(icol)+";"
strLine+=str(col[-1])+"\n"
f.write( strLine )
def createCSV(fileName, root, startNode, inProj, outProj):
offsetX = 171338.11
offsetY = 388410.20
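    # Fixed offsets subtracted from the projected (EPSG:28992) coordinates below.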
f = open(fileName+ ".csv", "w")
col = [0.0]*36
strLine=""
writeCSVLine(f, strLine, col)
strLine=""
writeCSVLine(f, strLine, col)
for geo in root.findall(startNode):
transformer=Transformer.from_proj(inProj,outProj)
X,Y=transformer.transform(geo.get('long'),geo.get('lat'))
x=X-offsetX
y=Y-offsetY
yaw = 0.017453*float(geo.get('course'))
col = [0.0]*36
col[0]=geo.get('secs')
col[1]=col[7]=x
col[2]=col[8]=y
col[6]=col[12]=yaw
strLine=""
writeCSVLine(f, strLine, col)
f.close()
def main():
#Parameters
if len(sys.argv) < 2:
print('argument missing : name of the file to import')
exit()
infilename = sys.argv[1]
outfoldername = os.path.splitext(infilename)[0] + "_vhlplayer/"
try:
os.mkdir(outfoldername)
except:
pass
#Projections
inProj=Proj(proj='latlong',datum='WGS84')
outProj=Proj(init='epsg:28992', towgs84='565.417,50.3319,465.552,-0.398957,0.343988,-1.8774,4.0725')
offsetX = 171338.11
offsetY = 388410.20
tree = ET.parse(infilename)
root = tree.getroot()
    # Create the GPX file
createGPX(outfoldername + "hostvehicle", root, ".//hostvehicle/traj/geo")
createCSV(outfoldername + "hostvehicle", root, ".//hostvehicle/traj/geo", inProj, outProj)
createSCANeRMDL(outfoldername + "hostvehicle","car")
createSCANePlayer(outfoldername + "hostvehicle")
if __name__ == '__main__':
sys.exit(main()) | [
"xml.etree.ElementTree.parse",
"os.mkdir",
"pyproj.Transformer.from_proj",
"os.path.basename",
"pyproj.Proj",
"os.path.splitext"
] | [((2777, 2812), 'pyproj.Proj', 'Proj', ([], {'proj': '"""latlong"""', 'datum': '"""WGS84"""'}), "(proj='latlong', datum='WGS84')\n", (2781, 2812), False, 'from pyproj import Proj, Transformer\n'), ((2822, 2919), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:28992"""', 'towgs84': '"""565.417,50.3319,465.552,-0.398957,0.343988,-1.8774,4.0725"""'}), "(init='epsg:28992', towgs84=\n '565.417,50.3319,465.552,-0.398957,0.343988,-1.8774,4.0725')\n", (2826, 2919), False, 'from pyproj import Proj, Transformer\n'), ((2970, 2990), 'xml.etree.ElementTree.parse', 'ET.parse', (['infilename'], {}), '(infilename)\n', (2978, 2990), True, 'import xml.etree.ElementTree as ET\n'), ((354, 380), 'os.path.basename', 'os.path.basename', (['fileName'], {}), '(fileName)\n', (370, 380), False, 'import os\n'), ((726, 752), 'os.path.basename', 'os.path.basename', (['fileName'], {}), '(fileName)\n', (742, 752), False, 'import os\n'), ((2136, 2174), 'pyproj.Transformer.from_proj', 'Transformer.from_proj', (['inProj', 'outProj'], {}), '(inProj, outProj)\n', (2157, 2174), False, 'from pyproj import Proj, Transformer\n'), ((2709, 2732), 'os.mkdir', 'os.mkdir', (['outfoldername'], {}), '(outfoldername)\n', (2717, 2732), False, 'import os\n'), ((2651, 2679), 'os.path.splitext', 'os.path.splitext', (['infilename'], {}), '(infilename)\n', (2667, 2679), False, 'import os\n')] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django import http
class LoginRequiredJSONMixin(LoginRequiredMixin):
"""Verify that the current user is authenticated."""
def handle_no_permission(self):
return http.JsonResponse({'code': 400, 'errmsg': '用户未登录'}) | [
"django.http.JsonResponse"
] | [((242, 293), 'django.http.JsonResponse', 'http.JsonResponse', (["{'code': 400, 'errmsg': '用户未登录'}"], {}), "({'code': 400, 'errmsg': '用户未登录'})\n", (259, 293), False, 'from django import http\n')] |
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Worklist series member mapper.
"""
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from thelma.entities.liquidtransfer import PlannedWorklist
from thelma.entities.liquidtransfer import WorklistSeries
from thelma.entities.liquidtransfer import WorklistSeriesMember
__docformat__ = 'reStructuredText en'
__all__ = ['create_mapper']
def create_mapper(worklist_series_member_tbl):
"Mapper factory."
m = mapper(WorklistSeriesMember, worklist_series_member_tbl,
properties=dict(
worklist_series=relationship(WorklistSeries,
uselist=False,
back_populates='worklist_series_members'),
planned_worklist=relationship(PlannedWorklist,
uselist=False,
back_populates='worklist_series_member',
cascade='all,delete,delete-orphan',
single_parent=True),
)
)
return m
| [
"sqlalchemy.orm.relationship"
] | [((726, 816), 'sqlalchemy.orm.relationship', 'relationship', (['WorklistSeries'], {'uselist': '(False)', 'back_populates': '"""worklist_series_members"""'}), "(WorklistSeries, uselist=False, back_populates=\n 'worklist_series_members')\n", (738, 816), False, 'from sqlalchemy.orm import relationship\n'), ((898, 1048), 'sqlalchemy.orm.relationship', 'relationship', (['PlannedWorklist'], {'uselist': '(False)', 'back_populates': '"""worklist_series_member"""', 'cascade': '"""all,delete,delete-orphan"""', 'single_parent': '(True)'}), "(PlannedWorklist, uselist=False, back_populates=\n 'worklist_series_member', cascade='all,delete,delete-orphan',\n single_parent=True)\n", (910, 1048), False, 'from sqlalchemy.orm import relationship\n')] |
import logging
import subprocess
from os import path, remove, rename
import tempfile
from textwrap import dedent
__all__ = ['call_astrometry', 'add_astrometry']
logger = logging.getLogger(__name__)
def call_astrometry(filename, sextractor=False,
custom_sextractor_config=False, feder_settings=True,
no_plots=True, minimal_output=True,
save_wcs=False, verify=None,
ra_dec=None, overwrite=False,
wcs_reference_image_center=True,
odds_ratio=None,
astrometry_config=None,
additional_args=None):
"""
Wrapper around astrometry.net solve-field.
Parameters
----------
sextractor : bool or str, optional
``True`` to use `sextractor`, or a ``str`` with the
path to sextractor.
custom_sextractor_config : bool, optional
If ``True``, use a sexractor configuration file customized for Feder
images.
feder_settings : bool, optional
Set True if you want to use plate scale appropriate for Feder
Observatory Apogee Alta U9 camera.
no_plots : bool, optional
``True`` to suppress astrometry.net generation of
plots (pngs showing object location and more)
minimal_output : bool, optional
If ``True``, suppress, as separate files, output of: WCS
header, RA/Dec object list, matching objects list, but see
also `save_wcs`
save_wcs : bool, optional
If ``True``, save WCS header even if other output is suppressed
        with `minimal_output`
verify : str, optional
Name of a WCS header to be used as a first guess
for the astrometry fit; if this plate solution does not work
the solution is found as though `verify` had not been specified.
ra_dec : list or tuple of float
        (RA, Dec); also limits the search radius to 0.5 degrees.
overwrite : bool, optional
If ``True``, perform astrometry even if astrometry.net files from a
previous run are present.
wcs_reference_image_center :
If ``True``, force the WCS reference point in the image to be the
image center.
odds_ratio : float, optional
The odds ratio to use for a successful solve. Default is to use the
default in `solve-field`.
astrometry_config : str, optional
        Name of the configuration file to pass to astrometry.net's solve-field.
additional_args : str or list of str, optional
Additional arguments to pass to `solve-field`
"""
solve_field = ["solve-field"]
option_list = []
option_list.append("--obj 100")
if feder_settings:
option_list.append(
"--scale-low 0.5 --scale-high 0.6 --scale-units arcsecperpix")
if additional_args is not None:
if isinstance(additional_args, str):
add_ons = [additional_args]
else:
add_ons = additional_args
option_list.extend(add_ons)
if isinstance(sextractor, str):
option_list.append("--source-extractor-path " + sextractor)
elif sextractor:
option_list.append("--use-source-extractor")
if no_plots:
option_list.append("--no-plot")
if minimal_output:
option_list.append("--corr none --rdls none --match none")
if not save_wcs:
option_list.append("--wcs none")
if ra_dec is not None:
option_list.append("--ra %s --dec %s --radius 0.5" % ra_dec)
if overwrite:
option_list.append("--overwrite")
if wcs_reference_image_center:
option_list.append("--crpix-center")
options = " ".join(option_list)
solve_field.extend(options.split())
if custom_sextractor_config:
tmp_location = tempfile.mkdtemp()
param_location = path.join(tmp_location, 'default.param')
config_location = path.join(tmp_location, 'feder.config')
config_contents = SExtractor_config.format(param_file=param_location)
with open(config_location, 'w') as f:
f.write(config_contents)
with open(param_location, 'w') as f:
contents = """
X_IMAGE
Y_IMAGE
MAG_AUTO
FLUX_AUTO
"""
f.write(dedent(contents))
additional_solve_args = [
'--source-extractor-config', config_location,
'--x-column', 'X_IMAGE',
'--y-column', 'Y_IMAGE',
'--sort-column', 'MAG_AUTO',
'--sort-ascending'
]
solve_field.extend(additional_solve_args)
if odds_ratio is not None:
solve_field.append('--odds-to-solve')
        solve_field.append(str(odds_ratio))
if astrometry_config is not None:
solve_field.append('--config')
solve_field.append(astrometry_config)
# kludge to handle case when path of verify file contains a space--split
# above does not work for that case.
if verify is not None:
if verify:
solve_field.append("--verify")
solve_field.append("%s" % verify)
else:
solve_field.append("--no-verify")
solve_field.extend([filename])
print(' '.join(solve_field))
logger.info(' '.join(solve_field))
try:
solve_field_output = subprocess.check_output(solve_field,
stderr=subprocess.STDOUT)
return_status = 0
log_level = logging.DEBUG
except subprocess.CalledProcessError as e:
return_status = e.returncode
solve_field_output = 'Output from astrometry.net:\n' + str(e.output)
log_level = logging.WARN
logger.warning('Adding astrometry failed for %s', filename)
raise e
logger.log(log_level, solve_field_output)
return return_status
def add_astrometry(filename, overwrite=False, ra_dec=None,
note_failure=False, save_wcs=False,
verify=None, try_builtin_source_finder=False,
custom_sextractor=False,
odds_ratio=None,
astrometry_config=None,
camera='',
avoid_pyfits=False,
no_source_extractor=False,
solve_field_args=None):
"""Add WCS headers to FITS file using astrometry.net
Parameters
----------
overwrite : bool, optional
Set ``True`` to overwrite the original file. If `False`,
the file astrometry.net generates is kept.
ra_dec : list or tuple of float or str
(RA, Dec) of field center as either decimal or sexagesimal; also
        limits the search radius to 0.5 degrees.
note_failure : bool, optional
If ``True``, create a file with extension "failed" if astrometry.net
        fails. The "failed" file contains the error messages generated by
astrometry.net.
try_builtin_source_finder : bool
If true, try using astrometry.net's built-in source extractor if
sextractor fails.
save_wcs :
verify :
See :func:`call_astrometry`
camera : str, one of ['celestron', 'u9', 'cp16'], optional
        Name of camera; determines the pixel scale used in the solve. Default
is to use `'u9'`.
avoid_pyfits : bool
Add arguments to solve-field to avoid calls to pyfits.BinTableHDU.
See https://groups.google.com/forum/#!topic/astrometry/AT21x6zVAJo
Returns
-------
bool
``True`` on success.
Notes
-----
Tries a couple strategies before giving up: first sextractor,
then, if that fails, astrometry.net's built-in source extractor.
It also cleans up after astrometry.net, keeping only the new FITS
file it generates, the .solved file, and, if desired, a ".failed" file
for fields which it fails to solve.
For more flexible invocation of astrometry.net, see :func:`call_astrometry`
"""
base, ext = path.splitext(filename)
# All are in arcsec per pixel, values are approximate
camera_pixel_scales = {
'celestron': 0.3,
'u9': 0.55,
'cp16': 0.55
}
if camera:
use_feder = False
scale = camera_pixel_scales[camera]
scale_options = ("--scale-low {low} --scale-high {high} "
"--scale-units arcsecperpix".format(low=0.8*scale, high=1.2 * scale))
else:
use_feder = True
scale_options = ''
if avoid_pyfits:
pyfits_options = '--no-remove-lines --uniformize 0'
else:
pyfits_options = ''
additional_opts = ' '.join([scale_options,
pyfits_options])
if solve_field_args is not None:
additional_opts = additional_opts.split()
additional_opts.extend(solve_field_args)
logger.info('BEGIN ADDING ASTROMETRY on {0}'.format(filename))
try:
logger.debug('About to call call_astrometry')
solved_field = (call_astrometry(filename,
sextractor=not no_source_extractor,
ra_dec=ra_dec,
save_wcs=save_wcs, verify=verify,
custom_sextractor_config=custom_sextractor,
odds_ratio=odds_ratio,
astrometry_config=astrometry_config,
feder_settings=use_feder,
additional_args=additional_opts)
== 0)
except subprocess.CalledProcessError as e:
logger.debug('Failed with error')
failed_details = e.output
solved_field = False
if (not solved_field) and try_builtin_source_finder:
log_msg = 'Astrometry failed using sextractor, trying built-in '
log_msg += 'source finder'
logger.info(log_msg)
try:
solved_field = (call_astrometry(filename, ra_dec=ra_dec,
overwrite=True,
save_wcs=save_wcs, verify=verify)
== 0)
except subprocess.CalledProcessError as e:
failed_details = e.output
solved_field = False
if solved_field:
logger.info('Adding astrometry succeeded')
else:
logger.warning('Adding astrometry failed for file %s', filename)
if overwrite and solved_field:
logger.info('Overwriting original file with image with astrometry')
try:
rename(base + '.new', filename)
except OSError as e:
logger.error(e)
return False
# whether we succeeded or failed, clean up
try:
remove(base + '.axy')
except OSError:
pass
if solved_field:
try:
remove(base + '-indx.xyls')
remove(base + '.solved')
except OSError:
pass
if note_failure and not solved_field:
try:
f = open(base + '.failed', 'wb')
f.write(failed_details)
f.close()
except IOError as e:
logger.error('Unable to save output of astrometry.net %s', e)
pass
logger.info('END ADDING ASTROMETRY for %s', filename)
return solved_field
SExtractor_config = """
# Configuration file for SExtractor 2.19.5 based on default by EB 2014-11-26
#
# modification was to change DETECT_MINAREA and turn off filter convolution
#-------------------------------- Catalog ------------------------------------
PARAMETERS_NAME {param_file} # name of the file containing catalog contents
#------------------------------- Extraction ----------------------------------
DETECT_TYPE CCD # CCD (linear) or PHOTO (with gamma correction)
DETECT_MINAREA 15 # min. # of pixels above threshold
DETECT_THRESH 1.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
ANALYSIS_THRESH 1.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
FILTER N # apply filter for detection (Y or N)?
FILTER_NAME default.conv # name of the file containing the filter
DEBLEND_NTHRESH 32 # Number of deblending sub-thresholds
DEBLEND_MINCONT 0.005 # Minimum contrast parameter for deblending
CLEAN Y # Clean spurious detections? (Y or N)?
CLEAN_PARAM 1.0 # Cleaning efficiency
MASK_TYPE CORRECT # type of detection MASKing: can be one of
# NONE, BLANK or CORRECT
#------------------------------ Photometry -----------------------------------
PHOT_APERTURES 10 # MAG_APER aperture diameter(s) in pixels
PHOT_AUTOPARAMS 2.5, 3.5 # MAG_AUTO parameters: <Kron_fact>,<min_radius>
PHOT_PETROPARAMS 2.0, 3.5 # MAG_PETRO parameters: <Petrosian_fact>,
# <min_radius>
SATUR_LEVEL 50000.0 # level (in ADUs) at which arises saturation
SATUR_KEY SATURATE # keyword for saturation level (in ADUs)
MAG_ZEROPOINT 0.0 # magnitude zero-point
MAG_GAMMA 4.0 # gamma of emulsion (for photographic scans)
GAIN 0.0 # detector gain in e-/ADU
GAIN_KEY GAIN # keyword for detector gain in e-/ADU
PIXEL_SCALE 1.0 # size of pixel in arcsec (0=use FITS WCS info)
#------------------------- Star/Galaxy Separation ----------------------------
SEEING_FWHM 1.2 # stellar FWHM in arcsec
STARNNW_NAME default.nnw # Neural-Network_Weight table filename
#------------------------------ Background -----------------------------------
BACK_SIZE 64 # Background mesh: <size> or <width>,<height>
BACK_FILTERSIZE 3 # Background filter: <size> or <width>,<height>
BACKPHOTO_TYPE GLOBAL # can be GLOBAL or LOCAL
#------------------------------ Check Image ----------------------------------
CHECKIMAGE_TYPE NONE # can be NONE, BACKGROUND, BACKGROUND_RMS,
# MINIBACKGROUND, MINIBACK_RMS, -BACKGROUND,
# FILTERED, OBJECTS, -OBJECTS, SEGMENTATION,
# or APERTURES
CHECKIMAGE_NAME check.fits # Filename for the check-image
#--------------------- Memory (change with caution!) -------------------------
MEMORY_OBJSTACK 3000 # number of objects in stack
MEMORY_PIXSTACK 300000 # number of pixels in stack
MEMORY_BUFSIZE 1024 # number of lines in buffer
#----------------------------- Miscellaneous ---------------------------------
VERBOSE_TYPE NORMAL # can be QUIET, NORMAL or FULL
HEADER_SUFFIX .head # Filename extension for additional headers
WRITE_XML N # Write XML file (Y/N)?
XML_NAME sex.xml # Filename for XML output
"""
| [
"textwrap.dedent",
"os.remove",
"subprocess.check_output",
"os.rename",
"tempfile.mkdtemp",
"os.path.splitext",
"os.path.join",
"logging.getLogger"
] | [((172, 199), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (189, 199), False, 'import logging\n'), ((7954, 7977), 'os.path.splitext', 'path.splitext', (['filename'], {}), '(filename)\n', (7967, 7977), False, 'from os import path, remove, rename\n'), ((3765, 3783), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3781, 3783), False, 'import tempfile\n'), ((3809, 3849), 'os.path.join', 'path.join', (['tmp_location', '"""default.param"""'], {}), "(tmp_location, 'default.param')\n", (3818, 3849), False, 'from os import path, remove, rename\n'), ((3876, 3915), 'os.path.join', 'path.join', (['tmp_location', '"""feder.config"""'], {}), "(tmp_location, 'feder.config')\n", (3885, 3915), False, 'from os import path, remove, rename\n'), ((5306, 5368), 'subprocess.check_output', 'subprocess.check_output', (['solve_field'], {'stderr': 'subprocess.STDOUT'}), '(solve_field, stderr=subprocess.STDOUT)\n', (5329, 5368), False, 'import subprocess\n'), ((10775, 10796), 'os.remove', 'remove', (["(base + '.axy')"], {}), "(base + '.axy')\n", (10781, 10796), False, 'from os import path, remove, rename\n'), ((10596, 10627), 'os.rename', 'rename', (["(base + '.new')", 'filename'], {}), "(base + '.new', filename)\n", (10602, 10627), False, 'from os import path, remove, rename\n'), ((10877, 10904), 'os.remove', 'remove', (["(base + '-indx.xyls')"], {}), "(base + '-indx.xyls')\n", (10883, 10904), False, 'from os import path, remove, rename\n'), ((10917, 10941), 'os.remove', 'remove', (["(base + '.solved')"], {}), "(base + '.solved')\n", (10923, 10941), False, 'from os import path, remove, rename\n'), ((4285, 4301), 'textwrap.dedent', 'dedent', (['contents'], {}), '(contents)\n', (4291, 4301), False, 'from textwrap import dedent\n')] |
import scipy.io
import numpy as np
import os
import random
import json
import pdb
def check_image_voxel_match(cls):
root = os.path.abspath('.')
out_dir = os.path.join(root, '../output', cls)
# out_dir = '/Users/heqian/Research/projects/primitive-based_3d/data/all_classes/chair'
voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
voxel_dirs = {x: os.path.join(voxel_txt_dir, 'voxel_{}.txt'.format(x))
for x in ['train', 'val', 'test']}
img_dirs = {x: os.path.join(voxel_txt_dir, '{}.txt'.format(x))
for x in ['train', 'val', 'test']}
with open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'r') as f:
voxel_all = f.readlines()
voxel_names = {}
img_names = {}
for phase in ['train', 'val', 'test']:
with open(os.path.join(voxel_dirs[phase]), 'r') as f:
voxel_names[phase] = f.readlines()
with open(os.path.join(img_dirs[phase]), 'r') as f:
img_names[phase] = f.readlines()
# pix3d_dir = os.path.join(root, '../input/pix3d.json')
# pix3d = json.load(open(pix3d_dir, 'r'))
match_id = scipy.io.loadmat(os.path.join(out_dir, 'img_voxel_idxs.mat'))
img_match_vox = {x: [] for x in ['train', 'val', 'test']}
for phase in ['train', 'val', 'test']:
for img in img_names[phase]:
id_img_ori = int(img.split('.')[0]) # 1-3839
img_id_real = list(match_id['img_idxs'][0]).index(id_img_ori) # 0-3493
voxel_id_ori = match_id['voxel_idxs'][0, img_id_real] # 1-216
vox = voxel_all[voxel_id_ori - 1]
img_match_vox[phase].append(vox)
# img_match_vox[phase].append('model/'+vox)
img_match_vox = {x: sorted(set(img_match_vox[x])) for x in ['train', 'val', 'test']}
# pdb.set_trace()
for phase in ['train', 'val', 'test']:
if len(set(voxel_names[phase]).difference(set(img_match_vox[phase]))) > 0:
print('error')
if len(set(img_match_vox[phase]).difference(set(voxel_names[phase]))) > 0:
print('error')
for name in voxel_names[phase]:
if name not in img_match_vox[phase]:
print(name)
for name in img_match_vox[phase]:
if name not in voxel_names[phase]:
print(name)
def split_voxel_then_image(cls):
# data_dir = '/Users/heqian/Research/projects/3dprnn/data/pix3d'
split_by_model = True # True-split by 216 models, False-split by 34 images
## split voxels into train, val, test
root = os.path.abspath('.')
out_dir = os.path.join(root, '../output', cls)
voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
if not os.path.exists(voxel_txt_dir):
os.makedirs(voxel_txt_dir)
voxel_train_txtpath = os.path.join(voxel_txt_dir, 'voxel_train.txt')
voxel_val_txtpath = os.path.join(voxel_txt_dir, 'voxel_val.txt')
voxel_test_txtpath = os.path.join(voxel_txt_dir, 'voxel_test.txt')
voxel_ftrain = open(voxel_train_txtpath, 'w')
voxel_fval = open(voxel_val_txtpath, 'w')
voxel_ftest = open(voxel_test_txtpath, 'w')
voxel_ltrain = []
voxel_lval = []
voxel_ltest = []
voxel_ctrain = 0
voxel_cval = 0
voxel_ctest = 0
with open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'r') as f:
voxel_dirs = f.readlines()
for i in range(len(voxel_dirs)):
voxel_dirs[i] = voxel_dirs[i].strip()
voxel_dirs[i] = voxel_dirs[i]
tmp = random.random()
if tmp < 0.65:
voxel_ftrain.write(voxel_dirs[i]+'\n')
voxel_ltrain.append(voxel_dirs[i])
voxel_ctrain += 1
elif tmp >= 0.65 and tmp < 0.8:
voxel_fval.write(voxel_dirs[i]+'\n')
voxel_lval.append(voxel_dirs[i])
voxel_cval += 1
else:
voxel_ftest.write(voxel_dirs[i]+'\n')
voxel_ltest.append(voxel_dirs[i])
voxel_ctest += 1
voxel_ftrain.close()
voxel_fval.close()
voxel_ftest.close()
## split images into train, val, test, according to voxels
# img_voxel_idxs = []
img_idxs = []
voxel_idxs = []
train_txtpath = os.path.join(voxel_txt_dir, 'train.txt')
val_txtpath = os.path.join(voxel_txt_dir, 'val.txt')
test_txtpath = os.path.join(voxel_txt_dir, 'test.txt')
ftrain = open(train_txtpath, 'w')
fval = open(val_txtpath, 'w')
ftest = open(test_txtpath, 'w')
ctrain = 0
cval = 0
ctest = 0
pix3d_dir = os.path.join(root, '../input/pix3d.json')
pix3d = json.load(open(pix3d_dir, 'r'))
for i in range(len(pix3d)):
# if json_file[i]['img'][4:9] == 'chair' and json_file[i]['voxel'] not in voxel_dirs:
# print(json_file[i]['img'], json_file[i]['voxel'])
voxel_dir = pix3d[i]['voxel'][6:]
if voxel_dir in voxel_dirs:
# pdb.set_trace()
img_file = pix3d[i]['img'].split('/')[-1] #[10:]
img_id = int(img_file.split('.')[0]) #int(pix3d[i]['img'][10:14])
img_idxs.append(img_id)
voxel_idxs.append(voxel_dirs.index(voxel_dir) + 1)
# img_voxel_idxs.append(voxel_dirs.index(voxel_dir))
# if img_id != len(img_voxel_idxs):
# print('Error!!!=======', img_id)
if split_by_model:
if voxel_dir in voxel_ltrain:
ftrain.write(img_file+'\n')
ctrain += 1
elif voxel_dir in voxel_lval:
fval.write(img_file+'\n')
cval += 1
elif voxel_dir in voxel_ltest:
ftest.write(img_file+'\n')
ctest += 1
else:
tmp = random.random()
if tmp < 0.65:
ftrain.write(img_file+'\n')
ctrain += 1
elif tmp >= 0.65 and tmp < 0.8:
fval.write(img_file+'\n')
cval += 1
else:
ftest.write(img_file+'\n')
ctest += 1
ftrain.close()
fval.close()
ftest.close()
# scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
# {'img_voxel_idxs': np.array(img_voxel_idxs)})
scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
{'img_idxs': np.array(img_idxs), 'voxel_idxs': np.array(voxel_idxs)})
print(voxel_ctrain+voxel_cval+voxel_ctest, voxel_ctrain, voxel_cval, voxel_ctest)
print(ctrain+cval+ctest, ctrain, cval, ctest)
print(len(img_idxs))
if __name__ == '__main__':
cls_all = ['chair', 'bed', 'bookcase', 'desk', 'misc', 'sofa', 'table', 'tool', 'wardrobe']
cls = 'table'
# for cls in cls_all:
split_voxel_then_image(cls)
check_image_voxel_match(cls)
| [
"os.path.abspath",
"os.makedirs",
"os.path.exists",
"random.random",
"numpy.array",
"os.path.join"
] | [((129, 149), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (144, 149), False, 'import os\n'), ((164, 200), 'os.path.join', 'os.path.join', (['root', '"""../output"""', 'cls'], {}), "(root, '../output', cls)\n", (176, 200), False, 'import os\n'), ((313, 346), 'os.path.join', 'os.path.join', (['out_dir', '"""voxeltxt"""'], {}), "(out_dir, 'voxeltxt')\n", (325, 346), False, 'import os\n'), ((2540, 2560), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (2555, 2560), False, 'import os\n'), ((2575, 2611), 'os.path.join', 'os.path.join', (['root', '"""../output"""', 'cls'], {}), "(root, '../output', cls)\n", (2587, 2611), False, 'import os\n'), ((2632, 2665), 'os.path.join', 'os.path.join', (['out_dir', '"""voxeltxt"""'], {}), "(out_dir, 'voxeltxt')\n", (2644, 2665), False, 'import os\n'), ((2769, 2815), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_train.txt"""'], {}), "(voxel_txt_dir, 'voxel_train.txt')\n", (2781, 2815), False, 'import os\n'), ((2840, 2884), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_val.txt"""'], {}), "(voxel_txt_dir, 'voxel_val.txt')\n", (2852, 2884), False, 'import os\n'), ((2910, 2955), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""voxel_test.txt"""'], {}), "(voxel_txt_dir, 'voxel_test.txt')\n", (2922, 2955), False, 'import os\n'), ((4163, 4203), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""train.txt"""'], {}), "(voxel_txt_dir, 'train.txt')\n", (4175, 4203), False, 'import os\n'), ((4222, 4260), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""val.txt"""'], {}), "(voxel_txt_dir, 'val.txt')\n", (4234, 4260), False, 'import os\n'), ((4280, 4319), 'os.path.join', 'os.path.join', (['voxel_txt_dir', '"""test.txt"""'], {}), "(voxel_txt_dir, 'test.txt')\n", (4292, 4319), False, 'import os\n'), ((4486, 4527), 'os.path.join', 'os.path.join', (['root', '"""../input/pix3d.json"""'], {}), "(root, '../input/pix3d.json')\n", (4498, 4527), False, 'import os\n'), ((1145, 1188), 'os.path.join', 'os.path.join', (['out_dir', '"""img_voxel_idxs.mat"""'], {}), "(out_dir, 'img_voxel_idxs.mat')\n", (1157, 1188), False, 'import os\n'), ((2677, 2706), 'os.path.exists', 'os.path.exists', (['voxel_txt_dir'], {}), '(voxel_txt_dir)\n', (2691, 2706), False, 'import os\n'), ((2716, 2742), 'os.makedirs', 'os.makedirs', (['voxel_txt_dir'], {}), '(voxel_txt_dir)\n', (2727, 2742), False, 'import os\n'), ((3474, 3489), 'random.random', 'random.random', ([], {}), '()\n', (3487, 3489), False, 'import random\n'), ((6281, 6324), 'os.path.join', 'os.path.join', (['out_dir', '"""img_voxel_idxs.mat"""'], {}), "(out_dir, 'img_voxel_idxs.mat')\n", (6293, 6324), False, 'import os\n'), ((6360, 6378), 'numpy.array', 'np.array', (['img_idxs'], {}), '(img_idxs)\n', (6368, 6378), True, 'import numpy as np\n'), ((6394, 6414), 'numpy.array', 'np.array', (['voxel_idxs'], {}), '(voxel_idxs)\n', (6402, 6414), True, 'import numpy as np\n'), ((811, 842), 'os.path.join', 'os.path.join', (['voxel_dirs[phase]'], {}), '(voxel_dirs[phase])\n', (823, 842), False, 'import os\n'), ((920, 949), 'os.path.join', 'os.path.join', (['img_dirs[phase]'], {}), '(img_dirs[phase])\n', (932, 949), False, 'import os\n'), ((5718, 5733), 'random.random', 'random.random', ([], {}), '()\n', (5731, 5733), False, 'import random\n')] |
import logging
from types import TracebackType
from typing import Callable, Dict, List, Optional, Type, TypeVar
import aiohttp
from yarl import URL
from poe_client.rate_limiter import RateLimiter
from poe_client.schemas.account import Account, Realm
from poe_client.schemas.character import Character
from poe_client.schemas.filter import ItemFilter
from poe_client.schemas.league import Ladder, League, LeagueType
from poe_client.schemas.pvp import PvPMatch, PvPMatchLadder, PvPMatchType
from poe_client.schemas.stash import PublicStash, StashTab
Model = TypeVar("Model") # the variable return type
class Client(object):
"""Aiohttp class for interacting with the Path of Exile API."""
_token: Optional[str]
_base_url: URL = URL("https://api.pathofexile.com")
_client: aiohttp.ClientSession
_user_agent: str
_limiter: RateLimiter
# Maps "generic" paths to rate limiting policy names.
# Generic paths are paths with no IDs or unique numbers.
# For example, "/character/moowiz" has an account name, so it's not a base path.
# "/character/" is the equivalent "generic" path.
_path_to_policy_names: Dict[str, str]
def __init__(
self,
user_agent: str,
token: Optional[str] = None,
) -> None:
"""Initialize a new PoE client.
Args:
user_agent: An OAuth user agent. Used when making HTTP requests to the API.
token: Authorization token to pass to the PoE API. If unset, no auth token is used.
"""
self._token = token
self._user_agent = user_agent
self._limiter = RateLimiter()
self._path_to_policy_names = {}
async def __aenter__(self) -> "Client":
"""Runs on entering `async with`."""
self._client = aiohttp.ClientSession(raise_for_status=True)
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
"""Runs on exiting `async with`."""
await self._client.close()
if exc_val:
raise exc_val
return True
# Type ignore is for args and kwargs, which have unknown types we pass to _get_json
async def _get( # type: ignore
self,
model: Callable[..., Model],
result_field: Optional[str] = None,
*args,
**kwargs,
) -> Model:
"""Make a get request and returns the data as an APIType subclass.
Args:
model: The object which contains data retrieved from the API. Must
                be a subclass of APIType.
result_field: If present, returns the data in this field from the request,
rather than the request itself.
See _get_json for other args.
Returns:
The result, parsed into an instance of the `model` type.
"""
json_result = await self._get_json(*args, **kwargs)
assert isinstance(json_result, dict) # noqa: S101
if result_field:
json_result = json_result[result_field]
return model(**json_result)
# Type ignore is for args and kwargs, which have unknown types we pass to _get_json
async def _get_list( # type: ignore
self,
model: Callable[..., Model],
result_field: Optional[str] = None,
*args,
**kwargs,
) -> List[Model]:
"""Make a get request and returns the data as a list of APIType subclass.
Args:
model: The object which contains data retrieved from the API. Must
                be a subclass of APIType.
result_field: If present, returns the data in this field from the request,
rather than the request itself.
See _get_json for other args.
Returns:
The result, parsed into a list of the `model` type.
"""
json_result = await self._get_json(*args, **kwargs)
if result_field:
assert isinstance(json_result, dict) # noqa: S101
json_result = json_result[result_field]
assert isinstance(json_result, list) # noqa: S101
return [model(**objitem) for objitem in json_result]
async def _get_json(
self,
path: str,
path_format_args: Optional[List[str]] = None,
query: Optional[Dict[str, str]] = None,
):
"""Fetches data from the POE API.
Args:
path:
The URL path to use. Appended to the POE API base URL.
If certain parts of the path are non-static (account ID),
those should be encoded as format args ("{0}") in the path,
and the values for those args should be passed into path_format_args.
path_format_args:
Values which should be encoded in the path when the HTTP request gets
made.
query:
An optional dict of query params to add to the HTTP request.
Returns:
The result of the API request, parsed as JSON.
"""
if not path_format_args:
path_format_args = []
        path_with_no_args = path.format(*("" for _ in range(len(path_format_args))))
policy_name = self._path_to_policy_names.get(path_with_no_args, "")
kwargs = {
"headers": {
"User-Agent": self._user_agent,
},
"params": query,
}
if self._token:
headers = kwargs["headers"]
assert headers # noqa: S101
headers["Authorization"] = "Bearer {0}".format(self._token)
# We key the policy name off the path with no format args. This presumes that
# different requests to the same endpoints with different specific args use the
# same rate limiting. For example, /characters/moowiz and /characters/chris
# presumably use the same rate limiting policy name.
if await self._limiter.get_semaphore(policy_name):
# We ignore typing in the dict assignment. kwargs only has dicts as values,
# but we're assigning booleans here. We can't set the typing inline without
# flake8 complaining about overly complex annotation.
logging.info("NOT BLOCKING")
kwargs["raise_for_status"] = True # type: ignore
else:
logging.info("BLOCKING")
kwargs["raise_for_status"] = False # type: ignore
# The types are ignored because for some reason it can't understand
# that kwargs isn't a positional arg and won't override a different
# positional argument in the function.
async with await self._client.get(
"{0}/{1}".format(self._base_url, path.format(*path_format_args)),
**kwargs, # type: ignore
) as resp:
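            # Update the generic-path -> policy-name mapping from this response's rate-limit headers.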
self._path_to_policy_names[
path_with_no_args
] = await self._limiter.parse_headers(resp.headers)
if resp.status != 200:
raise ValueError(
"Invalid request: status code {0}, expected 200".format(
resp.status,
),
)
return await resp.json()
class _PvPMixin(Client):
"""PVP related methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def get_pvp_matches(
self,
realm: Optional[Realm] = None,
match_type: Optional[PvPMatchType] = None,
season: str = "",
league: str = "",
) -> List[PvPMatch]:
"""Get a list of all pvp matches based on filters."""
if match_type == PvPMatchType.season and not season:
            raise ValueError("season cannot be empty if match_type is season.")
if match_type == PvPMatchType.league and not league:
            raise ValueError("league cannot be empty if match_type is league.")
# We construct this via a dict so that the linter doesn't complain about
# complexity.
query = {
"type": match_type.value if match_type else None,
"realm": realm.value if realm else None,
"season": season if season else None,
"league": league if league else None,
}
# Removed unset query params
query = {key: query_val for key, query_val in query.items() if query_val}
return await self._get_list(
path="pvp-match",
model=PvPMatch,
result_field="matches",
query=query,
)
async def get_pvp_match(
self,
match: str,
realm: Optional[Realm] = None,
) -> PvPMatch:
"""Get a pvp match based on id."""
query = {}
if realm:
query["realm"] = realm.value
return await self._get(
path="pvp-match/{0}",
path_format_args=(match,),
model=PvPMatch,
result_field="match",
query=query,
)
async def get_pvp_match_ladder(
self,
match: str,
realm: Optional[Realm] = None,
) -> PvPMatchLadder:
"""Get a pvp match based on id."""
query = {}
if realm:
query["realm"] = realm.value
return await self._get(
path="pvp-match/{0}/ladder",
path_format_args=(match,),
model=PvPMatchLadder,
result_field="match",
query=query,
)
class _LeagueMixin(Client):
"""League related methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def list_leagues( # noqa: WPS211
self,
realm: Optional[Realm] = None,
league_type: Optional[LeagueType] = None,
offset: int = 0,
season: str = "",
limit: int = 50,
) -> List[League]:
"""Get a list of all leagues based on filters."""
if league_type == LeagueType.season and not season:
raise ValueError("season cannot be empty if league_type is season.")
# We construct this via a dict so that the linter doesn't complain about
# complexity.
query = {
"realm": realm.value if realm else None,
"type": league_type.value if league_type else None,
"season": season if season else None,
"offset": str(offset) if offset else None,
"limit": str(limit) if limit else None,
}
# Remove unset values
query = {key: query_val for key, query_val in query.items() if query_val}
return await self._get_list(
path="league",
model=League,
result_field="leagues",
query=query,
)
async def get_league(
self,
league: str,
realm: Optional[Realm] = None,
) -> League:
"""Get a league based on league id."""
query = {}
if realm:
query["realm"] = realm.value
return await self._get(
path="league/{0}",
path_format_args=(league,),
model=League,
result_field="league",
query=query,
)
async def get_league_ladder(
self,
league: str,
realm: Optional[Realm] = None,
) -> Ladder:
"""Get the ladder of a league based on id."""
query = {}
if realm:
query["realm"] = realm.value
return await self._get(
path="league/{0}/ladder",
path_format_args=(league,),
model=Ladder,
result_field="ladder",
query=query,
)
class _AccountMixin(Client):
"""User account methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def get_profile(
self,
) -> Account:
"""Get the account beloning to the token."""
return await self._get(path="league", model=Account)
async def get_characters(
self,
) -> List[Character]:
"""Get all characters belonging to token."""
return await self._get_list(
path="character",
model=Character,
result_field="characters",
)
async def get_character(
self,
name: str,
) -> Character:
"""Get a character based on id and account of token."""
return await self._get(
path="character/{0}",
path_format_args=(name,),
model=Character,
result_field="character",
)
async def get_stashes(
self,
league: str,
) -> List[StashTab]:
"""Get all stash tabs belonging to token."""
return await self._get_list(
path="stash/{0}",
path_format_args=(league,),
model=StashTab,
result_field="stashes",
)
async def get_stash(
self,
league: str,
stash_id: str,
substash_id: Optional[str],
) -> StashTab:
"""Get a stash tab based on id."""
path = "stash/{0}/{1}".format(league, stash_id)
path_format_args = [league, stash_id]
if substash_id:
path += "/{2}" # noqa: WPS336
path_format_args.append(substash_id)
return await self._get(
path=path,
path_format_args=path_format_args,
model=StashTab,
result_field="stash",
)
class _FilterMixin(Client):
"""Item Filter methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def get_item_filters(
self,
) -> List[ItemFilter]:
"""Get all item filters."""
return await self._get_list(
path="item-filter",
model=ItemFilter,
result_field="filters",
)
async def get_item_filter(
self,
filterid: str,
) -> ItemFilter:
"""Get a ItemFilter based on id."""
return await self._get(
path="item-filter/{0}",
path_format_args=(filterid,),
model=ItemFilter,
result_field="filter",
)
class _PublicStashMixin(Client):
"""Public stash tab methods for the POE API.
CURRENTLY UNTESTED. HAS NOT BEEN USED IN PRODUCTION.
"""
async def get_public_stash_tabs(
self,
next_change_id: Optional[str] = None,
) -> PublicStash:
"""Get the latest public stash tabs.
Args:
next_change_id: If set, returns the next set of stash tabs, starting
at this change_id. While this is technically optional,
in practice this is required; not setting this value
fetches stash tabs from the beginning of the API's
availability which is several years in the past.
Returns:
A dict representing a public stash change.
"""
query = {}
if next_change_id:
query["id"] = next_change_id
return await self._get_json(
path="public-stash-tabs",
query=query,
)
# Ignore WPS215, error about too many base classes. We use multiple to better split up
# the different APIs to simplify reading. There isn't any complicated inheritance
# going on here.
class PoEClient( # noqa: WPS215
_PvPMixin,
_LeagueMixin,
_AccountMixin,
_FilterMixin,
_PublicStashMixin,
Client,
):
"""Client for PoE API.
This technically has support for every API GGG has exposed. None of these
APIs have been tested in production, so use at your own risk.
"""
| [
"poe_client.rate_limiter.RateLimiter",
"aiohttp.ClientSession",
"logging.info",
"typing.TypeVar",
"yarl.URL"
] | [((559, 575), 'typing.TypeVar', 'TypeVar', (['"""Model"""'], {}), "('Model')\n", (566, 575), False, 'from typing import Callable, Dict, List, Optional, Type, TypeVar\n'), ((744, 778), 'yarl.URL', 'URL', (['"""https://api.pathofexile.com"""'], {}), "('https://api.pathofexile.com')\n", (747, 778), False, 'from yarl import URL\n'), ((1613, 1626), 'poe_client.rate_limiter.RateLimiter', 'RateLimiter', ([], {}), '()\n', (1624, 1626), False, 'from poe_client.rate_limiter import RateLimiter\n'), ((1780, 1824), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'raise_for_status': '(True)'}), '(raise_for_status=True)\n', (1801, 1824), False, 'import aiohttp\n'), ((6327, 6355), 'logging.info', 'logging.info', (['"""NOT BLOCKING"""'], {}), "('NOT BLOCKING')\n", (6339, 6355), False, 'import logging\n'), ((6444, 6468), 'logging.info', 'logging.info', (['"""BLOCKING"""'], {}), "('BLOCKING')\n", (6456, 6468), False, 'import logging\n')] |
# Numpy exercises - 15
#*******************
import numpy as np
arr=np.ones((10,10))
arr[1:-1,1:-1]=0
print(arr)
print()
arr_zero=np.zeros((8,8))
arr_zero=np.pad(arr_zero,pad_width=1,mode='constant',constant_values=1)
print(arr_zero) | [
"numpy.pad",
"numpy.zeros",
"numpy.ones"
] | [((67, 84), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (74, 84), True, 'import numpy as np\n'), ((132, 148), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (140, 148), True, 'import numpy as np\n'), ((158, 223), 'numpy.pad', 'np.pad', (['arr_zero'], {'pad_width': '(1)', 'mode': '"""constant"""', 'constant_values': '(1)'}), "(arr_zero, pad_width=1, mode='constant', constant_values=1)\n", (164, 223), True, 'import numpy as np\n')] |
import numpy as np
class AccelerationSensor:
def __init__(self, measurement_covariance):
self.R_meas = measurement_covariance
def getMeasurements(self, true_accel):
return np.random.multivariate_normal(true_accel, self.R_meas)
| [
"numpy.random.multivariate_normal"
] | [((192, 246), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['true_accel', 'self.R_meas'], {}), '(true_accel, self.R_meas)\n', (221, 246), True, 'import numpy as np\n')] |
import json
import math
from django.core.serializers import serialize
from django.db.models.query import QuerySet
ALLOWED_FORMATS = [
'dict',
'json',
'queryset'
]
class Pagination:
def __init__(
self,
page=1,
pages=1,
queryset=QuerySet(),
total=0,
pydantic_model=None,
):
self.page = page
self.pages = pages
self.queryset = queryset
self.total = total
self.pydantic_model = pydantic_model
@property
def data(self):
return {
'page': self.page,
'pages': self.pages,
'results': self.queryset,
'total': self.total
}
@property
def dict(self):
data = self.data
results = data['results']
if self.pydantic_model is None:
data['results'] = self.queryset_to_dict(results)
else:
_results = []
for x in self.pydantic_model.from_django(results, many=True):
_results.append(x.dict())
data['results'] = _results
return data
@property
def json(self):
return json.dumps(self.dict)
def queryset_to_dict(self, queryset):
data = serialize(
'json',
queryset
)
data = json.loads(data)
_d = []
for x in data:
id = x.pop('pk')
field_data = x.pop('fields')
field_data['id'] = id
_d.append(field_data)
return _d
def pagination(queryset, page, count, pydantic_model=None):
if isinstance(queryset, QuerySet) is False:
        raise TypeError('"queryset" must be QuerySet')
if isinstance(page, int) is False:
raise TypeError('"page" must be int')
if isinstance(count, int) is False:
raise TypeError('"count" must be int')
total = queryset.count()
pages = 1
if count != '__all__':
pages = math.ceil(total/count)
start_index = (page - 1) * count
end_index = start_index + count
queryset = queryset[start_index : end_index]
return Pagination(
page=page,
pages=pages,
queryset=queryset,
total=total,
pydantic_model=pydantic_model
)
| [
"django.db.models.query.QuerySet",
"json.loads",
"math.ceil",
"django.core.serializers.serialize",
"json.dumps"
] | [((297, 307), 'django.db.models.query.QuerySet', 'QuerySet', ([], {}), '()\n', (305, 307), False, 'from django.db.models.query import QuerySet\n'), ((1192, 1213), 'json.dumps', 'json.dumps', (['self.dict'], {}), '(self.dict)\n', (1202, 1213), False, 'import json\n'), ((1273, 1300), 'django.core.serializers.serialize', 'serialize', (['"""json"""', 'queryset'], {}), "('json', queryset)\n", (1282, 1300), False, 'from django.core.serializers import serialize\n'), ((1351, 1367), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (1361, 1367), False, 'import json\n'), ((1994, 2018), 'math.ceil', 'math.ceil', (['(total / count)'], {}), '(total / count)\n', (2003, 2018), False, 'import math\n')] |
#
# This file is part of PKPDApp (https://github.com/pkpdapp-team/pkpdapp) which
# is released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
"""
WSGI config for pkpdapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pkpdapp.settings')
application = get_wsgi_application()
| [
"django.core.wsgi.get_wsgi_application",
"os.environ.setdefault"
] | [((492, 559), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""pkpdapp.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'pkpdapp.settings')\n", (513, 559), False, 'import os\n'), ((575, 597), 'django.core.wsgi.get_wsgi_application', 'get_wsgi_application', ([], {}), '()\n', (595, 597), False, 'from django.core.wsgi import get_wsgi_application\n')] |
import inspect
import six
import yaml
from . import view
SPECIAL_KWARGS_KEYS = {'id', 'cols', 'updater'}
_init_cache = {}
class ParserContext(object):
def __init__(self, inputs):
self.inputs = inputs or {}
class Parser(object):
def __init__(self, registry):
self.registry = registry
def parse(self, obj, context):
obj = _prepare(obj)
obj = self._normalize_element(obj)
obj = self._process_intrinsic_functions(obj, context)
element_configuration = self._parse_dict(obj)
return element_configuration
def _parse_dict(self, obj):
assert isinstance(obj, dict)
assert len(obj) == 1
element_configuration = {
'kwargs': {
'props': {}
},
'kwargs_children': set(),
'prop_children': {},
'children': [],
'field': None
}
key, value = list(obj.items())[0]
element_type, additional_kwargs = self._parse_str(key)
element_configuration['element_type'] = element_type
element_configuration['kwargs'].update(additional_kwargs)
if isinstance(value, six.string_types):
if issubclass(element_type, view.Raw):
value = [{'Inline': value}]
else:
element_configuration['kwargs']['_awe_arg'] = value
value = []
value = value or []
if not isinstance(value, list):
raise ValueError('Value should be a string or a list, got: {}'.format(value))
if value and isinstance(value[0], list):
self._parse_element_configuration(element_configuration, element_type, value[0])
value = value[1:]
for item in value:
if isinstance(item, six.string_types) and not self._is_element_type(item):
item = {'Inline': item}
else:
item = self._normalize_element(item)
child_element_configuration = self._parse_dict(item)
element_configuration['children'].append(child_element_configuration)
return element_configuration
def _parse_element_configuration(self, result, element_type, configuration_items):
if not configuration_items:
return
if not isinstance(configuration_items, list):
raise ValueError('Element configuration should be passed as a list, got: {}'.format(configuration_items))
if isinstance(configuration_items[0], six.string_types):
result['field'] = configuration_items[0]
configuration_items = configuration_items[1:]
for item in configuration_items:
assert isinstance(item, dict)
assert len(item) == 1
key, value = list(item.items())[0]
is_element_value = self._is_intrinsic(value, '_')
if is_element_value:
value = value['_']
value = self._normalize_element(value)
value = self._parse_dict(value)
if key in SPECIAL_KWARGS_KEYS or key in self._get_init_args(element_type):
result['kwargs'][key] = value
if is_element_value:
result['kwargs_children'].add(key)
elif is_element_value:
result['prop_children'][key] = value
else:
result['kwargs']['props'][key] = value
def _parse_str(self, obj_str):
assert obj_str
if obj_str[0].islower():
return view.Raw, {'tag': obj_str}
elif obj_str in view.builtin_element_types:
return view.builtin_element_types[obj_str], {}
elif obj_str in self.registry.element_types:
return self.registry.element_types[obj_str], {}
raise ValueError('No such element: {}'.format(obj_str))
def _is_element_type(self, str_obj):
return (
str_obj in self.registry.element_types or
str_obj in view.builtin_element_types
)
@staticmethod
def _is_intrinsic(obj, key):
return isinstance(obj, dict) and len(obj) == 1 and bool(obj.get(key))
def _process_input(self, node, context):
input_node = self._process_intrinsic_functions(node['$'], context)
if isinstance(input_node, six.string_types):
input_node = [input_node]
input_name = input_node[0]
input_node = input_node[1:]
default_value = None
for entry in input_node:
assert isinstance(entry, dict)
assert len(entry) == 1
key, value = list(entry.items())[0]
if key == 'default':
default_value = value
else:
raise ValueError('Unknown config option: {}'.format(key))
if default_value:
return context.inputs.get(input_name, default_value)
else:
return context.inputs[input_name]
def _process_intrinsic_functions(self, obj, context):
def process(node):
if isinstance(node, dict):
if self._is_intrinsic(node, '$'):
return self._process_input(node, context)
return {k: process(v) for k, v in node.items()}
elif isinstance(node, list):
return [process(item) for item in node]
return node
return process(obj)
@staticmethod
def _normalize_element(obj):
if isinstance(obj, six.string_types):
obj = {obj: None}
elif isinstance(obj, list):
obj = {'div': obj}
return obj
@staticmethod
def _get_init_args(element_type):
if element_type in _init_cache:
return _init_cache[element_type]
result = set()
getargspec_impl = inspect.getargspec if six.PY2 else inspect.getfullargspec
spec = getargspec_impl(element_type._init)
result |= set(spec.args)
if six.PY3:
result |= set(spec.kwonlyargs)
_init_cache[element_type] = result
return result
def is_parsable(obj):
return isinstance(obj, six.string_types + (list, dict))
def _prepare(obj):
if isinstance(obj, six.string_types):
obj = yaml.load(obj)
return obj
| [
"yaml.load"
] | [((6214, 6228), 'yaml.load', 'yaml.load', (['obj'], {}), '(obj)\n', (6223, 6228), False, 'import yaml\n')] |
from users.models import Model
def init_models(license):
Model.load_on_migrate(license)
print("models worked!!") | [
"users.models.Model.load_on_migrate"
] | [((63, 93), 'users.models.Model.load_on_migrate', 'Model.load_on_migrate', (['license'], {}), '(license)\n', (84, 93), False, 'from users.models import Model\n')] |
import os
import logging
from contextlib import contextmanager
from sqlite3 import dbapi2 as sqlite
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer, Sequence
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.sqlite import (BLOB, BOOLEAN, CHAR, DATE, DATETIME,
DECIMAL, FLOAT, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP,
VARCHAR)
logger = logging.getLogger('db')
engine = create_engine("sqlite+pysqlite:///pubmed.db",
execution_options={"sqlite_raw_colnames": True}, module=sqlite)
Session = sessionmaker(bind=engine)
@contextmanager
def session_scope():
"""Provide a transactional scope around a series of operations."""
session = Session()
try:
yield session
session.commit()
except Exception as ex:
logger.error(ex)
session.rollback()
raise
finally:
session.close()
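# Illustrative sketch (not part of the original module): typical use of
# session_scope(); the block commits on success and rolls back if it raises.
# PubMed is defined further down in this file, which is fine because the body
# is only resolved when the function is actually called.
def _example_upsert_pubmed(pmid, title):
    with session_scope() as session:
        session.merge(PubMed(pmid=pmid, title=title))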
Base = declarative_base()
class PubMed(Base):
__tablename__ = 'pubmed'
pmid = Column(String(64), primary_key=True)
title = Column(String(256), nullable=False)
authors = Column(String(256))
summary = Column(String(256))
summary_detail = Column(String(256))
link = Column(String(256))
tags = Column(String(256))
key = Column(String(256))
create_dt = Column(DATETIME)
def __repr__(self):
return "<PubMed(pmid=%s,title=%s)>" % (self.pmid, self.title)
class Metric(Base):
__tablename__ = 'metric'
id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
pmid = Column(String(64))
altmetric = Column(FLOAT)
create_dt = Column(DATETIME)
def __repr__(self):
return "<Metric(pmid=%s, metric=%s)>" % (self.pmid, self.altmetric)
Base.metadata.create_all(engine)
if __name__ == '__main__':
import datetime as dt
Base.metadata.create_all(engine)
with session_scope() as session:
pubmed = PubMed(
pmid='000000',
title='title',
authors='authors',
create_dt=dt.datetime.now()
)
session.merge(pubmed)
pubmed = PubMed(
pmid='000000',
title='title',
authors='authors2',
create_dt=dt.datetime.now()
)
session.merge(pubmed)
| [
"sqlalchemy.String",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.Column",
"sqlalchemy.create_engine",
"sqlalchemy.orm.sessionmaker",
"datetime.datetime.now",
"logging.getLogger",
"sqlalchemy.Sequence"
] | [((461, 484), 'logging.getLogger', 'logging.getLogger', (['"""db"""'], {}), "('db')\n", (478, 484), False, 'import logging\n'), ((494, 608), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite+pysqlite:///pubmed.db"""'], {'execution_options': "{'sqlite_raw_colnames': True}", 'module': 'sqlite'}), "('sqlite+pysqlite:///pubmed.db', execution_options={\n 'sqlite_raw_colnames': True}, module=sqlite)\n", (507, 608), False, 'from sqlalchemy import create_engine\n'), ((618, 643), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (630, 643), False, 'from sqlalchemy.orm import sessionmaker\n'), ((972, 990), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (988, 990), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((1354, 1370), 'sqlalchemy.Column', 'Column', (['DATETIME'], {}), '(DATETIME)\n', (1360, 1370), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1631, 1644), 'sqlalchemy.Column', 'Column', (['FLOAT'], {}), '(FLOAT)\n', (1637, 1644), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1661, 1677), 'sqlalchemy.Column', 'Column', (['DATETIME'], {}), '(DATETIME)\n', (1667, 1677), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1059, 1069), 'sqlalchemy.String', 'String', (['(64)'], {}), '(64)\n', (1065, 1069), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1108, 1119), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (1114, 1119), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1158, 1169), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (1164, 1169), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1192, 1203), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (1198, 1203), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1233, 1244), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (1239, 1244), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1264, 1275), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (1270, 1275), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1295, 1306), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (1301, 1306), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1325, 1336), 'sqlalchemy.String', 'String', (['(256)'], {}), '(256)\n', (1331, 1336), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1542, 1565), 'sqlalchemy.Sequence', 'Sequence', (['"""user_id_seq"""'], {}), "('user_id_seq')\n", (1550, 1565), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((1603, 1613), 'sqlalchemy.String', 'String', (['(64)'], {}), '(64)\n', (1609, 1613), False, 'from sqlalchemy import Column, String, Integer, Sequence\n'), ((2073, 2090), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2088, 2090), True, 'import datetime as dt\n'), ((2264, 2281), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2279, 2281), True, 'import datetime as dt\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Loan Classification Project
# In[1]:
# Libraries we need
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve,recall_score
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
# In[2]:
df = pd.read_csv("Dataset.csv")
# In[3]:
df.head()
# In[4]:
df.info()
# In[5]:
df.nunique()
# - Above we can see that Reason and Bad are binary variables
# - Nothing needs to be dropped
# In[6]:
df.describe()
# In[7]:
plt.hist(df['BAD'], bins=3)
plt.show()
# In[8]:
df['LOAN'].plot(kind='density')
plt.show()
# In[9]:
plt.pie(df['REASON'].value_counts(), labels=['DebtCon', 'HomeImp'], autopct='%.1f')
plt.show()
df['REASON'].value_counts()
# In[10]:
correlation = df.corr()
sns.heatmap(correlation)
plt.show()
# In[11]:
df['BAD'].value_counts(normalize=True)
# In[12]:
df.fillna(df.mean(), inplace=True)
# In[13]:
one_hot_encoding = pd.get_dummies(df['REASON'])
df = df.drop('REASON', axis=1)
df = df.join(one_hot_encoding)
df
# In[14]:
one_hot_encoding2 = pd.get_dummies(df['JOB'])
df = df.drop('JOB', axis=1)
df = df.join(one_hot_encoding2)
df
# In[15]:
dependent = df['BAD']
independent = df.drop(['BAD'], axis=1)
x_train, x_test, y_train, y_test = train_test_split(independent, dependent, test_size=0.3, random_state=1)
# In[16]:
def metrics_score(actual, predicted):
print(classification_report(actual, predicted))
cm = confusion_matrix(actual, predicted)
plt.figure(figsize=(8,5))
sns.heatmap(cm, annot=True, fmt='.2f', xticklabels=['Not Default', 'Default'], yticklabels=['Not Default', 'Default'])
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
# In[17]:
dtree = DecisionTreeClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
# In[18]:
dtree.fit(x_train, y_train)
# In[19]:
dependent_performance_dt = dtree.predict(x_train)
metrics_score(y_train, dependent_performance_dt)
# - The above is perfect because we are using the train values, not the test
# - Lets test on test data
# In[20]:
dependent_test_performance_dt = dtree.predict(x_test)
metrics_score(y_test,dependent_test_performance_dt)
# - As we can see, we got decent performance from this model; let's see if we can do better
# - Self-note: look at feature importances next
# In[21]:
important = dtree.feature_importances_
columns = independent.columns
important_items_df = pd.DataFrame(important, index=columns, columns=['Importance']).sort_values(by='Importance', ascending=False)
plt.figure(figsize=(13,13))
sns.barplot(important_items_df.Importance, important_items_df.index)
plt.show()
# - I followed this from a previous project to see the most important features
# - We can see that the most important features are DEBTINC, CLAGE and CLNO
# In[22]:
tree_estimator = DecisionTreeClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
parameters = {
'max_depth':np.arange(2,7),
'criterion':['gini', 'entropy'],
'min_samples_leaf':[5,10,20,25]
}
score = metrics.make_scorer(recall_score, pos_label=1)
gridCV= GridSearchCV(tree_estimator, parameters, scoring=score,cv=10)
gridCV = gridCV.fit(x_train, y_train)
tree_estimator = gridCV.best_estimator_
tree_estimator.fit(x_train, y_train)
# In[23]:
dependent_performance_dt = tree_estimator.predict(x_train)
metrics_score(y_train, dependent_performance_dt)
# - We increased the less harmful error but decreased the harmful error
# In[24]:
dependent_test_performance_dt = tree_estimator.predict(x_test)
metrics_score(y_test, dependent_test_performance_dt)
# - Although the performance is slightly worse, we still reduced the harmful error
# In[25]:
important = tree_estimator.feature_importances_
columns=independent.columns
importance_df=pd.DataFrame(important,index=columns,columns=['Importance']).sort_values(by='Importance',ascending=False)
plt.figure(figsize=(13,13))
sns.barplot(importance_df.Importance,importance_df.index)
plt.show()
# In[26]:
features = list(independent.columns)
plt.figure(figsize=(30,20))
tree.plot_tree(dtree,max_depth=4,feature_names=features,filled=True,fontsize=12,node_ids=True,class_names=True)
plt.show()
# - A visualization is one of the advantages that dtrees offer, we can show this to the client ot show the thought process
# In[27]:
forest_estimator = RandomForestClassifier(class_weight={0:0.20, 1:0.80}, random_state=1)
forest_estimator.fit(x_train, y_train)
# In[28]:
y_predict_training_forest = forest_estimator.predict(x_train)
metrics_score(y_train, y_predict_training_forest)
# - A perfect classification
# - This implies overfitting
# In[29]:
y_predict_test_forest = forest_estimator.predict(x_test)
metrics_score(y_test, y_predict_test_forest)
# - The performance is a lot better than the original single tree
# - Let's fix the overfitting
# In[30]:
forest_estimator_tuned = RandomForestClassifier(class_weight={0:0.20,1:0.80}, random_state=1)
parameters_rf = {
"n_estimators": [100,250,500],
"min_samples_leaf": np.arange(1, 4,1),
"max_features": [0.7,0.9,'auto'],
}
score = metrics.make_scorer(recall_score, pos_label=1)
# Run the grid search
grid_obj = GridSearchCV(forest_estimator_tuned, parameters_rf, scoring=score, cv=5)
grid_obj = grid_obj.fit(x_train, y_train)
# Set the clf to the best combination of parameters
forest_estimator_tuned = grid_obj.best_estimator_
# In[31]:
forest_estimator_tuned.fit(x_train, y_train)
# In[32]:
y_predict_train_forest_tuned = forest_estimator_tuned.predict(x_train)
metrics_score(y_train, y_predict_train_forest_tuned)
# In[33]:
y_predict_test_forest_tuned = forest_estimator_tuned.predict(x_test)
metrics_score(y_test, y_predict_test_forest_tuned)
# - We now have very good performance
# - We can submit this to the company
# ### Conclusion
# - I made many models to get the best results.
# - The first one I made was a decision tree; this is not as good as a random forest, but it is transparent as it lets us visualize it. This first one had decent performance.
# - To improve the performance of this we tried to tune the model, which reduced the harmful error.
# - Then, to improve even more, I created a random forest model; this had excellent performance once we created a second version which removed overfitting.
# ### Recommendations
# - The biggest thing that affects defaulting on a loan is the debt to income ratio. If someone has a lot of debt and a lower income they may have a harder time paying back a loan.
# - Something else that affects defaulting on a loan is the number of delinquent credit lines. This means that someone who cannot make their credit card payments will have a hard time paying back a loan.
# - Years at a job is also a driver of a loan's outcome. A large number of years at a job could indicate financial stability.
# - DEROG, or a history of delinquent payments, is also a warning sign of not being able to pay back a loan.
# - Those are some warning signs/good signs that should be looked out for when looking for candidates to give loans to.
#
# I will now apply SHAP to look more into this model.
# In[34]:
get_ipython().system('pip install shap')
import shap
# In[35]:
shap.initjs()
# In[36]:
explain = shap.TreeExplainer(forest_estimator_tuned)
shap_vals = explain(x_train)
# In[37]:
type(shap_vals)
# In[38]:
shap.plots.bar(shap_vals[:, :, 0])
# In[39]:
shap.plots.heatmap(shap_vals[:, :, 0])
# In[40]:
shap.summary_plot(shap_vals[:, :, 0], x_train)
# In[53]:
print(forest_estimator_tuned.predict(x_test.iloc[107].to_numpy().reshape(1,-1))) # This predicts for one row: 0 means approved, 1 means denied.
| [
"sklearn.model_selection.GridSearchCV",
"seaborn.heatmap",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"shap.plots.heatmap",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"numpy.arange",
"sklearn.tree.plot_tree",
"pandas.DataFrame",
"shap.TreeExplainer",
"sklearn.metrics.make_scorer",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.show",
"pandas.get_dummies",
"seaborn.barplot",
"matplotlib.pyplot.ylabel",
"shap.summary_plot",
"shap.plots.bar",
"matplotlib.pyplot.hist",
"shap.initjs",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.xlabel"
] | [((919, 945), 'pandas.read_csv', 'pd.read_csv', (['"""Dataset.csv"""'], {}), "('Dataset.csv')\n", (930, 945), True, 'import pandas as pd\n'), ((1153, 1180), 'matplotlib.pyplot.hist', 'plt.hist', (["df['BAD']"], {'bins': '(3)'}), "(df['BAD'], bins=3)\n", (1161, 1180), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1191), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1189, 1191), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1247), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1245, 1247), True, 'import matplotlib.pyplot as plt\n'), ((1345, 1355), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1353, 1355), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1446), 'seaborn.heatmap', 'sns.heatmap', (['correlation'], {}), '(correlation)\n', (1433, 1446), True, 'import seaborn as sns\n'), ((1447, 1457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1455, 1457), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1621), 'pandas.get_dummies', 'pd.get_dummies', (["df['REASON']"], {}), "(df['REASON'])\n", (1607, 1621), True, 'import pandas as pd\n'), ((1721, 1746), 'pandas.get_dummies', 'pd.get_dummies', (["df['JOB']"], {}), "(df['JOB'])\n", (1735, 1746), True, 'import pandas as pd\n'), ((1920, 1991), 'sklearn.model_selection.train_test_split', 'train_test_split', (['independent', 'dependent'], {'test_size': '(0.3)', 'random_state': '(1)'}), '(independent, dependent, test_size=0.3, random_state=1)\n', (1936, 1991), False, 'from sklearn.model_selection import train_test_split\n'), ((2385, 2458), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '{(0): 0.2, (1): 0.8}', 'random_state': '(1)'}), '(class_weight={(0): 0.2, (1): 0.8}, random_state=1)\n', (2407, 2458), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3182, 3210), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 13)'}), '(figsize=(13, 13))\n', (3192, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3210, 3278), 'seaborn.barplot', 'sns.barplot', (['important_items_df.Importance', 'important_items_df.index'], {}), '(important_items_df.Importance, important_items_df.index)\n', (3221, 3278), True, 'import seaborn as sns\n'), ((3279, 3289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3287, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3550), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'class_weight': '{(0): 0.2, (1): 0.8}', 'random_state': '(1)'}), '(class_weight={(0): 0.2, (1): 0.8}, random_state=1)\n', (3499, 3550), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3691, 3737), 'sklearn.metrics.make_scorer', 'metrics.make_scorer', (['recall_score'], {'pos_label': '(1)'}), '(recall_score, pos_label=1)\n', (3710, 3737), False, 'from sklearn import metrics\n'), ((3746, 3808), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['tree_estimator', 'parameters'], {'scoring': 'score', 'cv': '(10)'}), '(tree_estimator, parameters, scoring=score, cv=10)\n', (3758, 3808), False, 'from sklearn.model_selection import GridSearchCV\n'), ((4538, 4566), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 13)'}), '(figsize=(13, 13))\n', (4548, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4566, 4624), 'seaborn.barplot', 'sns.barplot', (['importance_df.Importance', 'importance_df.index'], {}), '(importance_df.Importance, importance_df.index)\n', (4577, 4624), True, 'import seaborn as sns\n'), ((4624, 4634), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (4632, 4634), True, 'import matplotlib.pyplot as plt\n'), ((4687, 4715), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 20)'}), '(figsize=(30, 20))\n', (4697, 4715), True, 'import matplotlib.pyplot as plt\n'), ((4716, 4837), 'sklearn.tree.plot_tree', 'tree.plot_tree', (['dtree'], {'max_depth': '(4)', 'feature_names': 'features', 'filled': '(True)', 'fontsize': '(12)', 'node_ids': '(True)', 'class_names': '(True)'}), '(dtree, max_depth=4, feature_names=features, filled=True,\n fontsize=12, node_ids=True, class_names=True)\n', (4730, 4837), False, 'from sklearn import tree\n'), ((4828, 4838), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4836, 4838), True, 'import matplotlib.pyplot as plt\n'), ((4996, 5069), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': '{(0): 0.2, (1): 0.8}', 'random_state': '(1)'}), '(class_weight={(0): 0.2, (1): 0.8}, random_state=1)\n', (5018, 5069), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5539, 5612), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': '{(0): 0.2, (1): 0.8}', 'random_state': '(1)'}), '(class_weight={(0): 0.2, (1): 0.8}, random_state=1)\n', (5561, 5612), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5768, 5814), 'sklearn.metrics.make_scorer', 'metrics.make_scorer', (['recall_score'], {'pos_label': '(1)'}), '(recall_score, pos_label=1)\n', (5787, 5814), False, 'from sklearn import metrics\n'), ((5849, 5921), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['forest_estimator_tuned', 'parameters_rf'], {'scoring': 'score', 'cv': '(5)'}), '(forest_estimator_tuned, parameters_rf, scoring=score, cv=5)\n', (5861, 5921), False, 'from sklearn.model_selection import GridSearchCV\n'), ((7869, 7882), 'shap.initjs', 'shap.initjs', ([], {}), '()\n', (7880, 7882), False, 'import shap\n'), ((7907, 7949), 'shap.TreeExplainer', 'shap.TreeExplainer', (['forest_estimator_tuned'], {}), '(forest_estimator_tuned)\n', (7925, 7949), False, 'import shap\n'), ((8023, 8057), 'shap.plots.bar', 'shap.plots.bar', (['shap_vals[:, :, 0]'], {}), '(shap_vals[:, :, 0])\n', (8037, 8057), False, 'import shap\n'), ((8072, 8110), 'shap.plots.heatmap', 'shap.plots.heatmap', (['shap_vals[:, :, 0]'], {}), '(shap_vals[:, :, 0])\n', (8090, 8110), False, 'import shap\n'), ((8125, 8171), 'shap.summary_plot', 'shap.summary_plot', (['shap_vals[:, :, 0]', 'x_train'], {}), '(shap_vals[:, :, 0], x_train)\n', (8142, 8171), False, 'import shap\n'), ((2105, 2140), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['actual', 'predicted'], {}), '(actual, predicted)\n', (2121, 2140), False, 'from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, recall_score\n'), ((2145, 2171), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (2155, 2171), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2297), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '""".2f"""', 'xticklabels': "['Not Default', 'Default']", 'yticklabels': "['Not Default', 'Default']"}), "(cm, annot=True, fmt='.2f', xticklabels=['Not Default',\n 'Default'], yticklabels=['Not Default', 'Default'])\n", (2186, 2297), True, 'import seaborn as sns\n'), ((2299, 2319), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actual"""'], {}), "('Actual')\n", (2309, 2319), True, 'import matplotlib.pyplot as plt\n'), ((2324, 2347), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Predicted"""'], {}), "('Predicted')\n", (2334, 2347), True, 'import matplotlib.pyplot as plt\n'), ((2352, 2362), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2360, 2362), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3594), 'numpy.arange', 'np.arange', (['(2)', '(7)'], {}), '(2, 7)\n', (3588, 3594), True, 'import numpy as np\n'), ((5696, 5714), 'numpy.arange', 'np.arange', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (5705, 5714), True, 'import numpy as np\n'), ((2054, 2094), 'sklearn.metrics.classification_report', 'classification_report', (['actual', 'predicted'], {}), '(actual, predicted)\n', (2075, 2094), False, 'from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, recall_score\n'), ((3073, 3135), 'pandas.DataFrame', 'pd.DataFrame', (['important'], {'index': 'columns', 'columns': "['Importance']"}), "(important, index=columns, columns=['Importance'])\n", (3085, 3135), True, 'import pandas as pd\n'), ((4432, 4494), 'pandas.DataFrame', 'pd.DataFrame', (['important'], {'index': 'columns', 'columns': "['Importance']"}), "(important, index=columns, columns=['Importance'])\n", (4444, 4494), True, 'import pandas as pd\n')] |
from random import randint, choice, shuffle
"""simple password generator
it basically takes every character typed by the user
and after each of them a number from 1 to 10 and one more symbol are placed"""
letras = list()
senha = list()
chave = input('Digite a base da sua senha: ')
while len(chave) > 8:
chave = input('A base só pode ter até 8 caracteres, digite a base da sua senha novamente: ')
for c in range(0, len(chave)):
letras.append(chave[c])
senha.append(letras[:])
letras.pop()
caracteres = (')', '*', '/', '%', '!')
print('a senha gerada é: ')
shuffle(senha)
cores = ('\033[1;31m', '\033[1;32m', '\033[1;33m')
for i, l in enumerate(senha):
adcionais = randint(0, 10)
p = l[:][0]
p += str(adcionais) + choice(caracteres)
print(p,end='')
print('\n\033[32;1mBoa sorte em decorar ela!')
| [
"random.shuffle",
"random.choice",
"random.randint"
] | [((592, 606), 'random.shuffle', 'shuffle', (['senha'], {}), '(senha)\n', (599, 606), False, 'from random import randint, choice, shuffle\n'), ((704, 718), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (711, 718), False, 'from random import randint, choice, shuffle\n'), ((761, 779), 'random.choice', 'choice', (['caracteres'], {}), '(caracteres)\n', (767, 779), False, 'from random import randint, choice, shuffle\n')] |
from sys import *
from matplotlib_venn import venn3, venn3_circles
from matplotlib import pyplot as plt
s1 = set(open('wd-inst-of-prot', 'r').readlines())
s2 = set(open('wd-subc-of-prot', 'r').readlines())
s3 = set(open('wd-refseqp', 'r').readlines())
#s4 = set(open('t4', 'r').readlines())
venn3([s3,s2,s1], ('RefSeq', 'subc', 'inst of protein'))
c = venn3_circles([s3,s2,s1])
c[0].set_lw(1.0)
plt.show()
#plt.savefig('venn.svg')
| [
"matplotlib_venn.venn3_circles",
"matplotlib_venn.venn3",
"matplotlib.pyplot.show"
] | [((292, 350), 'matplotlib_venn.venn3', 'venn3', (['[s3, s2, s1]', "('RefSeq', 'subc', 'inst of protein')"], {}), "([s3, s2, s1], ('RefSeq', 'subc', 'inst of protein'))\n", (297, 350), False, 'from matplotlib_venn import venn3, venn3_circles\n'), ((353, 380), 'matplotlib_venn.venn3_circles', 'venn3_circles', (['[s3, s2, s1]'], {}), '([s3, s2, s1])\n', (366, 380), False, 'from matplotlib_venn import venn3, venn3_circles\n'), ((396, 406), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (404, 406), True, 'from matplotlib import pyplot as plt\n')] |
import datetime
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list
from floodsystem.plot import plot_water_levels, plot_water_level_with_fit
stations = build_station_list()
import numpy as np
def test_polt_water_level_with_fit():
x = np.linspace(1, 1000, 100000)
y = []
for i in x:
y.append(3*i**2 + 5)
p_coeff = np.polyfit(x, y, 2)
poly = np.poly1d(p_coeff)
assert int(p_coeff[0]) == 2 | [
"floodsystem.stationdata.build_station_list",
"numpy.poly1d",
"numpy.linspace",
"numpy.polyfit"
] | [((214, 234), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (232, 234), False, 'from floodsystem.stationdata import build_station_list\n'), ((302, 330), 'numpy.linspace', 'np.linspace', (['(1)', '(1000)', '(100000)'], {}), '(1, 1000, 100000)\n', (313, 330), True, 'import numpy as np\n'), ((401, 420), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(2)'], {}), '(x, y, 2)\n', (411, 420), True, 'import numpy as np\n'), ((432, 450), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (441, 450), True, 'import numpy as np\n')] |
from CSIKit.csi import CSIFrame
import ast
import numpy as np
class ESP32CSIFrame(CSIFrame):
# https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/network/esp_wifi.html#_CPPv418wifi_pkt_rx_ctrl_t
__slots__ = ["type", "role", "mac", "rssi", "rate", "sig_mode", "mcs", "bandwidth", "smoothing", "not_sounding",
"aggregation", "stbc", "fec_coding", "sgi", "noise_floor", "ampdu_cnt", "channel", "secondary_channel",
"local_timestamp", "ant", "sig_len", "rx_state", "real_time_set", "real_timestamp", "len", "CSI_DATA"]
def __init__(self, csv_line: list):
self.type = csv_line[0]
self.role = csv_line[1]
self.mac = csv_line[2]
self.rssi = csv_line[3]
self.rate = csv_line[4]
self.sig_mode = csv_line[5]
self.mcs = csv_line[6]
self.bandwidth = 20 if csv_line[7] == "0" else 40
self.smoothing = csv_line[8]
self.not_sounding = csv_line[9]
self.aggregation = csv_line[10]
self.stbc = csv_line[11]
self.fec_coding = csv_line[12]
self.sgi = csv_line[13]
self.noise_floor = csv_line[14]
self.ampdu_cnt = csv_line[15]
self.channel = csv_line[16]
self.secondary_channel = csv_line[17]
self.local_timestamp = csv_line[18]
self.ant = csv_line[19]
self.sig_len = csv_line[20]
self.rx_state = csv_line[21]
self.real_time_set = csv_line[22]
self.real_timestamp = csv_line[23]
self.len = csv_line[24]
string_data = csv_line[25]
self.csi_matrix = ESP32CSIFrame.parse_matrix(string_data)
@staticmethod
def parse_matrix(string_data, bandwidth=20):
array_string = string_data.replace(" ", ", ")
array_string_asarray = ast.literal_eval(array_string)
if bandwidth == 20 and len(array_string_asarray) < 128:
ESP32CSIFrame.fill_missing(array_string_asarray, 128)
elif bandwidth == 40 and len(array_string_asarray) < 256:
ESP32CSIFrame.fill_missing(array_string_asarray, 256)
int8_matrix = np.array(array_string_asarray)
int8_matrix = int8_matrix.reshape(-1, 2)
complex_matrix = int8_matrix.astype(np.float32).view(np.complex64)
return complex_matrix
# Seems some CSI lines are missing a value.
# Very rare, I assume weird dropped behaviour.
# Probably not the best way to fill the gap.
@staticmethod
def fill_missing(array, expected_length):
remainder = expected_length - len(array)
for _ in range(remainder):
array.append(0) | [
"ast.literal_eval",
"numpy.array"
] | [((1808, 1838), 'ast.literal_eval', 'ast.literal_eval', (['array_string'], {}), '(array_string)\n', (1824, 1838), False, 'import ast\n'), ((2125, 2155), 'numpy.array', 'np.array', (['array_string_asarray'], {}), '(array_string_asarray)\n', (2133, 2155), True, 'import numpy as np\n')] |
from tensorflow_trees.encoder import Encoder, EncoderCellsBuilder
from tensorflow_trees.decoder import Decoder, DecoderCellsBuilder
from examples.simple_expression.exp_definition import BinaryExpressionTreeGen, NaryExpressionTreeGen
from tensorflow_trees.definition import Tree
from examples.simple_expression.flags_definition import *
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import tensorflow.contrib.summary as tfs
import os
import json
FLAGS = tf.flags.FLAGS
def main(argv=None):
#########
# Checkpoints and Summaries
#########
if tf.gfile.Exists(FLAGS.model_dir):
if FLAGS.overwrite:
tf.logging.warn("Deleting old log directory at {}".format(FLAGS.model_dir))
tf.gfile.DeleteRecursively(FLAGS.model_dir)
tf.gfile.MakeDirs(FLAGS.model_dir)
else:
raise ValueError("Log dir already exists!")
else:
tf.gfile.MakeDirs(FLAGS.model_dir)
summary_writer = tfs.create_file_writer(FLAGS.model_dir, flush_millis=1000)
summary_writer.set_as_default()
print("Summaries in " + FLAGS.model_dir)
with open(os.path.join(FLAGS.model_dir, "flags.json"), 'w') as f:
json.dump(FLAGS.flag_values_dict(), f)
#########
# DATA
#########
if FLAGS.fixed_arity:
tree_gen = BinaryExpressionTreeGen(9)
else:
tree_gen = NaryExpressionTreeGen(9, FLAGS.max_arity)
def get_batch():
return [tree_gen.generate(FLAGS.max_depth) for _ in range(FLAGS.batch_size)]
#########
# MODEL
#########
activation = getattr(tf.nn, FLAGS.activation)
encoder = Encoder(tree_def=tree_gen.tree_def,
embedding_size=FLAGS.embedding_size,
cut_arity=FLAGS.cut_arity, max_arity=FLAGS.max_arity,
variable_arity_strategy=FLAGS.enc_variable_arity_strategy,
cellsbuilder=EncoderCellsBuilder(
EncoderCellsBuilder.simple_cell_builder(hidden_coef=FLAGS.hidden_cell_coef,
activation=activation,
gate=FLAGS.encoder_gate),
EncoderCellsBuilder.simple_dense_embedder_builder(activation=activation)),
name='encoder')
decoder = Decoder(tree_def=tree_gen.tree_def,
embedding_size=FLAGS.embedding_size,
max_node_count=FLAGS.max_node_count,
max_depth=FLAGS.max_depth,
max_arity=FLAGS.max_arity,
cut_arity=FLAGS.cut_arity,
cellsbuilder=DecoderCellsBuilder(
distrib_builder=
DecoderCellsBuilder.simple_distrib_cell_builder(FLAGS.hidden_cell_coef,
activation=activation),
categorical_value_inflater_builder=
DecoderCellsBuilder.simple_1ofk_value_inflater_builder(FLAGS.hidden_cell_coef,
activation=activation),
dense_value_inflater_builder=None, # unused
node_inflater_builder=
DecoderCellsBuilder.simple_node_inflater_builder(FLAGS.hidden_cell_coef,
activation=activation,
gate=FLAGS.decoder_gate)),
variable_arity_strategy=FLAGS.dec_variable_arity_strategy)
###########
# TRAINING
###########
optimizer = tf.train.AdamOptimizer()
with tfs.always_record_summaries():
for i in range(FLAGS.max_iter):
with tfe.GradientTape() as tape:
xs = get_batch()
batch_enc = encoder(xs)
batch_dec = decoder(encodings=batch_enc.get_root_embeddings(), targets=xs)
loss_struct, loss_val = batch_dec.reconstruction_loss()
loss = loss_struct + loss_val
variables = encoder.variables + decoder.variables
grad = tape.gradient(loss, variables)
gnorm = tf.global_norm(grad)
grad, _ = tf.clip_by_global_norm(grad, 0.02, gnorm)
tfs.scalar("norms/grad", gnorm)
optimizer.apply_gradients(zip(grad, variables), global_step=tf.train.get_or_create_global_step())
if i % FLAGS.check_every == 0:
batch_unsuperv = decoder(encodings=batch_enc.get_root_embeddings())
_, _, v_avg_sup, v_acc_sup = Tree.compare_trees(xs, batch_dec.decoded_trees)
s_avg, s_acc, v_avg, v_acc = Tree.compare_trees(xs, batch_unsuperv.decoded_trees)
print("{0}:\t{1:.3f}".format(i, loss))
tfs.scalar("loss/struct", loss_struct)
tfs.scalar("loss/val", loss_val)
tfs.scalar("loss/loss", loss)
tfs.scalar("overlaps/supervised/value_avg", v_avg_sup)
tfs.scalar("overlaps/supervised/value_acc", v_acc_sup)
tfs.scalar("overlaps/unsupervised/struct_avg", s_avg)
tfs.scalar("overlaps/unsupervised/struct_acc", s_acc)
tfs.scalar("overlaps/unsupervised/value_avg", v_avg)
tfs.scalar("overlaps/unsupervised/value_acc", v_acc)
if __name__ == "__main__":
define_common_flags()
define_encoder_flags()
define_decoder_flags()
tfe.run()
| [
"tensorflow.contrib.summary.scalar",
"tensorflow_trees.decoder.DecoderCellsBuilder.simple_distrib_cell_builder",
"tensorflow.gfile.Exists",
"examples.simple_expression.exp_definition.NaryExpressionTreeGen",
"os.path.join",
"tensorflow.clip_by_global_norm",
"tensorflow_trees.decoder.DecoderCellsBuilder.simple_1ofk_value_inflater_builder",
"tensorflow_trees.encoder.EncoderCellsBuilder.simple_cell_builder",
"examples.simple_expression.exp_definition.BinaryExpressionTreeGen",
"tensorflow.contrib.eager.run",
"tensorflow.train.get_or_create_global_step",
"tensorflow.global_norm",
"tensorflow.gfile.DeleteRecursively",
"tensorflow_trees.definition.Tree.compare_trees",
"tensorflow.contrib.summary.create_file_writer",
"tensorflow.gfile.MakeDirs",
"tensorflow_trees.encoder.EncoderCellsBuilder.simple_dense_embedder_builder",
"tensorflow.contrib.summary.always_record_summaries",
"tensorflow.train.AdamOptimizer",
"tensorflow.contrib.eager.GradientTape",
"tensorflow_trees.decoder.DecoderCellsBuilder.simple_node_inflater_builder"
] | [((581, 613), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (596, 613), True, 'import tensorflow as tf\n'), ((979, 1037), 'tensorflow.contrib.summary.create_file_writer', 'tfs.create_file_writer', (['FLAGS.model_dir'], {'flush_millis': '(1000)'}), '(FLAGS.model_dir, flush_millis=1000)\n', (1001, 1037), True, 'import tensorflow.contrib.summary as tfs\n'), ((3833, 3857), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (3855, 3857), True, 'import tensorflow as tf\n'), ((5705, 5714), 'tensorflow.contrib.eager.run', 'tfe.run', ([], {}), '()\n', (5712, 5714), True, 'import tensorflow.contrib.eager as tfe\n'), ((922, 956), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (939, 956), True, 'import tensorflow as tf\n'), ((1323, 1349), 'examples.simple_expression.exp_definition.BinaryExpressionTreeGen', 'BinaryExpressionTreeGen', (['(9)'], {}), '(9)\n', (1346, 1349), False, 'from examples.simple_expression.exp_definition import BinaryExpressionTreeGen, NaryExpressionTreeGen\n'), ((1379, 1420), 'examples.simple_expression.exp_definition.NaryExpressionTreeGen', 'NaryExpressionTreeGen', (['(9)', 'FLAGS.max_arity'], {}), '(9, FLAGS.max_arity)\n', (1400, 1420), False, 'from examples.simple_expression.exp_definition import BinaryExpressionTreeGen, NaryExpressionTreeGen\n'), ((3868, 3897), 'tensorflow.contrib.summary.always_record_summaries', 'tfs.always_record_summaries', ([], {}), '()\n', (3895, 3897), True, 'import tensorflow.contrib.summary as tfs\n'), ((743, 786), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (769, 786), True, 'import tensorflow as tf\n'), ((799, 833), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (816, 833), True, 'import tensorflow as tf\n'), ((1134, 1177), 'os.path.join', 'os.path.join', (['FLAGS.model_dir', '"""flags.json"""'], {}), "(FLAGS.model_dir, 'flags.json')\n", (1146, 1177), False, 'import os\n'), ((4401, 4421), 'tensorflow.global_norm', 'tf.global_norm', (['grad'], {}), '(grad)\n', (4415, 4421), True, 'import tensorflow as tf\n'), ((4444, 4485), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grad', '(0.02)', 'gnorm'], {}), '(grad, 0.02, gnorm)\n', (4466, 4485), True, 'import tensorflow as tf\n'), ((4499, 4530), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""norms/grad"""', 'gnorm'], {}), "('norms/grad', gnorm)\n", (4509, 4530), True, 'import tensorflow.contrib.summary as tfs\n'), ((1971, 2098), 'tensorflow_trees.encoder.EncoderCellsBuilder.simple_cell_builder', 'EncoderCellsBuilder.simple_cell_builder', ([], {'hidden_coef': 'FLAGS.hidden_cell_coef', 'activation': 'activation', 'gate': 'FLAGS.encoder_gate'}), '(hidden_coef=FLAGS.hidden_cell_coef,\n activation=activation, gate=FLAGS.encoder_gate)\n', (2010, 2098), False, 'from tensorflow_trees.encoder import Encoder, EncoderCellsBuilder\n'), ((2260, 2332), 'tensorflow_trees.encoder.EncoderCellsBuilder.simple_dense_embedder_builder', 'EncoderCellsBuilder.simple_dense_embedder_builder', ([], {'activation': 'activation'}), '(activation=activation)\n', (2309, 2332), False, 'from tensorflow_trees.encoder import Encoder, EncoderCellsBuilder\n'), ((3956, 3974), 'tensorflow.contrib.eager.GradientTape', 'tfe.GradientTape', ([], {}), '()\n', (3972, 3974), True, 'import tensorflow.contrib.eager as tfe\n'), ((4817, 4864), 
'tensorflow_trees.definition.Tree.compare_trees', 'Tree.compare_trees', (['xs', 'batch_dec.decoded_trees'], {}), '(xs, batch_dec.decoded_trees)\n', (4835, 4864), False, 'from tensorflow_trees.definition import Tree\n'), ((4910, 4962), 'tensorflow_trees.definition.Tree.compare_trees', 'Tree.compare_trees', (['xs', 'batch_unsuperv.decoded_trees'], {}), '(xs, batch_unsuperv.decoded_trees)\n', (4928, 4962), False, 'from tensorflow_trees.definition import Tree\n'), ((5036, 5074), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""loss/struct"""', 'loss_struct'], {}), "('loss/struct', loss_struct)\n", (5046, 5074), True, 'import tensorflow.contrib.summary as tfs\n'), ((5091, 5123), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""loss/val"""', 'loss_val'], {}), "('loss/val', loss_val)\n", (5101, 5123), True, 'import tensorflow.contrib.summary as tfs\n'), ((5140, 5169), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""loss/loss"""', 'loss'], {}), "('loss/loss', loss)\n", (5150, 5169), True, 'import tensorflow.contrib.summary as tfs\n'), ((5187, 5241), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""overlaps/supervised/value_avg"""', 'v_avg_sup'], {}), "('overlaps/supervised/value_avg', v_avg_sup)\n", (5197, 5241), True, 'import tensorflow.contrib.summary as tfs\n'), ((5258, 5312), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""overlaps/supervised/value_acc"""', 'v_acc_sup'], {}), "('overlaps/supervised/value_acc', v_acc_sup)\n", (5268, 5312), True, 'import tensorflow.contrib.summary as tfs\n'), ((5330, 5383), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""overlaps/unsupervised/struct_avg"""', 's_avg'], {}), "('overlaps/unsupervised/struct_avg', s_avg)\n", (5340, 5383), True, 'import tensorflow.contrib.summary as tfs\n'), ((5400, 5453), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""overlaps/unsupervised/struct_acc"""', 's_acc'], {}), "('overlaps/unsupervised/struct_acc', s_acc)\n", (5410, 5453), True, 'import tensorflow.contrib.summary as tfs\n'), ((5470, 5522), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""overlaps/unsupervised/value_avg"""', 'v_avg'], {}), "('overlaps/unsupervised/value_avg', v_avg)\n", (5480, 5522), True, 'import tensorflow.contrib.summary as tfs\n'), ((5539, 5591), 'tensorflow.contrib.summary.scalar', 'tfs.scalar', (['"""overlaps/unsupervised/value_acc"""', 'v_acc'], {}), "('overlaps/unsupervised/value_acc', v_acc)\n", (5549, 5591), True, 'import tensorflow.contrib.summary as tfs\n'), ((2814, 2912), 'tensorflow_trees.decoder.DecoderCellsBuilder.simple_distrib_cell_builder', 'DecoderCellsBuilder.simple_distrib_cell_builder', (['FLAGS.hidden_cell_coef'], {'activation': 'activation'}), '(FLAGS.hidden_cell_coef,\n activation=activation)\n', (2861, 2912), False, 'from tensorflow_trees.decoder import Decoder, DecoderCellsBuilder\n'), ((3074, 3180), 'tensorflow_trees.decoder.DecoderCellsBuilder.simple_1ofk_value_inflater_builder', 'DecoderCellsBuilder.simple_1ofk_value_inflater_builder', (['FLAGS.hidden_cell_coef'], {'activation': 'activation'}), '(FLAGS.\n hidden_cell_coef, activation=activation)\n', (3128, 3180), False, 'from tensorflow_trees.decoder import Decoder, DecoderCellsBuilder\n'), ((3410, 3534), 'tensorflow_trees.decoder.DecoderCellsBuilder.simple_node_inflater_builder', 'DecoderCellsBuilder.simple_node_inflater_builder', (['FLAGS.hidden_cell_coef'], {'activation': 'activation', 'gate': 'FLAGS.decoder_gate'}), '(FLAGS.hidden_cell_coef,\n activation=activation, gate=FLAGS.decoder_gate)\n', (3458, 
3534), False, 'from tensorflow_trees.decoder import Decoder, DecoderCellsBuilder\n'), ((4604, 4640), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (4638, 4640), True, 'import tensorflow as tf\n')] |
import random
from random import randint
class State():
def __init__(self):
self.dictionary = {} #{(row, col): (pieceIMG, brightness)}
def addDrop(self, width, top):
screenTop = top - 1
screenLeft = -width // 2
screenRight = width // 2
column = random.randint(screenLeft, screenRight)
self.dictionary[screenTop, column] = (1, 255)
def update(self, screenBottom):
tailSize = 10
keys = self.dictionary.keys()
for cellPos in list(reversed(keys)):
cellIMG = self.dictionary[cellPos][0]
cellOpacity = self.dictionary[cellPos][1]
#Update Cell
opacity = cellOpacity - tailSize if cellOpacity >= tailSize else 0
cellIMG = cellIMG if randint(0, (opacity//20)**2) <= 1 else randint(0,4)
self.dictionary[cellPos] = (cellIMG, opacity)
# Add white to next bottom
if cellPos[0] <= screenBottom:
nextCell = (cellPos[0] + 1, cellPos[1])
if nextCell not in self.dictionary:
self.dictionary[nextCell] = (randint(0,4), 255)
# Deleting cells
if cellOpacity < tailSize:
#del temp[cellPos]
self.dictionary.pop(cellPos)
| [
"random.randint"
] | [((296, 335), 'random.randint', 'random.randint', (['screenLeft', 'screenRight'], {}), '(screenLeft, screenRight)\n', (310, 335), False, 'import random\n'), ((831, 844), 'random.randint', 'randint', (['(0)', '(4)'], {}), '(0, 4)\n', (838, 844), False, 'from random import randint\n'), ((792, 824), 'random.randint', 'randint', (['(0)', '((opacity // 20) ** 2)'], {}), '(0, (opacity // 20) ** 2)\n', (799, 824), False, 'from random import randint\n'), ((1156, 1169), 'random.randint', 'randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1163, 1169), False, 'from random import randint\n')] |
import logging
import sys
import traceback
from flask import Flask, jsonify
def create_app(script_info=None):
# instantiate the app
app = Flask(
__name__, template_folder='../templates'
)
# set config
app.logger.setLevel(logging.INFO)
from src.controller import excel_service
app.register_blueprint(excel_service, url_prefix='/')
@app.route('/healthcheck')
def healthcheck():
return jsonify("ok")
# shell context for flask cli
@app.shell_context_processor
def ctx():
return {'app': app}
@app.errorhandler(Exception)
def _error(error):
trace = traceback.format_exc()
status_code = getattr(error, 'status_code', 400)
response_dict = dict(getattr(error, 'payload', None) or ())
response_dict['message'] = str(error)
response_dict['traceback'] = trace
response = jsonify(response_dict)
response.status_code = status_code
traceback.print_exc(file=sys.stdout)
return response
return app
| [
"traceback.print_exc",
"flask.jsonify",
"flask.Flask",
"traceback.format_exc"
] | [((149, 196), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '"""../templates"""'}), "(__name__, template_folder='../templates')\n", (154, 196), False, 'from flask import Flask, jsonify\n'), ((441, 454), 'flask.jsonify', 'jsonify', (['"""ok"""'], {}), "('ok')\n", (448, 454), False, 'from flask import Flask, jsonify\n'), ((639, 661), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (659, 661), False, 'import traceback\n'), ((896, 918), 'flask.jsonify', 'jsonify', (['response_dict'], {}), '(response_dict)\n', (903, 918), False, 'from flask import Flask, jsonify\n'), ((970, 1006), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stdout'}), '(file=sys.stdout)\n', (989, 1006), False, 'import traceback\n')] |
# -*- coding:utf-8 -*-
from typing import Optional, Tuple, List
import cv2
import numpy as np
import streamlit as st
from PIL import Image
from utils.model import MODEL_TYPE, draw_bboxes
def description(header: str, description: str):
"""show description
Args:
header (str): header message
description (str): description text
"""
st.subheader(header)
st.markdown(description)
def object_detector_ui() -> Tuple[int, str, float]:
"""show object detector ui in sidebar
Returns:
Tuple[int, str, float]: [number of threads, model type string, threshold]
"""
st.sidebar.markdown("# Model Config")
num_thread = st.sidebar.slider("Number of Thread", 1, 4, 1, 1)
confidence_threshold = st.sidebar.slider(
"Confidence threshold", 0.0, 1.0, 0.5, 0.01)
model_type = st.sidebar.radio("Model Type", MODEL_TYPE)
return num_thread, model_type, confidence_threshold
def upload_image() -> Optional[np.ndarray]:
"""show upload image area
Returns:
Optional[np.ndarray]: uploaded image
"""
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "JPG"])
if uploaded_file is not None:
file_bytes = np.asarray(
bytearray(uploaded_file.read()), dtype=np.uint8)
image = cv2.imdecode(file_bytes, 1)
return image
else:
return None
def show_image(image: np.ndarray, bboxes: List, scores: List, classes: List, detect_num: int, elapsed_time: int):
"""show processed image.
Args:
image (np.ndarray): original image
bboxes (List): detected bounding box
scores (List): detected score
classes (List): detected class names
detect_num (int): number of detection
elapsed_time (int): processing time
"""
image = draw_bboxes(image, bboxes, scores, classes, detect_num)
image = cv2pil(image)
st.image(image, caption='Uploaded Image.', use_column_width=True)
st.markdown("**elapsed time : " + str(elapsed_time) + "[msec]**")
pass
def cv2pil(image: np.ndarray) -> Image:
"""cv2 image to PIL image
Args:
image (np.ndarray): cv2 image
Returns:
Image: PIL image
"""
new_image = image.copy()
    if new_image.ndim == 2: # grayscale
        pass
    elif new_image.shape[2] == 3: # color
        new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)
    elif new_image.shape[2] == 4: # with alpha (transparency)
        new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA)
new_image = Image.fromarray(new_image)
return new_image
| [
"streamlit.subheader",
"streamlit.markdown",
"streamlit.sidebar.slider",
"streamlit.image",
"cv2.cvtColor",
"cv2.imdecode",
"streamlit.file_uploader",
"streamlit.sidebar.markdown",
"streamlit.sidebar.radio",
"utils.model.draw_bboxes",
"PIL.Image.fromarray"
] | [((367, 387), 'streamlit.subheader', 'st.subheader', (['header'], {}), '(header)\n', (379, 387), True, 'import streamlit as st\n'), ((392, 416), 'streamlit.markdown', 'st.markdown', (['description'], {}), '(description)\n', (403, 416), True, 'import streamlit as st\n'), ((621, 658), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""# Model Config"""'], {}), "('# Model Config')\n", (640, 658), True, 'import streamlit as st\n'), ((676, 725), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Number of Thread"""', '(1)', '(4)', '(1)', '(1)'], {}), "('Number of Thread', 1, 4, 1, 1)\n", (693, 725), True, 'import streamlit as st\n'), ((753, 815), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Confidence threshold"""', '(0.0)', '(1.0)', '(0.5)', '(0.01)'], {}), "('Confidence threshold', 0.0, 1.0, 0.5, 0.01)\n", (770, 815), True, 'import streamlit as st\n'), ((842, 884), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Model Type"""', 'MODEL_TYPE'], {}), "('Model Type', MODEL_TYPE)\n", (858, 884), True, 'import streamlit as st\n'), ((1105, 1164), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose an image..."""'], {'type': "['jpg', 'JPG']"}), "('Choose an image...', type=['jpg', 'JPG'])\n", (1121, 1164), True, 'import streamlit as st\n'), ((1825, 1880), 'utils.model.draw_bboxes', 'draw_bboxes', (['image', 'bboxes', 'scores', 'classes', 'detect_num'], {}), '(image, bboxes, scores, classes, detect_num)\n', (1836, 1880), False, 'from utils.model import MODEL_TYPE, draw_bboxes\n'), ((1911, 1976), 'streamlit.image', 'st.image', (['image'], {'caption': '"""Uploaded Image."""', 'use_column_width': '(True)'}), "(image, caption='Uploaded Image.', use_column_width=True)\n", (1919, 1976), True, 'import streamlit as st\n'), ((2527, 2553), 'PIL.Image.fromarray', 'Image.fromarray', (['new_image'], {}), '(new_image)\n', (2542, 2553), False, 'from PIL import Image\n'), ((1309, 1336), 'cv2.imdecode', 'cv2.imdecode', (['file_bytes', '(1)'], {}), '(file_bytes, 1)\n', (1321, 1336), False, 'import cv2\n'), ((2363, 2405), 'cv2.cvtColor', 'cv2.cvtColor', (['new_image', 'cv2.COLOR_BGR2RGB'], {}), '(new_image, cv2.COLOR_BGR2RGB)\n', (2375, 2405), False, 'import cv2\n'), ((2466, 2510), 'cv2.cvtColor', 'cv2.cvtColor', (['new_image', 'cv2.COLOR_BGRA2RGBA'], {}), '(new_image, cv2.COLOR_BGRA2RGBA)\n', (2478, 2510), False, 'import cv2\n')] |
from urllib import request, error
from fake_useragent import UserAgent
import re
import time
def request_(url):
try:
ua = UserAgent()
headers = {'User-Agent': ua.chrome}
req = request.Request(url, headers=headers)
return request.urlopen(req).read().decode('utf-8')
    except error.URLError as e:  # 'error' is the urllib.error module; catch its exception class
return e.reason
def parse_(html):
ol = re.search('<ol class="grid_view">(.*?)</ol>', html, re.S).group(0)
content = ('<li>.*?<em class="">(\d+)</em>.*?class="hd".*?href="(.*?)".*?class="title">(.*?)</span>.*?' +
'property="v:average">(.*?)</span>.*?</li>')
matchlist = re.compile(content, re.S).findall(ol)
for match in matchlist:
yield {
'rank' : match[0],
'src' : match[1],
'name' : match[2],
'score' : match[3]
}
def main():
url = 'https://movie.douban.com/top250?start={}'
for page in range(10):
start = page*25
html = request_(url.format(start))
time.sleep(0.5)
for match in parse_(html):
print(match)
if __name__ == '__main__':
main()
| [
"urllib.request.Request",
"fake_useragent.UserAgent",
"urllib.request.urlopen",
"time.sleep",
"re.search",
"re.compile"
] | [((136, 147), 'fake_useragent.UserAgent', 'UserAgent', ([], {}), '()\n', (145, 147), False, 'from fake_useragent import UserAgent\n'), ((206, 243), 'urllib.request.Request', 'request.Request', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (221, 243), False, 'from urllib import request, error\n'), ((1020, 1035), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1030, 1035), False, 'import time\n'), ((379, 436), 're.search', 're.search', (['"""<ol class="grid_view">(.*?)</ol>"""', 'html', 're.S'], {}), '(\'<ol class="grid_view">(.*?)</ol>\', html, re.S)\n', (388, 436), False, 'import re\n'), ((632, 657), 're.compile', 're.compile', (['content', 're.S'], {}), '(content, re.S)\n', (642, 657), False, 'import re\n'), ((259, 279), 'urllib.request.urlopen', 'request.urlopen', (['req'], {}), '(req)\n', (274, 279), False, 'from urllib import request, error\n')] |
from datetime import datetime
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
class MySQLConfig(object):
SQLALCHEMY_DATABASE_URI = "mysql://root:[email protected]:3306/toutiao"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = True
app.config.from_object(MySQLConfig)
# Create the SQLAlchemy handle used for all database operations
db = SQLAlchemy(app)
class User(db.Model):
"""
    User basic information table
"""
__tablename__ = 'user_basic'
class STATUS:
ENABLE = 1
DISABLE = 0
id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID')
mobile = db.Column(db.String, doc='手机号')
password = db.Column(db.String, doc='密码')
name = db.Column('user_name', db.String, doc='昵称')
profile_photo = db.Column(db.String, doc='头像')
last_login = db.Column(db.DateTime, doc='最后登录时间')
is_media = db.Column(db.Boolean, default=False, doc='是否是自媒体')
is_verified = db.Column(db.Boolean, default=False, doc='是否实名认证')
introduction = db.Column(db.String, doc='简介')
certificate = db.Column(db.String, doc='认证')
article_count = db.Column(db.Integer, default=0, doc='发帖数')
following_count = db.Column(db.Integer, default=0, doc='关注的人数')
fans_count = db.Column(db.Integer, default=0, doc='被关注的人数(粉丝数)')
like_count = db.Column(db.Integer, default=0, doc='累计点赞人数')
read_count = db.Column(db.Integer, default=0, doc='累计阅读人数')
account = db.Column(db.String, doc='账号')
email = db.Column(db.String, doc='邮箱')
status = db.Column(db.Integer, default=1, doc='状态,是否可用')
    # Use supplementary relationship attributes to expose the related records
profile = db.relationship('UserProfile', uselist=False)
follows = db.relationship('Relation')
    # Alternatively, use primaryjoin to state the join condition between the two tables explicitly
# profile = db.relationship('UserProfile', primaryjoin='User.id==foreign(UserProfile.id)', uselist=False)
class UserProfile(db.Model):
"""
    User profile table
"""
__tablename__ = 'user_profile'
class GENDER:
MALE = 0
FEMALE = 1
    # Use a ForeignKey to make the relation between the two tables explicit
id = db.Column('user_id', db.Integer, db.ForeignKey('user_basic.user_id'), primary_key=True, doc='用户ID')
# id = db.Column('user_id', db.Integer, primary_key=True, doc='用户ID')
gender = db.Column(db.Integer, default=0, doc='性别')
birthday = db.Column(db.Date, doc='生日')
real_name = db.Column(db.String, doc='真实姓名')
id_number = db.Column(db.String, doc='身份证号')
id_card_front = db.Column(db.String, doc='身份证正面')
id_card_back = db.Column(db.String, doc='身份证背面')
id_card_handheld = db.Column(db.String, doc='手持身份证')
ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间')
utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间')
register_media_time = db.Column(db.DateTime, doc='注册自媒体时间')
area = db.Column(db.String, doc='地区')
company = db.Column(db.String, doc='公司')
career = db.Column(db.String, doc='职业')
class Relation(db.Model):
"""
    User relation (follow) table
"""
__tablename__ = 'user_relation'
class RELATION:
DELETE = 0
FOLLOW = 1
BLACKLIST = 2
id = db.Column('relation_id', db.Integer, primary_key=True, doc='主键ID')
# user_id = db.Column(db.Integer, doc='用户ID')
user_id = db.Column(db.Integer, db.ForeignKey('user_basic.user_id'), doc='用户ID')
target_user_id = db.Column(db.Integer, doc='目标用户ID')
relation = db.Column(db.Integer, doc='关系')
ctime = db.Column('create_time', db.DateTime, default=datetime.now, doc='创建时间')
utime = db.Column('update_time', db.DateTime, default=datetime.now, onupdate=datetime.now, doc='更新时间')
target_user = db.relationship('User', primaryjoin='Relation.target_user_id==foreign(User.id)')
# Query: which users does the user with mobile 13912345678 follow (return their user ids)
# SELECT user_basic.user_id,user_relation.target_user_id FROM
# user_basic INNER JOIN user_relation ON user_basic.user_id = user_relation.user_id
# WHERE user_basic.mobile = '13912345678'
# User.query.join(User.follow).options(load_only(User.id),contains_eager(User.follow).load_only(Relation.target_user_id)).filter(User.mobile=='13912345678',Relation.relation==1).all()
# Query: which users follow the user with id 1 (return their user names)
# SELECT user_basic.user_name FROM
# user_basic INNER JOIN user_relation
# ON user_basic.user_id=user_relation.target_user_id
# WHERE user_basic.user_id=1
# Relation.query.join(Relation.target_user).options(contains_eager(Relation.target_user).load_only(User.name),load_only(Relation.target_user_id)).filter(User.id==1).all()
| [
"flask_sqlalchemy.SQLAlchemy",
"flask.Flask"
] | [((101, 116), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (106, 116), False, 'from flask import Flask\n'), ((347, 362), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (357, 362), False, 'from flask_sqlalchemy import SQLAlchemy\n')] |
# Generated by Django 4.0.1 on 2022-01-22 15:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0014_alter_list_users'),
]
operations = [
migrations.RenameField(
model_name='list',
old_name='users',
new_name='user',
),
]
| [
"django.db.migrations.RenameField"
] | [((221, 297), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""list"""', 'old_name': '"""users"""', 'new_name': '"""user"""'}), "(model_name='list', old_name='users', new_name='user')\n", (243, 297), False, 'from django.db import migrations\n')] |
####################################################=
###################CE_TOOLS TEST####################
####################################################=
# A functional tester for the famous Corn Engine Utility "CE_tools.py"
# This can also be used as a template on how to make a tester correctly
import ce_tools
from ce_tools import debugFound, systemc
systemc.system("cls" if systemc.name=='nt' else 'clear')
print("Corn Engine Tools Tester (CE_tools.py)")
print("This tools tests all functions up to", ce_tools.ce_tools_ver)
while True:
poop = input("Where would you like to go?: ")
if poop == "help":
systemc.startfile(ce_tools.help)
elif poop == "randomNum":
print("This define prints a random number")
print("using random randint between 0 to infinity")
ce_tools.randomNum()
elif poop == "request hello":
print("this program says hi to you in different ways")
print("Check out cetHelp.txt for more info on that")
ce_tools.RequestHello()
elif poop == "roll a dice":
print("The dice function rolls a random dice")
ce_tools.rollADice()
elif poop == "exit":
        raise SystemExit()  # plain SystemExit() only created the exception without exiting
elif poop == "Debug":
ce_tools.debugFound() # exclusive for testers only
else:
print(ce_tools.wrongInputTester) # uses the text from ce_tools | [
"ce_tools.RequestHello",
"ce_tools.debugFound",
"ce_tools.randomNum",
"ce_tools.systemc.startfile",
"ce_tools.systemc.system",
"ce_tools.rollADice"
] | [((371, 429), 'ce_tools.systemc.system', 'systemc.system', (["('cls' if systemc.name == 'nt' else 'clear')"], {}), "('cls' if systemc.name == 'nt' else 'clear')\n", (385, 429), False, 'from ce_tools import debugFound, systemc\n'), ((646, 678), 'ce_tools.systemc.startfile', 'systemc.startfile', (['ce_tools.help'], {}), '(ce_tools.help)\n', (663, 678), False, 'from ce_tools import debugFound, systemc\n'), ((833, 853), 'ce_tools.randomNum', 'ce_tools.randomNum', ([], {}), '()\n', (851, 853), False, 'import ce_tools\n'), ((1024, 1047), 'ce_tools.RequestHello', 'ce_tools.RequestHello', ([], {}), '()\n', (1045, 1047), False, 'import ce_tools\n'), ((1146, 1166), 'ce_tools.rollADice', 'ce_tools.rollADice', ([], {}), '()\n', (1164, 1166), False, 'import ce_tools\n'), ((1251, 1272), 'ce_tools.debugFound', 'ce_tools.debugFound', ([], {}), '()\n', (1270, 1272), False, 'import ce_tools\n')] |
#
# Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import re
import sys
from subprocess import call
table = sys.argv[1]
garbage = open("garbage.out","w")
v = 'output/graphx/level_0_vertices'
os.system("cat " + v + "/part-* > " + v + "/output")
f = open(v + '/output','r')
o = open('louvain_to_gephi/graphx/community_itr_1.nodes','w')
nodeMap = {}
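# nodeMap: GraphX vertex id -> original node name (filled while parsing the vertex files)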
for line in f:
id = re.search(r'\(([a-zA-Z0-9]+)', line).group(1)
name = re.search(r'(name):([a-zA-Z0-9\-]+)', line).group(2)
comm = re.search(r'(communityName):([a-zA-Z0-9\-]+)', line).group(2)
nodeMap[id] = name
o.write(name + '\t' + comm + '\n')
f.close()
o.close()
call("hadoop fs -mkdir /tmp/trackcomms/" + table + "/output/graphx/comm_1", stdout=garbage, shell=True)
call("hadoop fs -put louvain_to_gephi/graphx/community_itr_1.nodes /tmp/trackcomms/" + table + "/output/graphx/comm_1", stdout=garbage, shell=True)
f = open('edgelist.tsv','r')
o = open('louvain_to_gephi/graphx/graph_itr_0.edges','w')
for line in f:
if len(line.split('\t')) == 3:
source,weight,edgelist = line.split('\t')
edgelist = edgelist.strip().split(',')
for e in edgelist:
o.write('\t'.join((source,e.split(':')[0],e.split(':')[1])) + '\n')
o.close()
f.close()
# Here's the looping piece
i = 1
v = 'output/graphx/level_'+str(i)+'_vertices'
e = 'output/graphx/level_'+str(i)+'_edges'
while os.path.exists(e):
os.system("cat " + v + "/part-* > " + v + "/output")
os.system("cat " + e + "/part-* > " + e + "/output")
level = str(i+1)
f = open(v + '/output','r')
o = open('louvain_to_gephi/graphx/community_itr_' + level + '.nodes','w')
for line in f:
id = re.search(r'\(([a-zA-Z0-9]+)', line).group(1)
name = re.search(r'(name):([a-zA-Z0-9\-]+)', line).group(2)
comm = re.search(r'(communityName):([a-zA-Z0-9\-]+)', line).group(2)
nodeMap[id] = name
o.write(name + '\t' + comm + '\n')
f.close()
o.close()
call("hadoop fs -mkdir /tmp/trackcomms/" + table + "/output/graphx/comm_" + level, stdout=garbage, shell=True)
call("hadoop fs -put louvain_to_gephi/graphx/community_itr_" + level + ".nodes /tmp/trackcomms/" + table + "/output/graphx/comm_" + level, stdout=garbage, shell=True)
f = open(e + '/output','r')
o = open('louvain_to_gephi/graphx/graph_itr_' + str(i) + '.edges','w')
for line in f:
match = re.search(r'Edge\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\)', line)
o.write('\t'.join((nodeMap[match.group(1)],nodeMap[match.group(2)], match.group(3))) + '\n')
o.close()
f.close()
i = i + 1
v = 'output/graphx/level_'+str(i)+'_vertices'
e = 'output/graphx/level_'+str(i)+'_edges'
| [
"re.search",
"subprocess.call",
"os.system",
"os.path.exists"
] | [((772, 824), 'os.system', 'os.system', (["('cat ' + v + '/part-* > ' + v + '/output')"], {}), "('cat ' + v + '/part-* > ' + v + '/output')\n", (781, 824), False, 'import os\n'), ((1211, 1318), 'subprocess.call', 'call', (["('hadoop fs -mkdir /tmp/trackcomms/' + table + '/output/graphx/comm_1')"], {'stdout': 'garbage', 'shell': '(True)'}), "('hadoop fs -mkdir /tmp/trackcomms/' + table + '/output/graphx/comm_1',\n stdout=garbage, shell=True)\n", (1215, 1318), False, 'from subprocess import call\n'), ((1315, 1472), 'subprocess.call', 'call', (["(\n 'hadoop fs -put louvain_to_gephi/graphx/community_itr_1.nodes /tmp/trackcomms/'\n + table + '/output/graphx/comm_1')"], {'stdout': 'garbage', 'shell': '(True)'}), "(\n 'hadoop fs -put louvain_to_gephi/graphx/community_itr_1.nodes /tmp/trackcomms/'\n + table + '/output/graphx/comm_1', stdout=garbage, shell=True)\n", (1319, 1472), False, 'from subprocess import call\n'), ((1937, 1954), 'os.path.exists', 'os.path.exists', (['e'], {}), '(e)\n', (1951, 1954), False, 'import os\n'), ((1958, 2010), 'os.system', 'os.system', (["('cat ' + v + '/part-* > ' + v + '/output')"], {}), "('cat ' + v + '/part-* > ' + v + '/output')\n", (1967, 2010), False, 'import os\n'), ((2013, 2065), 'os.system', 'os.system', (["('cat ' + e + '/part-* > ' + e + '/output')"], {}), "('cat ' + e + '/part-* > ' + e + '/output')\n", (2022, 2065), False, 'import os\n'), ((2493, 2607), 'subprocess.call', 'call', (["('hadoop fs -mkdir /tmp/trackcomms/' + table + '/output/graphx/comm_' + level)"], {'stdout': 'garbage', 'shell': '(True)'}), "('hadoop fs -mkdir /tmp/trackcomms/' + table + '/output/graphx/comm_' +\n level, stdout=garbage, shell=True)\n", (2497, 2607), False, 'from subprocess import call\n'), ((2606, 2780), 'subprocess.call', 'call', (["('hadoop fs -put louvain_to_gephi/graphx/community_itr_' + level +\n '.nodes /tmp/trackcomms/' + table + '/output/graphx/comm_' + level)"], {'stdout': 'garbage', 'shell': '(True)'}), "('hadoop fs -put louvain_to_gephi/graphx/community_itr_' + level +\n '.nodes /tmp/trackcomms/' + table + '/output/graphx/comm_' + level,\n stdout=garbage, shell=True)\n", (2610, 2780), False, 'from subprocess import call\n'), ((2906, 2973), 're.search', 're.search', (['"""Edge\\\\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\\\\)"""', 'line'], {}), "('Edge\\\\(([a-zA-Z0-9]+),([a-zA-Z0-9]+),([0-9]+)\\\\)', line)\n", (2915, 2973), False, 'import re\n'), ((953, 989), 're.search', 're.search', (['"""\\\\(([a-zA-Z0-9]+)"""', 'line'], {}), "('\\\\(([a-zA-Z0-9]+)', line)\n", (962, 989), False, 'import re\n'), ((1008, 1051), 're.search', 're.search', (['"""(name):([a-zA-Z0-9\\\\-]+)"""', 'line'], {}), "('(name):([a-zA-Z0-9\\\\-]+)', line)\n", (1017, 1051), False, 'import re\n'), ((1070, 1122), 're.search', 're.search', (['"""(communityName):([a-zA-Z0-9\\\\-]+)"""', 'line'], {}), "('(communityName):([a-zA-Z0-9\\\\-]+)', line)\n", (1079, 1122), False, 'import re\n'), ((2221, 2257), 're.search', 're.search', (['"""\\\\(([a-zA-Z0-9]+)"""', 'line'], {}), "('\\\\(([a-zA-Z0-9]+)', line)\n", (2230, 2257), False, 'import re\n'), ((2278, 2321), 're.search', 're.search', (['"""(name):([a-zA-Z0-9\\\\-]+)"""', 'line'], {}), "('(name):([a-zA-Z0-9\\\\-]+)', line)\n", (2287, 2321), False, 'import re\n'), ((2342, 2394), 're.search', 're.search', (['"""(communityName):([a-zA-Z0-9\\\\-]+)"""', 'line'], {}), "('(communityName):([a-zA-Z0-9\\\\-]+)', line)\n", (2351, 2394), False, 'import re\n')] |
from healthvaultlib.tests.testbase import TestBase
from healthvaultlib.methods.getservicedefinition import GetServiceDefinition
class TestGetServiceDefinition(TestBase):
def test_getservicedefinition(self):
method = GetServiceDefinition(['platform', 'shell', 'topology',
'xml-over-http-methods', 'meaningful-use'])
method.execute(self.connection)
self.assertIsNotNone(method.response)
self.assertIsNotNone(method.response.service_definition.platform)
self.assertIsNotNone(method.response.service_definition.shell)
self.assertNotEqual(len(method.response.service_definition.xml_method), 0)
self.assertNotEqual(len(method.response.service_definition.common_schema), 0)
self.assertNotEqual(len(method.response.service_definition.instances), 0)
self.assertIsNotNone(method.response.service_definition.meaningful_use)
self.assertIsNotNone(method.response.service_definition.updated_date)
| [
"healthvaultlib.methods.getservicedefinition.GetServiceDefinition"
] | [((234, 336), 'healthvaultlib.methods.getservicedefinition.GetServiceDefinition', 'GetServiceDefinition', (["['platform', 'shell', 'topology', 'xml-over-http-methods', 'meaningful-use']"], {}), "(['platform', 'shell', 'topology',\n 'xml-over-http-methods', 'meaningful-use'])\n", (254, 336), False, 'from healthvaultlib.methods.getservicedefinition import GetServiceDefinition\n')] |
#|==============================================================|#
# Made by IntSPstudio
# Project Visual Street
# ID: 980004006
# Twitter: @IntSPstudio
#|==============================================================|#
#SYSTEM
import os
import sys
#import time
import turtle
import math
#ALG
#Circumference of a circle
def calcCircleRl(rlRadius):
#2PIR
output = 2*pi*rlRadius
return output
#Calculates the arc length of a circle slice
def calcCircleSliceRl(rlAngle,rlRadius):
output = rlAngle/360*pi*rlRadius*2
return output
#CONTENT SCREEN
contentscreen = turtle.Screen()
contentscreen.bgcolor("black")
#TURTLE
julle = turtle.Turtle()
julle.color("white")
julle.speed(5)
#INPUT
scriptFle = sys.argv[0]
scriptCircleRadius = sys.argv[1]
scriptCircleSliceAngle = sys.argv[2]
#BASIC VRB
#systemContinuity =1
pi = math.pi
inputCircleRadius = int(scriptCircleRadius)
inputCircleSliceAngle = int(scriptCircleSliceAngle)
inputCircleRl = calcCircleRl(inputCircleRadius)
inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius)
#CLEAR SCREEN
os.system("cls")
#PRINT DATA
print(" Radius:", inputCircleRadius)
print(" Slice:", scriptCircleSliceAngle)
print("Circle Rl:", inputCircleRl)
print(" Slice Rl:", inputCircleSliceRl)
print(" %Rld:", inputCircleSliceRl / inputCircleRl *100)
#ACTION
#Start position
julle.penup()
julle.forward(inputCircleRadius)
julle.left(90)
julle.pendown()
#Circle
julle.circle(inputCircleRadius)
#Slice
julle.pendown()
julle.left(90)
julle.forward(inputCircleRadius)
julle.right(180 - inputCircleSliceAngle)
julle.forward(inputCircleRadius)
julle.right(180)
julle.forward(inputCircleRadius)
#Wait
contentscreen.mainloop()
os.system("cls") | [
"turtle.Screen",
"os.system",
"turtle.Turtle"
] | [((540, 555), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (553, 555), False, 'import turtle\n'), ((603, 618), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (616, 618), False, 'import turtle\n'), ((1039, 1055), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (1048, 1055), False, 'import os\n'), ((1655, 1671), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (1664, 1671), False, 'import os\n')] |
import re
# initial data input
infilename = "./day4.txt"
# required fields for checking
required = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
def readfile():
with open(infilename, "rt", encoding="utf-8") as file:
inlist = [line.strip() for line in file]
return inlist
def parse_input(inlist=readfile()):
data_list = [] # list of dictionaries
# artificially add empty item in order to mark the end of the last document
inlist.append("")
dic = {}
for item in inlist:
if item: # not an empty line => belongs to the same document
for item in item.split():
keyvalue = item.split(":")
dic[keyvalue[0]] = keyvalue[1]
else: # starts new document
data_list.append(dic)
dic = {}
return data_list
# 2 valid passports for part 1:
testinput = [
"ecl:gry pid:860033327 eyr:2020 hcl:#fffffd \
byr:1937 iyr:2017 cid:147 hgt:183cm",
"",
"iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 \
hcl:#cfa07d byr:1929",
"",
"hcl:#ae17e1 iyr:2013 \
eyr:2024 \
ecl:brn pid:760753108 byr:1931 \
hgt:179cm",
"",
"hcl:#cfa07d eyr:2025 pid:166559648 \
iyr:2011 ecl:brn hgt:59in",
]
# --- Part One ---
"""
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
"""
def extra_check(doc: dict, extra):
# returns True if all checks pass
def n_digits_check(txt: str, start: int, end: int, n=4):
# check for n-digits ranges
return len(txt) == n and int(txt) in range(start, end + 1)
def hgt_check(txt):
# hgt (Height) - a number followed by either cm or in:
# If cm, the number must be at least 150 and at most 193.
# If in, the number must be at least 59 and at most 76.
pat = re.compile(r"(\d+)(cm|in)") # compile regex
tuples = re.search(pat, txt)
if not tuples: # if correct pattern not found
return False
num, unit = int(tuples.group(1)), tuples.group(2)
if unit == "cm":
ok = num in range(150, 193 + 1)
elif unit == "in":
ok = num in range(59, 76 + 1)
else:
ok = False
return ok
def hcl_check(txt):
pat = re.compile(r"#[a-f0-9]{6}") # compile regex
return re.search(pat, txt) != None
def ecl_check(txt):
return txt in ("amb blu brn gry grn hzl oth").split()
def pid_check(txt):
return txt.isdigit() and len(txt) == 9
if not extra:
return True
# checking extra rules
return (
n_digits_check(doc["byr"], 1920, 2002)
and n_digits_check(doc["iyr"], 2010, 2020)
and n_digits_check(doc["eyr"], 2020, 2030)
and hgt_check(doc["hgt"])
and hcl_check(doc["hcl"])
and ecl_check(doc["ecl"])
and pid_check(doc["pid"])
)
def analyse(doclist, required, extra=False) -> int:
# returns the number of valid documents according to fields listed in 'required' dictionary
valid = 0
nreq = len(required)
for doc in doclist:
fields_found = 0
for r in required: # check if all required fields are found from the document
if r in doc:
fields_found += 1
if fields_found == nreq and extra_check(doc, extra):
valid += 1
return valid
def part1(inlist=testinput) -> int:
# returns number of valid documents
return analyse(parse_input(inlist), required)
# --- Part Two ---
"""
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values:
byr valid: 2002
byr invalid: 2003
hgt valid: 60in
hgt valid: 190cm
hgt invalid: 190in
hgt invalid: 190
hcl valid: #123abc
hcl invalid: #123abz
hcl invalid: 123abc
ecl valid: brn
ecl invalid: wat
pid valid: 000000001
pid invalid: 0123456789
__Here are some invalid passports:
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
__Here are some valid passports:
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional.
"""
# 2 valid passports for part 2:
testinput2 = [
"eyr:1972 cid:100 \
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926",
"",
"iyr:2019 \
hcl:#602927 eyr:1967 hgt:170cm \
ecl:grn pid:012533040 byr:1946",
"",
"pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 \
hcl:#623a2f",
"",
"eyr:2029 ecl:blu cid:129 byr:1989 \
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm",
"",
"eyr:2029 ecl:blu cid:129 byr:1989 \
iyr:2014 pid:89605653z hcl:#a97842 hgt:165cm",
]
def part2(inlist=testinput2) -> int:
return analyse(parse_input(inlist), required, extra=True)
# --- MAIN ---
if __name__ == "__main__":
# if no parameter for part X - test input is used
print("Part1. Number of valid passports:", part1(readfile()))
print("Part2. Number of valid passports:", part2(readfile()))
| [
"re.search",
"re.compile"
] | [((3329, 3356), 're.compile', 're.compile', (['"""(\\\\d+)(cm|in)"""'], {}), "('(\\\\d+)(cm|in)')\n", (3339, 3356), False, 'import re\n'), ((3391, 3410), 're.search', 're.search', (['pat', 'txt'], {}), '(pat, txt)\n', (3400, 3410), False, 'import re\n'), ((3784, 3810), 're.compile', 're.compile', (['"""#[a-f0-9]{6}"""'], {}), "('#[a-f0-9]{6}')\n", (3794, 3810), False, 'import re\n'), ((3844, 3863), 're.search', 're.search', (['pat', 'txt'], {}), '(pat, txt)\n', (3853, 3863), False, 'import re\n')] |
#!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from functools import reduce  # reduce is no longer a builtin in Python 3
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
def prec_from_pathname(path):
if '2k' in path: return 0.002
elif '5k' in path: return 0.005
else: raise AssertionError('Unknown field strengh: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
# product :: Num a => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
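        # derive cylindrical coordinates (r, phi) and the radial/azimuthal
        # field components (Br, Bp) from the Cartesian samples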
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
| [
"pandas.DataFrame",
"numpy.abs",
"numpy.arctan2",
"datetime.datetime.today",
"os.path.basename",
"pandas.read_csv",
"numpy.sin",
"numpy.cos",
"glob.glob",
"numpy.sqrt"
] | [((1147, 1161), 'numpy.abs', 'np.abs', (['(fX - Y)'], {}), '(fX - Y)\n', (1153, 1161), True, 'import numpy as np\n'), ((1475, 1520), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '""" """', 'names': 'sample_cols'}), "(path, sep=' ', names=sample_cols)\n", (1486, 1520), True, 'import pandas as pd\n'), ((2149, 2180), 'glob.glob', 'glob', (['"""dat_z22/*2k*.sample.dat"""'], {}), "('dat_z22/*2k*.sample.dat')\n", (2153, 2180), False, 'from glob import glob\n'), ((2195, 2224), 'glob.glob', 'glob', (['"""dat_z22/*2k*.test.dat"""'], {}), "('dat_z22/*2k*.test.dat')\n", (2199, 2224), False, 'from glob import glob\n'), ((2332, 2346), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2344, 2346), True, 'import pandas as pd\n'), ((1559, 1589), 'numpy.sqrt', 'np.sqrt', (['(df.x ** 2 + df.y ** 2)'], {}), '(df.x ** 2 + df.y ** 2)\n', (1566, 1589), True, 'import numpy as np\n'), ((1601, 1623), 'numpy.arctan2', 'np.arctan2', (['df.y', 'df.x'], {}), '(df.y, df.x)\n', (1611, 1623), True, 'import numpy as np\n'), ((1639, 1671), 'numpy.sqrt', 'np.sqrt', (['(df.Bx ** 2 + df.By ** 2)'], {}), '(df.Bx ** 2 + df.By ** 2)\n', (1646, 1671), True, 'import numpy as np\n'), ((1846, 1858), 'numpy.abs', 'np.abs', (['df.x'], {}), '(df.x)\n', (1852, 1858), True, 'import numpy as np\n'), ((1874, 1886), 'numpy.abs', 'np.abs', (['df.y'], {}), '(df.y)\n', (1880, 1886), True, 'import numpy as np\n'), ((1902, 1914), 'numpy.abs', 'np.abs', (['df.z'], {}), '(df.z)\n', (1908, 1914), True, 'import numpy as np\n'), ((3965, 4015), 'pandas.DataFrame', 'pd.DataFrame', (["{'term': xvars, 'rank': fs.ranking_}"], {}), "({'term': xvars, 'rank': fs.ranking_})\n", (3977, 4015), True, 'import pandas as pd\n'), ((1683, 1707), 'numpy.arctan2', 'np.arctan2', (['df.By', 'df.Bx'], {}), '(df.By, df.Bx)\n', (1693, 1707), True, 'import numpy as np\n'), ((1710, 1732), 'numpy.arctan2', 'np.arctan2', (['df.y', 'df.x'], {}), '(df.y, df.x)\n', (1720, 1732), True, 'import numpy as np\n'), ((1756, 1771), 'numpy.cos', 'np.cos', (['df.Bpsi'], {}), '(df.Bpsi)\n', (1762, 1771), True, 'import numpy as np\n'), ((1795, 1810), 'numpy.sin', 'np.sin', (['df.Bpsi'], {}), '(df.Bpsi)\n', (1801, 1810), True, 'import numpy as np\n'), ((5916, 5941), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5939, 5941), False, 'import datetime\n'), ((5279, 5293), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (5287, 5293), False, 'from os.path import basename\n')] |
from django.urls import path
from blog import views
from blog.views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
CommentDeleteView,
UserPostListView,
LikeView,
)
urlpatterns = [
path('', PostListView.as_view(), name='index'),
path('post/new/', views.PostCreateView.as_view(), name='post-create'),
path('post/<int:pk>/', views.PostDetailView.as_view(), name='post-detail'),
path('post/<int:pk>/update/', views.PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/', views.PostDeleteView.as_view(), name='post-delete'),
path('user/<str:username>/', UserPostListView.as_view(), name='user-post'),
path('post/comment/<int:pk>/delete/', views.CommentDeleteView.as_view(), name='comment-delete'),
path('post/<int:pk>/like/', LikeView, name='like-post'),
] | [
"blog.views.PostListView.as_view",
"blog.views.PostUpdateView.as_view",
"blog.views.CommentDeleteView.as_view",
"django.urls.path",
"blog.views.PostDeleteView.as_view",
"blog.views.UserPostListView.as_view",
"blog.views.PostCreateView.as_view",
"blog.views.PostDetailView.as_view"
] | [((819, 874), 'django.urls.path', 'path', (['"""post/<int:pk>/like/"""', 'LikeView'], {'name': '"""like-post"""'}), "('post/<int:pk>/like/', LikeView, name='like-post')\n", (823, 874), False, 'from django.urls import path\n'), ((266, 288), 'blog.views.PostListView.as_view', 'PostListView.as_view', ([], {}), '()\n', (286, 288), False, 'from blog.views import PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView\n'), ((327, 357), 'blog.views.PostCreateView.as_view', 'views.PostCreateView.as_view', ([], {}), '()\n', (355, 357), False, 'from blog import views\n'), ((407, 437), 'blog.views.PostDetailView.as_view', 'views.PostDetailView.as_view', ([], {}), '()\n', (435, 437), False, 'from blog import views\n'), ((494, 524), 'blog.views.PostUpdateView.as_view', 'views.PostUpdateView.as_view', ([], {}), '()\n', (522, 524), False, 'from blog import views\n'), ((581, 611), 'blog.views.PostDeleteView.as_view', 'views.PostDeleteView.as_view', ([], {}), '()\n', (609, 611), False, 'from blog import views\n'), ((667, 693), 'blog.views.UserPostListView.as_view', 'UserPostListView.as_view', ([], {}), '()\n', (691, 693), False, 'from blog.views import PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, CommentDeleteView, UserPostListView, LikeView\n'), ((756, 789), 'blog.views.CommentDeleteView.as_view', 'views.CommentDeleteView.as_view', ([], {}), '()\n', (787, 789), False, 'from blog import views\n')] |
# Generated by Django 3.0.6 on 2020-06-07 12:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('travello', '0003_travel_history2'),
]
operations = [
migrations.RenameField(
model_name='medical_history',
old_name='Bronchitis',
new_name='bronchitis',
),
migrations.RenameField(
model_name='medical_history',
old_name='COPD',
new_name='copd',
),
migrations.RenameField(
model_name='medical_history',
old_name='Diabetes_mellitus',
new_name='diabetes_mellitus',
),
migrations.RenameField(
model_name='medical_history',
old_name='HIV_AIDS',
new_name='hiv_aids',
),
migrations.RenameField(
model_name='medical_history',
old_name='Ischemic_heart_disease',
new_name='ischemic_heart_disease',
),
migrations.RenameField(
model_name='medical_history',
old_name='Kidney_Disease',
new_name='kidney_disease',
),
migrations.RenameField(
model_name='medical_history',
old_name='Stroke',
new_name='stroke',
),
]
| [
"django.db.migrations.RenameField"
] | [((237, 339), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""medical_history"""', 'old_name': '"""Bronchitis"""', 'new_name': '"""bronchitis"""'}), "(model_name='medical_history', old_name='Bronchitis',\n new_name='bronchitis')\n", (259, 339), False, 'from django.db import migrations\n'), ((397, 487), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""medical_history"""', 'old_name': '"""COPD"""', 'new_name': '"""copd"""'}), "(model_name='medical_history', old_name='COPD',\n new_name='copd')\n", (419, 487), False, 'from django.db import migrations\n'), ((545, 662), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""medical_history"""', 'old_name': '"""Diabetes_mellitus"""', 'new_name': '"""diabetes_mellitus"""'}), "(model_name='medical_history', old_name=\n 'Diabetes_mellitus', new_name='diabetes_mellitus')\n", (567, 662), False, 'from django.db import migrations\n'), ((719, 817), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""medical_history"""', 'old_name': '"""HIV_AIDS"""', 'new_name': '"""hiv_aids"""'}), "(model_name='medical_history', old_name='HIV_AIDS',\n new_name='hiv_aids')\n", (741, 817), False, 'from django.db import migrations\n'), ((875, 1002), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""medical_history"""', 'old_name': '"""Ischemic_heart_disease"""', 'new_name': '"""ischemic_heart_disease"""'}), "(model_name='medical_history', old_name=\n 'Ischemic_heart_disease', new_name='ischemic_heart_disease')\n", (897, 1002), False, 'from django.db import migrations\n'), ((1059, 1170), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""medical_history"""', 'old_name': '"""Kidney_Disease"""', 'new_name': '"""kidney_disease"""'}), "(model_name='medical_history', old_name=\n 'Kidney_Disease', new_name='kidney_disease')\n", (1081, 1170), False, 'from django.db import migrations\n'), ((1227, 1321), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""medical_history"""', 'old_name': '"""Stroke"""', 'new_name': '"""stroke"""'}), "(model_name='medical_history', old_name='Stroke',\n new_name='stroke')\n", (1249, 1321), False, 'from django.db import migrations\n')] |
def run(name, fields="+l", exclude=None, ctags="/usr/local/bin/ctags",
creates='tags'):
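    '''
    Build a ctags index for the directory ``name``. (Descriptive docstring
    added; behaviour inferred from the code below.) ``fields`` and ``exclude``
    accept a string or a list and are expanded into repeated ``--fields=`` /
    ``--exclude=`` flags; the underlying ``cmd.run`` state is skipped once
    ``creates`` already exists inside ``name``.
    '''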
from os.path import join
if fields is None:
fields = []
elif isinstance(fields, str):
fields = [fields]
fields = " --fields=".join([""] + fields)
if exclude is None:
exclude = []
elif isinstance(exclude, str):
exclude = [exclude]
exclude = " --exclude=".join([""] + exclude)
cmd = "{ctags} -R {fields} {exclude} .".format(ctags=ctags, fields=fields,
exclude=exclude)
return __states__['cmd.run'](
name=cmd, cwd=name, creates=join(name, creates))
| [
"os.path.join"
] | [((651, 670), 'os.path.join', 'join', (['name', 'creates'], {}), '(name, creates)\n', (655, 670), False, 'from os.path import join\n')] |
# MIT License
#
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from setuptools import setup
with open('requirements.txt', encoding='utf-8') as reqs_file:
REQUIREMENTS = reqs_file.read().splitlines()
setup(
name='manifestgen',
description="Loftsman manifest generator",
packages=['manifestgen'],
include_package_data=True,
    install_requires=REQUIREMENTS,  # REQUIREMENTS is already a list; do not nest it
entry_points='''
[console_scripts]
manifestgen=manifestgen.generate:main
'''
)
| [
"setuptools.setup"
] | [((1281, 1552), 'setuptools.setup', 'setup', ([], {'name': '"""manifestgen"""', 'description': '"""Loftsman manifest generator"""', 'packages': "['manifestgen']", 'include_package_data': '(True)', 'install_requires': '[REQUIREMENTS]', 'entry_points': '"""\n [console_scripts]\n manifestgen=manifestgen.generate:main\n """'}), '(name=\'manifestgen\', description=\'Loftsman manifest generator\',\n packages=[\'manifestgen\'], include_package_data=True, install_requires=[\n REQUIREMENTS], entry_points=\n """\n [console_scripts]\n manifestgen=manifestgen.generate:main\n """\n )\n', (1286, 1552), False, 'from setuptools import setup\n')] |
"""An example application to demonstrate Dynamic Routing"""
from flask import Flask
app = Flask(__name__)
@app.route("/")
def home():
""""View for the Home page of the Website"""
return "Welcome to the HomePage!"
@app.route('/square/<int:number>')
def show_square(number):
"""View that shows the square of the number passed by URL"""
return f"Square of {str(number)} is: {(number * number)}"
if __name__ == '__main__':
app.run(debug=True)
| [
"flask.Flask"
] | [((92, 107), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (97, 107), False, 'from flask import Flask\n')] |
try:
import discord
except ImportError:
raise Exception('''
    The discord library must be installed manually:
pip install https://github.com/Rapptz/discord.py/archive/rewrite.zip
''')
import logging, asyncio, json, aiohttp
from dateutil.parser import parse
from datetime import datetime
from logitch import config, db
class Client(discord.Client):
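    """Discord client that mirrors message events into MySQL.

    (Descriptive docstring added; behaviour inferred from the handlers below.)
    MESSAGE_CREATE rows are inserted into ``discord_entries``, MESSAGE_UPDATE
    archives the previous text in ``discord_entry_versions`` before updating
    the entry, and MESSAGE_DELETE marks the entry as deleted.
    """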
async def on_connect(self):
if not hasattr(self, 'ahttp'):
self.ahttp = aiohttp.ClientSession()
self.db = await db.Db().connect(self.loop)
async def on_socket_response(self, data):
if data['op'] != 0:
return
msg = data['d']
try:
if data['t'] == 'MESSAGE_CREATE':
if 'content' not in msg:
return
if msg['type'] != 0:
return
await self.db.execute('''
INSERT INTO discord_entries
(id, server_id, channel_id, created_at, message, attachments, user, user_id, user_discriminator, member_nick) VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
''', (
msg['id'],
msg['guild_id'],
msg['channel_id'],
parse(msg['timestamp']).replace(tzinfo=None),
msg['content'],
json.dumps(msg['attachments']) if msg['attachments'] else None,
msg['author']['username'],
msg['author']['id'],
msg['author']['discriminator'],
msg['member']['nick'] if 'nick' in msg['member'] else None,
))
elif data['t'] == 'MESSAGE_UPDATE':
if 'content' not in msg:
return
if msg['type'] != 0:
return
await self.db.execute('''
INSERT INTO discord_entry_versions
(entry_id, created_at, message, attachments)
SELECT
id, ifnull(updated_at, created_at), message, attachments
FROM discord_entries WHERE id=%s;
''', (msg['id'],)
)
await self.db.execute('''
UPDATE discord_entries SET
updated_at=%s,
message=%s,
attachments=%s
WHERE
id=%s;
''', (
parse(msg['edited_timestamp']).replace(tzinfo=None),
msg['content'],
json.dumps(msg['attachments']) if msg['attachments'] else None,
msg['id'],
))
elif data['t'] == 'MESSAGE_DELETE':
await self.db.execute('''
UPDATE discord_entries SET
deleted="Y",
deleted_at=%s
WHERE
id=%s;
''',
(datetime.utcnow(), msg['id'],)
)
except:
logging.exception('on_socket_response')
def main():
bot = Client()
bot.run(config['discord']['token'], bot=config['discord']['bot'])
if __name__ == '__main__':
from logitch import config_load, logger
config_load()
logger.set_logger('discord.log')
main() | [
"logging.exception",
"dateutil.parser.parse",
"logitch.logger.set_logger",
"json.dumps",
"aiohttp.ClientSession",
"datetime.datetime.utcnow",
"logitch.config_load",
"logitch.db.Db"
] | [((3442, 3455), 'logitch.config_load', 'config_load', ([], {}), '()\n', (3453, 3455), False, 'from logitch import config_load, logger\n'), ((3464, 3496), 'logitch.logger.set_logger', 'logger.set_logger', (['"""discord.log"""'], {}), "('discord.log')\n", (3481, 3496), False, 'from logitch import config_load, logger\n'), ((475, 498), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (496, 498), False, 'import logging, asyncio, json, aiohttp\n'), ((3224, 3263), 'logging.exception', 'logging.exception', (['"""on_socket_response"""'], {}), "('on_socket_response')\n", (3241, 3263), False, 'import logging, asyncio, json, aiohttp\n'), ((527, 534), 'logitch.db.Db', 'db.Db', ([], {}), '()\n', (532, 534), False, 'from logitch import config, db\n'), ((1413, 1443), 'json.dumps', 'json.dumps', (["msg['attachments']"], {}), "(msg['attachments'])\n", (1423, 1443), False, 'import logging, asyncio, json, aiohttp\n'), ((1311, 1334), 'dateutil.parser.parse', 'parse', (["msg['timestamp']"], {}), "(msg['timestamp'])\n", (1316, 1334), False, 'from dateutil.parser import parse\n'), ((2695, 2725), 'json.dumps', 'json.dumps', (["msg['attachments']"], {}), "(msg['attachments'])\n", (2705, 2725), False, 'import logging, asyncio, json, aiohttp\n'), ((3139, 3156), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3154, 3156), False, 'from datetime import datetime\n'), ((2586, 2616), 'dateutil.parser.parse', 'parse', (["msg['edited_timestamp']"], {}), "(msg['edited_timestamp'])\n", (2591, 2616), False, 'from dateutil.parser import parse\n')] |
'''
Normalize shapenet obj files to [-0.5, 0.5]^3.
author: ynie
date: Jan, 2020
'''
import sys
sys.path.append('.')
from data_config import shape_scale_padding, \
shapenet_path, shapenet_normalized_path
import os
from multiprocessing import Pool
from tools.utils import append_dir, normalize_obj_file
from tools.read_and_write import write_json, load_data_path
from settings import cpu_cores
def recursive_normalize(input_path, output_path):
'''
Normalize *.obj file recursively
:param input_path:
:param output_path:
:return:
'''
input_path = os.path.abspath(input_path)
output_path = os.path.abspath(output_path)
for root, _, files in os.walk(input_path, topdown=True):
for file in files:
input_file_path = os.path.join(root, file)
output_file_path = input_file_path.replace(input_path, output_path)
if not os.path.exists(os.path.dirname(output_file_path)):
os.makedirs(os.path.dirname(output_file_path))
if not file.endswith('.obj'):
if os.path.exists(output_file_path):
os.remove(output_file_path)
os.symlink(input_file_path, output_file_path)
continue
else:
# write obj file
size_centroid_file = '.'.join(output_file_path.split('.')[:-1]) + '_size_centroid.json'
if os.path.exists(output_file_path) and os.path.exists(size_centroid_file):
continue
total_size, centroid = normalize_obj_file(input_file_path, output_file_path,
padding=shape_scale_padding)
size_centroid = {'size': total_size.tolist(), 'centroid': centroid.tolist()}
write_json(size_centroid_file, size_centroid)
def normalize(obj_path):
'''
normalize shapes
:param obj_path: ShapeNet object path
:return:
'''
cat, obj_file = obj_path.split('/')[3:5]
input_cat_dir = append_dir(os.path.join(shapenet_path, cat), obj_file, 'i')
output_cat_dir = append_dir(os.path.join(shapenet_normalized_path, cat), obj_file, 'o')
recursive_normalize(input_cat_dir, output_cat_dir)
if __name__ == '__main__':
if not os.path.exists(shapenet_normalized_path):
os.mkdir(shapenet_normalized_path)
all_objects = load_data_path(shapenet_path)
p = Pool(processes=cpu_cores)
p.map(normalize, all_objects)
p.close()
p.join() | [
"sys.path.append",
"os.mkdir",
"os.path.abspath",
"os.remove",
"os.path.join",
"os.path.dirname",
"os.walk",
"os.path.exists",
"tools.read_and_write.write_json",
"tools.utils.normalize_obj_file",
"multiprocessing.Pool",
"os.symlink",
"tools.read_and_write.load_data_path"
] | [((96, 116), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (111, 116), False, 'import sys\n'), ((578, 605), 'os.path.abspath', 'os.path.abspath', (['input_path'], {}), '(input_path)\n', (593, 605), False, 'import os\n'), ((624, 652), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (639, 652), False, 'import os\n'), ((680, 713), 'os.walk', 'os.walk', (['input_path'], {'topdown': '(True)'}), '(input_path, topdown=True)\n', (687, 713), False, 'import os\n'), ((2392, 2421), 'tools.read_and_write.load_data_path', 'load_data_path', (['shapenet_path'], {}), '(shapenet_path)\n', (2406, 2421), False, 'from tools.read_and_write import write_json, load_data_path\n'), ((2431, 2456), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'cpu_cores'}), '(processes=cpu_cores)\n', (2435, 2456), False, 'from multiprocessing import Pool\n'), ((2052, 2084), 'os.path.join', 'os.path.join', (['shapenet_path', 'cat'], {}), '(shapenet_path, cat)\n', (2064, 2084), False, 'import os\n'), ((2133, 2176), 'os.path.join', 'os.path.join', (['shapenet_normalized_path', 'cat'], {}), '(shapenet_normalized_path, cat)\n', (2145, 2176), False, 'import os\n'), ((2288, 2328), 'os.path.exists', 'os.path.exists', (['shapenet_normalized_path'], {}), '(shapenet_normalized_path)\n', (2302, 2328), False, 'import os\n'), ((2338, 2372), 'os.mkdir', 'os.mkdir', (['shapenet_normalized_path'], {}), '(shapenet_normalized_path)\n', (2346, 2372), False, 'import os\n'), ((772, 796), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (784, 796), False, 'import os\n'), ((1073, 1105), 'os.path.exists', 'os.path.exists', (['output_file_path'], {}), '(output_file_path)\n', (1087, 1105), False, 'import os\n'), ((1171, 1216), 'os.symlink', 'os.symlink', (['input_file_path', 'output_file_path'], {}), '(input_file_path, output_file_path)\n', (1181, 1216), False, 'import os\n'), ((1559, 1646), 'tools.utils.normalize_obj_file', 'normalize_obj_file', (['input_file_path', 'output_file_path'], {'padding': 'shape_scale_padding'}), '(input_file_path, output_file_path, padding=\n shape_scale_padding)\n', (1577, 1646), False, 'from tools.utils import append_dir, normalize_obj_file\n'), ((1811, 1856), 'tools.read_and_write.write_json', 'write_json', (['size_centroid_file', 'size_centroid'], {}), '(size_centroid_file, size_centroid)\n', (1821, 1856), False, 'from tools.read_and_write import write_json, load_data_path\n'), ((912, 945), 'os.path.dirname', 'os.path.dirname', (['output_file_path'], {}), '(output_file_path)\n', (927, 945), False, 'import os\n'), ((976, 1009), 'os.path.dirname', 'os.path.dirname', (['output_file_path'], {}), '(output_file_path)\n', (991, 1009), False, 'import os\n'), ((1127, 1154), 'os.remove', 'os.remove', (['output_file_path'], {}), '(output_file_path)\n', (1136, 1154), False, 'import os\n'), ((1417, 1449), 'os.path.exists', 'os.path.exists', (['output_file_path'], {}), '(output_file_path)\n', (1431, 1449), False, 'import os\n'), ((1454, 1488), 'os.path.exists', 'os.path.exists', (['size_centroid_file'], {}), '(size_centroid_file)\n', (1468, 1488), False, 'import os\n')] |
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from rsvps.views import GuestRsvpView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="home.html"), name='home'),
url(r'^about/$', TemplateView.as_view(template_name="about.html"), name='about'),
url(r'^event/$', TemplateView.as_view(template_name="event.html"), name='event'),
url(r'^registry/$', TemplateView.as_view(template_name="registry.html"), name='registry'),
url(r'^rsvp/(?P<pk>[0-9]+)/$', GuestRsvpView.as_view()),
url(r'^admin/', include(admin.site.urls)),
]
| [
"rsvps.views.GuestRsvpView.as_view",
"django.views.generic.TemplateView.as_view",
"django.conf.urls.include"
] | [((192, 239), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""home.html"""'}), "(template_name='home.html')\n", (212, 239), False, 'from django.views.generic import TemplateView\n'), ((276, 324), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""about.html"""'}), "(template_name='about.html')\n", (296, 324), False, 'from django.views.generic import TemplateView\n'), ((362, 410), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""event.html"""'}), "(template_name='event.html')\n", (382, 410), False, 'from django.views.generic import TemplateView\n'), ((451, 502), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""registry.html"""'}), "(template_name='registry.html')\n", (471, 502), False, 'from django.views.generic import TemplateView\n'), ((557, 580), 'rsvps.views.GuestRsvpView.as_view', 'GuestRsvpView.as_view', ([], {}), '()\n', (578, 580), False, 'from rsvps.views import GuestRsvpView\n'), ((604, 628), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (611, 628), False, 'from django.conf.urls import include, url\n')] |
import toml
import random
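# Toy script: fill a dict with `n_hidden` random values (placeholders for per-unit
# mutual-information scores) keyed by a zero-padded index, then dump it to a TOML file.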
def main():
filename = 'test'
n_hidden = 64
mutual_infos = []
for i in range(n_hidden):
mutual_infos.append(random.random())
mutual_info_dict = {}
for i in range(n_hidden):
mutual_info_dict[f'{i:04}'] = mutual_infos[i]
with open(f'mutual_info_{filename}.toml', 'w') as f:
toml_str = toml.dump(mutual_info_dict, f)
print(toml_str)
main()
| [
"random.random",
"toml.dump"
] | [((364, 394), 'toml.dump', 'toml.dump', (['mutual_info_dict', 'f'], {}), '(mutual_info_dict, f)\n', (373, 394), False, 'import toml\n'), ((160, 175), 'random.random', 'random.random', ([], {}), '()\n', (173, 175), False, 'import random\n')] |
import random
# Ask the player to choose a letter, O or X
def escolhaLetraJogador():
l = ""
while l != "O" and l != "X":
l = str(input('Escolha a letra que prefere jogar (O ou X): ')).upper()
if l == "O":
letras = ['O', "X"]
else:
letras = ['X', "O"]
return letras
# Randomly draw who goes first
def iniciaJogador():
if random.randint(1,2) == 1:
return True
else:
return False
def criaTabuleiro():
t = []
t.append('')
for i in range(9):
t.append(' ')
return t
# Show the board
def mostraTabuleiro(posi):
print(" | | ")
print(' {} | {} | {} '.format(posi[7],posi[8],posi[9]))
print(" | | ")
print("-----------")
print(" | | ")
print(' {} | {} | {} '.format(posi[4], posi[5], posi[6]))
print(" | | ")
print("-----------")
print(" | | ")
print(' {} | {} | {} '.format(posi[1], posi[2], posi[3]))
print(" | | ")
letras = escolhaLetraJogador()
vezJogador = iniciaJogador()
#tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X']
tabuleiro = criaTabuleiro()
mostraTabuleiro(tabuleiro)
# Player's turn
# Show the board
# Receive the player's move
# Computer's turn
# Decide the computer's move (a rough sketch of this follows below)
# 1) Play the move that wins the game
# 2) Play the move that blocks the player from winning on the next turn
# 3) Play in the corners
# 4) Play in the centre
# 5) Play on the sides
# Check whether there is a winner
# Check whether the game ended in a tie
# Ask whether the player wants to play again
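# --- Illustrative sketch (not part of the original script) of the two core steps outlined
# --- above: checking for a winner and choosing the computer's move. The helper names
# --- verificaVencedor/movimentoComputador are invented for this sketch and assume the
# --- 10-element board list produced by criaTabuleiro() (index 0 is unused).
def verificaVencedor(t, letra):
    # Every winning line on the 3x3 board, using the numpad-style positions drawn above.
    linhas = [(7, 8, 9), (4, 5, 6), (1, 2, 3),
              (7, 4, 1), (8, 5, 2), (9, 6, 3),
              (7, 5, 3), (9, 5, 1)]
    return any(t[a] == t[b] == t[c] == letra for a, b, c in linhas)

def movimentoComputador(t, letraPC, letraJogador):
    # Priorities from the outline: win, block, then corners, centre and sides.
    for letra in (letraPC, letraJogador):
        for pos in range(1, 10):
            if t[pos] == ' ':
                copia = t[:]
                copia[pos] = letra
                if verificaVencedor(copia, letra):
                    return pos  # winning square for the computer, or the square to block
    for grupo in ([7, 9, 1, 3], [5], [8, 4, 6, 2]):
        livres = [pos for pos in grupo if t[pos] == ' ']
        if livres:
            return random.choice(livres)
    return None  # board is full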
| [
"random.randint"
] | [((376, 396), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (390, 396), False, 'import random\n')] |
import torch
import torch.nn as nn
class SeparableConv2D(nn.Module):
'''
Definition of Separable Convolution.
'''
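    # Depthwise separable convolution: a per-channel spatial conv (groups=in_channels, widened
    # by depth_multiplier) followed by a 1x1 pointwise conv that mixes channels - much cheaper
    # than a full Conv2d with the same kernel size.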
def __init__(self, in_channels, out_channels, kernel_size, depth_multiplier=1, stride=1, padding=0, dilation=1, bias=True, padding_mode='zeros'):
super(SeparableConv2D, self).__init__()
depthwise_conv_out_channels = in_channels * depth_multiplier
self.depthwise_conv = nn.Conv2d(in_channels, depthwise_conv_out_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode)
self.pointwise_conv = nn.Conv2d(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=1, bias=False)
def forward(self, x):
x = self.depthwise_conv(x)
output = self.pointwise_conv(x)
return output
class Block1(nn.Module):
'''
Definition of Block 1.
'''
def __init__(self, in_channels):
super(Block1, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 32, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 64, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.out_channels = 64
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = torch.relu(x)
return x
class Block2(nn.Module):
'''
Definition of Block 2.
'''
def __init__(self, in_channels):
super(Block2, self).__init__()
self.r_conv1 = nn.Conv2d(in_channels, 128, (1, 1), stride=(2, 2), bias=False)
self.r_bn1 = nn.BatchNorm2d(128)
self.conv1 = SeparableConv2D(in_channels, 128, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(128)
self.conv2 = SeparableConv2D(128, 128, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(128)
self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)
self.out_channels = 128
def forward(self, x):
# Shortcut
rx = self.r_conv1(x)
rx = self.r_bn1(rx)
# Main way
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.mp3(x)
# Confluence
x = x + rx
return x
class Block3(nn.Module):
'''
Definition of Block 3.
'''
def __init__(self, in_channels):
super(Block3, self).__init__()
self.r_conv1 = nn.Conv2d(in_channels, 256, (1, 1), stride=(2, 2), bias=False)
self.r_bn1 = nn.BatchNorm2d(256)
self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(256)
self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)
self.out_channels = 256
def forward(self, x):
# Shortcut
rx = self.r_conv1(x)
rx = self.r_bn1(rx)
# Main way
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.mp3(x)
# Confluence
x = x + rx
return x
class Block4(nn.Module):
'''
Definition of Block 4.
'''
def __init__(self, in_channels):
super(Block4, self).__init__()
self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.conv2 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(256)
self.conv3 = SeparableConv2D(256, 256, (3, 3), padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(256)
self.out_channels = 256
def forward(self, x):
# Shortcut
rx = x
# Main way
x = torch.relu(x)
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = torch.relu(x)
x = self.conv3(x)
x = self.bn3(x)
# Confluence
x = x + rx
return x
class Block5(nn.Module):
'''
Definition of Block 5.
'''
def __init__(self, in_channels):
super(Block5, self).__init__()
self.r_conv1 = nn.Conv2d(in_channels, 512, (1, 1), stride=(2, 2), bias=False)
self.r_bn1 = nn.BatchNorm2d(512)
self.conv1 = SeparableConv2D(in_channels, 256, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.conv2 = SeparableConv2D(256, 512, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(512)
self.mp3 = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)
self.out_channels = 512
def forward(self, x):
# Shortcut
rx = self.r_conv1(x)
rx = self.r_bn1(rx)
# Main way
x = torch.relu(x)
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.mp3(x)
# Confluence
x = x + rx
return x
class Block6(nn.Module):
'''
Definition of Block 6.
'''
def __init__(self, in_channels):
super(Block6, self).__init__()
self.conv1 = SeparableConv2D(in_channels, 1024, (3, 3), padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(1024)
self.conv2 = SeparableConv2D(1024, 2048, (3, 3), padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(2048)
self.out_channels = 2048
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = torch.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = torch.relu(x)
return x
class Network(nn.Module):
'''
Definition of the whole network with Block[1-6] utilized.
'''
def __init__(self, in_channels, num_classes, num_middle_layers=4):
super(Network, self).__init__()
self.block1 = Block1(in_channels)
self.block2 = Block2(self.block1.out_channels)
self.block3 = Block3(self.block2.out_channels)
assert num_middle_layers >= 0, f'Invalid number of layers, {num_middle_layers}'
if num_middle_layers != 0:
self.block4_lst = nn.ModuleList([Block4(self.block3.out_channels) for _ in range(num_middle_layers)])
self.block5 = Block5(self.block4_lst[0].out_channels)
else:
            self.block4_lst = nn.ModuleList([])  # keep the attribute defined so forward() also works with no middle blocks
            self.block5 = Block5(self.block3.out_channels)
self.block6 = Block6(self.block5.out_channels)
self.avg = nn.AdaptiveAvgPool2d(1)
self.final = nn.Linear(self.block6.out_channels, num_classes)
def forward(self, x):
x = self.block1(x)
x = self.block2(x) # half-sized length and high
x = self.block3(x) # half-sized length and high
for i in range(len(self.block4_lst)):
x = self.block4_lst[i](x)
x = self.block5(x) # half-sized length and high
x = self.block6(x)
x = self.avg(x)
x = x.view(x.size(0), -1)
x = self.final(x)
return x
| [
"torch.nn.AdaptiveAvgPool2d",
"torch.relu",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d"
] | [((425, 583), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'depthwise_conv_out_channels', 'kernel_size', 'stride', 'padding', 'dilation'], {'groups': 'in_channels', 'bias': 'bias', 'padding_mode': 'padding_mode'}), '(in_channels, depthwise_conv_out_channels, kernel_size, stride,\n padding, dilation, groups=in_channels, bias=bias, padding_mode=padding_mode\n )\n', (434, 583), True, 'import torch.nn as nn\n'), ((605, 699), 'torch.nn.Conv2d', 'nn.Conv2d', (['depthwise_conv_out_channels', 'out_channels'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(depthwise_conv_out_channels, out_channels, kernel_size=1, stride=\n 1, bias=False)\n', (614, 699), True, 'import torch.nn as nn\n'), ((986, 1043), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(32)', '(3, 3)'], {'padding': '(1)', 'bias': '(False)'}), '(in_channels, 32, (3, 3), padding=1, bias=False)\n', (995, 1043), True, 'import torch.nn as nn\n'), ((1063, 1081), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1077, 1081), True, 'import torch.nn as nn\n'), ((1103, 1151), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(3, 3)'], {'padding': '(1)', 'bias': '(False)'}), '(32, 64, (3, 3), padding=1, bias=False)\n', (1112, 1151), True, 'import torch.nn as nn\n'), ((1171, 1189), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (1185, 1189), True, 'import torch.nn as nn\n'), ((1310, 1323), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (1320, 1323), False, 'import torch\n'), ((1386, 1399), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (1396, 1399), False, 'import torch\n'), ((1585, 1647), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(128)', '(1, 1)'], {'stride': '(2, 2)', 'bias': '(False)'}), '(in_channels, 128, (1, 1), stride=(2, 2), bias=False)\n', (1594, 1647), True, 'import torch.nn as nn\n'), ((1669, 1688), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (1683, 1688), True, 'import torch.nn as nn\n'), ((1795, 1814), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (1809, 1814), True, 'import torch.nn as nn\n'), ((1912, 1931), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (1926, 1931), True, 'import torch.nn as nn\n'), ((1951, 1997), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3, 3)'], {'stride': '(2, 2)', 'padding': '(1)'}), '((3, 3), stride=(2, 2), padding=1)\n', (1963, 1997), True, 'import torch.nn as nn\n'), ((2214, 2227), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (2224, 2227), False, 'import torch\n'), ((2528, 2590), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(256)', '(1, 1)'], {'stride': '(2, 2)', 'bias': '(False)'}), '(in_channels, 256, (1, 1), stride=(2, 2), bias=False)\n', (2537, 2590), True, 'import torch.nn as nn\n'), ((2612, 2631), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (2626, 2631), True, 'import torch.nn as nn\n'), ((2738, 2757), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (2752, 2757), True, 'import torch.nn as nn\n'), ((2855, 2874), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (2869, 2874), True, 'import torch.nn as nn\n'), ((2894, 2940), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3, 3)'], {'stride': '(2, 2)', 'padding': '(1)'}), '((3, 3), stride=(2, 2), padding=1)\n', (2906, 2940), True, 'import torch.nn as nn\n'), ((3157, 3170), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (3167, 3170), False, 'import torch\n'), ((3552, 3571), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), 
'(256)\n', (3566, 3571), True, 'import torch.nn as nn\n'), ((3669, 3688), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3683, 3688), True, 'import torch.nn as nn\n'), ((3786, 3805), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3800, 3805), True, 'import torch.nn as nn\n'), ((3930, 3943), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (3940, 3943), False, 'import torch\n'), ((4006, 4019), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (4016, 4019), False, 'import torch\n'), ((4082, 4095), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (4092, 4095), False, 'import torch\n'), ((4371, 4433), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', '(512)', '(1, 1)'], {'stride': '(2, 2)', 'bias': '(False)'}), '(in_channels, 512, (1, 1), stride=(2, 2), bias=False)\n', (4380, 4433), True, 'import torch.nn as nn\n'), ((4455, 4474), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (4469, 4474), True, 'import torch.nn as nn\n'), ((4581, 4600), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (4595, 4600), True, 'import torch.nn as nn\n'), ((4698, 4717), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (4712, 4717), True, 'import torch.nn as nn\n'), ((4737, 4783), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3, 3)'], {'stride': '(2, 2)', 'padding': '(1)'}), '((3, 3), stride=(2, 2), padding=1)\n', (4749, 4783), True, 'import torch.nn as nn\n'), ((4950, 4963), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (4960, 4963), False, 'import torch\n'), ((5026, 5039), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (5036, 5039), False, 'import torch\n'), ((5422, 5442), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (5436, 5442), True, 'import torch.nn as nn\n'), ((5542, 5562), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2048)'], {}), '(2048)\n', (5556, 5562), True, 'import torch.nn as nn\n'), ((5689, 5702), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (5699, 5702), False, 'import torch\n'), ((5765, 5778), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (5775, 5778), False, 'import torch\n'), ((6615, 6638), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (6635, 6638), True, 'import torch.nn as nn\n'), ((6660, 6708), 'torch.nn.Linear', 'nn.Linear', (['self.block6.out_channels', 'num_classes'], {}), '(self.block6.out_channels, num_classes)\n', (6669, 6708), True, 'import torch.nn as nn\n')] |
import os
import torch
import constants
from utils.misc import get_learning_rate
from utils.summary import TensorboardSummary
from utils.loss import SegmentationLosses
from utils.calculate_weights import calculate_weights_labels
from torch.utils.data import DataLoader
import numpy as np
from utils.metrics import Evaluator
from tqdm import tqdm
import random
class Trainer:
def __init__(self, args, model, train_set, val_set, test_set, class_weights, saver):
self.args = args
self.saver = saver
self.saver.save_experiment_config()
self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)
self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, "train"))
self.train_writer = self.train_summary.create_summary()
self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, "validation"))
self.val_writer = self.val_summary.create_summary()
self.model = model
self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)}
train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
{'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]
if args.use_balanced_weights:
weight = torch.from_numpy(class_weights.astype(np.float32))
else:
weight = None
if args.optimizer == 'SGD':
print('Using SGD')
self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov)
elif args.optimizer == 'Adam':
print('Using Adam')
self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay)
else:
raise NotImplementedError
self.lr_scheduler = None
if args.use_lr_scheduler:
if args.lr_scheduler == 'step':
print('Using step lr scheduler')
self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(",")], gamma=0.1)
self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type)
self.evaluator = Evaluator(train_set.num_classes)
self.best_pred = 0.0
def training(self, epoch):
train_loss = 0.0
self.model.train()
num_img_tr = len(self.train_dataloader)
tbar = tqdm(self.train_dataloader, desc='\r')
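        # Pick one random batch per epoch; its images, targets and outputs are sent to TensorBoard below.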
visualization_index = int(random.random() * len(self.train_dataloader))
vis_img, vis_tgt, vis_out = None, None, None
self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch)
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
image, target = image.cuda(), target.cuda()
self.optimizer.zero_grad()
output = self.model(image)
loss = self.criterion(output, target)
loss.backward()
self.optimizer.step()
train_loss += loss.item()
tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch)
if i == visualization_index:
vis_img, vis_tgt, vis_out = image, target, output
self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch)
if constants.VISUALIZATION:
self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch)
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print('Loss: %.3f' % train_loss)
print('BestPred: %.3f' % self.best_pred)
def validation(self, epoch, test=False):
self.model.eval()
self.evaluator.reset()
ret_list = []
if test:
tbar = tqdm(self.test_dataloader, desc='\r')
else:
tbar = tqdm(self.val_dataloader, desc='\r')
test_loss = 0.0
visualization_index = int(random.random() * len(self.val_dataloader))
vis_img, vis_tgt, vis_out = None, None, None
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
image, target = image.cuda(), target.cuda()
with torch.no_grad():
output = self.model(image)
if i == visualization_index:
vis_img, vis_tgt, vis_out = image, target, output
loss = self.criterion(output, target)
test_loss += loss.item()
tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
pred = torch.argmax(output, dim=1).data.cpu().numpy()
target = target.cpu().numpy()
self.evaluator.add_batch(target, pred)
Acc = self.evaluator.Pixel_Accuracy()
Acc_class = self.evaluator.Pixel_Accuracy_Class()
mIoU = self.evaluator.Mean_Intersection_over_Union()
mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20()
FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
if not test:
self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch)
self.val_writer.add_scalar('mIoU', mIoU, epoch)
self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch)
self.val_writer.add_scalar('Acc', Acc, epoch)
self.val_writer.add_scalar('Acc_class', Acc_class, epoch)
self.val_writer.add_scalar('fwIoU', FWIoU, epoch)
if constants.VISUALIZATION:
self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch)
print("Test: " if test else "Validation:")
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print("Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU))
print('Loss: %.3f' % test_loss)
if not test:
new_pred = mIoU
if new_pred > self.best_pred:
self.best_pred = new_pred
self.saver.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
})
return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list
def load_best_checkpoint(self):
checkpoint = self.saver.load_checkpoint()
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
print(f'=> loaded checkpoint - epoch {checkpoint["epoch"]})')
return checkpoint["epoch"]
| [
"tqdm.tqdm",
"torch.utils.data.DataLoader",
"utils.loss.SegmentationLosses",
"torch.argmax",
"utils.metrics.Evaluator",
"random.random",
"torch.optim.Adam",
"torch.no_grad",
"os.path.join",
"utils.misc.get_learning_rate",
"torch.optim.SGD"
] | [((595, 689), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers'}), '(train_set, batch_size=args.batch_size, shuffle=True, num_workers\n =args.workers)\n', (605, 689), False, 'from torch.utils.data import DataLoader\n'), ((715, 808), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers'}), '(val_set, batch_size=args.batch_size, shuffle=False, num_workers=\n args.workers)\n', (725, 808), False, 'from torch.utils.data import DataLoader\n'), ((835, 929), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers'}), '(test_set, batch_size=args.batch_size, shuffle=False, num_workers\n =args.workers)\n', (845, 929), False, 'from torch.utils.data import DataLoader\n'), ((2591, 2623), 'utils.metrics.Evaluator', 'Evaluator', (['train_set.num_classes'], {}), '(train_set.num_classes)\n', (2600, 2623), False, 'from utils.metrics import Evaluator\n'), ((2801, 2839), 'tqdm.tqdm', 'tqdm', (['self.train_dataloader'], {'desc': "'\\r'"}), "(self.train_dataloader, desc='\\r')\n", (2805, 2839), False, 'from tqdm import tqdm\n'), ((973, 1021), 'os.path.join', 'os.path.join', (['self.saver.experiment_dir', '"""train"""'], {}), "(self.saver.experiment_dir, 'train')\n", (985, 1021), False, 'import os\n'), ((1133, 1186), 'os.path.join', 'os.path.join', (['self.saver.experiment_dir', '"""validation"""'], {}), "(self.saver.experiment_dir, 'validation')\n", (1145, 1186), False, 'import os\n'), ((1782, 1896), 'torch.optim.SGD', 'torch.optim.SGD', (['train_params'], {'momentum': 'args.momentum', 'weight_decay': 'args.weight_decay', 'nesterov': 'args.nesterov'}), '(train_params, momentum=args.momentum, weight_decay=args.\n weight_decay, nesterov=args.nesterov)\n', (1797, 1896), False, 'import torch\n'), ((3029, 3062), 'utils.misc.get_learning_rate', 'get_learning_rate', (['self.optimizer'], {}), '(self.optimizer)\n', (3046, 3062), False, 'from utils.misc import get_learning_rate\n'), ((4367, 4404), 'tqdm.tqdm', 'tqdm', (['self.test_dataloader'], {'desc': "'\\r'"}), "(self.test_dataloader, desc='\\r')\n", (4371, 4404), False, 'from tqdm import tqdm\n'), ((4438, 4474), 'tqdm.tqdm', 'tqdm', (['self.val_dataloader'], {'desc': "'\\r'"}), "(self.val_dataloader, desc='\\r')\n", (4442, 4474), False, 'from tqdm import tqdm\n'), ((1992, 2054), 'torch.optim.Adam', 'torch.optim.Adam', (['train_params'], {'weight_decay': 'args.weight_decay'}), '(train_params, weight_decay=args.weight_decay)\n', (2008, 2054), False, 'import torch\n'), ((2466, 2533), 'utils.loss.SegmentationLosses', 'SegmentationLosses', ([], {'weight': 'weight', 'ignore_index': '(255)', 'cuda': 'args.cuda'}), '(weight=weight, ignore_index=255, cuda=args.cuda)\n', (2484, 2533), False, 'from utils.loss import SegmentationLosses\n'), ((2875, 2890), 'random.random', 'random.random', ([], {}), '()\n', (2888, 2890), False, 'import random\n'), ((4534, 4549), 'random.random', 'random.random', ([], {}), '()\n', (4547, 4549), False, 'import random\n'), ((4809, 4824), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4822, 4824), False, 'import torch\n'), ((5160, 5187), 'torch.argmax', 'torch.argmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (5172, 5187), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by lhao at 2019-05-17
'''
input: L.reuteri protein sequence
output: draft model
'''
import os
import cobra
import My_def
import pandas as pd
os.chdir('../../ComplementaryData/Step_02_DraftModels/')
case = 'other' #'first' or 'other'
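# Workflow: (1) build draft GEMs with CarveMe (a universal and a gram-positive template),
# (2) load them with cobrapy and standardize metabolite/reaction IDs against the BiGG tables,
# (3) apply the manual fixes noted in the comparison report and save the drafts as JSON.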
# %% <build>
if case == 'first':
#Gram positive
os.system('carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml');
#all
os.system('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml');
# %% <standardization>
def CarveMe_processing(covermemodel):
#change gene id 'G_id'
for gen in covermemodel.genes:
gen.id = gen.id.replace('G_','')
# combine met according report
My_def.model_report.combine_met('cyst__L_c','cysth__L_c',covermemodel)
return covermemodel
Lreu_ca_gp = cobra.io.read_sbml_model('CarveMe/Lreu_ca_gp.xml')
Lreu_ca_gp.description = 'GEM of L reuteri by CarveMe'
Lreu_ca_gp.id = 'Lreu_ca_gp'
Lreu_ca = cobra.io.read_sbml_model('CarveMe/Lreu_ca.xml')
Lreu_ca.description = 'GEM of L reuteri by CarveMe'
Lreu_ca.id = 'Lreu_ca'
bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\t')
bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\t')
Lreu_ca_standardlized, ca_report = My_def.model_report.model_report_compare_bigg(Lreu_ca, bigg_rea_df, bigg_met_df, compartment='_')
Lreu_ca_gp_standardlized, ca_gp_report = My_def.model_report.model_report_compare_bigg(Lreu_ca_gp, bigg_rea_df, bigg_met_df,
compartment='_')
# %% <Manual change according the report>
Lreu_ca_gp_standardlized = CarveMe_processing(Lreu_ca_gp_standardlized)
Lreu_ca_standardlized = CarveMe_processing(Lreu_ca_standardlized)
cobra.io.save_json_model(Lreu_ca_standardlized, 'CarveMe/Lreu_ca.json')
cobra.io.save_json_model(Lreu_ca_gp_standardlized, 'CarveMe/Lreu_ca_gp.json')
#My_def.io_outtxt(Lreu_ca,'CarveMe/Lreu_ca.txt',True)
#My_def.io_outtxt(Lreu_ca_gp,'CarveMe/Lreu_ca_gp.txt',True)
| [
"cobra.io.read_sbml_model",
"My_def.model_report.combine_met",
"pandas.read_csv",
"os.system",
"cobra.io.save_json_model",
"My_def.model_report.model_report_compare_bigg",
"os.chdir"
] | [((203, 259), 'os.chdir', 'os.chdir', (['"""../../ComplementaryData/Step_02_DraftModels/"""'], {}), "('../../ComplementaryData/Step_02_DraftModels/')\n", (211, 259), False, 'import os\n'), ((854, 904), 'cobra.io.read_sbml_model', 'cobra.io.read_sbml_model', (['"""CarveMe/Lreu_ca_gp.xml"""'], {}), "('CarveMe/Lreu_ca_gp.xml')\n", (878, 904), False, 'import cobra\n'), ((1000, 1047), 'cobra.io.read_sbml_model', 'cobra.io.read_sbml_model', (['"""CarveMe/Lreu_ca.xml"""'], {}), "('CarveMe/Lreu_ca.xml')\n", (1024, 1047), False, 'import cobra\n'), ((1140, 1197), 'pandas.read_csv', 'pd.read_csv', (['"""../bigg_database/bigg_rea_df.csv"""'], {'sep': '"""\t"""'}), "('../bigg_database/bigg_rea_df.csv', sep='\\t')\n", (1151, 1197), True, 'import pandas as pd\n'), ((1212, 1269), 'pandas.read_csv', 'pd.read_csv', (['"""../bigg_database/bigg_met_df.csv"""'], {'sep': '"""\t"""'}), "('../bigg_database/bigg_met_df.csv', sep='\\t')\n", (1223, 1269), True, 'import pandas as pd\n'), ((1306, 1407), 'My_def.model_report.model_report_compare_bigg', 'My_def.model_report.model_report_compare_bigg', (['Lreu_ca', 'bigg_rea_df', 'bigg_met_df'], {'compartment': '"""_"""'}), "(Lreu_ca, bigg_rea_df,\n bigg_met_df, compartment='_')\n", (1351, 1407), False, 'import My_def\n'), ((1445, 1549), 'My_def.model_report.model_report_compare_bigg', 'My_def.model_report.model_report_compare_bigg', (['Lreu_ca_gp', 'bigg_rea_df', 'bigg_met_df'], {'compartment': '"""_"""'}), "(Lreu_ca_gp, bigg_rea_df,\n bigg_met_df, compartment='_')\n", (1490, 1549), False, 'import My_def\n'), ((1795, 1866), 'cobra.io.save_json_model', 'cobra.io.save_json_model', (['Lreu_ca_standardlized', '"""CarveMe/Lreu_ca.json"""'], {}), "(Lreu_ca_standardlized, 'CarveMe/Lreu_ca.json')\n", (1819, 1866), False, 'import cobra\n'), ((1867, 1944), 'cobra.io.save_json_model', 'cobra.io.save_json_model', (['Lreu_ca_gp_standardlized', '"""CarveMe/Lreu_ca_gp.json"""'], {}), "(Lreu_ca_gp_standardlized, 'CarveMe/Lreu_ca_gp.json')\n", (1891, 1944), False, 'import cobra\n'), ((351, 449), 'os.system', 'os.system', (['"""carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml"""'], {}), "(\n 'carve Lreuteri_biogaia_v03.faa --cobra -u grampos -o CarveMe/Lreu_ca_gp.xml'\n )\n", (360, 449), False, 'import os\n'), ((455, 530), 'os.system', 'os.system', (['"""carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml"""'], {}), "('carve Lreuteri_biogaia_v03.faa --cobra -o CarveMe/Lreu_ca.xml')\n", (464, 530), False, 'import os\n'), ((744, 816), 'My_def.model_report.combine_met', 'My_def.model_report.combine_met', (['"""cyst__L_c"""', '"""cysth__L_c"""', 'covermemodel'], {}), "('cyst__L_c', 'cysth__L_c', covermemodel)\n", (775, 816), False, 'import My_def\n')] |
import torch.nn as nn
import torch
import numpy as np
import cv2 as cv
def calculate_EPR(model):  # TODO: try computing the effective receptive field with loaded pretrained weights
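    # Approach: give every conv small constant weights and neutral BatchNorm statistics, feed a
    # constant all-ones image, and backpropagate a gradient that is 1 only at the centre of each
    # feature map. The footprint of the resulting gradient on the input approximates the
    # (effective) receptive field at that feature level, which show() then visualizes and measures.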
for module in model.modules():
try:
nn.init.constant_(module.weight, 0.05)
nn.init.zeros_(module.bias)
nn.init.zeros_(module.running_mean)
nn.init.ones_(module.running_var)
except Exception as e:
pass
if type(module) is nn.BatchNorm2d:
module.eval()
input = torch.ones(1, 3, 640, 640, requires_grad= True)
model.zero_grad()
features = model(input)
for i in range(len(features)):
# if i != len(features)-1:
# continue
x = features[i]
#g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]])
g_x = torch.zeros_like(x)
h, w = g_x.shape[2]//2, g_x.shape[3]//2
g_x[:, :, h, w] = 1
x.backward(g_x, retain_graph = True)
# x = torch.mean(x, 1, keepdim=True)
# fake_fp = x * g_x[0, 0, ...]
# fake_loss = torch.mean(fake_fp)
# fake_loss.backward(retain_graph=True)
show(input, i)
model.zero_grad()
input.grad.data.zero_()
cv.waitKey(2000)
cv.waitKey(0)
def cal_rf_wh(grad_input):
binary_map: np.ndarray = (grad_input[:, :] > 0.0)
x_cs: np.ndarray = binary_map.sum(-1) >= 1
y_cs: np.ndarray = binary_map.sum(0) >= 1
width = x_cs.sum()
height = y_cs.sum()
return (width, height)
def show(input, i):
grad_input = np.abs(input.grad.data.numpy())
grad_input = grad_input / np.max(grad_input)
grad_input = grad_input.mean(0).mean(0)
    # effective receptive field 0.75 - 0.85
#grad_input = np.where(grad_input > 0.85,1,0)
#grad_input_ = np.where(grad_input > 0.75, 1, grad_input)
# effient_values = grad_input > 0.0
# samll_effient_values = grad_input <= 0.2
# grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1
#grad_input = grad_input * 100
width, height = cal_rf_wh(grad_input)
print("width:", width, "height:", height)
grad_input_ERF = np.where(grad_input>0.01, 1, 0)
width, height = cal_rf_wh(grad_input_ERF)
print("ERF_width:", width, "ERF_height:", height)
    grad_input = np.expand_dims(grad_input, axis=2).repeat(3, axis=2)  # keep the result: stack to 3 channels for display
grad_input = (grad_input * 255).astype(np.uint8)
cv.imshow("receip_field"+str(i), grad_input)
#cv.imwrite("./receip_field"+str(i)+".png", grad_input)
| [
"torch.ones",
"torch.zeros_like",
"cv2.waitKey",
"numpy.expand_dims",
"torch.nn.init.zeros_",
"numpy.max",
"numpy.where",
"torch.nn.init.constant_",
"torch.nn.init.ones_"
] | [((408, 454), 'torch.ones', 'torch.ones', (['(1)', '(3)', '(640)', '(640)'], {'requires_grad': '(True)'}), '(1, 3, 640, 640, requires_grad=True)\n', (418, 454), False, 'import torch\n'), ((1017, 1030), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1027, 1030), True, 'import cv2 as cv\n'), ((1837, 1870), 'numpy.where', 'np.where', (['(grad_input > 0.01)', '(1)', '(0)'], {}), '(grad_input > 0.01, 1, 0)\n', (1845, 1870), True, 'import numpy as np\n'), ((660, 679), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (676, 679), False, 'import torch\n'), ((999, 1015), 'cv2.waitKey', 'cv.waitKey', (['(2000)'], {}), '(2000)\n', (1009, 1015), True, 'import cv2 as cv\n'), ((1358, 1376), 'numpy.max', 'np.max', (['grad_input'], {}), '(grad_input)\n', (1364, 1376), True, 'import numpy as np\n'), ((164, 202), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.weight', '(0.05)'], {}), '(module.weight, 0.05)\n', (181, 202), True, 'import torch.nn as nn\n'), ((206, 233), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['module.bias'], {}), '(module.bias)\n', (220, 233), True, 'import torch.nn as nn\n'), ((237, 272), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['module.running_mean'], {}), '(module.running_mean)\n', (251, 272), True, 'import torch.nn as nn\n'), ((276, 309), 'torch.nn.init.ones_', 'nn.init.ones_', (['module.running_var'], {}), '(module.running_var)\n', (289, 309), True, 'import torch.nn as nn\n'), ((1967, 2001), 'numpy.expand_dims', 'np.expand_dims', (['grad_input'], {'axis': '(2)'}), '(grad_input, axis=2)\n', (1981, 2001), True, 'import numpy as np\n')] |